@@ -21,8 +21,11 @@
 
 extern int isolate_lru_page(struct page *page);	/* from internal.h */
 extern bool mm_iommu_preregistered(struct mm_struct *mm);
-extern long mm_iommu_get(struct mm_struct *mm,
+extern long mm_iommu_new(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries,
+		struct mm_iommu_table_group_mem_t **pmem);
+extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
+		unsigned long entries, unsigned long dev_hpa,
 		struct mm_iommu_table_group_mem_t **pmem);
 extern long mm_iommu_put(struct mm_struct *mm,
 		struct mm_iommu_table_group_mem_t *mem);
@@ -32,15 +35,24 @@
 		unsigned long ua, unsigned long size);
 extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
 		struct mm_struct *mm, unsigned long ua, unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
+extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+		unsigned int pageshift, unsigned long *size);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
+#else
+static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+		unsigned int pageshift, unsigned long *size)
+{
+	return false;
+}
+static inline void mm_iommu_init(struct mm_struct *mm) { }
 #endif
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 extern void set_context(unsigned long id, pgd_t *pgd);
@@ -82,7 +94,7 @@
 {
 	int context_id;
 
-	context_id = get_ea_context(&mm->context, ea);
+	context_id = get_user_context(&mm->context, ea);
 	if (!context_id)
 		return true;
 	return false;
@@ -173,6 +185,34 @@
 		dec_mm_active_cpus(mm);
 	}
 }
+
+/*
+ * vas_windows counter shows number of open windows in the mm
+ * context. During context switch, use this counter to clear the
+ * foreign real address mapping (CP_ABORT) for the thread / process
+ * that intend to use COPY/PASTE. When a process closes all windows,
+ * disable CP_ABORT which is expensive to run.
+ *
+ * For user context, register a copro so that TLBIs are seen by the
+ * nest MMU. mm_context_add/remove_vas_window() are used only for user
+ * space windows.
+ */
+static inline void mm_context_add_vas_window(struct mm_struct *mm)
+{
+	atomic_inc(&mm->context.vas_windows);
+	mm_context_add_copro(mm);
+}
+
+static inline void mm_context_remove_vas_window(struct mm_struct *mm)
+{
+	int v;
+
+	mm_context_remove_copro(mm);
+	v = atomic_dec_if_positive(&mm->context.vas_windows);
+
+	/* Detect imbalance between add and remove */
+	WARN_ON(v < 0);
+}
 #else
 static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
 static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
@@ -217,25 +257,13 @@
 #endif
 }
 
-#ifndef CONFIG_PPC_BOOK3S_64
-static inline void arch_exit_mmap(struct mm_struct *mm)
-{
-}
-#else
 extern void arch_exit_mmap(struct mm_struct *mm);
-#endif
 
 static inline void arch_unmap(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end)
 {
 	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
 		mm->context.vdso_base = 0;
-}
-
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
 }
 
 #ifdef CONFIG_PPC_MEM_KEYS