@@ -17,8 +17,10 @@
 #include <linux/smp.h>
 #include <linux/slab.h>
 
+#include <asm/barrier.h>
 #include <asm/cacheflush.h>
 #include <asm/dsemul.h>
+#include <asm/ginvt.h>
 #include <asm/hazards.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
@@ -73,6 +75,19 @@
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
 
 /*
+ * The ginvt instruction will invalidate wired entries when its type field
+ * targets anything other than the entire TLB. That means that if we were to
+ * allow the kernel to create wired entries with the MMID of current->active_mm
+ * then those wired entries could be invalidated when we later use ginvt to
+ * invalidate TLB entries with that MMID.
+ *
+ * In order to prevent ginvt from trashing wired entries, we reserve one MMID
+ * for use by the kernel when creating wired entries. This MMID will never be
+ * assigned to a struct mm, and we'll never target it with a ginvt instruction.
+ */
+#define MMID_KERNEL_WIRED	0
+
+/*
  * All unused by hardware upper bits will be considered
  * as a software asid extension.
  */
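
The define above only reserves the value; the code that actually writes wired entries lives outside this header and is not shown in this diff. As a rough illustration of why the reservation matters, a wired-entry writer would be expected to switch to MMID_KERNEL_WIRED around the TLB write, along these lines (hypothetical helper name; standard MIPS CP0 accessors assumed, index/wired bookkeeping omitted):

static void write_kernel_wired_entry(unsigned long entryhi,
				     unsigned long entrylo0,
				     unsigned long entrylo1,
				     unsigned long pagemask)
{
	u32 old_mmid = 0;

	if (cpu_has_mmid) {
		/* Tag the entry with the reserved MMID so ginvt never hits it. */
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}

	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	write_c0_pagemask(pagemask);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
}
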
@@ -88,7 +103,23 @@
 	return ~asid_version_mask(cpu) + 1;
 }
 
-#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
+static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
+{
+	if (cpu_has_mmid)
+		return atomic64_read(&mm->context.mmid);
+
+	return mm->context.asid[cpu];
+}
+
+static inline void set_cpu_context(unsigned int cpu,
+				   struct mm_struct *mm, u64 ctx)
+{
+	if (cpu_has_mmid)
+		atomic64_set(&mm->context.mmid, ctx);
+	else
+		mm->context.asid[cpu] = ctx;
+}
+
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 #define cpu_asid(cpu, mm) \
 	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
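
Because cpu_context() is now a function rather than a macro expanding to an lvalue, existing code that assigned to it has to be converted to the new setter. The pattern, as seen later in this patch in init_new_context() and drop_mmu_context(), is simply:

	/* before: the macro could be used as an lvalue */
	cpu_context(cpu, mm) = 0;

	/* after: reads keep using cpu_context(), writes go through set_cpu_context() */
	set_cpu_context(cpu, mm, 0);
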
@@ -97,21 +128,9 @@
 {
 }
 
-
-/* Normal, classic MIPS get_new_mmu_context */
-static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
-{
-	u64 asid = asid_cache(cpu);
-
-	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
-		if (cpu_has_vtag_icache)
-			flush_icache_all();
-		local_flush_tlb_all();	/* start new asid cycle */
-	}
-
-	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
+extern void get_new_mmu_context(struct mm_struct *mm);
+extern void check_mmu_context(struct mm_struct *mm);
+extern void check_switch_mmu_context(struct mm_struct *mm);
 
 /*
  * Initialize the context related info for a new mm_struct
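
get_new_mmu_context() loses its cpu argument and moves out of line, together with the new check_mmu_context() and check_switch_mmu_context() helpers; their definitions are not part of this header diff and presumably land in a C file elsewhere in the series. A minimal sketch of what the classic, non-MMID paths would look like, reconstructed from the inline code removed above and from the switch_mm() hunk below:

void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}

void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/* Check if our ASID is of an older version and thus invalid. */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
		get_new_mmu_context(mm);
}

void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	check_mmu_context(mm);
	write_c0_entryhi(cpu_asid(cpu, mm));
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
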
@@ -122,8 +141,12 @@
 {
 	int i;
 
-	for_each_possible_cpu(i)
-		cpu_context(i, mm) = 0;
+	if (cpu_has_mmid) {
+		set_cpu_context(0, mm, 0);
+	} else {
+		for_each_possible_cpu(i)
+			set_cpu_context(i, mm, 0);
+	}
 
 	mm->context.bd_emupage_allocmap = NULL;
 	spin_lock_init(&mm->context.bd_emupage_lock);
@@ -140,11 +163,7 @@
 	local_irq_save(flags);
 
 	htw_stop();
-	/* Check if our ASID is of an older version and thus invalid */
-	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
-		get_new_mmu_context(next, cpu);
-	write_c0_entryhi(cpu_asid(cpu, next));
-	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+	check_switch_mmu_context(next);
 
 	/*
 	 * Mark current->active_mm as not "active" anymore.
@@ -166,55 +185,55 @@
 	dsemul_mm_cleanup(mm);
 }
 
+#define activate_mm(prev, next)	switch_mm(prev, next, current)
 #define deactivate_mm(tsk, mm)	do { } while (0)
 
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
 static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
+drop_mmu_context(struct mm_struct *mm)
 {
 	unsigned long flags;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+	u32 old_mmid;
+	u64 ctx;
 
 	local_irq_save(flags);
 
-	htw_stop();
-	/* Unconditionally get a new ASID. */
-	get_new_mmu_context(next, cpu);
+	cpu = smp_processor_id();
+	ctx = cpu_context(cpu, mm);
 
-	write_c0_entryhi(cpu_asid(cpu, next));
-	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
-
-	/* mark mmu ownership change */
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
-	cpumask_set_cpu(cpu, mm_cpumask(next));
-	htw_start();
-
-	local_irq_restore(flags);
-}
-
-/*
- * If mm is currently active_mm, we can't really drop it. Instead,
- * we will get a new one for it.
- */
-static inline void
-drop_mmu_context(struct mm_struct *mm, unsigned cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	htw_stop();
-
-	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
-		get_new_mmu_context(mm, cpu);
+	if (!ctx) {
+		/* no-op */
+	} else if (cpu_has_mmid) {
+		/*
+		 * Globally invalidating TLB entries associated with the MMID
+		 * is pretty cheap using the GINVT instruction, so we'll do
+		 * that rather than incur the overhead of allocating a new
+		 * MMID. The latter would be especially difficult since MMIDs
+		 * are global & other CPUs may be actively using ctx.
+		 */
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
+		mtc0_tlbw_hazard();
+		ginvt_mmid();
+		sync_ginv();
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
+	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+		/*
+		 * mm is currently active, so we can't really drop it.
+		 * Instead we bump the ASID.
+		 */
+		htw_stop();
+		get_new_mmu_context(mm);
 		write_c0_entryhi(cpu_asid(cpu, mm));
+		htw_start();
 	} else {
 		/* will get a new context next time */
-		cpu_context(cpu, mm) = 0;
+		set_cpu_context(cpu, mm, 0);
 	}
-	htw_start();
+
 	local_irq_restore(flags);
 }
 
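
Finally, note that drop_mmu_context() now takes only the mm and derives the CPU itself inside the IRQ-disabled region, so call sites (not shown in this diff) lose their cpu argument:

	/* before */
	drop_mmu_context(mm, smp_processor_id());

	/* after: the function picks the CPU itself with IRQs disabled */
	drop_mmu_context(mm);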