hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/arch/mips/include/asm/mmu_context.h
@@ -17,8 +17,10 @@
 #include <linux/smp.h>
 #include <linux/slab.h>
 
+#include <asm/barrier.h>
 #include <asm/cacheflush.h>
 #include <asm/dsemul.h>
+#include <asm/ginvt.h>
 #include <asm/hazards.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
@@ -73,6 +75,19 @@
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
 /*
+ * The ginvt instruction will invalidate wired entries when its type field
+ * targets anything other than the entire TLB. That means that if we were to
+ * allow the kernel to create wired entries with the MMID of current->active_mm
+ * then those wired entries could be invalidated when we later use ginvt to
+ * invalidate TLB entries with that MMID.
+ *
+ * In order to prevent ginvt from trashing wired entries, we reserve one MMID
+ * for use by the kernel when creating wired entries. This MMID will never be
+ * assigned to a struct mm, and we'll never target it with a ginvt instruction.
+ */
+#define MMID_KERNEL_WIRED	0
+
+/*
 * All unused by hardware upper bits will be considered
 * as a software asid extension.
 */
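
The reserved MMID implies that any code writing wired entries must temporarily switch the MemoryMapID register to MMID_KERNEL_WIRED around the TLB write. A minimal sketch of that pattern, using only helpers that appear elsewhere in this patch; write_one_wired_entry() is a hypothetical stand-in for the actual TLB write sequence in arch/mips/mm/tlb-r4k.c, which is not part of this diff:

	static void write_kernel_wired_entry(void)
	{
		u32 old_mmid = 0;

		/* caller is assumed to run with interrupts disabled */
		if (cpu_has_mmid) {
			/* tag the wired entry with the reserved kernel MMID */
			old_mmid = read_c0_memorymapid();
			write_c0_memorymapid(MMID_KERNEL_WIRED);
			mtc0_tlbw_hazard();
		}

		write_one_wired_entry();	/* hypothetical helper */

		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
	}

Because drop_mmu_context() below only ever targets ginvt at an mm's own MMID, an entry tagged this way is never hit by a targeted invalidation.
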
@@ -88,7 +103,23 @@
 	return ~asid_version_mask(cpu) + 1;
 }
 
-#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
+static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
+{
+	if (cpu_has_mmid)
+		return atomic64_read(&mm->context.mmid);
+
+	return mm->context.asid[cpu];
+}
+
+static inline void set_cpu_context(unsigned int cpu,
+				   struct mm_struct *mm, u64 ctx)
+{
+	if (cpu_has_mmid)
+		atomic64_set(&mm->context.mmid, ctx);
+	else
+		mm->context.asid[cpu] = ctx;
+}
+
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 #define cpu_asid(cpu, mm) \
 	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
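
The accessor pair replaces the old cpu_context() macro because, with cpu_has_mmid, the context is a single MMID shared by all CPUs (mm->context.mmid) rather than a per-CPU ASID, and reads go through atomic64_read() so 32-bit kernels don't see a torn 64-bit value. A usage sketch built only from the API above (the function name is illustrative):

	static void context_accessors_demo(struct mm_struct *mm)
	{
		unsigned int cpu = smp_processor_id();
		u64 ctx = cpu_context(cpu, mm);

		/* the low bits are what the hardware sees; cpu_asid() extracts them */
		pr_debug("hw id %llu, full context %llu\n",
			 (unsigned long long)(ctx & cpu_asid_mask(&cpu_data[cpu])),
			 (unsigned long long)ctx);

		set_cpu_context(cpu, mm, 0);	/* force re-allocation on next use */
	}
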
@@ -97,21 +128,9 @@
 {
 }
 
-
-/* Normal, classic MIPS get_new_mmu_context */
-static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
-{
-	u64 asid = asid_cache(cpu);
-
-	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
-		if (cpu_has_vtag_icache)
-			flush_icache_all();
-		local_flush_tlb_all();	/* start new asid cycle */
-	}
-
-	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
+extern void get_new_mmu_context(struct mm_struct *mm);
+extern void check_mmu_context(struct mm_struct *mm);
+extern void check_switch_mmu_context(struct mm_struct *mm);
 
 /*
 * Initialize the context related info for a new mm_struct
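
The ASID allocation and version check that used to be inline here move out of line into the arch's mm context code, which is not shown in this diff. Judging from the logic removed from switch_mm() below, check_mmu_context() on the classic-ASID path amounts to roughly this sketch (the MMID path presumably differs, since a globally shared MMID cannot be re-versioned per CPU):

	void check_mmu_context(struct mm_struct *mm)
	{
		unsigned int cpu = smp_processor_id();

		/* is our ASID from an older generation and thus invalid? */
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
			get_new_mmu_context(mm);
	}
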
@@ -122,8 +141,12 @@
 {
 	int i;
 
-	for_each_possible_cpu(i)
-		cpu_context(i, mm) = 0;
+	if (cpu_has_mmid) {
+		set_cpu_context(0, mm, 0);
+	} else {
+		for_each_possible_cpu(i)
+			set_cpu_context(i, mm, 0);
+	}
 
 	mm->context.bd_emupage_allocmap = NULL;
 	spin_lock_init(&mm->context.bd_emupage_lock);
@@ -140,11 +163,7 @@
 	local_irq_save(flags);
 
 	htw_stop();
-	/* Check if our ASID is of an older version and thus invalid */
-	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
-		get_new_mmu_context(next, cpu);
-	write_c0_entryhi(cpu_asid(cpu, next));
-	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+	check_switch_mmu_context(next);
 
 	/*
 	 * Mark current->active_mm as not "active" anymore.
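
check_switch_mmu_context(next) bundles the three steps deleted above: validate the context, load the hardware ID register, and point the TLB-miss handler at the new PGD. A sketch of the expected out-of-line body (not part of this diff; write_c0_memorymapid() is the same register access drop_mmu_context() uses below):

	void check_switch_mmu_context(struct mm_struct *mm)
	{
		unsigned int cpu = smp_processor_id();

		check_mmu_context(mm);
		if (cpu_has_mmid)
			write_c0_memorymapid(cpu_asid(cpu, mm));
		else
			write_c0_entryhi(cpu_asid(cpu, mm));
		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
	}
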
@@ -166,55 +185,55 @@
 	dsemul_mm_cleanup(mm);
 }
 
+#define activate_mm(prev, next)	switch_mm(prev, next, current)
 #define deactivate_mm(tsk, mm)	do { } while (0)
 
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
 static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
+drop_mmu_context(struct mm_struct *mm)
 {
 	unsigned long flags;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+	u32 old_mmid;
+	u64 ctx;
 
 	local_irq_save(flags);
 
-	htw_stop();
-	/* Unconditionally get a new ASID.  */
-	get_new_mmu_context(next, cpu);
+	cpu = smp_processor_id();
+	ctx = cpu_context(cpu, mm);
 
-	write_c0_entryhi(cpu_asid(cpu, next));
-	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
-
-	/* mark mmu ownership change */
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
-	cpumask_set_cpu(cpu, mm_cpumask(next));
-	htw_start();
-
-	local_irq_restore(flags);
-}
-
-/*
- * If mm is currently active_mm, we can't really drop it. Instead,
- * we will get a new one for it.
- */
-static inline void
-drop_mmu_context(struct mm_struct *mm, unsigned cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	htw_stop();
-
-	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
-		get_new_mmu_context(mm, cpu);
+	if (!ctx) {
+		/* no-op */
+	} else if (cpu_has_mmid) {
+		/*
+		 * Globally invalidating TLB entries associated with the MMID
+		 * is pretty cheap using the GINVT instruction, so we'll do
+		 * that rather than incur the overhead of allocating a new
+		 * MMID. The latter would be especially difficult since MMIDs
+		 * are global & other CPUs may be actively using ctx.
+		 */
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
+		mtc0_tlbw_hazard();
+		ginvt_mmid();
+		sync_ginv();
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
+	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+		/*
+		 * mm is currently active, so we can't really drop it.
+		 * Instead we bump the ASID.
+		 */
+		htw_stop();
+		get_new_mmu_context(mm);
 		write_c0_entryhi(cpu_asid(cpu, mm));
+		htw_start();
 	} else {
 		/* will get a new context next time */
-		cpu_context(cpu, mm) = 0;
+		set_cpu_context(cpu, mm, 0);
 	}
-	htw_start();
+
 	local_irq_restore(flags);
 }
 
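
With the cpu parameter gone, callers only pass the mm. A typical caller looks roughly like this sketch (modelled on the MIPS local TLB flush path, which is not part of this diff):

	void local_flush_tlb_mm(struct mm_struct *mm)
	{
		int cpu;

		preempt_disable();
		cpu = smp_processor_id();

		if (cpu_context(cpu, mm) != 0)
			drop_mmu_context(mm);

		preempt_enable();
	}

On an MMID system this takes the GINVT branch above: ginvt_mmid() globally invalidates TLB entries tagged with the mm's MMID, and sync_ginv() waits for that invalidation to complete before the old MemoryMapID is restored.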