hc
2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/openrisc/kernel/smp.c
@@ -14,6 +14,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/irq.h>
 #include <asm/cpuinfo.h>
 #include <asm/mmu_context.h>
@@ -113,7 +114,7 @@
          * All kernel threads share the same mm context; grab a
          * reference and switch to it.
          */
-        atomic_inc(&mm->mm_count);
+        mmgrab(mm);
         current->active_mm = mm;
         cpumask_set_cpu(cpu, mm_cpumask(mm));

....@@ -133,8 +134,6 @@
133134 set_cpu_online(cpu, true);
134135
135136 local_irq_enable();
136
-
137
- preempt_disable();
138137 /*
139138 * OK, it's off to the idle thread for us
140139 */
@@ -218,30 +217,101 @@
         local_flush_tlb_all();
 }

+static inline void ipi_flush_tlb_mm(void *info)
+{
+        struct mm_struct *mm = (struct mm_struct *)info;
+
+        local_flush_tlb_mm(mm);
+}
+
+static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
+{
+        unsigned int cpuid;
+
+        if (cpumask_empty(cmask))
+                return;
+
+        cpuid = get_cpu();
+
+        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+                /* local cpu is the only cpu present in cpumask */
+                local_flush_tlb_mm(mm);
+        } else {
+                on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
+        }
+        put_cpu();
+}
+
+struct flush_tlb_data {
+        unsigned long addr1;
+        unsigned long addr2;
+};
+
+static inline void ipi_flush_tlb_page(void *info)
+{
+        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+        local_flush_tlb_page(NULL, fd->addr1);
+}
+
+static inline void ipi_flush_tlb_range(void *info)
+{
+        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+        local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
+}
+
+static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
+                                unsigned long end)
+{
+        unsigned int cpuid;
+
+        if (cpumask_empty(cmask))
+                return;
+
+        cpuid = get_cpu();
+
+        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+                /* local cpu is the only cpu present in cpumask */
+                if ((end - start) <= PAGE_SIZE)
+                        local_flush_tlb_page(NULL, start);
+                else
+                        local_flush_tlb_range(NULL, start, end);
+        } else {
+                struct flush_tlb_data fd;
+
+                fd.addr1 = start;
+                fd.addr2 = end;
+
+                if ((end - start) <= PAGE_SIZE)
+                        on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
+                else
+                        on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
+        }
+        put_cpu();
+}
+
 void flush_tlb_all(void)
 {
         on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }

-/*
- * FIXME: implement proper functionality instead of flush_tlb_all.
- * *But*, as things currently stands, the local_tlb_flush_* functions will
- * all boil down to local_tlb_flush_all anyway.
- */
 void flush_tlb_mm(struct mm_struct *mm)
 {
-        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+        smp_flush_tlb_mm(mm_cpumask(mm), mm);
 }

 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+        smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
 }

 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+        const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
+                                          : cpu_online_mask;
+        smp_flush_tlb_range(cmask, start, end);
 }

 /* Instruction cache invalidate - performed on each cpu */
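Note on the dispatch pattern: the new smp_flush_tlb_mm() and smp_flush_tlb_range() helpers follow the same rule - return early for an empty cpumask, flush only the local TLB when the calling CPU is the sole CPU in the mask, and otherwise run the flush on every CPU in the mask via on_each_cpu_mask(); get_cpu()/put_cpu() keep the caller pinned to one CPU while the mask is examined. Below is a minimal standalone sketch of that rule only. The cpumask type and helper names are simplified stand-ins invented for illustration, not the kernel's cpumask API, and the sketch omits the preemption pinning.

/*
 * Standalone sketch (not kernel code) of the "local-only vs. IPI" dispatch
 * used by the smp_flush_tlb_*() helpers in the hunk above.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int cpumask_t;         /* hypothetical stand-in: one bit per CPU */

static bool mask_empty(cpumask_t m)
{
        return m == 0;
}

static bool any_cpu_but(cpumask_t m, unsigned int self)
{
        return (m & ~(1u << self)) != 0;        /* any bit set besides our own? */
}

static void local_flush(unsigned int cpu)
{
        printf("cpu%u: local TLB flush\n", cpu);
}

static void flush_on_mask(cpumask_t m)
{
        printf("run TLB flush on every CPU in mask 0x%x\n", m);
}

/* mirrors the if/else structure of smp_flush_tlb_mm() in the patch */
static void smp_flush(cpumask_t cmask, unsigned int this_cpu)
{
        if (mask_empty(cmask))
                return;                         /* nothing to flush anywhere */

        if (!any_cpu_but(cmask, this_cpu))
                local_flush(this_cpu);          /* we are the only CPU in the mask */
        else
                flush_on_mask(cmask);           /* at least one remote CPU needs it */
}

int main(void)
{
        smp_flush(0x1, 0);      /* only cpu0 in the mask -> local flush        */
        smp_flush(0x6, 0);      /* cpus 1 and 2 in mask  -> flush whole mask   */
        return 0;
}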