2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/mips/kernel/smp.c
@@ -1,17 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  * Copyright (C) 2000, 2001 Kanoj Sarcar
  * Copyright (C) 2000, 2001 Ralf Baechle
@@ -39,6 +27,7 @@
 
 #include <linux/atomic.h>
 #include <asm/cpu.h>
+#include <asm/ginvt.h>
 #include <asm/processor.h>
 #include <asm/idle.h>
 #include <asm/r4k-timer.h>
@@ -218,25 +207,13 @@
 	return IRQ_HANDLED;
 }
 
-static struct irqaction irq_resched = {
-	.handler	= ipi_resched_interrupt,
-	.flags		= IRQF_PERCPU,
-	.name		= "IPI resched"
-};
-
-static struct irqaction irq_call = {
-	.handler	= ipi_call_interrupt,
-	.flags		= IRQF_PERCPU,
-	.name		= "IPI call"
-};
-
-static void smp_ipi_init_one(unsigned int virq,
-			     struct irqaction *action)
+static void smp_ipi_init_one(unsigned int virq, const char *name,
+			     irq_handler_t handler)
 {
 	int ret;
 
 	irq_set_handler(virq, handle_percpu_irq);
-	ret = setup_irq(virq, action);
+	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
 	BUG_ON(ret);
 }
 
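Note: the hunk above drops the static struct irqaction objects in favour of request_irq(), which allocates the irqaction internally. A minimal sketch of the same pattern (demo_* names are invented for illustration; a request_irq() with a NULL dev_id must later be paired with free_irq(virq, NULL), as the teardown hunk below does):

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	static irqreturn_t demo_ipi_handler(int irq, void *dev_id)
	{
		/* acknowledge and handle the per-CPU IPI here */
		return IRQ_HANDLED;
	}

	static int demo_ipi_setup(unsigned int virq)
	{
		/* per-CPU flow handler, then request the interrupt */
		irq_set_handler(virq, handle_percpu_irq);
		return request_irq(virq, demo_ipi_handler, IRQF_PERCPU,
				   "demo IPI", NULL);
	}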
@@ -289,12 +266,15 @@
 		int cpu;
 
 		for_each_cpu(cpu, mask) {
-			smp_ipi_init_one(call_virq + cpu, &irq_call);
-			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
+			smp_ipi_init_one(call_virq + cpu, "IPI call",
+					 ipi_call_interrupt);
+			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
+					 ipi_resched_interrupt);
 		}
 	} else {
-		smp_ipi_init_one(call_virq, &irq_call);
-		smp_ipi_init_one(sched_virq, &irq_resched);
+		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
+		smp_ipi_init_one(sched_virq, "IPI resched",
+				 ipi_resched_interrupt);
 	}
 
 	return 0;
@@ -322,8 +302,8 @@
 		int cpu;
 
 		for_each_cpu(cpu, mask) {
-			remove_irq(call_virq + cpu, &irq_call);
-			remove_irq(sched_virq + cpu, &irq_resched);
+			free_irq(call_virq + cpu, NULL);
+			free_irq(sched_virq + cpu, NULL);
 		}
 	}
 	irq_destroy_ipi(call_virq, mask);
@@ -368,9 +348,11 @@
 	 */
 
 	calibrate_delay();
-	preempt_disable();
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
+
+	set_cpu_sibling_map(cpu);
+	set_cpu_core_map(cpu);
 
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
@@ -382,9 +364,6 @@
 
 	/* The CPU is running and counters synchronised, now mark it online */
 	set_cpu_online(cpu, true);
-
-	set_cpu_sibling_map(cpu);
-	set_cpu_core_map(cpu);
 
 	calculate_cpu_foreign_map();
 
@@ -443,6 +422,8 @@
 /* preload SMP state for boot cpu */
 void smp_prepare_boot_cpu(void)
 {
+	if (mp_ops->prepare_boot_cpu)
+		mp_ops->prepare_boot_cpu();
 	set_cpu_possible(0, true);
 	set_cpu_online(0, true);
 }
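Note: smp_prepare_boot_cpu() now lets the platform hook boot-CPU setup. The callback is optional, so it is NULL-checked before the call; a generic sketch of that pattern (demo_smp_ops and demo_ops are hypothetical names, not part of the MIPS code):

	struct demo_smp_ops {
		void (*prepare_boot_cpu)(void);	/* optional hook */
	};

	static const struct demo_smp_ops *demo_ops;

	static void demo_prepare_boot_cpu(void)
	{
		/* only platforms that registered the hook get called */
		if (demo_ops && demo_ops->prepare_boot_cpu)
			demo_ops->prepare_boot_cpu();
	}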
@@ -482,12 +463,21 @@
 
 void flush_tlb_all(void)
 {
+	if (cpu_has_mmid) {
+		htw_stop();
+		ginvt_full();
+		sync_ginv();
+		instruction_hazard();
+		htw_start();
+		return;
+	}
+
 	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
 {
-	local_flush_tlb_mm((struct mm_struct *)mm);
+	drop_mmu_context((struct mm_struct *)mm);
 }
 
 /*
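Note: flush_tlb_all() now short-circuits when cpu_has_mmid, where a single globalized invalidate (ginvt) reaches every CPU, and only falls back to the IPI broadcast otherwise. A hedged sketch of that shape (demo_* names are invented; the real MMID path also needs the htw_stop()/hazard barriers shown above):

	#include <linux/smp.h>

	static void demo_flush_ipi(void *info)
	{
		/* per-CPU local TLB flush would go here */
	}

	static void demo_flush_all(bool has_global_invalidate)
	{
		if (has_global_invalidate) {
			/* hardware broadcasts the invalidate; no cross-calls */
			return;
		}

		/* otherwise run the flush on every online CPU and wait */
		on_each_cpu(demo_flush_ipi, NULL, 1);
	}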
@@ -530,17 +520,22 @@
 {
 	preempt_disable();
 
-	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+	if (cpu_has_mmid) {
+		/*
+		 * No need to worry about other CPUs - the ginvt in
+		 * drop_mmu_context() will be globalized.
+		 */
+	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
 		unsigned int cpu;
 
 		for_each_online_cpu(cpu) {
 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
-				cpu_context(cpu, mm) = 0;
+				set_cpu_context(cpu, mm, 0);
 		}
 	}
-	local_flush_tlb_mm(mm);
+	drop_mmu_context(mm);
 
 	preempt_enable();
 }
@@ -561,9 +556,26 @@
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned long addr;
+	u32 old_mmid;
 
 	preempt_disable();
-	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+	if (cpu_has_mmid) {
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(cpu_asid(0, mm));
+		mtc0_tlbw_hazard();
+		addr = round_down(start, PAGE_SIZE * 2);
+		end = round_up(end, PAGE_SIZE * 2);
+		do {
+			ginvt_va_mmid(addr);
+			sync_ginv();
+			addr += PAGE_SIZE * 2;
+		} while (addr < end);
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
+	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		struct flush_tlb_data fd = {
 			.vma = vma,
 			.addr1 = start,
@@ -571,6 +583,7 @@
 		};
 
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
+		local_flush_tlb_range(vma, start, end);
 	} else {
 		unsigned int cpu;
 		int exec = vma->vm_flags & VM_EXEC;
@@ -583,10 +596,10 @@
 			 * mm has been completely unused by that CPU.
 			 */
 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
-				cpu_context(cpu, mm) = !exec;
+				set_cpu_context(cpu, mm, !exec);
 		}
+		local_flush_tlb_range(vma, start, end);
 	}
-	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
 }
 
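Note: in the MMID path of flush_tlb_range() above, the range is first aligned to an even/odd page pair before the ginvt loop, since a MIPS TLB entry maps two consecutive pages (EntryLo0/EntryLo1). A standalone sketch of just that address walk, assuming start < end (demo_invalidate_pair() is a placeholder for the per-pair invalidate):

	#include <linux/kernel.h>
	#include <asm/page.h>

	static void demo_invalidate_pair(unsigned long addr)
	{
		/* placeholder for ginvt_va_mmid(addr) + sync_ginv() */
	}

	static void demo_walk_range(unsigned long start, unsigned long end)
	{
		unsigned long addr = round_down(start, PAGE_SIZE * 2);

		end = round_up(end, PAGE_SIZE * 2);
		do {
			demo_invalidate_pair(addr);
			addr += PAGE_SIZE * 2;
		} while (addr < end);
	}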
@@ -616,14 +629,28 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	u32 old_mmid;
+
 	preempt_disable();
-	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+	if (cpu_has_mmid) {
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
+		mtc0_tlbw_hazard();
+		ginvt_va_mmid(page);
+		sync_ginv();
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
+	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+		   (current->mm != vma->vm_mm)) {
 		struct flush_tlb_data fd = {
 			.vma = vma,
 			.addr1 = page,
 		};
 
 		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
+		local_flush_tlb_page(vma, page);
 	} else {
 		unsigned int cpu;
 
@@ -635,10 +662,10 @@
 			 * by that CPU.
 			 */
 			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
-				cpu_context(cpu, vma->vm_mm) = 1;
+				set_cpu_context(cpu, vma->vm_mm, 1);
 		}
+		local_flush_tlb_page(vma, page);
 	}
-	local_flush_tlb_page(vma, page);
 	preempt_enable();
 }
 
@@ -659,29 +686,22 @@
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 
-static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
 static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
 
 void tick_broadcast(const struct cpumask *mask)
 {
-	atomic_t *count;
 	call_single_data_t *csd;
 	int cpu;
 
 	for_each_cpu(cpu, mask) {
-		count = &per_cpu(tick_broadcast_count, cpu);
 		csd = &per_cpu(tick_broadcast_csd, cpu);
-
-		if (atomic_inc_return(count) == 1)
-			smp_call_function_single_async(cpu, csd);
+		smp_call_function_single_async(cpu, csd);
 	}
 }
 
 static void tick_broadcast_callee(void *info)
 {
-	int cpu = smp_processor_id();
 	tick_receive_broadcast();
-	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
 }
 
 static int __init tick_broadcast_init(void)
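Note: tick_broadcast() no longer keeps its own tick_broadcast_count, since smp_call_function_single_async() itself detects a csd that is still in flight. A hedged sketch of the per-CPU csd pattern (demo_* names are invented; csd->func must be set before the first use, as tick_broadcast_init() does for tick_broadcast_csd):

	#include <linux/smp.h>

	static DEFINE_PER_CPU(call_single_data_t, demo_csd);

	static void demo_callee(void *info)
	{
		/* runs on the target CPU from IPI context */
	}

	static void demo_kick(int cpu)
	{
		call_single_data_t *csd = &per_cpu(demo_csd, cpu);

		csd->func = demo_callee;
		/* returns -EBUSY if the previous async call has not completed */
		smp_call_function_single_async(cpu, csd);
	}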