@@ -1,17 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  * Copyright (C) 2000, 2001 Kanoj Sarcar
  * Copyright (C) 2000, 2001 Ralf Baechle
@@ -39,6 +27,7 @@
 
 #include <linux/atomic.h>
 #include <asm/cpu.h>
+#include <asm/ginvt.h>
 #include <asm/processor.h>
 #include <asm/idle.h>
 #include <asm/r4k-timer.h>
@@ -218,24 +207,12 @@
 	return IRQ_HANDLED;
 }
 
-static struct irqaction irq_resched = {
-	.handler	= ipi_resched_interrupt,
-	.flags		= IRQF_PERCPU,
-	.name		= "IPI resched"
-};
-
-static struct irqaction irq_call = {
-	.handler	= ipi_call_interrupt,
-	.flags		= IRQF_PERCPU,
-	.name		= "IPI call"
-};
-
-static void smp_ipi_init_one(unsigned int virq,
-			     struct irqaction *action)
+static void smp_ipi_init_one(unsigned int virq, const char *name,
+			     irq_handler_t handler)
 {
 	int ret;
 
 	irq_set_handler(virq, handle_percpu_irq);
-	ret = setup_irq(virq, action);
+	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
 	BUG_ON(ret);
 }
 
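The hunk above drops the two static `struct irqaction` objects: `request_irq()` allocates and registers the action internally, so the handler, flags and name can be passed straight through `smp_ipi_init_one()`. A minimal sketch of how a caller would exercise the reworked helper (the `demo_*` names are hypothetical, not part of the patch):

```c
#include <linux/interrupt.h>

/* Hypothetical handler, standing in for ipi_call_interrupt() etc. */
static irqreturn_t demo_ipi_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static void demo_ipi_setup(unsigned int virq)
{
	/* One call now carries everything the static irqaction used to hold. */
	smp_ipi_init_one(virq, "IPI demo", demo_ipi_handler);
}
```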
@@ -289,12 +266,15 @@
 		int cpu;
 
 		for_each_cpu(cpu, mask) {
-			smp_ipi_init_one(call_virq + cpu, &irq_call);
-			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
+			smp_ipi_init_one(call_virq + cpu, "IPI call",
+					 ipi_call_interrupt);
+			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
+					 ipi_resched_interrupt);
 		}
 	} else {
-		smp_ipi_init_one(call_virq, &irq_call);
-		smp_ipi_init_one(sched_virq, &irq_resched);
+		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
+		smp_ipi_init_one(sched_virq, "IPI resched",
+				 ipi_resched_interrupt);
 	}
 
 	return 0;
@@ -322,8 +302,8 @@
 		int cpu;
 
 		for_each_cpu(cpu, mask) {
-			remove_irq(call_virq + cpu, &irq_call);
-			remove_irq(sched_virq + cpu, &irq_resched);
+			free_irq(call_virq + cpu, NULL);
+			free_irq(sched_virq + cpu, NULL);
 		}
 	}
 	irq_destroy_ipi(call_virq, mask);
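Teardown changes to match: `remove_irq()` took the same `struct irqaction` pointer used at setup time, while `free_irq()` identifies the registration by its `dev_id`. Since `smp_ipi_init_one()` registered with a NULL `dev_id`, the free must pass NULL as well:

```c
/* Pairing rule: free_irq() must see the same dev_id as request_irq(). */
static void demo_ipi_teardown(unsigned int virq)	/* hypothetical */
{
	free_irq(virq, NULL);	/* smp_ipi_init_one() registered with dev_id == NULL */
}
```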
@@ -368,9 +348,11 @@
 	 */
 
 	calibrate_delay();
-	preempt_disable();
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
+
+	set_cpu_sibling_map(cpu);
+	set_cpu_core_map(cpu);
 
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
@@ -382,9 +364,6 @@
 
 	/* The CPU is running and counters synchronised, now mark it online */
 	set_cpu_online(cpu, true);
-
-	set_cpu_sibling_map(cpu);
-	set_cpu_core_map(cpu);
 
 	calculate_cpu_foreign_map();
 
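These two hunks move `set_cpu_sibling_map()`/`set_cpu_core_map()` from after `set_cpu_online()` to before it, and drop an explicit `preempt_disable()` that appears to have become redundant on this boot path. The likely intent, hedged: once a CPU is marked online, other CPUs may consult its topology masks, so those masks must already be populated by then. The required ordering, sketched:

```c
/* In start_secondary(): topology must be valid before visibility. */
set_cpu_sibling_map(cpu);	/* 1. fill this CPU's sibling mask */
set_cpu_core_map(cpu);		/* 2. fill this CPU's core mask */
/* ... counters synchronised ... */
set_cpu_online(cpu, true);	/* 3. only now may others observe us */
```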
@@ -443,6 +422,8 @@
 /* preload SMP state for boot cpu */
 void smp_prepare_boot_cpu(void)
 {
+	if (mp_ops->prepare_boot_cpu)
+		mp_ops->prepare_boot_cpu();
 	set_cpu_possible(0, true);
 	set_cpu_online(0, true);
 }
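`smp_prepare_boot_cpu()` now gives the platform's `struct plat_smp_ops` an optional hook for boot-CPU setup; the NULL check makes the hook opt-in. A platform that needs it would wire it up roughly like this (the `foo_*` names are hypothetical):

```c
/* Hypothetical platform wiring; only the new hook is of interest. */
static void foo_prepare_boot_cpu(void)
{
	/* e.g. record the boot CPU's state in platform-specific registers */
}

static const struct plat_smp_ops foo_smp_ops = {
	/* .boot_secondary, .send_ipi_single, ... as before */
	.prepare_boot_cpu	= foo_prepare_boot_cpu,	/* new, optional */
};
```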
@@ -482,12 +463,21 @@
 
 void flush_tlb_all(void)
 {
+	if (cpu_has_mmid) {
+		htw_stop();
+		ginvt_full();
+		sync_ginv();
+		instruction_hazard();
+		htw_start();
+		return;
+	}
+
 	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
 {
-	local_flush_tlb_mm((struct mm_struct *)mm);
+	drop_mmu_context((struct mm_struct *)mm);
 }
 
 /*
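With MemoryMapID (MMID) support (`cpu_has_mmid`), the CPU can broadcast TLB invalidations in hardware via the GINVT instruction, so `flush_tlb_all()` no longer needs to interrupt every CPU. The sequence above, restated as a commented sketch (a hypothetical wrapper, not part of the patch):

```c
/* Hypothetical wrapper around the ginvt sequence used in flush_tlb_all(). */
static inline void demo_ginvt_all(void)
{
	htw_stop();		/* quiesce the hardware page-table walker */
	ginvt_full();		/* broadcast: invalidate every TLB entry on every CPU */
	sync_ginv();		/* wait until the global invalidate completes */
	instruction_hazard();	/* don't execute with stale fetched translations */
	htw_start();		/* resume the walker */
}
```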
@@ -530,17 +520,22 @@
 {
 	preempt_disable();
 
-	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+	if (cpu_has_mmid) {
+		/*
+		 * No need to worry about other CPUs - the ginvt in
+		 * drop_mmu_context() will be globalized.
+		 */
+	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
 		unsigned int cpu;
 
 		for_each_online_cpu(cpu) {
 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
-				cpu_context(cpu, mm) = 0;
+				set_cpu_context(cpu, mm, 0);
 		}
 	}
-	local_flush_tlb_mm(mm);
+	drop_mmu_context(mm);
 
 	preempt_enable();
 }
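Two things happen here: `local_flush_tlb_mm()` becomes `drop_mmu_context()`, whose ginvt is globalized under MMID so remote CPUs need no work, and `cpu_context(cpu, mm) = 0` becomes `set_cpu_context(cpu, mm, 0)`, because the context is no longer a simple per-CPU lvalue. A sketch of what the accessor presumably does, assuming the MMID lives in an `atomic64_t` in `mm->context` (hedged; see asm/mmu_context.h for the real definition):

```c
/* Sketch only: with MMIDs there is one ID per mm, not one ASID per CPU. */
static inline void set_cpu_context(unsigned int cpu,
				   struct mm_struct *mm, u64 ctx)
{
	if (cpu_has_mmid)
		atomic64_set(&mm->context.mmid, ctx);
	else
		mm->context.asid[cpu] = ctx;
}
```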
@@ -561,9 +556,26 @@
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned long addr;
+	u32 old_mmid;
 
 	preempt_disable();
-	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+	if (cpu_has_mmid) {
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(cpu_asid(0, mm));
+		mtc0_tlbw_hazard();
+		addr = round_down(start, PAGE_SIZE * 2);
+		end = round_up(end, PAGE_SIZE * 2);
+		do {
+			ginvt_va_mmid(addr);
+			sync_ginv();
+			addr += PAGE_SIZE * 2;
+		} while (addr < end);
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
+	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		struct flush_tlb_data fd = {
 			.vma = vma,
 			.addr1 = start,
@@ -571,6 +583,7 @@
 		};
 
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
+		local_flush_tlb_range(vma, start, end);
 	} else {
 		unsigned int cpu;
 		int exec = vma->vm_flags & VM_EXEC;
@@ -583,9 +596,9 @@
 			 * mm has been completely unused by that CPU.
 			 */
 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
-				cpu_context(cpu, mm) = !exec;
+				set_cpu_context(cpu, mm, !exec);
 		}
+		local_flush_tlb_range(vma, start, end);
 	}
-	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
 }
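Note the restructure across these three hunks: the unconditional trailing `local_flush_tlb_range()` moves into the two non-MMID branches, while the MMID path needs no local flush at all, since its ginvt is already global. In that MMID path the range is widened to even/odd page pairs because a MIPS TLB entry maps two consecutive pages, so one GINVT per pair covers the whole range. A worked example, assuming 4 KiB pages so `PAGE_SIZE * 2 == 0x2000`:

```c
/*
 * start = 0x1000, end = 0x5000:
 *   addr = round_down(0x1000, 0x2000) = 0x0000
 *   end  = round_up(0x5000, 0x2000)   = 0x6000
 * The loop issues ginvt_va_mmid() at 0x0000, 0x2000 and 0x4000, so
 * three invalidates cover every page pair touching [0x1000, 0x5000).
 */
```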
592 | 605 | |
---|
.. | .. |
---|
616 | 629 | |
---|
617 | 630 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
---|
618 | 631 | { |
---|
| 632 | + u32 old_mmid; |
---|
| 633 | + |
---|
619 | 634 | preempt_disable(); |
---|
620 | | - if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { |
---|
| 635 | + if (cpu_has_mmid) { |
---|
| 636 | + htw_stop(); |
---|
| 637 | + old_mmid = read_c0_memorymapid(); |
---|
| 638 | + write_c0_memorymapid(cpu_asid(0, vma->vm_mm)); |
---|
| 639 | + mtc0_tlbw_hazard(); |
---|
| 640 | + ginvt_va_mmid(page); |
---|
| 641 | + sync_ginv(); |
---|
| 642 | + write_c0_memorymapid(old_mmid); |
---|
| 643 | + instruction_hazard(); |
---|
| 644 | + htw_start(); |
---|
| 645 | + } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) || |
---|
| 646 | + (current->mm != vma->vm_mm)) { |
---|
621 | 647 | struct flush_tlb_data fd = { |
---|
622 | 648 | .vma = vma, |
---|
623 | 649 | .addr1 = page, |
---|
624 | 650 | }; |
---|
625 | 651 | |
---|
626 | 652 | smp_on_other_tlbs(flush_tlb_page_ipi, &fd); |
---|
| 653 | + local_flush_tlb_page(vma, page); |
---|
627 | 654 | } else { |
---|
628 | 655 | unsigned int cpu; |
---|
629 | 656 | |
---|
@@ -635,9 +662,9 @@
 			 * by that CPU.
 			 */
 			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
-				cpu_context(cpu, vma->vm_mm) = 1;
+				set_cpu_context(cpu, vma->vm_mm, 1);
 		}
+		local_flush_tlb_page(vma, page);
 	}
-	local_flush_tlb_page(vma, page);
 	preempt_enable();
 }
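`flush_tlb_page()` gets the same treatment as `flush_tlb_range()`: temporarily install the target mm's MMID in the MemoryMapID register, invalidate the VA on all CPUs, then restore. With MMIDs the cpu argument to `cpu_asid()` is irrelevant, since the ID belongs to the mm rather than to a CPU. The borrow-the-MMID pattern, as a hypothetical helper:

```c
/* Hypothetical helper naming the pattern shared by both flush paths. */
static void demo_ginvt_va(struct mm_struct *mm, unsigned long va)
{
	u32 old_mmid = read_c0_memorymapid();

	write_c0_memorymapid(cpu_asid(0, mm));	/* borrow the target mm's MMID */
	mtc0_tlbw_hazard();			/* let the mtc0 take effect */
	ginvt_va_mmid(va);			/* invalidate va for that MMID, all CPUs */
	sync_ginv();				/* wait for completion */
	write_c0_memorymapid(old_mmid);		/* restore our own MMID */
}
```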
644 | 671 | |
---|
.. | .. |
---|
659 | 686 | |
---|
660 | 687 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
---|
661 | 688 | |
---|
662 | | -static DEFINE_PER_CPU(atomic_t, tick_broadcast_count); |
---|
663 | 689 | static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd); |
---|
664 | 690 | |
---|
665 | 691 | void tick_broadcast(const struct cpumask *mask) |
---|
666 | 692 | { |
---|
667 | | - atomic_t *count; |
---|
668 | 693 | call_single_data_t *csd; |
---|
669 | 694 | int cpu; |
---|
670 | 695 | |
---|
671 | 696 | for_each_cpu(cpu, mask) { |
---|
672 | | - count = &per_cpu(tick_broadcast_count, cpu); |
---|
673 | 697 | csd = &per_cpu(tick_broadcast_csd, cpu); |
---|
674 | | - |
---|
675 | | - if (atomic_inc_return(count) == 1) |
---|
676 | | - smp_call_function_single_async(cpu, csd); |
---|
| 698 | + smp_call_function_single_async(cpu, csd); |
---|
677 | 699 | } |
---|
678 | 700 | } |
---|
679 | 701 | |
---|
680 | 702 | static void tick_broadcast_callee(void *info) |
---|
681 | 703 | { |
---|
682 | | - int cpu = smp_processor_id(); |
---|
683 | 704 | tick_receive_broadcast(); |
---|
684 | | - atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); |
---|
685 | 705 | } |
---|
686 | 706 | |
---|
687 | 707 | static int __init tick_broadcast_init(void) |
---|
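The per-CPU `tick_broadcast_count` de-duplication is gone: the generic IPI code already refuses to re-enqueue a `call_single_data_t` that is still in flight (hedged; that guarantee comes from the csd locking in kernel/smp.c), so counting pending broadcasts here was redundant. The diff cuts off at `tick_broadcast_init()`; presumably it just binds each per-CPU csd to the callee, along these lines:

```c
/* Presumed shape of the init; the body is not shown in this diff. */
static int __init tick_broadcast_init(void)
{
	call_single_data_t *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);
```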