@@ -5,6 +5,7 @@
 #include <linux/spinlock.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
@@ -309,10 +310,12 @@
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                struct task_struct *tsk)
 {
-        unsigned long flags;
+        unsigned long flags, _flags;

         local_irq_save(flags);
+        protect_inband_mm(_flags);
         switch_mm_irqs_off(prev, next, tsk);
+        unprotect_inband_mm(_flags);
         local_irq_restore(flags);
 }

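Note on the hunk above: on a Dovetail kernel, local_irq_save() only masks the in-band (virtualized) interrupt state, so the mm switch is additionally bracketed with protect_inband_mm()/unprotect_inband_mm(), keeping out-of-band code from observing cpu_tlbstate while it is inconsistent. A minimal sketch of the same two-level pattern follows, under the assumption that protect_inband_mm() behaves like a local_irq_save()-style macro as used above; the helper and its body are hypothetical, not part of the patch.

#include <linux/irqflags.h>
#include <linux/irq_pipeline.h>
#include <linux/mm_types.h>

/* Hypothetical helper: bracket a per-CPU mm/TLB state update. */
static void update_cpu_mm_state(struct mm_struct *next)
{
        unsigned long flags, _flags;

        local_irq_save(flags);          /* masks in-band interrupts only */
        protect_inband_mm(_flags);      /* hard-masks, keeping oob code out */

        /* ... update cpu_tlbstate for 'next', as switch_mm_irqs_off() does ... */

        unprotect_inband_mm(_flags);
        local_irq_restore(flags);
}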
@@ -440,7 +443,9 @@
         */

        /* We don't want flush_tlb_func_* to run concurrently with us. */
-       if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+       if (IS_ENABLED(CONFIG_DOVETAIL))
+               WARN_ON_ONCE(!hard_irqs_disabled());
+       else if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                WARN_ON_ONCE(!irqs_disabled());

        /*
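The assertion change above follows from the same split: with CONFIG_DOVETAIL, irqs_disabled() reports the virtual in-band flag, which may be set while the CPU can still take out-of-band interrupts, so the lockdep-style check is made against hard_irqs_disabled() instead. A hedged sketch of the same guard, usable in other helpers that need the CPU to be truly unpreemptible; the helper name is hypothetical.

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/irq_pipeline.h>

/* Hypothetical assertion helper mirroring the pattern above. */
static inline void assert_hard_atomic(void)
{
        if (IS_ENABLED(CONFIG_DOVETAIL))
                WARN_ON_ONCE(!hard_irqs_disabled());    /* real CPU flag */
        else if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                WARN_ON_ONCE(!irqs_disabled());         /* virtual flag suffices */
}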
@@ -666,15 +671,24 @@
         * wants us to catch up to.
         */
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-       u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-       u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
-       u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+       u32 loaded_mm_asid;
+       u64 mm_tlb_gen;
+       u64 local_tlb_gen;
+       unsigned long flags;

        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());

-       if (unlikely(loaded_mm == &init_mm))
+       protect_inband_mm(flags);
+
+       loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+       mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
+       local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
+       if (unlikely(loaded_mm == &init_mm)) {
+               unprotect_inband_mm(flags);
                return;
+       }

        VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
                   loaded_mm->context.ctx_id);
@@ -690,6 +704,7 @@
                 * IPIs to lazy TLB mode CPUs.
                 */
                switch_mm_irqs_off(NULL, &init_mm, NULL);
+               unprotect_inband_mm(flags);
                return;
        }

@@ -700,12 +715,15 @@
                 * be handled can catch us all the way up, leaving no work for
                 * the second flush.
                 */
+               unprotect_inband_mm(flags);
                trace_tlb_flush(reason, 0);
                return;
        }

        WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
        WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
+
+       unprotect_inband_mm(flags);

        /*
         * If we get to this point, we know that our TLB is out of date.
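In the flush path above, the ASID and generation snapshot is moved under protect_inband_mm() so the values cannot be invalidated by an out-of-band preemption between the reads, and every early-return path added by the patch drops the protection before leaving. A skeleton of that shape, assuming the same macro semantics; the function and its condition are placeholders, not part of the patch.

#include <linux/types.h>
#include <linux/irq_pipeline.h>

/* Placeholder skeleton: take a consistent snapshot; exits must unprotect. */
static void consume_tlb_snapshot(bool nothing_to_do)
{
        unsigned long flags;

        protect_inband_mm(flags);

        /* ... read loaded_mm_asid and the tlb_gen values from cpu_tlbstate ... */

        if (nothing_to_do) {
                unprotect_inband_mm(flags);     /* never leak the protected state */
                return;
        }

        unprotect_inband_mm(flags);

        /* ... perform the flush using the snapshot taken above ... */
}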
@@ -1063,7 +1081,7 @@
         * from interrupts. (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
-       raw_local_irq_save(flags);
+       flags = hard_local_irq_save();

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* toggle PGE */
@@ -1071,7 +1089,7 @@
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);

-       raw_local_irq_restore(flags);
+       hard_local_irq_restore(flags);
 }

 /*
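The CR4 toggle above must not be interrupted at all, so raw_local_irq_save() is replaced by hard_local_irq_save(), which masks interrupts in the CPU rather than only virtually. Note the calling convention visible in the hunks: hard_local_irq_save() returns the flags instead of taking an lvalue. A minimal usage sketch; the surrounding function is hypothetical.

#include <linux/irq_pipeline.h>

/* Hypothetical critical section that must exclude out-of-band IRQs too. */
static void do_hard_atomic_work(void)
{
        unsigned long flags;

        flags = hard_local_irq_save();  /* value-returning, unlike raw_local_irq_save() */

        /* ... code that must not be preempted, even by the oob stage ... */

        hard_local_irq_restore(flags);
}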
@@ -1079,6 +1097,8 @@
  */
 STATIC_NOPV void native_flush_tlb_local(void)
 {
+       unsigned long flags;
+
        /*
         * Preemption or interrupts must be disabled to protect the access
         * to the per CPU variable and to prevent being preempted between
@@ -1086,10 +1106,14 @@
         */
        WARN_ON_ONCE(preemptible());

+       flags = hard_cond_local_irq_save();
+
        invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* If current->mm == NULL then the read_cr3() "borrows" an mm */
        native_write_cr3(__native_read_cr3());
+
+       hard_cond_local_irq_restore(flags);
 }

 void flush_tlb_local(void)
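native_flush_tlb_local() gets the conditional variant: judging by its use next to the existing WARN_ON_ONCE(preemptible()) check, hard_cond_local_irq_save() appears to hard-mask only when interrupt pipelining requires it, leaving a non-pipelined build unchanged. A sketch under that assumption; the helper name is hypothetical.

#include <linux/bug.h>
#include <linux/preempt.h>
#include <linux/irq_pipeline.h>

/* Hypothetical helper touching this CPU's TLB bookkeeping. */
static void touch_percpu_tlb_state(void)
{
        unsigned long flags;

        WARN_ON_ONCE(preemptible());            /* caller is already non-preemptible */

        flags = hard_cond_local_irq_save();     /* extra hard masking when pipelined */

        /* ... read/write per-CPU TLB state ... */

        hard_cond_local_irq_restore(flags);
}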
@@ -1165,6 +1189,16 @@
        VM_WARN_ON_ONCE(!loaded_mm);

        /*
+        * There would be no way for the companion core to switch an
+        * out-of-band task back in-band in order to handle an access
+        * fault over NMI safely. Tell the caller that uaccess from
+        * NMI is NOT ok if the preempted task was running
+        * out-of-band.
+        */
+       if (running_oob())
+               return false;
+
+       /*
         * The condition we want to check is
         * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
         * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
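The running_oob() bail-out above means nmi_uaccess_okay() now also refuses user access when the NMI landed on top of an out-of-band task, since the companion core could not demote that task to resolve a fault. A hypothetical NMI-context consumer keeps using the predicate exactly as before, for example:

#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* Hypothetical caller: fetch one word of user memory from NMI context. */
static int read_user_word_in_nmi(const void __user *uaddr, unsigned long *val)
{
        if (!nmi_uaccess_okay())
                return -EFAULT;         /* now also taken when an oob task was preempted */

        /* copy_from_user_nmi() returns the number of bytes left uncopied */
        return copy_from_user_nmi(val, uaddr, sizeof(*val)) ? -EFAULT : 0;
}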