.. | .. |
---|
1 | 1 | // SPDX-License-Identifier: GPL-2.0 |
---|
2 | 2 | #include <linux/kernel.h> |
---|
| 3 | +#include <linux/pgtable.h> |
---|
3 | 4 | |
---|
4 | 5 | #include <linux/string.h> |
---|
5 | 6 | #include <linux/bitops.h> |
---|
.. | .. |
---|
11 | 12 | #include <linux/uaccess.h> |
---|
12 | 13 | |
---|
13 | 14 | #include <asm/cpufeature.h> |
---|
14 | | -#include <asm/pgtable.h> |
---|
15 | 15 | #include <asm/msr.h> |
---|
16 | 16 | #include <asm/bugs.h> |
---|
17 | 17 | #include <asm/cpu.h> |
---|
.. | .. |
---|
19 | 19 | #include <asm/microcode_intel.h> |
---|
20 | 20 | #include <asm/hwcap2.h> |
---|
21 | 21 | #include <asm/elf.h> |
---|
| 22 | +#include <asm/cpu_device_id.h> |
---|
| 23 | +#include <asm/cmdline.h> |
---|
| 24 | +#include <asm/traps.h> |
---|
| 25 | +#include <asm/resctrl.h> |
---|
| 26 | +#include <asm/numa.h> |
---|
22 | 27 | |
---|
23 | 28 | #ifdef CONFIG_X86_64 |
---|
24 | 29 | #include <linux/topology.h> |
---|
.. | .. |
---|
31 | 36 | #include <asm/apic.h> |
---|
32 | 37 | #endif |
---|
33 | 38 | |
---|
| 39 | +enum split_lock_detect_state { |
---|
| 40 | + sld_off = 0, |
---|
| 41 | + sld_warn, |
---|
| 42 | + sld_fatal, |
---|
| 43 | +}; |
---|
| 44 | + |
---|
34 | 45 | /* |
---|
35 | | - * Just in case our CPU detection goes bad, or you have a weird system, |
---|
36 | | - * allow a way to override the automatic disabling of MPX. |
---|
 * Default to sld_off because most systems do not support split lock detection.
 * split_lock_setup() will switch this to sld_warn on systems that support
 * split lock detect, unless there is a command line override.
---|
37 | 49 | */ |
---|
38 | | -static int forcempx; |
---|
| 50 | +static enum split_lock_detect_state sld_state __ro_after_init = sld_off; |
---|
| 51 | +static u64 msr_test_ctrl_cache __ro_after_init; |
---|
39 | 52 | |
---|
40 | | -static int __init forcempx_setup(char *__unused) |
---|
/*
 * With a name like MSR_TEST_CTRL it should go without saying, but don't touch
 * MSR_TEST_CTRL unless the CPU is one of the whitelisted models. Writing it
 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
 */
---|
| 58 | +static bool cpu_model_supports_sld __ro_after_init; |
---|
| 59 | + |
---|
/*
 * Processors which have self-snooping capability can handle conflicting
 * memory type across CPUs by snooping its own cache. However, there exist
 * CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
---|
| 67 | +static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c) |
---|
41 | 68 | { |
---|
42 | | - forcempx = 1; |
---|
43 | | - |
---|
44 | | - return 1; |
---|
45 | | -} |
---|
46 | | -__setup("intel-skd-046-workaround=disable", forcempx_setup); |
---|
47 | | - |
---|
48 | | -void check_mpx_erratum(struct cpuinfo_x86 *c) |
---|
49 | | -{ |
---|
50 | | - if (forcempx) |
---|
51 | | - return; |
---|
52 | | - /* |
---|
53 | | - * Turn off the MPX feature on CPUs where SMEP is not |
---|
54 | | - * available or disabled. |
---|
55 | | - * |
---|
56 | | - * Works around Intel Erratum SKD046: "Branch Instructions |
---|
57 | | - * May Initialize MPX Bound Registers Incorrectly". |
---|
58 | | - * |
---|
59 | | - * This might falsely disable MPX on systems without |
---|
60 | | - * SMEP, like Atom processors without SMEP. But there |
---|
61 | | - * is no such hardware known at the moment. |
---|
62 | | - */ |
---|
63 | | - if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) { |
---|
64 | | - setup_clear_cpu_cap(X86_FEATURE_MPX); |
---|
65 | | - pr_warn("x86/mpx: Disabling MPX since SMEP not present\n"); |
---|
| 69 | + switch (c->x86_model) { |
---|
| 70 | + case INTEL_FAM6_CORE_YONAH: |
---|
| 71 | + case INTEL_FAM6_CORE2_MEROM: |
---|
| 72 | + case INTEL_FAM6_CORE2_MEROM_L: |
---|
| 73 | + case INTEL_FAM6_CORE2_PENRYN: |
---|
| 74 | + case INTEL_FAM6_CORE2_DUNNINGTON: |
---|
| 75 | + case INTEL_FAM6_NEHALEM: |
---|
| 76 | + case INTEL_FAM6_NEHALEM_G: |
---|
| 77 | + case INTEL_FAM6_NEHALEM_EP: |
---|
| 78 | + case INTEL_FAM6_NEHALEM_EX: |
---|
| 79 | + case INTEL_FAM6_WESTMERE: |
---|
| 80 | + case INTEL_FAM6_WESTMERE_EP: |
---|
| 81 | + case INTEL_FAM6_SANDYBRIDGE: |
---|
| 82 | + setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP); |
---|
66 | 83 | } |
---|
67 | 84 | } |
---|
68 | 85 | |
---|
.. | .. |
---|
71 | 88 | static int __init ring3mwait_disable(char *__unused) |
---|
72 | 89 | { |
---|
73 | 90 | ring3mwait_disabled = true; |
---|
74 | | - return 0; |
---|
| 91 | + return 1; |
---|
75 | 92 | } |
---|
76 | 93 | __setup("ring3mwait=disable", ring3mwait_disable); |
---|
77 | 94 | |
---|
.. | .. |
---|
116 | 133 | u32 microcode; |
---|
117 | 134 | }; |
---|
118 | 135 | static const struct sku_microcode spectre_bad_microcodes[] = { |
---|
119 | | - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, |
---|
120 | | - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, |
---|
121 | | - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, |
---|
122 | | - { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, |
---|
123 | | - { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, |
---|
| 136 | + { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 }, |
---|
| 137 | + { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 }, |
---|
| 138 | + { INTEL_FAM6_KABYLAKE, 0x09, 0x80 }, |
---|
| 139 | + { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 }, |
---|
| 140 | + { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 }, |
---|
124 | 141 | { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, |
---|
125 | 142 | { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, |
---|
126 | | - { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, |
---|
127 | | - { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, |
---|
128 | | - { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, |
---|
129 | | - { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, |
---|
| 143 | + { INTEL_FAM6_BROADWELL, 0x04, 0x28 }, |
---|
| 144 | + { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b }, |
---|
| 145 | + { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 }, |
---|
| 146 | + { INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 }, |
---|
130 | 147 | { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, |
---|
131 | | - { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, |
---|
132 | | - { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, |
---|
133 | | - { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 }, |
---|
| 148 | + { INTEL_FAM6_HASWELL_L, 0x01, 0x21 }, |
---|
| 149 | + { INTEL_FAM6_HASWELL_G, 0x01, 0x18 }, |
---|
| 150 | + { INTEL_FAM6_HASWELL, 0x03, 0x23 }, |
---|
134 | 151 | { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, |
---|
135 | 152 | { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, |
---|
136 | 153 | { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, |
---|
.. | .. |
---|
239 | 256 | /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ |
---|
240 | 257 | if (c->x86 == 6) { |
---|
241 | 258 | switch (c->x86_model) { |
---|
242 | | - case 0x27: /* Penwell */ |
---|
243 | | - case 0x35: /* Cloverview */ |
---|
244 | | - case 0x4a: /* Merrifield */ |
---|
| 259 | + case INTEL_FAM6_ATOM_SALTWELL_MID: |
---|
| 260 | + case INTEL_FAM6_ATOM_SALTWELL_TABLET: |
---|
| 261 | + case INTEL_FAM6_ATOM_SILVERMONT_MID: |
---|
| 262 | + case INTEL_FAM6_ATOM_AIRMONT_NP: |
---|
245 | 263 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); |
---|
246 | 264 | break; |
---|
247 | 265 | default: |
---|
.. | .. |
---|
303 | 321 | c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); |
---|
304 | 322 | } |
---|
305 | 323 | |
---|
306 | | - check_mpx_erratum(c); |
---|
| 324 | + check_memory_type_self_snoop_errata(c); |
---|
307 | 325 | |
---|
308 | 326 | /* |
---|
309 | 327 | * Get the number of SMT siblings early from the extended topology |
---|
.. | .. |
---|
311 | 329 | */ |
---|
312 | 330 | if (detect_extended_topology_early(c) < 0) |
---|
313 | 331 | detect_ht_early(c); |
---|
| 332 | +} |
---|
| 333 | + |
---|
| 334 | +static void bsp_init_intel(struct cpuinfo_x86 *c) |
---|
| 335 | +{ |
---|
| 336 | + resctrl_cpu_detect(c); |
---|
314 | 337 | } |
---|
315 | 338 | |
---|
316 | 339 | #ifdef CONFIG_X86_32 |
---|
.. | .. |
---|
466 | 489 | #endif |
---|
467 | 490 | } |
---|
468 | 491 | |
---|
469 | | -static void detect_vmx_virtcap(struct cpuinfo_x86 *c) |
---|
470 | | -{ |
---|
471 | | - /* Intel VMX MSR indicated features */ |
---|
472 | | -#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 |
---|
473 | | -#define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 |
---|
474 | | -#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 |
---|
475 | | -#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 |
---|
476 | | -#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 |
---|
477 | | -#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 |
---|
478 | | -#define x86_VMX_FEATURE_EPT_CAP_AD 0x00200000 |
---|
479 | | - |
---|
480 | | - u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; |
---|
481 | | - u32 msr_vpid_cap, msr_ept_cap; |
---|
482 | | - |
---|
483 | | - clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); |
---|
484 | | - clear_cpu_cap(c, X86_FEATURE_VNMI); |
---|
485 | | - clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); |
---|
486 | | - clear_cpu_cap(c, X86_FEATURE_EPT); |
---|
487 | | - clear_cpu_cap(c, X86_FEATURE_VPID); |
---|
488 | | - clear_cpu_cap(c, X86_FEATURE_EPT_AD); |
---|
489 | | - |
---|
490 | | - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); |
---|
491 | | - msr_ctl = vmx_msr_high | vmx_msr_low; |
---|
492 | | - if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) |
---|
493 | | - set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); |
---|
494 | | - if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) |
---|
495 | | - set_cpu_cap(c, X86_FEATURE_VNMI); |
---|
496 | | - if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { |
---|
497 | | - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, |
---|
498 | | - vmx_msr_low, vmx_msr_high); |
---|
499 | | - msr_ctl2 = vmx_msr_high | vmx_msr_low; |
---|
500 | | - if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && |
---|
501 | | - (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) |
---|
502 | | - set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); |
---|
503 | | - if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) { |
---|
504 | | - set_cpu_cap(c, X86_FEATURE_EPT); |
---|
505 | | - rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, |
---|
506 | | - msr_ept_cap, msr_vpid_cap); |
---|
507 | | - if (msr_ept_cap & x86_VMX_FEATURE_EPT_CAP_AD) |
---|
508 | | - set_cpu_cap(c, X86_FEATURE_EPT_AD); |
---|
509 | | - } |
---|
510 | | - if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) |
---|
511 | | - set_cpu_cap(c, X86_FEATURE_VPID); |
---|
512 | | - } |
---|
513 | | -} |
---|
514 | | - |
---|
515 | 492 | #define MSR_IA32_TME_ACTIVATE 0x982 |
---|
516 | 493 | |
---|
517 | 494 | /* Helpers to access TME_ACTIVATE MSR */ |
---|
.. | .. |
---|
596 | 573 | c->x86_phys_bits -= keyid_bits; |
---|
597 | 574 | } |
---|
598 | 575 | |
---|
599 | | -static void init_intel_energy_perf(struct cpuinfo_x86 *c) |
---|
600 | | -{ |
---|
601 | | - u64 epb; |
---|
602 | | - |
---|
603 | | - /* |
---|
604 | | - * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized. |
---|
605 | | - * (x86_energy_perf_policy(8) is available to change it at run-time.) |
---|
606 | | - */ |
---|
607 | | - if (!cpu_has(c, X86_FEATURE_EPB)) |
---|
608 | | - return; |
---|
609 | | - |
---|
610 | | - rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); |
---|
611 | | - if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE) |
---|
612 | | - return; |
---|
613 | | - |
---|
614 | | - pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); |
---|
615 | | - pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n"); |
---|
616 | | - epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; |
---|
617 | | - wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); |
---|
618 | | -} |
---|
619 | | - |
---|
620 | | -static void intel_bsp_resume(struct cpuinfo_x86 *c) |
---|
621 | | -{ |
---|
622 | | - /* |
---|
623 | | - * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume, |
---|
624 | | - * so reinitialize it properly like during bootup: |
---|
625 | | - */ |
---|
626 | | - init_intel_energy_perf(c); |
---|
627 | | -} |
---|
628 | | - |
---|
629 | 576 | static void init_cpuid_fault(struct cpuinfo_x86 *c) |
---|
630 | 577 | { |
---|
631 | 578 | u64 msr; |
---|
.. | .. |
---|
653 | 600 | msr = this_cpu_read(msr_misc_features_shadow); |
---|
654 | 601 | wrmsrl(MSR_MISC_FEATURES_ENABLES, msr); |
---|
655 | 602 | } |
---|
| 603 | + |
---|
| 604 | +static void split_lock_init(void); |
---|
656 | 605 | |
---|
657 | 606 | static void init_intel(struct cpuinfo_x86 *c) |
---|
658 | 607 | { |
---|
.. | .. |
---|
757 | 706 | /* Work around errata */ |
---|
758 | 707 | srat_detect_node(c); |
---|
759 | 708 | |
---|
760 | | - if (cpu_has(c, X86_FEATURE_VMX)) |
---|
761 | | - detect_vmx_virtcap(c); |
---|
| 709 | + init_ia32_feat_ctl(c); |
---|
762 | 710 | |
---|
763 | 711 | if (cpu_has(c, X86_FEATURE_TME)) |
---|
764 | 712 | detect_tme(c); |
---|
765 | | - |
---|
766 | | - init_intel_energy_perf(c); |
---|
767 | 713 | |
---|
768 | 714 | init_intel_misc_features(c); |
---|
769 | 715 | |
---|
.. | .. |
---|
771 | 717 | tsx_enable(); |
---|
772 | 718 | if (tsx_ctrl_state == TSX_CTRL_DISABLE) |
---|
773 | 719 | tsx_disable(); |
---|
| 720 | + |
---|
| 721 | + split_lock_init(); |
---|
774 | 722 | } |
---|
775 | 723 | |
---|
776 | 724 | #ifdef CONFIG_X86_32 |
---|
.. | .. |
---|
823 | 771 | { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" }, |
---|
824 | 772 | { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" }, |
---|
825 | 773 | { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" }, |
---|
826 | | - { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages */" }, |
---|
| 774 | + { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" }, |
---|
827 | 775 | { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, |
---|
828 | 776 | { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, |
---|
829 | 777 | { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, |
---|
.. | .. |
---|
851 | 799 | { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, |
---|
852 | 800 | { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, |
---|
853 | 801 | { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, |
---|
854 | | - { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" }, |
---|
| 802 | + { 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" }, |
---|
855 | 803 | { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" }, |
---|
856 | 804 | { 0x00, 0, 0 } |
---|
857 | 805 | }; |
---|
.. | .. |
---|
863 | 811 | return; |
---|
864 | 812 | |
---|
865 | 813 | /* look up this descriptor in the table */ |
---|
866 | | - for (k = 0; intel_tlb_table[k].descriptor != desc && \ |
---|
867 | | - intel_tlb_table[k].descriptor != 0; k++) |
---|
| 814 | + for (k = 0; intel_tlb_table[k].descriptor != desc && |
---|
| 815 | + intel_tlb_table[k].descriptor != 0; k++) |
---|
868 | 816 | ; |
---|
869 | 817 | |
---|
870 | 818 | if (intel_tlb_table[k].tlb_type == 0) |
---|
.. | .. |
---|
1027 | 975 | #endif |
---|
1028 | 976 | .c_detect_tlb = intel_detect_tlb, |
---|
1029 | 977 | .c_early_init = early_init_intel, |
---|
| 978 | + .c_bsp_init = bsp_init_intel, |
---|
1030 | 979 | .c_init = init_intel, |
---|
1031 | | - .c_bsp_resume = intel_bsp_resume, |
---|
1032 | 980 | .c_x86_vendor = X86_VENDOR_INTEL, |
---|
1033 | 981 | }; |
---|
1034 | 982 | |
---|
1035 | 983 | cpu_dev_register(intel_cpu_dev); |
---|
1036 | 984 | |
---|
| 985 | +#undef pr_fmt |
---|
| 986 | +#define pr_fmt(fmt) "x86/split lock detection: " fmt |
---|
| 987 | + |
---|
| 988 | +static const struct { |
---|
| 989 | + const char *option; |
---|
| 990 | + enum split_lock_detect_state state; |
---|
| 991 | +} sld_options[] __initconst = { |
---|
| 992 | + { "off", sld_off }, |
---|
| 993 | + { "warn", sld_warn }, |
---|
| 994 | + { "fatal", sld_fatal }, |
---|
| 995 | +}; |
---|
| 996 | + |
---|
| 997 | +static inline bool match_option(const char *arg, int arglen, const char *opt) |
---|
| 998 | +{ |
---|
| 999 | + int len = strlen(opt); |
---|
| 1000 | + |
---|
| 1001 | + return len == arglen && !strncmp(arg, opt, len); |
---|
| 1002 | +} |
---|
| 1003 | + |
---|
| 1004 | +static bool split_lock_verify_msr(bool on) |
---|
| 1005 | +{ |
---|
| 1006 | + u64 ctrl, tmp; |
---|
| 1007 | + |
---|
| 1008 | + if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl)) |
---|
| 1009 | + return false; |
---|
| 1010 | + if (on) |
---|
| 1011 | + ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; |
---|
| 1012 | + else |
---|
| 1013 | + ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT; |
---|
| 1014 | + if (wrmsrl_safe(MSR_TEST_CTRL, ctrl)) |
---|
| 1015 | + return false; |
---|
| 1016 | + rdmsrl(MSR_TEST_CTRL, tmp); |
---|
| 1017 | + return ctrl == tmp; |
---|
| 1018 | +} |
---|
| 1019 | + |
---|
| 1020 | +static void __init split_lock_setup(void) |
---|
| 1021 | +{ |
---|
| 1022 | + enum split_lock_detect_state state = sld_warn; |
---|
| 1023 | + char arg[20]; |
---|
| 1024 | + int i, ret; |
---|
| 1025 | + |
---|
| 1026 | + if (!split_lock_verify_msr(false)) { |
---|
| 1027 | + pr_info("MSR access failed: Disabled\n"); |
---|
| 1028 | + return; |
---|
| 1029 | + } |
---|
| 1030 | + |
---|
| 1031 | + ret = cmdline_find_option(boot_command_line, "split_lock_detect", |
---|
| 1032 | + arg, sizeof(arg)); |
---|
| 1033 | + if (ret >= 0) { |
---|
| 1034 | + for (i = 0; i < ARRAY_SIZE(sld_options); i++) { |
---|
| 1035 | + if (match_option(arg, ret, sld_options[i].option)) { |
---|
| 1036 | + state = sld_options[i].state; |
---|
| 1037 | + break; |
---|
| 1038 | + } |
---|
| 1039 | + } |
---|
| 1040 | + } |
---|
| 1041 | + |
---|
| 1042 | + switch (state) { |
---|
| 1043 | + case sld_off: |
---|
| 1044 | + pr_info("disabled\n"); |
---|
| 1045 | + return; |
---|
| 1046 | + case sld_warn: |
---|
| 1047 | + pr_info("warning about user-space split_locks\n"); |
---|
| 1048 | + break; |
---|
| 1049 | + case sld_fatal: |
---|
| 1050 | + pr_info("sending SIGBUS on user-space split_locks\n"); |
---|
| 1051 | + break; |
---|
| 1052 | + } |
---|
| 1053 | + |
---|
| 1054 | + rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); |
---|
| 1055 | + |
---|
| 1056 | + if (!split_lock_verify_msr(true)) { |
---|
| 1057 | + pr_info("MSR access failed: Disabled\n"); |
---|
| 1058 | + return; |
---|
| 1059 | + } |
---|
| 1060 | + |
---|
| 1061 | + sld_state = state; |
---|
| 1062 | + setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); |
---|
| 1063 | +} |
---|
| 1064 | + |
---|
| 1065 | +/* |
---|
| 1066 | + * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking |
---|
| 1067 | + * is not implemented as one thread could undo the setting of the other |
---|
| 1068 | + * thread immediately after dropping the lock anyway. |
---|
| 1069 | + */ |
---|
| 1070 | +static void sld_update_msr(bool on) |
---|
| 1071 | +{ |
---|
| 1072 | + u64 test_ctrl_val = msr_test_ctrl_cache; |
---|
| 1073 | + |
---|
| 1074 | + if (on) |
---|
| 1075 | + test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; |
---|
| 1076 | + |
---|
| 1077 | + wrmsrl(MSR_TEST_CTRL, test_ctrl_val); |
---|
| 1078 | +} |
---|
| 1079 | + |
---|
| 1080 | +static void split_lock_init(void) |
---|
| 1081 | +{ |
---|
| 1082 | + if (cpu_model_supports_sld) |
---|
| 1083 | + split_lock_verify_msr(sld_state != sld_off); |
---|
| 1084 | +} |
---|
| 1085 | + |
---|
| 1086 | +static void split_lock_warn(unsigned long ip) |
---|
| 1087 | +{ |
---|
| 1088 | + pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", |
---|
| 1089 | + current->comm, current->pid, ip); |
---|
| 1090 | + |
---|
| 1091 | + /* |
---|
| 1092 | + * Disable the split lock detection for this task so it can make |
---|
| 1093 | + * progress and set TIF_SLD so the detection is re-enabled via |
---|
| 1094 | + * switch_to_sld() when the task is scheduled out. |
---|
| 1095 | + */ |
---|
| 1096 | + sld_update_msr(false); |
---|
| 1097 | + set_tsk_thread_flag(current, TIF_SLD); |
---|
| 1098 | +} |
---|
| 1099 | + |
---|
| 1100 | +bool handle_guest_split_lock(unsigned long ip) |
---|
| 1101 | +{ |
---|
| 1102 | + if (sld_state == sld_warn) { |
---|
| 1103 | + split_lock_warn(ip); |
---|
| 1104 | + return true; |
---|
| 1105 | + } |
---|
| 1106 | + |
---|
| 1107 | + pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n", |
---|
| 1108 | + current->comm, current->pid, |
---|
| 1109 | + sld_state == sld_fatal ? "fatal" : "bogus", ip); |
---|
| 1110 | + |
---|
| 1111 | + current->thread.error_code = 0; |
---|
| 1112 | + current->thread.trap_nr = X86_TRAP_AC; |
---|
| 1113 | + force_sig_fault(SIGBUS, BUS_ADRALN, NULL); |
---|
| 1114 | + return false; |
---|
| 1115 | +} |
---|
| 1116 | +EXPORT_SYMBOL_GPL(handle_guest_split_lock); |
---|
| 1117 | + |
---|
| 1118 | +bool handle_user_split_lock(struct pt_regs *regs, long error_code) |
---|
| 1119 | +{ |
---|
| 1120 | + if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal) |
---|
| 1121 | + return false; |
---|
| 1122 | + split_lock_warn(regs->ip); |
---|
| 1123 | + return true; |
---|
| 1124 | +} |
---|
| 1125 | + |
---|
| 1126 | +/* |
---|
| 1127 | + * This function is called only when switching between tasks with |
---|
| 1128 | + * different split-lock detection modes. It sets the MSR for the |
---|
| 1129 | + * mode of the new task. This is right most of the time, but since |
---|
| 1130 | + * the MSR is shared by hyperthreads on a physical core there can |
---|
| 1131 | + * be glitches when the two threads need different modes. |
---|
| 1132 | + */ |
---|
| 1133 | +void switch_to_sld(unsigned long tifn) |
---|
| 1134 | +{ |
---|
| 1135 | + sld_update_msr(!(tifn & _TIF_SLD)); |
---|
| 1136 | +} |
---|
| 1137 | + |
---|
| 1138 | +/* |
---|
| 1139 | + * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should |
---|
| 1140 | + * only be trusted if it is confirmed that a CPU model implements a |
---|
| 1141 | + * specific feature at a particular bit position. |
---|
| 1142 | + * |
---|
| 1143 | + * The possible driver data field values: |
---|
| 1144 | + * |
---|
| 1145 | + * - 0: CPU models that are known to have the per-core split-lock detection |
---|
| 1146 | + * feature even though they do not enumerate IA32_CORE_CAPABILITIES. |
---|
| 1147 | + * |
---|
| 1148 | + * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use |
---|
| 1149 | + * bit 5 to enumerate the per-core split-lock detection feature. |
---|
| 1150 | + */ |
---|
| 1151 | +static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { |
---|
| 1152 | + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0), |
---|
| 1153 | + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0), |
---|
| 1154 | + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0), |
---|
| 1155 | + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1), |
---|
| 1156 | + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1), |
---|
| 1157 | + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1), |
---|
| 1158 | + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1), |
---|
| 1159 | + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1), |
---|
| 1160 | + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1), |
---|
| 1161 | + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1), |
---|
| 1162 | + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1), |
---|
| 1163 | + {} |
---|
| 1164 | +}; |
---|
| 1165 | + |
---|
| 1166 | +void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) |
---|
| 1167 | +{ |
---|
| 1168 | + const struct x86_cpu_id *m; |
---|
| 1169 | + u64 ia32_core_caps; |
---|
| 1170 | + |
---|
| 1171 | + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) |
---|
| 1172 | + return; |
---|
| 1173 | + |
---|
| 1174 | + m = x86_match_cpu(split_lock_cpu_ids); |
---|
| 1175 | + if (!m) |
---|
| 1176 | + return; |
---|
| 1177 | + |
---|
| 1178 | + switch (m->driver_data) { |
---|
| 1179 | + case 0: |
---|
| 1180 | + break; |
---|
| 1181 | + case 1: |
---|
| 1182 | + if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) |
---|
| 1183 | + return; |
---|
| 1184 | + rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps); |
---|
| 1185 | + if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)) |
---|
| 1186 | + return; |
---|
| 1187 | + break; |
---|
| 1188 | + default: |
---|
| 1189 | + return; |
---|
| 1190 | + } |
---|
| 1191 | + |
---|
| 1192 | + cpu_model_supports_sld = true; |
---|
| 1193 | + split_lock_setup(); |
---|
| 1194 | +} |
---|