forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/x86/kernel/cpu/intel.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kernel.h>
+#include <linux/pgtable.h>
 
 #include <linux/string.h>
 #include <linux/bitops.h>
@@ -11,7 +12,6 @@
 #include <linux/uaccess.h>
 
 #include <asm/cpufeature.h>
-#include <asm/pgtable.h>
 #include <asm/msr.h>
 #include <asm/bugs.h>
 #include <asm/cpu.h>
@@ -19,6 +19,11 @@
 #include <asm/microcode_intel.h>
 #include <asm/hwcap2.h>
 #include <asm/elf.h>
+#include <asm/cpu_device_id.h>
+#include <asm/cmdline.h>
+#include <asm/traps.h>
+#include <asm/resctrl.h>
+#include <asm/numa.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -31,38 +36,50 @@
 #include <asm/apic.h>
 #endif
 
+enum split_lock_detect_state {
+	sld_off = 0,
+	sld_warn,
+	sld_fatal,
+};
+
 /*
- * Just in case our CPU detection goes bad, or you have a weird system,
- * allow a way to override the automatic disabling of MPX.
+ * Default to sld_off because most systems do not support split lock detection
+ * split_lock_setup() will switch this to sld_warn on systems that support
+ * split lock detect, unless there is a command line override.
  */
-static int forcempx;
+static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
+static u64 msr_test_ctrl_cache __ro_after_init;
 
-static int __init forcempx_setup(char *__unused)
+/*
+ * With a name like MSR_TEST_CTL it should go without saying, but don't touch
+ * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it
+ * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
+ */
+static bool cpu_model_supports_sld __ro_after_init;
+
+/*
+ * Processors which have self-snooping capability can handle conflicting
+ * memory type across CPUs by snooping its own cache. However, there exists
+ * CPU models in which having conflicting memory types still leads to
+ * unpredictable behavior, machine check errors, or hangs. Clear this
+ * feature to prevent its use on machines with known erratas.
+ */
+static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
 {
-	forcempx = 1;
-
-	return 1;
-}
-__setup("intel-skd-046-workaround=disable", forcempx_setup);
-
-void check_mpx_erratum(struct cpuinfo_x86 *c)
-{
-	if (forcempx)
-		return;
-	/*
-	 * Turn off the MPX feature on CPUs where SMEP is not
-	 * available or disabled.
-	 *
-	 * Works around Intel Erratum SKD046: "Branch Instructions
-	 * May Initialize MPX Bound Registers Incorrectly".
-	 *
-	 * This might falsely disable MPX on systems without
-	 * SMEP, like Atom processors without SMEP. But there
-	 * is no such hardware known at the moment.
-	 */
-	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
-		setup_clear_cpu_cap(X86_FEATURE_MPX);
-		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
+	switch (c->x86_model) {
+	case INTEL_FAM6_CORE_YONAH:
+	case INTEL_FAM6_CORE2_MEROM:
+	case INTEL_FAM6_CORE2_MEROM_L:
+	case INTEL_FAM6_CORE2_PENRYN:
+	case INTEL_FAM6_CORE2_DUNNINGTON:
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_NEHALEM_G:
+	case INTEL_FAM6_NEHALEM_EP:
+	case INTEL_FAM6_NEHALEM_EX:
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_WESTMERE_EP:
+	case INTEL_FAM6_SANDYBRIDGE:
+		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
 	}
 }
 
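
For reference, the cleared bit is consumed elsewhere in the kernel by code that must be conservative when memory types may conflict. A minimal illustrative sketch (the function below is hypothetical, not part of this diff): flush caches only when self-snoop is absent, either because the CPU never had it or because the erratum list above cleared it.

	/* Hypothetical consumer: flush unless the CPU reliably self-snoops
	 * (i.e. check_memory_type_self_snoop_errata() did not clear it). */
	static void example_prepare_memtype_change(void)
	{
		if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
			wbinvd();
	}
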
@@ -71,7 +88,7 @@
 static int __init ring3mwait_disable(char *__unused)
 {
 	ring3mwait_disabled = true;
-	return 0;
+	return 1;
 }
 __setup("ring3mwait=disable", ring3mwait_disable);
 
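
The one-character change above is a real bug fix: a __setup() handler's return value tells the early-param parser whether the argument was consumed. Returning 0 means "not handled", so "ring3mwait=disable" would be treated as an unknown parameter and passed along to init; returning 1 marks it as handled. A hedged sketch of the convention, using a hypothetical parameter name:

	static bool example_off __initdata;

	/* Return 1: the option was consumed here and must not be
	 * forwarded to init as an unknown boot argument. */
	static int __init example_off_setup(char *__unused)
	{
		example_off = true;
		return 1;
	}
	__setup("example=off", example_off_setup);
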
@@ -116,21 +133,21 @@
 	u32 microcode;
 };
 static const struct sku_microcode spectre_bad_microcodes[] = {
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x80 },
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x80 },
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x80 },
-	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x80 },
-	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
+	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
+	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
+	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
-	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
-	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
-	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
-	{ INTEL_FAM6_BROADWELL_XEON_D,	0x03,	0x07000011 },
+	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
+	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
+	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
+	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
 	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
-	{ INTEL_FAM6_HASWELL_ULT,	0x01,	0x21 },
-	{ INTEL_FAM6_HASWELL_GT3E,	0x01,	0x18 },
-	{ INTEL_FAM6_HASWELL_CORE,	0x03,	0x23 },
+	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
+	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
+	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
 	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
 	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
 	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
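
This hunk only renames symbols to the upstream INTEL_FAM6 convention: a bare name for the client part (KABYLAKE, BROADWELL, HASWELL), _L for the low-power/mobile variant, _G for the graphics variant, _D for Xeon D, and _X for the Xeon server part. The table itself is consumed, unchanged by this diff, by a helper in the same file that treats a matching model/stepping whose microcode revision is at or below the listed one as having unstable IBRS/IBPB; it looks roughly like the sketch below (reconstructed from the surrounding file, so treat as a sketch rather than a quote).

	static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
	{
		int i;

		if (cpu_has(c, X86_FEATURE_HYPERVISOR))
			return false;

		if (c->x86 != 6)
			return false;

		for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
			if (c->x86_model == spectre_bad_microcodes[i].model &&
			    c->x86_stepping == spectre_bad_microcodes[i].stepping)
				return (c->microcode <= spectre_bad_microcodes[i].microcode);
		}
		return false;
	}
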
@@ -239,9 +256,10 @@
 	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
 	if (c->x86 == 6) {
 		switch (c->x86_model) {
-		case 0x27:	/* Penwell */
-		case 0x35:	/* Cloverview */
-		case 0x4a:	/* Merrifield */
+		case INTEL_FAM6_ATOM_SALTWELL_MID:
+		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
+		case INTEL_FAM6_ATOM_SILVERMONT_MID:
+		case INTEL_FAM6_ATOM_AIRMONT_NP:
 			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
 			break;
 		default:
@@ -303,7 +321,7 @@
 		c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
 	}
 
-	check_mpx_erratum(c);
+	check_memory_type_self_snoop_errata(c);
 
 	/*
 	 * Get the number of SMT siblings early from the extended topology
@@ -311,6 +329,11 @@
 	 */
 	if (detect_extended_topology_early(c) < 0)
 		detect_ht_early(c);
+}
+
+static void bsp_init_intel(struct cpuinfo_x86 *c)
+{
+	resctrl_cpu_detect(c);
 }
 
 #ifdef CONFIG_X86_32
@@ -466,52 +489,6 @@
 #endif
 }
 
-static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
-{
-	/* Intel VMX MSR indicated features */
-#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
-#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
-#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
-#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
-#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
-#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020
-#define x86_VMX_FEATURE_EPT_CAP_AD		0x00200000
-
-	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
-	u32 msr_vpid_cap, msr_ept_cap;
-
-	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
-	clear_cpu_cap(c, X86_FEATURE_VNMI);
-	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
-	clear_cpu_cap(c, X86_FEATURE_EPT);
-	clear_cpu_cap(c, X86_FEATURE_VPID);
-	clear_cpu_cap(c, X86_FEATURE_EPT_AD);
-
-	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
-	msr_ctl = vmx_msr_high | vmx_msr_low;
-	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
-		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
-	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
-		set_cpu_cap(c, X86_FEATURE_VNMI);
-	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
-		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
-		      vmx_msr_low, vmx_msr_high);
-		msr_ctl2 = vmx_msr_high | vmx_msr_low;
-		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
-		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
-			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
-		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
-			set_cpu_cap(c, X86_FEATURE_EPT);
-			rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
-			      msr_ept_cap, msr_vpid_cap);
-			if (msr_ept_cap & x86_VMX_FEATURE_EPT_CAP_AD)
-				set_cpu_cap(c, X86_FEATURE_EPT_AD);
-		}
-		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
-			set_cpu_cap(c, X86_FEATURE_VPID);
-	}
-}
-
 #define MSR_IA32_TME_ACTIVATE		0x982
 
 /* Helpers to access TME_ACTIVATE MSR */
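
detect_vmx_virtcap() goes away because VMX capability detection moved into the common init_ia32_feat_ctl() path, which init_intel() now calls (see the hunk further down). For readers of the removed code: per the Intel SDM, each VMX capability MSR reports "allowed-0" settings in its low 32 bits and "allowed-1" settings in its high 32 bits, so a control is usable only if its allowed-1 bit is set; the removed code OR-ed both halves together, which also counts controls that are forced on. A hedged sketch of the stricter check (names hypothetical):

	/* Report whether a VMX control may be set to 1, consulting only
	 * the allowed-1 (high) half of the capability MSR. */
	static bool vmx_ctrl_available(u32 capability_msr, u32 ctrl_bit)
	{
		u32 allowed0, allowed1;

		rdmsr(capability_msr, allowed0, allowed1);
		/* allowed0 unused: forced-on controls are still available */
		return allowed1 & ctrl_bit;
	}
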
@@ -596,36 +573,6 @@
 	c->x86_phys_bits -= keyid_bits;
 }
 
-static void init_intel_energy_perf(struct cpuinfo_x86 *c)
-{
-	u64 epb;
-
-	/*
-	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
-	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
-	 */
-	if (!cpu_has(c, X86_FEATURE_EPB))
-		return;
-
-	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
-	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
-		return;
-
-	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
-	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
-	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
-	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
-}
-
-static void intel_bsp_resume(struct cpuinfo_x86 *c)
-{
-	/*
-	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
-	 * so reinitialize it properly like during bootup:
-	 */
-	init_intel_energy_perf(c);
-}
-
 static void init_cpuid_fault(struct cpuinfo_x86 *c)
 {
 	u64 msr;
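
The removal above does not drop the functionality: upstream moved MSR_IA32_ENERGY_PERF_BIAS management into a dedicated driver (arch/x86/kernel/cpu/intel_epb.c), which also restores the MSR after suspend/resume through its own hooks. That makes intel_bsp_resume() unnecessary, which is why the .c_bsp_resume member disappears from intel_cpu_dev in the final hunk.
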
@@ -653,6 +600,8 @@
 	msr = this_cpu_read(msr_misc_features_shadow);
 	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
 }
+
+static void split_lock_init(void);
 
 static void init_intel(struct cpuinfo_x86 *c)
 {
@@ -757,13 +706,10 @@
 	/* Work around errata */
 	srat_detect_node(c);
 
-	if (cpu_has(c, X86_FEATURE_VMX))
-		detect_vmx_virtcap(c);
+	init_ia32_feat_ctl(c);
 
 	if (cpu_has(c, X86_FEATURE_TME))
 		detect_tme(c);
-
-	init_intel_energy_perf(c);
 
 	init_intel_misc_features(c);
 
@@ -771,6 +717,8 @@
 		tsx_enable();
 	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
 		tsx_disable();
+
+	split_lock_init();
 }
 
 #ifdef CONFIG_X86_32
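
Ordering note: split_lock_setup(), added at the bottom of this patch, is __init code that runs once at boot and decides sld_state, while the split_lock_init() call added here sits in init_intel() (the per-CPU .c_init hook), so every CPU that comes up, including hotplugged ones, gets MSR_TEST_CTRL programmed to match the chosen mode.
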
@@ -823,7 +771,7 @@
 	{ 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
 	{ 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
 	{ 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
-	{ 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages */" },
+	{ 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
 	{ 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
 	{ 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
 	{ 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
@@ -851,7 +799,7 @@
 	{ 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
 	{ 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
 	{ 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
-	{ 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" },
+	{ 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
 	{ 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
 	{ 0x00, 0, 0 }
 };
@@ -863,8 +811,8 @@
 		return;
 
 	/* look up this descriptor in the table */
-	for (k = 0; intel_tlb_table[k].descriptor != desc && \
-	     intel_tlb_table[k].descriptor != 0; k++)
+	for (k = 0; intel_tlb_table[k].descriptor != desc &&
+	     intel_tlb_table[k].descriptor != 0; k++)
 		;
 
 	if (intel_tlb_table[k].tlb_type == 0)
@@ -1027,10 +975,220 @@
 #endif
 	.c_detect_tlb	= intel_detect_tlb,
 	.c_early_init   = early_init_intel,
+	.c_bsp_init	= bsp_init_intel,
 	.c_init		= init_intel,
-	.c_bsp_resume	= intel_bsp_resume,
 	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
 cpu_dev_register(intel_cpu_dev);
 
+#undef pr_fmt
+#define pr_fmt(fmt) "x86/split lock detection: " fmt
+
+static const struct {
+	const char			*option;
+	enum split_lock_detect_state	state;
+} sld_options[] __initconst = {
+	{ "off",	sld_off   },
+	{ "warn",	sld_warn  },
+	{ "fatal",	sld_fatal },
+};
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+	int len = strlen(opt);
+
+	return len == arglen && !strncmp(arg, opt, len);
+}
+
+static bool split_lock_verify_msr(bool on)
+{
+	u64 ctrl, tmp;
+
+	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
+		return false;
+	if (on)
+		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+	else
+		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
+		return false;
+	rdmsrl(MSR_TEST_CTRL, tmp);
+	return ctrl == tmp;
+}
+
+static void __init split_lock_setup(void)
+{
+	enum split_lock_detect_state state = sld_warn;
+	char arg[20];
+	int i, ret;
+
+	if (!split_lock_verify_msr(false)) {
+		pr_info("MSR access failed: Disabled\n");
+		return;
+	}
+
+	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
+				  arg, sizeof(arg));
+	if (ret >= 0) {
+		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
+			if (match_option(arg, ret, sld_options[i].option)) {
+				state = sld_options[i].state;
+				break;
+			}
+		}
+	}
+
+	switch (state) {
+	case sld_off:
+		pr_info("disabled\n");
+		return;
+	case sld_warn:
+		pr_info("warning about user-space split_locks\n");
+		break;
+	case sld_fatal:
+		pr_info("sending SIGBUS on user-space split_locks\n");
+		break;
+	}
+
+	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
+
+	if (!split_lock_verify_msr(true)) {
+		pr_info("MSR access failed: Disabled\n");
+		return;
+	}
+
+	sld_state = state;
+	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
+}
+
+/*
+ * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
+ * is not implemented as one thread could undo the setting of the other
+ * thread immediately after dropping the lock anyway.
+ */
+static void sld_update_msr(bool on)
+{
+	u64 test_ctrl_val = msr_test_ctrl_cache;
+
+	if (on)
+		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+
+	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
+}
+
+static void split_lock_init(void)
+{
+	if (cpu_model_supports_sld)
+		split_lock_verify_msr(sld_state != sld_off);
+}
+
+static void split_lock_warn(unsigned long ip)
+{
+	pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
+			    current->comm, current->pid, ip);
+
+	/*
+	 * Disable the split lock detection for this task so it can make
+	 * progress and set TIF_SLD so the detection is re-enabled via
+	 * switch_to_sld() when the task is scheduled out.
+	 */
+	sld_update_msr(false);
+	set_tsk_thread_flag(current, TIF_SLD);
+}
+
+bool handle_guest_split_lock(unsigned long ip)
+{
+	if (sld_state == sld_warn) {
+		split_lock_warn(ip);
+		return true;
+	}
+
+	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
+		     current->comm, current->pid,
+		     sld_state == sld_fatal ? "fatal" : "bogus", ip);
+
+	current->thread.error_code = 0;
+	current->thread.trap_nr = X86_TRAP_AC;
+	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
+	return false;
+}
+EXPORT_SYMBOL_GPL(handle_guest_split_lock);
+
+bool handle_user_split_lock(struct pt_regs *regs, long error_code)
+{
+	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
+		return false;
+	split_lock_warn(regs->ip);
+	return true;
+}
+
+/*
+ * This function is called only when switching between tasks with
+ * different split-lock detection modes. It sets the MSR for the
+ * mode of the new task. This is right most of the time, but since
+ * the MSR is shared by hyperthreads on a physical core there can
+ * be glitches when the two threads need different modes.
+ */
+void switch_to_sld(unsigned long tifn)
+{
+	sld_update_msr(!(tifn & _TIF_SLD));
+}
+
+/*
+ * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
+ * only be trusted if it is confirmed that a CPU model implements a
+ * specific feature at a particular bit position.
+ *
+ * The possible driver data field values:
+ *
+ * - 0: CPU models that are known to have the per-core split-lock detection
+ *	feature even though they do not enumerate IA32_CORE_CAPABILITIES.
+ *
+ * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
+ *      bit 5 to enumerate the per-core split-lock detection feature.
+ */
+static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		0),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		0),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		0),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	1),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	1),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	1),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		1),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		1),
+	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	1),
+	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		1),
+	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		1),
+	{}
+};
+
+void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
+{
+	const struct x86_cpu_id *m;
+	u64 ia32_core_caps;
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		return;
+
+	m = x86_match_cpu(split_lock_cpu_ids);
+	if (!m)
+		return;
+
+	switch (m->driver_data) {
+	case 0:
+		break;
+	case 1:
+		if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
+			return;
+		rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
+		if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
+			return;
+		break;
+	default:
+		return;
+	}
+
+	cpu_model_supports_sld = true;
+	split_lock_setup();
+}
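
To exercise the new machinery from user space, boot with split_lock_detect=warn and run a program that performs a locked operation across a cache-line boundary; the kernel should emit the ratelimited "took a split_lock trap" warning above, while split_lock_detect=fatal delivers SIGBUS instead. A minimal stand-alone test (an illustrative sketch, not part of the kernel patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* A 4-byte atomic at offset 62 of a 64-byte-aligned buffer
		 * straddles a cache-line boundary, so the locked add below
		 * raises #AC when split lock detection is armed. */
		static _Alignas(64) uint8_t buf[128];
		uint32_t *split = (uint32_t *)(buf + 62);

		__atomic_fetch_add(split, 1, __ATOMIC_SEQ_CST);
		printf("split-locked add survived: %u\n", *split);
		return 0;
	}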