2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/arch/x86/kernel/cpu/bugs.c
@@ -9,7 +9,6 @@
  * - Andrew D. Balsa (code cleanup).
  */
 #include <linux/init.h>
-#include <linux/utsname.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/nospec.h>
@@ -27,8 +26,6 @@
 #include <asm/msr.h>
 #include <asm/vmx.h>
 #include <asm/paravirt.h>
-#include <asm/alternative.h>
-#include <asm/set_memory.h>
 #include <asm/intel-family.h>
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
@@ -48,6 +45,8 @@
 static void __init taa_select_mitigation(void);
 static void __init mmio_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
+static void __init gds_select_mitigation(void);
+static void __init srso_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -57,7 +56,12 @@
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 
+u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
+
+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
 
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
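
The newly exported x86_pred_cmd caches which command value IBPB users should write to MSR_IA32_PRED_CMD; srso_select_mitigation() further down can downgrade it to PRED_CMD_SBPB when a full IBPB is unnecessary. As a sketch of how the exported variable is expected to be consumed (modelled on the upstream <asm/nospec-branch.h> helper; the exact form in this tree may differ):

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
	/* Writes PRED_CMD_IBPB or, after SRSO selection, PRED_CMD_SBPB. */
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd,
			      X86_FEATURE_USE_IBPB);
}

Likewise, x86_return_thunk is the boot-resolved return-thunk target that the retbleed and SRSO hunks below repoint.
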
@@ -116,28 +120,23 @@
 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
 
-void __init check_bugs(void)
+void __init cpu_select_mitigations(void)
 {
-	identify_boot_cpu();
-
-	/*
-	 * identify_boot_cpu() initialized SMT support information, let the
-	 * core code know.
-	 */
-	cpu_smt_check_topology();
-
-	if (!IS_ENABLED(CONFIG_SMP)) {
-		pr_info("CPU: ");
-		print_cpu_info(&boot_cpu_data);
-	}
-
 	/*
 	 * Read the SPEC_CTRL MSR to account for reserved bits which may
 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
 	 * init code as it is not enumerated and depends on the family.
 	 */
-	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+		/*
+		 * Previously running kernel (kexec), may have some controls
+		 * turned ON. Clear them and let the mitigations setup below
+		 * rediscover them based on configuration.
+		 */
+		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+	}
 
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
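
Clearing SPEC_CTRL_MITIGATIONS_MASK sanitizes x86_spec_ctrl_base against bits a previously kexec'ed kernel may have left enabled, so the selection functions below can rediscover them from the current configuration. The mask itself is defined outside this file; a hedged sketch of the kind of definition assumed (the authoritative list lives in <asm/msr-index.h> and may contain additional bits, e.g. SPEC_CTRL_RRSBA_DIS_S):

/* Sketch only: the SPEC_CTRL bits that mitigation selection re-derives. */
#define SPEC_CTRL_MITIGATIONS_MASK	(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)
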
@@ -159,38 +158,12 @@
 	md_clear_select_mitigation();
 	srbds_select_mitigation();
 
-	arch_smt_update();
-
-#ifdef CONFIG_X86_32
 	/*
-	 * Check whether we are able to run this kernel safely on SMP.
-	 *
-	 * - i386 is no longer supported.
-	 * - In order to run on anything without a TSC, we need to be
-	 *   compiled for a i486.
+	 * srso_select_mitigation() depends and must run after
+	 * retbleed_select_mitigation().
 	 */
-	if (boot_cpu_data.x86 < 4)
-		panic("Kernel requires i486+ for 'invlpg' and other features");
-
-	init_utsname()->machine[1] =
-		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
-	alternative_instructions();
-
-	fpu__init_check_bugs();
-#else /* CONFIG_X86_64 */
-	alternative_instructions();
-
-	/*
-	 * Make sure the first 2MB area is not mapped by huge pages
-	 * There are typically fixed size MTRRs in there and overlapping
-	 * MTRRs into large pages causes slow downs.
-	 *
-	 * Right now we don't do that with gbpages because there seems
-	 * very little benefit for that case.
-	 */
-	if (!direct_gbpages)
-		set_memory_4k((unsigned long)__va(0), 1);
-#endif
+	srso_select_mitigation();
+	gds_select_mitigation();
 }
 
 /*
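
The boot-CPU identification, SMT topology check, alternatives patching and the 32-bit/FPU sanity checks removed above are not dropped; with check_bugs() renamed to cpu_select_mitigations(), that work is expected to move to a generic init hook outside this file (upstream: arch_cpu_finalize_init() in arch/x86/kernel/cpu/common.c). A sketch of the assumed caller, not part of this patch:

void __init arch_cpu_finalize_init(void)
{
	identify_boot_cpu();

	/* identify_boot_cpu() initialized SMT support information. */
	cpu_smt_check_topology();

	/* Pick mitigations first, then patch alternatives accordingly. */
	cpu_select_mitigations();
	arch_smt_update();

	alternative_instructions();
}
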
@@ -649,6 +622,149 @@
 early_param("srbds", srbds_parse_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt)	"GDS: " fmt
+
+enum gds_mitigations {
+	GDS_MITIGATION_OFF,
+	GDS_MITIGATION_UCODE_NEEDED,
+	GDS_MITIGATION_FORCE,
+	GDS_MITIGATION_FULL,
+	GDS_MITIGATION_FULL_LOCKED,
+	GDS_MITIGATION_HYPERVISOR,
+};
+
+#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
+static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
+#else
+static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
+#endif
+
+static const char * const gds_strings[] = {
+	[GDS_MITIGATION_OFF]		= "Vulnerable",
+	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
+	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
+	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
+	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
+	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
+};
+
+bool gds_ucode_mitigated(void)
+{
+	return (gds_mitigation == GDS_MITIGATION_FULL ||
+		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
+}
+EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
+
+void update_gds_msr(void)
+{
+	u64 mcu_ctrl_after;
+	u64 mcu_ctrl;
+
+	switch (gds_mitigation) {
+	case GDS_MITIGATION_OFF:
+		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+		mcu_ctrl |= GDS_MITG_DIS;
+		break;
+	case GDS_MITIGATION_FULL_LOCKED:
+		/*
+		 * The LOCKED state comes from the boot CPU. APs might not have
+		 * the same state. Make sure the mitigation is enabled on all
+		 * CPUs.
+		 */
+	case GDS_MITIGATION_FULL:
+		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+		mcu_ctrl &= ~GDS_MITG_DIS;
+		break;
+	case GDS_MITIGATION_FORCE:
+	case GDS_MITIGATION_UCODE_NEEDED:
+	case GDS_MITIGATION_HYPERVISOR:
+		return;
+	};
+
+	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+
+	/*
+	 * Check to make sure that the WRMSR value was not ignored. Writes to
+	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
+	 * processor was not.
+	 */
+	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
+	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
+}
+
+static void __init gds_select_mitigation(void)
+{
+	u64 mcu_ctrl;
+
+	if (!boot_cpu_has_bug(X86_BUG_GDS))
+		return;
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
+		goto out;
+	}
+
+	if (cpu_mitigations_off())
+		gds_mitigation = GDS_MITIGATION_OFF;
+	/* Will verify below that mitigation _can_ be disabled */
+
+	/* No microcode */
+	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+		if (gds_mitigation == GDS_MITIGATION_FORCE) {
+			/*
+			 * This only needs to be done on the boot CPU so do it
+			 * here rather than in update_gds_msr()
+			 */
+			setup_clear_cpu_cap(X86_FEATURE_AVX);
+			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
+		} else {
+			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
+		}
+		goto out;
+	}
+
+	/* Microcode has mitigation, use it */
+	if (gds_mitigation == GDS_MITIGATION_FORCE)
+		gds_mitigation = GDS_MITIGATION_FULL;
+
+	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+	if (mcu_ctrl & GDS_MITG_LOCKED) {
+		if (gds_mitigation == GDS_MITIGATION_OFF)
+			pr_warn("Mitigation locked. Disable failed.\n");
+
+		/*
+		 * The mitigation is selected from the boot CPU. All other CPUs
+		 * _should_ have the same state. If the boot CPU isn't locked
+		 * but others are then update_gds_msr() will WARN() of the state
+		 * mismatch. If the boot CPU is locked update_gds_msr() will
+		 * ensure the other CPUs have the mitigation enabled.
+		 */
+		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
+	}
+
+	update_gds_msr();
+out:
+	pr_info("%s\n", gds_strings[gds_mitigation]);
+}
+
+static int __init gds_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!boot_cpu_has_bug(X86_BUG_GDS))
+		return 0;
+
+	if (!strcmp(str, "off"))
+		gds_mitigation = GDS_MITIGATION_OFF;
+	else if (!strcmp(str, "force"))
+		gds_mitigation = GDS_MITIGATION_FORCE;
+
+	return 0;
+}
+early_param("gather_data_sampling", gds_parse_cmdline);
+
+#undef pr_fmt
 #define pr_fmt(fmt) "Spectre V1 : " fmt
 
 enum spectre_v1_mitigation {
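
The GDS code above recognizes gather_data_sampling=off and gather_data_sampling=force on the kernel command line (force trades AVX for protection when updated microcode is missing) and otherwise drives the mitigation through MSR_IA32_MCU_OPT_CTRL. The MSR bits it toggles are defined elsewhere; a sketch of the assumed layout, to be verified against this tree's <asm/msr-index.h>:

#define MSR_IA32_MCU_OPT_CTRL		0x00000123
#define GDS_MITG_DIS			BIT(4)	/* set: microcode mitigation disabled */
#define GDS_MITG_LOCKED			BIT(5)	/* set: writes to GDS_MITG_DIS are ignored */
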
@@ -867,6 +983,9 @@
 	setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 	setup_force_cpu_cap(X86_FEATURE_UNRET);
 
+	if (IS_ENABLED(CONFIG_RETHUNK))
+		x86_return_thunk = retbleed_return_thunk;
+
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		pr_err(RETBLEED_UNTRAIN_MSG);
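
Retbleed now selects its untraining sequence by repointing the boot-resolved x86_return_thunk (added near the top of this patch) at retbleed_return_thunk instead of relying on a single fixed symbol; srso_select_mitigation() below reuses the same hook for the SRSO thunks. The thunks themselves are assembly entry points (upstream: arch/x86/lib/retpoline.S); their assumed C-side declarations are roughly:

extern void __x86_return_thunk(void);
extern void retbleed_return_thunk(void);
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
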
@@ -1058,12 +1177,16 @@
 	return SPECTRE_V2_USER_CMD_AUTO;
 }
 
-static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
 {
-	return mode == SPECTRE_V2_IBRS ||
-	       mode == SPECTRE_V2_EIBRS ||
+	return mode == SPECTRE_V2_EIBRS ||
 	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
 	       mode == SPECTRE_V2_EIBRS_LFENCE;
+}
+
+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
+{
+	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
 }
 
 static void __init
@@ -1128,12 +1251,19 @@
 	}
 
 	/*
-	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
-	 * STIBP is not required.
+	 * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
+	 * is not required.
+	 *
+	 * Enhanced IBRS also protects against cross-thread branch target
+	 * injection in user-mode as the IBRS bit remains always set which
+	 * implicitly enables cross-thread protections. However, in legacy IBRS
+	 * mode, the IBRS bit is set only on kernel entry and cleared on return
+	 * to userspace. This disables the implicit cross-thread protection,
+	 * so allow for STIBP to be selected in that case.
 	 */
 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
 	    !smt_possible ||
-	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
+	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
 		return;
 
 	/*
@@ -1896,6 +2026,8 @@
 		if (ctrl == PR_SPEC_FORCE_DISABLE)
 			task_set_spec_ib_force_disable(task);
 		task_update_spec_tif(task);
+		if (task == current)
+			indirect_branch_prediction_barrier();
 		break;
 	default:
 		return -ERANGE;
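
Issuing an IBPB immediately when a task disables indirect branch speculation for itself closes the window in which predictions trained before the prctl() could still be consumed. For reference, a minimal self-contained userspace program exercising exactly this path (the constants are the standard <linux/prctl.h> values, provided here as fallbacks in case the libc headers are old):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_SET_SPECULATION_CTRL	53
#endif
#ifndef PR_SPEC_INDIRECT_BRANCH
#define PR_SPEC_INDIRECT_BRANCH	1
#endif
#ifndef PR_SPEC_DISABLE
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Disable indirect branch speculation for this task; with the hunk
	 * above the kernel also issues an IBPB right away. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0))
		perror("prctl(PR_SET_SPECULATION_CTRL)");
	return 0;
}
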
@@ -2117,6 +2249,170 @@
 early_param("l1tf", l1tf_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt
+
+enum srso_mitigation {
+	SRSO_MITIGATION_NONE,
+	SRSO_MITIGATION_MICROCODE,
+	SRSO_MITIGATION_SAFE_RET,
+	SRSO_MITIGATION_IBPB,
+	SRSO_MITIGATION_IBPB_ON_VMEXIT,
+};
+
+enum srso_mitigation_cmd {
+	SRSO_CMD_OFF,
+	SRSO_CMD_MICROCODE,
+	SRSO_CMD_SAFE_RET,
+	SRSO_CMD_IBPB,
+	SRSO_CMD_IBPB_ON_VMEXIT,
+};
+
+static const char * const srso_strings[] = {
+	[SRSO_MITIGATION_NONE]		= "Vulnerable",
+	[SRSO_MITIGATION_MICROCODE]	= "Mitigation: microcode",
+	[SRSO_MITIGATION_SAFE_RET]	= "Mitigation: safe RET",
+	[SRSO_MITIGATION_IBPB]		= "Mitigation: IBPB",
+	[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
+};
+
+static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
+static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
+
+static int __init srso_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off"))
+		srso_cmd = SRSO_CMD_OFF;
+	else if (!strcmp(str, "microcode"))
+		srso_cmd = SRSO_CMD_MICROCODE;
+	else if (!strcmp(str, "safe-ret"))
+		srso_cmd = SRSO_CMD_SAFE_RET;
+	else if (!strcmp(str, "ibpb"))
+		srso_cmd = SRSO_CMD_IBPB;
+	else if (!strcmp(str, "ibpb-vmexit"))
+		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
+	else
+		pr_err("Ignoring unknown SRSO option (%s).", str);
+
+	return 0;
+}
+early_param("spec_rstack_overflow", srso_parse_cmdline);
+
+#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
+
+static void __init srso_select_mitigation(void)
+{
+	bool has_microcode;
+
+	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+		goto pred_cmd;
+
+	/*
+	 * The first check is for the kernel running as a guest in order
+	 * for guests to verify whether IBPB is a viable mitigation.
+	 */
+	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
+	if (!has_microcode) {
+		pr_warn("IBPB-extending microcode not applied!\n");
+		pr_warn(SRSO_NOTICE);
+	} else {
+		/*
+		 * Enable the synthetic (even if in a real CPUID leaf)
+		 * flags for guests.
+		 */
+		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+
+		/*
+		 * Zen1/2 with SMT off aren't vulnerable after the right
+		 * IBPB microcode has been applied.
+		 */
+		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
+			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+			return;
+		}
+	}
+
+	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+		if (has_microcode) {
+			pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
+			srso_mitigation = SRSO_MITIGATION_IBPB;
+			goto pred_cmd;
+		}
+	}
+
+	switch (srso_cmd) {
+	case SRSO_CMD_OFF:
+		goto pred_cmd;
+
+	case SRSO_CMD_MICROCODE:
+		if (has_microcode) {
+			srso_mitigation = SRSO_MITIGATION_MICROCODE;
+			pr_warn(SRSO_NOTICE);
+		}
+		break;
+
+	case SRSO_CMD_SAFE_RET:
+		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
+			/*
+			 * Enable the return thunk for generated code
+			 * like ftrace, static_call, etc.
+			 */
+			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+			setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+			if (boot_cpu_data.x86 == 0x19) {
+				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+				x86_return_thunk = srso_alias_return_thunk;
+			} else {
+				setup_force_cpu_cap(X86_FEATURE_SRSO);
+				x86_return_thunk = srso_return_thunk;
+			}
+			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+			goto pred_cmd;
+		}
+		break;
+
+	case SRSO_CMD_IBPB:
+		if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+			if (has_microcode) {
+				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+				srso_mitigation = SRSO_MITIGATION_IBPB;
+			}
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+			goto pred_cmd;
+		}
+		break;
+
+	case SRSO_CMD_IBPB_ON_VMEXIT:
+		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
+			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
+				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+			}
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+			goto pred_cmd;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
+
+pred_cmd:
+	if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
+	    boot_cpu_has(X86_FEATURE_SBPB))
+		x86_pred_cmd = PRED_CMD_SBPB;
+}
+
+#undef pr_fmt
 #define pr_fmt(fmt) fmt
 
 #ifdef CONFIG_SYSFS
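
spec_rstack_overflow= accepts off, microcode, safe-ret (the default), ibpb and ibpb-vmexit. The pred_cmd label at the end downgrades the barrier command from IBPB to SBPB when the CPU reports SRSO_NO, or when the mitigation is off but SBPB is available. A sketch of the assumed MSR_IA32_PRED_CMD encodings (authoritative values in <asm/msr-index.h>):

#define MSR_IA32_PRED_CMD		0x00000049
#define PRED_CMD_IBPB			BIT(0)	/* full indirect branch prediction barrier */
#define PRED_CMD_SBPB			BIT(7)	/* selective (cheaper) barrier for SRSO_NO parts */
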
@@ -2225,7 +2521,7 @@
 
 static char *stibp_state(void)
 {
-	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
+	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
 		return "";
 
 	switch (spectre_v2_user_stibp) {
@@ -2314,6 +2610,21 @@
 	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
 }
 
+static ssize_t gds_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
+}
+
+static ssize_t srso_show_state(char *buf)
+{
+	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
+		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
+
+	return sysfs_emit(buf, "%s%s\n",
+			  srso_strings[srso_mitigation],
+			  boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -2362,6 +2673,12 @@
 
 	case X86_BUG_RETBLEED:
 		return retbleed_show_state(buf);
+
+	case X86_BUG_GDS:
+		return gds_show_state(buf);
+
+	case X86_BUG_SRSO:
+		return srso_show_state(buf);
 
 	default:
 		break;
@@ -2427,4 +2744,14 @@
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
 }
+
+ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
+}
+
+ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
+}
 #endif
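
The two new cpu_show_*() handlers back the generic vulnerability attributes registered in drivers/base/cpu.c; with this patch applied they are expected to surface as /sys/devices/system/cpu/vulnerabilities/gather_data_sampling and .../spec_rstack_overflow (file names assumed from the usual attribute naming, not shown in this diff). A small self-contained reader for a quick check:

#include <stdio.h>

static void show(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%s: not present\n", path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	show("/sys/devices/system/cpu/vulnerabilities/gather_data_sampling");
	show("/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow");
	return 0;
}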