forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/x86/kernel/cpu/bugs.c
@@ -15,6 +15,8 @@
 #include <linux/nospec.h>
 #include <linux/prctl.h>
 #include <linux/sched/smt.h>
+#include <linux/pgtable.h>
+#include <linux/bpf.h>
 
 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
@@ -26,33 +28,68 @@
 #include <asm/vmx.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
-#include <asm/pgtable.h>
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
+#include <asm/tlbflush.h>
 
 #include "cpu.h"
 
 static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
+static void __init retbleed_select_mitigation(void);
+static void __init spectre_v2_user_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
-static void __init mds_print_mitigation(void);
+static void __init md_clear_update_mitigation(void);
+static void __init md_clear_select_mitigation(void);
 static void __init taa_select_mitigation(void);
+static void __init mmio_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
 
-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+/* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/* The current value of the SPEC_CTRL MSR with task-specific bits set */
+DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
+/* Update SPEC_CTRL MSR and its cached copy unconditionally */
+static void update_spec_ctrl(u64 val)
+{
+	this_cpu_write(x86_spec_ctrl_current, val);
+	wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
 /*
- * The vendor and possibly platform specific bits which can be modified in
- * x86_spec_ctrl_base.
+ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
+ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+void update_spec_ctrl_cond(u64 val)
+{
+	if (this_cpu_read(x86_spec_ctrl_current) == val)
+		return;
+
+	this_cpu_write(x86_spec_ctrl_current, val);
+
+	/*
+	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
+	 * forced the update can be delayed until that time.
+	 */
+	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+		wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
+u64 spec_ctrl_current(void)
+{
+	return this_cpu_read(x86_spec_ctrl_current);
+}
+EXPORT_SYMBOL_GPL(spec_ctrl_current);
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
@@ -74,6 +111,10 @@
 /* Control MDS CPU buffer clear before idling (halt, mwait) */
 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
 EXPORT_SYMBOL_GPL(mds_idle_clear);
+
+/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
+DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
 
 void __init check_bugs(void)
 {
@@ -98,24 +139,25 @@
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
-	/* Allow STIBP in MSR_SPEC_CTRL if supported */
-	if (boot_cpu_has(X86_FEATURE_STIBP))
-		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
-
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
+	/*
+	 * retbleed_select_mitigation() relies on the state set by
+	 * spectre_v2_select_mitigation(); specifically it wants to know about
+	 * spectre_v2=ibrs.
+	 */
+	retbleed_select_mitigation();
+	/*
+	 * spectre_v2_user_select_mitigation() relies on the state set by
+	 * retbleed_select_mitigation(); specifically the STIBP selection is
+	 * forced for UNRET or IBPB.
+	 */
+	spectre_v2_user_select_mitigation();
 	ssb_select_mitigation();
 	l1tf_select_mitigation();
-	mds_select_mitigation();
-	taa_select_mitigation();
+	md_clear_select_mitigation();
 	srbds_select_mitigation();
-
-	/*
-	 * As MDS and TAA mitigations are inter-related, print MDS
-	 * mitigation until after TAA mitigation selection is done.
-	 */
-	mds_print_mitigation();
 
 	arch_smt_update();
 
@@ -151,31 +193,17 @@
 #endif
 }
 
+/*
+ * NOTE: For VMX, this function is not called in the vmexit path.
+ * It uses vmx_spec_ctrl_restore_host() instead.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
-	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+	u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
 	struct thread_info *ti = current_thread_info();
 
-	/* Is MSR_SPEC_CTRL implemented ? */
 	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
-		/*
-		 * Restrict guest_spec_ctrl to supported values. Clear the
-		 * modifiable bits in the host base value and or the
-		 * modifiable bits from the guest value.
-		 */
-		guestval = hostval & ~x86_spec_ctrl_mask;
-		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
-
-		/* SSBD controlled in MSR_SPEC_CTRL */
-		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-		    static_cpu_has(X86_FEATURE_AMD_SSBD))
-			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
-
-		/* Conditional STIBP enabled? */
-		if (static_branch_unlikely(&switch_to_cond_stibp))
-			hostval |= stibp_tif_to_spec_ctrl(ti->flags);
-
 		if (hostval != guestval) {
 			msrval = setguest ? guestval : hostval;
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -256,14 +284,6 @@
 	}
 }
 
-static void __init mds_print_mitigation(void)
-{
-	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
-		return;
-
-	pr_info("%s\n", mds_strings[mds_mitigation]);
-}
-
 static int __init mds_cmdline(char *str)
 {
 	if (!boot_cpu_has_bug(X86_BUG_MDS))
@@ -288,6 +308,13 @@
 #undef pr_fmt
 #define pr_fmt(fmt)	"TAA: " fmt
 
+enum taa_mitigations {
+	TAA_MITIGATION_OFF,
+	TAA_MITIGATION_UCODE_NEEDED,
+	TAA_MITIGATION_VERW,
+	TAA_MITIGATION_TSX_DISABLED,
+};
+
 /* Default mitigation for TAA-affected CPUs */
 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
 static bool taa_nosmt __ro_after_init;
@@ -311,7 +338,7 @@
 	/* TSX previously disabled by tsx=off */
 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
 		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
-		goto out;
+		return;
 	}
 
 	if (cpu_mitigations_off()) {
@@ -325,7 +352,7 @@
 	 */
 	if (taa_mitigation == TAA_MITIGATION_OFF &&
 	    mds_mitigation == MDS_MITIGATION_OFF)
-		goto out;
+		return;
 
 	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
 		taa_mitigation = TAA_MITIGATION_VERW;
@@ -357,18 +384,6 @@
 
 	if (taa_nosmt || cpu_mitigations_auto_nosmt())
 		cpu_smt_disable(false);
-
-	/*
-	 * Update MDS mitigation, if necessary, as the mds_user_clear is
-	 * now enabled for TAA mitigation.
-	 */
-	if (mds_mitigation == MDS_MITIGATION_OFF &&
-	    boot_cpu_has_bug(X86_BUG_MDS)) {
-		mds_mitigation = MDS_MITIGATION_FULL;
-		mds_select_mitigation();
-	}
-out:
-	pr_info("%s\n", taa_strings[taa_mitigation]);
 }
 
 static int __init tsx_async_abort_parse_cmdline(char *str)
@@ -391,6 +406,154 @@
 	return 0;
 }
 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt)	"MMIO Stale Data: " fmt
+
+enum mmio_mitigations {
+	MMIO_MITIGATION_OFF,
+	MMIO_MITIGATION_UCODE_NEEDED,
+	MMIO_MITIGATION_VERW,
+};
+
+/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
+static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
+static bool mmio_nosmt __ro_after_init = false;
+
+static const char * const mmio_strings[] = {
+	[MMIO_MITIGATION_OFF]		= "Vulnerable",
+	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
+	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
+};
+
+static void __init mmio_select_mitigation(void)
+{
+	u64 ia32_cap;
+
+	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+	    boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+	    cpu_mitigations_off()) {
+		mmio_mitigation = MMIO_MITIGATION_OFF;
+		return;
+	}
+
+	if (mmio_mitigation == MMIO_MITIGATION_OFF)
+		return;
+
+	ia32_cap = x86_read_arch_cap_msr();
+
+	/*
+	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
+	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+	 */
+	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
+					      boot_cpu_has(X86_FEATURE_RTM)))
+		static_branch_enable(&mds_user_clear);
+	else
+		static_branch_enable(&mmio_stale_data_clear);
+
+	/*
+	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
+	 * be propagated to uncore buffers, clearing the Fill buffers on idle
+	 * is required irrespective of SMT state.
+	 */
+	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+		static_branch_enable(&mds_idle_clear);
+
+	/*
+	 * Check if the system has the right microcode.
+	 *
+	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
+	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+	 * affected systems.
+	 */
+	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+	     !(ia32_cap & ARCH_CAP_MDS_NO)))
+		mmio_mitigation = MMIO_MITIGATION_VERW;
+	else
+		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+
+	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
+		cpu_smt_disable(false);
+}
+
+static int __init mmio_stale_data_parse_cmdline(char *str)
+{
+	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+		return 0;
+
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off")) {
+		mmio_mitigation = MMIO_MITIGATION_OFF;
+	} else if (!strcmp(str, "full")) {
+		mmio_mitigation = MMIO_MITIGATION_VERW;
+	} else if (!strcmp(str, "full,nosmt")) {
+		mmio_mitigation = MMIO_MITIGATION_VERW;
+		mmio_nosmt = true;
+	}
+
+	return 0;
+}
+early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt)	"" fmt
+
+static void __init md_clear_update_mitigation(void)
+{
+	if (cpu_mitigations_off())
+		return;
+
+	if (!static_key_enabled(&mds_user_clear))
+		goto out;
+
+	/*
+	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
+	 * mitigation, if necessary.
+	 */
+	if (mds_mitigation == MDS_MITIGATION_OFF &&
+	    boot_cpu_has_bug(X86_BUG_MDS)) {
+		mds_mitigation = MDS_MITIGATION_FULL;
+		mds_select_mitigation();
+	}
+	if (taa_mitigation == TAA_MITIGATION_OFF &&
+	    boot_cpu_has_bug(X86_BUG_TAA)) {
+		taa_mitigation = TAA_MITIGATION_VERW;
+		taa_select_mitigation();
+	}
+	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
+	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
+		mmio_mitigation = MMIO_MITIGATION_VERW;
+		mmio_select_mitigation();
+	}
+out:
+	if (boot_cpu_has_bug(X86_BUG_MDS))
+		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
+	if (boot_cpu_has_bug(X86_BUG_TAA))
+		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
+	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
+}
+
+static void __init md_clear_select_mitigation(void)
+{
+	mds_select_mitigation();
+	taa_select_mitigation();
+	mmio_select_mitigation();
+
+	/*
+	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
+	 * and print their mitigation after MDS, TAA and MMIO Stale Data
+	 * mitigation selection is done.
+	 */
+	md_clear_update_mitigation();
+}
 
 #undef pr_fmt
 #define pr_fmt(fmt)	"SRBDS: " fmt
@@ -453,11 +616,13 @@
 		return;
 
 	/*
-	 * Check to see if this is one of the MDS_NO systems supporting
-	 * TSX that are only exposed to SRBDS when TSX is enabled.
+	 * Check to see if this is one of the MDS_NO systems supporting TSX that
+	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+	 * by Processor MMIO Stale Data vulnerability.
 	 */
 	ia32_cap = x86_read_arch_cap_msr();
-	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
+	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
 	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
@@ -536,14 +701,12 @@
 	 * If FSGSBASE is enabled, the user can put a kernel address in
 	 * GS, in which case SMAP provides no protection.
 	 *
-	 * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
-	 *	   FSGSBASE enablement patches have been merged. ]
-	 *
 	 * If FSGSBASE is disabled, the user can only put a user space
 	 * address in GS. That makes an attack harder, but still
 	 * possible if there's no SMAP protection.
 	 */
-	if (!smap_works_speculatively()) {
+	if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
+	    !smap_works_speculatively()) {
 		/*
 		 * Mitigation can be provided from SWAPGS itself or
 		 * PTI as the CR3 write in the Meltdown mitigation
@@ -575,11 +738,179 @@
 }
 early_param("nospectre_v1", nospectre_v1_cmdline);
 
-#undef pr_fmt
-#define pr_fmt(fmt)	"Spectre V2 : " fmt
-
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 	SPECTRE_V2_NONE;
+
+#undef pr_fmt
+#define pr_fmt(fmt)	"RETBleed: " fmt
+
+enum retbleed_mitigation {
+	RETBLEED_MITIGATION_NONE,
+	RETBLEED_MITIGATION_UNRET,
+	RETBLEED_MITIGATION_IBPB,
+	RETBLEED_MITIGATION_IBRS,
+	RETBLEED_MITIGATION_EIBRS,
+};
+
+enum retbleed_mitigation_cmd {
+	RETBLEED_CMD_OFF,
+	RETBLEED_CMD_AUTO,
+	RETBLEED_CMD_UNRET,
+	RETBLEED_CMD_IBPB,
+};
+
+const char * const retbleed_strings[] = {
+	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
+	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
+	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
+	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
+	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
+};
+
+static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
+	RETBLEED_MITIGATION_NONE;
+static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
+	RETBLEED_CMD_AUTO;
+
+static int __ro_after_init retbleed_nosmt = false;
+
+static int __init retbleed_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	while (str) {
+		char *next = strchr(str, ',');
+		if (next) {
+			*next = 0;
+			next++;
+		}
+
+		if (!strcmp(str, "off")) {
+			retbleed_cmd = RETBLEED_CMD_OFF;
+		} else if (!strcmp(str, "auto")) {
+			retbleed_cmd = RETBLEED_CMD_AUTO;
+		} else if (!strcmp(str, "unret")) {
+			retbleed_cmd = RETBLEED_CMD_UNRET;
+		} else if (!strcmp(str, "ibpb")) {
+			retbleed_cmd = RETBLEED_CMD_IBPB;
+		} else if (!strcmp(str, "nosmt")) {
+			retbleed_nosmt = true;
+		} else {
+			pr_err("Ignoring unknown retbleed option (%s).", str);
+		}
+
+		str = next;
+	}
+
+	return 0;
+}
+early_param("retbleed", retbleed_parse_cmdline);
+
+#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
+
+static void __init retbleed_select_mitigation(void)
+{
+	bool mitigate_smt = false;
+
+	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+		return;
+
+	switch (retbleed_cmd) {
+	case RETBLEED_CMD_OFF:
+		return;
+
+	case RETBLEED_CMD_UNRET:
+		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
+			goto do_cmd_auto;
+		}
+		break;
+
+	case RETBLEED_CMD_IBPB:
+		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
+			pr_err("WARNING: CPU does not support IBPB.\n");
+			goto do_cmd_auto;
+		} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+			goto do_cmd_auto;
+		}
+		break;
+
+do_cmd_auto:
+	case RETBLEED_CMD_AUTO:
+	default:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
+				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
+				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		}
+
+		/*
+		 * The Intel mitigation (IBRS or eIBRS) was already selected in
+		 * spectre_v2_select_mitigation().  'retbleed_mitigation' will
+		 * be set accordingly below.
+		 */
+
+		break;
+	}
+
+	switch (retbleed_mitigation) {
+	case RETBLEED_MITIGATION_UNRET:
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+			pr_err(RETBLEED_UNTRAIN_MSG);
+
+		mitigate_smt = true;
+		break;
+
+	case RETBLEED_MITIGATION_IBPB:
+		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+		mitigate_smt = true;
+		break;
+
+	default:
+		break;
+	}
+
+	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
+	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
+		cpu_smt_disable(false);
+
+	/*
+	 * Let IBRS trump all on Intel without affecting the effects of the
+	 * retbleed= cmdline option.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_IBRS:
+			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+			break;
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+			break;
+		default:
+			pr_err(RETBLEED_INTEL_MSG);
+		}
+	}
+
+	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt)	"Spectre V2 : " fmt
 
 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
 	SPECTRE_V2_USER_NONE;
@@ -607,6 +938,33 @@
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif
 
+#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
+#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
+
+#ifdef CONFIG_BPF_SYSCALL
+void unpriv_ebpf_notify(int new_state)
+{
+	if (new_state)
+		return;
+
+	/* Unprivileged eBPF is enabled */
+
+	switch (spectre_v2_enabled) {
+	case SPECTRE_V2_EIBRS:
+		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+		break;
+	case SPECTRE_V2_EIBRS_LFENCE:
+		if (sched_smt_active())
+			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+		break;
+	default:
+		break;
+	}
+}
+#endif
+
 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
 	int len = strlen(opt);
@@ -621,7 +979,11 @@
 	SPECTRE_V2_CMD_FORCE,
 	SPECTRE_V2_CMD_RETPOLINE,
 	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-	SPECTRE_V2_CMD_RETPOLINE_AMD,
+	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
+	SPECTRE_V2_CMD_EIBRS,
+	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
+	SPECTRE_V2_CMD_EIBRS_LFENCE,
+	SPECTRE_V2_CMD_IBRS,
 };
 
 enum spectre_v2_user_cmd {
@@ -662,13 +1024,15 @@
 		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 }
 
+static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
+
 static enum spectre_v2_user_cmd __init
-spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_parse_user_cmdline(void)
 {
 	char arg[20];
 	int ret, i;
 
-	switch (v2_cmd) {
+	switch (spectre_v2_cmd) {
 	case SPECTRE_V2_CMD_NONE:
 		return SPECTRE_V2_USER_CMD_NONE;
 	case SPECTRE_V2_CMD_FORCE:
@@ -694,8 +1058,16 @@
 	return SPECTRE_V2_USER_CMD_AUTO;
 }
 
+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
+{
+	return mode == SPECTRE_V2_IBRS ||
+	       mode == SPECTRE_V2_EIBRS ||
+	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+	       mode == SPECTRE_V2_EIBRS_LFENCE;
+}
+
 static void __init
-spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_user_select_mitigation(void)
 {
 	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
 	bool smt_possible = IS_ENABLED(CONFIG_SMP);
@@ -708,7 +1080,7 @@
 	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
 		smt_possible = false;
 
-	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+	cmd = spectre_v2_parse_user_cmdline();
 	switch (cmd) {
 	case SPECTRE_V2_USER_CMD_NONE:
 		goto set_mode;
@@ -756,10 +1128,12 @@
 	}
 
 	/*
-	 * If enhanced IBRS is enabled or SMT impossible, STIBP is not
-	 * required.
+	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
+	 * STIBP is not required.
 	 */
-	if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
+	    !smt_possible ||
+	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
 		return;
 
 	/*
@@ -771,11 +1145,13 @@
 	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
 		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
 
-	/*
-	 * If STIBP is not available, clear the STIBP mode.
-	 */
-	if (!boot_cpu_has(X86_FEATURE_STIBP))
-		mode = SPECTRE_V2_USER_NONE;
+	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+		if (mode != SPECTRE_V2_USER_STRICT &&
+		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
+			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
+		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+	}
 
 	spectre_v2_user_stibp = mode;
 
@@ -785,9 +1161,12 @@
 
 static const char * const spectre_v2_strings[] = {
 	[SPECTRE_V2_NONE]			= "Vulnerable",
-	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
-	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
-	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
+	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
+	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
+	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
+	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
+	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
+	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
 };
 
 static const struct {
@@ -798,9 +1177,14 @@
 	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
 	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
 	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
-	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
+	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
+	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
 	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
+	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
+	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
 	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
+	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
 };
 
 static void __init spec_v2_print_cond(const char *reason, bool secure)
@@ -836,22 +1220,133 @@
 	}
 
 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
-	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
-	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
+	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
 	    !IS_ENABLED(CONFIG_RETPOLINE)) {
-		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
+		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+		       mitigation_options[i].option);
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
-	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
-		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
+	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
+		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
+		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
+		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
+		       mitigation_options[i].option);
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
 	spec_v2_print_cond(mitigation_options[i].option,
 			   mitigation_options[i].secure);
 	return cmd;
+}
+
+static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+{
+	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
+		pr_err("Kernel not compiled with retpoline; no mitigation available!");
+		return SPECTRE_V2_NONE;
+	}
+
+	return SPECTRE_V2_RETPOLINE;
+}
+
+/* Disable in-kernel use of non-RSB RET predictors */
+static void __init spec_ctrl_disable_kernel_rrsba(void)
+{
+	u64 ia32_cap;
+
+	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+		return;
+
+	ia32_cap = x86_read_arch_cap_msr();
+
+	if (ia32_cap & ARCH_CAP_RRSBA) {
+		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+		update_spec_ctrl(x86_spec_ctrl_base);
+	}
+}
+
+static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+{
+	/*
+	 * Similar to context switches, there are two types of RSB attacks
+	 * after VM exit:
+	 *
+	 * 1) RSB underflow
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 * When retpoline is enabled, both are mitigated by filling/clearing
+	 * the RSB.
+	 *
	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+	 * prediction isolation protections, RSB still needs to be cleared
+	 * because of #2.  Note that SMEP provides no protection here, unlike
+	 * user-space-poisoned RSB entries.
+	 *
+	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
+	 * bug is present then a LITE version of RSB protection is required,
+	 * just a single call needs to retire before a RET is executed.
+	 */
+	switch (mode) {
+	case SPECTRE_V2_NONE:
+		return;
+
+	case SPECTRE_V2_EIBRS_LFENCE:
+	case SPECTRE_V2_EIBRS:
+		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+		}
+		return;
+
+	case SPECTRE_V2_EIBRS_RETPOLINE:
+	case SPECTRE_V2_RETPOLINE:
+	case SPECTRE_V2_LFENCE:
+	case SPECTRE_V2_IBRS:
+		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
+		return;
+	}
+
+	pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
+	dump_stack();
 }
 
 static void __init spectre_v2_select_mitigation(void)
@@ -874,85 +1369,172 @@
 	case SPECTRE_V2_CMD_FORCE:
 	case SPECTRE_V2_CMD_AUTO:
 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
-			mode = SPECTRE_V2_IBRS_ENHANCED;
-			/* Force it so VMEXIT will restore correctly */
-			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-			goto specv2_set_mode;
+			mode = SPECTRE_V2_EIBRS;
+			break;
 		}
-		if (IS_ENABLED(CONFIG_RETPOLINE))
-			goto retpoline_auto;
+
+		if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
+		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+		    retbleed_cmd != RETBLEED_CMD_OFF &&
+		    boot_cpu_has(X86_FEATURE_IBRS) &&
+		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+			mode = SPECTRE_V2_IBRS;
+			break;
+		}
+
+		mode = spectre_v2_select_retpoline();
 		break;
-	case SPECTRE_V2_CMD_RETPOLINE_AMD:
-		if (IS_ENABLED(CONFIG_RETPOLINE))
-			goto retpoline_amd;
+
+	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
+		pr_err(SPECTRE_V2_LFENCE_MSG);
+		mode = SPECTRE_V2_LFENCE;
 		break;
+
 	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
-		if (IS_ENABLED(CONFIG_RETPOLINE))
-			goto retpoline_generic;
+		mode = SPECTRE_V2_RETPOLINE;
 		break;
+
 	case SPECTRE_V2_CMD_RETPOLINE:
-		if (IS_ENABLED(CONFIG_RETPOLINE))
-			goto retpoline_auto;
+		mode = spectre_v2_select_retpoline();
+		break;
+
+	case SPECTRE_V2_CMD_IBRS:
+		mode = SPECTRE_V2_IBRS;
+		break;
+
+	case SPECTRE_V2_CMD_EIBRS:
+		mode = SPECTRE_V2_EIBRS;
+		break;
+
+	case SPECTRE_V2_CMD_EIBRS_LFENCE:
+		mode = SPECTRE_V2_EIBRS_LFENCE;
+		break;
+
+	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
+		mode = SPECTRE_V2_EIBRS_RETPOLINE;
 		break;
 	}
-	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
-	return;
 
-retpoline_auto:
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-	retpoline_amd:
-		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
-			goto retpoline_generic;
-		}
-		mode = SPECTRE_V2_RETPOLINE_AMD;
-		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
-		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-	} else {
-	retpoline_generic:
-		mode = SPECTRE_V2_RETPOLINE_GENERIC;
-		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+
+	if (spectre_v2_in_ibrs_mode(mode)) {
+		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 
-specv2_set_mode:
+	switch (mode) {
+	case SPECTRE_V2_NONE:
+	case SPECTRE_V2_EIBRS:
+		break;
+
+	case SPECTRE_V2_IBRS:
+		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
+		break;
+
+	case SPECTRE_V2_LFENCE:
+	case SPECTRE_V2_EIBRS_LFENCE:
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
+		fallthrough;
+
+	case SPECTRE_V2_RETPOLINE:
+	case SPECTRE_V2_EIBRS_RETPOLINE:
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+		break;
+	}
+
+	/*
+	 * Disable alternate RSB predictions in kernel when indirect CALLs and
+	 * JMPs gets protection against BHI and Intramode-BTI, but RET
+	 * prediction from a non-RSB predictor is still a risk.
+	 */
+	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
+	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+	    mode == SPECTRE_V2_RETPOLINE)
+		spec_ctrl_disable_kernel_rrsba();
+
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
 	/*
-	 * If spectre v2 protection has been enabled, unconditionally fill
-	 * RSB during a context switch; this protects against two independent
-	 * issues:
+	 * If Spectre v2 protection has been enabled, fill the RSB during a
+	 * context switch.  In general there are two types of RSB attacks
+	 * across context switches, for which the CALLs/RETs may be unbalanced.
 	 *
-	 *	- RSB underflow (and switch to BTB) on Skylake+
-	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+	 * 1) RSB underflow
+	 *
+	 *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
+	 *    speculated return targets may come from the branch predictor,
+	 *    which could have a user-poisoned BTB or BHB entry.
+	 *
+	 *    AMD has it even worse: *all* returns are speculated from the BTB,
+	 *    regardless of the state of the RSB.
+	 *
+	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
+	 *    scenario is mitigated by the IBRS branch prediction isolation
+	 *    properties, so the RSB buffer filling wouldn't be necessary to
+	 *    protect against this type of attack.
+	 *
+	 *    The "user -> user" attack scenario is mitigated by RSB filling.
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 *    If the 'next' in-kernel return stack is shorter than 'prev',
+	 *    'next' could be tricked into speculating with a user-poisoned RSB
+	 *    entry.
+	 *
+	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
+	 *    eIBRS.
+	 *
+	 *    The "user -> user" scenario, also known as SpectreBHB, requires
+	 *    RSB clearing.
+	 *
+	 *    So to mitigate all cases, unconditionally fill RSB on context
+	 *    switches.
+	 *
+	 *    FIXME: Is this pointless for retbleed-affected AMD?
 	 */
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
+	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+
 	/*
-	 * Retpoline means the kernel is safe because it has no indirect
-	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
-	 * speculation around firmware calls only when Enhanced IBRS isn't
-	 * supported.
+	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
+	 * and Enhanced IBRS protect firmware too, so enable IBRS around
+	 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+	 * enabled.
 	 *
 	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
 	 * the user might select retpoline on the kernel command line and if
 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
 	 * enable IBRS around firmware calls.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
+	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+	    boot_cpu_has(X86_FEATURE_IBPB) &&
+	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+			pr_info("Enabling Speculation Barrier for firmware calls\n");
+		}
+
+	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
 
 	/* Set up IBPB and STIBP depending on the general spectre V2 command */
-	spectre_v2_user_select_mitigation(cmd);
+	spectre_v2_cmd = cmd;
 }
 
 static void update_stibp_msr(void * __unused)
 {
-	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
+	update_spec_ctrl(val);
 }
 
 /* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -987,6 +1569,8 @@
 /* Update the static key controlling the MDS CPU buffer clear in idle */
 static void update_mds_branch_idle(void)
 {
+	u64 ia32_cap = x86_read_arch_cap_msr();
+
 	/*
 	 * Enable the idle clearing if SMT is active on CPUs which are
 	 * affected only by MSBDS and not any other MDS variant.
@@ -998,18 +1582,25 @@
 	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
 		return;
 
-	if (sched_smt_active())
+	if (sched_smt_active()) {
 		static_branch_enable(&mds_idle_clear);
-	else
+	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
 		static_branch_disable(&mds_idle_clear);
+	}
 }
 
 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
 
-void arch_smt_update(void)
+void cpu_bugs_smt_update(void)
 {
 	mutex_lock(&spec_ctrl_mutex);
+
+	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
 
 	switch (spectre_v2_user_stibp) {
 	case SPECTRE_V2_USER_NONE:
@@ -1043,6 +1634,16 @@
 		break;
 	case TAA_MITIGATION_TSX_DISABLED:
 	case TAA_MITIGATION_OFF:
+		break;
+	}
+
+	switch (mmio_mitigation) {
+	case MMIO_MITIGATION_VERW:
+	case MMIO_MITIGATION_UCODE_NEEDED:
+		if (sched_smt_active())
+			pr_warn_once(MMIO_MSG_SMT);
+		break;
+	case MMIO_MITIGATION_OFF:
 		break;
 	}
 
@@ -1150,16 +1751,6 @@
 	}
 
 	/*
-	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
-	 * bit in the mask to allow guests to use the mitigation even in the
-	 * case where the host does not enable it.
-	 */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
-	}
-
-	/*
 	 * We have three CPU feature flags that are in play here:
 	 *   - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
 	 *   - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
@@ -1176,7 +1767,7 @@
 		x86_amd_ssb_disable();
 	} else {
 		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 }
 
@@ -1223,15 +1814,25 @@
 		if (task_spec_ssb_force_disable(task))
 			return -EPERM;
 		task_clear_spec_ssb_disable(task);
+		task_clear_spec_ssb_noexec(task);
 		task_update_spec_tif(task);
 		break;
 	case PR_SPEC_DISABLE:
 		task_set_spec_ssb_disable(task);
+		task_clear_spec_ssb_noexec(task);
 		task_update_spec_tif(task);
 		break;
 	case PR_SPEC_FORCE_DISABLE:
 		task_set_spec_ssb_disable(task);
 		task_set_spec_ssb_force_disable(task);
+		task_clear_spec_ssb_noexec(task);
+		task_update_spec_tif(task);
+		break;
+	case PR_SPEC_DISABLE_NOEXEC:
+		if (task_spec_ssb_force_disable(task))
+			return -EPERM;
+		task_set_spec_ssb_disable(task);
+		task_set_spec_ssb_noexec(task);
 		task_update_spec_tif(task);
 		break;
 	default:
@@ -1335,6 +1936,8 @@
 	case SPEC_STORE_BYPASS_PRCTL:
 		if (task_spec_ssb_force_disable(task))
 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+		if (task_spec_ssb_noexec(task))
+			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
 		if (task_spec_ssb_disable(task))
 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
@@ -1382,7 +1985,7 @@
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+		update_spec_ctrl(x86_spec_ctrl_base);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
@@ -1426,15 +2029,15 @@
 	case INTEL_FAM6_WESTMERE:
 	case INTEL_FAM6_SANDYBRIDGE:
 	case INTEL_FAM6_IVYBRIDGE:
-	case INTEL_FAM6_HASWELL_CORE:
-	case INTEL_FAM6_HASWELL_ULT:
-	case INTEL_FAM6_HASWELL_GT3E:
-	case INTEL_FAM6_BROADWELL_CORE:
-	case INTEL_FAM6_BROADWELL_GT3E:
-	case INTEL_FAM6_SKYLAKE_MOBILE:
-	case INTEL_FAM6_SKYLAKE_DESKTOP:
-	case INTEL_FAM6_KABYLAKE_MOBILE:
-	case INTEL_FAM6_KABYLAKE_DESKTOP:
+	case INTEL_FAM6_HASWELL:
+	case INTEL_FAM6_HASWELL_L:
+	case INTEL_FAM6_HASWELL_G:
+	case INTEL_FAM6_BROADWELL:
+	case INTEL_FAM6_BROADWELL_G:
+	case INTEL_FAM6_SKYLAKE_L:
+	case INTEL_FAM6_SKYLAKE:
+	case INTEL_FAM6_KABYLAKE_L:
+	case INTEL_FAM6_KABYLAKE:
 		if (c->x86_cache_bits < 44)
 			c->x86_cache_bits = 44;
 		break;
@@ -1549,7 +2152,12 @@
 
 static ssize_t itlb_multihit_show_state(char *buf)
 {
-	if (itlb_multihit_kvm_mitigation)
+	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
+	    !boot_cpu_has(X86_FEATURE_VMX))
+		return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
+	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
+		return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
+	else if (itlb_multihit_kvm_mitigation)
 		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
 	else
 		return sprintf(buf, "KVM: Vulnerable\n");
@@ -1598,9 +2206,26 @@
 		       sched_smt_active() ? "vulnerable" : "disabled");
 }
 
+static ssize_t mmio_stale_data_show_state(char *buf)
+{
+	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		return sysfs_emit(buf, "Unknown: No mitigations\n");
+
+	if (mmio_mitigation == MMIO_MITIGATION_OFF)
+		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
+				  mmio_strings[mmio_mitigation]);
+	}
+
+	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
+			  sched_smt_active() ? "vulnerable" : "disabled");
+}
+
 static char *stibp_state(void)
 {
-	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
 		return "";
 
 	switch (spectre_v2_user_stibp) {
@@ -1630,9 +2255,63 @@
 	return "";
 }
 
+static char *pbrsb_eibrs_state(void)
+{
+	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
+		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
+			return ", PBRSB-eIBRS: SW sequence";
+		else
+			return ", PBRSB-eIBRS: Vulnerable";
+	} else {
+		return ", PBRSB-eIBRS: Not affected";
+	}
+}
+
+static ssize_t spectre_v2_show_state(char *buf)
+{
+	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+		return sprintf(buf, "Vulnerable: LFENCE\n");
+
+	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+		return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+
+	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+
+	return sprintf(buf, "%s%s%s%s%s%s%s\n",
+		       spectre_v2_strings[spectre_v2_enabled],
+		       ibpb_state(),
+		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+		       stibp_state(),
+		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+		       pbrsb_eibrs_state(),
+		       spectre_v2_module_string());
+}
+
 static ssize_t srbds_show_state(char *buf)
 {
 	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+}
+
+static ssize_t retbleed_show_state(char *buf)
+{
+	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+			return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
+
+		return sprintf(buf, "%s; SMT %s\n",
+			       retbleed_strings[retbleed_mitigation],
+			       !sched_smt_active() ? "disabled" :
+			       spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+			       spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+			       "enabled with STIBP protection" : "vulnerable");
+	}
+
+	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
 }
 
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
@@ -1655,12 +2334,7 @@
 		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
 	case X86_BUG_SPECTRE_V2:
-		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-			       ibpb_state(),
-			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-			       stibp_state(),
-			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
-			       spectre_v2_module_string());
+		return spectre_v2_show_state(buf);
 
 	case X86_BUG_SPEC_STORE_BYPASS:
 		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
@@ -1681,6 +2355,13 @@
 
 	case X86_BUG_SRBDS:
 		return srbds_show_state(buf);
+
+	case X86_BUG_MMIO_STALE_DATA:
+	case X86_BUG_MMIO_UNKNOWN:
+		return mmio_stale_data_show_state(buf);
+
+	case X86_BUG_RETBLEED:
+		return retbleed_show_state(buf);
 
 	default:
 		break;
@@ -1733,4 +2414,17 @@
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
 }
+
+ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
+	else
+		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+}
+
+ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
+}
 #endif