...
 #include <linux/nospec.h>
 #include <linux/prctl.h>
 #include <linux/sched/smt.h>
+#include <linux/pgtable.h>
+#include <linux/bpf.h>

 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
...
 #include <asm/vmx.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
-#include <asm/pgtable.h>
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
+#include <asm/tlbflush.h>

 #include "cpu.h"

 static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
+static void __init retbleed_select_mitigation(void);
+static void __init spectre_v2_user_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
-static void __init mds_print_mitigation(void);
+static void __init md_clear_update_mitigation(void);
+static void __init md_clear_select_mitigation(void);
 static void __init taa_select_mitigation(void);
+static void __init mmio_select_mitigation(void);
 static void __init srbds_select_mitigation(void);

-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+/* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/* The current value of the SPEC_CTRL MSR with task-specific bits set */
+DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);

+/* Update SPEC_CTRL MSR and its cached copy unconditionally */
+static void update_spec_ctrl(u64 val)
+{
+        this_cpu_write(x86_spec_ctrl_current, val);
+        wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
 /*
- * The vendor and possibly platform specific bits which can be modified in
- * x86_spec_ctrl_base.
+ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
+ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+void update_spec_ctrl_cond(u64 val)
+{
+        if (this_cpu_read(x86_spec_ctrl_current) == val)
+                return;
+
+        this_cpu_write(x86_spec_ctrl_current, val);
+
+        /*
+         * When KERNEL_IBRS is in use, this MSR is rewritten on return to
+         * user space, so unless forced the update can be deferred until then.
+         */
+        if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+                wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
+u64 spec_ctrl_current(void)
+{
+        return this_cpu_read(x86_spec_ctrl_current);
+}
+EXPORT_SYMBOL_GPL(spec_ctrl_current);
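update_spec_ctrl_cond() above avoids a redundant WRMSR by first comparing against the per-CPU cached value, and under KERNEL_IBRS defers the write entirely, since the return-to-user path rewrites the MSR anyway. A minimal stand-alone sketch of just the caching idea — a hypothetical single-CPU userspace model with the MSR write stubbed out, not kernel code:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t spec_ctrl_cached;            /* stands in for the per-CPU copy */

static void wrmsrl_stub(uint64_t val)        /* stands in for wrmsrl() */
{
        printf("WRMSR IA32_SPEC_CTRL <- 0x%llx\n", (unsigned long long)val);
}

/* Skip the expensive MSR write when the value is already current */
static void update_spec_ctrl_cond_model(uint64_t val)
{
        if (spec_ctrl_cached == val)
                return;
        spec_ctrl_cached = val;
        wrmsrl_stub(val);
}

int main(void)
{
        update_spec_ctrl_cond_model(0x2);    /* writes */
        update_spec_ctrl_cond_model(0x2);    /* cache hit: no write */
        update_spec_ctrl_cond_model(0x6);    /* writes */
        return 0;
}
```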

 /*
  * AMD specific MSR info for Speculative Store Bypass control.
...
 /* Control MDS CPU buffer clear before idling (halt, mwait) */
 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
 EXPORT_SYMBOL_GPL(mds_idle_clear);
+
+/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
+DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+EXPORT_SYMBOL_GPL(mmio_stale_data_clear);

 void __init check_bugs(void)
 {
...
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

-        /* Allow STIBP in MSR_SPEC_CTRL if supported */
-        if (boot_cpu_has(X86_FEATURE_STIBP))
-                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
-
        /* Select the proper CPU mitigations before patching alternatives: */
        spectre_v1_select_mitigation();
        spectre_v2_select_mitigation();
+        /*
+         * retbleed_select_mitigation() relies on the state set by
+         * spectre_v2_select_mitigation(); specifically it wants to know about
+         * spectre_v2=ibrs.
+         */
+        retbleed_select_mitigation();
+        /*
+         * spectre_v2_user_select_mitigation() relies on the state set by
+         * retbleed_select_mitigation(); specifically the STIBP selection is
+         * forced for UNRET or IBPB.
+         */
+        spectre_v2_user_select_mitigation();
        ssb_select_mitigation();
        l1tf_select_mitigation();
-        mds_select_mitigation();
-        taa_select_mitigation();
+        md_clear_select_mitigation();
        srbds_select_mitigation();
-
-        /*
-         * As MDS and TAA mitigations are inter-related, print MDS
-         * mitigation until after TAA mitigation selection is done.
-         */
-        mds_print_mitigation();

        arch_smt_update();

...
 #endif
 }

+/*
+ * NOTE: For VMX, this function is not called in the vmexit path.
+ * It uses vmx_spec_ctrl_restore_host() instead.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
-        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+        u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
        struct thread_info *ti = current_thread_info();

-        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
-                /*
-                 * Restrict guest_spec_ctrl to supported values. Clear the
-                 * modifiable bits in the host base value and or the
-                 * modifiable bits from the guest value.
-                 */
-                guestval = hostval & ~x86_spec_ctrl_mask;
-                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
-
-                /* SSBD controlled in MSR_SPEC_CTRL */
-                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-                    static_cpu_has(X86_FEATURE_AMD_SSBD))
-                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
-
-                /* Conditional STIBP enabled? */
-                if (static_branch_unlikely(&switch_to_cond_stibp))
-                        hostval |= stibp_tif_to_spec_ctrl(ti->flags);
-
                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
...
        }
 }

-static void __init mds_print_mitigation(void)
-{
-        if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
-                return;
-
-        pr_info("%s\n", mds_strings[mds_mitigation]);
-}
-
 static int __init mds_cmdline(char *str)
 {
        if (!boot_cpu_has_bug(X86_BUG_MDS))
...
 #undef pr_fmt
 #define pr_fmt(fmt)        "TAA: " fmt

+enum taa_mitigations {
+        TAA_MITIGATION_OFF,
+        TAA_MITIGATION_UCODE_NEEDED,
+        TAA_MITIGATION_VERW,
+        TAA_MITIGATION_TSX_DISABLED,
+};
+
 /* Default mitigation for TAA-affected CPUs */
 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
 static bool taa_nosmt __ro_after_init;
...
        /* TSX previously disabled by tsx=off */
        if (!boot_cpu_has(X86_FEATURE_RTM)) {
                taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
-                goto out;
+                return;
        }

        if (cpu_mitigations_off()) {
...
         */
        if (taa_mitigation == TAA_MITIGATION_OFF &&
            mds_mitigation == MDS_MITIGATION_OFF)
-                goto out;
+                return;

        if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
                taa_mitigation = TAA_MITIGATION_VERW;
...

        if (taa_nosmt || cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);
-
-        /*
-         * Update MDS mitigation, if necessary, as the mds_user_clear is
-         * now enabled for TAA mitigation.
-         */
-        if (mds_mitigation == MDS_MITIGATION_OFF &&
-            boot_cpu_has_bug(X86_BUG_MDS)) {
-                mds_mitigation = MDS_MITIGATION_FULL;
-                mds_select_mitigation();
-        }
-out:
-        pr_info("%s\n", taa_strings[taa_mitigation]);
 }

 static int __init tsx_async_abort_parse_cmdline(char *str)
...
        return 0;
 }
 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt)        "MMIO Stale Data: " fmt
+
+enum mmio_mitigations {
+        MMIO_MITIGATION_OFF,
+        MMIO_MITIGATION_UCODE_NEEDED,
+        MMIO_MITIGATION_VERW,
+};
+
+/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
+static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
+static bool mmio_nosmt __ro_after_init = false;
+
+static const char * const mmio_strings[] = {
+        [MMIO_MITIGATION_OFF]           = "Vulnerable",
+        [MMIO_MITIGATION_UCODE_NEEDED]  = "Vulnerable: Clear CPU buffers attempted, no microcode",
+        [MMIO_MITIGATION_VERW]          = "Mitigation: Clear CPU buffers",
+};
+
+static void __init mmio_select_mitigation(void)
+{
+        u64 ia32_cap;
+
+        if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+            boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+            cpu_mitigations_off()) {
+                mmio_mitigation = MMIO_MITIGATION_OFF;
+                return;
+        }
+
+        if (mmio_mitigation == MMIO_MITIGATION_OFF)
+                return;
+
+        ia32_cap = x86_read_arch_cap_msr();
+
+        /*
+         * Enable CPU buffer clear mitigation for host and VMM, if also affected
+         * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+         */
+        if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
+                                              boot_cpu_has(X86_FEATURE_RTM)))
+                static_branch_enable(&mds_user_clear);
+        else
+                static_branch_enable(&mmio_stale_data_clear);
+
+        /*
+         * If the Processor MMIO Stale Data bug is present and Fill Buffer data
+         * can be propagated to uncore buffers, clearing the Fill buffers on
+         * idle is required irrespective of SMT state.
+         */
+        if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+                static_branch_enable(&mds_idle_clear);
+
+        /*
+         * Check if the system has the right microcode.
+         *
+         * CPU Fill buffer clear mitigation is enumerated by either an explicit
+         * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+         * affected systems.
+         */
+        if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+            (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+             boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+             !(ia32_cap & ARCH_CAP_MDS_NO)))
+                mmio_mitigation = MMIO_MITIGATION_VERW;
+        else
+                mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+
+        if (mmio_nosmt || cpu_mitigations_auto_nosmt())
+                cpu_smt_disable(false);
+}
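The microcode check above reduces to a small predicate over two ARCH_CAPABILITIES bits plus two feature flags. A stand-alone model of that decision — the CAP_* bit positions here are illustrative stand-ins, not the real MSR layout:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the ARCH_CAPABILITIES bits used above */
#define CAP_FB_CLEAR  (1ULL << 0)
#define CAP_MDS_NO    (1ULL << 1)

/* Mirrors the mmio_select_mitigation() microcode test: VERW is usable if
 * FB_CLEAR is enumerated, or if MD_CLEAR and L1D_FLUSH both exist on an
 * MDS-affected (i.e. !MDS_NO) part. */
static bool verw_microcode_present(unsigned long long cap,
                                   bool md_clear, bool l1d_flush)
{
        return (cap & CAP_FB_CLEAR) ||
               (md_clear && l1d_flush && !(cap & CAP_MDS_NO));
}

int main(void)
{
        printf("%d\n", verw_microcode_present(CAP_FB_CLEAR, false, false)); /* 1 */
        printf("%d\n", verw_microcode_present(CAP_MDS_NO, true, true));     /* 0 */
        return 0;
}
```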
+
+static int __init mmio_stale_data_parse_cmdline(char *str)
+{
+        if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+                return 0;
+
+        if (!str)
+                return -EINVAL;
+
+        if (!strcmp(str, "off")) {
+                mmio_mitigation = MMIO_MITIGATION_OFF;
+        } else if (!strcmp(str, "full")) {
+                mmio_mitigation = MMIO_MITIGATION_VERW;
+        } else if (!strcmp(str, "full,nosmt")) {
+                mmio_mitigation = MMIO_MITIGATION_VERW;
+                mmio_nosmt = true;
+        }
+
+        return 0;
+}
+early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt)        "" fmt
+
+static void __init md_clear_update_mitigation(void)
+{
+        if (cpu_mitigations_off())
+                return;
+
+        if (!static_key_enabled(&mds_user_clear))
+                goto out;
+
+        /*
+         * mds_user_clear is now enabled. Update the MDS, TAA and MMIO Stale
+         * Data mitigations, if necessary.
+         */
+        if (mds_mitigation == MDS_MITIGATION_OFF &&
+            boot_cpu_has_bug(X86_BUG_MDS)) {
+                mds_mitigation = MDS_MITIGATION_FULL;
+                mds_select_mitigation();
+        }
+        if (taa_mitigation == TAA_MITIGATION_OFF &&
+            boot_cpu_has_bug(X86_BUG_TAA)) {
+                taa_mitigation = TAA_MITIGATION_VERW;
+                taa_select_mitigation();
+        }
+        if (mmio_mitigation == MMIO_MITIGATION_OFF &&
+            boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
+                mmio_mitigation = MMIO_MITIGATION_VERW;
+                mmio_select_mitigation();
+        }
+out:
+        if (boot_cpu_has_bug(X86_BUG_MDS))
+                pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
+        if (boot_cpu_has_bug(X86_BUG_TAA))
+                pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
+        if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+                pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+        else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+                pr_info("MMIO Stale Data: Unknown: No mitigations\n");
+}
+
+static void __init md_clear_select_mitigation(void)
+{
+        mds_select_mitigation();
+        taa_select_mitigation();
+        mmio_select_mitigation();
+
+        /*
+         * As the MDS, TAA and MMIO Stale Data mitigations are inter-related,
+         * update and print them only after all three selections are done.
+         */
+        md_clear_update_mitigation();
+}

 #undef pr_fmt
 #define pr_fmt(fmt)        "SRBDS: " fmt
...
                return;

        /*
-         * Check to see if this is one of the MDS_NO systems supporting
-         * TSX that are only exposed to SRBDS when TSX is enabled.
+         * Check to see if this is one of the MDS_NO systems supporting TSX
+         * that are only exposed to SRBDS when TSX is enabled or when the CPU
+         * is affected by the Processor MMIO Stale Data vulnerability.
         */
        ia32_cap = x86_read_arch_cap_msr();
-        if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
+        if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+            !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
        else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
...
         * If FSGSBASE is enabled, the user can put a kernel address in
         * GS, in which case SMAP provides no protection.
         *
-         * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
-         *         FSGSBASE enablement patches have been merged. ]
-         *
         * If FSGSBASE is disabled, the user can only put a user space
         * address in GS. That makes an attack harder, but still
         * possible if there's no SMAP protection.
         */
-        if (!smap_works_speculatively()) {
+        if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
+            !smap_works_speculatively()) {
                /*
                 * Mitigation can be provided from SWAPGS itself or
                 * PTI as the CR3 write in the Meltdown mitigation
...
        }
 }
 early_param("nospectre_v1", nospectre_v1_cmdline);

-#undef pr_fmt
-#define pr_fmt(fmt)        "Spectre V2 : " fmt
-
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;
+
+#undef pr_fmt
+#define pr_fmt(fmt)        "RETBleed: " fmt
+
+enum retbleed_mitigation {
+        RETBLEED_MITIGATION_NONE,
+        RETBLEED_MITIGATION_UNRET,
+        RETBLEED_MITIGATION_IBPB,
+        RETBLEED_MITIGATION_IBRS,
+        RETBLEED_MITIGATION_EIBRS,
+};
+
+enum retbleed_mitigation_cmd {
+        RETBLEED_CMD_OFF,
+        RETBLEED_CMD_AUTO,
+        RETBLEED_CMD_UNRET,
+        RETBLEED_CMD_IBPB,
+};
+
+const char * const retbleed_strings[] = {
+        [RETBLEED_MITIGATION_NONE]  = "Vulnerable",
+        [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
+        [RETBLEED_MITIGATION_IBPB]  = "Mitigation: IBPB",
+        [RETBLEED_MITIGATION_IBRS]  = "Mitigation: IBRS",
+        [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
+};
+
+static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
+        RETBLEED_MITIGATION_NONE;
+static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
+        RETBLEED_CMD_AUTO;
+
+static int __ro_after_init retbleed_nosmt = false;
+
+static int __init retbleed_parse_cmdline(char *str)
+{
+        if (!str)
+                return -EINVAL;
+
+        while (str) {
+                char *next = strchr(str, ',');
+                if (next) {
+                        *next = 0;
+                        next++;
+                }
+
+                if (!strcmp(str, "off")) {
+                        retbleed_cmd = RETBLEED_CMD_OFF;
+                } else if (!strcmp(str, "auto")) {
+                        retbleed_cmd = RETBLEED_CMD_AUTO;
+                } else if (!strcmp(str, "unret")) {
+                        retbleed_cmd = RETBLEED_CMD_UNRET;
+                } else if (!strcmp(str, "ibpb")) {
+                        retbleed_cmd = RETBLEED_CMD_IBPB;
+                } else if (!strcmp(str, "nosmt")) {
+                        retbleed_nosmt = true;
+                } else {
+                        pr_err("Ignoring unknown retbleed option (%s).", str);
+                }
+
+                str = next;
+        }
+
+        return 0;
+}
+early_param("retbleed", retbleed_parse_cmdline);
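retbleed_parse_cmdline() tokenizes its comma-separated argument in place, splitting at each strchr() hit, so booting with e.g. retbleed=ibpb,nosmt sets both the command and the nosmt flag. A minimal stand-alone model of the same tokenization — a hypothetical test harness, not kernel code:

```c
#include <stdio.h>
#include <string.h>

/* Walk "off"/"auto"/"unret"/"ibpb"/"nosmt" tokens the way
 * retbleed_parse_cmdline() does: split in place at each comma. */
static void parse(char *str)
{
        while (str) {
                char *next = strchr(str, ',');
                if (next)
                        *next++ = '\0';      /* terminate token, advance */
                printf("token: %s\n", str);  /* kernel code sets retbleed_cmd here */
                str = next;                  /* NULL after the last token */
        }
}

int main(void)
{
        char arg[] = "ibpb,nosmt";           /* e.g. booted with retbleed=ibpb,nosmt */
        parse(arg);
        return 0;
}
```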
+
+#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
+
+static void __init retbleed_select_mitigation(void)
+{
+        bool mitigate_smt = false;
+
+        if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+                return;
+
+        switch (retbleed_cmd) {
+        case RETBLEED_CMD_OFF:
+                return;
+
+        case RETBLEED_CMD_UNRET:
+                if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+                        retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+                } else {
+                        pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
+                        goto do_cmd_auto;
+                }
+                break;
+
+        case RETBLEED_CMD_IBPB:
+                if (!boot_cpu_has(X86_FEATURE_IBPB)) {
+                        pr_err("WARNING: CPU does not support IBPB.\n");
+                        goto do_cmd_auto;
+                } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+                        retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+                } else {
+                        pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+                        goto do_cmd_auto;
+                }
+                break;
+
+do_cmd_auto:
+        case RETBLEED_CMD_AUTO:
+        default:
+                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+                    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+                        if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
+                                retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+                        else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
+                                retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+                }
+
+                /*
+                 * The Intel mitigation (IBRS or eIBRS) was already selected in
+                 * spectre_v2_select_mitigation(); 'retbleed_mitigation' will
+                 * be set accordingly below.
+                 */
+
+                break;
+        }
+
+        switch (retbleed_mitigation) {
+        case RETBLEED_MITIGATION_UNRET:
+                setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+                setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+                if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+                    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+                        pr_err(RETBLEED_UNTRAIN_MSG);
+
+                mitigate_smt = true;
+                break;
+
+        case RETBLEED_MITIGATION_IBPB:
+                setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+                mitigate_smt = true;
+                break;
+
+        default:
+                break;
+        }
+
+        if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
+            (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
+                cpu_smt_disable(false);
+
+        /*
+         * Let IBRS trump all on Intel without affecting the effects of the
+         * retbleed= cmdline option.
+         */
+        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+                switch (spectre_v2_enabled) {
+                case SPECTRE_V2_IBRS:
+                        retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+                        break;
+                case SPECTRE_V2_EIBRS:
+                case SPECTRE_V2_EIBRS_RETPOLINE:
+                case SPECTRE_V2_EIBRS_LFENCE:
+                        retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+                        break;
+                default:
+                        pr_err(RETBLEED_INTEL_MSG);
+                }
+        }
+
+        pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+}
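One subtlety above: the do_cmd_auto: label sits inside the switch body, just before the RETBLEED_CMD_AUTO case, so selections that cannot be honored goto into the AUTO handling. That is ordinary C — a goto may target a label placed ahead of a case label. A minimal sketch of the pattern, with hypothetical names:

```c
#include <stdio.h>

enum cmd { CMD_OFF, CMD_AUTO, CMD_SPECIAL };

/* A case that cannot be honored falls back by jumping to a label
 * placed directly before the AUTO case inside the same switch. */
static const char *select(enum cmd c, int special_supported)
{
        switch (c) {
        case CMD_OFF:
                return "off";

        case CMD_SPECIAL:
                if (special_supported)
                        return "special";
                goto do_cmd_auto;       /* fall back to AUTO handling */

do_cmd_auto:
        case CMD_AUTO:
        default:
                return "auto";
        }
}

int main(void)
{
        printf("%s\n", select(CMD_SPECIAL, 0));  /* prints "auto" */
        return 0;
}
```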
+
+#undef pr_fmt
+#define pr_fmt(fmt)        "Spectre V2 : " fmt

 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
        SPECTRE_V2_USER_NONE;
...
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif

+#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
+#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
+
+#ifdef CONFIG_BPF_SYSCALL
+void unpriv_ebpf_notify(int new_state)
+{
+        if (new_state)
+                return;
+
+        /* Unprivileged eBPF is enabled */
+
+        switch (spectre_v2_enabled) {
+        case SPECTRE_V2_EIBRS:
+                pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+                break;
+        case SPECTRE_V2_EIBRS_LFENCE:
+                if (sched_smt_active())
+                        pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+                break;
+        default:
+                break;
+        }
+}
+#endif
+
 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
        int len = strlen(opt);
...
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-        SPECTRE_V2_CMD_RETPOLINE_AMD,
+        SPECTRE_V2_CMD_RETPOLINE_LFENCE,
+        SPECTRE_V2_CMD_EIBRS,
+        SPECTRE_V2_CMD_EIBRS_RETPOLINE,
+        SPECTRE_V2_CMD_EIBRS_LFENCE,
+        SPECTRE_V2_CMD_IBRS,
 };

 enum spectre_v2_user_cmd {
...
        pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 }

+static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
+
 static enum spectre_v2_user_cmd __init
-spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_parse_user_cmdline(void)
 {
        char arg[20];
        int ret, i;

-        switch (v2_cmd) {
+        switch (spectre_v2_cmd) {
        case SPECTRE_V2_CMD_NONE:
                return SPECTRE_V2_USER_CMD_NONE;
        case SPECTRE_V2_CMD_FORCE:
...
        return SPECTRE_V2_USER_CMD_AUTO;
 }

+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
+{
+        return mode == SPECTRE_V2_IBRS ||
+               mode == SPECTRE_V2_EIBRS ||
+               mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+               mode == SPECTRE_V2_EIBRS_LFENCE;
+}
+
 static void __init
-spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_user_select_mitigation(void)
 {
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
...
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;

-        cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+        cmd = spectre_v2_parse_user_cmdline();
        switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
...
        }

        /*
-         * If enhanced IBRS is enabled or SMT impossible, STIBP is not
-         * required.
+         * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
+         * STIBP is not required.
         */
-        if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
+            !smt_possible ||
+            spectre_v2_in_ibrs_mode(spectre_v2_enabled))
                return;

        /*
...
            boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
                mode = SPECTRE_V2_USER_STRICT_PREFERRED;

-        /*
-         * If STIBP is not available, clear the STIBP mode.
-         */
-        if (!boot_cpu_has(X86_FEATURE_STIBP))
-                mode = SPECTRE_V2_USER_NONE;
+        if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+            retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+                if (mode != SPECTRE_V2_USER_STRICT &&
+                    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
+                        pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
+                mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+        }

        spectre_v2_user_stibp = mode;
...

 static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]             = "Vulnerable",
-        [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
-        [SPECTRE_V2_RETPOLINE_AMD]     = "Mitigation: Full AMD retpoline",
-        [SPECTRE_V2_IBRS_ENHANCED]     = "Mitigation: Enhanced IBRS",
+        [SPECTRE_V2_RETPOLINE]        = "Mitigation: Retpolines",
+        [SPECTRE_V2_LFENCE]           = "Mitigation: LFENCE",
+        [SPECTRE_V2_EIBRS]            = "Mitigation: Enhanced IBRS",
+        [SPECTRE_V2_EIBRS_LFENCE]     = "Mitigation: Enhanced IBRS + LFENCE",
+        [SPECTRE_V2_EIBRS_RETPOLINE]  = "Mitigation: Enhanced IBRS + Retpolines",
+        [SPECTRE_V2_IBRS]             = "Mitigation: IBRS",
 };

 static const struct {
...
        { "off",                SPECTRE_V2_CMD_NONE,              false },
        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
-        { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
+        { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
+        { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
        { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+        { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
+        { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
+        { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
        { "auto",               SPECTRE_V2_CMD_AUTO,              false },
+        { "ibrs",               SPECTRE_V2_CMD_IBRS,              false },
 };

 static void __init spec_v2_print_cond(const char *reason, bool secure)
...
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
-             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
-             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
+             cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
+             cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+             cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
-                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
+                pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

-        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
-            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
-                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+        if ((cmd == SPECTRE_V2_CMD_EIBRS ||
+             cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+             cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+            !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+                pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+                       mitigation_options[i].option);
+                return SPECTRE_V2_CMD_AUTO;
+        }
+
+        if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+             cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
+            !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+                pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
+                       mitigation_options[i].option);
+                return SPECTRE_V2_CMD_AUTO;
+        }
+
+        if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
+                pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+                       mitigation_options[i].option);
+                return SPECTRE_V2_CMD_AUTO;
+        }
+
+        if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+                pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
+                       mitigation_options[i].option);
+                return SPECTRE_V2_CMD_AUTO;
+        }
+
+        if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
+                pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
+                       mitigation_options[i].option);
+                return SPECTRE_V2_CMD_AUTO;
+        }
+
+        if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
+                pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
+                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        spec_v2_print_cond(mitigation_options[i].option,
                           mitigation_options[i].secure);
        return cmd;
+}
+
+static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+{
+        if (!IS_ENABLED(CONFIG_RETPOLINE)) {
+                pr_err("Kernel not compiled with retpoline; no mitigation available!");
+                return SPECTRE_V2_NONE;
+        }
+
+        return SPECTRE_V2_RETPOLINE;
+}
+
+/* Disable in-kernel use of non-RSB RET predictors */
+static void __init spec_ctrl_disable_kernel_rrsba(void)
+{
+        u64 ia32_cap;
+
+        if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+                return;
+
+        ia32_cap = x86_read_arch_cap_msr();
+
+        if (ia32_cap & ARCH_CAP_RRSBA) {
+                x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+                update_spec_ctrl(x86_spec_ctrl_base);
+        }
+}
+
+static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+{
+        /*
+         * Similar to context switches, there are two types of RSB attacks
+         * after VM exit:
+         *
+         * 1) RSB underflow
+         *
+         * 2) Poisoned RSB entry
+         *
+         * When retpoline is enabled, both are mitigated by filling/clearing
+         * the RSB.
+         *
+         * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+         * prediction isolation protections, the RSB still needs to be cleared
+         * because of #2. Note that SMEP provides no protection here, unlike
+         * user-space-poisoned RSB entries.
+         *
+         * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
+         * bug is present then a LITE version of RSB protection is required:
+         * just a single CALL needs to retire before a RET is executed.
+         */
+        switch (mode) {
+        case SPECTRE_V2_NONE:
+                return;
+
+        case SPECTRE_V2_EIBRS_LFENCE:
+        case SPECTRE_V2_EIBRS:
+                if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+                        setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+                        pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+                }
+                return;
+
+        case SPECTRE_V2_EIBRS_RETPOLINE:
+        case SPECTRE_V2_RETPOLINE:
+        case SPECTRE_V2_LFENCE:
+        case SPECTRE_V2_IBRS:
+                setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+                pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
+                return;
+        }
+
+        pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
+        dump_stack();
 }

 static void __init spectre_v2_select_mitigation(void)
...
        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
-                        mode = SPECTRE_V2_IBRS_ENHANCED;
-                        /* Force it so VMEXIT will restore correctly */
-                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-                        goto specv2_set_mode;
+                        mode = SPECTRE_V2_EIBRS;
+                        break;
                }
-                if (IS_ENABLED(CONFIG_RETPOLINE))
-                        goto retpoline_auto;
+
+                if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
+                    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+                    retbleed_cmd != RETBLEED_CMD_OFF &&
+                    boot_cpu_has(X86_FEATURE_IBRS) &&
+                    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+                        mode = SPECTRE_V2_IBRS;
+                        break;
+                }
+
+                mode = spectre_v2_select_retpoline();
                break;
-        case SPECTRE_V2_CMD_RETPOLINE_AMD:
-                if (IS_ENABLED(CONFIG_RETPOLINE))
-                        goto retpoline_amd;
+
+        case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
+                pr_err(SPECTRE_V2_LFENCE_MSG);
+                mode = SPECTRE_V2_LFENCE;
                break;
+
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
-                if (IS_ENABLED(CONFIG_RETPOLINE))
-                        goto retpoline_generic;
+                mode = SPECTRE_V2_RETPOLINE;
                break;
+
        case SPECTRE_V2_CMD_RETPOLINE:
-                if (IS_ENABLED(CONFIG_RETPOLINE))
-                        goto retpoline_auto;
+                mode = spectre_v2_select_retpoline();
+                break;
+
+        case SPECTRE_V2_CMD_IBRS:
+                mode = SPECTRE_V2_IBRS;
+                break;
+
+        case SPECTRE_V2_CMD_EIBRS:
+                mode = SPECTRE_V2_EIBRS;
+                break;
+
+        case SPECTRE_V2_CMD_EIBRS_LFENCE:
+                mode = SPECTRE_V2_EIBRS_LFENCE;
+                break;
+
+        case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
+                mode = SPECTRE_V2_EIBRS_RETPOLINE;
                break;
        }
-        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
-        return;

-retpoline_auto:
-        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-        retpoline_amd:
-                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
-                        goto retpoline_generic;
-                }
-                mode = SPECTRE_V2_RETPOLINE_AMD;
-                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
-                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-        } else {
-        retpoline_generic:
-                mode = SPECTRE_V2_RETPOLINE_GENERIC;
-                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+        if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+                pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+
+        if (spectre_v2_in_ibrs_mode(mode)) {
+                x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+                update_spec_ctrl(x86_spec_ctrl_base);
        }

-specv2_set_mode:
+        switch (mode) {
+        case SPECTRE_V2_NONE:
+        case SPECTRE_V2_EIBRS:
+                break;
+
+        case SPECTRE_V2_IBRS:
+                setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+                        pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
+                break;
+
+        case SPECTRE_V2_LFENCE:
+        case SPECTRE_V2_EIBRS_LFENCE:
+                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
+                fallthrough;
+
+        case SPECTRE_V2_RETPOLINE:
+        case SPECTRE_V2_EIBRS_RETPOLINE:
+                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+                break;
+        }
+
+        /*
+         * Disable alternate RSB predictions in kernel when indirect CALLs and
+         * JMPs get protection against BHI and Intramode-BTI, but RET
+         * prediction from a non-RSB predictor is still a risk.
+         */
+        if (mode == SPECTRE_V2_EIBRS_LFENCE ||
+            mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+            mode == SPECTRE_V2_RETPOLINE)
+                spec_ctrl_disable_kernel_rrsba();
+
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
-         * If spectre v2 protection has been enabled, unconditionally fill
-         * RSB during a context switch; this protects against two independent
-         * issues:
+         * If Spectre v2 protection has been enabled, fill the RSB during a
+         * context switch. In general there are two types of RSB attacks
+         * across context switches, for which the CALLs/RETs may be unbalanced.
         *
-         * - RSB underflow (and switch to BTB) on Skylake+
-         * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+         * 1) RSB underflow
+         *
+         *    Some Intel parts have "bottomless RSB". When the RSB is empty,
+         *    speculated return targets may come from the branch predictor,
+         *    which could have a user-poisoned BTB or BHB entry.
+         *
+         *    AMD has it even worse: *all* returns are speculated from the BTB,
+         *    regardless of the state of the RSB.
+         *
+         *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
+         *    scenario is mitigated by the IBRS branch prediction isolation
+         *    properties, so the RSB buffer filling wouldn't be necessary to
+         *    protect against this type of attack.
+         *
+         *    The "user -> user" attack scenario is mitigated by RSB filling.
+         *
+         * 2) Poisoned RSB entry
+         *
+         *    If the 'next' in-kernel return stack is shorter than 'prev',
+         *    'next' could be tricked into speculating with a user-poisoned RSB
+         *    entry.
+         *
+         *    The "user -> kernel" attack scenario is mitigated by SMEP and
+         *    eIBRS.
+         *
+         *    The "user -> user" scenario, also known as SpectreBHB, requires
+         *    RSB clearing.
+         *
+         * So to mitigate all cases, unconditionally fill RSB on context
+         * switches.
+         *
+         * FIXME: Is this pointless for retbleed-affected AMD?
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

+        spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+
        /*
-         * Retpoline means the kernel is safe because it has no indirect
-         * branches. Enhanced IBRS protects firmware too, so, enable restricted
-         * speculation around firmware calls only when Enhanced IBRS isn't
-         * supported.
+         * Retpoline protects the kernel, but doesn't protect firmware. IBRS
+         * and Enhanced IBRS protect firmware too, so enable IBRS around
+         * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+         * enabled.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
-         * the CPU supports Enhanced IBRS, kernel might un-intentionally not
+         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
-        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
+        if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+            boot_cpu_has(X86_FEATURE_IBPB) &&
+            (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+             boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+                if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+                        setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+                        pr_info("Enabling Speculation Barrier for firmware calls\n");
+                }
+
+        } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Set up IBPB and STIBP depending on the general spectre V2 command */
-        spectre_v2_user_select_mitigation(cmd);
+        spectre_v2_cmd = cmd;
 }

 static void update_stibp_msr(void *__unused)
 {
-        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+        u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
+        update_spec_ctrl(val);
 }

 /* Update x86_spec_ctrl_base in case SMT state changed. */
...
 /* Update the static key controlling the MDS CPU buffer clear in idle */
 static void update_mds_branch_idle(void)
 {
+        u64 ia32_cap = x86_read_arch_cap_msr();
+
        /*
         * Enable the idle clearing if SMT is active on CPUs which are
         * affected only by MSBDS and not any other MDS variant.
...
        if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
                return;

-        if (sched_smt_active())
+        if (sched_smt_active()) {
                static_branch_enable(&mds_idle_clear);
-        else
+        } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+                   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
                static_branch_disable(&mds_idle_clear);
+        }
 }

 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

-void arch_smt_update(void)
+void cpu_bugs_smt_update(void)
 {
        mutex_lock(&spec_ctrl_mutex);
+
+        if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+            spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+                pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

        switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
...
                break;
        case TAA_MITIGATION_TSX_DISABLED:
        case TAA_MITIGATION_OFF:
+                break;
+        }
+
+        switch (mmio_mitigation) {
+        case MMIO_MITIGATION_VERW:
+        case MMIO_MITIGATION_UCODE_NEEDED:
+                if (sched_smt_active())
+                        pr_warn_once(MMIO_MSG_SMT);
+                break;
+        case MMIO_MITIGATION_OFF:
                break;
        }

...
 }

        /*
-         * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
-         * bit in the mask to allow guests to use the mitigation even in the
-         * case where the host does not enable it.
-         */
-        if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-            static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-                x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
-        }
-
-        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
...
                x86_amd_ssb_disable();
        } else {
                x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+                update_spec_ctrl(x86_spec_ctrl_base);
        }
 }

...
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
+                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
+                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
+                task_clear_spec_ssb_noexec(task);
+                task_update_spec_tif(task);
+                break;
+        case PR_SPEC_DISABLE_NOEXEC:
+                if (task_spec_ssb_force_disable(task))
+                        return -EPERM;
+                task_set_spec_ssb_disable(task);
+                task_set_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        default:
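PR_SPEC_DISABLE_NOEXEC extends the existing PR_SET_SPECULATION_CTRL prctl() ABI: speculative store bypass is disabled for the task, but the state is cleared again on the next execve(). A minimal userspace caller — the PR_* constants come from linux/prctl.h (pulled in by sys/prctl.h on Linux); the fallback define is only for older headers:

```c
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SPEC_DISABLE_NOEXEC
#define PR_SPEC_DISABLE_NOEXEC (1UL << 4)   /* from linux/prctl.h, kernel >= 5.1 */
#endif

int main(void)
{
        /* Disable speculative store bypass until the next execve() */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE_NOEXEC, 0, 0))
                perror("prctl");

        /* Read the state back; ssb_prctl_get() above reports the noexec bit */
        long st = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
        printf("ssb state: 0x%lx\n", st);
        return 0;
}
```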
...
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+                if (task_spec_ssb_noexec(task))
+                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
...
 void x86_spec_ctrl_setup_ap(void)
 {
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+                update_spec_ctrl(x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
.. | .. |
---|
1426 | 2029 | case INTEL_FAM6_WESTMERE: |
---|
1427 | 2030 | case INTEL_FAM6_SANDYBRIDGE: |
---|
1428 | 2031 | case INTEL_FAM6_IVYBRIDGE: |
---|
1429 | | - case INTEL_FAM6_HASWELL_CORE: |
---|
1430 | | - case INTEL_FAM6_HASWELL_ULT: |
---|
1431 | | - case INTEL_FAM6_HASWELL_GT3E: |
---|
1432 | | - case INTEL_FAM6_BROADWELL_CORE: |
---|
1433 | | - case INTEL_FAM6_BROADWELL_GT3E: |
---|
1434 | | - case INTEL_FAM6_SKYLAKE_MOBILE: |
---|
1435 | | - case INTEL_FAM6_SKYLAKE_DESKTOP: |
---|
1436 | | - case INTEL_FAM6_KABYLAKE_MOBILE: |
---|
1437 | | - case INTEL_FAM6_KABYLAKE_DESKTOP: |
---|
| 2032 | + case INTEL_FAM6_HASWELL: |
---|
| 2033 | + case INTEL_FAM6_HASWELL_L: |
---|
| 2034 | + case INTEL_FAM6_HASWELL_G: |
---|
| 2035 | + case INTEL_FAM6_BROADWELL: |
---|
| 2036 | + case INTEL_FAM6_BROADWELL_G: |
---|
| 2037 | + case INTEL_FAM6_SKYLAKE_L: |
---|
| 2038 | + case INTEL_FAM6_SKYLAKE: |
---|
| 2039 | + case INTEL_FAM6_KABYLAKE_L: |
---|
| 2040 | + case INTEL_FAM6_KABYLAKE: |
---|
1438 | 2041 | if (c->x86_cache_bits < 44) |
---|
1439 | 2042 | c->x86_cache_bits = 44; |
---|
1440 | 2043 | break; |
---|
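
The 44-bit floor above (applied to the renamed INTEL_FAM6 entries) matters because L1TF's PTE inversion can only protect pages whose physical address sits below half of the cache-reachable range. A back-of-the-envelope check of that limit, mirroring the arithmetic of the upstream l1tf_pfn_limit() helper (treat the exact helper shape as an assumption here):

    /* l1tf_limit.c - how much memory the PTE inversion can protect (sketch) */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            for (int cache_bits = 36; cache_bits <= 46; cache_bits += 2) {
                    /* highest protectable PFN: half the cache-reachable space */
                    unsigned long long pfn_limit =
                            1ULL << (cache_bits - 1 - PAGE_SHIFT);

                    printf("cache_bits=%d -> limit %llu PFNs (%llu GiB)\n",
                           cache_bits, pfn_limit,
                           (pfn_limit << PAGE_SHIFT) >> 30);
            }
            return 0;
    }

With cache_bits = 44 this yields an 8 TiB ceiling, which is why the quirk refuses to let the affected models claim fewer than 44 bits.
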
.. | .. |
---|
1549 | 2152 | |
---|
1550 | 2153 | static ssize_t itlb_multihit_show_state(char *buf) |
---|
1551 | 2154 | { |
---|
1552 | | - if (itlb_multihit_kvm_mitigation) |
---|
| 2155 | + if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || |
---|
| 2156 | + !boot_cpu_has(X86_FEATURE_VMX)) |
---|
| 2157 | + return sprintf(buf, "KVM: Mitigation: VMX unsupported\n"); |
---|
| 2158 | + else if (!(cr4_read_shadow() & X86_CR4_VMXE)) |
---|
| 2159 | + return sprintf(buf, "KVM: Mitigation: VMX disabled\n"); |
---|
| 2160 | + else if (itlb_multihit_kvm_mitigation) |
---|
1553 | 2161 | return sprintf(buf, "KVM: Mitigation: Split huge pages\n"); |
---|
1554 | 2162 | else |
---|
1555 | 2163 | return sprintf(buf, "KVM: Vulnerable\n"); |
---|
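
The new branches above distinguish "VMX unsupported" from "VMX disabled" (via the CR4.VMXE shadow) before reporting on the split-huge-pages mitigation. Userspace cannot read CR4, but it can at least test the capability half; a sketch using the compiler's cpuid intrinsic (CPUID.1:ECX bit 5 is VMX):

    /* vmx_present.c - does this CPU advertise VMX at all? (sketch) */
    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;
            /* firmware may still lock VMX off via IA32_FEATURE_CONTROL */
            printf("VMX %s\n", (ecx & (1u << 5)) ? "advertised" : "unsupported");
            return 0;
    }
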
.. | .. |
---|
1598 | 2206 | sched_smt_active() ? "vulnerable" : "disabled"); |
---|
1599 | 2207 | } |
---|
1600 | 2208 | |
---|
| 2209 | +static ssize_t mmio_stale_data_show_state(char *buf) |
---|
| 2210 | +{ |
---|
| 2211 | + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
---|
| 2212 | + return sysfs_emit(buf, "Unknown: No mitigations\n"); |
---|
| 2213 | + |
---|
| 2214 | + if (mmio_mitigation == MMIO_MITIGATION_OFF) |
---|
| 2215 | + return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); |
---|
| 2216 | + |
---|
| 2217 | + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
---|
| 2218 | + return sysfs_emit(buf, "%s; SMT Host state unknown\n", |
---|
| 2219 | + mmio_strings[mmio_mitigation]); |
---|
| 2220 | + } |
---|
| 2221 | + |
---|
| 2222 | + return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], |
---|
| 2223 | + sched_smt_active() ? "vulnerable" : "disabled"); |
---|
| 2224 | +} |
---|
| 2225 | + |
---|
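
A guest cannot observe the host's sibling topology, hence the "SMT Host state unknown" wording whenever X86_FEATURE_HYPERVISOR is set. The same detection is available from userspace, for completeness (CPUID.1:ECX bit 31 is reserved-zero on bare metal and set by hypervisors):

    /* under_hv.c - detect whether we run under a hypervisor (sketch) */
    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;
            printf("%s\n", (ecx & (1u << 31))
                   ? "hypervisor present: host SMT state unknown"
                   : "bare metal: SMT state is local");
            return 0;
    }
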
1601 | 2226 | static char *stibp_state(void) |
---|
1602 | 2227 | { |
---|
1603 | | - if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) |
---|
| 2228 | + if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) |
---|
1604 | 2229 | return ""; |
---|
1605 | 2230 | |
---|
1606 | 2231 | switch (spectre_v2_user_stibp) { |
---|
.. | .. |
---|
1630 | 2255 | return ""; |
---|
1631 | 2256 | } |
---|
1632 | 2257 | |
---|
| 2258 | +static char *pbrsb_eibrs_state(void) |
---|
| 2259 | +{ |
---|
| 2260 | + if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { |
---|
| 2261 | + if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || |
---|
| 2262 | + boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) |
---|
| 2263 | + return ", PBRSB-eIBRS: SW sequence"; |
---|
| 2264 | + else |
---|
| 2265 | + return ", PBRSB-eIBRS: Vulnerable"; |
---|
| 2266 | + } else { |
---|
| 2267 | + return ", PBRSB-eIBRS: Not affected"; |
---|
| 2268 | + } |
---|
| 2269 | +} |
---|
| 2270 | + |
---|
| 2271 | +static ssize_t spectre_v2_show_state(char *buf) |
---|
| 2272 | +{ |
---|
| 2273 | + if (spectre_v2_enabled == SPECTRE_V2_LFENCE) |
---|
| 2274 | + return sprintf(buf, "Vulnerable: LFENCE\n"); |
---|
| 2275 | + |
---|
| 2276 | + if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) |
---|
| 2277 | + return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); |
---|
| 2278 | + |
---|
| 2279 | + if (sched_smt_active() && unprivileged_ebpf_enabled() && |
---|
| 2280 | + spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) |
---|
| 2281 | + return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); |
---|
| 2282 | + |
---|
| 2283 | + return sprintf(buf, "%s%s%s%s%s%s%s\n", |
---|
| 2284 | + spectre_v2_strings[spectre_v2_enabled], |
---|
| 2285 | + ibpb_state(), |
---|
| 2286 | + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", |
---|
| 2287 | + stibp_state(), |
---|
| 2288 | + boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", |
---|
| 2289 | + pbrsb_eibrs_state(), |
---|
| 2290 | + spectre_v2_module_string()); |
---|
| 2291 | +} |
---|
| 2292 | + |
---|
1633 | 2293 | static ssize_t srbds_show_state(char *buf) |
---|
1634 | 2294 | { |
---|
1635 | 2295 | return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); |
---|
| 2296 | +} |
---|
| 2297 | + |
---|
| 2298 | +static ssize_t retbleed_show_state(char *buf) |
---|
| 2299 | +{ |
---|
| 2300 | + if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || |
---|
| 2301 | + retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { |
---|
| 2302 | + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && |
---|
| 2303 | + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) |
---|
| 2304 | + return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); |
---|
| 2305 | + |
---|
| 2306 | + return sprintf(buf, "%s; SMT %s\n", |
---|
| 2307 | + retbleed_strings[retbleed_mitigation], |
---|
| 2308 | + !sched_smt_active() ? "disabled" : |
---|
| 2309 | + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || |
---|
| 2310 | + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? |
---|
| 2311 | + "enabled with STIBP protection" : "vulnerable"); |
---|
| 2312 | + } |
---|
| 2313 | + |
---|
| 2314 | + return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); |
---|
1636 | 2315 | } |
---|
1637 | 2316 | |
---|
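
spectre_v2_show_state() above downgrades the report to "Vulnerable" when unprivileged eBPF is enabled, since JIT-built gadgets re-open the eIBRS/LFENCE attack surface. The knob it consults is also exposed as a sysctl; a sketch reading it the way an admin script would:

    /* unpriv_bpf.c - query kernel.unprivileged_bpf_disabled (sketch) */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/unprivileged_bpf_disabled", "r");
            int v = -1;

            if (!f || fscanf(f, "%d", &v) != 1)
                    return perror("sysctl"), 1;
            fclose(f);
            /* 0 = unprivileged bpf() allowed, 1 = disabled, 2 = disabled and locked */
            printf("unprivileged_bpf_disabled=%d\n", v);
            return 0;
    }
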
1638 | 2317 | static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
---|
.. | .. |
---|
1655 | 2334 | return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); |
---|
1656 | 2335 | |
---|
1657 | 2336 | case X86_BUG_SPECTRE_V2: |
---|
1658 | | - return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], |
---|
1659 | | - ibpb_state(), |
---|
1660 | | - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", |
---|
1661 | | - stibp_state(), |
---|
1662 | | - boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", |
---|
1663 | | - spectre_v2_module_string()); |
---|
| 2337 | + return spectre_v2_show_state(buf); |
---|
1664 | 2338 | |
---|
1665 | 2339 | case X86_BUG_SPEC_STORE_BYPASS: |
---|
1666 | 2340 | return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); |
---|
.. | .. |
---|
1681 | 2355 | |
---|
1682 | 2356 | case X86_BUG_SRBDS: |
---|
1683 | 2357 | return srbds_show_state(buf); |
---|
| 2358 | + |
---|
| 2359 | + case X86_BUG_MMIO_STALE_DATA: |
---|
| 2360 | + case X86_BUG_MMIO_UNKNOWN: |
---|
| 2361 | + return mmio_stale_data_show_state(buf); |
---|
| 2362 | + |
---|
| 2363 | + case X86_BUG_RETBLEED: |
---|
| 2364 | + return retbleed_show_state(buf); |
---|
1684 | 2365 | |
---|
1685 | 2366 | default: |
---|
1686 | 2367 | break; |
---|
.. | .. |
---|
1733 | 2414 | { |
---|
1734 | 2415 | return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); |
---|
1735 | 2416 | } |
---|
| 2417 | + |
---|
| 2418 | +ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) |
---|
| 2419 | +{ |
---|
| 2420 | + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
---|
| 2421 | + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); |
---|
| 2422 | + else |
---|
| 2423 | + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); |
---|
| 2424 | +} |
---|
| 2425 | + |
---|
| 2426 | +ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) |
---|
| 2427 | +{ |
---|
| 2428 | + return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); |
---|
| 2429 | +} |
---|
1736 | 2430 | #endif |
---|
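
With the two new cpu_show_*() hooks wired up, the vulnerabilities directory gains mmio_stale_data and retbleed entries alongside the existing ones. A small reader that dumps every file there is enough to see all of the strings assembled above:

    /* vulns.c - dump /sys/devices/system/cpu/vulnerabilities (sketch) */
    #include <stdio.h>
    #include <dirent.h>

    int main(void)
    {
            const char *dir = "/sys/devices/system/cpu/vulnerabilities";
            DIR *d = opendir(dir);
            struct dirent *de;
            char path[512], line[256];

            if (!d)
                    return perror(dir), 1;
            while ((de = readdir(d))) {
                    if (de->d_name[0] == '.')
                            continue;
                    snprintf(path, sizeof(path), "%s/%s", dir, de->d_name);
                    FILE *f = fopen(path, "r");

                    if (f && fgets(line, sizeof(line), f))
                            printf("%-24s %s", de->d_name, line);
                    if (f)
                            fclose(f);
            }
            closedir(d);
            return 0;
    }
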