.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Suspend support specific for i386/x86-64. |
---|
3 | | - * |
---|
4 | | - * Distribute under GPLv2 |
---|
5 | 4 | * |
---|
6 | 5 | * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> |
---|
7 | 6 | * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz> |
---|
.. | .. |
---|
14 | 13 | #include <linux/perf_event.h> |
---|
15 | 14 | #include <linux/tboot.h> |
---|
16 | 15 | #include <linux/dmi.h> |
---|
| 16 | +#include <linux/pgtable.h> |
---|
17 | 17 | |
---|
18 | | -#include <asm/pgtable.h> |
---|
19 | 18 | #include <asm/proto.h> |
---|
20 | 19 | #include <asm/mtrr.h> |
---|
21 | 20 | #include <asm/page.h> |
---|
.. | .. |
---|
26 | 25 | #include <asm/cpu.h> |
---|
27 | 26 | #include <asm/mmu_context.h> |
---|
28 | 27 | #include <asm/cpu_device_id.h> |
---|
| 28 | +#include <asm/microcode.h> |
---|
29 | 29 | |
---|
30 | 30 | #ifdef CONFIG_X86_32 |
---|
31 | 31 | __visible unsigned long saved_context_ebx; |
---|
.. | .. |
---|
41 | 41 | struct saved_msr *end = msr + ctxt->saved_msrs.num; |
---|
42 | 42 | |
---|
43 | 43 | while (msr < end) { |
---|
44 | | - msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q); |
---|
| 44 | + if (msr->valid) |
---|
| 45 | + rdmsrl(msr->info.msr_no, msr->info.reg.q); |
---|
45 | 46 | msr++; |
---|
46 | 47 | } |
---|
47 | 48 | } |
---|
.. | .. |
---|
124 | 125 | ctxt->cr2 = read_cr2(); |
---|
125 | 126 | ctxt->cr3 = __read_cr3(); |
---|
126 | 127 | ctxt->cr4 = __read_cr4(); |
---|
127 | | -#ifdef CONFIG_X86_64 |
---|
128 | | - ctxt->cr8 = read_cr8(); |
---|
129 | | -#endif |
---|
130 | 128 | ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE, |
---|
131 | 129 | &ctxt->misc_enable); |
---|
132 | 130 | msr_save_context(ctxt); |
---|
.. | .. |
---|
197 | 195 | */ |
---|
198 | 196 | static void notrace __restore_processor_state(struct saved_context *ctxt) |
---|
199 | 197 | { |
---|
| 198 | + struct cpuinfo_x86 *c; |
---|
| 199 | + |
---|
200 | 200 | if (ctxt->misc_enable_saved) |
---|
201 | 201 | wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); |
---|
202 | 202 | /* |
---|
.. | .. |
---|
209 | 209 | #else |
---|
210 | 210 | /* CONFIG X86_64 */ |
---|
211 | 211 | wrmsrl(MSR_EFER, ctxt->efer); |
---|
212 | | - write_cr8(ctxt->cr8); |
---|
213 | 212 | __write_cr4(ctxt->cr4); |
---|
214 | 213 | #endif |
---|
215 | 214 | write_cr3(ctxt->cr3); |
---|
.. | .. |
---|
267 | 266 | x86_platform.restore_sched_clock_state(); |
---|
268 | 267 | mtrr_bp_restore(); |
---|
269 | 268 | perf_restore_debug_store(); |
---|
| 269 | + |
---|
| 270 | + c = &cpu_data(smp_processor_id()); |
---|
| 271 | + if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL)) |
---|
| 272 | + init_ia32_feat_ctl(c); |
---|
| 273 | + |
---|
| 274 | + microcode_bsp_resume(); |
---|
| 275 | + |
---|
| 276 | + /* |
---|
| 277 | + * This needs to happen after the microcode has been updated upon resume |
---|
| 278 | + * because some of the MSRs are "emulated" in microcode. |
---|
| 279 | + */ |
---|
270 | 280 | msr_restore_context(ctxt); |
---|
271 | 281 | } |
---|
272 | 282 | |
---|
/*
 * restore_processor_state - restore the boot CPU's saved register state.
 *
 * Thin wrapper around __restore_processor_state() operating on the global
 * saved_context. Exported for apm.c. Must stay notrace: tracing machinery
 * cannot run before CPU state (GS base etc.) is restored.
 */
/* Needed by apm.c */
void notrace restore_processor_state(void)
{
#ifdef __clang__
	/*
	 * The following code snippet is copied from __restore_processor_state().
	 * Its purpose is to prepare the GS segment before that function is
	 * called: when compiled with the shadow call stack (SCS) enabled, the
	 * compiler emits GS-relative accesses at function entry, so GS must be
	 * valid before __restore_processor_state() itself restores it.
	 * TODO: Hack to be removed later when the compiler bug is fixed.
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, saved_context.kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif
#endif
	__restore_processor_state(&saved_context);
}
---|
278 | 301 | #ifdef CONFIG_X86_32 |
---|
.. | .. |
---|
312 | 335 | if (ret) |
---|
313 | 336 | return ret; |
---|
314 | 337 | smp_ops.play_dead = resume_play_dead; |
---|
315 | | - ret = disable_nonboot_cpus(); |
---|
| 338 | + ret = freeze_secondary_cpus(0); |
---|
316 | 339 | smp_ops.play_dead = play_dead; |
---|
317 | 340 | return ret; |
---|
318 | 341 | } |
---|
.. | .. |
---|
426 | 449 | } |
---|
427 | 450 | |
---|
428 | 451 | for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) { |
---|
| 452 | + u64 dummy; |
---|
| 453 | + |
---|
429 | 454 | msr_array[i].info.msr_no = msr_id[j]; |
---|
430 | | - msr_array[i].valid = false; |
---|
| 455 | + msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy); |
---|
431 | 456 | msr_array[i].info.reg.q = 0; |
---|
432 | 457 | } |
---|
433 | 458 | saved_msrs->num = total_num; |
---|
.. | .. |
---|
480 | 505 | } |
---|
481 | 506 | |
---|
/*
 * CPU models whose MSRs need to be saved across suspend and restored on
 * resume. Each entry's .driver_data points at msr_save_cpuid_features(),
 * which registers the relevant MSRs when the entry matches.
 * Terminated by an empty entry, as required by x86_match_cpu().
 */
static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};
---|
499 | 512 | |
---|
.. | .. |
---|
514 | 527 | return ret; |
---|
515 | 528 | } |
---|
516 | 529 | |
---|
| 530 | +static void pm_save_spec_msr(void) |
---|
| 531 | +{ |
---|
| 532 | + struct msr_enumeration { |
---|
| 533 | + u32 msr_no; |
---|
| 534 | + u32 feature; |
---|
| 535 | + } msr_enum[] = { |
---|
| 536 | + { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL }, |
---|
| 537 | + { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL }, |
---|
| 538 | + { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT }, |
---|
| 539 | + { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL }, |
---|
| 540 | + { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD }, |
---|
| 541 | + { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC }, |
---|
| 542 | + }; |
---|
| 543 | + int i; |
---|
| 544 | + |
---|
| 545 | + for (i = 0; i < ARRAY_SIZE(msr_enum); i++) { |
---|
| 546 | + if (boot_cpu_has(msr_enum[i].feature)) |
---|
| 547 | + msr_build_context(&msr_enum[i].msr_no, 1); |
---|
| 548 | + } |
---|
| 549 | +} |
---|
| 550 | + |
---|
/*
 * pm_check_save_msr - build the list of MSRs to preserve across suspend.
 *
 * Registers MSRs from three sources, in order: DMI-quirk matches
 * (msr_save_dmi_table), CPU-model matches (msr_save_cpu_table), and the
 * speculation-control MSRs enumerated by pm_save_spec_msr(). The order
 * determines the layout of the saved-MSR array.
 *
 * Always returns 0.
 */
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);
	pm_save_spec_msr();

	return 0;
}
---|