@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * AMD Memory Encryption Support
  *
  * Copyright (C) 2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define DISABLE_BRANCH_PROFILING
@@ -27,6 +24,7 @@
  * be extended when new paravirt and debugging variants are added.)
  */
 #undef CONFIG_PARAVIRT
+#undef CONFIG_PARAVIRT_XXL
 #undef CONFIG_PARAVIRT_SPINLOCKS
 
 /*
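Why the #undef block matters: this file runs in the early identity-mapped boot path where no indirect calls are allowed, and the paravirt ops are exactly that kind of indirection, so the config symbols are undefined before any header is pulled in and the native definitions get compiled instead. A simplified sketch of what a paravirt-aware header effectively selects (illustrative only, not the kernel's literal header text):

/* Simplified sketch: with CONFIG_PARAVIRT_XXL undefined above, the
 * native inline below is compiled in and no pv_ops indirection (which
 * is not yet usable this early in boot) is emitted. */
#ifdef CONFIG_PARAVIRT_XXL
#define write_cr3(x)	paravirt_write_cr3(x)	/* pv_ops indirection */
#else
static inline void write_cr3(unsigned long val)
{
	/* plain native instruction, safe before pv_ops is set up */
	asm volatile("mov %0, %%cr3" : : "r" (val) : "memory");
}
#endif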
@@ -80,6 +78,19 @@
 	unsigned long vaddr;
 	unsigned long vaddr_end;
 };
+
+/*
+ * This work area lives in the .init.scratch section, which lives outside of
+ * the kernel proper. It is sized to hold the intermediate copy buffer and
+ * more than enough pagetable pages.
+ *
+ * By using this section, the kernel can be encrypted in place and it
+ * avoids any possibility of boot parameters or initramfs images being
+ * placed such that the in-place encryption logic overwrites them. This
+ * section is 2MB aligned to allow for simple pagetable setup using only
+ * PMD entries (see vmlinux.lds.S).
+ */
+static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");
 
 static char sme_cmdline_arg[] __initdata = "mem_encrypt";
 static char sme_cmdline_on[] __initdata = "on";
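For readers unfamiliar with the mechanism: __section() is the kernel's wrapper around the compiler's section attribute. A minimal user-space sketch of the same placement follows; the size mirrors the patch, but the kernel obtains its 2MB alignment from vmlinux.lds.S, which is approximated here with aligned() for illustration:

#include <stdio.h>

#define PMD_PAGE_SIZE	(2UL * 1024 * 1024)	/* 2 MB, as on x86-64 */

/* Place the buffer in a dedicated ELF section instead of .bss; the
 * kernel's __section(".init.scratch") expands to this same attribute. */
static char workarea[2 * PMD_PAGE_SIZE]
	__attribute__((section(".init.scratch"), aligned(2 * 1024 * 1024)));

int main(void)
{
	/* "readelf -S a.out" will show the dedicated .init.scratch section */
	printf("workarea at %p, %zu bytes\n", (void *)workarea, sizeof(workarea));
	return 0;
}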
@@ -322,8 +333,13 @@
 	}
 #endif
 
-	/* Set the encryption workarea to be immediately after the kernel */
-	workarea_start = kernel_end;
+	/*
+	 * We're running identity mapped, so we must obtain the address to the
+	 * SME encryption workarea using rip-relative addressing.
+	 */
+	asm ("lea sme_workarea(%%rip), %0"
+	     : "=r" (workarea_start)
+	     : "p" (sme_workarea));
 
 	/*
 	 * Calculate required number of workarea bytes needed:
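The motivation for the inline asm: the kernel is linked at a high virtual address, so a plain C reference to sme_workarea may be emitted as an absolute address that is not mapped while this code runs identity mapped; a RIP-relative lea instead yields an address relative to where the code is actually executing. The same pattern can be exercised in a standalone program (user-space sketch for GCC/Clang on x86-64; the symbol name is illustrative):

#include <stdio.h>

static char workarea[64];

int main(void)
{
	unsigned long addr;

	/* Same pattern as the patch: materialize the symbol's address with
	 * a RIP-relative LEA; the "p" input keeps the symbol referenced so
	 * the optimizer cannot drop it. */
	asm ("lea workarea(%%rip), %0"
	     : "=r" (addr)
	     : "p" (workarea));

	printf("workarea is at %#lx\n", addr);
	return 0;
}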
@@ -496,14 +512,6 @@
 
 #define AMD_SME_BIT	BIT(0)
 #define AMD_SEV_BIT	BIT(1)
-	/*
-	 * Set the feature mask (SME or SEV) based on whether we are
-	 * running under a hypervisor.
-	 */
-	eax = 1;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
 
 	/*
 	 * Check for the SME/SEV feature:
@@ -516,23 +524,38 @@
 	eax = 0x8000001f;
 	ecx = 0;
 	native_cpuid(&eax, &ebx, &ecx, &edx);
-	if (!(eax & feature_mask))
+	/* Check whether SEV or SME is supported */
+	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
 		return;
 
 	me_mask = 1UL << (ebx & 0x3f);
 
+	/* Check the SEV MSR whether SEV or SME is enabled */
+	sev_status = __rdmsr(MSR_AMD64_SEV);
+	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
 	/* Check if memory encryption is enabled */
 	if (feature_mask == AMD_SME_BIT) {
+		/*
+		 * No SME if Hypervisor bit is set. This check is here to
+		 * prevent a guest from trying to enable SME. For running as a
+		 * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
+		 * might be other hypervisors which emulate that MSR as non-zero
+		 * or even pass it through to the guest.
+		 * A malicious hypervisor can still trick a guest into this
+		 * path, but there is no way to protect against that.
+		 */
+		eax = 1;
+		ecx = 0;
+		native_cpuid(&eax, &ebx, &ecx, &edx);
+		if (ecx & BIT(31))
+			return;
+
 		/* For SME, check the SYSCFG MSR */
 		msr = __rdmsr(MSR_K8_SYSCFG);
 		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
 			return;
 	} else {
-		/* For SEV, check the SEV MSR */
-		msr = __rdmsr(MSR_AMD64_SEV);
-		if (!(msr & MSR_AMD64_SEV_ENABLED))
-			return;
-
 		/* SEV state cannot be controlled by a command line option */
 		sme_me_mask = me_mask;
 		sev_enabled = true;
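Taken together with the removal in the previous hunk, the net effect is that the SEV-versus-SME decision is now driven by reading MSR_AMD64_SEV rather than by the hypervisor CPUID bit, which now only guards the SME path. A pure-logic sketch of the resulting flow (the function and parameter names are mine, not the kernel's; bit positions follow the AMD manuals):

#include <stdbool.h>
#include <stdint.h>

#define AMD_SME_BIT		(1u << 0)	/* CPUID 0x8000001f:EAX[0] */
#define AMD_SEV_BIT		(1u << 1)	/* CPUID 0x8000001f:EAX[1] */
#define SEV_MSR_ENABLED		(1ull << 0)	/* MSR_AMD64_SEV[0] */
#define SYSCFG_MEM_ENCRYPT	(1ull << 23)	/* MSR_K8_SYSCFG[23] */
#define HYPERVISOR_BIT		(1u << 31)	/* CPUID 1:ECX[31] */

/* Pure-logic model of the new ordering; the inputs stand in for the
 * CPUID leaves and MSRs the kernel reads. */
static bool mem_encrypt_active(uint32_t cpuid_8000001f_eax,
			       uint64_t sev_msr, uint32_t cpuid_1_ecx,
			       uint64_t syscfg_msr)
{
	/* Neither SEV nor SME supported at all */
	if (!(cpuid_8000001f_eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return false;

	/* The SEV MSR, not the hypervisor bit, decides SEV vs. SME */
	if (sev_msr & SEV_MSR_ENABLED)
		return true;			/* SEV guest */

	/* SME path only: refuse if we appear to run under a hypervisor */
	if (cpuid_1_ecx & HYPERVISOR_BIT)
		return false;

	/* SME is active only when the BIOS has set SYSCFG[MemEncryptionModEn] */
	return syscfg_msr & SYSCFG_MEM_ENCRYPT;
}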
@@ -563,7 +586,8 @@
 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
 				     ((u64)bp->ext_cmd_line_ptr << 32));
 
-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+		return;
 
 	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
 		sme_me_mask = me_mask;
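Why the return-value check is needed: cmdline_find_option() leaves the output buffer untouched when the option is absent, so the old code ran strncmp() against uninitialized stack memory. A user-space reproduction of the hazard (find_option() is a hypothetical stand-in, not the kernel helper):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's cmdline_find_option(): copies the
 * option's value into buf and returns its length, or -1 when the option is
 * not present on the command line. */
static int find_option(const char *cmdline, const char *opt,
		       char *buf, size_t len)
{
	const char *p = strstr(cmdline, opt);

	if (!p)
		return -1;
	p += strlen(opt) + 1;	/* skip past "opt=" */
	snprintf(buf, len, "%.*s", (int)strcspn(p, " "), p);
	return (int)strlen(buf);
}

int main(void)
{
	char buffer[16];	/* uninitialized, like the kernel's buffer[] */

	/* Without the "< 0" check, a missing option would leave buffer
	 * holding stack garbage, and strncmp() below would compare it. */
	if (find_option("console=ttyS0 quiet", "mem_encrypt", buffer,
			sizeof(buffer)) < 0)
		return 0;

	if (!strncmp(buffer, "on", sizeof(buffer)))
		puts("mem_encrypt=on");
	return 0;
}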
---|