```diff
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * AMD Memory Encryption Support
  *
  * Copyright (C) 2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define DISABLE_BRANCH_PROFILING
```
```diff
@@ -18,6 +15,11 @@
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/cc_platform.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
```
```diff
@@ -36,15 +38,17 @@
  * reside in the .data section so as not to be zeroed out when the .bss
  * section is later cleared.
  */
-u64 sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(".data") = 0;
+u64 sev_status __section(".data") = 0;
+u64 sev_check_data __section(".data") = 0;
 EXPORT_SYMBOL(sme_me_mask);
 DEFINE_STATIC_KEY_FALSE(sev_enable_key);
 EXPORT_SYMBOL_GPL(sev_enable_key);
 
-bool sev_enabled __section(.data);
+bool sev_enabled __section(".data");
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
-static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
 
 /*
  * This routine does not change the underlying encryption setting of the
```
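The comment above explains why these masks live in .data rather than .bss: they are written very early in boot, before the .bss section is cleared, and a .bss placement would wipe the values. Below is a small user-space sketch of the same effect, not kernel code — the variable names are made up, and glibc's linker-provided `edata`/`end` symbols are used only to locate the segments — showing how an explicit `section(".data")` attribute keeps a zero-initialized global out of .bss:

```c
#include <stdio.h>
#include <stdint.h>

/* Linker-provided symbols on Linux/glibc: .data ends at 'edata',
 * .bss ends at 'end' (see "man 3 end"). */
extern char edata, end;

/* A zero-initialized global normally lands in .bss ... */
unsigned long long in_bss_mask = 0;

/* ... but an explicit section attribute forces this one into .data,
 * mirroring the kernel's __section(".data") annotation above. */
unsigned long long in_data_mask __attribute__((section(".data"))) = 0;

static const char *where(const void *p)
{
	uintptr_t a = (uintptr_t)p;

	/* Anything between edata and end sits in .bss. */
	return (a >= (uintptr_t)&edata && a < (uintptr_t)&end)
		? ".bss" : ".data (or earlier)";
}

int main(void)
{
	printf("in_bss_mask  -> %s\n", where(&in_bss_mask));
	printf("in_data_mask -> %s\n", where(&in_data_mask));
	return 0;
}
```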
```diff
@@ -133,7 +137,7 @@
 		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
 	} while (size);
 
-	__native_flush_tlb();
+	flush_tlb_local();
 }
 
 void __init sme_unmap_bootdata(char *real_mode_data)
```
```diff
@@ -301,9 +305,13 @@
 		else
 			split_page_size_mask = 1 << PG_LEVEL_2M;
 
-		kernel_physical_mapping_init(__pa(vaddr & pmask),
-					     __pa((vaddr_end & pmask) + psize),
-					     split_page_size_mask);
+		/*
+		 * kernel_physical_mapping_change() does not flush the TLBs, so
+		 * a TLB flush is required after we exit from the for loop.
+		 */
+		kernel_physical_mapping_change(__pa(vaddr & pmask),
+					       __pa((vaddr_end & pmask) + psize),
+					       split_page_size_mask);
 	}
 
 	ret = 0;
```
```diff
@@ -339,15 +347,45 @@
 {
 	return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
-	return sme_me_mask && sev_enabled;
+	return sev_status & MSR_AMD64_SEV_ENABLED;
 }
-EXPORT_SYMBOL(sev_active);
+EXPORT_SYMBOL_GPL(sev_active);
 
-/* Architecture __weak replacement functions */
+/* Needs to be called from non-instrumentable code */
+bool noinstr sev_es_active(void)
+{
+	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
+}
+
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+	/*
+	 * For SEV, all DMA must be to unencrypted addresses.
+	 */
+	if (sev_active())
+		return true;
+
+	/*
+	 * For SME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the
+	 * encryption mask.
+	 */
+	if (sme_active()) {
+		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+						dev->bus_dma_limit);
+
+		if (dma_dev_mask <= dma_enc_mask)
+			return true;
+	}
+
+	return false;
+}
+
 void __init mem_encrypt_free_decrypted_mem(void)
 {
 	unsigned long vaddr, vaddr_end, npages;
```
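In the SME branch of force_dma_unencrypted() above, a device is forced to unencrypted (bounce-buffered) DMA when its DMA mask cannot cover addresses that carry the encryption bit. The following stand-alone sketch walks through that comparison with simplified user-space re-creations of DMA_BIT_MASK(), __ffs64() and min_not_zero(); the C-bit position 47 is only an example value, the real position is reported by CPUID:

```c
#include <stdio.h>
#include <stdint.h>

/* Simplified re-creations of the kernel helpers used above. */
#define DMA_BIT_MASK(n)		(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define min_not_zero(a, b)	((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

static unsigned int ffs64(uint64_t x)
{
	/* Index of the lowest set bit, like the kernel's __ffs64(). */
	unsigned int i = 0;

	while (!(x & 1)) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	/* Example only: an encryption (C) bit at physical address bit 47. */
	uint64_t sme_me_mask = 1ULL << 47;

	/* Largest address range a device can reach without setting the C-bit. */
	uint64_t dma_enc_mask = DMA_BIT_MASK(ffs64(sme_me_mask));

	/* Compare a 32-bit-only device against a full 64-bit device. */
	uint64_t masks[] = { DMA_BIT_MASK(32), DMA_BIT_MASK(64) };

	for (int i = 0; i < 2; i++) {
		uint64_t dma_dev_mask = min_not_zero(masks[i], 0ULL /* no bus limit */);

		printf("device mask %016llx -> %s\n",
		       (unsigned long long)dma_dev_mask,
		       dma_dev_mask <= dma_enc_mask ?
		       "force unencrypted (bounce) DMA" : "encrypted DMA ok");
	}
	return 0;
}
```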
```diff
@@ -372,6 +410,32 @@
 	free_init_pages("unused decrypted", vaddr, vaddr_end);
 }
 
+static void print_mem_encrypt_feature_info(void)
+{
+	pr_info("AMD Memory Encryption Features active:");
+
+	/* Secure Memory Encryption */
+	if (sme_active()) {
+		/*
+		 * SME is mutually exclusive with any of the SEV
+		 * features below.
+		 */
+		pr_cont(" SME\n");
+		return;
+	}
+
+	/* Secure Encrypted Virtualization */
+	if (sev_active())
+		pr_cont(" SEV");
+
+	/* Encrypted Register State */
+	if (sev_es_active())
+		pr_cont(" SEV-ES");
+
+	pr_cont("\n");
+}
+
+/* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
 	if (!sme_me_mask)
```
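sev_active() and sev_es_active() are plain bit tests on sev_status, the cached value of the SEV status MSR, and print_mem_encrypt_feature_info() turns those bits into a single boot-log line. A user-space re-creation of that reporting logic is sketched below; the bit positions (bit 0 for SEV, bit 1 for SEV-ES) follow the usual MSR_AMD64_SEV layout, printf stands in for pr_info()/pr_cont(), and sme_active() is simplified:

```c
#include <stdio.h>
#include <stdint.h>

/* Bit layout of the SEV status MSR as consumed by the helpers above:
 * bit 0 = SEV enabled, bit 1 = SEV-ES enabled. */
#define MSR_AMD64_SEV_ENABLED		(1ULL << 0)
#define MSR_AMD64_SEV_ES_ENABLED	(1ULL << 1)

static uint64_t sev_status;	/* in the kernel this caches the MSR value */
static uint64_t sme_me_mask;

static int sme_active(void)    { return sme_me_mask && !sev_status; } /* simplified */
static int sev_active(void)    { return !!(sev_status & MSR_AMD64_SEV_ENABLED); }
static int sev_es_active(void) { return !!(sev_status & MSR_AMD64_SEV_ES_ENABLED); }

static void print_mem_encrypt_feature_info(void)
{
	printf("AMD Memory Encryption Features active:");

	if (sme_active()) {		/* SME excludes the SEV features */
		printf(" SME\n");
		return;
	}
	if (sev_active())
		printf(" SEV");
	if (sev_es_active())
		printf(" SEV-ES");
	printf("\n");
}

int main(void)
{
	/* Pretend we are an SEV-ES guest. */
	sev_status = MSR_AMD64_SEV_ENABLED | MSR_AMD64_SEV_ES_ENABLED;
	print_mem_encrypt_feature_info();	/* "... active: SEV SEV-ES" */
	return 0;
}
```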
```diff
@@ -381,20 +445,11 @@
 	swiotlb_update_mem_attributes();
 
 	/*
-	 * With SEV, DMA operations cannot use encryption, we need to use
-	 * SWIOTLB to bounce buffer DMA operation.
-	 */
-	if (sev_active())
-		dma_ops = &swiotlb_dma_ops;
-
-	/*
 	 * With SEV, we need to unroll the rep string I/O instructions.
 	 */
 	if (sev_active())
 		static_branch_enable(&sev_enable_key);
 
-	pr_info("AMD %s active\n",
-		sev_active() ? "Secure Encrypted Virtualization (SEV)"
-			     : "Secure Memory Encryption (SME)");
+	print_mem_encrypt_feature_info();
 }
 
```
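The sev_enable_key static branch that mem_encrypt_init() enables is what the x86 port I/O helpers consult so that `rep ins`/`rep outs` get unrolled under SEV: the hypervisor cannot read or write encrypted guest memory to emulate a string operation, so each element is moved through a register with a separate IN/OUT instruction. The sketch below is only a schematic, self-contained illustration of that unrolling — the outb() stub and the sev_key_active flag stand in for the real kernel helpers and for the static branch:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel pieces: a fake port register and a flag that
 * plays the role of static_branch_unlikely(&sev_enable_key). */
static uint8_t fake_port;
static bool sev_key_active = true;

static void outb(uint8_t value, uint16_t port)
{
	(void)port;
	fake_port = value;	/* pretend this traps to the hypervisor */
}

/* "rep outsb"-style transfer, unrolled: every byte goes out with its own
 * OUT instruction, so the data travels in a register and the hypervisor
 * never has to touch (encrypted) guest memory. */
static void outsb_unrolled(uint16_t port, const void *addr, size_t count)
{
	const uint8_t *p = addr;

	while (count--)
		outb(*p++, port);
}

int main(void)
{
	static const uint8_t buf[] = { 0xde, 0xad, 0xbe, 0xef };

	if (sev_key_active)		/* SEV guest: unroll the string I/O */
		outsb_unrolled(0x3f8, buf, sizeof(buf));

	printf("last byte written: %#x\n", fake_port);
	return 0;
}
```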