forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-05-10
Commit: 9999e48639b3cecb08ffb37358bcba3b48161b29

--- a/kernel/arch/x86/mm/mem_encrypt.c
+++ b/kernel/arch/x86/mm/mem_encrypt.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * AMD Memory Encryption Support
  *
  * Copyright (C) 2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define DISABLE_BRANCH_PROFILING
@@ -18,6 +15,11 @@
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/cc_platform.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -36,15 +38,17 @@
  * reside in the .data section so as not to be zeroed out when the .bss
  * section is later cleared.
  */
-u64 sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(".data") = 0;
+u64 sev_status __section(".data") = 0;
+u64 sev_check_data __section(".data") = 0;
 EXPORT_SYMBOL(sme_me_mask);
 DEFINE_STATIC_KEY_FALSE(sev_enable_key);
 EXPORT_SYMBOL_GPL(sev_enable_key);
 
-bool sev_enabled __section(.data);
+bool sev_enabled __section(".data");
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
-static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
 
 /*
  * This routine does not change the underlying encryption setting of the
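
Note on the hunk above: the `__section(.data)` to `__section(".data")` change tracks the kernel's switch to passing section names through as string literals (the `__section()` helper no longer stringifies its argument), and `__initdata` lets `sme_early_buffer` be reclaimed after boot, since in-place encryption only happens during early init. A minimal user-space sketch of named-section placement, assuming GCC/Clang attribute syntax; the `.mydata` section name is illustrative:

/*
 * Stand-alone sketch of named-section placement, analogous to the kernel's
 * __section(".data") annotation. Inspect the result with:
 *   cc -c section_demo.c && objdump -t section_demo.o
 */
#include <stdio.h>

/* The attribute takes the section name as a string literal, which is why
 * the kernel macro now expects ".data" rather than a bare .data token. */
static unsigned long early_buffer_model __attribute__((section(".mydata"))) = 1;

int main(void)
{
	printf("early_buffer_model = %lu\n", early_buffer_model);
	return 0;
}
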
@@ -133,7 +137,7 @@
 		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
 	} while (size);
 
-	__native_flush_tlb();
+	flush_tlb_local();
 }
 
 void __init sme_unmap_bootdata(char *real_mode_data)
@@ -301,9 +305,13 @@
 		else
 			split_page_size_mask = 1 << PG_LEVEL_2M;
 
-		kernel_physical_mapping_init(__pa(vaddr & pmask),
-					     __pa((vaddr_end & pmask) + psize),
-					     split_page_size_mask);
+		/*
+		 * kernel_physical_mapping_change() does not flush the TLBs, so
+		 * a TLB flush is required after we exit from the for loop.
+		 */
+		kernel_physical_mapping_change(__pa(vaddr & pmask),
+					       __pa((vaddr_end & pmask) + psize),
+					       split_page_size_mask);
 	}
 
 	ret = 0;
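
The rounding arithmetic in the call above re-maps whole large pages: the start address is masked down to a page-size boundary and the end is rounded up by one page size, so the large page containing each endpoint is fully covered. A small stand-alone sketch of that arithmetic for the 2 MB (PMD) case, with purely illustrative addresses (the real code also handles 1 GB pages and converts to physical addresses with __pa()):

/*
 * Worked example of the range rounding done before
 * kernel_physical_mapping_change().
 */
#include <stdio.h>

#define PMD_SHIFT	21			/* 2 MB pages on x86-64 */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long vaddr     = 0x100003000UL;	/* hypothetical start */
	unsigned long vaddr_end = 0x100405000UL;	/* hypothetical end   */

	unsigned long start = vaddr & PMD_MASK;
	unsigned long end   = (vaddr_end & PMD_MASK) + PMD_SIZE;

	/* prints: remap [0x100000000, 0x100600000) covering 3 x 2MB pages */
	printf("remap [%#lx, %#lx) covering %lu x 2MB pages\n",
	       start, end, (end - start) / PMD_SIZE);
	return 0;
}
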
@@ -339,15 +347,45 @@
 {
 	return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
-	return sme_me_mask && sev_enabled;
+	return sev_status & MSR_AMD64_SEV_ENABLED;
 }
-EXPORT_SYMBOL(sev_active);
+EXPORT_SYMBOL_GPL(sev_active);
 
-/* Architecture __weak replacement functions */
+/* Needs to be called from non-instrumentable code */
+bool noinstr sev_es_active(void)
+{
+	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
+}
+
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+	/*
+	 * For SEV, all DMA must be to unencrypted addresses.
+	 */
+	if (sev_active())
+		return true;
+
+	/*
+	 * For SME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the
+	 * encryption mask.
+	 */
+	if (sme_active()) {
+		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+						dev->bus_dma_limit);
+
+		if (dma_dev_mask <= dma_enc_mask)
+			return true;
+	}
+
+	return false;
+}
+
 void __init mem_encrypt_free_decrypted_mem(void)
 {
 	unsigned long vaddr, vaddr_end, npages;
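
The new force_dma_unencrypted() hook decides per device whether DMA must go through unencrypted (bounced) memory: always under SEV, and under SME only when the device's DMA mask cannot reach addresses with the encryption bit set. This is also what lets mem_encrypt_init() (later in this diff) drop its explicit `dma_ops = &swiotlb_dma_ops` override, since dma-direct bounces through SWIOTLB on its own when this hook returns true. Below is a user-space sketch of the mask comparison, assuming a C-bit at position 47 (the real position is machine-specific, reported via CPUID 0x8000001f) and re-creating __ffs64(), DMA_BIT_MASK() and min_not_zero() with the semantics of the kernel helpers:

/* Sketch of the force_dma_unencrypted() mask check, user-space model. */
#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static unsigned int ffs64(uint64_t word)	/* __ffs64(): lowest set bit */
{
	return (unsigned int)__builtin_ctzll(word);
}

static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	uint64_t sme_me_mask = 1ULL << 47;		/* assumed C-bit */
	uint64_t dma_enc_mask = DMA_BIT_MASK(ffs64(sme_me_mask));

	/* A 32-bit-only device cannot address encrypted memory (which
	 * requires bit 47 set), so its DMA must be unencrypted/bounced. */
	uint64_t coherent_dma_mask = DMA_BIT_MASK(32);
	uint64_t bus_dma_limit = 0;			/* 0 == no bus limit */
	uint64_t dma_dev_mask = min_not_zero(coherent_dma_mask, bus_dma_limit);

	printf("force unencrypted: %s\n",
	       dma_dev_mask <= dma_enc_mask ? "yes" : "no");
	return 0;
}
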
@@ -372,6 +410,32 @@
 	free_init_pages("unused decrypted", vaddr, vaddr_end);
 }
 
+static void print_mem_encrypt_feature_info(void)
+{
+	pr_info("AMD Memory Encryption Features active:");
+
+	/* Secure Memory Encryption */
+	if (sme_active()) {
+		/*
+		 * SME is mutually exclusive with any of the SEV
+		 * features below.
+		 */
+		pr_cont(" SME\n");
+		return;
+	}
+
+	/* Secure Encrypted Virtualization */
+	if (sev_active())
+		pr_cont(" SEV");
+
+	/* Encrypted Register State */
+	if (sev_es_active())
+		pr_cont(" SEV-ES");
+
+	pr_cont("\n");
+}
+
+/* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
 	if (!sme_me_mask)
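
print_mem_encrypt_feature_info() builds its banner with pr_cont(), which appends to the in-progress console line rather than starting a new prefixed message, so compatible features (SEV, SEV-ES) accumulate on one line while SME short-circuits. A user-space model of that decision tree, with hard-coded feature flags purely for illustration:

/* User-space model of print_mem_encrypt_feature_info(). */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	bool sme = false, sev = true, sev_es = true;	/* assumed state */

	printf("AMD Memory Encryption Features active:");

	if (sme) {		/* SME excludes the SEV features */
		printf(" SME\n");
		return 0;
	}
	if (sev)
		printf(" SEV");
	if (sev_es)
		printf(" SEV-ES");
	printf("\n");		/* -> "... active: SEV SEV-ES" */
	return 0;
}
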
@@ -381,20 +445,11 @@
 	swiotlb_update_mem_attributes();
 
 	/*
-	 * With SEV, DMA operations cannot use encryption, we need to use
-	 * SWIOTLB to bounce buffer DMA operation.
-	 */
-	if (sev_active())
-		dma_ops = &swiotlb_dma_ops;
-
-	/*
 	 * With SEV, we need to unroll the rep string I/O instructions.
 	 */
 	if (sev_active())
 		static_branch_enable(&sev_enable_key);
 
-	pr_info("AMD %s active\n",
-		sev_active() ? "Secure Encrypted Virtualization (SEV)"
-			     : "Secure Memory Encryption (SME)");
+	print_mem_encrypt_feature_info();
 }
 