forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/arch/x86/mm/ioremap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
@@ -6,7 +7,7 @@
  * (C) Copyright 1995 1996 Linus Torvalds
  */

-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
@@ -15,21 +16,24 @@
 #include <linux/mmiotrace.h>
 #include <linux/mem_encrypt.h>
 #include <linux/efi.h>
+#include <linux/pgtable.h>

 #include <asm/set_memory.h>
 #include <asm/e820/api.h>
+#include <asm/efi.h>
 #include <asm/fixmap.h>
-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/setup.h>

 #include "physaddr.h"

-struct ioremap_mem_flags {
-        bool system_ram;
-        bool desc_other;
+/*
+ * Descriptor controlling ioremap() behavior.
+ */
+struct ioremap_desc {
+        unsigned int flags;
 };

 /*
@@ -61,13 +65,14 @@
         return err;
 }

-static bool __ioremap_check_ram(struct resource *res)
+/* Does the range (or a subset of) contain normal RAM? */
+static unsigned int __ioremap_check_ram(struct resource *res)
 {
         unsigned long start_pfn, stop_pfn;
         unsigned long i;

         if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
-                return false;
+                return 0;

         start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
         stop_pfn = (res->end + 1) >> PAGE_SHIFT;
@@ -75,45 +80,84 @@
         for (i = 0; i < (stop_pfn - start_pfn); ++i)
                 if (pfn_valid(start_pfn + i) &&
                     !PageReserved(pfn_to_page(start_pfn + i)))
-                        return true;
+                        return IORES_MAP_SYSTEM_RAM;
         }

-        return false;
+        return 0;
 }

-static int __ioremap_check_desc_other(struct resource *res)
+/*
+ * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
+ * there the whole memory is already encrypted.
+ */
+static unsigned int __ioremap_check_encrypted(struct resource *res)
 {
-        return (res->desc != IORES_DESC_NONE);
+        if (!sev_active())
+                return 0;
+
+        switch (res->desc) {
+        case IORES_DESC_NONE:
+        case IORES_DESC_RESERVED:
+                break;
+        default:
+                return IORES_MAP_ENCRYPTED;
+        }
+
+        return 0;
 }

-static int __ioremap_res_check(struct resource *res, void *arg)
+/*
+ * The EFI runtime services data area is not covered by walk_mem_res(), but must
+ * be mapped encrypted when SEV is active.
+ */
+static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
 {
-        struct ioremap_mem_flags *flags = arg;
+        if (!sev_active())
+                return;

-        if (!flags->system_ram)
-                flags->system_ram = __ioremap_check_ram(res);
+        if (!IS_ENABLED(CONFIG_EFI))
+                return;

-        if (!flags->desc_other)
-                flags->desc_other = __ioremap_check_desc_other(res);
+        if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
+            (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
+             efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
+                desc->flags |= IORES_MAP_ENCRYPTED;
+}

-        return flags->system_ram && flags->desc_other;
+static int __ioremap_collect_map_flags(struct resource *res, void *arg)
+{
+        struct ioremap_desc *desc = arg;
+
+        if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
+                desc->flags |= __ioremap_check_ram(res);
+
+        if (!(desc->flags & IORES_MAP_ENCRYPTED))
+                desc->flags |= __ioremap_check_encrypted(res);
+
+        return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
+                (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
 }

 /*
  * To avoid multiple resource walks, this function walks resources marked as
  * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
  * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ *
+ * After that, deal with misc other ranges in __ioremap_check_other() which do
+ * not fall into the above category.
  */
 static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
-                                struct ioremap_mem_flags *flags)
+                                struct ioremap_desc *desc)
 {
         u64 start, end;

         start = (u64)addr;
         end = start + size - 1;
-        memset(flags, 0, sizeof(*flags));
+        memset(desc, 0, sizeof(struct ioremap_desc));

-        walk_mem_res(start, end, flags, __ioremap_res_check);
+        walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
+
+        __ioremap_check_other(addr, desc);
 }

 /*
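For reference, the IORES_MAP_* bits that this hunk ORs into ioremap_desc.flags are not defined in this file; in the matching upstream series they come from include/linux/ioport.h, roughly as shown in this sketch:

/* include/linux/ioport.h (upstream counterpart of this change), shown for context only */
enum {
        IORES_MAP_SYSTEM_RAM    = BIT(0),       /* range contains normal RAM */
        IORES_MAP_ENCRYPTED     = BIT(1),       /* range must be mapped with the encryption bit set */
};

Folding the two booleans into a single flags word lets one walk_mem_res() pass collect both properties; the callback returns nonzero once both bits are set, which stops the walk early.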
@@ -130,14 +174,15 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap_caller(resource_size_t phys_addr,
-                unsigned long size, enum page_cache_mode pcm, void *caller)
+static void __iomem *
+__ioremap_caller(resource_size_t phys_addr, unsigned long size,
+                 enum page_cache_mode pcm, void *caller, bool encrypted)
 {
         unsigned long offset, vaddr;
         resource_size_t last_addr;
         const resource_size_t unaligned_phys_addr = phys_addr;
         const unsigned long unaligned_size = size;
-        struct ioremap_mem_flags mem_flags;
+        struct ioremap_desc io_desc;
         struct vm_struct *area;
         enum page_cache_mode new_pcm;
         pgprot_t prot;
@@ -156,12 +201,12 @@
                 return NULL;
         }

-        __ioremap_check_mem(phys_addr, size, &mem_flags);
+        __ioremap_check_mem(phys_addr, size, &io_desc);

         /*
          * Don't allow anybody to remap normal RAM that we're using..
          */
-        if (mem_flags.system_ram) {
+        if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
                 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                           &phys_addr, &last_addr);
                 return NULL;
@@ -171,13 +216,19 @@
          * Mappings have to be page-aligned
          */
         offset = phys_addr & ~PAGE_MASK;
-        phys_addr &= PHYSICAL_PAGE_MASK;
+        phys_addr &= PAGE_MASK;
         size = PAGE_ALIGN(last_addr+1) - phys_addr;

-        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
+        /*
+         * Mask out any bits not part of the actual physical
+         * address, like memory encryption bits.
+         */
+        phys_addr &= PHYSICAL_PAGE_MASK;
+
+        retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
                                                 pcm, &new_pcm);
         if (retval) {
-                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
+                printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
                 return NULL;
         }

@@ -199,7 +250,7 @@
          * resulting mapping.
          */
         prot = PAGE_KERNEL_IO;
-        if (sev_active() && mem_flags.desc_other)
+        if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
                 prot = pgprot_encrypted(prot);

         switch (pcm) {
@@ -233,7 +284,7 @@
         area->phys_addr = phys_addr;
         vaddr = (unsigned long) area->addr;

-        if (kernel_map_sync_memtype(phys_addr, size, pcm))
+        if (memtype_kernel_map_sync(phys_addr, size, pcm))
                 goto err_free_area;

         if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@ -253,16 +304,16 @@
 err_free_area:
         free_vm_area(area);
 err_free_memtype:
-        free_memtype(phys_addr, phys_addr + size);
+        memtype_free(phys_addr, phys_addr + size);
         return NULL;
 }

 /**
- * ioremap_nocache - map bus memory into CPU space
+ * ioremap - map bus memory into CPU space
  * @phys_addr: bus address of the memory
  * @size: size of the resource to map
  *
- * ioremap_nocache performs a platform specific sequence of operations to
+ * ioremap performs a platform specific sequence of operations to
  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
@@ -278,7 +329,7 @@
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
+void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
 {
         /*
          * Ideally, this should be:
@@ -291,9 +342,9 @@
         enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

         return __ioremap_caller(phys_addr, size, pcm,
-                                __builtin_return_address(0));
+                                __builtin_return_address(0), false);
 }
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap);

 /**
  * ioremap_uc - map bus memory into CPU space as strongly uncachable
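With this rename, callers use ioremap() directly where they previously called ioremap_nocache(); the returned cookie must still be accessed through the mmio helpers and released with iounmap(). A minimal, hypothetical caller (base address, window size and register offset are invented purely for illustration):

#include <linux/io.h>

#define DEMO_MMIO_BASE  0xfed40000UL    /* hypothetical device window */
#define DEMO_MMIO_SIZE  0x1000UL

static int demo_read_status(u32 *status)
{
        void __iomem *regs;

        regs = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE); /* was ioremap_nocache() */
        if (!regs)
                return -ENOMEM;

        *status = readl(regs + 0x04);   /* use readl()/writel(), never plain loads */
        iounmap(regs);
        return 0;
}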
@@ -324,7 +375,7 @@
         enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

         return __ioremap_caller(phys_addr, size, pcm,
-                                __builtin_return_address(0));
+                                __builtin_return_address(0), false);
 }
 EXPORT_SYMBOL_GPL(ioremap_uc);

@@ -341,7 +392,7 @@
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
         return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
-                                __builtin_return_address(0));
+                                __builtin_return_address(0), false);
 }
 EXPORT_SYMBOL(ioremap_wc);

@@ -358,14 +409,21 @@
 void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
 {
         return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
-                                __builtin_return_address(0));
+                                __builtin_return_address(0), false);
 }
 EXPORT_SYMBOL(ioremap_wt);
+
+void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
+{
+        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
+                                __builtin_return_address(0), true);
+}
+EXPORT_SYMBOL(ioremap_encrypted);

 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
         return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
-                                __builtin_return_address(0));
+                                __builtin_return_address(0), false);
 }
 EXPORT_SYMBOL(ioremap_cache);

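ioremap_encrypted() maps the range write-back and forces the encryption bit through the new 'encrypted' argument to __ioremap_caller(), regardless of what the resource walk reported; upstream this is used by the vmcore path to read an SEV-encrypted crashed kernel's memory. A hedged sketch of such a caller (the helper name and surrounding context are illustrative only):

#include <linux/io.h>
#include <linux/pfn.h>

/* Copy one page of encrypted physical memory into 'dst' (sketch only). */
static int demo_copy_encrypted_page(unsigned long pfn, void *dst)
{
        void __iomem *vaddr;

        vaddr = ioremap_encrypted(PFN_PHYS(pfn), PAGE_SIZE);
        if (!vaddr)
                return -ENOMEM;

        memcpy_fromio(dst, vaddr, PAGE_SIZE);
        iounmap(vaddr);
        return 0;
}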
@@ -374,7 +432,7 @@
 {
         return __ioremap_caller(phys_addr, size,
                                 pgprot2cachemode(__pgprot(prot_val)),
-                                __builtin_return_address(0));
+                                __builtin_return_address(0), false);
 }
 EXPORT_SYMBOL(ioremap_prot);

@@ -422,7 +480,7 @@
                 return;
         }

-        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
+        memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

         /* Finally remove it */
         o = remove_vm_area((void __force *)addr);
@@ -430,6 +488,11 @@
         kfree(p);
 }
 EXPORT_SYMBOL(iounmap);
+
+int __init arch_ioremap_p4d_supported(void)
+{
+        return 0;
+}

 int __init arch_ioremap_pud_supported(void)
 {
@@ -519,7 +582,7 @@
                 /* For SEV, these areas are encrypted */
                 if (sev_active())
                         break;
-                /* Fallthrough */
+                fallthrough;

         case E820_TYPE_PRAM:
                 return true;
@@ -576,6 +639,7 @@
 static bool memremap_is_setup_data(resource_size_t phys_addr,
                                    unsigned long size)
 {
+        struct setup_indirect *indirect;
         struct setup_data *data;
         u64 paddr, paddr_next;

@@ -588,9 +652,35 @@

                 data = memremap(paddr, sizeof(*data),
                                 MEMREMAP_WB | MEMREMAP_DEC);
+                if (!data) {
+                        pr_warn("failed to memremap setup_data entry\n");
+                        return false;
+                }

                 paddr_next = data->next;
                 len = data->len;
+
+                if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+                        memunmap(data);
+                        return true;
+                }
+
+                if (data->type == SETUP_INDIRECT) {
+                        memunmap(data);
+                        data = memremap(paddr, sizeof(*data) + len,
+                                        MEMREMAP_WB | MEMREMAP_DEC);
+                        if (!data) {
+                                pr_warn("failed to memremap indirect setup_data\n");
+                                return false;
+                        }
+
+                        indirect = (struct setup_indirect *)data->data;
+
+                        if (indirect->type != SETUP_INDIRECT) {
+                                paddr = indirect->addr;
+                                len = indirect->len;
+                        }
+                }

                 memunmap(data);

@@ -610,22 +700,51 @@
 static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
                                                 unsigned long size)
 {
+        struct setup_indirect *indirect;
         struct setup_data *data;
         u64 paddr, paddr_next;

         paddr = boot_params.hdr.setup_data;
         while (paddr) {
-                unsigned int len;
+                unsigned int len, size;

                 if (phys_addr == paddr)
                         return true;

                 data = early_memremap_decrypted(paddr, sizeof(*data));
+                if (!data) {
+                        pr_warn("failed to early memremap setup_data entry\n");
+                        return false;
+                }
+
+                size = sizeof(*data);

                 paddr_next = data->next;
                 len = data->len;

-                early_memunmap(data, sizeof(*data));
+                if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+                        early_memunmap(data, sizeof(*data));
+                        return true;
+                }
+
+                if (data->type == SETUP_INDIRECT) {
+                        size += len;
+                        early_memunmap(data, sizeof(*data));
+                        data = early_memremap_decrypted(paddr, size);
+                        if (!data) {
+                                pr_warn("failed to early memremap indirect setup_data\n");
+                                return false;
+                        }
+
+                        indirect = (struct setup_indirect *)data->data;
+
+                        if (indirect->type != SETUP_INDIRECT) {
+                                paddr = indirect->addr;
+                                len = indirect->len;
+                        }
+                }
+
+                early_memunmap(data, size);

                 if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
                         return true;
@@ -712,10 +831,8 @@
 void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
                                          unsigned long size)
 {
-        /* Be sure the write-protect PAT entry is set for write-protect */
-        if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
+        if (!x86_has_pat_wp())
                 return NULL;
-
         return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
 }

@@ -733,10 +850,8 @@
 void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
                                          unsigned long size)
 {
-        /* Be sure the write-protect PAT entry is set for write-protect */
-        if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
+        if (!x86_has_pat_wp())
                 return NULL;
-
         return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
 }
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -817,11 +932,11 @@
         pte = early_ioremap_pte(addr);

         /* Sanitize 'prot' against any unsupported bits: */
-        pgprot_val(flags) &= __default_kernel_pte_mask;
+        pgprot_val(flags) &= __supported_pte_mask;

         if (pgprot_val(flags))
                 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
         else
                 pte_clear(&init_mm, addr, pte);
-        __flush_tlb_one_kernel(addr);
+        flush_tlb_one_kernel(addr);
 }