@@ -18,10 +18,11 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/swiotlb.h>
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/pfn.h>
 #include <linux/poison.h>
@@ -29,11 +30,11 @@
 #include <linux/export.h>
 #include <linux/cma.h>
 #include <linux/gfp.h>
-#include <linux/memblock.h>
+#include <linux/dma-direct.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/ptdump.h>
 #include <asm/dma.h>
 #include <asm/lowcore.h>
 #include <asm/tlb.h>
@@ -42,12 +43,18 @@
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>
 #include <asm/set_memory.h>
+#include <asm/kasan.h>
+#include <asm/dma-mapping.h>
+#include <asm/uv.h>
+#include <linux/virtio_config.h>

-pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
+
+bool initmem_freed;

 static void __init setup_zero_pages(void)
 {
@@ -59,7 +66,7 @@
         order = 7;

         /* Limit number of empty zero pages for small memory sizes */
-        while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+        while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
                 order--;

         empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
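The totalram_pages() call above is the accessor form of what used to be the plain totalram_pages counter; the heuristic that shrinks the zero-page area on small machines is unchanged. A stand-alone sketch of how that loop picks the allocation order for a few sample memory sizes (illustration only, not part of the patch; the helper name and the sample sizes are invented):

/* Illustration only -- mirrors the loop in setup_zero_pages() above. */
#include <stdio.h>

static unsigned int zero_page_order(unsigned long totalram_pages)
{
        unsigned int order = 7;         /* start with 128 zero pages */

        /* Limit number of empty zero pages for small memory sizes */
        while (order > 2 && (totalram_pages >> 10) < (1UL << order))
                order--;
        return order;
}

int main(void)
{
        /* 4 KiB pages: 128 MiB = 32768 pages, 512 MiB = 131072 pages */
        printf("128 MiB -> order %u\n", zero_page_order(32768));       /* 5 */
        printf("512 MiB -> order %u\n", zero_page_order(131072));      /* 7 */
        return 0;
}

With 4 KiB pages, a 128 MiB guest ends up with 32 zero pages (order 5), while anything from roughly 512 MiB up keeps the full 128-page (order 7) area.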
@@ -98,8 +105,9 @@
         S390_lowcore.user_asce = S390_lowcore.kernel_asce;
         crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
         vmem_map_init();
+        kasan_copy_shadow(init_mm.pgd);

-        /* enable virtual mapping in kernel mode */
+        /* enable virtual mapping in kernel mode */
         __ctl_load(S390_lowcore.kernel_asce, 1, 1);
         __ctl_load(S390_lowcore.kernel_asce, 7, 7);
         __ctl_load(S390_lowcore.kernel_asce, 13, 13);
@@ -107,13 +115,14 @@
         psw_bits(psw).dat = 1;
         psw_bits(psw).as = PSW_BITS_AS_HOME;
         __load_psw_mask(psw.mask);
+        kasan_free_early_identity();

-        sparse_memory_present_with_active_regions(MAX_NUMNODES);
         sparse_init();
+        zone_dma_bits = 31;
         memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
         max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
         max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-        free_area_init_nodes(max_zone_pfns);
+        free_area_init(max_zone_pfns);
 }

 void mark_rodata_ro(void)
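The zone_dma_bits = 31 assignment tells the generic dma-direct code (hence the new linux/dma-direct.h include above) that s390's ZONE_DMA ends at 2 GiB rather than the default 16 MiB ISA-style limit. A quick check of that arithmetic (illustration only, not part of the patch):

/* Illustration only: what the zone_dma_bits value above translates to. */
#include <stdio.h>

int main(void)
{
        unsigned int zone_dma_bits = 31;        /* as set in paging_init() above */
        unsigned long long dma_limit = 1ULL << zone_dma_bits;

        /* prints 0x80000000 (2048 MiB): ZONE_DMA covers the first 2 GiB */
        printf("ZONE_DMA limit: %#llx (%llu MiB)\n", dma_limit, dma_limit >> 20);
        return 0;
}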
@@ -122,6 +131,58 @@

         set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
         pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
+        debug_checkwx();
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+        int i;
+
+        /* make specified pages unshared, (swiotlb, dma_free) */
+        for (i = 0; i < numpages; ++i) {
+                uv_remove_shared(addr);
+                addr += PAGE_SIZE;
+        }
+        return 0;
+}
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+        int i;
+        /* make specified pages shared (swiotlb, dma_alloca) */
+        for (i = 0; i < numpages; ++i) {
+                uv_set_shared(addr);
+                addr += PAGE_SIZE;
+        }
+        return 0;
+}
+
+/* are we a protected virtualization guest? */
+bool force_dma_unencrypted(struct device *dev)
+{
+        return is_prot_virt_guest();
+}
+
+#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+
+int arch_has_restricted_virtio_memory_access(void)
+{
+        return is_prot_virt_guest();
+}
+EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);
+
+#endif
+
+/* protected virtualization */
+static void pv_init(void)
+{
+        if (!is_prot_virt_guest())
+                return;
+
+        /* make sure bounce buffers are shared */
+        swiotlb_force = SWIOTLB_FORCE;
+        swiotlb_init(1);
+        swiotlb_update_mem_attributes();
 }

 void __init mem_init(void)
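The set_memory_decrypted()/set_memory_encrypted() pair added above maps the generic "encrypted memory" hooks onto the s390 ultravisor share/unshare calls, and pv_init() forces swiotlb so that streaming DMA in a protected-virtualization guest always goes through bounce pages that have been shared this way (swiotlb_update_mem_attributes() is what marks the bounce pool shared). A rough sketch of the lifecycle a buffer would go through; the helper names and error handling here are hypothetical, only the two set_memory_*() calls come from the patch:

/* Hypothetical illustration of the share/unshare lifecycle -- not part of the patch.
 * In-kernel context assumed (linux/gfp.h, linux/mm.h, asm/set_memory.h).
 */
static void *pv_share_buffer(size_t size)
{
        int numpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        void *buf = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

        if (!buf)
                return NULL;
        /* make the pages visible to the hypervisor (uv_set_shared() per page) */
        set_memory_decrypted((unsigned long)buf, numpages);
        return buf;
}

static void pv_unshare_buffer(void *buf, size_t size)
{
        int numpages = PAGE_ALIGN(size) >> PAGE_SHIFT;

        /* withdraw the pages from the hypervisor before handing them back */
        set_memory_encrypted((unsigned long)buf, numpages);
        free_pages((unsigned long)buf, get_order(size));
}

In the tree itself these calls are made from the generic swiotlb and dma-direct code rather than from individual drivers, which is why force_dma_unencrypted() above only has to report whether the guest is running in protected-virtualization mode.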
@@ -132,11 +193,13 @@
         set_max_mapnr(max_low_pfn);
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

+        pv_init();
+
         /* Setup guest page hinting */
         cmma_init();

         /* this will put all low memory onto the freelists */
-        free_all_bootmem();
+        memblock_free_all();
         setup_zero_pages();     /* Setup zeroed pages. */

         cmma_init_nodat();
@@ -146,19 +209,12 @@

 void free_initmem(void)
 {
+        initmem_freed = true;
         __set_memory((unsigned long)_sinittext,
                      (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
                      SET_MEMORY_RW | SET_MEMORY_NX);
         free_initmem_default(POISON_FREE_INITMEM);
 }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
-                           "initrd");
-}
-#endif

 unsigned long memory_block_size_bytes(void)
 {
@@ -222,18 +278,24 @@

 #endif /* CONFIG_CMA */

-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-                bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+                struct mhp_params *params)
 {
         unsigned long start_pfn = PFN_DOWN(start);
         unsigned long size_pages = PFN_DOWN(size);
         int rc;

+        if (WARN_ON_ONCE(params->altmap))
+                return -EINVAL;
+
+        if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+                return -EINVAL;
+
         rc = vmem_add_mapping(start, size);
         if (rc)
                 return rc;

-        rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
+        rc = __add_pages(nid, start_pfn, size_pages, params);
         if (rc)
                 vmem_remove_mapping(start, size);
         return rc;
---|