[...]
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Based on arch/arm/mm/init.c
  *
  * Copyright (C) 1995-2005 Russell King
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #include <linux/kernel.h>
[...]
 #include <linux/errno.h>
 #include <linux/swap.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/cache.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
[...]
 #include <linux/sort.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
+#include <linux/hugetlb.h>
+#include <linux/acpi_iort.h>
+#include <linux/rk-dma-heap.h>

 #include <asm/boot.h>
 #include <asm/fixmap.h>
 #include <asm/kasan.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/kvm_host.h>
 #include <asm/memory.h>
 #include <asm/numa.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 #include <asm/tlb.h>
 #include <asm/alternative.h>
-
-EXPORT_SYMBOL_GPL(kimage_vaddr);

 /*
  * We need to be able to catch inadvertent references to memstart_addr
[...]
  * that cannot be mistaken for a real physical address.
  */
 s64 memstart_addr __ro_after_init = -1;
-phys_addr_t arm64_dma_phys_limit __ro_after_init;
+EXPORT_SYMBOL(memstart_addr);

-#ifdef CONFIG_BLK_DEV_INITRD
-static int __init early_initrd(char *p)
-{
-	unsigned long start, size;
-	char *endp;
-
-	start = memparse(p, &endp);
-	if (*endp == ',') {
-		size = memparse(endp + 1, NULL);
-
-		initrd_start = start;
-		initrd_end = start + size;
-	}
-	return 0;
-}
-early_param("initrd", early_initrd);
+/*
+ * If the corresponding config options are enabled, we create both ZONE_DMA
+ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
+ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
+ * In such a case, ZONE_DMA32 covers the rest of the 32-bit addressable
+ * memory; otherwise it is empty.
+ *
+ * Memory reservation for the crash kernel is either done early or deferred,
+ * depending on the DMA memory zone configs (ZONE_DMA) --
+ *
+ * In the absence of ZONE_DMA configs, arm64_dma_phys_limit is initialized
+ * here instead of in max_zone_phys(). This allows early reservation of
+ * crash kernel memory, which has a dependency on arm64_dma_phys_limit.
+ * Reserving crash kernel memory early allows linear creation of block
+ * mappings (greater than page-granularity) for all the memory bank ranges.
+ * In this scheme a comparatively quicker boot is observed.
+ *
+ * If ZONE_DMA configs are defined, crash kernel memory reservation is
+ * delayed until the DMA zone memory range sizes are initialized in
+ * zone_sizes_init(). The deferral is necessary to steer clear of the DMA
+ * zone memory range and avoid overlapping allocations. As a consequence,
+ * the crash kernel boundaries are not known when all the bank memory
+ * ranges are mapped, so the crash kernel range cannot be excluded from
+ * the block mappings; page-granularity mappings are therefore created for
+ * the entire memory range. Hence a slightly slower boot is observed.
+ *
+ * Note: Page-granularity mappings are necessary for the crash kernel
+ * memory range so that its size can be shrunk via the
+ * /sys/kernel/kexec_crash_size interface.
+ */
+#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
+phys_addr_t __ro_after_init arm64_dma_phys_limit;
+#else
+phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
 #endif
+
+/*
+ * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
+ * CONFIG_ZONE_DMA32.
+ */
+static bool disable_dma32 __ro_after_init;
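When neither ZONE_DMA nor ZONE_DMA32 is configured, the fallback above makes arm64_dma_phys_limit cover the whole physical address space, which is what lets reserve_crashkernel() run early with no DMA-zone dependency. A minimal userspace sketch of that fallback value, assuming PHYS_MASK_SHIFT is 48 (the real value is config dependent and can also be 52):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed value: CONFIG_ARM64_PA_BITS_48; PHYS_MASK_SHIFT can also be 52. */
#define PHYS_MASK_SHIFT 48
#define PHYS_MASK       (((uint64_t)1 << PHYS_MASK_SHIFT) - 1)

int main(void)
{
	/* With no DMA zones, the "DMA limit" is the end of the PA space,
	 * so an early crashkernel reservation has nothing to avoid. */
	printf("arm64_dma_phys_limit = %#llx\n",
	       (unsigned long long)(PHYS_MASK + 1));
	return 0;
}
```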

 #ifdef CONFIG_KEXEC_CORE
 /*
[...]

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
-		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
+		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
						    crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
[...]
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
 }
-
-static void __init kexec_reserve_crashkres_pages(void)
-{
-#ifdef CONFIG_HIBERNATION
-	phys_addr_t addr;
-	struct page *page;
-
-	if (!crashk_res.end)
-		return;
-
-	/*
-	 * To reduce the size of hibernation image, all the pages are
-	 * marked as Reserved initially.
-	 */
-	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
-	     addr += PAGE_SIZE) {
-		page = phys_to_page(addr);
-		SetPageReserved(page);
-	}
-#endif
-}
 #else
 static void __init reserve_crashkernel(void)
-{
-}
-
-static void __init kexec_reserve_crashkres_pages(void)
 {
 }
 #endif /* CONFIG_KEXEC_CORE */
[...]
 {
 }
 #endif /* CONFIG_CRASH_DUMP */
-/*
- * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
- * currently assumes that for memory starting above 4G, 32-bit devices will
- * use a DMA offset.
- */
-static phys_addr_t __init max_zone_dma_phys(void)
-{
-	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
-	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
-}

-#ifdef CONFIG_NUMA
+/*
+ * Return the maximum physical address for a zone accessible by the given bits
+ * limit. If DRAM starts above 32-bit, expand the zone to the maximum
+ * available memory, otherwise cap it at 32-bit.
+ */
+static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
+{
+	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
+	phys_addr_t phys_start = memblock_start_of_DRAM();
+
+	if (phys_start > U32_MAX)
+		zone_mask = PHYS_ADDR_MAX;
+	else if (phys_start > zone_mask)
+		zone_mask = U32_MAX;
+
+	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
+}
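To see the three cases the new helper distinguishes, here is a self-contained harness with the memblock queries replaced by explicit DRAM bounds (the bounds are hypothetical and min() is open-coded):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define U32_MAX		0xffffffffULL
#define PHYS_ADDR_MAX	(~(phys_addr_t)0)

static phys_addr_t min_pa(phys_addr_t a, phys_addr_t b)
{
	return a < b ? a : b;
}

/* Same arithmetic as the new max_zone_phys(), with DRAM bounds as inputs. */
static phys_addr_t max_zone_phys(unsigned int zone_bits,
				 phys_addr_t dram_start, phys_addr_t dram_end)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);

	if (dram_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;	/* DRAM entirely above 4 GiB */
	else if (dram_start > zone_mask)
		zone_mask = U32_MAX;		/* zone would be empty: widen to 32-bit */

	return min_pa(zone_mask, dram_end - 1) + 1;
}

int main(void)
{
	/* DRAM at 2 GiB..6 GiB: the 32-bit zone is capped at 4 GiB. */
	printf("%#llx\n", (unsigned long long)
	       max_zone_phys(32, 0x80000000ULL, 0x180000000ULL));
	/* DRAM entirely above 4 GiB: the zone expands to the end of DRAM. */
	printf("%#llx\n", (unsigned long long)
	       max_zone_phys(32, 0x8000000000ULL, 0x8080000000ULL));
	return 0;
}
```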

 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+	unsigned int __maybe_unused acpi_zone_dma_bits;
+	unsigned int __maybe_unused dt_zone_dma_bits;
+	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

+#ifdef CONFIG_ZONE_DMA
+	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
+	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
+	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
+	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
+#endif
 #ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+	max_zone_pfns[ZONE_DMA32] = disable_dma32 ? 0 : PFN_DOWN(dma32_phys_limit);
+	if (!arm64_dma_phys_limit)
+		arm64_dma_phys_limit = dma32_phys_limit;
 #endif
	max_zone_pfns[ZONE_NORMAL] = max;

-	free_area_init_nodes(max_zone_pfns);
+	free_area_init(max_zone_pfns);
 }
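A worked example of the ZONE_DMA sizing above: fls64() turns the highest CPU address reachable by DMA into a bit count, and min3() clamps it to 32 bits. With a devicetree limit of 0x3fffffff (the Raspberry Pi 4 case mentioned earlier) and no ACPI limit, zone_dma_bits comes out as 30, i.e. ZONE_DMA ends at 1 GiB. The firmware values below are made up for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* fls64: 1-based index of the most significant set bit, 0 for 0. */
static unsigned int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* Hypothetical firmware answers: a DT that limits DMA to the first
	 * 1 GiB, and no ACPI limit (all address bits usable). */
	uint64_t dt_max_cpu_address = 0x3fffffffULL;
	uint64_t acpi_max_cpu_address = ~0ULL;

	unsigned int zone_dma_bits = min3u(32U, fls64(dt_max_cpu_address),
					   fls64(acpi_max_cpu_address));

	printf("zone_dma_bits = %u -> ZONE_DMA ends at %#llx\n",
	       zone_dma_bits, (unsigned long long)(1ULL << zone_dma_bits));
	return 0;
}
```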

-#else
-
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
+static int __init early_disable_dma32(char *buf)
 {
-	struct memblock_region *reg;
-	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long max_dma = min;
+	if (!buf)
+		return -EINVAL;

-	memset(zone_size, 0, sizeof(zone_size));
+	if (!strcmp(buf, "on"))
+		disable_dma32 = true;

-	/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA32
-	max_dma = PFN_DOWN(arm64_dma_phys_limit);
-	zone_size[ZONE_DMA32] = max_dma - min;
-#endif
-	zone_size[ZONE_NORMAL] = max - max_dma;
-
-	memcpy(zhole_size, zone_size, sizeof(zhole_size));
-
-	for_each_memblock(memory, reg) {
-		unsigned long start = memblock_region_memory_base_pfn(reg);
-		unsigned long end = memblock_region_memory_end_pfn(reg);
-
-		if (start >= max)
-			continue;
-
-#ifdef CONFIG_ZONE_DMA32
-		if (start < max_dma) {
-			unsigned long dma_end = min(end, max_dma);
-			zhole_size[ZONE_DMA32] -= dma_end - start;
-		}
-#endif
-		if (end > max_dma) {
-			unsigned long normal_end = min(end, max);
-			unsigned long normal_start = max(start, max_dma);
-			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
-		}
-	}
-
-	free_area_init_node(0, zone_size, min, zhole_size);
+	return 0;
 }
+early_param("disable_dma32", early_disable_dma32);
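The handler only reacts to the literal string "on", so booting with disable_dma32=on empties ZONE_DMA32 in zone_sizes_init() above while leaving everything else alone. A userspace sketch of just the string handling (the early_param() plumbing is kernel-side):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool disable_dma32;

static int early_disable_dma32(char *buf)
{
	if (!buf)
		return -1;	/* -EINVAL in the kernel */

	if (!strcmp(buf, "on"))
		disable_dma32 = true;

	return 0;
}

int main(void)
{
	/* What the kernel would pass for cmdline "disable_dma32=on". */
	early_disable_dma32("on");
	printf("disable_dma32 = %d\n", disable_dma32);
	return 0;
}
```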

-#endif /* CONFIG_NUMA */
-
-#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;
+
+#ifdef CONFIG_SPARSEMEM
+	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+		return 0;
+
+	if (!valid_section(__pfn_to_section(pfn)))
+		return 0;
+
+	/*
+	 * ZONE_DEVICE memory does not have memblock entries, so the
+	 * memblock_is_map_memory() check for ZONE_DEVICE based addresses
+	 * will always fail. Even normal hotplugged memory will never have
+	 * the MEMBLOCK_NOMAP flag set in its memblock entries. Skip the
+	 * memblock search for all non-early memory sections, covering all
+	 * of hotplug memory, both normal and ZONE_DEVICE based.
+	 */
+	if (!early_section(__pfn_to_section(pfn)))
+		return pfn_section_valid(__pfn_to_section(pfn), pfn);
+#endif
	return memblock_is_map_memory(addr);
 }
 EXPORT_SYMBOL(pfn_valid);
-#endif
-
-#ifndef CONFIG_SPARSEMEM
-static void __init arm64_memory_present(void)
-{
-}
-#else
-static void __init arm64_memory_present(void)
-{
-	struct memblock_region *reg;
-
-	for_each_memblock(memory, reg) {
-		int nid = memblock_get_region_node(reg);
-
-		memory_present(nid, memblock_region_memory_base_pfn(reg),
-			       memblock_region_memory_end_pfn(reg));
-	}
-}
-#endif
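The new SPARSEMEM path in pfn_valid() starts by mapping the pfn onto its sparsemem section and only then asks whether that section (and, for non-early sections, the subsection) is present. A sketch of the index arithmetic, assuming 4 KiB pages and 1 GiB sections (SECTION_SIZE_BITS of 30; both values are configuration dependent):

```c
#include <stdio.h>

/* Assumed, config-dependent values. */
#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	30
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define pfn_to_section_nr(pfn)	((pfn) >> PFN_SECTION_SHIFT)

int main(void)
{
	unsigned long pfn = 0x80000;	/* physical address 0x80000000 */

	/* With 1 GiB sections, pfn 0x80000 lands in section 2; pfn_valid()
	 * then checks valid_section()/pfn_section_valid() on that entry. */
	printf("pfn %#lx -> section %lu\n", pfn, pfn_to_section_nr(pfn));
	return 0;
}
```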

 static phys_addr_t memory_limit = PHYS_ADDR_MAX;

[...]

 void __init arm64_memblock_init(void)
 {
-	const s64 linear_region_size = -(s64)PAGE_OFFSET;
+	const s64 linear_region_size = BIT(vabits_actual - 1);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
-
-	/*
-	 * Ensure that the linear region takes up exactly half of the kernel
-	 * virtual address space. This way, we can distinguish a linear address
-	 * from a kernel/module/vmalloc address by testing a single bit.
-	 */
-	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
[...]
	}

	/*
+	 * If we are running with a 52-bit kernel VA config on a system that
+	 * does not support it, we have to place the available physical
+	 * memory in the 48-bit addressable part of the linear region, i.e.,
+	 * we have to move it upward. Since memstart_addr represents the
+	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
+		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
+
+	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
[...]
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

-	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd to become inaccessible via the linear mapping.
		 * Otherwise, this is a no-op
		 */
-		u64 base = initrd_start & PAGE_MASK;
-		u64 size = PAGE_ALIGN(initrd_end) - base;
+		u64 base = phys_initrd_start & PAGE_MASK;
+		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
[...]
		     base + size > memblock_start_of_DRAM() +
				   linear_region_size,
		     "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
-			initrd_start = 0;
+			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
[...]

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
-		u64 range = linear_region_size -
-			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+		int parange = cpuid_feature_extract_unsigned_field(
+					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
+		s64 range = linear_region_size -
+			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
-		 * margin, the size of the region that the available physical
-		 * memory spans, randomize the linear region as well.
+		 * margin, the size of the region that the physical memory can
+		 * span, randomize the linear region as well.
		 */
-		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
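The randomization arithmetic above slides memstart_addr down by a seed-scaled number of ARM64_MEMSTART_ALIGN units that fit in the slack between the linear region and the CPU's physical address range. A standalone sketch with assumed sizes (48-bit VA, a 40-bit PA range, and 1 GiB alignment as a stand-in for ARM64_MEMSTART_ALIGN; all three vary by system and configuration):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t linear_region_size = 1LL << 47;	/* half of a 48-bit VA space */
	int64_t pa_size = 1LL << 40;		/* from ID_AA64MMFR0_EL1.PARange */
	uint64_t memstart_align = 1ULL << 30;	/* stand-in for ARM64_MEMSTART_ALIGN */
	uint16_t seed = 0x1234;			/* memstart_offset_seed from the FDT */

	int64_t range = linear_region_size - pa_size;

	if (seed > 0 && range >= (int64_t)memstart_align) {
		range /= (int64_t)memstart_align;
		/* seed/65536 selects a fraction of the available slots. */
		uint64_t slide = memstart_align * (((uint64_t)range * seed) >> 16);
		printf("memstart_addr slides down by %#llx\n",
		       (unsigned long long)slide);
	}
	return 0;
}
```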
[...]
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (initrd_start) {
-		memblock_reserve(initrd_start, initrd_end - initrd_start);
-
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
-		initrd_start = __phys_to_virt(initrd_start);
-		initrd_end = __phys_to_virt(initrd_end);
+		initrd_start = __phys_to_virt(phys_initrd_start);
+		initrd_end = initrd_start + phys_initrd_size;
	}
-#endif

	early_init_fdt_scan_reserved_mem();

-	/* 4GB maximum for 32-bit only capable devices */
-	if (IS_ENABLED(CONFIG_ZONE_DMA32))
-		arm64_dma_phys_limit = max_zone_dma_phys();
-	else
-		arm64_dma_phys_limit = PHYS_MASK + 1;
-
-	reserve_crashkernel();
-
	reserve_elfcorehdr();

+	if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
+		reserve_crashkernel();
+
	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
-	dma_contiguous_reserve(arm64_dma_phys_limit);
-
-	memblock_allow_resize();
 }
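For the 52-bit VA fallback earlier in arm64_memblock_init(), the subtracted constant is just the distance between the two linear-map bases, i.e. 2^52 - 2^48. A quick check of that arithmetic (the _PAGE_OFFSET() definition below mirrors the kernel's negation trick, here done on a uint64_t):

```c
#include <stdint.h>
#include <stdio.h>

/* _PAGE_OFFSET(va) is the lowest linear-map address for a va-bits layout:
 * the sign-extended value -(1 << va). */
#define _PAGE_OFFSET(va)	((uint64_t)-(1ULL << (va)))

int main(void)
{
	uint64_t delta = _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/* 0x000f000000000000: how far memstart_addr drops so that RAM still
	 * appears at the 48-bit PAGE_OFFSET when the CPU lacks 52 VA bits. */
	printf("memstart_addr -= %#llx\n", (unsigned long long)delta);
	return 0;
}
```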

 void __init bootmem_init(void)
[...]
	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
+	min_low_pfn = min;

	arm64_numa_init();
-	/*
-	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
-	 * done after the fixed reservations.
-	 */
-	arm64_memory_present();

+	/*
+	 * This must be done after arm64_numa_init(), which calls numa_init()
+	 * to initialize node_online_map; hugetlb_cma_reserve() uses that map
+	 * when allocating the required CMA size across online nodes.
+	 */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+	arm64_hugetlb_cma_reserve();
+#endif
+
+	dma_pernuma_cma_reserve();
+
+	kvm_hyp_reserve();
+
+	/*
+	 * sparse_init() tries to allocate memory from memblock, so it must
+	 * be done after the fixed reservations.
+	 */
	sparse_init();
	zone_sizes_init(min, max);
+
+	/*
+	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
+	 */
+	dma_contiguous_reserve(arm64_dma_phys_limit);
+	rk_dma_heap_cma_setup();
+
+	/*
+	 * request_standard_resources() depends on the crash kernel's memory
+	 * being reserved, so do it here.
+	 */
+	if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
+		reserve_crashkernel();

	memblock_dump_all();
 }
[...]
	 * memmap array.
	 */
	if (pg < pgend)
-		free_bootmem(pg, pgend - pg);
+		memblock_free(pg, pgend - pg);
 }

 /*
[...]
 */
 static void __init free_unused_memmap(void)
 {
-	unsigned long start, prev_end = 0;
-	struct memblock_region *reg;
+	unsigned long start, end, prev_end = 0;
+	int i;

-	for_each_memblock(memory, reg) {
-		start = __phys_to_pfn(reg->base);
-
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
 #ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
[...]
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
-		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
-				 MAX_ORDER_NR_PAGES);
+		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
	}

 #ifdef CONFIG_SPARSEMEM
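free_unused_memmap() now walks pfn ranges instead of raw memblock regions, but the gap arithmetic is unchanged: round each bank start down and the previous bank end up to MAX_ORDER_NR_PAGES, and free the memmap covering the hole between them. A toy run with a made-up two-bank layout and MAX_ORDER_NR_PAGES of 1024 pages (the real constant is configuration dependent):

```c
#include <stdio.h>

#define MAX_ORDER_NR_PAGES	1024UL
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ROUND_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* Two hypothetical memory banks as pfn ranges [start, end). */
	unsigned long banks[2][2] = {
		{ 0x80000, 0x9f000 },
		{ 0xc0000, 0xe0000 },
	};
	unsigned long prev_end = 0;

	for (int i = 0; i < 2; i++) {
		unsigned long start = ROUND_DOWN(banks[i][0],
						 MAX_ORDER_NR_PAGES);

		/* The memmap between the aligned end of one bank and the
		 * aligned start of the next backs no real pages. */
		if (prev_end && prev_end < start)
			printf("memmap for pfns [%#lx, %#lx) can be freed\n",
			       prev_end, start);

		prev_end = ALIGN_UP(banks[i][1], MAX_ORDER_NR_PAGES);
	}
	return 0;
}
```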
[...]
 void __init mem_init(void)
 {
	if (swiotlb_force == SWIOTLB_FORCE ||
-	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

-	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
+	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

 #ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
 #endif
	/* this will put all unused low memory onto the freelists */
-	free_all_bootmem();
-
-	kexec_reserve_crashkres_pages();
+	memblock_free_all();

	mem_init_print_info(NULL);
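The swiotlb decision above is a single comparison: bounce buffers are only needed when some page frames lie above arm64_dma_phys_limit. The same check with assumed numbers (a 4 GiB DMA limit and 8 GiB of RAM):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	uint64_t arm64_dma_phys_limit = 1ULL << 32;	/* e.g. 4 GiB limit */
	uint64_t max_pfn = PFN_DOWN(0x200000000ULL);	/* 8 GiB of RAM */

	if (max_pfn > PFN_DOWN(arm64_dma_phys_limit))
		puts("swiotlb_init(1): RAM above the DMA limit needs bouncing");
	else
		puts("SWIOTLB_NO_FORCE: all RAM is DMA-addressable");
	return 0;
}
```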
612 | 600 | |
---|
.. | .. |
---|
615 | 603 | * detected at build time already. |
---|
616 | 604 | */ |
---|
617 | 605 | #ifdef CONFIG_COMPAT |
---|
618 | | - BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); |
---|
619 | | -#endif |
---|
620 | | - |
---|
621 | | -#ifdef CONFIG_SPARSEMEM_VMEMMAP |
---|
622 | | - /* |
---|
623 | | - * Make sure we chose the upper bound of sizeof(struct page) |
---|
624 | | - * correctly when sizing the VMEMMAP array. |
---|
625 | | - */ |
---|
626 | | - BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); |
---|
| 606 | + BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64); |
---|
627 | 607 | #endif |
---|
628 | 608 | |
---|
629 | 609 | if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { |
---|
.. | .. |
---|
640 | 620 | { |
---|
641 | 621 | free_reserved_area(lm_alias(__init_begin), |
---|
642 | 622 | lm_alias(__init_end), |
---|
643 | | - 0, "unused kernel"); |
---|
| 623 | + POISON_FREE_INITMEM, "unused kernel"); |
---|
644 | 624 | /* |
---|
645 | 625 | * Unmap the __init region but leave the VM area in place. This |
---|
646 | 626 | * prevents the region from being reused for kernel modules, which |
---|
.. | .. |
---|
649 | 629 | unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); |
---|
650 | 630 | } |
---|
651 | 631 | |
---|
652 | | -#ifdef CONFIG_BLK_DEV_INITRD |
---|
653 | | - |
---|
654 | | -static int keep_initrd __initdata; |
---|
655 | | - |
---|
656 | | -void __init free_initrd_mem(unsigned long start, unsigned long end) |
---|
657 | | -{ |
---|
658 | | - if (!keep_initrd) { |
---|
659 | | - free_reserved_area((void *)start, (void *)end, 0, "initrd"); |
---|
660 | | - memblock_free(__virt_to_phys(start), end - start); |
---|
661 | | - } |
---|
662 | | -} |
---|
663 | | - |
---|
664 | | -static int __init keepinitrd_setup(char *__unused) |
---|
665 | | -{ |
---|
666 | | - keep_initrd = 1; |
---|
667 | | - return 1; |
---|
668 | | -} |
---|
669 | | - |
---|
670 | | -__setup("keepinitrd", keepinitrd_setup); |
---|
671 | | -#endif |
---|
672 | | - |
---|
673 | | -/* |
---|
674 | | - * Dump out memory limit information on panic. |
---|
675 | | - */ |
---|
676 | | -static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p) |
---|
| 632 | +void dump_mem_limit(void) |
---|
677 | 633 | { |
---|
678 | 634 | if (memory_limit != PHYS_ADDR_MAX) { |
---|
679 | 635 | pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20); |
---|
680 | 636 | } else { |
---|
681 | 637 | pr_emerg("Memory Limit: none\n"); |
---|
682 | 638 | } |
---|
683 | | - return 0; |
---|
684 | 639 | } |
---|
685 | | - |
---|
686 | | -static struct notifier_block mem_limit_notifier = { |
---|
687 | | - .notifier_call = dump_mem_limit, |
---|
688 | | -}; |
---|
689 | | - |
---|
690 | | -static int __init register_mem_limit_dumper(void) |
---|
691 | | -{ |
---|
692 | | - atomic_notifier_chain_register(&panic_notifier_list, |
---|
693 | | - &mem_limit_notifier); |
---|
694 | | - return 0; |
---|
695 | | -} |
---|
696 | | -__initcall(register_mem_limit_dumper); |
---|