2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/arch/arm64/mm/init.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Based on arch/arm/mm/init.c
  *
  * Copyright (C) 1995-2005 Russell King
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
 #include <linux/kernel.h>
@@ -22,7 +11,6 @@
 #include <linux/errno.h>
 #include <linux/swap.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/cache.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
@@ -32,28 +20,30 @@
 #include <linux/sort.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
+#include <linux/hugetlb.h>
+#include <linux/acpi_iort.h>
+#include <linux/rk-dma-heap.h>
 
 #include <asm/boot.h>
 #include <asm/fixmap.h>
 #include <asm/kasan.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/kvm_host.h>
 #include <asm/memory.h>
 #include <asm/numa.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 #include <asm/tlb.h>
 #include <asm/alternative.h>
-
-EXPORT_SYMBOL_GPL(kimage_vaddr);
 
 /*
  * We need to be able to catch inadvertent references to memstart_addr
@@ -62,25 +52,48 @@
  * that cannot be mistaken for a real physical address.
  */
 s64 memstart_addr __ro_after_init = -1;
-phys_addr_t arm64_dma_phys_limit __ro_after_init;
+EXPORT_SYMBOL(memstart_addr);
 
-#ifdef CONFIG_BLK_DEV_INITRD
-static int __init early_initrd(char *p)
-{
-        unsigned long start, size;
-        char *endp;
-
-        start = memparse(p, &endp);
-        if (*endp == ',') {
-                size = memparse(endp + 1, NULL);
-
-                initrd_start = start;
-                initrd_end = start + size;
-        }
-        return 0;
-}
-early_param("initrd", early_initrd);
+/*
+ * If the corresponding config options are enabled, we create both ZONE_DMA
+ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
+ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
+ * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
+ * otherwise it is empty.
+ *
+ * Memory reservation for crash kernel is either done early or deferred
+ * depending on DMA memory zones configs (ZONE_DMA) --
+ *
+ * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized
+ * here instead of max_zone_phys(). This lets early reservation of
+ * crash kernel memory which has a dependency on arm64_dma_phys_limit.
+ * Reserving memory early for crash kernel allows linear creation of block
+ * mappings (greater than page-granularity) for all the memory bank ranges.
+ * In this scheme a comparatively quicker boot is observed.
+ *
+ * If ZONE_DMA configs are defined, crash kernel memory reservation
+ * is delayed until DMA zone memory range size initialization performed in
+ * zone_sizes_init(). The defer is necessary to steer clear of DMA zone
+ * memory range to avoid overlap allocation. So crash kernel memory boundaries
+ * are not known when mapping all bank memory ranges, which otherwise means
+ * not possible to exclude crash kernel range from creating block mappings
+ * so page-granularity mappings are created for the entire memory range.
+ * Hence a slightly slower boot is observed.
+ *
+ * Note: Page-granularity mappings are necessary for crash kernel memory
+ * range for shrinking its size via /sys/kernel/kexec_crash_size interface.
+ */
+#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
+phys_addr_t __ro_after_init arm64_dma_phys_limit;
+#else
+phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
 #endif
+
+/*
+ * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
+ * CONFIG_ZONE_DMA32.
+ */
+static bool disable_dma32 __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
 /*
@@ -105,7 +118,7 @@
 
         if (crash_base == 0) {
                 /* Current arm64 boot protocol requires 2MB alignment */
-                crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
+                crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
                                 crash_size, SZ_2M);
                 if (crash_base == 0) {
                         pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -137,33 +150,8 @@
         crashk_res.start = crash_base;
         crashk_res.end = crash_base + crash_size - 1;
 }
-
-static void __init kexec_reserve_crashkres_pages(void)
-{
-#ifdef CONFIG_HIBERNATION
-        phys_addr_t addr;
-        struct page *page;
-
-        if (!crashk_res.end)
-                return;
-
-        /*
-         * To reduce the size of hibernation image, all the pages are
-         * marked as Reserved initially.
-         */
-        for (addr = crashk_res.start; addr < (crashk_res.end + 1);
-                        addr += PAGE_SIZE) {
-                page = phys_to_page(addr);
-                SetPageReserved(page);
-        }
-#endif
-}
 #else
 static void __init reserve_crashkernel(void)
-{
-}
-
-static void __init kexec_reserve_crashkres_pages(void)
 {
 }
 #endif /* CONFIG_KEXEC_CORE */
@@ -218,104 +206,90 @@
 {
 }
 #endif /* CONFIG_CRASH_DUMP */
-/*
- * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
- * currently assumes that for memory starting above 4G, 32-bit devices will
- * use a DMA offset.
- */
-static phys_addr_t __init max_zone_dma_phys(void)
-{
-        phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
-        return min(offset + (1ULL << 32), memblock_end_of_DRAM());
-}
 
-#ifdef CONFIG_NUMA
+/*
+ * Return the maximum physical address for a zone accessible by the given bits
+ * limit. If DRAM starts above 32-bit, expand the zone to the maximum
+ * available memory, otherwise cap it at 32-bit.
+ */
+static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
+{
+        phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
+        phys_addr_t phys_start = memblock_start_of_DRAM();
+
+        if (phys_start > U32_MAX)
+                zone_mask = PHYS_ADDR_MAX;
+        else if (phys_start > zone_mask)
+                zone_mask = U32_MAX;
+
+        return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
+}
 
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
         unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+        unsigned int __maybe_unused acpi_zone_dma_bits;
+        unsigned int __maybe_unused dt_zone_dma_bits;
+        phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
 
+#ifdef CONFIG_ZONE_DMA
+        acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
+        dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
+        zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
+        arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
+        max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
+#endif
 #ifdef CONFIG_ZONE_DMA32
-        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+        max_zone_pfns[ZONE_DMA32] = disable_dma32 ? 0 : PFN_DOWN(dma32_phys_limit);
+        if (!arm64_dma_phys_limit)
+                arm64_dma_phys_limit = dma32_phys_limit;
 #endif
         max_zone_pfns[ZONE_NORMAL] = max;
 
-        free_area_init_nodes(max_zone_pfns);
+        free_area_init(max_zone_pfns);
 }
 
-#else
-
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
+static int __init early_disable_dma32(char *buf)
 {
-        struct memblock_region *reg;
-        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-        unsigned long max_dma = min;
+        if (!buf)
+                return -EINVAL;
 
-        memset(zone_size, 0, sizeof(zone_size));
+        if (!strcmp(buf, "on"))
+                disable_dma32 = true;
 
-        /* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA32
-        max_dma = PFN_DOWN(arm64_dma_phys_limit);
-        zone_size[ZONE_DMA32] = max_dma - min;
-#endif
-        zone_size[ZONE_NORMAL] = max - max_dma;
-
-        memcpy(zhole_size, zone_size, sizeof(zhole_size));
-
-        for_each_memblock(memory, reg) {
-                unsigned long start = memblock_region_memory_base_pfn(reg);
-                unsigned long end = memblock_region_memory_end_pfn(reg);
-
-                if (start >= max)
-                        continue;
-
-#ifdef CONFIG_ZONE_DMA32
-                if (start < max_dma) {
-                        unsigned long dma_end = min(end, max_dma);
-                        zhole_size[ZONE_DMA32] -= dma_end - start;
-                }
-#endif
-                if (end > max_dma) {
-                        unsigned long normal_end = min(end, max);
-                        unsigned long normal_start = max(start, max_dma);
-                        zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
-                }
-        }
-
-        free_area_init_node(0, zone_size, min, zhole_size);
+        return 0;
 }
+early_param("disable_dma32", early_disable_dma32);
 
-#endif /* CONFIG_NUMA */
-
-#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
         phys_addr_t addr = pfn << PAGE_SHIFT;
 
         if ((addr >> PAGE_SHIFT) != pfn)
                 return 0;
+
+#ifdef CONFIG_SPARSEMEM
+        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+                return 0;
+
+        if (!valid_section(__pfn_to_section(pfn)))
+                return 0;
+
+        /*
+         * ZONE_DEVICE memory does not have the memblock entries.
+         * memblock_is_map_memory() check for ZONE_DEVICE based
+         * addresses will always fail. Even the normal hotplugged
+         * memory will never have MEMBLOCK_NOMAP flag set in their
+         * memblock entries. Skip memblock search for all non early
+         * memory sections covering all of hotplug memory including
+         * both normal and ZONE_DEVICE based.
+         */
+        if (!early_section(__pfn_to_section(pfn)))
+                return pfn_section_valid(__pfn_to_section(pfn), pfn);
+#endif
         return memblock_is_map_memory(addr);
 }
 EXPORT_SYMBOL(pfn_valid);
-#endif
-
-#ifndef CONFIG_SPARSEMEM
-static void __init arm64_memory_present(void)
-{
-}
-#else
-static void __init arm64_memory_present(void)
-{
-        struct memblock_region *reg;
-
-        for_each_memblock(memory, reg) {
-                int nid = memblock_get_region_node(reg);
-
-                memory_present(nid, memblock_region_memory_base_pfn(reg),
-                               memblock_region_memory_end_pfn(reg));
-        }
-}
-#endif
 
 static phys_addr_t memory_limit = PHYS_ADDR_MAX;
 
@@ -368,20 +342,13 @@
 
 void __init arm64_memblock_init(void)
 {
-        const s64 linear_region_size = -(s64)PAGE_OFFSET;
+        const s64 linear_region_size = BIT(vabits_actual - 1);
 
         /* Handle linux,usable-memory-range property */
         fdt_enforce_memory_region();
 
         /* Remove memory above our supported physical address size */
         memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
-
-        /*
-         * Ensure that the linear region takes up exactly half of the kernel
-         * virtual address space. This way, we can distinguish a linear address
-         * from a kernel/module/vmalloc address by testing a single bit.
-         */
-        BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
 
         /*
          * Select a suitable value for the base of physical memory.
@@ -404,6 +371,16 @@
         }
 
         /*
+         * If we are running with a 52-bit kernel VA config on a system that
+         * does not support it, we have to place the available physical
+         * memory in the 48-bit addressable part of the linear region, i.e.,
+         * we have to move it upward. Since memstart_addr represents the
+         * physical address of PAGE_OFFSET, we have to *subtract* from it.
+         */
+        if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
+                memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
+
+        /*
          * Apply the memory limit if it was set. Since the kernel may be loaded
          * high up in memory, add back the kernel region that must be accessible
          * via the linear mapping.
@@ -413,14 +390,14 @@
                 memblock_add(__pa_symbol(_text), (u64)(_end - _text));
         }
 
-        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
+        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
                 /*
                  * Add back the memory we just removed if it results in the
                  * initrd to become inaccessible via the linear mapping.
                  * Otherwise, this is a no-op
                  */
-                u64 base = initrd_start & PAGE_MASK;
-                u64 size = PAGE_ALIGN(initrd_end) - base;
+                u64 base = phys_initrd_start & PAGE_MASK;
+                u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
 
                 /*
                  * We can only add back the initrd memory if we don't end up
@@ -434,7 +411,7 @@
                          base + size > memblock_start_of_DRAM() +
                                        linear_region_size,
                         "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
-                        initrd_start = 0;
+                        phys_initrd_size = 0;
                 } else {
                         memblock_remove(base, size); /* clear MEMBLOCK_ flags */
                         memblock_add(base, size);
@@ -444,15 +421,18 @@
 
         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                 extern u16 memstart_offset_seed;
-                u64 range = linear_region_size -
-                        (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+                u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+                int parange = cpuid_feature_extract_unsigned_field(
+                                        mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
+                s64 range = linear_region_size -
+                            BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
 
                 /*
                  * If the size of the linear region exceeds, by a sufficient
-                 * margin, the size of the region that the available physical
-                 * memory spans, randomize the linear region as well.
+                 * margin, the size of the region that the physical memory can
+                 * span, randomize the linear region as well.
                  */
-                if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+                if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
                         range /= ARM64_MEMSTART_ALIGN;
                         memstart_addr -= ARM64_MEMSTART_ALIGN *
                                          ((range * memstart_offset_seed) >> 16);
@@ -464,33 +444,20 @@
          * pagetables with memblock.
          */
         memblock_reserve(__pa_symbol(_text), _end - _text);
-#ifdef CONFIG_BLK_DEV_INITRD
-        if (initrd_start) {
-                memblock_reserve(initrd_start, initrd_end - initrd_start);
-
+        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
                 /* the generic initrd code expects virtual addresses */
-                initrd_start = __phys_to_virt(initrd_start);
-                initrd_end = __phys_to_virt(initrd_end);
+                initrd_start = __phys_to_virt(phys_initrd_start);
+                initrd_end = initrd_start + phys_initrd_size;
         }
-#endif
 
         early_init_fdt_scan_reserved_mem();
 
-        /* 4GB maximum for 32-bit only capable devices */
-        if (IS_ENABLED(CONFIG_ZONE_DMA32))
-                arm64_dma_phys_limit = max_zone_dma_phys();
-        else
-                arm64_dma_phys_limit = PHYS_MASK + 1;
-
-        reserve_crashkernel();
-
         reserve_elfcorehdr();
 
+        if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
+                reserve_crashkernel();
+
         high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
-        dma_contiguous_reserve(arm64_dma_phys_limit);
-
-        memblock_allow_resize();
 }
 
 void __init bootmem_init(void)
@@ -503,16 +470,42 @@
         early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
 
         max_pfn = max_low_pfn = max;
+        min_low_pfn = min;
 
         arm64_numa_init();
-        /*
-         * Sparsemem tries to allocate bootmem in memory_present(), so must be
-         * done after the fixed reservations.
-         */
-        arm64_memory_present();
 
+        /*
+         * must be done after arm64_numa_init() which calls numa_init() to
+         * initialize node_online_map that gets used in hugetlb_cma_reserve()
+         * while allocating required CMA size across online nodes.
+         */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+        arm64_hugetlb_cma_reserve();
+#endif
+
+        dma_pernuma_cma_reserve();
+
+        kvm_hyp_reserve();
+
+        /*
+         * sparse_init() tries to allocate memory from memblock, so must be
+         * done after the fixed reservations
+         */
         sparse_init();
         zone_sizes_init(min, max);
+
+        /*
+         * Reserve the CMA area after arm64_dma_phys_limit was initialised.
+         */
+        dma_contiguous_reserve(arm64_dma_phys_limit);
+        rk_dma_heap_cma_setup();
+
+        /*
+         * request_standard_resources() depends on crashkernel's memory being
+         * reserved, so do it here.
+         */
+        if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
+                reserve_crashkernel();
 
         memblock_dump_all();
 }
@@ -541,7 +534,7 @@
          * memmap array.
          */
         if (pg < pgend)
-                free_bootmem(pg, pgend - pg);
+                memblock_free(pg, pgend - pg);
 }
 
 /*
@@ -549,12 +542,10 @@
  */
 static void __init free_unused_memmap(void)
 {
-        unsigned long start, prev_end = 0;
-        struct memblock_region *reg;
+        unsigned long start, end, prev_end = 0;
+        int i;
 
-        for_each_memblock(memory, reg) {
-                start = __phys_to_pfn(reg->base);
-
+        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
 #ifdef CONFIG_SPARSEMEM
                 /*
                  * Take care not to free memmap entries that don't exist due
@@ -574,8 +565,7 @@
                  * memmap entries are valid from the bank end aligned to
                  * MAX_ORDER_NR_PAGES.
                  */
-                prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
-                                 MAX_ORDER_NR_PAGES);
+                prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
         }
 
 #ifdef CONFIG_SPARSEMEM
@@ -593,20 +583,18 @@
 void __init mem_init(void)
 {
         if (swiotlb_force == SWIOTLB_FORCE ||
-            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+            max_pfn > PFN_DOWN(arm64_dma_phys_limit))
                 swiotlb_init(1);
         else
                 swiotlb_force = SWIOTLB_NO_FORCE;
 
-        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
+        set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
         free_unused_memmap();
 #endif
         /* this will put all unused low memory onto the freelists */
-        free_all_bootmem();
-
-        kexec_reserve_crashkres_pages();
+        memblock_free_all();
 
         mem_init_print_info(NULL);
 
@@ -615,15 +603,7 @@
          * detected at build time already.
          */
 #ifdef CONFIG_COMPAT
-        BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
-#endif
-
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-        /*
-         * Make sure we chose the upper bound of sizeof(struct page)
-         * correctly when sizing the VMEMMAP array.
-         */
-        BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+        BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
 #endif
 
         if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
@@ -640,7 +620,7 @@
 {
         free_reserved_area(lm_alias(__init_begin),
                            lm_alias(__init_end),
-                           0, "unused kernel");
+                           POISON_FREE_INITMEM, "unused kernel");
         /*
          * Unmap the __init region but leave the VM area in place. This
          * prevents the region from being reused for kernel modules, which
@@ -649,48 +629,11 @@
         unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd __initdata;
-
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-        if (!keep_initrd) {
-                free_reserved_area((void *)start, (void *)end, 0, "initrd");
-                memblock_free(__virt_to_phys(start), end - start);
-        }
-}
-
-static int __init keepinitrd_setup(char *__unused)
-{
-        keep_initrd = 1;
-        return 1;
-}
-
-__setup("keepinitrd", keepinitrd_setup);
-#endif
-
-/*
- * Dump out memory limit information on panic.
- */
-static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
+void dump_mem_limit(void)
 {
         if (memory_limit != PHYS_ADDR_MAX) {
                 pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
         } else {
                 pr_emerg("Memory Limit: none\n");
         }
-        return 0;
 }
-
-static struct notifier_block mem_limit_notifier = {
-        .notifier_call = dump_mem_limit,
-};
-
-static int __init register_mem_limit_dumper(void)
-{
-        atomic_notifier_chain_register(&panic_notifier_list,
-                                       &mem_limit_notifier);
-        return 0;
-}
-__initcall(register_mem_limit_dumper);
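
The clamping performed by the new max_zone_phys() helper in this patch can be illustrated with a minimal, stand-alone user-space C sketch of the same arithmetic. This is an illustration only, not kernel code: uint64_t stands in for phys_addr_t, the DMA_BIT_MASK/U32_MAX/PHYS_ADDR_MAX definitions are redefined locally, and the hypothetical dram_start/dram_end parameters and min_pa() helper stand in for memblock_start_of_DRAM()/memblock_end_of_DRAM() and the kernel's min().

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define PHYS_ADDR_MAX   UINT64_MAX
#define U32_MAX         0xffffffffULL
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static phys_addr_t min_pa(phys_addr_t a, phys_addr_t b)
{
        return a < b ? a : b;
}

/*
 * Mirror of the patch's max_zone_phys(), parameterised by the DRAM span:
 * the requested zone mask is widened to the whole address space if DRAM
 * starts above 4 GiB, or to 32 bits if DRAM starts above the requested
 * mask; the result is an exclusive upper bound clamped to the end of DRAM.
 */
static phys_addr_t max_zone_phys(unsigned int zone_bits,
                                 phys_addr_t dram_start, phys_addr_t dram_end)
{
        phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);

        if (dram_start > U32_MAX)
                zone_mask = PHYS_ADDR_MAX;      /* DRAM entirely above 4 GiB */
        else if (dram_start > zone_mask)
                zone_mask = U32_MAX;            /* fall back to a 32-bit zone */

        return min_pa(zone_mask, dram_end - 1) + 1;
}

int main(void)
{
        /* 30-bit zone, DRAM at 0..2 GiB: the zone limit is 1 GiB (0x40000000). */
        printf("%#llx\n", (unsigned long long)
               max_zone_phys(30, 0x0ULL, 0x80000000ULL));

        /* Same request, but DRAM starts at 2 GiB: falls back to 4 GiB (0x100000000). */
        printf("%#llx\n", (unsigned long long)
               max_zone_phys(30, 0x80000000ULL, 0x100000000ULL));
        return 0;
}

The second case shows why zone_sizes_init() can rely on arm64_dma_phys_limit even when the firmware-reported DMA limit is narrower than where DRAM actually begins: the helper never produces a zone boundary below the start of DRAM.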