.. | .. |
---|
11 | 11 | * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki |
---|
12 | 12 | */ |
---|
13 | 13 | #include <linux/init.h> |
---|
| 14 | +#include <linux/cpu.h> |
---|
| 15 | +#include <linux/delay.h> |
---|
14 | 16 | #include <linux/ioport.h> |
---|
15 | 17 | #include <linux/export.h> |
---|
16 | 18 | #include <linux/screen_info.h> |
---|
17 | 19 | #include <linux/memblock.h> |
---|
18 | | -#include <linux/bootmem.h> |
---|
19 | 20 | #include <linux/initrd.h> |
---|
20 | 21 | #include <linux/root_dev.h> |
---|
21 | 22 | #include <linux/highmem.h> |
---|
.. | .. |
---|
25 | 26 | #include <linux/kexec.h> |
---|
26 | 27 | #include <linux/sizes.h> |
---|
27 | 28 | #include <linux/device.h> |
---|
28 | | -#include <linux/dma-contiguous.h> |
---|
| 29 | +#include <linux/dma-map-ops.h> |
---|
29 | 30 | #include <linux/decompress/generic.h> |
---|
30 | 31 | #include <linux/of_fdt.h> |
---|
| 32 | +#include <linux/of_reserved_mem.h> |
---|
| 33 | +#include <linux/dmi.h> |
---|
31 | 34 | |
---|
32 | 35 | #include <asm/addrspace.h> |
---|
33 | 36 | #include <asm/bootinfo.h> |
---|
.. | .. |
---|
43 | 46 | #include <asm/prom.h> |
---|
44 | 47 | |
---|
45 | 48 | #ifdef CONFIG_MIPS_ELF_APPENDED_DTB |
---|
46 | | -const char __section(.appended_dtb) __appended_dtb[0x100000]; |
---|
| 49 | +const char __section(".appended_dtb") __appended_dtb[0x100000]; |
---|
47 | 50 | #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */ |
---|
48 | 51 | |
---|
49 | 52 | struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; |
---|
.. | .. |
---|
63 | 66 | |
---|
64 | 67 | EXPORT_SYMBOL(mips_machtype); |
---|
65 | 68 | |
---|
66 | | -struct boot_mem_map boot_mem_map; |
---|
67 | | - |
---|
68 | 69 | static char __initdata command_line[COMMAND_LINE_SIZE]; |
---|
69 | 70 | char __initdata arcs_cmdline[COMMAND_LINE_SIZE]; |
---|
70 | 71 | |
---|
71 | 72 | #ifdef CONFIG_CMDLINE_BOOL |
---|
72 | | -static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; |
---|
| 73 | +static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE; |
---|
| 74 | +#else |
---|
| 75 | +static const char builtin_cmdline[] __initconst = ""; |
---|
73 | 76 | #endif |
---|
74 | 77 | |
---|
75 | 78 | /* |
---|
.. | .. |
---|
90 | 93 | EXPORT_SYMBOL(ARCH_PFN_OFFSET); |
---|
91 | 94 | #endif |
---|
92 | 95 | |
---|
93 | | -void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type) |
---|
94 | | -{ |
---|
95 | | - int x = boot_mem_map.nr_map; |
---|
96 | | - int i; |
---|
97 | | - |
---|
98 | | - /* |
---|
99 | | - * If the region reaches the top of the physical address space, adjust |
---|
100 | | - * the size slightly so that (start + size) doesn't overflow |
---|
101 | | - */ |
---|
102 | | - if (start + size - 1 == PHYS_ADDR_MAX) |
---|
103 | | - --size; |
---|
104 | | - |
---|
105 | | - /* Sanity check */ |
---|
106 | | - if (start + size < start) { |
---|
107 | | - pr_warn("Trying to add an invalid memory region, skipped\n"); |
---|
108 | | - return; |
---|
109 | | - } |
---|
110 | | - |
---|
111 | | - /* |
---|
112 | | - * Try to merge with existing entry, if any. |
---|
113 | | - */ |
---|
114 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
115 | | - struct boot_mem_map_entry *entry = boot_mem_map.map + i; |
---|
116 | | - unsigned long top; |
---|
117 | | - |
---|
118 | | - if (entry->type != type) |
---|
119 | | - continue; |
---|
120 | | - |
---|
121 | | - if (start + size < entry->addr) |
---|
122 | | - continue; /* no overlap */ |
---|
123 | | - |
---|
124 | | - if (entry->addr + entry->size < start) |
---|
125 | | - continue; /* no overlap */ |
---|
126 | | - |
---|
127 | | - top = max(entry->addr + entry->size, start + size); |
---|
128 | | - entry->addr = min(entry->addr, start); |
---|
129 | | - entry->size = top - entry->addr; |
---|
130 | | - |
---|
131 | | - return; |
---|
132 | | - } |
---|
133 | | - |
---|
134 | | - if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) { |
---|
135 | | - pr_err("Ooops! Too many entries in the memory map!\n"); |
---|
136 | | - return; |
---|
137 | | - } |
---|
138 | | - |
---|
139 | | - boot_mem_map.map[x].addr = start; |
---|
140 | | - boot_mem_map.map[x].size = size; |
---|
141 | | - boot_mem_map.map[x].type = type; |
---|
142 | | - boot_mem_map.nr_map++; |
---|
143 | | -} |
---|
144 | | - |
---|
145 | 96 | void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max) |
---|
146 | 97 | { |
---|
147 | 98 | void *dm = &detect_magic; |
---|
.. | .. |
---|
158 | 109 | ((unsigned long long) sz_min) / SZ_1M, |
---|
159 | 110 | ((unsigned long long) sz_max) / SZ_1M); |
---|
160 | 111 | |
---|
161 | | - add_memory_region(start, size, BOOT_MEM_RAM); |
---|
162 | | -} |
---|
163 | | - |
---|
164 | | -static bool __init __maybe_unused memory_region_available(phys_addr_t start, |
---|
165 | | - phys_addr_t size) |
---|
166 | | -{ |
---|
167 | | - int i; |
---|
168 | | - bool in_ram = false, free = true; |
---|
169 | | - |
---|
170 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
171 | | - phys_addr_t start_, end_; |
---|
172 | | - |
---|
173 | | - start_ = boot_mem_map.map[i].addr; |
---|
174 | | - end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size; |
---|
175 | | - |
---|
176 | | - switch (boot_mem_map.map[i].type) { |
---|
177 | | - case BOOT_MEM_RAM: |
---|
178 | | - if (start >= start_ && start + size <= end_) |
---|
179 | | - in_ram = true; |
---|
180 | | - break; |
---|
181 | | - case BOOT_MEM_RESERVED: |
---|
182 | | - if ((start >= start_ && start < end_) || |
---|
183 | | - (start < start_ && start + size >= start_)) |
---|
184 | | - free = false; |
---|
185 | | - break; |
---|
186 | | - default: |
---|
187 | | - continue; |
---|
188 | | - } |
---|
189 | | - } |
---|
190 | | - |
---|
191 | | - return in_ram && free; |
---|
192 | | -} |
---|
193 | | - |
---|
194 | | -static void __init print_memory_map(void) |
---|
195 | | -{ |
---|
196 | | - int i; |
---|
197 | | - const int field = 2 * sizeof(unsigned long); |
---|
198 | | - |
---|
199 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
200 | | - printk(KERN_INFO " memory: %0*Lx @ %0*Lx ", |
---|
201 | | - field, (unsigned long long) boot_mem_map.map[i].size, |
---|
202 | | - field, (unsigned long long) boot_mem_map.map[i].addr); |
---|
203 | | - |
---|
204 | | - switch (boot_mem_map.map[i].type) { |
---|
205 | | - case BOOT_MEM_RAM: |
---|
206 | | - printk(KERN_CONT "(usable)\n"); |
---|
207 | | - break; |
---|
208 | | - case BOOT_MEM_INIT_RAM: |
---|
209 | | - printk(KERN_CONT "(usable after init)\n"); |
---|
210 | | - break; |
---|
211 | | - case BOOT_MEM_ROM_DATA: |
---|
212 | | - printk(KERN_CONT "(ROM data)\n"); |
---|
213 | | - break; |
---|
214 | | - case BOOT_MEM_RESERVED: |
---|
215 | | - printk(KERN_CONT "(reserved)\n"); |
---|
216 | | - break; |
---|
217 | | - default: |
---|
218 | | - printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type); |
---|
219 | | - break; |
---|
220 | | - } |
---|
221 | | - } |
---|
| 112 | + memblock_add(start, size); |
---|
222 | 113 | } |
---|
223 | 114 | |
---|
224 | 115 | /* |
---|
.. | .. |
---|
265 | 156 | pr_err("initrd start must be page aligned\n"); |
---|
266 | 157 | goto disable; |
---|
267 | 158 | } |
---|
268 | | - if (initrd_start < PAGE_OFFSET) { |
---|
269 | | - pr_err("initrd start < PAGE_OFFSET\n"); |
---|
270 | | - goto disable; |
---|
271 | | - } |
---|
272 | 159 | |
---|
273 | 160 | /* |
---|
274 | 161 | * Sanitize initrd addresses. For example firmware |
---|
.. | .. |
---|
280 | 167 | end = __pa(initrd_end); |
---|
281 | 168 | initrd_end = (unsigned long)__va(end); |
---|
282 | 169 | initrd_start = (unsigned long)__va(__pa(initrd_start)); |
---|
| 170 | + |
---|
| 171 | + if (initrd_start < PAGE_OFFSET) { |
---|
| 172 | + pr_err("initrd start < PAGE_OFFSET\n"); |
---|
| 173 | + goto disable; |
---|
| 174 | + } |
---|
283 | 175 | |
---|
284 | 176 | ROOT_DEV = Root_RAM0; |
---|
285 | 177 | return PFN_UP(end); |
---|
.. | .. |
---|
333 | 225 | |
---|
334 | 226 | maybe_bswap_initrd(); |
---|
335 | 227 | |
---|
336 | | - reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT); |
---|
| 228 | + memblock_reserve(__pa(initrd_start), size); |
---|
337 | 229 | initrd_below_start_ok = 1; |
---|
338 | 230 | |
---|
339 | 231 | pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n", |
---|
.. | .. |
---|
360 | 252 |  * Initialize the bootmem allocator. It also sets up initrd related data |
---|
361 | 253 | * if needed. |
---|
362 | 254 | */ |
---|
363 | | -#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA)) |
---|
| 255 | +#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA)) |
---|
364 | 256 | |
---|
365 | 257 | static void __init bootmem_init(void) |
---|
366 | 258 | { |
---|
.. | .. |
---|
370 | 262 | |
---|
371 | 263 | #else /* !CONFIG_SGI_IP27 */ |
---|
372 | 264 | |
---|
373 | | -static unsigned long __init bootmap_bytes(unsigned long pages) |
---|
374 | | -{ |
---|
375 | | - unsigned long bytes = DIV_ROUND_UP(pages, 8); |
---|
376 | | - |
---|
377 | | - return ALIGN(bytes, sizeof(long)); |
---|
378 | | -} |
---|
379 | | - |
---|
380 | 265 | static void __init bootmem_init(void) |
---|
381 | 266 | { |
---|
382 | | - unsigned long reserved_end; |
---|
383 | | - unsigned long mapstart = ~0UL; |
---|
384 | | - unsigned long bootmap_size; |
---|
385 | | - phys_addr_t ramstart = PHYS_ADDR_MAX; |
---|
386 | | - bool bootmap_valid = false; |
---|
| 267 | + phys_addr_t ramstart, ramend; |
---|
| 268 | + unsigned long start, end; |
---|
387 | 269 | int i; |
---|
| 270 | + |
---|
| 271 | + ramstart = memblock_start_of_DRAM(); |
---|
| 272 | + ramend = memblock_end_of_DRAM(); |
---|
388 | 273 | |
---|
389 | 274 | /* |
---|
390 | 275 | * Sanity check any INITRD first. We don't take it into account |
---|
.. | .. |
---|
393 | 278 | * will reserve the area used for the initrd. |
---|
394 | 279 | */ |
---|
395 | 280 | init_initrd(); |
---|
396 | | - reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end)); |
---|
397 | 281 | |
---|
| 282 | + /* Reserve memory occupied by kernel. */ |
---|
| 283 | + memblock_reserve(__pa_symbol(&_text), |
---|
| 284 | + __pa_symbol(&_end) - __pa_symbol(&_text)); |
---|
| 285 | + |
---|
| 286 | + /* max_low_pfn is not a number of pages but the end pfn of low mem */ |
---|
| 287 | + |
---|
| 288 | +#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET |
---|
| 289 | + ARCH_PFN_OFFSET = PFN_UP(ramstart); |
---|
| 290 | +#else |
---|
398 | 291 | /* |
---|
399 | | - * max_low_pfn is not a number of pages. The number of pages |
---|
400 | | - * of the system is given by 'max_low_pfn - min_low_pfn'. |
---|
| 292 | + * Reserve any memory between the start of RAM and PHYS_OFFSET |
---|
401 | 293 | */ |
---|
402 | | - min_low_pfn = ~0UL; |
---|
403 | | - max_low_pfn = 0; |
---|
| 294 | + if (ramstart > PHYS_OFFSET) |
---|
| 295 | + memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET); |
---|
404 | 296 | |
---|
405 | | - /* |
---|
406 | | - * Find the highest page frame number we have available |
---|
407 | | - * and the lowest used RAM address |
---|
408 | | - */ |
---|
409 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
410 | | - unsigned long start, end; |
---|
| 297 | + if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) { |
---|
| 298 | + pr_info("Wasting %lu bytes for tracking %lu unused pages\n", |
---|
| 299 | + (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)), |
---|
| 300 | + (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET)); |
---|
| 301 | + } |
---|
| 302 | +#endif |
---|
411 | 303 | |
---|
412 | | - if (boot_mem_map.map[i].type != BOOT_MEM_RAM) |
---|
413 | | - continue; |
---|
414 | | - |
---|
415 | | - start = PFN_UP(boot_mem_map.map[i].addr); |
---|
416 | | - end = PFN_DOWN(boot_mem_map.map[i].addr |
---|
417 | | - + boot_mem_map.map[i].size); |
---|
418 | | - |
---|
419 | | - ramstart = min(ramstart, boot_mem_map.map[i].addr); |
---|
420 | | - |
---|
421 | | -#ifndef CONFIG_HIGHMEM |
---|
| 304 | + min_low_pfn = ARCH_PFN_OFFSET; |
---|
| 305 | + max_pfn = PFN_DOWN(ramend); |
---|
| 306 | + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { |
---|
422 | 307 | /* |
---|
423 | 308 | * Skip highmem here so we get an accurate max_low_pfn if low |
---|
424 | 309 | * memory stops short of high memory. |
---|
.. | .. |
---|
429 | 314 | continue; |
---|
430 | 315 | if (end > PFN_DOWN(HIGHMEM_START)) |
---|
431 | 316 | end = PFN_DOWN(HIGHMEM_START); |
---|
432 | | -#endif |
---|
433 | | - |
---|
434 | 317 | if (end > max_low_pfn) |
---|
435 | 318 | max_low_pfn = end; |
---|
436 | | - if (start < min_low_pfn) |
---|
437 | | - min_low_pfn = start; |
---|
438 | | - if (end <= reserved_end) |
---|
439 | | - continue; |
---|
440 | | -#ifdef CONFIG_BLK_DEV_INITRD |
---|
441 | | - /* Skip zones before initrd and initrd itself */ |
---|
442 | | - if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) |
---|
443 | | - continue; |
---|
444 | | -#endif |
---|
445 | | - if (start >= mapstart) |
---|
446 | | - continue; |
---|
447 | | - mapstart = max(reserved_end, start); |
---|
448 | 319 | } |
---|
449 | 320 | |
---|
450 | 321 | if (min_low_pfn >= max_low_pfn) |
---|
451 | 322 | panic("Incorrect memory mapping !!!"); |
---|
452 | 323 | |
---|
453 | | -#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET |
---|
454 | | - ARCH_PFN_OFFSET = PFN_UP(ramstart); |
---|
455 | | -#else |
---|
456 | | - /* |
---|
457 | | - * Reserve any memory between the start of RAM and PHYS_OFFSET |
---|
458 | | - */ |
---|
459 | | - if (ramstart > PHYS_OFFSET) |
---|
460 | | - add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET, |
---|
461 | | - BOOT_MEM_RESERVED); |
---|
462 | | - |
---|
463 | | - if (min_low_pfn > ARCH_PFN_OFFSET) { |
---|
464 | | - pr_info("Wasting %lu bytes for tracking %lu unused pages\n", |
---|
465 | | - (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page), |
---|
466 | | - min_low_pfn - ARCH_PFN_OFFSET); |
---|
467 | | - } else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) { |
---|
468 | | - pr_info("%lu free pages won't be used\n", |
---|
469 | | - ARCH_PFN_OFFSET - min_low_pfn); |
---|
470 | | - } |
---|
471 | | - min_low_pfn = ARCH_PFN_OFFSET; |
---|
472 | | -#endif |
---|
473 | | - |
---|
474 | | - /* |
---|
475 | | - * Determine low and high memory ranges |
---|
476 | | - */ |
---|
477 | | - max_pfn = max_low_pfn; |
---|
478 | | - if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) { |
---|
| 324 | + if (max_pfn > PFN_DOWN(HIGHMEM_START)) { |
---|
479 | 325 | #ifdef CONFIG_HIGHMEM |
---|
480 | 326 | highstart_pfn = PFN_DOWN(HIGHMEM_START); |
---|
481 | | - highend_pfn = max_low_pfn; |
---|
482 | | -#endif |
---|
| 327 | + highend_pfn = max_pfn; |
---|
| 328 | +#else |
---|
483 | 329 | max_low_pfn = PFN_DOWN(HIGHMEM_START); |
---|
484 | | - } |
---|
485 | | - |
---|
486 | | -#ifdef CONFIG_BLK_DEV_INITRD |
---|
487 | | - /* |
---|
488 | | - * mapstart should be after initrd_end |
---|
489 | | - */ |
---|
490 | | - if (initrd_end) |
---|
491 | | - mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); |
---|
492 | | -#endif |
---|
493 | | - |
---|
494 | | - /* |
---|
495 | | - * check that mapstart doesn't overlap with any of |
---|
496 | | - * memory regions that have been reserved through eg. DTB |
---|
497 | | - */ |
---|
498 | | - bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn); |
---|
499 | | - |
---|
500 | | - bootmap_valid = memory_region_available(PFN_PHYS(mapstart), |
---|
501 | | - bootmap_size); |
---|
502 | | - for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) { |
---|
503 | | - unsigned long mapstart_addr; |
---|
504 | | - |
---|
505 | | - switch (boot_mem_map.map[i].type) { |
---|
506 | | - case BOOT_MEM_RESERVED: |
---|
507 | | - mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr + |
---|
508 | | - boot_mem_map.map[i].size); |
---|
509 | | - if (PHYS_PFN(mapstart_addr) < mapstart) |
---|
510 | | - break; |
---|
511 | | - |
---|
512 | | - bootmap_valid = memory_region_available(mapstart_addr, |
---|
513 | | - bootmap_size); |
---|
514 | | - if (bootmap_valid) |
---|
515 | | - mapstart = PHYS_PFN(mapstart_addr); |
---|
516 | | - break; |
---|
517 | | - default: |
---|
518 | | - break; |
---|
519 | | - } |
---|
520 | | - } |
---|
521 | | - |
---|
522 | | - if (!bootmap_valid) |
---|
523 | | - panic("No memory area to place a bootmap bitmap"); |
---|
524 | | - |
---|
525 | | - /* |
---|
526 | | - * Initialize the boot-time allocator with low memory only. |
---|
527 | | - */ |
---|
528 | | - if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart, |
---|
529 | | - min_low_pfn, max_low_pfn)) |
---|
530 | | - panic("Unexpected memory size required for bootmap"); |
---|
531 | | - |
---|
532 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
533 | | - unsigned long start, end; |
---|
534 | | - |
---|
535 | | - start = PFN_UP(boot_mem_map.map[i].addr); |
---|
536 | | - end = PFN_DOWN(boot_mem_map.map[i].addr |
---|
537 | | - + boot_mem_map.map[i].size); |
---|
538 | | - |
---|
539 | | - if (start <= min_low_pfn) |
---|
540 | | - start = min_low_pfn; |
---|
541 | | - if (start >= end) |
---|
542 | | - continue; |
---|
543 | | - |
---|
544 | | -#ifndef CONFIG_HIGHMEM |
---|
545 | | - if (end > max_low_pfn) |
---|
546 | | - end = max_low_pfn; |
---|
547 | | - |
---|
548 | | - /* |
---|
549 | | - * ... finally, is the area going away? |
---|
550 | | - */ |
---|
551 | | - if (end <= start) |
---|
552 | | - continue; |
---|
553 | | -#endif |
---|
554 | | - |
---|
555 | | - memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); |
---|
556 | | - } |
---|
557 | | - |
---|
558 | | - /* |
---|
559 | | - * Register fully available low RAM pages with the bootmem allocator. |
---|
560 | | - */ |
---|
561 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
562 | | - unsigned long start, end, size; |
---|
563 | | - |
---|
564 | | - start = PFN_UP(boot_mem_map.map[i].addr); |
---|
565 | | - end = PFN_DOWN(boot_mem_map.map[i].addr |
---|
566 | | - + boot_mem_map.map[i].size); |
---|
567 | | - |
---|
568 | | - /* |
---|
569 | | - * Reserve usable memory. |
---|
570 | | - */ |
---|
571 | | - switch (boot_mem_map.map[i].type) { |
---|
572 | | - case BOOT_MEM_RAM: |
---|
573 | | - break; |
---|
574 | | - case BOOT_MEM_INIT_RAM: |
---|
575 | | - memory_present(0, start, end); |
---|
576 | | - continue; |
---|
577 | | - default: |
---|
578 | | - /* Not usable memory */ |
---|
579 | | - if (start > min_low_pfn && end < max_low_pfn) |
---|
580 | | - reserve_bootmem(boot_mem_map.map[i].addr, |
---|
581 | | - boot_mem_map.map[i].size, |
---|
582 | | - BOOTMEM_DEFAULT); |
---|
583 | | - continue; |
---|
584 | | - } |
---|
585 | | - |
---|
586 | | - /* |
---|
587 | | - * We are rounding up the start address of usable memory |
---|
588 | | - * and at the end of the usable range downwards. |
---|
589 | | - */ |
---|
590 | | - if (start >= max_low_pfn) |
---|
591 | | - continue; |
---|
592 | | - if (start < reserved_end) |
---|
593 | | - start = reserved_end; |
---|
594 | | - if (end > max_low_pfn) |
---|
595 | | - end = max_low_pfn; |
---|
596 | | - |
---|
597 | | - /* |
---|
598 | | - * ... finally, is the area going away? |
---|
599 | | - */ |
---|
600 | | - if (end <= start) |
---|
601 | | - continue; |
---|
602 | | - size = end - start; |
---|
603 | | - |
---|
604 | | - /* Register lowmem ranges */ |
---|
605 | | - free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT); |
---|
606 | | - memory_present(0, start, end); |
---|
607 | | - } |
---|
608 | | - |
---|
609 | | - /* |
---|
610 | | - * Reserve the bootmap memory. |
---|
611 | | - */ |
---|
612 | | - reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT); |
---|
613 | | - |
---|
614 | | -#ifdef CONFIG_RELOCATABLE |
---|
615 | | - /* |
---|
616 | | - * The kernel reserves all memory below its _end symbol as bootmem, |
---|
617 | | - * but the kernel may now be at a much higher address. The memory |
---|
618 | | - * between the original and new locations may be returned to the system. |
---|
619 | | - */ |
---|
620 | | - if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) { |
---|
621 | | - unsigned long offset; |
---|
622 | | - extern void show_kernel_relocation(const char *level); |
---|
623 | | - |
---|
624 | | - offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS); |
---|
625 | | - free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset); |
---|
626 | | - |
---|
627 | | -#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO) |
---|
628 | | - /* |
---|
629 | | - * This information is necessary when debugging the kernel |
---|
630 | | - * But is a security vulnerability otherwise! |
---|
631 | | - */ |
---|
632 | | - show_kernel_relocation(KERN_INFO); |
---|
| 330 | + max_pfn = max_low_pfn; |
---|
633 | 331 | #endif |
---|
634 | 332 | } |
---|
635 | | -#endif |
---|
636 | 333 | |
---|
637 | 334 | /* |
---|
638 | 335 | * Reserve initrd memory if needed. |
---|
.. | .. |
---|
641 | 338 | } |
---|
642 | 339 | |
---|
643 | 340 | #endif /* CONFIG_SGI_IP27 */ |
---|
644 | | - |
---|
645 | | -/* |
---|
646 | | - * arch_mem_init - initialize memory management subsystem |
---|
647 | | - * |
---|
648 | | - * o plat_mem_setup() detects the memory configuration and will record detected |
---|
649 | | - * memory areas using add_memory_region. |
---|
650 | | - * |
---|
651 | | - * At this stage the memory configuration of the system is known to the |
---|
652 | | - * kernel but generic memory management system is still entirely uninitialized. |
---|
653 | | - * |
---|
654 | | - * o bootmem_init() |
---|
655 | | - * o sparse_init() |
---|
656 | | - * o paging_init() |
---|
657 | | - * o dma_contiguous_reserve() |
---|
658 | | - * |
---|
659 | | - * At this stage the bootmem allocator is ready to use. |
---|
660 | | - * |
---|
661 | | - * NOTE: historically plat_mem_setup did the entire platform initialization. |
---|
662 | | - * This was rather impractical because it meant plat_mem_setup had to |
---|
663 | | - * get away without any kind of memory allocator. To keep old code from |
---|
664 | | - * breaking plat_setup was just renamed to plat_mem_setup and a second platform |
---|
665 | | - * initialization hook for anything else was introduced. |
---|
666 | | - */ |
---|
667 | 341 | |
---|
668 | 342 | static int usermem __initdata; |
---|
669 | 343 | |
---|
.. | .. |
---|
677 | 351 | * size. |
---|
678 | 352 | */ |
---|
679 | 353 | if (usermem == 0) { |
---|
680 | | - boot_mem_map.nr_map = 0; |
---|
681 | 354 | usermem = 1; |
---|
| 355 | + memblock_remove(memblock_start_of_DRAM(), |
---|
| 356 | + memblock_end_of_DRAM() - memblock_start_of_DRAM()); |
---|
682 | 357 | } |
---|
683 | 358 | start = 0; |
---|
684 | 359 | size = memparse(p, &p); |
---|
685 | 360 | if (*p == '@') |
---|
686 | 361 | start = memparse(p + 1, &p); |
---|
687 | 362 | |
---|
688 | | - add_memory_region(start, size, BOOT_MEM_RAM); |
---|
| 363 | + memblock_add(start, size); |
---|
689 | 364 | |
---|
690 | 365 | return 0; |
---|
691 | 366 | } |
---|
.. | .. |
---|
711 | 386 | |
---|
712 | 387 | if (*p == '@') { |
---|
713 | 388 | start_at = memparse(p+1, &p); |
---|
714 | | - add_memory_region(start_at, mem_size, BOOT_MEM_RAM); |
---|
| 389 | + memblock_add(start_at, mem_size); |
---|
715 | 390 | } else if (*p == '#') { |
---|
716 | 391 | pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n"); |
---|
717 | 392 | return -EINVAL; |
---|
718 | 393 | } else if (*p == '$') { |
---|
719 | 394 | start_at = memparse(p+1, &p); |
---|
720 | | - add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED); |
---|
| 395 | + memblock_add(start_at, mem_size); |
---|
| 396 | + memblock_reserve(start_at, mem_size); |
---|
721 | 397 | } else { |
---|
722 | 398 | pr_err("\"memmap\" invalid format!\n"); |
---|
723 | 399 | return -EINVAL; |
---|
.. | .. |
---|
732 | 408 | early_param("memmap", early_parse_memmap); |
---|
733 | 409 | |
---|
734 | 410 | #ifdef CONFIG_PROC_VMCORE |
---|
735 | | -unsigned long setup_elfcorehdr, setup_elfcorehdr_size; |
---|
| 411 | +static unsigned long setup_elfcorehdr, setup_elfcorehdr_size; |
---|
736 | 412 | static int __init early_parse_elfcorehdr(char *p) |
---|
737 | 413 | { |
---|
738 | | - int i; |
---|
| 414 | + phys_addr_t start, end; |
---|
| 415 | + u64 i; |
---|
739 | 416 | |
---|
740 | 417 | setup_elfcorehdr = memparse(p, &p); |
---|
741 | 418 | |
---|
742 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
743 | | - unsigned long start = boot_mem_map.map[i].addr; |
---|
744 | | - unsigned long end = (boot_mem_map.map[i].addr + |
---|
745 | | - boot_mem_map.map[i].size); |
---|
| 419 | + for_each_mem_range(i, &start, &end) { |
---|
746 | 420 | if (setup_elfcorehdr >= start && setup_elfcorehdr < end) { |
---|
747 | 421 | /* |
---|
748 | 422 | * Reserve from the elf core header to the end of |
---|
.. | .. |
---|
762 | 436 | early_param("elfcorehdr", early_parse_elfcorehdr); |
---|
763 | 437 | #endif |
---|
764 | 438 | |
---|
765 | | -static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type) |
---|
766 | | -{ |
---|
767 | | - phys_addr_t size; |
---|
768 | | - int i; |
---|
769 | | - |
---|
770 | | - size = end - mem; |
---|
771 | | - if (!size) |
---|
772 | | - return; |
---|
773 | | - |
---|
774 | | - /* Make sure it is in the boot_mem_map */ |
---|
775 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
776 | | - if (mem >= boot_mem_map.map[i].addr && |
---|
777 | | - mem < (boot_mem_map.map[i].addr + |
---|
778 | | - boot_mem_map.map[i].size)) |
---|
779 | | - return; |
---|
780 | | - } |
---|
781 | | - add_memory_region(mem, size, type); |
---|
782 | | -} |
---|
783 | | - |
---|
784 | 439 | #ifdef CONFIG_KEXEC |
---|
785 | | -static inline unsigned long long get_total_mem(void) |
---|
786 | | -{ |
---|
787 | | - unsigned long long total; |
---|
788 | 440 | |
---|
789 | | - total = max_pfn - min_low_pfn; |
---|
790 | | - return total << PAGE_SHIFT; |
---|
791 | | -} |
---|
| 441 | +/* 64M alignment for crash kernel regions */ |
---|
| 442 | +#define CRASH_ALIGN SZ_64M |
---|
| 443 | +#define CRASH_ADDR_MAX SZ_512M |
---|
792 | 444 | |
---|
793 | 445 | static void __init mips_parse_crashkernel(void) |
---|
794 | 446 | { |
---|
.. | .. |
---|
796 | 448 | unsigned long long crash_size, crash_base; |
---|
797 | 449 | int ret; |
---|
798 | 450 | |
---|
799 | | - total_mem = get_total_mem(); |
---|
| 451 | + total_mem = memblock_phys_mem_size(); |
---|
800 | 452 | ret = parse_crashkernel(boot_command_line, total_mem, |
---|
801 | 453 | &crash_size, &crash_base); |
---|
802 | 454 | if (ret != 0 || crash_size <= 0) |
---|
803 | 455 | return; |
---|
804 | 456 | |
---|
805 | | - if (!memory_region_available(crash_base, crash_size)) { |
---|
806 | | - pr_warn("Invalid memory region reserved for crash kernel\n"); |
---|
807 | | - return; |
---|
| 457 | + if (crash_base <= 0) { |
---|
| 458 | + crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_MAX, |
---|
| 459 | + crash_size, CRASH_ALIGN); |
---|
| 460 | + if (!crash_base) { |
---|
| 461 | + pr_warn("crashkernel reservation failed - No suitable area found.\n"); |
---|
| 462 | + return; |
---|
| 463 | + } |
---|
| 464 | + } else { |
---|
| 465 | + unsigned long long start; |
---|
| 466 | + |
---|
| 467 | + start = memblock_find_in_range(crash_base, crash_base + crash_size, |
---|
| 468 | + crash_size, 1); |
---|
| 469 | + if (start != crash_base) { |
---|
| 470 | + pr_warn("Invalid memory region reserved for crash kernel\n"); |
---|
| 471 | + return; |
---|
| 472 | + } |
---|
808 | 473 | } |
---|
809 | 474 | |
---|
810 | 475 | crashk_res.start = crash_base; |
---|
.. | .. |
---|
821 | 486 | ret = request_resource(res, &crashk_res); |
---|
822 | 487 | if (!ret) |
---|
823 | 488 | pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", |
---|
824 | | - (unsigned long)((crashk_res.end - |
---|
825 | | - crashk_res.start + 1) >> 20), |
---|
| 489 | + (unsigned long)(resource_size(&crashk_res) >> 20), |
---|
826 | 490 | (unsigned long)(crashk_res.start >> 20)); |
---|
827 | 491 | } |
---|
828 | 492 | #else /* !defined(CONFIG_KEXEC) */ |
---|
.. | .. |
---|
835 | 499 | } |
---|
836 | 500 | #endif /* !defined(CONFIG_KEXEC) */ |
---|
837 | 501 | |
---|
838 | | -#define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER) |
---|
839 | | -#define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) |
---|
840 | | -#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) |
---|
841 | | -#define BUILTIN_EXTEND_WITH_PROM \ |
---|
842 | | - IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND) |
---|
| 502 | +static void __init check_kernel_sections_mem(void) |
---|
| 503 | +{ |
---|
| 504 | + phys_addr_t start = __pa_symbol(&_text); |
---|
| 505 | + phys_addr_t size = __pa_symbol(&_end) - start; |
---|
843 | 506 | |
---|
| 507 | + if (!memblock_is_region_memory(start, size)) { |
---|
| 508 | + pr_info("Kernel sections are not in the memory maps\n"); |
---|
| 509 | + memblock_add(start, size); |
---|
| 510 | + } |
---|
| 511 | +} |
---|
| 512 | + |
---|
| 513 | +static void __init bootcmdline_append(const char *s, size_t max) |
---|
| 514 | +{ |
---|
| 515 | + if (!s[0] || !max) |
---|
| 516 | + return; |
---|
| 517 | + |
---|
| 518 | + if (boot_command_line[0]) |
---|
| 519 | + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); |
---|
| 520 | + |
---|
| 521 | + strlcat(boot_command_line, s, max); |
---|
| 522 | +} |
---|
| 523 | + |
---|
| 524 | +#ifdef CONFIG_OF_EARLY_FLATTREE |
---|
| 525 | + |
---|
| 526 | +static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname, |
---|
| 527 | + int depth, void *data) |
---|
| 528 | +{ |
---|
| 529 | + bool *dt_bootargs = data; |
---|
| 530 | + const char *p; |
---|
| 531 | + int l; |
---|
| 532 | + |
---|
| 533 | + if (depth != 1 || !data || |
---|
| 534 | + (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) |
---|
| 535 | + return 0; |
---|
| 536 | + |
---|
| 537 | + p = of_get_flat_dt_prop(node, "bootargs", &l); |
---|
| 538 | + if (p != NULL && l > 0) { |
---|
| 539 | + bootcmdline_append(p, min(l, COMMAND_LINE_SIZE)); |
---|
| 540 | + *dt_bootargs = true; |
---|
| 541 | + } |
---|
| 542 | + |
---|
| 543 | + return 1; |
---|
| 544 | +} |
---|
| 545 | + |
---|
| 546 | +#endif /* CONFIG_OF_EARLY_FLATTREE */ |
---|
| 547 | + |
---|
| 548 | +static void __init bootcmdline_init(void) |
---|
| 549 | +{ |
---|
| 550 | + bool dt_bootargs = false; |
---|
| 551 | + |
---|
| 552 | + /* |
---|
| 553 | + * If CMDLINE_OVERRIDE is enabled then initializing the command line is |
---|
| 554 | + * trivial - we simply use the built-in command line unconditionally & |
---|
| 555 | + * unmodified. |
---|
| 556 | + */ |
---|
| 557 | + if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) { |
---|
| 558 | + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
---|
| 559 | + return; |
---|
| 560 | + } |
---|
| 561 | + |
---|
| 562 | + /* |
---|
| 563 | + * If the user specified a built-in command line & |
---|
| 564 | + * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is |
---|
| 565 | + * prepended to arguments from the bootloader or DT, so we'll copy it |
---|
| 566 | + * to the start of boot_command_line here. Otherwise, empty |
---|
| 567 | + * boot_command_line to undo anything early_init_dt_scan_chosen() did. |
---|
| 568 | + */ |
---|
| 569 | + if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)) |
---|
| 570 | + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
---|
| 571 | + else |
---|
| 572 | + boot_command_line[0] = 0; |
---|
| 573 | + |
---|
| 574 | +#ifdef CONFIG_OF_EARLY_FLATTREE |
---|
| 575 | + /* |
---|
| 576 | + * If we're configured to take boot arguments from DT, look for those |
---|
| 577 | + * now. |
---|
| 578 | + */ |
---|
| 579 | + if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) || |
---|
| 580 | + IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)) |
---|
| 581 | + of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs); |
---|
| 582 | +#endif |
---|
| 583 | + |
---|
| 584 | + /* |
---|
| 585 | + * If we didn't get any arguments from DT (regardless of whether that's |
---|
| 586 | + * because we weren't configured to look for them, or because we looked |
---|
| 587 | + * & found none) then we'll take arguments from the bootloader. |
---|
| 588 | + * plat_mem_setup() should have filled arcs_cmdline with arguments from |
---|
| 589 | + * the bootloader. |
---|
| 590 | + */ |
---|
| 591 | + if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs) |
---|
| 592 | + bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE); |
---|
| 593 | + |
---|
| 594 | + /* |
---|
| 595 | + * If the user specified a built-in command line & we didn't already |
---|
| 596 | + * prepend it, we append it to boot_command_line here. |
---|
| 597 | + */ |
---|
| 598 | + if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && |
---|
| 599 | + !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)) |
---|
| 600 | + bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE); |
---|
| 601 | +} |
---|
| 602 | + |
---|
| 603 | +/* |
---|
| 604 | + * arch_mem_init - initialize memory management subsystem |
---|
| 605 | + * |
---|
| 606 | + * o plat_mem_setup() detects the memory configuration and will record detected |
---|
| 607 | + * memory areas using memblock_add. |
---|
| 608 | + * |
---|
| 609 | + * At this stage the memory configuration of the system is known to the |
---|
| 610 | + * kernel but generic memory management system is still entirely uninitialized. |
---|
| 611 | + * |
---|
| 612 | + * o bootmem_init() |
---|
| 613 | + * o sparse_init() |
---|
| 614 | + * o paging_init() |
---|
| 615 | + * o dma_contiguous_reserve() |
---|
| 616 | + * |
---|
| 617 | + * At this stage the bootmem allocator is ready to use. |
---|
| 618 | + * |
---|
| 619 | + * NOTE: historically plat_mem_setup did the entire platform initialization. |
---|
| 620 | + * This was rather impractical because it meant plat_mem_setup had to |
---|
| 621 | + * get away without any kind of memory allocator. To keep old code from |
---|
| 622 | + * breaking plat_setup was just renamed to plat_mem_setup and a second platform |
---|
| 623 | + * initialization hook for anything else was introduced. |
---|
| 624 | + */ |
---|
844 | 625 | static void __init arch_mem_init(char **cmdline_p) |
---|
845 | 626 | { |
---|
846 | | - struct memblock_region *reg; |
---|
847 | | - extern void plat_mem_setup(void); |
---|
848 | | - |
---|
849 | | - /* |
---|
850 | | - * Initialize boot_command_line to an innocuous but non-empty string in |
---|
851 | | - * order to prevent early_init_dt_scan_chosen() from copying |
---|
852 | | - * CONFIG_CMDLINE into it without our knowledge. We handle |
---|
853 | | - * CONFIG_CMDLINE ourselves below & don't want to duplicate its |
---|
854 | | - * content because repeating arguments can be problematic. |
---|
855 | | - */ |
---|
856 | | - strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE); |
---|
857 | | - |
---|
858 | 627 | /* call board setup routine */ |
---|
859 | 628 | plat_mem_setup(); |
---|
| 629 | + memblock_set_bottom_up(true); |
---|
860 | 630 | |
---|
861 | | - /* |
---|
862 | | - * Make sure all kernel memory is in the maps. The "UP" and |
---|
863 | | - * "DOWN" are opposite for initdata since if it crosses over |
---|
864 | | - * into another memory section you don't want that to be |
---|
865 | | - * freed when the initdata is freed. |
---|
866 | | - */ |
---|
867 | | - arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT, |
---|
868 | | - PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT, |
---|
869 | | - BOOT_MEM_RAM); |
---|
870 | | - arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, |
---|
871 | | - PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, |
---|
872 | | - BOOT_MEM_INIT_RAM); |
---|
873 | | - |
---|
874 | | - pr_info("Determined physical RAM map:\n"); |
---|
875 | | - print_memory_map(); |
---|
876 | | - |
---|
877 | | -#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) |
---|
878 | | - strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
---|
879 | | -#else |
---|
880 | | - if ((USE_PROM_CMDLINE && arcs_cmdline[0]) || |
---|
881 | | - (USE_DTB_CMDLINE && !boot_command_line[0])) |
---|
882 | | - strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); |
---|
883 | | - |
---|
884 | | - if (EXTEND_WITH_PROM && arcs_cmdline[0]) { |
---|
885 | | - if (boot_command_line[0]) |
---|
886 | | - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); |
---|
887 | | - strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); |
---|
888 | | - } |
---|
889 | | - |
---|
890 | | -#if defined(CONFIG_CMDLINE_BOOL) |
---|
891 | | - if (builtin_cmdline[0]) { |
---|
892 | | - if (boot_command_line[0]) |
---|
893 | | - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); |
---|
894 | | - strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
---|
895 | | - } |
---|
896 | | - |
---|
897 | | - if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) { |
---|
898 | | - if (boot_command_line[0]) |
---|
899 | | - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); |
---|
900 | | - strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); |
---|
901 | | - } |
---|
902 | | -#endif |
---|
903 | | -#endif |
---|
| 631 | + bootcmdline_init(); |
---|
904 | 632 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); |
---|
905 | | - |
---|
906 | 633 | *cmdline_p = command_line; |
---|
907 | 634 | |
---|
908 | 635 | parse_early_param(); |
---|
909 | 636 | |
---|
910 | | - if (usermem) { |
---|
911 | | - pr_info("User-defined physical RAM map:\n"); |
---|
912 | | - print_memory_map(); |
---|
913 | | - } |
---|
| 637 | + if (usermem) |
---|
| 638 | + pr_info("User-defined physical RAM map overwrite\n"); |
---|
| 639 | + |
---|
| 640 | + check_kernel_sections_mem(); |
---|
914 | 641 | |
---|
915 | 642 | early_init_fdt_reserve_self(); |
---|
916 | 643 | early_init_fdt_scan_reserved_mem(); |
---|
917 | 644 | |
---|
| 645 | +#ifndef CONFIG_NUMA |
---|
| 646 | + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); |
---|
| 647 | +#endif |
---|
918 | 648 | bootmem_init(); |
---|
| 649 | + |
---|
| 650 | + /* |
---|
| 651 | + * Prevent memblock from allocating high memory. |
---|
| 652 | + * This cannot be done before max_low_pfn is detected, so up |
---|
| 653 | + * to this point it is only possible to reserve physical memory |
---|
| 654 | + * with memblock_reserve; memblock_alloc* can be used |
---|
| 655 | + * only after this point |
---|
| 656 | + */ |
---|
| 657 | + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); |
---|
| 658 | + |
---|
919 | 659 | #ifdef CONFIG_PROC_VMCORE |
---|
920 | 660 | if (setup_elfcorehdr && setup_elfcorehdr_size) { |
---|
921 | 661 | printk(KERN_INFO "kdump reserved memory at %lx-%lx\n", |
---|
922 | 662 | setup_elfcorehdr, setup_elfcorehdr_size); |
---|
923 | | - reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size, |
---|
924 | | - BOOTMEM_DEFAULT); |
---|
| 663 | + memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size); |
---|
925 | 664 | } |
---|
926 | 665 | #endif |
---|
927 | 666 | |
---|
928 | 667 | mips_parse_crashkernel(); |
---|
929 | 668 | #ifdef CONFIG_KEXEC |
---|
930 | 669 | if (crashk_res.start != crashk_res.end) |
---|
931 | | - reserve_bootmem(crashk_res.start, |
---|
932 | | - crashk_res.end - crashk_res.start + 1, |
---|
933 | | - BOOTMEM_DEFAULT); |
---|
| 670 | + memblock_reserve(crashk_res.start, resource_size(&crashk_res)); |
---|
934 | 671 | #endif |
---|
935 | 672 | device_tree_init(); |
---|
936 | 673 | |
---|
.. | .. |
---|
947 | 684 | plat_swiotlb_setup(); |
---|
948 | 685 | |
---|
949 | 686 | dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); |
---|
950 | | - /* Tell bootmem about cma reserved memblock section */ |
---|
951 | | - for_each_memblock(reserved, reg) |
---|
952 | | - if (reg->size != 0) |
---|
953 | | - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); |
---|
954 | 687 | |
---|
955 | | - reserve_bootmem_region(__pa_symbol(&__nosave_begin), |
---|
956 | | - __pa_symbol(&__nosave_end)); /* Reserve for hibernation */ |
---|
| 688 | + /* Reserve for hibernation. */ |
---|
| 689 | + memblock_reserve(__pa_symbol(&__nosave_begin), |
---|
| 690 | + __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); |
---|
| 691 | + |
---|
| 692 | + fdt_init_reserved_mem(); |
---|
| 693 | + |
---|
| 694 | + memblock_dump_all(); |
---|
| 695 | + |
---|
| 696 | + early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn)); |
---|
957 | 697 | } |
---|
958 | 698 | |
---|
959 | 699 | static void __init resource_init(void) |
---|
960 | 700 | { |
---|
961 | | - int i; |
---|
| 701 | + phys_addr_t start, end; |
---|
| 702 | + u64 i; |
---|
962 | 703 | |
---|
963 | 704 | if (UNCAC_BASE != IO_BASE) |
---|
964 | 705 | return; |
---|
.. | .. |
---|
970 | 711 | bss_resource.start = __pa_symbol(&__bss_start); |
---|
971 | 712 | bss_resource.end = __pa_symbol(&__bss_stop) - 1; |
---|
972 | 713 | |
---|
973 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
| 714 | + for_each_mem_range(i, &start, &end) { |
---|
974 | 715 | struct resource *res; |
---|
975 | | - unsigned long start, end; |
---|
976 | 716 | |
---|
977 | | - start = boot_mem_map.map[i].addr; |
---|
978 | | - end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1; |
---|
979 | | - if (start >= HIGHMEM_START) |
---|
980 | | - continue; |
---|
981 | | - if (end >= HIGHMEM_START) |
---|
982 | | - end = HIGHMEM_START - 1; |
---|
983 | | - |
---|
984 | | - res = alloc_bootmem(sizeof(struct resource)); |
---|
| 717 | + res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); |
---|
| 718 | + if (!res) |
---|
| 719 | + panic("%s: Failed to allocate %zu bytes\n", __func__, |
---|
| 720 | + sizeof(struct resource)); |
---|
985 | 721 | |
---|
986 | 722 | res->start = start; |
---|
987 | | - res->end = end; |
---|
988 | | - res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
---|
989 | | - |
---|
990 | | - switch (boot_mem_map.map[i].type) { |
---|
991 | | - case BOOT_MEM_RAM: |
---|
992 | | - case BOOT_MEM_INIT_RAM: |
---|
993 | | - case BOOT_MEM_ROM_DATA: |
---|
994 | | - res->name = "System RAM"; |
---|
995 | | - res->flags |= IORESOURCE_SYSRAM; |
---|
996 | | - break; |
---|
997 | | - case BOOT_MEM_RESERVED: |
---|
998 | | - default: |
---|
999 | | - res->name = "reserved"; |
---|
1000 | | - } |
---|
| 723 | + /* |
---|
| 724 | + * In memblock, end points to the first byte after the |
---|
| 725 | + * range while in resources, end points to the last byte in |
---|
| 726 | + * the range. |
---|
| 727 | + */ |
---|
| 728 | + res->end = end - 1; |
---|
| 729 | + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; |
---|
| 730 | + res->name = "System RAM"; |
---|
1001 | 731 | |
---|
1002 | 732 | request_resource(&iomem_resource, res); |
---|
1003 | 733 | |
---|
.. | .. |
---|
1048 | 778 | #if defined(CONFIG_VT) |
---|
1049 | 779 | #if defined(CONFIG_VGA_CONSOLE) |
---|
1050 | 780 | conswitchp = &vga_con; |
---|
1051 | | -#elif defined(CONFIG_DUMMY_CONSOLE) |
---|
1052 | | - conswitchp = &dummy_con; |
---|
1053 | 781 | #endif |
---|
1054 | 782 | #endif |
---|
1055 | 783 | |
---|
1056 | 784 | arch_mem_init(cmdline_p); |
---|
| 785 | + dmi_setup(); |
---|
1057 | 786 | |
---|
1058 | 787 | resource_init(); |
---|
1059 | 788 | plat_smp_setup(); |
---|
.. | .. |
---|
1074 | 803 | struct dentry *mips_debugfs_dir; |
---|
1075 | 804 | static int __init debugfs_mips(void) |
---|
1076 | 805 | { |
---|
1077 | | - struct dentry *d; |
---|
1078 | | - |
---|
1079 | | - d = debugfs_create_dir("mips", NULL); |
---|
1080 | | - if (!d) |
---|
1081 | | - return -ENOMEM; |
---|
1082 | | - mips_debugfs_dir = d; |
---|
| 806 | + mips_debugfs_dir = debugfs_create_dir("mips", NULL); |
---|
1083 | 807 | return 0; |
---|
1084 | 808 | } |
---|
1085 | 809 | arch_initcall(debugfs_mips); |
---|
1086 | 810 | #endif |
---|
1087 | 811 | |
---|
1088 | | -#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT) |
---|
| 812 | +#ifdef CONFIG_DMA_MAYBE_COHERENT |
---|
1089 | 813 | /* User defined DMA coherency from command line. */ |
---|
1090 | 814 | enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT; |
---|
1091 | 815 | EXPORT_SYMBOL_GPL(coherentio); |
---|
1092 | | -int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */ |
---|
| 816 | +int hw_coherentio; /* Actual hardware supported DMA coherency setting. */ |
---|
1093 | 817 | |
---|
1094 | 818 | static int __init setcoherentio(char *str) |
---|
1095 | 819 | { |
---|
.. | .. |
---|
1107 | 831 | } |
---|
1108 | 832 | early_param("nocoherentio", setnocoherentio); |
---|
1109 | 833 | #endif |
---|
| 834 | + |
---|
| 835 | +void __init arch_cpu_finalize_init(void) |
---|
| 836 | +{ |
---|
| 837 | + unsigned int cpu = smp_processor_id(); |
---|
| 838 | + |
---|
| 839 | + cpu_data[cpu].udelay_val = loops_per_jiffy; |
---|
| 840 | + check_bugs32(); |
---|
| 841 | + |
---|
| 842 | + if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) |
---|
| 843 | + check_bugs64(); |
---|
| 844 | +} |
---|