.. | .. |
---|
22 | 22 | #include <linux/ptrace.h> |
---|
23 | 23 | #include <linux/mman.h> |
---|
24 | 24 | #include <linux/mm.h> |
---|
25 | | -#include <linux/bootmem.h> |
---|
| 25 | +#include <linux/memblock.h> |
---|
26 | 26 | #include <linux/highmem.h> |
---|
27 | 27 | #include <linux/swap.h> |
---|
28 | 28 | #include <linux/proc_fs.h> |
---|
.. | .. |
---|
32 | 32 | #include <linux/kcore.h> |
---|
33 | 33 | #include <linux/initrd.h> |
---|
34 | 34 | |
---|
35 | | -#include <asm/asm-offsets.h> |
---|
36 | 35 | #include <asm/bootinfo.h> |
---|
37 | 36 | #include <asm/cachectl.h> |
---|
38 | 37 | #include <asm/cpu.h> |
---|
.. | .. |
---|
41 | 40 | #include <asm/maar.h> |
---|
42 | 41 | #include <asm/mmu_context.h> |
---|
43 | 42 | #include <asm/sections.h> |
---|
44 | | -#include <asm/pgtable.h> |
---|
45 | 43 | #include <asm/pgalloc.h> |
---|
46 | 44 | #include <asm/tlb.h> |
---|
47 | 45 | #include <asm/fixmap.h> |
---|
.. | .. |
---|
85 | 83 | static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) |
---|
86 | 84 | { |
---|
87 | 85 | enum fixed_addresses idx; |
---|
| 86 | + unsigned int old_mmid; |
---|
88 | 87 | unsigned long vaddr, flags, entrylo; |
---|
89 | 88 | unsigned long old_ctx; |
---|
90 | 89 | pte_t pte; |
---|
.. | .. |
---|
111 | 110 | write_c0_entryhi(vaddr & (PAGE_MASK << 1)); |
---|
112 | 111 | write_c0_entrylo0(entrylo); |
---|
113 | 112 | write_c0_entrylo1(entrylo); |
---|
| 113 | + if (cpu_has_mmid) { |
---|
| 114 | + old_mmid = read_c0_memorymapid(); |
---|
| 115 | + write_c0_memorymapid(MMID_KERNEL_WIRED); |
---|
| 116 | + } |
---|
114 | 117 | #ifdef CONFIG_XPA |
---|
115 | 118 | if (cpu_has_xpa) { |
---|
116 | 119 | entrylo = (pte.pte_low & _PFNX_MASK); |
---|
.. | .. |
---|
125 | 128 | tlb_write_indexed(); |
---|
126 | 129 | tlbw_use_hazard(); |
---|
127 | 130 | write_c0_entryhi(old_ctx); |
---|
| 131 | + if (cpu_has_mmid) |
---|
| 132 | + write_c0_memorymapid(old_mmid); |
---|
128 | 133 | local_irq_restore(flags); |
---|
129 | 134 | |
---|
130 | 135 | return (void*) vaddr; |
---|
.. | .. |
---|
233 | 238 | unsigned long vaddr; |
---|
234 | 239 | |
---|
235 | 240 | vaddr = start; |
---|
236 | | - i = __pgd_offset(vaddr); |
---|
237 | | - j = __pud_offset(vaddr); |
---|
238 | | - k = __pmd_offset(vaddr); |
---|
| 241 | + i = pgd_index(vaddr); |
---|
| 242 | + j = pud_index(vaddr); |
---|
| 243 | + k = pmd_index(vaddr); |
---|
239 | 244 | pgd = pgd_base + i; |
---|
240 | 245 | |
---|
241 | 246 | for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) { |
---|
.. | .. |
---|
244 | 249 | pmd = (pmd_t *)pud; |
---|
245 | 250 | for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) { |
---|
246 | 251 | if (pmd_none(*pmd)) { |
---|
247 | | - pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); |
---|
| 252 | + pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, |
---|
| 253 | + PAGE_SIZE); |
---|
| 254 | + if (!pte) |
---|
| 255 | + panic("%s: Failed to allocate %lu bytes align=%lx\n", |
---|
| 256 | + __func__, PAGE_SIZE, |
---|
| 257 | + PAGE_SIZE); |
---|
| 258 | + |
---|
248 | 259 | set_pmd(pmd, __pmd((unsigned long)pte)); |
---|
249 | 260 | BUG_ON(pte != pte_offset_kernel(pmd, 0)); |
---|
250 | 261 | } |
---|
.. | .. |
---|
257 | 268 | #endif |
---|
258 | 269 | } |
---|
259 | 270 | |
---|
| 271 | +struct maar_walk_info { |
---|
| 272 | + struct maar_config cfg[16]; |
---|
| 273 | + unsigned int num_cfg; |
---|
| 274 | +}; |
---|
| 275 | + |
---|
| 276 | +static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages, |
---|
| 277 | + void *data) |
---|
| 278 | +{ |
---|
| 279 | + struct maar_walk_info *wi = data; |
---|
| 280 | + struct maar_config *cfg = &wi->cfg[wi->num_cfg]; |
---|
| 281 | + unsigned int maar_align; |
---|
| 282 | + |
---|
| 283 | + /* MAAR registers hold physical addresses right shifted by 4 bits */ |
---|
| 284 | + maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4); |
---|
| 285 | + |
---|
| 286 | + /* Fill in the MAAR config entry */ |
---|
| 287 | + cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align); |
---|
| 288 | + cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1; |
---|
| 289 | + cfg->attrs = MIPS_MAAR_S; |
---|
| 290 | + |
---|
| 291 | + /* Ensure we don't overflow the cfg array */ |
---|
| 292 | + if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg))) |
---|
| 293 | + wi->num_cfg++; |
---|
| 294 | + |
---|
| 295 | + return 0; |
---|
| 296 | +} |
---|
| 297 | + |
---|
| 298 | + |
---|
260 | 299 | unsigned __weak platform_maar_init(unsigned num_pairs) |
---|
261 | 300 | { |
---|
262 | | - struct maar_config cfg[BOOT_MEM_MAP_MAX]; |
---|
263 | | - unsigned i, num_configured, num_cfg = 0; |
---|
| 301 | + unsigned int num_configured; |
---|
| 302 | + struct maar_walk_info wi; |
---|
264 | 303 | |
---|
265 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
266 | | - switch (boot_mem_map.map[i].type) { |
---|
267 | | - case BOOT_MEM_RAM: |
---|
268 | | - case BOOT_MEM_INIT_RAM: |
---|
269 | | - break; |
---|
270 | | - default: |
---|
271 | | - continue; |
---|
272 | | - } |
---|
| 304 | + wi.num_cfg = 0; |
---|
| 305 | + walk_system_ram_range(0, max_pfn, &wi, maar_res_walk); |
---|
273 | 306 | |
---|
274 | | - /* Round lower up */ |
---|
275 | | - cfg[num_cfg].lower = boot_mem_map.map[i].addr; |
---|
276 | | - cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff; |
---|
277 | | - |
---|
278 | | - /* Round upper down */ |
---|
279 | | - cfg[num_cfg].upper = boot_mem_map.map[i].addr + |
---|
280 | | - boot_mem_map.map[i].size; |
---|
281 | | - cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1; |
---|
282 | | - |
---|
283 | | - cfg[num_cfg].attrs = MIPS_MAAR_S; |
---|
284 | | - num_cfg++; |
---|
285 | | - } |
---|
286 | | - |
---|
287 | | - num_configured = maar_config(cfg, num_cfg, num_pairs); |
---|
288 | | - if (num_configured < num_cfg) |
---|
289 | | - pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", |
---|
290 | | - num_pairs, num_cfg); |
---|
| 307 | + num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs); |
---|
| 308 | + if (num_configured < wi.num_cfg) |
---|
| 309 | + pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n", |
---|
| 310 | + num_pairs, wi.num_cfg); |
---|
291 | 311 | |
---|
292 | 312 | return num_configured; |
---|
293 | 313 | } |
---|
.. | .. |
---|
337 | 357 | write_c0_maari(i); |
---|
338 | 358 | back_to_back_c0_hazard(); |
---|
339 | 359 | upper = read_c0_maar(); |
---|
| 360 | +#ifdef CONFIG_XPA |
---|
| 361 | + upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT; |
---|
| 362 | +#endif |
---|
340 | 363 | |
---|
341 | 364 | write_c0_maari(i + 1); |
---|
342 | 365 | back_to_back_c0_hazard(); |
---|
343 | 366 | lower = read_c0_maar(); |
---|
| 367 | +#ifdef CONFIG_XPA |
---|
| 368 | + lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT; |
---|
| 369 | +#endif |
---|
344 | 370 | |
---|
345 | 371 | attr = lower & upper; |
---|
346 | 372 | lower = (lower & MIPS_MAAR_ADDR) << 4; |
---|
347 | 373 | upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff; |
---|
348 | 374 | |
---|
349 | 375 | pr_info(" [%d]: ", i / 2); |
---|
350 | | - if (!(attr & MIPS_MAAR_VL)) { |
---|
| 376 | + if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) { |
---|
351 | 377 | pr_cont("disabled\n"); |
---|
352 | 378 | continue; |
---|
353 | 379 | } |
---|
.. | .. |
---|
370 | 396 | } |
---|
371 | 397 | |
---|
372 | 398 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
---|
373 | | -int page_is_ram(unsigned long pagenr) |
---|
374 | | -{ |
---|
375 | | - int i; |
---|
376 | | - |
---|
377 | | - for (i = 0; i < boot_mem_map.nr_map; i++) { |
---|
378 | | - unsigned long addr, end; |
---|
379 | | - |
---|
380 | | - switch (boot_mem_map.map[i].type) { |
---|
381 | | - case BOOT_MEM_RAM: |
---|
382 | | - case BOOT_MEM_INIT_RAM: |
---|
383 | | - break; |
---|
384 | | - default: |
---|
385 | | - /* not usable memory */ |
---|
386 | | - continue; |
---|
387 | | - } |
---|
388 | | - |
---|
389 | | - addr = PFN_UP(boot_mem_map.map[i].addr); |
---|
390 | | - end = PFN_DOWN(boot_mem_map.map[i].addr + |
---|
391 | | - boot_mem_map.map[i].size); |
---|
392 | | - |
---|
393 | | - if (pagenr >= addr && pagenr < end) |
---|
394 | | - return 1; |
---|
395 | | - } |
---|
396 | | - |
---|
397 | | - return 0; |
---|
398 | | -} |
---|
399 | | - |
---|
400 | 399 | void __init paging_init(void) |
---|
401 | 400 | { |
---|
402 | 401 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
---|
.. | .. |
---|
424 | 423 | } |
---|
425 | 424 | #endif |
---|
426 | 425 | |
---|
427 | | - free_area_init_nodes(max_zone_pfns); |
---|
| 426 | + free_area_init(max_zone_pfns); |
---|
428 | 427 | } |
---|
429 | 428 | |
---|
430 | 429 | #ifdef CONFIG_64BIT |
---|
431 | 430 | static struct kcore_list kcore_kseg0; |
---|
432 | 431 | #endif |
---|
433 | 432 | |
---|
434 | | -static inline void mem_init_free_highmem(void) |
---|
| 433 | +static inline void __init mem_init_free_highmem(void) |
---|
435 | 434 | { |
---|
436 | 435 | #ifdef CONFIG_HIGHMEM |
---|
437 | 436 | unsigned long tmp; |
---|
.. | .. |
---|
442 | 441 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { |
---|
443 | 442 | struct page *page = pfn_to_page(tmp); |
---|
444 | 443 | |
---|
445 | | - if (!page_is_ram(tmp)) |
---|
| 444 | + if (!memblock_is_memory(PFN_PHYS(tmp))) |
---|
446 | 445 | SetPageReserved(page); |
---|
447 | 446 | else |
---|
448 | 447 | free_highmem_page(page); |
---|
.. | .. |
---|
452 | 451 | |
---|
453 | 452 | void __init mem_init(void) |
---|
454 | 453 | { |
---|
| 454 | + /* |
---|
| 455 | + * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE |
---|
| 456 | + * bits to hold a full 32b physical address on MIPS32 systems. |
---|
| 457 | + */ |
---|
| 458 | + BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT)); |
---|
| 459 | + |
---|
455 | 460 | #ifdef CONFIG_HIGHMEM |
---|
456 | 461 | #ifdef CONFIG_DISCONTIGMEM |
---|
457 | 462 | #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet" |
---|
.. | .. |
---|
463 | 468 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); |
---|
464 | 469 | |
---|
465 | 470 | maar_init(); |
---|
466 | | - free_all_bootmem(); |
---|
| 471 | + memblock_free_all(); |
---|
467 | 472 | setup_zero_pages(); /* Setup zeroed pages. */ |
---|
468 | 473 | mem_init_free_highmem(); |
---|
469 | 474 | mem_init_print_info(NULL); |
---|
.. | .. |
---|
492 | 497 | printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); |
---|
493 | 498 | } |
---|
494 | 499 | |
---|
495 | | -#ifdef CONFIG_BLK_DEV_INITRD |
---|
496 | | -void free_initrd_mem(unsigned long start, unsigned long end) |
---|
497 | | -{ |
---|
498 | | - free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
---|
499 | | - "initrd"); |
---|
500 | | -} |
---|
501 | | -#endif |
---|
502 | | - |
---|
503 | 500 | void (*free_init_pages_eva)(void *begin, void *end) = NULL; |
---|
504 | 501 | |
---|
505 | 502 | void __ref free_initmem(void) |
---|
.. | .. |
---|
516 | 513 | free_initmem_default(POISON_FREE_INITMEM); |
---|
517 | 514 | } |
---|
518 | 515 | |
---|
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/* Offset of each CPU's per-CPU area from __per_cpu_start. */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

/* Distance metric between two CPUs: the NUMA distance of their nodes. */
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

/* Boot-time allocator for the first per-CPU chunk, NUMA-node aware. */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
				   size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      cpu_to_node(cpu));
}

/* Release memory handed out by pcpu_fc_alloc(). */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

/*
 * Build the first per-CPU chunk with the embed allocator, then record
 * each possible CPU's offset so per-CPU accesses resolve correctly.
 */
void __init setup_per_cpu_areas(void)
{
	unsigned long base_delta;
	unsigned int cpu;
	int err;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	err = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				     PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				     pcpu_cpu_distance,
				     pcpu_fc_alloc, pcpu_fc_free);
	if (err < 0)
		panic("Failed to initialize percpu areas.");

	base_delta = (unsigned long)pcpu_base_addr -
		     (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = base_delta + pcpu_unit_offsets[cpu];
}
#endif
---|
| 560 | + |
---|
519 | 561 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
---|
520 | 562 | unsigned long pgd_current[NR_CPUS]; |
---|
521 | 563 | #endif |
---|
522 | 564 | |
---|
523 | 565 | /* |
---|
524 | | - * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER |
---|
525 | | - * are constants. So we use the variants from asm-offset.h until that gcc |
---|
526 | | - * will officially be retired. |
---|
527 | | - * |
---|
528 | 566 | * Align swapper_pg_dir in to 64K, allows its address to be loaded |
---|
529 | 567 | * with a single LUI instruction in the TLB handlers. If we used |
---|
530 | 568 | * __aligned(64K), its size would get rounded up to the alignment |
---|
531 | 569 | * size, and waste space. So we place it in its own section and align |
---|
532 | 570 | * it in the linker script. |
---|
533 | 571 | */ |
---|
534 | | -pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir); |
---|
| 572 | +pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir"); |
---|
535 | 573 | #ifndef __PAGETABLE_PUD_FOLDED |
---|
536 | 574 | pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss; |
---|
537 | 575 | #endif |
---|