hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/mips/mm/init.c
@@ -22,7 +22,7 @@
 #include <linux/ptrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
@@ -32,7 +32,6 @@
 #include <linux/kcore.h>
 #include <linux/initrd.h>
 
-#include <asm/asm-offsets.h>
 #include <asm/bootinfo.h>
 #include <asm/cachectl.h>
 #include <asm/cpu.h>
@@ -41,7 +40,6 @@
 #include <asm/maar.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
....@@ -85,6 +83,7 @@
8583 static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
8684 {
8785 enum fixed_addresses idx;
86
+ unsigned int old_mmid;
8887 unsigned long vaddr, flags, entrylo;
8988 unsigned long old_ctx;
9089 pte_t pte;
....@@ -111,6 +110,10 @@
111110 write_c0_entryhi(vaddr & (PAGE_MASK << 1));
112111 write_c0_entrylo0(entrylo);
113112 write_c0_entrylo1(entrylo);
113
+ if (cpu_has_mmid) {
114
+ old_mmid = read_c0_memorymapid();
115
+ write_c0_memorymapid(MMID_KERNEL_WIRED);
116
+ }
114117 #ifdef CONFIG_XPA
115118 if (cpu_has_xpa) {
116119 entrylo = (pte.pte_low & _PFNX_MASK);
@@ -125,6 +128,8 @@
 	tlb_write_indexed();
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
+	if (cpu_has_mmid)
+		write_c0_memorymapid(old_mmid);
 	local_irq_restore(flags);
 
 	return (void*) vaddr;
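
The two hunks above bracket the wired TLB write with a MemoryMapID (MMID) switch: on cores with cpu_has_mmid the address-space identifier lives in the MemoryMapID register rather than in EntryHi, so the kernel installs MMID_KERNEL_WIRED for the duration of the write and then restores the caller's MMID. A minimal sketch of the save/restore pattern, using the <asm/mipsregs.h> accessors seen above (the helper names are illustrative, not kernel API):

	/* Illustrative helpers only; the kernel open-codes this pattern. */
	static unsigned int wired_mmid_enter(void)
	{
		unsigned int old = 0;

		if (cpu_has_mmid) {
			old = read_c0_memorymapid();
			write_c0_memorymapid(MMID_KERNEL_WIRED);
		}
		return old;
	}

	static void wired_mmid_leave(unsigned int old)
	{
		if (cpu_has_mmid)
			write_c0_memorymapid(old);
	}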
@@ -233,9 +238,9 @@
 	unsigned long vaddr;
 
 	vaddr = start;
-	i = __pgd_offset(vaddr);
-	j = __pud_offset(vaddr);
-	k = __pmd_offset(vaddr);
+	i = pgd_index(vaddr);
+	j = pud_index(vaddr);
+	k = pmd_index(vaddr);
 	pgd = pgd_base + i;
 
 	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
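
The MIPS-private __pgd_offset()/__pud_offset()/__pmd_offset() helpers are replaced by the generic index macros. The effect is the same: shift the virtual address by that level's shift and mask with the number of entries at the level. Simplified forms, consistent with the generic page-table headers (see include/linux/pgtable.h for the real definitions):

	#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
	#define pud_index(addr)	(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
	#define pmd_index(addr)	(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))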
@@ -244,7 +249,13 @@
 			pmd = (pmd_t *)pud;
 			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
-					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+									   PAGE_SIZE);
+					if (!pte)
+						panic("%s: Failed to allocate %lu bytes align=%lx\n",
+						      __func__, PAGE_SIZE,
+						      PAGE_SIZE);
+
 					set_pmd(pmd, __pmd((unsigned long)pte));
 					BUG_ON(pte != pte_offset_kernel(pmd, 0));
 				}
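
alloc_bootmem_low_pages() zeroed its result and panicked internally on failure; memblock_alloc_low() also returns zeroed memory, but it reports failure by returning NULL, so the caller now has to check and panic itself. The general replacement pattern (sketch, assuming <linux/memblock.h>):

	void *p = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);	/* size, align */

	if (!p)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);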
@@ -257,37 +268,46 @@
 #endif
 }
 
+struct maar_walk_info {
+	struct maar_config cfg[16];
+	unsigned int num_cfg;
+};
+
+static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
+			 void *data)
+{
+	struct maar_walk_info *wi = data;
+	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
+	unsigned int maar_align;
+
+	/* MAAR registers hold physical addresses right shifted by 4 bits */
+	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);
+
+	/* Fill in the MAAR config entry */
+	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
+	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
+	cfg->attrs = MIPS_MAAR_S;
+
+	/* Ensure we don't overflow the cfg array */
+	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
+		wi->num_cfg++;
+
+	return 0;
+}
+
+
 unsigned __weak platform_maar_init(unsigned num_pairs)
 {
-	struct maar_config cfg[BOOT_MEM_MAP_MAX];
-	unsigned i, num_configured, num_cfg = 0;
+	unsigned int num_configured;
+	struct maar_walk_info wi;
 
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RAM:
-		case BOOT_MEM_INIT_RAM:
-			break;
-		default:
-			continue;
-		}
+	wi.num_cfg = 0;
+	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);
 
-		/* Round lower up */
-		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
-
-		/* Round upper down */
-		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
-					boot_mem_map.map[i].size;
-		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
-
-		cfg[num_cfg].attrs = MIPS_MAAR_S;
-		num_cfg++;
-	}
-
-	num_configured = maar_config(cfg, num_cfg, num_pairs);
-	if (num_configured < num_cfg)
-		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
-			num_pairs, num_cfg);
+	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
+	if (num_configured < wi.num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
+			num_pairs, wi.num_cfg);
 
 	return num_configured;
 }
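
platform_maar_init() no longer iterates the MIPS-private boot_mem_map: walk_system_ram_range() invokes the supplied function once per contiguous range of "System RAM" page frames in the resource tree, passing the opaque data pointer through, and a non-zero return stops the walk early. That is what lets maar_res_walk() build one MAAR config entry per RAM region. A self-contained sketch of the callback contract (illustrative only):

	#include <linux/ioport.h>
	#include <linux/pfn.h>
	#include <linux/printk.h>

	/* Called once per contiguous System RAM range. */
	static int dump_ram_range(unsigned long start_pfn, unsigned long nr_pages,
				  void *data)
	{
		phys_addr_t start = PFN_PHYS(start_pfn);
		phys_addr_t end = PFN_PHYS(start_pfn + nr_pages) - 1;

		pr_info("RAM: %pa - %pa\n", &start, &end);
		return 0;	/* non-zero would terminate the walk */
	}

	/* Usage: walk_system_ram_range(0, max_pfn, NULL, dump_ram_range); */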
@@ -337,17 +357,23 @@
 		write_c0_maari(i);
 		back_to_back_c0_hazard();
 		upper = read_c0_maar();
+#ifdef CONFIG_XPA
+		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
 
 		write_c0_maari(i + 1);
 		back_to_back_c0_hazard();
 		lower = read_c0_maar();
+#ifdef CONFIG_XPA
+		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
 
 		attr = lower & upper;
 		lower = (lower & MIPS_MAAR_ADDR) << 4;
 		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
 
 		pr_info("  [%d]: ", i / 2);
-		if (!(attr & MIPS_MAAR_VL)) {
+		if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
 			pr_cont("disabled\n");
 			continue;
 		}
@@ -370,33 +396,6 @@
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-int page_is_ram(unsigned long pagenr)
-{
-	int i;
-
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		unsigned long addr, end;
-
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RAM:
-		case BOOT_MEM_INIT_RAM:
-			break;
-		default:
-			/* not usable memory */
-			continue;
-		}
-
-		addr = PFN_UP(boot_mem_map.map[i].addr);
-		end = PFN_DOWN(boot_mem_map.map[i].addr +
-			       boot_mem_map.map[i].size);
-
-		if (pagenr >= addr && pagenr < end)
-			return 1;
-	}
-
-	return 0;
-}
-
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -424,14 +423,14 @@
 	}
 #endif
 
-	free_area_init_nodes(max_zone_pfns);
+	free_area_init(max_zone_pfns);
 }
 
 #ifdef CONFIG_64BIT
 static struct kcore_list kcore_kseg0;
 #endif
 
-static inline void mem_init_free_highmem(void)
+static inline void __init mem_init_free_highmem(void)
 {
 #ifdef CONFIG_HIGHMEM
 	unsigned long tmp;
@@ -442,7 +441,7 @@
 	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
 		struct page *page = pfn_to_page(tmp);
 
-		if (!page_is_ram(tmp))
+		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
 		else
 			free_highmem_page(page);
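
With boot_mem_map gone, the open-coded page_is_ram() deleted above lost its data source; memblock_is_memory() answers the same question straight from memblock's memory list. Note the unit change: page_is_ram() took a page frame number, while memblock_is_memory() takes a physical address, hence the PFN_PHYS() conversion. Roughly:

	/* Sketch: the old pfn-based query expressed via memblock
	 * (hypothetical helper for illustration). */
	static inline bool page_is_ram_equiv(unsigned long pfn)
	{
		return memblock_is_memory(PFN_PHYS(pfn));
	}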
@@ -452,6 +451,12 @@
 
 void __init mem_init(void)
 {
+	/*
+	 * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
+	 * bits to hold a full 32b physical address on MIPS32 systems.
+	 */
+	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
+
 #ifdef CONFIG_HIGHMEM
 #ifdef CONFIG_DISCONTIGMEM
 #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
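
BUILD_BUG_ON() is a compile-time assertion: the build fails if its condition evaluates to a constant true, so a MIPS32 configuration whose PTEs cannot hold a full 32-bit physical address is rejected at build time instead of misbehaving at run time. Both operands here (IS_ENABLED(CONFIG_32BIT) and the shift comparison) are compile-time constants, so the check generates no code. A usage sketch, assuming <linux/build_bug.h>:

	#include <linux/build_bug.h>

	static void __init example_checks(void)	/* hypothetical helper */
	{
		/* Compiles to nothing when false; breaks the build when true. */
		BUILD_BUG_ON(sizeof(long) < sizeof(void *));
	}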
@@ -463,7 +468,7 @@
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
 	maar_init();
-	free_all_bootmem();
+	memblock_free_all();
 	setup_zero_pages();	/* Setup zeroed pages.  */
 	mem_init_free_highmem();
 	mem_init_print_info(NULL);
@@ -492,14 +497,6 @@
 	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
-			   "initrd");
-}
-#endif
-
 void (*free_init_pages_eva)(void *begin, void *end) = NULL;
 
 void __ref free_initmem(void)
@@ -516,22 +513,63 @@
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+	return node_distance(cpu_to_node(from), cpu_to_node(to));
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+				   size_t align)
+{
+	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
+				      MEMBLOCK_ALLOC_ACCESSIBLE,
+				      cpu_to_node(cpu));
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
+
+	/*
+	 * Always reserve area for module percpu variables.  That's
+	 * what the legacy allocator did.
+	 */
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+				    pcpu_cpu_distance,
+				    pcpu_fc_alloc, pcpu_fc_free);
+	if (rc < 0)
+		panic("Failed to initialize percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 unsigned long pgd_current[NR_CPUS];
 #endif
 
 /*
- * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
- * are constants.  So we use the variants from asm-offset.h until that gcc
- * will officially be retired.
- *
  * Align swapper_pg_dir in to 64K, allows its address to be loaded
  * with a single LUI instruction in the TLB handlers.  If we used
  * __aligned(64K), its size would get rounded up to the alignment
  * size, and waste space.  So we place it in its own section and align
  * it in the linker script.
  */
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
 #ifndef __PAGETABLE_PUD_FOLDED
 pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
 #endif
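
The new setup_per_cpu_areas() uses the percpu "embed" first-chunk allocator: pcpu_embed_first_chunk() carves one unit per CPU out of memblock (grouping CPUs by pcpu_cpu_distance()), and the final loop records each CPU's displacement from the linked __per_cpu_start so that static per-cpu variables can be relocated per CPU. With CONFIG_HAVE_SETUP_PER_CPU_AREA set, the generic per-cpu machinery consumes these offsets; a simplified sketch of that consumer side (see include/asm-generic/percpu.h for the real definitions):

	/* Simplified sketch of how the offsets recorded above are used. */
	extern unsigned long __per_cpu_offset[NR_CPUS];
	#define per_cpu_offset(cpu)	(__per_cpu_offset[cpu])

	/*
	 * per_cpu_ptr(&var, cpu) then reduces to the static address of
	 * var plus per_cpu_offset(cpu).
	 */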