2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/mips/mm/init.c
@@ -22,7 +22,7 @@
 #include <linux/ptrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
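
The hunk above is the headline change: the legacy bootmem allocator header goes away and <linux/memblock.h> comes in, memblock being the generic early-boot allocator that replaced bootmem tree-wide. For orientation, a minimal sketch of the replacement API (not part of this patch; assumes a kernel where memblock_alloc() returns zeroed memory or NULL on failure, and the helper name is invented):

    #include <linux/memblock.h>

    /* Illustrative only: early, zeroed, page-aligned allocation. */
    static void __init early_buf_init(void)
    {
            void *buf;

            buf = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
            if (!buf)
                    panic("%s: early allocation failed\n", __func__);
    }
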
@@ -32,16 +32,13 @@
 #include <linux/kcore.h>
 #include <linux/initrd.h>
 
-#include <asm/asm-offsets.h>
 #include <asm/bootinfo.h>
 #include <asm/cachectl.h>
 #include <asm/cpu.h>
 #include <asm/dma.h>
-#include <asm/kmap_types.h>
 #include <asm/maar.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
@@ -85,6 +82,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot
 static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 {
        enum fixed_addresses idx;
+       unsigned int old_mmid;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
@@ -111,6 +109,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
+       if (cpu_has_mmid) {
+               old_mmid = read_c0_memorymapid();
+               write_c0_memorymapid(MMID_KERNEL_WIRED);
+       }
 #ifdef CONFIG_XPA
        if (cpu_has_xpa) {
                entrylo = (pte.pte_low & _PFNX_MASK);
@@ -125,6 +127,8 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
+       if (cpu_has_mmid)
+               write_c0_memorymapid(old_mmid);
        local_irq_restore(flags);
 
        return (void*) vaddr;
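
The two hunks above bracket the wired-entry TLB write with MemoryMapID (MMID) handling: on cores where TLB entries are tagged by MMID rather than ASID, the indexed write must happen under the kernel's reserved MMID_KERNEL_WIRED, and the interrupted context's MMID is restored afterwards. A minimal sketch of that save/switch/restore pattern, using the same accessors the patch relies on (the helper name is invented):

    /* Sketch: perform an indexed TLB write tagged with the kernel MMID. */
    static void kernel_wired_tlb_write(void)
    {
            unsigned int old_mmid = 0;

            if (cpu_has_mmid) {
                    old_mmid = read_c0_memorymapid();
                    write_c0_memorymapid(MMID_KERNEL_WIRED);
                    mtc0_tlbw_hazard();
            }

            tlb_write_indexed();    /* entry is now owned by the kernel MMID */
            tlbw_use_hazard();

            if (cpu_has_mmid)
                    write_c0_memorymapid(old_mmid);
    }
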
@@ -233,9 +237,9 @@ void __init fixrange_init(unsigned long start, unsigned long end,
        unsigned long vaddr;
 
        vaddr = start;
-       i = __pgd_offset(vaddr);
-       j = __pud_offset(vaddr);
-       k = __pmd_offset(vaddr);
+       i = pgd_index(vaddr);
+       j = pud_index(vaddr);
+       k = pmd_index(vaddr);
        pgd = pgd_base + i;
 
        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
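
The __pgd_offset()/__pud_offset()/__pmd_offset() macros were MIPS-local spellings of the generic pgd_index()/pud_index()/pmd_index(): take the virtual address, shift by that level's span, and mask to the number of entries in the table. A standalone illustration of the arithmetic (shift and table size are example values for a classic two-level 32-bit layout, not taken from this file):

    #include <stdio.h>

    /* Example layout: 4 KiB pages, each PGD entry spans 4 MiB. */
    #define PGDIR_SHIFT     22
    #define PTRS_PER_PGD    1024

    #define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

    int main(void)
    {
            unsigned long vaddr = 0xc0101234UL;

            /* 0xc0101234 >> 22 == 768: the kernel half starts 3/4 in. */
            printf("pgd_index(0x%lx) = %lu\n", vaddr, pgd_index(vaddr));
            return 0;
    }
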
@@ -244,7 +248,13 @@ void __init fixrange_init(unsigned long start, unsigned long end,
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
-                                       pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+                                       pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+                                                                          PAGE_SIZE);
+                                       if (!pte)
+                                               panic("%s: Failed to allocate %lu bytes align=%lx\n",
+                                                     __func__, PAGE_SIZE,
+                                                     PAGE_SIZE);
+
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
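
One behavioural detail worth calling out in the hunk above: alloc_bootmem_low_pages() panicked internally when memory ran out, while memblock_alloc_low() returns NULL, so the explicit check-and-panic is now the caller's job. The idiom, as a generic sketch (helper name invented):

    /* Sketch: allocate one zeroed low-memory page for an early page table. */
    static pte_t * __init early_pte_alloc(void)
    {
            pte_t *pte;

            pte = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
            if (!pte)
                    panic("%s: failed to allocate %lu bytes\n",
                          __func__, PAGE_SIZE);
            return pte;
    }
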
@@ -257,37 +267,46 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 #endif
 }
 
+struct maar_walk_info {
+       struct maar_config cfg[16];
+       unsigned int num_cfg;
+};
+
+static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
+                        void *data)
+{
+       struct maar_walk_info *wi = data;
+       struct maar_config *cfg = &wi->cfg[wi->num_cfg];
+       unsigned int maar_align;
+
+       /* MAAR registers hold physical addresses right shifted by 4 bits */
+       maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);
+
+       /* Fill in the MAAR config entry */
+       cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
+       cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
+       cfg->attrs = MIPS_MAAR_S;
+
+       /* Ensure we don't overflow the cfg array */
+       if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
+               wi->num_cfg++;
+
+       return 0;
+}
+
+
 unsigned __weak platform_maar_init(unsigned num_pairs)
 {
-       struct maar_config cfg[BOOT_MEM_MAP_MAX];
-       unsigned i, num_configured, num_cfg = 0;
+       unsigned int num_configured;
+       struct maar_walk_info wi;
 
-       for (i = 0; i < boot_mem_map.nr_map; i++) {
-               switch (boot_mem_map.map[i].type) {
-               case BOOT_MEM_RAM:
-               case BOOT_MEM_INIT_RAM:
-                       break;
-               default:
-                       continue;
-               }
+       wi.num_cfg = 0;
+       walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);
 
-               /* Round lower up */
-               cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-               cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
-
-               /* Round upper down */
-               cfg[num_cfg].upper = boot_mem_map.map[i].addr +
-                                    boot_mem_map.map[i].size;
-               cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
-
-               cfg[num_cfg].attrs = MIPS_MAAR_S;
-               num_cfg++;
-       }
-
-       num_configured = maar_config(cfg, num_cfg, num_pairs);
-       if (num_configured < num_cfg)
-               pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
-                       num_pairs, num_cfg);
+       num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
+       if (num_configured < wi.num_cfg)
+               pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
+                       num_pairs, wi.num_cfg);
 
        return num_configured;
 }
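
This is the core of the conversion: platform_maar_init() no longer iterates the MIPS-private boot_mem_map but asks the resource tree for System RAM through walk_system_ram_range(), which calls maar_res_walk() once per contiguous RAM range. The effect of the alignment code is unchanged: maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4) yields the same 64 KiB granularity as the old open-coded (x + 0xffff) & ~0xffff round-up and (x & ~0xffff) - 1 round-down. A minimal standalone use of the same walker API (function names invented), counting RAM pages instead of building MAAR entries:

    /* Sketch: count System RAM pages with walk_system_ram_range(). */
    static int count_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
                            void *data)
    {
            *(unsigned long *)data += nr_pages;
            return 0;       /* returning non-zero would stop the walk */
    }

    static unsigned long __init count_system_ram_pages(void)
    {
            unsigned long nr = 0;

            walk_system_ram_range(0, max_pfn, &nr, count_ram_cb);
            return nr;
    }
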
@@ -337,17 +356,23 @@ void maar_init(void)
                write_c0_maari(i);
                back_to_back_c0_hazard();
                upper = read_c0_maar();
+#ifdef CONFIG_XPA
+               upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
 
                write_c0_maari(i + 1);
                back_to_back_c0_hazard();
                lower = read_c0_maar();
+#ifdef CONFIG_XPA
+               lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
 
                attr = lower & upper;
                lower = (lower & MIPS_MAAR_ADDR) << 4;
                upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
 
                pr_info("  [%d]: ", i / 2);
-               if (!(attr & MIPS_MAAR_VL)) {
+               if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
                        pr_cont("disabled\n");
                        continue;
                }
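
Decoding context for the hunk above: a MAAR holds physical addresses right-shifted by 4, so the print path shifts the masked field left by 4, and the | 0xffff makes the upper bound inclusive at the register's 64 KiB granularity. Under CONFIG_XPA the extra high address bits live in the companion MAARX register, merged in via readx_c0_maar(), and validity becomes the wider MIPS_MAAR_V field instead of MIPS_MAAR_VL alone. The decode arithmetic by itself, as a runnable illustration (the mask and register values are invented for the example):

    #include <stdio.h>
    #include <stdint.h>

    /* Example mask standing in for the MAAR ADDR field (PA bits >> 4). */
    #define MAAR_ADDR_MASK  0x00ffffff000ULL

    int main(void)
    {
            uint64_t lower = 0x8000, upper = 0x9000;  /* example raw values */

            uint64_t lo = (lower & MAAR_ADDR_MASK) << 4;
            uint64_t hi = ((upper & MAAR_ADDR_MASK) << 4) | 0xffff;

            /* Prints 0x80000-0x9ffff: a 64 KiB-granular inclusive range. */
            printf("0x%llx-0x%llx\n", (unsigned long long)lo,
                   (unsigned long long)hi);
            return 0;
    }
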
@@ -370,42 +395,12 @@ void maar_init(void)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-int page_is_ram(unsigned long pagenr)
-{
-       int i;
-
-       for (i = 0; i < boot_mem_map.nr_map; i++) {
-               unsigned long addr, end;
-
-               switch (boot_mem_map.map[i].type) {
-               case BOOT_MEM_RAM:
-               case BOOT_MEM_INIT_RAM:
-                       break;
-               default:
-                       /* not usable memory */
-                       continue;
-               }
-
-               addr = PFN_UP(boot_mem_map.map[i].addr);
-               end = PFN_DOWN(boot_mem_map.map[i].addr +
-                              boot_mem_map.map[i].size);
-
-               if (pagenr >= addr && pagenr < end)
-                       return 1;
-       }
-
-       return 0;
-}
-
 void __init paging_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
        pagetable_init();
 
-#ifdef CONFIG_HIGHMEM
-       kmap_init();
-#endif
 #ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 #endif
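
The deleted page_is_ram() duplicated bookkeeping memblock already does; its one user in this file, mem_init_free_highmem(), now calls memblock_is_memory(PFN_PHYS(pfn)) directly (see the later hunk). The PFN_UP/PFN_DOWN rounding it used remains the standard idiom elsewhere: round the start up and the end down so a partial page at either edge of a region is never treated as usable RAM. A standalone illustration (4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long addr = 0x100100UL, size = 0x3000UL;

            /* Region 0x100100-0x103100: only pfns 0x101, 0x102 are whole. */
            printf("first whole pfn: 0x%lx\n", PFN_UP(addr));          /* 0x101 */
            printf("end pfn (excl.): 0x%lx\n", PFN_DOWN(addr + size)); /* 0x103 */
            return 0;
    }
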
@@ -424,14 +419,14 @@ void __init paging_init(void)
        }
 #endif
 
-       free_area_init_nodes(max_zone_pfns);
+       free_area_init(max_zone_pfns);
 }
 
 #ifdef CONFIG_64BIT
 static struct kcore_list kcore_kseg0;
 #endif
 
-static inline void mem_init_free_highmem(void)
+static inline void __init mem_init_free_highmem(void)
 {
 #ifdef CONFIG_HIGHMEM
        unsigned long tmp;
@@ -442,7 +437,7 @@ static inline void __init mem_init_free_highmem(void)
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);
 
-               if (!page_is_ram(tmp))
+               if (!memblock_is_memory(PFN_PHYS(tmp)))
                        SetPageReserved(page);
                else
                        free_highmem_page(page);
@@ -452,6 +447,12 @@ static inline void __init mem_init_free_highmem(void)
 
 void __init mem_init(void)
 {
+       /*
+        * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
+        * bits to hold a full 32b physical address on MIPS32 systems.
+        */
+       BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
+
 #ifdef CONFIG_HIGHMEM
 #ifdef CONFIG_DISCONTIGMEM
 #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
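
The new BUILD_BUG_ON() turns an impossible configuration (a PTE layout that cannot encode a full 32-bit physical address on a 32-bit kernel) into a build failure rather than silent corruption; IS_ENABLED(CONFIG_32BIT) folds to a constant 0 or 1, so the compiler decides the whole condition. The same idea in plain C11, as a standalone sketch (macro names invented):

    /* Compile-time assertion: fails the build, never the running program. */
    #define MY_PFN_SHIFT    12
    #define MY_PAGE_SHIFT   12

    _Static_assert(MY_PFN_SHIFT <= MY_PAGE_SHIFT,
                   "PTE has too few bits for a full physical address");

    int main(void) { return 0; }
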
@@ -463,7 +464,7 @@ void __init mem_init(void)
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
        maar_init();
-       free_all_bootmem();
+       memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages. */
        mem_init_free_highmem();
        mem_init_print_info(NULL);
@@ -492,14 +493,6 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
-                          "initrd");
-}
-#endif
-
 void (*free_init_pages_eva)(void *begin, void *end) = NULL;
 
 void __ref free_initmem(void)
@@ -516,22 +509,63 @@ void __ref free_initmem(void)
        free_initmem_default(POISON_FREE_INITMEM);
 }
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+       return node_distance(cpu_to_node(from), cpu_to_node(to));
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+                                  size_t align)
+{
+       return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
+                                     MEMBLOCK_ALLOC_ACCESSIBLE,
+                                     cpu_to_node(cpu));
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+       memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+       unsigned long delta;
+       unsigned int cpu;
+       int rc;
+
+       /*
+        * Always reserve area for module percpu variables.  That's
+        * what the legacy allocator did.
+        */
+       rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+                                   PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+                                   pcpu_cpu_distance,
+                                   pcpu_fc_alloc, pcpu_fc_free);
+       if (rc < 0)
+               panic("Failed to initialize percpu areas.");
+
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu)
+               __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 unsigned long pgd_current[NR_CPUS];
 #endif
 
 /*
- * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
- * are constants.  So we use the variants from asm-offset.h until that gcc
- * will officially be retired.
- *
  * Align swapper_pg_dir in to 64K, allows its address to be loaded
  * with a single LUI instruction in the TLB handlers.  If we used
  * __aligned(64K), its size would get rounded up to the alignment
  * size, and waste space.  So we place it in its own section and align
  * it in the linker script.
  */
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
 #ifndef __PAGETABLE_PUD_FOLDED
 pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
 #endif
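
The large addition above opts MIPS into the generic embed-first-chunk percpu allocator (CONFIG_HAVE_SETUP_PER_CPU_AREA): pcpu_embed_first_chunk() places per-CPU units in NUMA-aware memblock memory via pcpu_fc_alloc(), and each CPU's offset is then delta (allocated base minus the linked __per_cpu_start) plus that CPU's unit offset, so a per-CPU access reduces to "linked address + __per_cpu_offset[cpu]". A userspace analogue of that relocation arithmetic (all names invented for the illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define NR_CPUS 4

    int main(void)
    {
            long linked_var;                /* stand-in for the linked image */
            long unit[NR_CPUS];             /* stand-in for allocated units */
            uintptr_t per_cpu_offset[NR_CPUS];

            /* delta relocates from the linked image to the allocated base. */
            uintptr_t delta = (uintptr_t)&unit[0] - (uintptr_t)&linked_var;
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    per_cpu_offset[cpu] = delta + cpu * sizeof(long);

            /* "per_cpu_ptr": linked address plus that CPU's offset. */
            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    long *p = (long *)((uintptr_t)&linked_var +
                                       per_cpu_offset[cpu]);
                    *p = cpu;               /* lands in unit[cpu] */
            }

            printf("unit[2] = %ld\n", unit[2]);     /* prints 2 */
            return 0;
    }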