forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/parisc/mm/init.c
@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <linux/delay.h>
@@ -27,7 +26,6 @@
 #include <linux/compat.h>
 
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/pdc_chassis.h>
 #include <asm/mmzone.h>
@@ -39,16 +37,11 @@
 extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
 
 #if CONFIG_PGTABLE_LEVELS == 3
-/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
- * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
- * guarantee that global objects will be laid out in memory in the same order
- * as the order of declaration, so put these in different sections and use
- * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
+pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
 #endif
 
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));
 
 static struct resource data_resource = {
 	.name = "Kernel data",
@@ -67,7 +60,7 @@
 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
 };
 
-static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
 
 /* The following array is initialized from the firmware specific
  * information retrieved in kernel/inventory.c.
@@ -75,36 +68,6 @@
 
 physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
 int npmem_ranges __initdata;
-
-/*
- * get_memblock() allocates pages via memblock.
- * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
- * doesn't allocate from bottom to top which is needed because we only created
- * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
- */
-static void * __init get_memblock(unsigned long size)
-{
-	static phys_addr_t search_addr __initdata;
-	phys_addr_t phys;
-
-	if (!search_addr)
-		search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
-	search_addr = ALIGN(search_addr, size);
-	while (!memblock_is_region_memory(search_addr, size) ||
-		memblock_is_region_reserved(search_addr, size)) {
-		search_addr += size;
-	}
-	phys = search_addr;
-
-	if (phys)
-		memblock_reserve(phys, size);
-	else
-		panic("get_memblock() failed.\n");
-
-	memset(__va(phys), 0, size);
-
-	return __va(phys);
-}
 
 #ifdef CONFIG_64BIT
 #define MAX_MEM (1UL << MAX_PHYSMEM_BITS)
@@ -302,6 +265,13 @@
 			max_pfn = start_pfn + npages;
 	}
 
+	/*
+	 * We can't use memblock top-down allocations because we only
+	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
+	 * the assembly bootup code.
+	 */
+	memblock_set_bottom_up(true);
+
 	/* IOMMU is always used to access "high mem" on those boxes
 	 * that can support enough mem that a PCI device couldn't
 	 * directly DMA to any physical addresses.
@@ -369,22 +339,12 @@
 	memblock_dump_all();
 }
 
-static int __init parisc_text_address(unsigned long vaddr)
+static bool kernel_set_to_readonly;
+
+static void __ref map_pages(unsigned long start_vaddr,
+			    unsigned long start_paddr, unsigned long size,
+			    pgprot_t pgprot, int force)
 {
-	static unsigned long head_ptr __initdata;
-
-	if (!head_ptr)
-		head_ptr = PAGE_MASK & (unsigned long)
-			dereference_function_descriptor(&parisc_kernel_start);
-
-	return core_kernel_text(vaddr) || vaddr == head_ptr;
-}
-
-static void __init map_pages(unsigned long start_vaddr,
-			unsigned long start_paddr, unsigned long size,
-			pgprot_t pgprot, int force)
-{
-	pgd_t *pg_dir;
 	pmd_t *pmd;
 	pte_t *pg_table;
 	unsigned long end_paddr;
@@ -396,86 +356,72 @@
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long kernel_end;
+	unsigned long kernel_start, kernel_end;
 
 	ro_start = __pa((unsigned long)_text);
 	ro_end = __pa((unsigned long)&data_start);
+	kernel_start = __pa((unsigned long)&__init_begin);
 	kernel_end = __pa((unsigned long)&_end);
 
 	end_paddr = start_paddr + size;
 
-	pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
-	start_pmd = 0;
-#else
+	/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
 	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
 	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 
 	address = start_paddr;
 	vaddr = start_vaddr;
 	while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
-		pmd = (pmd_t *)__pa(pg_dir);
-#else
-		pmd = (pmd_t *)pgd_address(*pg_dir);
+		pgd_t *pgd = pgd_offset_k(vaddr);
+		p4d_t *p4d = p4d_offset(pgd, vaddr);
+		pud_t *pud = pud_offset(p4d, vaddr);
 
-		/*
-		 * pmd is physical at this point
-		 */
-
-		if (!pmd) {
-			pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
-			pmd = (pmd_t *) __pa(pmd);
+#if CONFIG_PGTABLE_LEVELS == 3
+		if (pud_none(*pud)) {
+			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
+					     PAGE_SIZE << PMD_ORDER);
+			if (!pmd)
+				panic("pmd allocation failed.\n");
+			pud_populate(NULL, pud, pmd);
 		}
-
-		pgd_populate(NULL, pg_dir, __va(pmd));
 #endif
-		pg_dir++;
 
-		/* now change pmd to kernel virtual addresses */
-
-		pmd = (pmd_t *)__va(pmd) + start_pmd;
+		pmd = pmd_offset(pud, vaddr);
 		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
-
-			/*
-			 * pg_table is physical at this point
-			 */
-
-			pg_table = (pte_t *)pmd_address(*pmd);
-			if (!pg_table) {
-				pg_table = (pte_t *) get_memblock(PAGE_SIZE);
-				pg_table = (pte_t *) __pa(pg_table);
+			if (pmd_none(*pmd)) {
+				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+				if (!pg_table)
+					panic("page table allocation failed\n");
+				pmd_populate_kernel(NULL, pmd, pg_table);
 			}
 
-			pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
-			/* now change pg_table to kernel virtual addresses */
-
-			pg_table = (pte_t *) __va(pg_table) + start_pte;
+			pg_table = pte_offset_kernel(pmd, vaddr);
 			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 				pte_t pte;
+				pgprot_t prot;
+				bool huge = false;
 
-				if (force)
-					pte = __mk_pte(address, pgprot);
-				else if (parisc_text_address(vaddr)) {
-					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
-					if (address >= ro_start && address < kernel_end)
-						pte = pte_mkhuge(pte);
+				if (force) {
+					prot = pgprot;
+				} else if (address < kernel_start || address >= kernel_end) {
+					/* outside kernel memory */
+					prot = PAGE_KERNEL;
+				} else if (!kernel_set_to_readonly) {
+					/* still initializing, allow writing to RO memory */
+					prot = PAGE_KERNEL_RWX;
+					huge = true;
+				} else if (address >= ro_start) {
+					/* Code (ro) and Data areas */
+					prot = (address < ro_end) ?
+						PAGE_KERNEL_EXEC : PAGE_KERNEL;
+					huge = true;
+				} else {
+					prot = PAGE_KERNEL;
 				}
-				else
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-				if (address >= ro_start && address < ro_end) {
-					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+
+				pte = __mk_pte(address, prot);
+				if (huge)
 					pte = pte_mkhuge(pte);
-				} else
-#endif
-				{
-					pte = __mk_pte(address, pgprot);
-					if (address >= ro_start && address < kernel_end)
-						pte = pte_mkhuge(pte);
-				}
 
 				if (address >= end_paddr)
 					break;
@@ -494,15 +440,33 @@
 	}
 }
 
-void __ref free_initmem(void)
+void __init set_kernel_text_rw(int enable_read_write)
+{
+	unsigned long start = (unsigned long) __init_begin;
+	unsigned long end = (unsigned long) &data_start;
+
+	map_pages(start, __pa(start), end-start,
+		PAGE_KERNEL_RWX, enable_read_write ? 1:0);
+
+	/* force the kernel to see the new page table entries */
+	flush_cache_all();
+	flush_tlb_all();
+}
+
+void free_initmem(void)
 {
 	unsigned long init_begin = (unsigned long)__init_begin;
 	unsigned long init_end = (unsigned long)__init_end;
+	unsigned long kernel_end = (unsigned long)&_end;
+
+	/* Remap kernel text and data, but do not touch init section yet. */
+	kernel_set_to_readonly = true;
+	map_pages(init_end, __pa(init_end), kernel_end - init_end,
+		  PAGE_KERNEL, 0);
 
 	/* The init text pages are marked R-X. We have to
 	 * flush the icache and mark them RW-
 	 *
-	 * This is tricky, because map_pages is in the init section.
 	 * Do a dummy remap of the data section first (the data
 	 * section is already PAGE_KERNEL) to pull in the TLB entries
 	 * for map_kernel */
@@ -514,7 +478,7 @@
 		  PAGE_KERNEL, 1);
 
 	/* force the kernel to see the new TLB entries */
-	__flush_tlb_range(0, init_begin, init_end);
+	__flush_tlb_range(0, init_begin, kernel_end);
 
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
@@ -532,8 +496,9 @@
 {
 	/* rodata memory was already mapped with KERNEL_RO access rights by
 	   pagetable_init() and map_pages(). No need to do additional stuff here */
-	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-		(unsigned long)(__end_rodata - __start_rodata) >> 10);
+	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
+
+	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
 }
 #endif
 
@@ -559,11 +524,11 @@
 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
 
-void *parisc_vmalloc_start __read_mostly;
+void *parisc_vmalloc_start __ro_after_init;
 EXPORT_SYMBOL(parisc_vmalloc_start);
 
 #ifdef CONFIG_PA11
-unsigned long pcxl_dma_start __read_mostly;
+unsigned long pcxl_dma_start __ro_after_init;
 #endif
 
 void __init mem_init(void)
@@ -588,10 +553,15 @@
 	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
 	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
 			> BITS_PER_LONG);
+#if CONFIG_PGTABLE_LEVELS == 3
+	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
+#else
+	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
+#endif
 
 	high_memory = __va((max_pfn << PAGE_SHIFT));
 	set_max_mapnr(max_low_pfn);
-	free_all_bootmem();
+	memblock_free_all();
 
 #ifdef CONFIG_PA11
 	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
@@ -610,14 +580,18 @@
 	 * But keep code for debugging purposes.
 	 */
 	printk("virtual kernel memory layout:\n"
-	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
-	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
-	       "      .init : 0x%px - 0x%px   (%4ld kB)\n"
-	       "      .data : 0x%px - 0x%px   (%4ld kB)\n"
-	       "      .text : 0x%px - 0x%px   (%4ld kB)\n",
+	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
+	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
+	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
+	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
+	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
+	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",
 
 	    (void*)VMALLOC_START, (void*)VMALLOC_END,
 	    (VMALLOC_END - VMALLOC_START) >> 20,
+
+	    (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
+	    (unsigned long)(FIXMAP_SIZE / 1024),
 
 	    __va(0), high_memory,
 	    ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
@@ -633,7 +607,7 @@
 #endif
 }
 
-unsigned long *empty_zero_page __read_mostly;
+unsigned long *empty_zero_page __ro_after_init;
 EXPORT_SYMBOL(empty_zero_page);
 
 /*
@@ -671,7 +645,10 @@
 	}
 #endif
 
-	empty_zero_page = get_memblock(PAGE_SIZE);
+	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("zero page allocation failed.\n");
+
 }
 
 static void __init gateway_init(void)
@@ -696,27 +673,11 @@
 
 static void __init parisc_bootmem_free(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-	unsigned long holes_size[MAX_NR_ZONES] = { 0, };
-	unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
-	int i;
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 
-	for (i = 0; i < npmem_ranges; i++) {
-		unsigned long start = pmem_ranges[i].start_pfn;
-		unsigned long size = pmem_ranges[i].pages;
-		unsigned long end = start + size;
+	max_zone_pfn[0] = memblock_end_of_DRAM();
 
-		if (mem_start_pfn > start)
-			mem_start_pfn = start;
-		if (mem_end_pfn < end)
-			mem_end_pfn = end;
-		mem_size_pfn += size;
-	}
-
-	zones_size[0] = mem_end_pfn - mem_start_pfn;
-	holes_size[0] = zones_size[0] - mem_size_pfn;
-
-	free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
+	free_area_init(max_zone_pfn);
 }
 
 void __init paging_init(void)
@@ -727,11 +688,6 @@
 	flush_cache_all_local(); /* start with known state */
 	flush_tlb_all_local(NULL);
 
-	/*
-	 * Mark all memblocks as present for sparsemem using
-	 * memory_present() and then initialize sparsemem.
-	 */
-	memblocks_present();
 	sparse_init();
 	parisc_bootmem_free();
 }
@@ -788,7 +744,7 @@
 	free_space_ids--;
 
 	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
-	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
+	space_id[BIT_WORD(index)] |= BIT_MASK(index);
 	space_id_index = index;
 
 	spin_unlock(&sid_lock);
@@ -799,16 +755,16 @@
 void free_sid(unsigned long spaceid)
 {
 	unsigned long index = spaceid >> SPACEID_SHIFT;
-	unsigned long *dirty_space_offset;
+	unsigned long *dirty_space_offset, mask;
 
-	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
-	index &= (BITS_PER_LONG - 1);
+	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
+	mask = BIT_MASK(index);
 
 	spin_lock(&sid_lock);
 
-	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
+	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */
 
-	*dirty_space_offset |= (1L << index);
+	*dirty_space_offset |= mask;
 	dirty_space_ids++;
 
 	spin_unlock(&sid_lock);
@@ -913,12 +869,5 @@
 	flush_tlb_all_local(NULL);
 	recycle_sids();
 	spin_unlock(&sid_lock);
-}
-#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif