forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/arch/x86/mm/init_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *
  * Copyright (C) 1995  Linus Torvalds
@@ -23,7 +24,6 @@
 #include <linux/pci.h>
 #include <linux/pfn.h>
 #include <linux/poison.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/proc_fs.h>
 #include <linux/memory_hotplug.h>
@@ -35,7 +35,6 @@
 #include <asm/bios_ebda.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
-#include <asm/pgtable.h>
 #include <asm/dma.h>
 #include <asm/fixmap.h>
 #include <asm/e820/api.h>
@@ -52,6 +51,8 @@
 #include <asm/page_types.h>
 #include <asm/cpu_entry_area.h>
 #include <asm/init.h>
+#include <asm/pgtable_areas.h>
+#include <asm/numa.h>
 
 #include "mm_internal.h"
 
@@ -237,7 +238,11 @@
 	}
 }
 
-static inline int is_kernel_text(unsigned long addr)
+/*
+ * The <linux/kallsyms.h> already defines is_kernel_text,
+ * using '__' prefix not to get in conflict.
+ */
+static inline int __is_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
 		return 1;
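
Note (not part of the diff): the conflicting generic helper lives in <linux/kallsyms.h> and reads roughly like this (paraphrased), which is why the local copy above takes the '__' prefix:

	static inline int is_kernel_text(unsigned long addr)
	{
		if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
		    arch_is_kernel_text(addr))
			return 1;
		return in_gate_area_no_mm(addr);
	}

Observe that the local variant checks a different range (_text up to __init_end), so it is not a drop-in for the generic one; only the name had to change.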
@@ -252,7 +257,8 @@
 unsigned long __init
 kernel_physical_mapping_init(unsigned long start,
 			     unsigned long end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask,
+			     pgprot_t prot)
 {
 	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
 	unsigned long last_map_addr = end;
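
Note (not part of the diff): with the new parameter, callers choose the protection instead of the helper hard-coding it. A hypothetical wrapper sketch; the name map_regular_ram and the assumption that plain RAM maps with PAGE_KERNEL are illustrative, not from this patch:

	unsigned long __init map_regular_ram(unsigned long start, unsigned long end,
					     unsigned long page_size_mask)
	{
		/* thread an explicit pgprot_t down to the mapping code */
		return kernel_physical_mapping_init(start, end, page_size_mask,
						    PAGE_KERNEL);
	}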
@@ -327,8 +333,8 @@
 			addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
 				PAGE_OFFSET + PAGE_SIZE-1;
 
-			if (is_kernel_text(addr) ||
-			    is_kernel_text(addr2))
+			if (__is_kernel_text(addr) ||
+			    __is_kernel_text(addr2))
 				prot = PAGE_KERNEL_LARGE_EXEC;
 
 			pages_2m++;
@@ -353,7 +359,7 @@
 			 */
 			pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
 
-			if (is_kernel_text(addr))
+			if (__is_kernel_text(addr))
 				prot = PAGE_KERNEL_EXEC;
 
 			pages_4k++;
@@ -390,15 +396,6 @@
 
 pte_t *kmap_pte;
 
-static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
-{
-	pgd_t *pgd = pgd_offset_k(vaddr);
-	p4d_t *p4d = p4d_offset(pgd, vaddr);
-	pud_t *pud = pud_offset(p4d, vaddr);
-	pmd_t *pmd = pmd_offset(pud, vaddr);
-	return pte_offset_kernel(pmd, vaddr);
-}
-
 static void __init kmap_init(void)
 {
 	unsigned long kmap_vstart;
@@ -407,28 +404,17 @@
 	 * Cache the first kmap pte:
 	 */
 	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+	kmap_pte = virt_to_kpte(kmap_vstart);
 }
 
 #ifdef CONFIG_HIGHMEM
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
-	unsigned long vaddr;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	unsigned long vaddr = PKMAP_BASE;
 
-	vaddr = PKMAP_BASE;
 	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 
-	pgd = swapper_pg_dir + pgd_index(vaddr);
-	p4d = p4d_offset(pgd, vaddr);
-	pud = pud_offset(p4d, vaddr);
-	pmd = pmd_offset(pud, vaddr);
-	pte = pte_offset_kernel(pmd, vaddr);
-	pkmap_page_table = pte;
+	pkmap_page_table = virt_to_kpte(vaddr);
 }
 
 void __init add_highpages_with_active_regions(int nid,
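
Note (not part of the diff): both open-coded pgd -> p4d -> pud -> pmd -> pte walks above collapse into the generic virt_to_kpte() helper, which reads roughly like this (paraphrased from include/linux/pgtable.h):

	static inline pmd_t *pmd_off_k(unsigned long va)
	{
		return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
	}

	/* Return the kernel PTE mapping vaddr, or NULL if none is populated. */
	static inline pte_t *virt_to_kpte(unsigned long vaddr)
	{
		pmd_t *pmd = pmd_off_k(vaddr);

		return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
	}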
@@ -693,7 +679,6 @@
 #endif
 
 	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
-	sparse_memory_present_with_active_regions(0);
 
 #ifdef CONFIG_FLATMEM
 	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
@@ -733,7 +718,6 @@
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */
 	olpc_dt_build_devicetree();
-	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
 	zone_sizes_init();
 }
@@ -752,7 +736,7 @@
 
 	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);
 
-	if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
+	if (copy_to_kernel_nofault((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
 		clear_fixmap(FIX_WP_TEST);
 		printk(KERN_CONT "Ok.\n");
 		return;
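
Note (not part of the diff): copy_to_kernel_nofault() is the renamed probe_kernel_write(); the write runs under an exception-table fixup, so a fault returns -EFAULT instead of oopsing. The WP test depends on exactly that. A sketch of the expected flow (message text is illustrative):

	char zero = 0;

	/* Returns 0 if the byte was written, -EFAULT if the write faulted. */
	if (copy_to_kernel_nofault((char *)fix_to_virt(FIX_WP_TEST), &zero, 1))
		pr_info("write faulted: WP is enforced as expected\n");
	else
		pr_err("write went through: WP is NOT working\n");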
@@ -771,7 +755,7 @@
 #endif
 	/*
 	 * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
-	 * be done before free_all_bootmem(). Memblock use free low memory for
+	 * be done before memblock_free_all(). Memblock use free low memory for
	 * temporary data (see find_range_array()) and for this purpose can use
	 * pages that was already passed to the buddy allocator, hence marked as
	 * not accessible in the page tables when compiled with
@@ -781,50 +765,12 @@
 	set_highmem_pages_init();
 
 	/* this will put all low memory onto the freelists */
-	free_all_bootmem();
+	memblock_free_all();
 
 	after_bootmem = 1;
 	x86_init.hyper.init_after_bootmem();
 
 	mem_init_print_info(NULL);
-	printk(KERN_INFO "virtual kernel memory layout:\n"
-		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-		"  cpu_entry : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-#ifdef CONFIG_HIGHMEM
-		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-#endif
-		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
-		FIXADDR_START, FIXADDR_TOP,
-		(FIXADDR_TOP - FIXADDR_START) >> 10,
-
-		CPU_ENTRY_AREA_BASE,
-		CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
-		CPU_ENTRY_AREA_MAP_SIZE >> 10,
-
-#ifdef CONFIG_HIGHMEM
-		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
-		(LAST_PKMAP*PAGE_SIZE) >> 10,
-#endif
-
-		VMALLOC_START, VMALLOC_END,
-		(VMALLOC_END - VMALLOC_START) >> 20,
-
-		(unsigned long)__va(0), (unsigned long)high_memory,
-		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
-
-		(unsigned long)&__init_begin, (unsigned long)&__init_end,
-		((unsigned long)&__init_end -
-		 (unsigned long)&__init_begin) >> 10,
-
-		(unsigned long)&_etext, (unsigned long)&_edata,
-		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
-
-		(unsigned long)&_text, (unsigned long)&_etext,
-		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
 	/*
 	 * Check boundaries twice: Some fundamental inconsistencies can
@@ -851,13 +797,25 @@
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		    struct mhp_params *params)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
+	int ret;
 
-	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	/*
+	 * The page tables were already mapped at boot so if the caller
+	 * requests a different mapping type then we must change all the
+	 * pages with __set_memory_prot().
+	 */
+	if (params->pgprot.pgprot != PAGE_KERNEL.pgprot) {
+		ret = __set_memory_prot(start, nr_pages, params->pgprot);
+		if (ret)
+			return ret;
+	}
+
+	return __add_pages(nid, start_pfn, nr_pages, params);
 }
 
 void arch_remove_memory(int nid, u64 start, u64 size,
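
Note (not part of the diff): the params argument replaces the separate altmap/want_memblock arguments. Paraphrased from include/linux/memory_hotplug.h, plus a hypothetical caller (example_add_ram is illustrative) showing why ordinary RAM hotplug skips the __set_memory_prot() branch:

	struct mhp_params {
		struct vmem_altmap *altmap;	/* optional metadata storage */
		pgprot_t pgprot;		/* protection for the new mapping */
	};

	static int example_add_ram(int nid, u64 start, u64 size)
	{
		/* plain RAM asks for PAGE_KERNEL, so no pgprot fixup is needed */
		struct mhp_params params = { .pgprot = PAGE_KERNEL };

		return arch_add_memory(nid, start, size, &params);
	}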
@@ -872,34 +830,6 @@
 
 int kernel_set_to_readonly __read_mostly;
 
-void set_kernel_text_rw(void)
-{
-	unsigned long start = PFN_ALIGN(_text);
-	unsigned long size = PFN_ALIGN(_etext) - start;
-
-	if (!kernel_set_to_readonly)
-		return;
-
-	pr_debug("Set kernel text: %lx - %lx for read write\n",
-		 start, start+size);
-
-	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
-}
-
-void set_kernel_text_ro(void)
-{
-	unsigned long start = PFN_ALIGN(_text);
-	unsigned long size = PFN_ALIGN(_etext) - start;
-
-	if (!kernel_set_to_readonly)
-		return;
-
-	pr_debug("Set kernel text: %lx - %lx for read only\n",
-		 start, start+size);
-
-	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-}
-
 static void mark_nxdata_nx(void)
 {
 	/*
@@ -908,46 +838,31 @@
 	 */
 	unsigned long start = PFN_ALIGN(_etext);
 	/*
-	 * This comes from is_kernel_text upper limit. Also HPAGE where used:
+	 * This comes from __is_kernel_text upper limit. Also HPAGE where used:
 	 */
 	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
 
 	if (__supported_pte_mask & _PAGE_NX)
 		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
-	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
+	set_memory_nx(start, size >> PAGE_SHIFT);
 }
 
 void mark_rodata_ro(void)
 {
 	unsigned long start = PFN_ALIGN(_text);
-	unsigned long size = PFN_ALIGN(_etext) - start;
+	unsigned long size = (unsigned long)__end_rodata - start;
 
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+	pr_info("Write protecting kernel text and read-only data: %luk\n",
 		size >> 10);
 
 	kernel_set_to_readonly = 1;
 
 #ifdef CONFIG_CPA_DEBUG
-	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
-	       start, start+size);
-	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
-
-	printk(KERN_INFO "Testing CPA: write protecting again\n");
-	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
-#endif
-
-	start += size;
-	size = (unsigned long)__end_rodata - start;
-	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-	       size >> 10);
-
-#ifdef CONFIG_CPA_DEBUG
-	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
+	pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
 	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
 
-	printk(KERN_INFO "Testing CPA: write protecting again\n");
+	pr_info("Testing CPA: write protecting again\n");
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 #endif
 	mark_nxdata_nx();