2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/powerpc/kernel/setup-common.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Common boot and setup code for both 32-bit and 64-bit.
  * Extracted from arch/powerpc/kernel/setup_64.c.
  *
  * Copyright (C) 2001 PPC64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #undef DEBUG
@@ -35,13 +31,13 @@
 #include <linux/memblock.h>
 #include <linux/of_platform.h>
 #include <linux/hugetlb.h>
+#include <linux/pgtable.h>
 #include <asm/debugfs.h>
 #include <asm/io.h>
 #include <asm/paca.h>
 #include <asm/prom.h>
 #include <asm/processor.h>
 #include <asm/vdso_datapage.h>
-#include <asm/pgtable.h>
 #include <asm/smp.h>
 #include <asm/elf.h>
 #include <asm/machdep.h>
@@ -67,6 +63,7 @@
 #include <asm/livepatch.h>
 #include <asm/mmu_context.h>
 #include <asm/cpu_has_feature.h>
+#include <asm/kasan.h>
 
 #include "setup.h"
 
@@ -133,13 +130,11 @@
 /* also used by kexec */
 void machine_shutdown(void)
 {
-#ifdef CONFIG_FA_DUMP
 	/*
 	 * if fadump is active, cleanup the fadump registration before we
 	 * shutdown.
 	 */
 	fadump_cleanup();
-#endif
 
 	if (ppc_md.machine_shutdown)
 		ppc_md.machine_shutdown();
@@ -200,14 +195,15 @@
 {
 	struct device_node *root;
 	const char *model = NULL;
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
 	unsigned long bogosum = 0;
 	int i;
-	for_each_online_cpu(i)
-		bogosum += loops_per_jiffy;
-	seq_printf(m, "total bogomips\t: %lu.%02lu\n",
-		   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
-#endif /* CONFIG_SMP && CONFIG_PPC32 */
+
+	if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
+		for_each_online_cpu(i)
+			bogosum += loops_per_jiffy;
+		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
+			   bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
+	}
 	seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
 	if (ppc_md.name)
 		seq_printf(m, "platform\t: %s\n", ppc_md.name);
@@ -221,11 +217,10 @@
 	if (ppc_md.show_cpuinfo != NULL)
 		ppc_md.show_cpuinfo(m);
 
-#ifdef CONFIG_PPC32
 	/* Display the amount of memory */
-	seq_printf(m, "Memory\t\t: %d MB\n",
-		   (unsigned int)(total_memory / (1024 * 1024)));
-#endif
+	if (IS_ENABLED(CONFIG_PPC32))
+		seq_printf(m, "Memory\t\t: %d MB\n",
+			   (unsigned int)(total_memory / (1024 * 1024)));
 }
 
 static int show_cpuinfo(struct seq_file *m, void *v)
@@ -252,26 +247,24 @@
 	else
 		seq_printf(m, "unknown (%08x)", pvr);
 
-#ifdef CONFIG_ALTIVEC
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		seq_printf(m, ", altivec supported");
-#endif /* CONFIG_ALTIVEC */
 
 	seq_printf(m, "\n");
 
 #ifdef CONFIG_TAU
-	if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
-#ifdef CONFIG_TAU_AVERAGE
-		/* more straightforward, but potentially misleading */
-		seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
-			   cpu_temp(cpu_id));
-#else
-		/* show the actual temp sensor range */
-		u32 temp;
-		temp = cpu_temp_both(cpu_id);
-		seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
-			   temp & 0xff, temp >> 16);
-#endif
+	if (cpu_has_feature(CPU_FTR_TAU)) {
+		if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
+			/* more straightforward, but potentially misleading */
+			seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
+				   cpu_temp(cpu_id));
+		} else {
+			/* show the actual temp sensor range */
+			u32 temp;
+			temp = cpu_temp_both(cpu_id);
+			seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
+				   temp & 0xff, temp >> 16);
+		}
 	}
 #endif /* CONFIG_TAU */
 
@@ -313,15 +306,12 @@
 		}
 	} else {
 		switch (PVR_VER(pvr)) {
-		case 0x0020: /* 403 family */
-			maj = PVR_MAJ(pvr) + 1;
-			min = PVR_MIN(pvr);
-			break;
 		case 0x1008: /* 740P/750P ?? */
 			maj = ((pvr >> 8) & 0xFF) - 1;
 			min = pvr & 0xFF;
 			break;
 		case 0x004e: /* POWER9 bits 12-15 give chip type */
+		case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
 			maj = (pvr >> 8) & 0x0F;
 			min = pvr & 0xFF;
 			break;
@@ -335,11 +325,10 @@
 	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
 		   maj, min, PVR_VER(pvr), PVR_REV(pvr));
 
-#ifdef CONFIG_PPC32
-	seq_printf(m, "bogomips\t: %lu.%02lu\n",
-		   loops_per_jiffy / (500000/HZ),
-		   (loops_per_jiffy / (5000/HZ)) % 100);
-#endif
+	if (IS_ENABLED(CONFIG_PPC32))
+		seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
+			   (loops_per_jiffy / (5000 / HZ)) % 100);
+
 	seq_printf(m, "\n");
 
 	/* If this is the last cpu, print the summary */
@@ -401,8 +390,8 @@
 
 #ifdef CONFIG_SMP
 
-int threads_per_core, threads_per_subcore, threads_shift;
-cpumask_t threads_core_mask;
+int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
+cpumask_t threads_core_mask __read_mostly;
 EXPORT_SYMBOL_GPL(threads_per_core);
 EXPORT_SYMBOL_GPL(threads_per_subcore);
 EXPORT_SYMBOL_GPL(threads_shift);
@@ -459,9 +448,11 @@
 
 	DBG("smp_setup_cpu_maps()\n");
 
-	cpu_to_phys_id = __va(memblock_alloc(nr_cpu_ids * sizeof(u32),
-							__alignof__(u32)));
-	memset(cpu_to_phys_id, 0, nr_cpu_ids * sizeof(u32));
+	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
+					__alignof__(u32));
+	if (!cpu_to_phys_id)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
 
 	for_each_node_by_type(dn, "cpu") {
 		const __be32 *intserv;
@@ -635,7 +626,7 @@
 	}
 	/* What can we do if we didn't find ? */
 	if (machine_id >= &__machine_desc_end) {
-		DBG("No suitable machine found !\n");
+		pr_err("No suitable machine description found !\n");
 		for (;;);
 	}
 
@@ -688,7 +679,7 @@
 		return ret;
 	parent = of_get_parent(np);
 	if (parent) {
-		if (strcmp(parent->type, "isa") == 0)
+		if (of_node_is_type(parent, "isa"))
 			ret = 0;
 		of_node_put(parent);
 	}
@@ -721,8 +712,28 @@
 	.priority = INT_MIN /* may not return; must be done last */
 };
 
+/*
+ * Dump out kernel offset information on panic.
+ */
+static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
+			      void *p)
+{
+	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+		 kaslr_offset(), KERNELBASE);
+
+	return 0;
+}
+
+static struct notifier_block kernel_offset_notifier = {
+	.notifier_call = dump_kernel_offset
+};
+
 void __init setup_panic(void)
 {
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &kernel_offset_notifier);
+
 	/* PPC64 always does a hard irq disable in its panic handler */
 	if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
 		return;
@@ -738,23 +749,19 @@
  * BUG() in that case.
  */
 
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define KERNEL_COHERENCY	0
-#else
-#define KERNEL_COHERENCY	1
-#endif
+#define KERNEL_COHERENCY	(!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))
 
 static int __init check_cache_coherency(void)
 {
 	struct device_node *np;
 	const void *prop;
-	int devtree_coherency;
+	bool devtree_coherency;
 
 	np = of_find_node_by_path("/");
 	prop = of_get_property(np, "coherency-off", NULL);
 	of_node_put(np);
 
-	devtree_coherency = prop ? 0 : 1;
+	devtree_coherency = prop ? false : true;
 
 	if (devtree_coherency != KERNEL_COHERENCY) {
 		printk(KERN_ERR
@@ -777,8 +784,7 @@
 static int powerpc_debugfs_init(void)
 {
 	powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);
-
-	return powerpc_debugfs_root == NULL;
+	return 0;
 }
 arch_initcall(powerpc_debugfs_init);
 #endif
@@ -788,22 +794,9 @@
 	pr_info("%s\n", s);
 }
 
-void arch_setup_pdev_archdata(struct platform_device *pdev)
-{
-	pdev->archdata.dma_mask = DMA_BIT_MASK(32);
-	pdev->dev.dma_mask = &pdev->archdata.dma_mask;
-	set_dma_ops(&pdev->dev, &dma_nommu_ops);
-}
-
 static __init void print_system_info(void)
 {
 	pr_info("-----------------------------------------------------\n");
-#ifdef CONFIG_PPC_BOOK3S_64
-	pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
-#endif
-#ifdef CONFIG_PPC_STD_MMU_32
-	pr_info("Hash_size = 0x%lx\n", Hash_size);
-#endif
 	pr_info("phys_mem_size = 0x%llx\n",
 		(unsigned long long)memblock_phys_mem_size());
 
@@ -823,20 +816,15 @@
 	pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
 #ifdef CONFIG_PPC64
 	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
+#ifdef CONFIG_PPC_BOOK3S
+	pr_info("vmalloc start = 0x%lx\n", KERN_VIRT_START);
+	pr_info("IO start = 0x%lx\n", KERN_IO_START);
+	pr_info("vmemmap start = 0x%lx\n", (unsigned long)vmemmap);
+#endif
 #endif
 
-#ifdef CONFIG_PPC_BOOK3S_64
-	if (htab_address)
-		pr_info("htab_address = 0x%p\n", htab_address);
-	if (htab_hash_mask)
-		pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#endif
-#ifdef CONFIG_PPC_STD_MMU_32
-	if (Hash)
-		pr_info("Hash = 0x%p\n", Hash);
-	if (Hash_mask)
-		pr_info("Hash_mask = 0x%lx\n", Hash_mask);
-#endif
+	if (!early_radix_enabled())
+		print_system_hash_info();
 
 	if (PHYSICAL_START > 0)
 		pr_info("physical_start = 0x%llx\n",
@@ -867,6 +855,8 @@
  */
 void __init setup_arch(char **cmdline_p)
 {
+	kasan_init();
+
 	*cmdline_p = boot_command_line;
 
 	/* Set a half-reasonable default so udelay does something sensible */
@@ -937,27 +927,17 @@
 	/* Reserve large chunks of memory for use by CMA for KVM. */
 	kvm_cma_reserve();
 
-	klp_init_thread_info(&init_thread_info);
+	/* Reserve large chunks of memory for us by CMA for hugetlb */
+	gigantic_hugetlb_cma_reserve();
+
+	klp_init_thread_info(&init_task);
 
 	init_mm.start_code = (unsigned long)_stext;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = klimit;
 
-#ifdef CONFIG_PPC_MM_SLICES
-#ifdef CONFIG_PPC64
-	if (!radix_enabled())
-		init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
-#elif defined(CONFIG_PPC_8xx)
-	init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW;
-#else
-#error "context.addr_limit not initialized."
-#endif
-#endif
-
-#ifdef CONFIG_SPAPR_TCE_IOMMU
 	mm_iommu_init(&init_mm);
-#endif
 	irqstack_early_init();
 	exc_lvl_early_init();
 	emergency_stack_init();
@@ -966,9 +946,8 @@
 
 	initmem_init();
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
+	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
+
 	if (ppc_md.setup_arch)
 		ppc_md.setup_arch();
 
@@ -980,10 +959,8 @@
 	/* Initialize the MMU context management stuff. */
 	mmu_context_init();
 
-#ifdef CONFIG_PPC64
 	/* Interrupt code needs to be 64K-aligned. */
-	if ((unsigned long)_stext & 0xffff)
+	if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
 		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
 		      (unsigned long)_stext);
-#endif
 }