2024-02-19 1c055e55a242a33e574e48be530e06770a210dcd
kernel/arch/s390/kernel/setup.c
@@ -34,11 +34,10 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/initrd.h>
-#include <linux/bootmem.h>
 #include <linux/root_dev.h>
 #include <linux/console.h>
 #include <linux/kernel_stat.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/pfn.h>
@@ -49,7 +48,9 @@
 #include <linux/crash_dump.h>
 #include <linux/memory.h>
 #include <linux/compat.h>
+#include <linux/start_kernel.h>
 
+#include <asm/boot_data.h>
 #include <asm/ipl.h>
 #include <asm/facility.h>
 #include <asm/smp.h>
@@ -65,10 +66,14 @@
 #include <asm/diag.h>
 #include <asm/os_info.h>
 #include <asm/sclp.h>
+#include <asm/stacktrace.h>
 #include <asm/sysinfo.h>
 #include <asm/numa.h>
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
+#include <asm/mem_detect.h>
+#include <asm/uv.h>
+#include <asm/asm-offsets.h>
 #include "entry.h"
 
 /*
@@ -88,9 +93,22 @@
 
 unsigned long int_hwcap = 0;
 
-int __initdata memory_end_set;
-unsigned long __initdata memory_end;
-unsigned long __initdata max_physmem_end;
+int __bootdata(noexec_disabled);
+int __bootdata(memory_end_set);
+unsigned long __bootdata(memory_end);
+unsigned long __bootdata(vmalloc_size);
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
+
+struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
+struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
+unsigned long __bootdata_preserved(__stext_dma);
+unsigned long __bootdata_preserved(__etext_dma);
+unsigned long __bootdata_preserved(__sdma);
+unsigned long __bootdata_preserved(__edma);
+unsigned long __bootdata_preserved(__kaslr_offset);
+unsigned int __bootdata_preserved(zlib_dfltcc_support);
+EXPORT_SYMBOL(zlib_dfltcc_support);
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -100,6 +118,7 @@
 
 struct page *vmemmap;
 EXPORT_SYMBOL(vmemmap);
+unsigned long vmemmap_size;
 
 unsigned long MODULES_VADDR;
 unsigned long MODULES_END;
@@ -107,6 +126,12 @@
 /* An array with a pointer to the lowcore of every CPU. */
 struct lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
+
+/*
+ * The Write Back bit position in the physaddr is given by the SLPC PCI.
+ * Leaving the mask zero always uses write through which is safe
+ */
+unsigned long mio_wb_bit_mask __ro_after_init;
 
 /*
  * This is set up by the setup-routine at boot-time
@@ -149,15 +174,15 @@
 static int __init conmode_setup(char *str)
 {
 #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
-	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
+	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
 		SET_CONSOLE_SCLP;
 #endif
 #if defined(CONFIG_TN3215_CONSOLE)
-	if (strncmp(str, "3215", 5) == 0)
+	if (!strcmp(str, "3215"))
 		SET_CONSOLE_3215;
 #endif
 #if defined(CONFIG_TN3270_CONSOLE)
-	if (strncmp(str, "3270", 5) == 0)
+	if (!strcmp(str, "3270"))
 		SET_CONSOLE_3270;
 #endif
 	set_preferred_console();
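
Reviewer note on the strcmp() conversion above: the old strncmp() calls passed a bound one byte past each literal, so the terminating NUL was compared too and the test already behaved as an exact match; strcmp() just states that directly. A minimal userspace sketch of the equivalence:

#include <assert.h>
#include <string.h>

int main(void)
{
	/* A bound of 4 covers "hwc" plus its NUL, so strncmp() compares
	 * the terminator as well and only matches the exact string --
	 * the same result strcmp() gives. */
	assert(strncmp("hwc", "hwc", 4) == 0 && strcmp("hwc", "hwc") == 0);
	assert(strncmp("hwcx", "hwc", 4) != 0 && strcmp("hwcx", "hwc") != 0);
	return 0;
}
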
@@ -192,7 +217,7 @@
 #endif
 			return;
 		}
-		if (strncmp(ptr + 8, "3270", 4) == 0) {
+		if (str_has_prefix(ptr + 8, "3270")) {
 #if defined(CONFIG_TN3270_CONSOLE)
 			SET_CONSOLE_3270;
 #elif defined(CONFIG_TN3215_CONSOLE)
@@ -200,7 +225,7 @@
 #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 			SET_CONSOLE_SCLP;
 #endif
-		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
+		} else if (str_has_prefix(ptr + 8, "3215")) {
 #if defined(CONFIG_TN3215_CONSOLE)
 			SET_CONSOLE_3215;
 #elif defined(CONFIG_TN3270_CONSOLE)
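
Reviewer note: unlike conmode_setup(), these are genuine prefix matches on the QUERY TERM response, so str_has_prefix() expresses the intent and removes the hand-counted length. A userspace stand-in mirroring the kernel helper's convention (returns the prefix length on match, 0 otherwise):

#include <assert.h>
#include <string.h>

/* Userspace re-implementation of the kernel's str_has_prefix(). */
static size_t str_has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
	/* The old strncmp(ptr, "3270", 4) == 0 relied on the caller
	 * counting the length correctly; here it cannot go stale. */
	assert(str_has_prefix("3270=foo", "3270") == 4);
	assert(str_has_prefix("3215", "3270") == 0);
	return 0;
}
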
@@ -221,14 +246,12 @@
 		SET_CONSOLE_SCLP;
 #endif
 	}
-	if (IS_ENABLED(CONFIG_VT) && IS_ENABLED(CONFIG_DUMMY_CONSOLE))
-		conswitchp = &dummy_con;
 }
 
 #ifdef CONFIG_CRASH_DUMP
 static void __init setup_zfcpdump(void)
 {
-	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+	if (!is_ipl_type_dump())
 		return;
 	if (OLDMEM_BASE)
 		return;
@@ -283,57 +306,102 @@
 void (*pm_power_off)(void) = machine_power_off;
 EXPORT_SYMBOL_GPL(pm_power_off);
 
-static int __init early_parse_mem(char *p)
+void *restart_stack;
+
+unsigned long stack_alloc(void)
 {
-	memory_end = memparse(p, &p);
-	memory_end &= PAGE_MASK;
-	memory_end_set = 1;
+#ifdef CONFIG_VMAP_STACK
+	return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
+			THREADINFO_GFP, NUMA_NO_NODE,
+			__builtin_return_address(0));
+#else
+	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+#endif
+}
+
+void stack_free(unsigned long stack)
+{
+#ifdef CONFIG_VMAP_STACK
+	vfree((void *) stack);
+#else
+	free_pages(stack, THREAD_SIZE_ORDER);
+#endif
+}
+
+int __init arch_early_irq_init(void)
+{
+	unsigned long stack;
+
+	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+	if (!stack)
+		panic("Couldn't allocate async stack");
+	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
 	return 0;
 }
-early_param("mem", early_parse_mem);
 
-static int __init parse_vmalloc(char *arg)
+static int __init async_stack_realloc(void)
 {
-	if (!arg)
-		return -EINVAL;
-	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
+	unsigned long old, new;
+
+	old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
+	new = stack_alloc();
+	if (!new)
+		panic("Couldn't allocate async stack");
+	S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
+	free_pages(old, THREAD_SIZE_ORDER);
 	return 0;
 }
-early_param("vmalloc", parse_vmalloc);
+early_initcall(async_stack_realloc);
 
-void *restart_stack __section(.data);
+void __init arch_call_rest_init(void)
+{
+	unsigned long stack;
+
+	stack = stack_alloc();
+	if (!stack)
+		panic("Couldn't allocate kernel stack");
+	current->stack = (void *) stack;
+#ifdef CONFIG_VMAP_STACK
+	current->stack_vm_area = (void *) stack;
+#endif
+	set_task_stack_end_magic(current);
+	stack += STACK_INIT_OFFSET;
+	S390_lowcore.kernel_stack = stack;
+	CALL_ON_STACK_NORETURN(rest_init, stack);
+}
 
 static void __init setup_lowcore_dat_off(void)
 {
+	unsigned long int_psw_mask = PSW_KERNEL_BITS;
 	struct lowcore *lc;
+
+	if (IS_ENABLED(CONFIG_KASAN))
+		int_psw_mask |= PSW_MASK_DAT;
 
 	/*
 	 * Setup lowcore for boot cpu
 	 */
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
-	lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
+	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
+	if (!lc)
+		panic("%s: Failed to allocate %zu bytes align=%zx\n",
+		      __func__, sizeof(*lc), sizeof(*lc));
+
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
-	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
 	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
-	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
-		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
 	lc->svc_new_psw.addr = (unsigned long) system_call;
-	lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
 	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
 	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
 	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
-	lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
 	lc->io_new_psw.addr = (unsigned long) io_int_handler;
 	lc->clock_comparator = clock_comparator_max;
-	lc->kernel_stack = ((unsigned long) &init_thread_union)
+	lc->nodat_stack = ((unsigned long) &init_thread_union)
 		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->async_stack = (unsigned long)
-		memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
-		+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->panic_stack = (unsigned long)
-		memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
-		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->current_task = (unsigned long)&init_task;
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
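
Reviewer note on stack_alloc(): with CONFIG_VMAP_STACK the stack lives in vmalloc space, where the unmapped guard holes between areas turn a stack overflow into an immediate fault instead of silent corruption of a neighbouring allocation. A userspace analogue of that property using an explicit PROT_NONE guard page (the THREAD_SIZE value is an assumption for the demo, not the s390 definition):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define THREAD_SIZE (16 * 1024)	/* demo value only */

/* Allocate a downward-growing stack with an inaccessible page below
 * it, so running off the end faults right away. */
static void *stack_alloc_demo(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, THREAD_SIZE + page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return NULL;
	if (mprotect(p, page, PROT_NONE))	/* guard page at the low end */
		return NULL;
	return p + page;
}

int main(void)
{
	void *stack = stack_alloc_demo();

	printf("stack at %p with a guard page below\n", stack);
	return stack ? 0 : 1;
}
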
@@ -344,7 +412,6 @@
 	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
 	       sizeof(lc->alt_stfle_fac_list));
 	nmi_alloc_boot_cpu(lc);
-	vdso_alloc_boot_cpu(lc);
 	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
 	lc->async_enter_timer = S390_lowcore.async_enter_timer;
 	lc->exit_timer = S390_lowcore.exit_timer;
@@ -354,8 +421,15 @@
 	lc->last_update_timer = S390_lowcore.last_update_timer;
 	lc->last_update_clock = S390_lowcore.last_update_clock;
 
-	restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
-	restart_stack += ASYNC_SIZE;
+	/*
+	 * Allocate the global restart stack which is the same for
+	 * all CPUs in case *one* of them does a PSW restart.
+	 */
+	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+	if (!restart_stack)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, THREAD_SIZE, THREAD_SIZE);
+	restart_stack += STACK_INIT_OFFSET;
 
 	/*
 	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
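
Reviewer note: the patch adds the allocate-or-panic idiom after every early memblock allocation, since memblock_alloc() in this form returns NULL on failure instead of panicking internally, and a failed boot-time allocation is unrecoverable. A userspace mock of the pattern (memblock_alloc() is stubbed with aligned_alloc() here; the real one also returns zeroed memory, which the stub imitates):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memblock_alloc(size_t size, size_t align)
{
	void *p = aligned_alloc(align, size);	/* size must be a multiple of align */

	if (p)
		memset(p, 0, size);
	return p;
}

int main(void)
{
	size_t size = 4096, align = 4096;
	void *stack = memblock_alloc(size, align);

	if (!stack) {
		/* Report exactly what was requested, then give up. */
		fprintf(stderr, "%s: Failed to allocate %zu bytes align=0x%zx\n",
			__func__, size, align);
		exit(1);
	}
	printf("allocated %zu bytes at %p\n", size, stack);
	return 0;
}
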
@@ -374,12 +448,13 @@
 	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
 	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
 
-#ifdef CONFIG_SMP
 	lc->spinlock_lockval = arch_spin_lockval(0);
 	lc->spinlock_index = 0;
 	arch_spin_lock_setup(0);
-#endif
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
+	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+	lc->preempt_count = PREEMPT_DISABLED;
 
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
@@ -419,8 +494,9 @@
 static void __init setup_resources(void)
 {
 	struct resource *res, *std_res, *sub_res;
-	struct memblock_region *reg;
+	phys_addr_t start, end;
 	int j;
+	u64 i;
 
 	code_resource.start = (unsigned long) _text;
 	code_resource.end = (unsigned long) _etext - 1;
@@ -429,13 +505,21 @@
 	bss_resource.start = (unsigned long) __bss_start;
 	bss_resource.end = (unsigned long) __bss_stop - 1;
 
-	for_each_memblock(memory, reg) {
-		res = memblock_virt_alloc(sizeof(*res), 8);
+	for_each_mem_range(i, &start, &end) {
+		res = memblock_alloc(sizeof(*res), 8);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+			      __func__, sizeof(*res), 8);
 		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 
 		res->name = "System RAM";
-		res->start = reg->base;
-		res->end = reg->base + reg->size - 1;
+		res->start = start;
+		/*
+		 * In memblock, end points to the first byte after the
+		 * range while in resources, end points to the last byte in
+		 * the range.
+		 */
+		res->end = end - 1;
 		request_resource(&iomem_resource, res);
 
 		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
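
Reviewer note on the end - 1 conversion: memblock hands out half-open ranges [start, end) while struct resource stores closed ranges [start, end], which is what the new comment records. A two-line userspace check of the arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long start = 0x100000, end = 0x200000;	/* sample memblock range */
	unsigned long res_start = start, res_end = end - 1;

	/* Same number of bytes either way. */
	assert(res_end - res_start + 1 == end - start);
	printf("resource: %#lx-%#lx (%lu bytes)\n",
	       res_start, res_end, end - start);
	return 0;
}
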
@@ -444,7 +528,10 @@
 			    std_res->start > res->end)
 				continue;
 			if (std_res->end > res->end) {
-				sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
+				sub_res = memblock_alloc(sizeof(*sub_res), 8);
+				if (!sub_res)
+					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+					      __func__, sizeof(*sub_res), 8);
 				*sub_res = *std_res;
 				sub_res->end = res->end;
 				std_res->start = res->end + 1;
@@ -472,21 +559,25 @@
 
 static void __init setup_memory_end(void)
 {
-	unsigned long vmax, vmalloc_size, tmp;
+	unsigned long vmax, tmp;
 
-	/* Choose kernel address space layout: 2, 3, or 4 levels. */
-	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
+	/* Choose kernel address space layout: 3 or 4 levels. */
 	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
 	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
 	if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
 		vmax = _REGION2_SIZE; /* 3-level kernel page table */
 	else
 		vmax = _REGION1_SIZE; /* 4-level kernel page table */
+	if (is_prot_virt_host())
+		adjust_to_uv_max(&vmax);
+#ifdef CONFIG_KASAN
+	vmax = kasan_vmax;
+#endif
 	/* module area is at the end of the kernel address space. */
 	MODULES_END = vmax;
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
-	VMALLOC_START = vmax - vmalloc_size;
+	VMALLOC_START = VMALLOC_END - vmalloc_size;
 
 	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
 	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
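
Reviewer note on the 3-vs-4-level decision: the code estimates the virtual space needed as the 1:1 mapping plus one struct page per physical page, then checks whether that plus vmalloc and module space fits under the 3-level limit. A worked userspace version with sample numbers (all constants below are assumptions for the demo, not the kernel's definitions):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define STRUCT_PAGE	64UL			/* typical sizeof(struct page) */
#define MODULES_LEN	(1UL << 31)
#define VMALLOC_SIZE	((128UL << 30) - MODULES_LEN)
#define REGION2_SIZE	(1UL << 42)		/* assumed 3-level limit (4 TiB) */

int main(void)
{
	unsigned long mem = 64UL << 30;		/* 64 GiB of physical memory */
	unsigned long tmp = (mem / PAGE_SIZE) * (STRUCT_PAGE + PAGE_SIZE);

	if (tmp + VMALLOC_SIZE + MODULES_LEN <= REGION2_SIZE)
		puts("3-level kernel page table is enough");
	else
		puts("need a 4-level kernel page table");
	return 0;
}
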
@@ -498,7 +589,17 @@
 	vmemmap = (struct page *) tmp;
 
 	/* Take care that memory_end is set and <= vmemmap */
-	memory_end = min(memory_end ?: max_physmem_end, tmp);
+	memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
+#ifdef CONFIG_KASAN
+	memory_end = min(memory_end, KASAN_SHADOW_START);
+#endif
+	vmemmap_size = SECTION_ALIGN_UP(memory_end / PAGE_SIZE) * sizeof(struct page);
+#ifdef CONFIG_KASAN
+	/* move vmemmap above the kasan shadow only if it stands in the way */
+	if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
+	    (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
+		vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
+#endif
 	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
 	memblock_remove(memory_end, ULONG_MAX);
 
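
Reviewer note: max_pfn is derived with PFN_DOWN() so a trailing partial page is never counted as usable. The macros below mirror the include/linux/pfn.h definitions:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages, as on s390 */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_UP(x)	(((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long memory_end = 0x3fff123UL;	/* sample, not page aligned */

	assert(PFN_DOWN(memory_end) == 0x3fff);	/* rounds down */
	assert(PFN_UP(memory_end) == 0x4000);	/* rounds up */
	printf("max_pfn = %#lx\n", PFN_DOWN(memory_end));
	return 0;
}
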
@@ -508,9 +609,10 @@
 #ifdef CONFIG_CRASH_DUMP
 
 /*
- * When kdump is enabled, we have to ensure that no memory from
- * the area [0 - crashkernel memory size] and
- * [crashk_res.start - crashk_res.end] is set offline.
+ * When kdump is enabled, we have to ensure that no memory from the area
+ * [0 - crashkernel memory size] is set offline - it will be exchanged with
+ * the crashkernel memory region when kdump is triggered. The crashkernel
+ * memory region can never get offlined (pages are unmovable).
  */
 static int kdump_mem_notifier(struct notifier_block *nb,
 			      unsigned long action, void *data)
@@ -521,11 +623,7 @@
 		return NOTIFY_OK;
 	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
 		return NOTIFY_BAD;
-	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
-		return NOTIFY_OK;
-	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
-		return NOTIFY_OK;
-	return NOTIFY_BAD;
+	return NOTIFY_OK;
 }
 
 static struct notifier_block kdump_mem_nb = {
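
Reviewer note: after the simplification, only offline requests that start below the crashkernel size are refused; everything above may go offline, because the low region is what gets swapped with the crashkernel area when kdump triggers. A sketch of the remaining check (NOTIFY_* values mirror include/linux/notifier.h):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define NOTIFY_OK	0x0001
#define NOTIFY_BAD	0x8002

static int kdump_mem_check(unsigned long start_pfn, unsigned long crash_size)
{
	/* Memory below the crashkernel size is pinned. */
	if (start_pfn < PFN_DOWN(crash_size))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

int main(void)
{
	unsigned long crash_size = 128UL << 20;	/* 128 MiB crashkernel */

	assert(kdump_mem_check(0, crash_size) == NOTIFY_BAD);
	assert(kdump_mem_check(PFN_DOWN(crash_size), crash_size) == NOTIFY_OK);
	puts("offline checks behave as expected");
	return 0;
}
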
@@ -539,17 +637,8 @@
  */
 static void __init reserve_memory_end(void)
 {
-#ifdef CONFIG_CRASH_DUMP
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
-	    !OLDMEM_BASE && sclp.hsa_size) {
-		memory_end = sclp.hsa_size;
-		memory_end &= PAGE_MASK;
-		memory_end_set = 1;
-	}
-#endif
-	if (!memory_end_set)
-		return;
-	memblock_reserve(memory_end, ULONG_MAX);
+	if (memory_end_set)
+		memblock_reserve(memory_end, ULONG_MAX);
 }
 
 /*
555644 /*
....@@ -657,6 +746,66 @@
657746 }
658747
659748 /*
749
+ * Reserve the memory area used to pass the certificate lists
750
+ */
751
+static void __init reserve_certificate_list(void)
752
+{
753
+ if (ipl_cert_list_addr)
754
+ memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
755
+}
756
+
757
+static void __init reserve_mem_detect_info(void)
758
+{
759
+ unsigned long start, size;
760
+
761
+ get_mem_detect_reserved(&start, &size);
762
+ if (size)
763
+ memblock_reserve(start, size);
764
+}
765
+
766
+static void __init free_mem_detect_info(void)
767
+{
768
+ unsigned long start, size;
769
+
770
+ get_mem_detect_reserved(&start, &size);
771
+ if (size)
772
+ memblock_free(start, size);
773
+}
774
+
775
+static const char * __init get_mem_info_source(void)
776
+{
777
+ switch (mem_detect.info_source) {
778
+ case MEM_DETECT_SCLP_STOR_INFO:
779
+ return "sclp storage info";
780
+ case MEM_DETECT_DIAG260:
781
+ return "diag260";
782
+ case MEM_DETECT_SCLP_READ_INFO:
783
+ return "sclp read info";
784
+ case MEM_DETECT_BIN_SEARCH:
785
+ return "binary search";
786
+ }
787
+ return "none";
788
+}
789
+
790
+static void __init memblock_add_mem_detect_info(void)
791
+{
792
+ unsigned long start, end;
793
+ int i;
794
+
795
+ pr_debug("physmem info source: %s (%hhd)\n",
796
+ get_mem_info_source(), mem_detect.info_source);
797
+ /* keep memblock lists close to the kernel */
798
+ memblock_set_bottom_up(true);
799
+ for_each_mem_detect_block(i, &start, &end) {
800
+ memblock_add(start, end - start);
801
+ memblock_physmem_add(start, end - start);
802
+ }
803
+ memblock_set_bottom_up(false);
804
+ memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
805
+ memblock_dump_all();
806
+}
807
+
808
+/*
660809 * Check for initrd being in usable memory
661810 */
662811 static void __init check_initrd(void)
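
Reviewer note: for_each_mem_detect_block() follows the kernel's usual iterator-macro shape -- a for loop that fills the caller's start/end variables on each pass. A self-contained sketch of that shape over a fictional block table (blocks[], NR_BLOCKS and for_each_block() are inventions for the demo, not kernel interfaces):

#include <stdio.h>

#define NR_BLOCKS 3

static const struct { unsigned long start, end; } blocks[NR_BLOCKS] = {
	{ 0x00000000, 0x10000000 },
	{ 0x20000000, 0x28000000 },
	{ 0x40000000, 0x48000000 },
};

#define for_each_block(i, startp, endp)					\
	for ((i) = 0;							\
	     (i) < NR_BLOCKS &&						\
	     (*(startp) = blocks[(i)].start, *(endp) = blocks[(i)].end, 1); \
	     (i)++)

int main(void)
{
	unsigned long start, end;
	int i;

	/* Each pass yields one [start, end) block, mirroring how the
	 * kernel loop feeds memblock_add(). */
	for_each_block(i, &start, &end)
		printf("block %d: %#lx-%#lx (%lu bytes)\n",
		       i, start, end, end - start);
	return 0;
}
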
@@ -678,30 +827,23 @@
 {
 	unsigned long start_pfn = PFN_UP(__pa(_end));
 
-#ifdef CONFIG_DMA_API_DEBUG
-	/*
-	 * DMA_API_DEBUG code stumbles over addresses from the
-	 * range [PARMAREA_END, _stext]. Mark the memory as reserved
-	 * so it is not used for CONFIG_DMA_API_DEBUG=y.
-	 */
-	memblock_reserve(0, PFN_PHYS(start_pfn));
-#else
-	memblock_reserve(0, PARMAREA_END);
+	memblock_reserve(0, HEAD_END);
 	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
 			 - (unsigned long)_stext);
-#endif
+	memblock_reserve(__sdma, __edma - __sdma);
 }
 
 static void __init setup_memory(void)
 {
-	struct memblock_region *reg;
+	phys_addr_t start, end;
+	u64 i;
 
 	/*
 	 * Init storage key for present memory
 	 */
-	for_each_memblock(memory, reg) {
-		storage_key_init_range(reg->base, reg->base + reg->size);
-	}
+	for_each_mem_range(i, &start, &end)
+		storage_key_init_range(start, end);
+
 	psw_set_key(PAGE_DEFAULT_KEY);
 }
 
@@ -781,10 +923,18 @@
 	if (MACHINE_HAS_VX) {
 		elf_hwcap |= HWCAP_S390_VXRS;
 		if (test_facility(134))
-			elf_hwcap |= HWCAP_S390_VXRS_EXT;
-		if (test_facility(135))
 			elf_hwcap |= HWCAP_S390_VXRS_BCD;
+		if (test_facility(135))
+			elf_hwcap |= HWCAP_S390_VXRS_EXT;
+		if (test_facility(148))
+			elf_hwcap |= HWCAP_S390_VXRS_EXT2;
+		if (test_facility(152))
+			elf_hwcap |= HWCAP_S390_VXRS_PDE;
 	}
+	if (test_facility(150))
+		elf_hwcap |= HWCAP_S390_SORT;
+	if (test_facility(151))
+		elf_hwcap |= HWCAP_S390_DFLT;
 
 	/*
 	 * Guarded storage support HWCAP_S390_GS is bit 12.
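
Reviewer note: the facility bits probed here end up visible to userspace through the ELF auxiliary vector. A small consumer-side demo (the HWCAP_S390_VXRS bit value is quoted from the s390 ABI; on other architectures AT_HWCAP bits mean different things):

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_S390_VXRS (1UL << 11)	/* vector facility, s390 numbering */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("AT_HWCAP = %#lx, vector facility %savailable\n",
	       hwcap, (hwcap & HWCAP_S390_VXRS) ? "" : "not ");
	return 0;
}
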
@@ -828,6 +978,10 @@
 	case 0x3907:
 		strcpy(elf_platform, "z14");
 		break;
+	case 0x8561:
+	case 0x8562:
+		strcpy(elf_platform, "z15");
+		break;
 	}
 
 	/*
@@ -847,10 +1001,19 @@
 {
 	struct sysinfo_3_2_2 *vmms;
 
-	vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
+							    PAGE_SIZE);
+	if (!vmms)
+		panic("Failed to allocate memory for sysinfo structure\n");
+
 	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
 		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
 	memblock_free((unsigned long) vmms, PAGE_SIZE);
+
+#ifdef CONFIG_ARCH_RANDOM
+	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+		static_branch_enable(&s390_arch_random_available);
+#endif
 }
 
 /*
@@ -867,6 +1030,56 @@
 		task_size += sizeof(freg_t) * __NUM_FPRS;
 	}
 	arch_task_struct_size = task_size;
+}
+
+/*
+ * Issue diagnose 318 to set the control program name and
+ * version codes.
+ */
+static void __init setup_control_program_code(void)
+{
+	union diag318_info diag318_info = {
+		.cpnc = CPNC_LINUX,
+		.cpvc = 0,
+	};
+
+	if (!sclp.has_diag318)
+		return;
+
+	diag_stat_inc(DIAG_STAT_X318);
+	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
+}
+
+/*
+ * Print the component list from the IPL report
+ */
+static void __init log_component_list(void)
+{
+	struct ipl_rb_component_entry *ptr, *end;
+	char *str;
+
+	if (!early_ipl_comp_list_addr)
+		return;
+	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
+		pr_info("Linux is running with Secure-IPL enabled\n");
+	else
+		pr_info("Linux is running with Secure-IPL disabled\n");
+	ptr = (void *) early_ipl_comp_list_addr;
+	end = (void *) ptr + early_ipl_comp_list_size;
+	pr_info("The IPL report contains the following components:\n");
+	while (ptr < end) {
+		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
+			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
+				str = "signed, verified";
+			else
+				str = "signed, verification failed";
+		} else {
+			str = "not signed";
+		}
+		pr_info("%016llx - %016llx (%s)\n",
+			ptr->addr, ptr->addr + ptr->len, str);
+		ptr++;
+	}
 }
 
 /*
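
Reviewer note on log_component_list(): each entry carries independent "signed" and "verified" flags, and the nested test decodes the three meaningful states. A userspace sketch of the same decode over toy entries (the struct layout and flag values are inventions mirroring the shape, not the real ipl_rb_component_entry):

#include <stdio.h>

#define FLAG_SIGNED	0x01
#define FLAG_VERIFIED	0x02

struct component {
	unsigned long long addr, len;
	unsigned char flags;
};

static const char *signing_state(unsigned char flags)
{
	if (!(flags & FLAG_SIGNED))
		return "not signed";
	return (flags & FLAG_VERIFIED) ? "signed, verified"
				       : "signed, verification failed";
}

int main(void)
{
	const struct component list[] = {
		{ 0x10000, 0x8000, FLAG_SIGNED | FLAG_VERIFIED },
		{ 0x20000, 0x4000, FLAG_SIGNED },
		{ 0x30000, 0x1000, 0 },
	};

	for (unsigned int i = 0; i < sizeof(list) / sizeof(list[0]); i++)
		printf("%016llx - %016llx (%s)\n", list[i].addr,
		       list[i].addr + list[i].len, signing_state(list[i].flags));
	return 0;
}
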
@@ -889,14 +1102,15 @@
 	else
 		pr_info("Linux is running as a guest in 64-bit mode\n");
 
+	log_component_list();
+
 	/* Have one command line that is parsed and saved in /proc/cmdline */
 	/* boot_command_line has been already set up in early.c */
 	*cmdline_p = boot_command_line;
 
 	ROOT_DEV = Root_RAM0;
 
-	/* Is init_mm really needed? */
-	init_mm.start_code = PAGE_OFFSET;
+	init_mm.start_code = (unsigned long) _text;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = (unsigned long) _end;
@@ -914,27 +1128,24 @@
 	os_info_init();
 	setup_ipl();
 	setup_task_size();
+	setup_control_program_code();
 
 	/* Do some memory reservations *before* memory is added to memblock */
 	reserve_memory_end();
 	reserve_oldmem();
 	reserve_kernel();
 	reserve_initrd();
+	reserve_certificate_list();
+	reserve_mem_detect_info();
 	memblock_allow_resize();
 
 	/* Get information about *all* installed memory */
-	detect_memory_memblock();
+	memblock_add_mem_detect_info();
 
+	free_mem_detect_info();
 	remove_oldmem();
 
-	/*
-	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
-	 * extra checks that HOLES_IN_ZONE would require.
-	 *
-	 * Is this still required?
-	 */
-	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
-
+	setup_uv();
 	setup_memory_end();
 	setup_memory();
 	dma_contiguous_reserve(memory_end);
@@ -978,7 +1189,7 @@
 	if (IS_ENABLED(CONFIG_EXPOLINE))
 		nospec_init_branches();
 
-	/* Setup zfcpdump support */
+	/* Setup zfcp/nvme dump support */
 	setup_zfcpdump();
 
 	/* Add system specific data to the random pool */