| .. | .. |
|---|
| 34 | 34 | #include <linux/delay.h> |
|---|
| 35 | 35 | #include <linux/init.h> |
|---|
| 36 | 36 | #include <linux/initrd.h> |
|---|
| 37 | | -#include <linux/bootmem.h> |
|---|
| 38 | 37 | #include <linux/root_dev.h> |
|---|
| 39 | 38 | #include <linux/console.h> |
|---|
| 40 | 39 | #include <linux/kernel_stat.h> |
|---|
| 41 | | -#include <linux/dma-contiguous.h> |
|---|
| 40 | +#include <linux/dma-map-ops.h> |
|---|
| 42 | 41 | #include <linux/device.h> |
|---|
| 43 | 42 | #include <linux/notifier.h> |
|---|
| 44 | 43 | #include <linux/pfn.h> |
|---|
| .. | .. |
|---|
| 49 | 48 | #include <linux/crash_dump.h> |
|---|
| 50 | 49 | #include <linux/memory.h> |
|---|
| 51 | 50 | #include <linux/compat.h> |
|---|
| 51 | +#include <linux/start_kernel.h> |
|---|
| 52 | 52 | |
|---|
| 53 | +#include <asm/boot_data.h> |
|---|
| 53 | 54 | #include <asm/ipl.h> |
|---|
| 54 | 55 | #include <asm/facility.h> |
|---|
| 55 | 56 | #include <asm/smp.h> |
|---|
| .. | .. |
|---|
| 65 | 66 | #include <asm/diag.h> |
|---|
| 66 | 67 | #include <asm/os_info.h> |
|---|
| 67 | 68 | #include <asm/sclp.h> |
|---|
| 69 | +#include <asm/stacktrace.h> |
|---|
| 68 | 70 | #include <asm/sysinfo.h> |
|---|
| 69 | 71 | #include <asm/numa.h> |
|---|
| 70 | 72 | #include <asm/alternative.h> |
|---|
| 71 | 73 | #include <asm/nospec-branch.h> |
|---|
| 74 | +#include <asm/mem_detect.h> |
|---|
| 75 | +#include <asm/uv.h> |
|---|
| 76 | +#include <asm/asm-offsets.h> |
|---|
| 72 | 77 | #include "entry.h" |
|---|
| 73 | 78 | |
|---|
| 74 | 79 | /* |
|---|
| .. | .. |
|---|
| 88 | 93 | |
|---|
| 89 | 94 | unsigned long int_hwcap = 0; |
|---|
| 90 | 95 | |
|---|
| 91 | | -int __initdata memory_end_set; |
|---|
| 92 | | -unsigned long __initdata memory_end; |
|---|
| 93 | | -unsigned long __initdata max_physmem_end; |
|---|
| 96 | +int __bootdata(noexec_disabled); |
|---|
| 97 | +int __bootdata(memory_end_set); |
|---|
| 98 | +unsigned long __bootdata(memory_end); |
|---|
| 99 | +unsigned long __bootdata(vmalloc_size); |
|---|
| 100 | +unsigned long __bootdata(max_physmem_end); |
|---|
| 101 | +struct mem_detect_info __bootdata(mem_detect); |
|---|
| 102 | + |
|---|
| 103 | +struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table); |
|---|
| 104 | +struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table); |
|---|
| 105 | +unsigned long __bootdata_preserved(__stext_dma); |
|---|
| 106 | +unsigned long __bootdata_preserved(__etext_dma); |
|---|
| 107 | +unsigned long __bootdata_preserved(__sdma); |
|---|
| 108 | +unsigned long __bootdata_preserved(__edma); |
|---|
| 109 | +unsigned long __bootdata_preserved(__kaslr_offset); |
|---|
| 110 | +unsigned int __bootdata_preserved(zlib_dfltcc_support); |
|---|
| 111 | +EXPORT_SYMBOL(zlib_dfltcc_support); |
|---|
| 94 | 112 | |
|---|
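The `__bootdata()` / `__bootdata_preserved()` declarations above replace the old `__initdata` variables: their values are now produced by the decompressor (arch/s390/boot) and handed over through dedicated linker sections instead of being parsed again here. A minimal sketch of how such annotation macros are typically defined; the exact section names are an assumption based on arch/s390/include/asm/sections.h and should be checked against the tree this diff applies to.

```c
/* Sketch only: each annotated variable is placed in its own
 * .boot.data.* or .boot.preserved.data.* section. The decompressor declares
 * the same variables with the same macros, so both images agree on the
 * section layout and the boot phase can hand the values over without any
 * re-parsing in setup.c.
 */
#define __bootdata(var)			__section(".boot.data." #var) var
#define __bootdata_preserved(var)	__section(".boot.preserved.data." #var) var

/* usage, as in the hunk above */
int __bootdata(memory_end_set);
unsigned long __bootdata_preserved(__kaslr_offset);
```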
| 95 | 113 | unsigned long VMALLOC_START; |
|---|
| 96 | 114 | EXPORT_SYMBOL(VMALLOC_START); |
|---|
| .. | .. |
|---|
| 100 | 118 | |
|---|
| 101 | 119 | struct page *vmemmap; |
|---|
| 102 | 120 | EXPORT_SYMBOL(vmemmap); |
|---|
| 121 | +unsigned long vmemmap_size; |
|---|
| 103 | 122 | |
|---|
| 104 | 123 | unsigned long MODULES_VADDR; |
|---|
| 105 | 124 | unsigned long MODULES_END; |
|---|
| .. | .. |
|---|
| 107 | 126 | /* An array with a pointer to the lowcore of every CPU. */ |
|---|
| 108 | 127 | struct lowcore *lowcore_ptr[NR_CPUS]; |
|---|
| 109 | 128 | EXPORT_SYMBOL(lowcore_ptr); |
|---|
| 129 | + |
|---|
| 130 | +/* |
|---|
| 131 | + * The Write Back bit position in the physaddr is given by the SLPC PCI. |
|---|
| 132 | + * Leaving the mask zero always uses write through which is safe |
|---|
| 133 | + */ |
|---|
| 134 | +unsigned long mio_wb_bit_mask __ro_after_init; |
|---|
| 110 | 135 | |
|---|
| 111 | 136 | /* |
|---|
| 112 | 137 | * This is set up by the setup-routine at boot-time |
|---|
| .. | .. |
|---|
| 149 | 174 | static int __init conmode_setup(char *str) |
|---|
| 150 | 175 | { |
|---|
| 151 | 176 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
|---|
| 152 | | - if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) |
|---|
| 177 | + if (!strcmp(str, "hwc") || !strcmp(str, "sclp")) |
|---|
| 153 | 178 | SET_CONSOLE_SCLP; |
|---|
| 154 | 179 | #endif |
|---|
| 155 | 180 | #if defined(CONFIG_TN3215_CONSOLE) |
|---|
| 156 | | - if (strncmp(str, "3215", 5) == 0) |
|---|
| 181 | + if (!strcmp(str, "3215")) |
|---|
| 157 | 182 | SET_CONSOLE_3215; |
|---|
| 158 | 183 | #endif |
|---|
| 159 | 184 | #if defined(CONFIG_TN3270_CONSOLE) |
|---|
| 160 | | - if (strncmp(str, "3270", 5) == 0) |
|---|
| 185 | + if (!strcmp(str, "3270")) |
|---|
| 161 | 186 | SET_CONSOLE_3270; |
|---|
| 162 | 187 | #endif |
|---|
| 163 | 188 | set_preferred_console(); |
|---|
| .. | .. |
|---|
| 192 | 217 | #endif |
|---|
| 193 | 218 | return; |
|---|
| 194 | 219 | } |
|---|
| 195 | | - if (strncmp(ptr + 8, "3270", 4) == 0) { |
|---|
| 220 | + if (str_has_prefix(ptr + 8, "3270")) { |
|---|
| 196 | 221 | #if defined(CONFIG_TN3270_CONSOLE) |
|---|
| 197 | 222 | SET_CONSOLE_3270; |
|---|
| 198 | 223 | #elif defined(CONFIG_TN3215_CONSOLE) |
|---|
| .. | .. |
|---|
| 200 | 225 | #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
|---|
| 201 | 226 | SET_CONSOLE_SCLP; |
|---|
| 202 | 227 | #endif |
|---|
| 203 | | - } else if (strncmp(ptr + 8, "3215", 4) == 0) { |
|---|
| 228 | + } else if (str_has_prefix(ptr + 8, "3215")) { |
|---|
| 204 | 229 | #if defined(CONFIG_TN3215_CONSOLE) |
|---|
| 205 | 230 | SET_CONSOLE_3215; |
|---|
| 206 | 231 | #elif defined(CONFIG_TN3270_CONSOLE) |
|---|
| .. | .. |
|---|
| 221 | 246 | SET_CONSOLE_SCLP; |
|---|
| 222 | 247 | #endif |
|---|
| 223 | 248 | } |
|---|
| 224 | | - if (IS_ENABLED(CONFIG_VT) && IS_ENABLED(CONFIG_DUMMY_CONSOLE)) |
|---|
| 225 | | - conswitchp = &dummy_con; |
|---|
| 226 | 249 | } |
|---|
| 227 | 250 | |
|---|
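The string-helper changes in the console hunks above are worth a second look: `strncmp(str, "hwc", 4)` also compares the terminating NUL, so it always behaved like an exact match and `!strcmp()` just states that intent, whereas the `conmode=` values such as "3270" only need to match as a prefix, which `str_has_prefix()` makes explicit. A small userspace sketch of the two semantics; `has_prefix()` is a stand-in for the kernel's `str_has_prefix()` (which returns the prefix length rather than 1).

```c
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's str_has_prefix(): true if the prefix matches. */
static int has_prefix(const char *s, const char *prefix)
{
	return strncmp(s, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	/* strncmp with a length that covers the '\0' is an exact match */
	printf("%d\n", strncmp("hwc", "hwc", 4) == 0);	/* 1 */
	printf("%d\n", strcmp("hwc", "hwc") == 0);	/* 1, same result */

	/* prefix match vs. exact match on a conmode-style string */
	printf("%d\n", has_prefix("3270 console", "3270"));	/* 1 */
	printf("%d\n", strcmp("3270 console", "3270") == 0);	/* 0 */
	return 0;
}
```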
| 228 | 251 | #ifdef CONFIG_CRASH_DUMP |
|---|
| 229 | 252 | static void __init setup_zfcpdump(void) |
|---|
| 230 | 253 | { |
|---|
| 231 | | - if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
|---|
| 254 | + if (!is_ipl_type_dump()) |
|---|
| 232 | 255 | return; |
|---|
| 233 | 256 | if (OLDMEM_BASE) |
|---|
| 234 | 257 | return; |
|---|
| .. | .. |
|---|
| 283 | 306 | void (*pm_power_off)(void) = machine_power_off; |
|---|
| 284 | 307 | EXPORT_SYMBOL_GPL(pm_power_off); |
|---|
| 285 | 308 | |
|---|
| 286 | | -static int __init early_parse_mem(char *p) |
|---|
| 309 | +void *restart_stack; |
|---|
| 310 | + |
|---|
| 311 | +unsigned long stack_alloc(void) |
|---|
| 287 | 312 | { |
|---|
| 288 | | - memory_end = memparse(p, &p); |
|---|
| 289 | | - memory_end &= PAGE_MASK; |
|---|
| 290 | | - memory_end_set = 1; |
|---|
| 313 | +#ifdef CONFIG_VMAP_STACK |
|---|
| 314 | + return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE, |
|---|
| 315 | + THREADINFO_GFP, NUMA_NO_NODE, |
|---|
| 316 | + __builtin_return_address(0)); |
|---|
| 317 | +#else |
|---|
| 318 | + return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); |
|---|
| 319 | +#endif |
|---|
| 320 | +} |
|---|
| 321 | + |
|---|
| 322 | +void stack_free(unsigned long stack) |
|---|
| 323 | +{ |
|---|
| 324 | +#ifdef CONFIG_VMAP_STACK |
|---|
| 325 | + vfree((void *) stack); |
|---|
| 326 | +#else |
|---|
| 327 | + free_pages(stack, THREAD_SIZE_ORDER); |
|---|
| 328 | +#endif |
|---|
| 329 | +} |
|---|
| 330 | + |
|---|
| 331 | +int __init arch_early_irq_init(void) |
|---|
| 332 | +{ |
|---|
| 333 | + unsigned long stack; |
|---|
| 334 | + |
|---|
| 335 | + stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); |
|---|
| 336 | + if (!stack) |
|---|
| 337 | + panic("Couldn't allocate async stack"); |
|---|
| 338 | + S390_lowcore.async_stack = stack + STACK_INIT_OFFSET; |
|---|
| 291 | 339 | return 0; |
|---|
| 292 | 340 | } |
|---|
| 293 | | -early_param("mem", early_parse_mem); |
|---|
| 294 | 341 | |
|---|
| 295 | | -static int __init parse_vmalloc(char *arg) |
|---|
| 342 | +static int __init async_stack_realloc(void) |
|---|
| 296 | 343 | { |
|---|
| 297 | | - if (!arg) |
|---|
| 298 | | - return -EINVAL; |
|---|
| 299 | | - VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK; |
|---|
| 344 | + unsigned long old, new; |
|---|
| 345 | + |
|---|
| 346 | + old = S390_lowcore.async_stack - STACK_INIT_OFFSET; |
|---|
| 347 | + new = stack_alloc(); |
|---|
| 348 | + if (!new) |
|---|
| 349 | + panic("Couldn't allocate async stack"); |
|---|
| 350 | + S390_lowcore.async_stack = new + STACK_INIT_OFFSET; |
|---|
| 351 | + free_pages(old, THREAD_SIZE_ORDER); |
|---|
| 300 | 352 | return 0; |
|---|
| 301 | 353 | } |
|---|
| 302 | | -early_param("vmalloc", parse_vmalloc); |
|---|
| 354 | +early_initcall(async_stack_realloc); |
|---|
| 303 | 355 | |
|---|
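The helpers above give every caller one allocation path for kernel stacks: vmalloc-backed (with guard pages) when CONFIG_VMAP_STACK is set, plain pages otherwise. Note that `arch_early_irq_init()` still uses `__get_free_pages()` directly, and the async stack is only swapped for a `stack_alloc()`-backed one in the `async_stack_realloc` early initcall above. A hypothetical caller, for illustration only, showing the pairing later users (e.g. CPU bring-up code) are expected to follow; the function names below are made up.

```c
/* Hypothetical example: set up and tear down a per-CPU async stack with the
 * helpers introduced in this hunk. STACK_INIT_OFFSET points the lowcore entry
 * at the usable top of the stack, exactly as arch_early_irq_init() does.
 */
static int example_prepare_async_stack(struct lowcore *lc)
{
	unsigned long stack = stack_alloc();

	if (!stack)
		return -ENOMEM;
	lc->async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

static void example_release_async_stack(struct lowcore *lc)
{
	stack_free(lc->async_stack - STACK_INIT_OFFSET);
}
```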
| 304 | | -void *restart_stack __section(.data); |
|---|
| 356 | +void __init arch_call_rest_init(void) |
|---|
| 357 | +{ |
|---|
| 358 | + unsigned long stack; |
|---|
| 359 | + |
|---|
| 360 | + stack = stack_alloc(); |
|---|
| 361 | + if (!stack) |
|---|
| 362 | + panic("Couldn't allocate kernel stack"); |
|---|
| 363 | + current->stack = (void *) stack; |
|---|
| 364 | +#ifdef CONFIG_VMAP_STACK |
|---|
| 365 | + current->stack_vm_area = (void *) stack; |
|---|
| 366 | +#endif |
|---|
| 367 | + set_task_stack_end_magic(current); |
|---|
| 368 | + stack += STACK_INIT_OFFSET; |
|---|
| 369 | + S390_lowcore.kernel_stack = stack; |
|---|
| 370 | + CALL_ON_STACK_NORETURN(rest_init, stack); |
|---|
| 371 | +} |
|---|
| 305 | 372 | |
|---|
| 306 | 373 | static void __init setup_lowcore_dat_off(void) |
|---|
| 307 | 374 | { |
|---|
| 375 | + unsigned long int_psw_mask = PSW_KERNEL_BITS; |
|---|
| 308 | 376 | struct lowcore *lc; |
|---|
| 377 | + |
|---|
| 378 | + if (IS_ENABLED(CONFIG_KASAN)) |
|---|
| 379 | + int_psw_mask |= PSW_MASK_DAT; |
|---|
| 309 | 380 | |
|---|
| 310 | 381 | /* |
|---|
| 311 | 382 | * Setup lowcore for boot cpu |
|---|
| 312 | 383 | */ |
|---|
| 313 | 384 | BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE); |
|---|
| 314 | | - lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc)); |
|---|
| 385 | + lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc)); |
|---|
| 386 | + if (!lc) |
|---|
| 387 | + panic("%s: Failed to allocate %zu bytes align=%zx\n", |
|---|
| 388 | + __func__, sizeof(*lc), sizeof(*lc)); |
|---|
| 389 | + |
|---|
| 315 | 390 | lc->restart_psw.mask = PSW_KERNEL_BITS; |
|---|
| 316 | 391 | lc->restart_psw.addr = (unsigned long) restart_int_handler; |
|---|
| 317 | | - lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; |
|---|
| 392 | + lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; |
|---|
| 318 | 393 | lc->external_new_psw.addr = (unsigned long) ext_int_handler; |
|---|
| 319 | | - lc->svc_new_psw.mask = PSW_KERNEL_BITS | |
|---|
| 320 | | - PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; |
|---|
| 394 | + lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; |
|---|
| 321 | 395 | lc->svc_new_psw.addr = (unsigned long) system_call; |
|---|
| 322 | | - lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; |
|---|
| 396 | + lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; |
|---|
| 323 | 397 | lc->program_new_psw.addr = (unsigned long) pgm_check_handler; |
|---|
| 324 | 398 | lc->mcck_new_psw.mask = PSW_KERNEL_BITS; |
|---|
| 325 | 399 | lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler; |
|---|
| 326 | | - lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; |
|---|
| 400 | + lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; |
|---|
| 327 | 401 | lc->io_new_psw.addr = (unsigned long) io_int_handler; |
|---|
| 328 | 402 | lc->clock_comparator = clock_comparator_max; |
|---|
| 329 | | - lc->kernel_stack = ((unsigned long) &init_thread_union) |
|---|
| 403 | + lc->nodat_stack = ((unsigned long) &init_thread_union) |
|---|
| 330 | 404 | + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); |
|---|
| 331 | | - lc->async_stack = (unsigned long) |
|---|
| 332 | | - memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE) |
|---|
| 333 | | - + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); |
|---|
| 334 | | - lc->panic_stack = (unsigned long) |
|---|
| 335 | | - memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE) |
|---|
| 336 | | - + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); |
|---|
| 337 | 405 | lc->current_task = (unsigned long)&init_task; |
|---|
| 338 | 406 | lc->lpp = LPP_MAGIC; |
|---|
| 339 | 407 | lc->machine_flags = S390_lowcore.machine_flags; |
|---|
| .. | .. |
|---|
| 344 | 412 | memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list, |
|---|
| 345 | 413 | sizeof(lc->alt_stfle_fac_list)); |
|---|
| 346 | 414 | nmi_alloc_boot_cpu(lc); |
|---|
| 347 | | - vdso_alloc_boot_cpu(lc); |
|---|
| 348 | 415 | lc->sync_enter_timer = S390_lowcore.sync_enter_timer; |
|---|
| 349 | 416 | lc->async_enter_timer = S390_lowcore.async_enter_timer; |
|---|
| 350 | 417 | lc->exit_timer = S390_lowcore.exit_timer; |
|---|
| .. | .. |
|---|
| 354 | 421 | lc->last_update_timer = S390_lowcore.last_update_timer; |
|---|
| 355 | 422 | lc->last_update_clock = S390_lowcore.last_update_clock; |
|---|
| 356 | 423 | |
|---|
| 357 | | - restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE); |
|---|
| 358 | | - restart_stack += ASYNC_SIZE; |
|---|
| 424 | + /* |
|---|
| 425 | + * Allocate the global restart stack which is the same for |
|---|
| 426 | + * all CPUs in case *one* of them does a PSW restart. |
|---|
| 427 | + */ |
|---|
| 428 | + restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE); |
|---|
| 429 | + if (!restart_stack) |
|---|
| 430 | + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", |
|---|
| 431 | + __func__, THREAD_SIZE, THREAD_SIZE); |
|---|
| 432 | + restart_stack += STACK_INIT_OFFSET; |
|---|
| 359 | 433 | |
|---|
| 360 | 434 | /* |
|---|
| 361 | 435 | * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant |
|---|
| .. | .. |
|---|
| 374 | 448 | mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source); |
|---|
| 375 | 449 | mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw); |
|---|
| 376 | 450 | |
|---|
| 377 | | -#ifdef CONFIG_SMP |
|---|
| 378 | 451 | lc->spinlock_lockval = arch_spin_lockval(0); |
|---|
| 379 | 452 | lc->spinlock_index = 0; |
|---|
| 380 | 453 | arch_spin_lock_setup(0); |
|---|
| 381 | | -#endif |
|---|
| 382 | 454 | lc->br_r1_trampoline = 0x07f1; /* br %r1 */ |
|---|
| 455 | + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); |
|---|
| 456 | + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); |
|---|
| 457 | + lc->preempt_count = PREEMPT_DISABLED; |
|---|
| 383 | 458 | |
|---|
| 384 | 459 | set_prefix((u32)(unsigned long) lc); |
|---|
| 385 | 460 | lowcore_ptr[0] = lc; |
|---|
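The new `return_lpswe` and `return_mcck_lpswe` lowcore fields initialised above hold a pre-built LPSWE instruction that the interrupt and machine-check exit paths branch to. A sketch of what a helper like `gen_lpswe()` plausibly does; the opcode and the 12-bit displacement limit are assumptions (based on asm/processor.h) to verify against the actual header.

```c
/* Sketch: encode an S-format LPSWE (load PSW extended) instruction, opcode
 * 0xb2b2, with base register 0 and a 12-bit displacement -- enough to address
 * a PSW field inside the lowcore, such as __LC_RETURN_PSW.
 */
static inline unsigned int gen_lpswe(unsigned long addr)
{
	return 0xb2b20000 | (addr & 0xfff);	/* addr must fit in 12 bits */
}
```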
| .. | .. |
|---|
| 419 | 494 | static void __init setup_resources(void) |
|---|
| 420 | 495 | { |
|---|
| 421 | 496 | struct resource *res, *std_res, *sub_res; |
|---|
| 422 | | - struct memblock_region *reg; |
|---|
| 497 | + phys_addr_t start, end; |
|---|
| 423 | 498 | int j; |
|---|
| 499 | + u64 i; |
|---|
| 424 | 500 | |
|---|
| 425 | 501 | code_resource.start = (unsigned long) _text; |
|---|
| 426 | 502 | code_resource.end = (unsigned long) _etext - 1; |
|---|
| .. | .. |
|---|
| 429 | 505 | bss_resource.start = (unsigned long) __bss_start; |
|---|
| 430 | 506 | bss_resource.end = (unsigned long) __bss_stop - 1; |
|---|
| 431 | 507 | |
|---|
| 432 | | - for_each_memblock(memory, reg) { |
|---|
| 433 | | - res = memblock_virt_alloc(sizeof(*res), 8); |
|---|
| 508 | + for_each_mem_range(i, &start, &end) { |
|---|
| 509 | + res = memblock_alloc(sizeof(*res), 8); |
|---|
| 510 | + if (!res) |
|---|
| 511 | + panic("%s: Failed to allocate %zu bytes align=0x%x\n", |
|---|
| 512 | + __func__, sizeof(*res), 8); |
|---|
| 434 | 513 | res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; |
|---|
| 435 | 514 | |
|---|
| 436 | 515 | res->name = "System RAM"; |
|---|
| 437 | | - res->start = reg->base; |
|---|
| 438 | | - res->end = reg->base + reg->size - 1; |
|---|
| 516 | + res->start = start; |
|---|
| 517 | + /* |
|---|
| 518 | + * In memblock, end points to the first byte after the |
|---|
| 519 | + * range while in resources, end points to the last byte in |
|---|
| 520 | + * the range. |
|---|
| 521 | + */ |
|---|
| 522 | + res->end = end - 1; |
|---|
| 439 | 523 | request_resource(&iomem_resource, res); |
|---|
| 440 | 524 | |
|---|
| 441 | 525 | for (j = 0; j < ARRAY_SIZE(standard_resources); j++) { |
|---|
| .. | .. |
|---|
| 444 | 528 | std_res->start > res->end) |
|---|
| 445 | 529 | continue; |
|---|
| 446 | 530 | if (std_res->end > res->end) { |
|---|
| 447 | | - sub_res = memblock_virt_alloc(sizeof(*sub_res), 8); |
|---|
| 531 | + sub_res = memblock_alloc(sizeof(*sub_res), 8); |
|---|
| 532 | + if (!sub_res) |
|---|
| 533 | + panic("%s: Failed to allocate %zu bytes align=0x%x\n", |
|---|
| 534 | + __func__, sizeof(*sub_res), 8); |
|---|
| 448 | 535 | *sub_res = *std_res; |
|---|
| 449 | 536 | sub_res->end = res->end; |
|---|
| 450 | 537 | std_res->start = res->end + 1; |
|---|
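As a worked example of the end-point conversion commented in this hunk: memblock ranges are half-open, `[start, end)`, while a `struct resource` stores an inclusive `end`, hence the `end - 1`. The 2 GiB block below is made up for illustration.

```c
#include <stdio.h>

int main(void)
{
	/* a hypothetical 2 GiB memblock range: [0, 0x80000000) */
	unsigned long long start = 0x0, end = 0x80000000ULL;

	/* the resource keeps the last byte, so the upper bound drops by one */
	printf("%08llx-%08llx : System RAM\n", start, end - 1);
	/* prints: 00000000-7fffffff : System RAM */
	return 0;
}
```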
| .. | .. |
|---|
| 472 | 559 | |
|---|
| 473 | 560 | static void __init setup_memory_end(void) |
|---|
| 474 | 561 | { |
|---|
| 475 | | - unsigned long vmax, vmalloc_size, tmp; |
|---|
| 562 | + unsigned long vmax, tmp; |
|---|
| 476 | 563 | |
|---|
| 477 | | - /* Choose kernel address space layout: 2, 3, or 4 levels. */ |
|---|
| 478 | | - vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; |
|---|
| 564 | + /* Choose kernel address space layout: 3 or 4 levels. */ |
|---|
| 479 | 565 | tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; |
|---|
| 480 | 566 | tmp = tmp * (sizeof(struct page) + PAGE_SIZE); |
|---|
| 481 | 567 | if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE) |
|---|
| 482 | 568 | vmax = _REGION2_SIZE; /* 3-level kernel page table */ |
|---|
| 483 | 569 | else |
|---|
| 484 | 570 | vmax = _REGION1_SIZE; /* 4-level kernel page table */ |
|---|
| 571 | + if (is_prot_virt_host()) |
|---|
| 572 | + adjust_to_uv_max(&vmax); |
|---|
| 573 | +#ifdef CONFIG_KASAN |
|---|
| 574 | + vmax = kasan_vmax; |
|---|
| 575 | +#endif |
|---|
| 485 | 576 | /* module area is at the end of the kernel address space. */ |
|---|
| 486 | 577 | MODULES_END = vmax; |
|---|
| 487 | 578 | MODULES_VADDR = MODULES_END - MODULES_LEN; |
|---|
| 488 | 579 | VMALLOC_END = MODULES_VADDR; |
|---|
| 489 | | - VMALLOC_START = vmax - vmalloc_size; |
|---|
| 580 | + VMALLOC_START = VMALLOC_END - vmalloc_size; |
|---|
| 490 | 581 | |
|---|
| 491 | 582 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ |
|---|
| 492 | 583 | tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); |
|---|
| .. | .. |
|---|
| 498 | 589 | vmemmap = (struct page *) tmp; |
|---|
| 499 | 590 | |
|---|
| 500 | 591 | /* Take care that memory_end is set and <= vmemmap */ |
|---|
| 501 | | - memory_end = min(memory_end ?: max_physmem_end, tmp); |
|---|
| 592 | + memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap); |
|---|
| 593 | +#ifdef CONFIG_KASAN |
|---|
| 594 | + memory_end = min(memory_end, KASAN_SHADOW_START); |
|---|
| 595 | +#endif |
|---|
| 596 | + vmemmap_size = SECTION_ALIGN_UP(memory_end / PAGE_SIZE) * sizeof(struct page); |
|---|
| 597 | +#ifdef CONFIG_KASAN |
|---|
| 598 | + /* move vmemmap above kasan shadow only if stands in a way */ |
|---|
| 599 | + if (KASAN_SHADOW_END > (unsigned long)vmemmap && |
|---|
| 600 | + (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START) |
|---|
| 601 | + vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END); |
|---|
| 602 | +#endif |
|---|
| 502 | 603 | max_pfn = max_low_pfn = PFN_DOWN(memory_end); |
|---|
| 503 | 604 | memblock_remove(memory_end, ULONG_MAX); |
|---|
| 504 | 605 | |
|---|
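The 3-level versus 4-level decision above comes down to whether the identity mapping plus the vmemmap array, the vmalloc area and the module area still fit below `_REGION2_SIZE`. A back-of-the-envelope check using assumed values (4 KiB pages, 64-byte `struct page`, `_REGION2_SIZE` = 4 TiB, `MODULES_LEN` = 1 GiB, a 128 GiB vmalloc area); the constants are illustrative, not taken from this diff.

```c
#include <stdio.h>

int main(void)
{
	unsigned long long page = 4096, sizeof_struct_page = 64;
	unsigned long long memory_end = 2ULL << 40;	/* 2 TiB of memory */
	unsigned long long vmalloc_size = 128ULL << 30;	/* 128 GiB         */
	unsigned long long modules_len = 1ULL << 30;	/* 1 GiB           */
	unsigned long long region2_size = 1ULL << 42;	/* assumed 4 TiB   */

	/* identity map + vmemmap array, same shape as in setup_memory_end() */
	unsigned long long tmp = memory_end / page * (sizeof_struct_page + page);

	printf("%s\n", tmp + vmalloc_size + modules_len <= region2_size ?
	       "3-level kernel page table" : "4-level kernel page table");
	return 0;
}
```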
| .. | .. |
|---|
| 508 | 609 | #ifdef CONFIG_CRASH_DUMP |
|---|
| 509 | 610 | |
|---|
| 510 | 611 | /* |
|---|
| 511 | | - * When kdump is enabled, we have to ensure that no memory from |
|---|
| 512 | | - * the area [0 - crashkernel memory size] and |
|---|
| 513 | | - * [crashk_res.start - crashk_res.end] is set offline. |
|---|
| 612 | + * When kdump is enabled, we have to ensure that no memory from the area |
|---|
| 613 | + * [0 - crashkernel memory size] is set offline - it will be exchanged with |
|---|
| 614 | + * the crashkernel memory region when kdump is triggered. The crashkernel |
|---|
| 615 | + * memory region can never get offlined (pages are unmovable). |
|---|
| 514 | 616 | */ |
|---|
| 515 | 617 | static int kdump_mem_notifier(struct notifier_block *nb, |
|---|
| 516 | 618 | unsigned long action, void *data) |
|---|
| .. | .. |
|---|
| 521 | 623 | return NOTIFY_OK; |
|---|
| 522 | 624 | if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res))) |
|---|
| 523 | 625 | return NOTIFY_BAD; |
|---|
| 524 | | - if (arg->start_pfn > PFN_DOWN(crashk_res.end)) |
|---|
| 525 | | - return NOTIFY_OK; |
|---|
| 526 | | - if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start)) |
|---|
| 527 | | - return NOTIFY_OK; |
|---|
| 528 | | - return NOTIFY_BAD; |
|---|
| 626 | + return NOTIFY_OK; |
|---|
| 529 | 627 | } |
|---|
| 530 | 628 | |
|---|
| 531 | 629 | static struct notifier_block kdump_mem_nb = { |
|---|
| .. | .. |
|---|
| 539 | 637 | */ |
|---|
| 540 | 638 | static void __init reserve_memory_end(void) |
|---|
| 541 | 639 | { |
|---|
| 542 | | -#ifdef CONFIG_CRASH_DUMP |
|---|
| 543 | | - if (ipl_info.type == IPL_TYPE_FCP_DUMP && |
|---|
| 544 | | - !OLDMEM_BASE && sclp.hsa_size) { |
|---|
| 545 | | - memory_end = sclp.hsa_size; |
|---|
| 546 | | - memory_end &= PAGE_MASK; |
|---|
| 547 | | - memory_end_set = 1; |
|---|
| 548 | | - } |
|---|
| 549 | | -#endif |
|---|
| 550 | | - if (!memory_end_set) |
|---|
| 551 | | - return; |
|---|
| 552 | | - memblock_reserve(memory_end, ULONG_MAX); |
|---|
| 640 | + if (memory_end_set) |
|---|
| 641 | + memblock_reserve(memory_end, ULONG_MAX); |
|---|
| 553 | 642 | } |
|---|
| 554 | 643 | |
|---|
| 555 | 644 | /* |
|---|
| .. | .. |
|---|
| 657 | 746 | } |
|---|
| 658 | 747 | |
|---|
| 659 | 748 | /* |
|---|
| 749 | + * Reserve the memory area used to pass the certificate lists |
|---|
| 750 | + */ |
|---|
| 751 | +static void __init reserve_certificate_list(void) |
|---|
| 752 | +{ |
|---|
| 753 | + if (ipl_cert_list_addr) |
|---|
| 754 | + memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size); |
|---|
| 755 | +} |
|---|
| 756 | + |
|---|
| 757 | +static void __init reserve_mem_detect_info(void) |
|---|
| 758 | +{ |
|---|
| 759 | + unsigned long start, size; |
|---|
| 760 | + |
|---|
| 761 | + get_mem_detect_reserved(&start, &size); |
|---|
| 762 | + if (size) |
|---|
| 763 | + memblock_reserve(start, size); |
|---|
| 764 | +} |
|---|
| 765 | + |
|---|
| 766 | +static void __init free_mem_detect_info(void) |
|---|
| 767 | +{ |
|---|
| 768 | + unsigned long start, size; |
|---|
| 769 | + |
|---|
| 770 | + get_mem_detect_reserved(&start, &size); |
|---|
| 771 | + if (size) |
|---|
| 772 | + memblock_free(start, size); |
|---|
| 773 | +} |
|---|
| 774 | + |
|---|
| 775 | +static const char * __init get_mem_info_source(void) |
|---|
| 776 | +{ |
|---|
| 777 | + switch (mem_detect.info_source) { |
|---|
| 778 | + case MEM_DETECT_SCLP_STOR_INFO: |
|---|
| 779 | + return "sclp storage info"; |
|---|
| 780 | + case MEM_DETECT_DIAG260: |
|---|
| 781 | + return "diag260"; |
|---|
| 782 | + case MEM_DETECT_SCLP_READ_INFO: |
|---|
| 783 | + return "sclp read info"; |
|---|
| 784 | + case MEM_DETECT_BIN_SEARCH: |
|---|
| 785 | + return "binary search"; |
|---|
| 786 | + } |
|---|
| 787 | + return "none"; |
|---|
| 788 | +} |
|---|
| 789 | + |
|---|
| 790 | +static void __init memblock_add_mem_detect_info(void) |
|---|
| 791 | +{ |
|---|
| 792 | + unsigned long start, end; |
|---|
| 793 | + int i; |
|---|
| 794 | + |
|---|
| 795 | + pr_debug("physmem info source: %s (%hhd)\n", |
|---|
| 796 | + get_mem_info_source(), mem_detect.info_source); |
|---|
| 797 | + /* keep memblock lists close to the kernel */ |
|---|
| 798 | + memblock_set_bottom_up(true); |
|---|
| 799 | + for_each_mem_detect_block(i, &start, &end) { |
|---|
| 800 | + memblock_add(start, end - start); |
|---|
| 801 | + memblock_physmem_add(start, end - start); |
|---|
| 802 | + } |
|---|
| 803 | + memblock_set_bottom_up(false); |
|---|
| 804 | + memblock_set_node(0, ULONG_MAX, &memblock.memory, 0); |
|---|
| 805 | + memblock_dump_all(); |
|---|
| 806 | +} |
|---|
| 807 | + |
|---|
| 808 | +/* |
|---|
| 660 | 809 | * Check for initrd being in usable memory |
|---|
| 661 | 810 | */ |
|---|
| 662 | 811 | static void __init check_initrd(void) |
|---|
| .. | .. |
|---|
| 678 | 827 | { |
|---|
| 679 | 828 | unsigned long start_pfn = PFN_UP(__pa(_end)); |
|---|
| 680 | 829 | |
|---|
| 681 | | -#ifdef CONFIG_DMA_API_DEBUG |
|---|
| 682 | | - /* |
|---|
| 683 | | - * DMA_API_DEBUG code stumbles over addresses from the |
|---|
| 684 | | - * range [PARMAREA_END, _stext]. Mark the memory as reserved |
|---|
| 685 | | - * so it is not used for CONFIG_DMA_API_DEBUG=y. |
|---|
| 686 | | - */ |
|---|
| 687 | | - memblock_reserve(0, PFN_PHYS(start_pfn)); |
|---|
| 688 | | -#else |
|---|
| 689 | | - memblock_reserve(0, PARMAREA_END); |
|---|
| 830 | + memblock_reserve(0, HEAD_END); |
|---|
| 690 | 831 | memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn) |
|---|
| 691 | 832 | - (unsigned long)_stext); |
|---|
| 692 | | -#endif |
|---|
| 833 | + memblock_reserve(__sdma, __edma - __sdma); |
|---|
| 693 | 834 | } |
|---|
| 694 | 835 | |
|---|
| 695 | 836 | static void __init setup_memory(void) |
|---|
| 696 | 837 | { |
|---|
| 697 | | - struct memblock_region *reg; |
|---|
| 838 | + phys_addr_t start, end; |
|---|
| 839 | + u64 i; |
|---|
| 698 | 840 | |
|---|
| 699 | 841 | /* |
|---|
| 700 | 842 | * Init storage key for present memory |
|---|
| 701 | 843 | */ |
|---|
| 702 | | - for_each_memblock(memory, reg) { |
|---|
| 703 | | - storage_key_init_range(reg->base, reg->base + reg->size); |
|---|
| 704 | | - } |
|---|
| 844 | + for_each_mem_range(i, &start, &end) |
|---|
| 845 | + storage_key_init_range(start, end); |
|---|
| 846 | + |
|---|
| 705 | 847 | psw_set_key(PAGE_DEFAULT_KEY); |
|---|
| 706 | 848 | } |
|---|
| 707 | 849 | |
|---|
| .. | .. |
|---|
| 781 | 923 | if (MACHINE_HAS_VX) { |
|---|
| 782 | 924 | elf_hwcap |= HWCAP_S390_VXRS; |
|---|
| 783 | 925 | if (test_facility(134)) |
|---|
| 784 | | - elf_hwcap |= HWCAP_S390_VXRS_EXT; |
|---|
| 785 | | - if (test_facility(135)) |
|---|
| 786 | 926 | elf_hwcap |= HWCAP_S390_VXRS_BCD; |
|---|
| 927 | + if (test_facility(135)) |
|---|
| 928 | + elf_hwcap |= HWCAP_S390_VXRS_EXT; |
|---|
| 929 | + if (test_facility(148)) |
|---|
| 930 | + elf_hwcap |= HWCAP_S390_VXRS_EXT2; |
|---|
| 931 | + if (test_facility(152)) |
|---|
| 932 | + elf_hwcap |= HWCAP_S390_VXRS_PDE; |
|---|
| 787 | 933 | } |
|---|
| 934 | + if (test_facility(150)) |
|---|
| 935 | + elf_hwcap |= HWCAP_S390_SORT; |
|---|
| 936 | + if (test_facility(151)) |
|---|
| 937 | + elf_hwcap |= HWCAP_S390_DFLT; |
|---|
| 788 | 938 | |
|---|
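The facility bits tested above surface to user space as ELF hardware capabilities. A userspace sketch that reads them back via the auxiliary vector; the `HWCAP_S390_*` values are assumptions copied from what arch/s390/include/asm/elf.h is expected to define and should be checked against the headers in use.

```c
#include <stdio.h>
#include <sys/auxv.h>

/* Assumed bit values -- verify against asm/elf.h before relying on them. */
#define HWCAP_S390_VXRS_EXT2	32768
#define HWCAP_S390_VXRS_PDE	65536
#define HWCAP_S390_SORT		131072
#define HWCAP_S390_DFLT		262144

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("vxe2:%d vxp:%d sort:%d dflt:%d\n",
	       !!(hwcap & HWCAP_S390_VXRS_EXT2),
	       !!(hwcap & HWCAP_S390_VXRS_PDE),
	       !!(hwcap & HWCAP_S390_SORT),
	       !!(hwcap & HWCAP_S390_DFLT));
	return 0;
}
```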
| 789 | 939 | /* |
|---|
| 790 | 940 | * Guarded storage support HWCAP_S390_GS is bit 12. |
|---|
| .. | .. |
|---|
| 828 | 978 | case 0x3907: |
|---|
| 829 | 979 | strcpy(elf_platform, "z14"); |
|---|
| 830 | 980 | break; |
|---|
| 981 | + case 0x8561: |
|---|
| 982 | + case 0x8562: |
|---|
| 983 | + strcpy(elf_platform, "z15"); |
|---|
| 984 | + break; |
|---|
| 831 | 985 | } |
|---|
| 832 | 986 | |
|---|
| 833 | 987 | /* |
|---|
| .. | .. |
|---|
| 847 | 1001 | { |
|---|
| 848 | 1002 | struct sysinfo_3_2_2 *vmms; |
|---|
| 849 | 1003 | |
|---|
| 850 | | - vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE); |
|---|
| 1004 | + vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE, |
|---|
| 1005 | + PAGE_SIZE); |
|---|
| 1006 | + if (!vmms) |
|---|
| 1007 | + panic("Failed to allocate memory for sysinfo structure\n"); |
|---|
| 1008 | + |
|---|
| 851 | 1009 | if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) |
|---|
| 852 | 1010 | add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); |
|---|
| 853 | 1011 | memblock_free((unsigned long) vmms, PAGE_SIZE); |
|---|
| 1012 | + |
|---|
| 1013 | +#ifdef CONFIG_ARCH_RANDOM |
|---|
| 1014 | + if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) |
|---|
| 1015 | + static_branch_enable(&s390_arch_random_available); |
|---|
| 1016 | +#endif |
|---|
| 854 | 1017 | } |
|---|
| 855 | 1018 | |
|---|
| 856 | 1019 | /* |
|---|
| .. | .. |
|---|
| 867 | 1030 | task_size += sizeof(freg_t) * __NUM_FPRS; |
|---|
| 868 | 1031 | } |
|---|
| 869 | 1032 | arch_task_struct_size = task_size; |
|---|
| 1033 | +} |
|---|
| 1034 | + |
|---|
| 1035 | +/* |
|---|
| 1036 | + * Issue diagnose 318 to set the control program name and |
|---|
| 1037 | + * version codes. |
|---|
| 1038 | + */ |
|---|
| 1039 | +static void __init setup_control_program_code(void) |
|---|
| 1040 | +{ |
|---|
| 1041 | + union diag318_info diag318_info = { |
|---|
| 1042 | + .cpnc = CPNC_LINUX, |
|---|
| 1043 | + .cpvc = 0, |
|---|
| 1044 | + }; |
|---|
| 1045 | + |
|---|
| 1046 | + if (!sclp.has_diag318) |
|---|
| 1047 | + return; |
|---|
| 1048 | + |
|---|
| 1049 | + diag_stat_inc(DIAG_STAT_X318); |
|---|
| 1050 | + asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val)); |
|---|
| 1051 | +} |
|---|
| 1052 | + |
|---|
| 1053 | +/* |
|---|
| 1054 | + * Print the component list from the IPL report |
|---|
| 1055 | + */ |
|---|
| 1056 | +static void __init log_component_list(void) |
|---|
| 1057 | +{ |
|---|
| 1058 | + struct ipl_rb_component_entry *ptr, *end; |
|---|
| 1059 | + char *str; |
|---|
| 1060 | + |
|---|
| 1061 | + if (!early_ipl_comp_list_addr) |
|---|
| 1062 | + return; |
|---|
| 1063 | + if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL) |
|---|
| 1064 | + pr_info("Linux is running with Secure-IPL enabled\n"); |
|---|
| 1065 | + else |
|---|
| 1066 | + pr_info("Linux is running with Secure-IPL disabled\n"); |
|---|
| 1067 | + ptr = (void *) early_ipl_comp_list_addr; |
|---|
| 1068 | + end = (void *) ptr + early_ipl_comp_list_size; |
|---|
| 1069 | + pr_info("The IPL report contains the following components:\n"); |
|---|
| 1070 | + while (ptr < end) { |
|---|
| 1071 | + if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) { |
|---|
| 1072 | + if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED) |
|---|
| 1073 | + str = "signed, verified"; |
|---|
| 1074 | + else |
|---|
| 1075 | + str = "signed, verification failed"; |
|---|
| 1076 | + } else { |
|---|
| 1077 | + str = "not signed"; |
|---|
| 1078 | + } |
|---|
| 1079 | + pr_info("%016llx - %016llx (%s)\n", |
|---|
| 1080 | + ptr->addr, ptr->addr + ptr->len, str); |
|---|
| 1081 | + ptr++; |
|---|
| 1082 | + } |
|---|
| 870 | 1083 | } |
|---|
| 871 | 1084 | |
|---|
| 872 | 1085 | /* |
|---|
| .. | .. |
|---|
| 889 | 1102 | else |
|---|
| 890 | 1103 | pr_info("Linux is running as a guest in 64-bit mode\n"); |
|---|
| 891 | 1104 | |
|---|
| 1105 | + log_component_list(); |
|---|
| 1106 | + |
|---|
| 892 | 1107 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
|---|
| 893 | 1108 | /* boot_command_line has been already set up in early.c */ |
|---|
| 894 | 1109 | *cmdline_p = boot_command_line; |
|---|
| 895 | 1110 | |
|---|
| 896 | 1111 | ROOT_DEV = Root_RAM0; |
|---|
| 897 | 1112 | |
|---|
| 898 | | - /* Is init_mm really needed? */ |
|---|
| 899 | | - init_mm.start_code = PAGE_OFFSET; |
|---|
| 1113 | + init_mm.start_code = (unsigned long) _text; |
|---|
| 900 | 1114 | init_mm.end_code = (unsigned long) _etext; |
|---|
| 901 | 1115 | init_mm.end_data = (unsigned long) _edata; |
|---|
| 902 | 1116 | init_mm.brk = (unsigned long) _end; |
|---|
| .. | .. |
|---|
| 914 | 1128 | os_info_init(); |
|---|
| 915 | 1129 | setup_ipl(); |
|---|
| 916 | 1130 | setup_task_size(); |
|---|
| 1131 | + setup_control_program_code(); |
|---|
| 917 | 1132 | |
|---|
| 918 | 1133 | /* Do some memory reservations *before* memory is added to memblock */ |
|---|
| 919 | 1134 | reserve_memory_end(); |
|---|
| 920 | 1135 | reserve_oldmem(); |
|---|
| 921 | 1136 | reserve_kernel(); |
|---|
| 922 | 1137 | reserve_initrd(); |
|---|
| 1138 | + reserve_certificate_list(); |
|---|
| 1139 | + reserve_mem_detect_info(); |
|---|
| 923 | 1140 | memblock_allow_resize(); |
|---|
| 924 | 1141 | |
|---|
| 925 | 1142 | /* Get information about *all* installed memory */ |
|---|
| 926 | | - detect_memory_memblock(); |
|---|
| 1143 | + memblock_add_mem_detect_info(); |
|---|
| 927 | 1144 | |
|---|
| 1145 | + free_mem_detect_info(); |
|---|
| 928 | 1146 | remove_oldmem(); |
|---|
| 929 | 1147 | |
|---|
| 930 | | - /* |
|---|
| 931 | | - * Make sure all chunks are MAX_ORDER aligned so we don't need the |
|---|
| 932 | | - * extra checks that HOLES_IN_ZONE would require. |
|---|
| 933 | | - * |
|---|
| 934 | | - * Is this still required? |
|---|
| 935 | | - */ |
|---|
| 936 | | - memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT)); |
|---|
| 937 | | - |
|---|
| 1148 | + setup_uv(); |
|---|
| 938 | 1149 | setup_memory_end(); |
|---|
| 939 | 1150 | setup_memory(); |
|---|
| 940 | 1151 | dma_contiguous_reserve(memory_end); |
|---|
| .. | .. |
|---|
| 978 | 1189 | if (IS_ENABLED(CONFIG_EXPOLINE)) |
|---|
| 979 | 1190 | nospec_init_branches(); |
|---|
| 980 | 1191 | |
|---|
| 981 | | - /* Setup zfcpdump support */ |
|---|
| 1192 | + /* Setup zfcp/nvme dump support */ |
|---|
| 982 | 1193 | setup_zfcpdump(); |
|---|
| 983 | 1194 | |
|---|
| 984 | 1195 | /* Add system specific data to the random pool */ |
|---|