| Old | New | Code |
|---|---|---|
| | 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 1 | 2 | /* |
| 2 | 3 | * Based on arch/arm/kernel/setup.c |
| 3 | 4 | * |
| 4 | 5 | * Copyright (C) 1995-2001 Russell King |
| 5 | 6 | * Copyright (C) 2012 ARM Ltd. |
| 6 | | - * |
| 7 | | - * This program is free software; you can redistribute it and/or modify |
| 8 | | - * it under the terms of the GNU General Public License version 2 as |
| 9 | | - * published by the Free Software Foundation. |
| 10 | | - * |
| 11 | | - * This program is distributed in the hope that it will be useful, |
| 12 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | | - * GNU General Public License for more details. |
| 15 | | - * |
| 16 | | - * You should have received a copy of the GNU General Public License |
| 17 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | 7 | */ |
| 19 | 8 | |
| 20 | 9 | #include <linux/acpi.h> |
| .. | .. | .. |
| 26 | 15 | #include <linux/initrd.h> |
| 27 | 16 | #include <linux/console.h> |
| 28 | 17 | #include <linux/cache.h> |
| 29 | | -#include <linux/bootmem.h> |
| 30 | 18 | #include <linux/screen_info.h> |
| 31 | 19 | #include <linux/init.h> |
| 32 | 20 | #include <linux/kexec.h> |
| .. | .. | .. |
| 59 | 47 | #include <asm/cacheflush.h> |
| 60 | 48 | #include <asm/tlbflush.h> |
| 61 | 49 | #include <asm/traps.h> |
| 62 | | -#include <asm/memblock.h> |
| 63 | 50 | #include <asm/efi.h> |
| 64 | 51 | #include <asm/xen/hypervisor.h> |
| 65 | 52 | #include <asm/mmu_context.h> |
| .. | .. | .. |
| 68 | 55 | static struct resource *standard_resources; |
| 69 | 56 | |
| 70 | 57 | phys_addr_t __fdt_pointer __initdata; |
| 71 | | - |
| 72 | | -/* Vendor stub */ |
| 73 | | -unsigned int boot_reason; |
| 74 | | -EXPORT_SYMBOL_GPL(boot_reason); |
| 75 | | - |
| 76 | | -/* Vendor stub */ |
| 77 | | -unsigned int cold_boot; |
| 78 | | -EXPORT_SYMBOL_GPL(cold_boot); |
| 79 | 58 | |
| 80 | 59 | /* |
| 81 | 60 | * Standard memory resources |
| .. | .. | .. |
| 106 | 85 | void __init smp_setup_processor_id(void) |
| 107 | 86 | { |
| 108 | 87 | u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; |
| 109 | | - cpu_logical_map(0) = mpidr; |
| | 88 | + set_cpu_logical_map(0, mpidr); |
| 110 | 89 | |
| 111 | 90 | /* |
| 112 | 91 | * clear __my_cpu_offset on boot CPU to avoid hang caused by |
| .. | .. | .. |
| 189 | 168 | pr_warn("Large number of MPIDR hash buckets detected\n"); |
| 190 | 169 | } |
| 191 | 170 | |
| | 171 | +static void *early_fdt_ptr __initdata; |
| | 172 | + |
| | 173 | +void __init *get_early_fdt_ptr(void) |
| | 174 | +{ |
| | 175 | + return early_fdt_ptr; |
| | 176 | +} |
| | 177 | + |
| | 178 | +asmlinkage void __init early_fdt_map(u64 dt_phys) |
| | 179 | +{ |
| | 180 | + int fdt_size; |
| | 181 | + |
| | 182 | + early_fixmap_init(); |
| | 183 | + early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL); |
| | 184 | +} |
| | 185 | + |
| 192 | 186 | static void __init setup_machine_fdt(phys_addr_t dt_phys) |
| 193 | 187 | { |
| 194 | 188 | int size; |
| .. | .. | .. |
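The hunk above adds `early_fdt_map()` and `get_early_fdt_ptr()`, which cache a fixmap mapping of the device tree so code running before `setup_machine_fdt()` can already read the blob. As a rough illustration only (the caller name, the check, and the error message are hypothetical and assume the code sits in the same file as the new helpers), an early consumer could validate the mapping like this:

```c
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/printk.h>
#include <linux/types.h>

/*
 * Hypothetical early-boot consumer: map the DT once via the new helper,
 * then sanity-check the header before trusting any of its contents.
 * fdt_check_header() returns 0 for a well-formed FDT blob.
 */
static void __init early_dt_sanity_check(u64 dt_phys)
{
	void *fdt;

	early_fdt_map(dt_phys);
	fdt = get_early_fdt_ptr();
	if (!fdt || fdt_check_header(fdt))
		pr_err("No valid device tree mapped at %#llx\n", dt_phys);
}
```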
| Old | New | Code |
|---|---|---|
| 225 | 219 | struct memblock_region *region; |
| 226 | 220 | struct resource *res; |
| 227 | 221 | unsigned long i = 0; |
| | 222 | + size_t res_size; |
| 228 | 223 | |
| 229 | 224 | kernel_code.start = __pa_symbol(_text); |
| 230 | 225 | kernel_code.end = __pa_symbol(__init_begin - 1); |
| .. | .. | .. |
| 232 | 227 | kernel_data.end = __pa_symbol(_end - 1); |
| 233 | 228 | |
| 234 | 229 | num_standard_resources = memblock.memory.cnt; |
| 235 | | - standard_resources = alloc_bootmem_low(num_standard_resources * |
| 236 | | - sizeof(*standard_resources)); |
| | 230 | + res_size = num_standard_resources * sizeof(*standard_resources); |
| | 231 | + standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES); |
| | 232 | + if (!standard_resources) |
| | 233 | + panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); |
| 237 | 234 | |
| 238 | | - for_each_memblock(memory, region) { |
| | 235 | + for_each_mem_region(region) { |
| 239 | 236 | res = &standard_resources[i++]; |
| 240 | 237 | if (memblock_is_nomap(region)) { |
| 241 | 238 | res->name = "reserved"; |
| .. | .. | .. |
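The allocation change above swaps the removed bootmem call for `memblock_alloc()`, which hands back zeroed memory or NULL and leaves error handling to the caller (the old `alloc_bootmem_low()` panicked internally). A minimal sketch of the same allocate-or-panic idiom, with illustrative names (`res_table`, `nr_entries`, `alloc_res_table`) that are not part of the patch:

```c
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

static struct resource *res_table __initdata;

/*
 * Sketch only: allocate a zeroed, cache-aligned array from memblock and
 * panic explicitly on failure, mirroring the pattern in the hunk above.
 */
static void __init alloc_res_table(unsigned long nr_entries)
{
	size_t size = nr_entries * sizeof(*res_table);

	res_table = memblock_alloc(size, SMP_CACHE_BYTES);
	if (!res_table)
		panic("%s: Failed to allocate %zu bytes\n", __func__, size);
}
```

Because `memblock_alloc()` returns zeroed memory, the resulting array can be filled in field by field afterwards without an explicit memset.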
| Old | New | Code |
|---|---|---|
| 275 | 272 | if (!memblock_is_region_reserved(mem->start, mem_size)) |
| 276 | 273 | continue; |
| 277 | 274 | |
| 278 | | - for_each_reserved_mem_region(j, &r_start, &r_end) { |
| | 275 | + for_each_reserved_mem_range(j, &r_start, &r_end) { |
| 279 | 276 | resource_size_t start, end; |
| 280 | 277 | |
| 281 | 278 | start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); |
| .. | .. | .. |
| 294 | 291 | |
| 295 | 292 | u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; |
| 296 | 293 | |
| 297 | | -void __init setup_arch(char **cmdline_p) |
| | 294 | +u64 cpu_logical_map(unsigned int cpu) |
| | 295 | +{ |
| | 296 | + return __cpu_logical_map[cpu]; |
| | 297 | +} |
| | 298 | + |
| | 299 | +void __init __no_sanitize_address setup_arch(char **cmdline_p) |
| 298 | 300 | { |
| 299 | 301 | init_mm.start_code = (unsigned long) _text; |
| 300 | 302 | init_mm.end_code = (unsigned long) _etext; |
| .. | .. | .. |
| 302 | 304 | init_mm.brk = (unsigned long) _end; |
| 303 | 305 | |
| 304 | 306 | *cmdline_p = boot_command_line; |
| | 307 | + |
| | 308 | + /* |
| | 309 | + * If know now we are going to need KPTI then use non-global |
| | 310 | + * mappings from the start, avoiding the cost of rewriting |
| | 311 | + * everything later. |
| | 312 | + */ |
| | 313 | + arm64_use_ng_mappings = kaslr_requires_kpti(); |
| 305 | 314 | |
| 306 | 315 | early_fixmap_init(); |
| 307 | 316 | early_ioremap_init(); |
| .. | .. | .. |
| 330 | 339 | |
| 331 | 340 | xen_early_init(); |
| 332 | 341 | efi_init(); |
| | 342 | + |
| | 343 | + if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0) |
| | 344 | + pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!"); |
| | 345 | + |
| 333 | 346 | arm64_memblock_init(); |
| 334 | 347 | |
| 335 | 348 | paging_init(); |
| .. | .. | .. |
| 355 | 368 | else |
| 356 | 369 | psci_acpi_init(); |
| 357 | 370 | |
| 358 | | - cpu_read_bootcpu_ops(); |
| | 371 | + init_bootcpu_ops(); |
| 359 | 372 | smp_init_cpus(); |
| 360 | 373 | smp_build_mpidr_hash(); |
| 361 | 374 | |
| 362 | 375 | /* Init percpu seeds for random tags after cpus are set up. */ |
| 363 | | - kasan_init_tags(); |
| | 376 | + kasan_init_sw_tags(); |
| 364 | 377 | |
| 365 | 378 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
| 366 | 379 | /* |
| .. | .. | .. |
| 368 | 381 | * faults in case uaccess_enable() is inadvertently called by the init |
| 369 | 382 | * thread. |
| 370 | 383 | */ |
| 371 | | - init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page); |
| | 384 | + init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); |
| 372 | 385 | #endif |
| 373 | 386 | |
| 374 | | -#ifdef CONFIG_VT |
| 375 | | -#if defined(CONFIG_VGA_CONSOLE) |
| 376 | | - conswitchp = &vga_con; |
| 377 | | -#elif defined(CONFIG_DUMMY_CONSOLE) |
| 378 | | - conswitchp = &dummy_con; |
| 379 | | -#endif |
| 380 | | -#endif |
| 381 | 387 | if (boot_args[1] || boot_args[2] || boot_args[3]) { |
| 382 | 388 | pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n" |
| 383 | 389 | "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n" |
| 384 | 390 | "This indicates a broken bootloader or old kernel\n", |
| 385 | 391 | boot_args[1], boot_args[2], boot_args[3]); |
| 386 | 392 | } |
| | 393 | +} |
| | 394 | + |
| | 395 | +static inline bool cpu_can_disable(unsigned int cpu) |
| | 396 | +{ |
| | 397 | +#ifdef CONFIG_HOTPLUG_CPU |
| | 398 | + const struct cpu_operations *ops = get_cpu_ops(cpu); |
| | 399 | + |
| | 400 | + if (ops && ops->cpu_can_disable) |
| | 401 | + return ops->cpu_can_disable(cpu); |
| | 402 | +#endif |
| | 403 | + return false; |
| 387 | 404 | } |
| 388 | 405 | |
| 389 | 406 | static int __init topology_init(void) |
| .. | .. | .. |
| 395 | 412 | |
| 396 | 413 | for_each_possible_cpu(i) { |
| 397 | 414 | struct cpu *cpu = &per_cpu(cpu_data.cpu, i); |
| 398 | | - cpu->hotpluggable = 1; |
| | 415 | + cpu->hotpluggable = cpu_can_disable(i); |
| 399 | 416 | register_cpu(cpu, i); |
| 400 | 417 | } |
| 401 | 418 | |
| .. | .. | .. |
| 403 | 420 | } |
| 404 | 421 | subsys_initcall(topology_init); |
| 405 | 422 | |
| 406 | | -/* |
| 407 | | - * Dump out kernel offset information on panic. |
| 408 | | - */ |
| 409 | | -static int dump_kernel_offset(struct notifier_block *self, unsigned long v, |
| 410 | | - void *p) |
| | 423 | +static void dump_kernel_offset(void) |
| 411 | 424 | { |
| 412 | 425 | const unsigned long offset = kaslr_offset(); |
| 413 | 426 | |
| 414 | 427 | if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) { |
| 415 | 428 | pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n", |
| 416 | 429 | offset, KIMAGE_VADDR); |
| | 430 | + pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET); |
| 417 | 431 | } else { |
| 418 | 432 | pr_emerg("Kernel Offset: disabled\n"); |
| 419 | 433 | } |
| | 434 | +} |
| | 435 | + |
| | 436 | +static int arm64_panic_block_dump(struct notifier_block *self, |
| | 437 | + unsigned long v, void *p) |
| | 438 | +{ |
| | 439 | + dump_kernel_offset(); |
| | 440 | + dump_cpu_features(); |
| | 441 | + dump_mem_limit(); |
| 420 | 442 | return 0; |
| 421 | 443 | } |
| 422 | 444 | |
| 423 | | -static struct notifier_block kernel_offset_notifier = { |
| 424 | | - .notifier_call = dump_kernel_offset |
| | 445 | +static struct notifier_block arm64_panic_block = { |
| | 446 | + .notifier_call = arm64_panic_block_dump |
| 425 | 447 | }; |
| 426 | 448 | |
| 427 | | -static int __init register_kernel_offset_dumper(void) |
| | 449 | +static int __init register_arm64_panic_block(void) |
| 428 | 450 | { |
| 429 | 451 | atomic_notifier_chain_register(&panic_notifier_list, |
| 430 | | - &kernel_offset_notifier); |
| | 452 | + &arm64_panic_block); |
| 431 | 453 | return 0; |
| 432 | 454 | } |
| 433 | | -__initcall(register_kernel_offset_dumper); |
| | 455 | +device_initcall(register_arm64_panic_block); |
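The final hunk broadens the old kernel-offset dumper into a general arm64 panic notifier that also dumps CPU features and the memory limit. For reference, the underlying registration pattern looks roughly like the self-contained sketch below; every name in it (`example_*`) is illustrative rather than taken from this file:

```c
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Illustrative panic-notifier callback: runs atomically and must not sleep. */
static int example_panic_notify(struct notifier_block *self,
				unsigned long event, void *data)
{
	pr_emerg("example: panic notifier fired\n");
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_notify,
};

static int __init example_panic_init(void)
{
	/* panic_notifier_list is an atomic notifier chain, hence this API. */
	atomic_notifier_chain_register(&panic_notifier_list, &example_panic_nb);
	return 0;
}
device_initcall(example_panic_init);
```

Note that `__initcall()` is an alias for `device_initcall()`, so the last line of the patch changes readability rather than the initcall level at which the notifier is registered.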
|---|