.. | .. |
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
1 | 2 | /* |
2 | 3 | * Low-level CPU initialisation |
3 | 4 | * Based on arch/arm/kernel/head.S |
.. | .. |
6 | 7 | * Copyright (C) 2003-2012 ARM Ltd. |
7 | 8 | * Authors: Catalin Marinas <catalin.marinas@arm.com> |
8 | 9 | * Will Deacon <will.deacon@arm.com> |
9 | | - * |
10 | | - * This program is free software; you can redistribute it and/or modify |
11 | | - * it under the terms of the GNU General Public License version 2 as |
12 | | - * published by the Free Software Foundation. |
13 | | - * |
14 | | - * This program is distributed in the hope that it will be useful, |
15 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | | - * GNU General Public License for more details. |
18 | | - * |
19 | | - * You should have received a copy of the GNU General Public License |
20 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
21 | 10 | */ |
22 | 11 | |
23 | 12 | #include <linux/linkage.h> |
24 | 13 | #include <linux/init.h> |
25 | | -#include <linux/irqchip/arm-gic-v3.h> |
| 14 | +#include <linux/pgtable.h> |
26 | 15 | |
| 16 | +#include <asm/asm_pointer_auth.h> |
27 | 17 | #include <asm/assembler.h> |
28 | 18 | #include <asm/boot.h> |
29 | 19 | #include <asm/ptrace.h> |
30 | 20 | #include <asm/asm-offsets.h> |
31 | 21 | #include <asm/cache.h> |
32 | 22 | #include <asm/cputype.h> |
| 23 | +#include <asm/el2_setup.h> |
33 | 24 | #include <asm/elf.h> |
| 25 | +#include <asm/image.h> |
34 | 26 | #include <asm/kernel-pgtable.h> |
35 | 27 | #include <asm/kvm_arm.h> |
36 | 28 | #include <asm/memory.h> |
37 | 29 | #include <asm/pgtable-hwdef.h> |
38 | | -#include <asm/pgtable.h> |
39 | 30 | #include <asm/page.h> |
40 | 31 | #include <asm/scs.h> |
41 | 32 | #include <asm/smp.h> |
.. | .. |
45 | 36 | |
46 | 37 | #include "efi-header.S" |
47 | 38 | |
48 | | -#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET) |
| 39 | +#define __PHYS_OFFSET KERNEL_START |
49 | 40 | |
50 | | -#if (TEXT_OFFSET & 0xfff) != 0 |
51 | | -#error TEXT_OFFSET must be at least 4KB aligned |
52 | | -#elif (PAGE_OFFSET & 0x1fffff) != 0 |
| 41 | +#if (PAGE_OFFSET & 0x1fffff) != 0 |
53 | 42 | #error PAGE_OFFSET must be at least 2MB aligned |
54 | | -#elif TEXT_OFFSET > 0x1fffff |
55 | | -#error TEXT_OFFSET must be less than 2MB |
56 | 43 | #endif |
57 | 44 | |
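Note on the hunk above: with TEXT_OFFSET dropped from the boot protocol, __PHYS_OFFSET collapses to KERNEL_START and the build-time TEXT_OFFSET checks go away; only the 2MB alignment requirement on PAGE_OFFSET remains. A rough loader-side sketch of what placement looks like once the header's load offset is zero (illustrative only, not kernel code; the 2 MiB rule comes from the arm64 boot protocol and the helper name is made up):

```c
#include <stdint.h>

#define SZ_2M 0x200000ULL

/* Hypothetical loader helper: with text_offset == 0 the Image is simply
 * placed at a 2 MiB aligned address in usable RAM (previously it had to
 * sit TEXT_OFFSET bytes above such a boundary). */
static inline uint64_t arm64_image_load_addr(uint64_t ram_base)
{
	return (ram_base + SZ_2M - 1) & ~(SZ_2M - 1);	/* align up to 2 MiB */
}
```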
58 | 45 | /* |
.. | .. |
64 | 51 | * x0 = physical address to the FDT blob. |
65 | 52 | * |
66 | 53 | * This code is mostly position independent so you call this at |
67 | | - * __pa(PAGE_OFFSET + TEXT_OFFSET). |
| 54 | + * __pa(PAGE_OFFSET). |
68 | 55 | * |
69 | 56 | * Note that the callee-saved registers are used for storing variables |
70 | 57 | * that are useful before the MMU is enabled. The allocations are described |
.. | .. |
81 | 68 | * its opcode forms the magic "MZ" signature required by UEFI. |
82 | 69 | */ |
83 | 70 | add x13, x18, #0x16 |
84 | | - b stext |
| 71 | + b primary_entry |
85 | 72 | #else |
86 | | - b stext // branch to kernel start, magic |
| 73 | + b primary_entry // branch to kernel start, magic |
87 | 74 | .long 0 // reserved |
88 | 75 | #endif |
89 | | - le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian |
| 76 | + .quad 0 // Image load offset from start of RAM, little-endian |
90 | 77 | le64sym _kernel_size_le // Effective size of kernel image, little-endian |
91 | 78 | le64sym _kernel_flags_le // Informative flags, little-endian |
92 | 79 | .quad 0 // reserved |
93 | 80 | .quad 0 // reserved |
94 | 81 | .quad 0 // reserved |
95 | | - .ascii "ARM\x64" // Magic number |
| 82 | + .ascii ARM64_IMAGE_MAGIC // Magic number |
96 | 83 | #ifdef CONFIG_EFI |
97 | 84 | .long pe_header - _head // Offset to the PE header. |
98 | 85 | |
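For orientation, the directives being edited above emit the fixed 64-byte arm64 Image header described in Documentation/arm64/booting.rst; ARM64_IMAGE_MAGIC is simply "ARM\x64" from the newly included <asm/image.h>, and the load-offset field is now hard-wired to zero. A C-style sketch of the layout (illustration, not kernel code):

```c
#include <stdint.h>

/* Sketch of the 64-byte header emitted by the directives above. */
struct arm64_image_header {
	uint32_t code0;			/* executable code ("MZ" when CONFIG_EFI) */
	uint32_t code1;			/* executable code: branch to primary_entry */
	uint64_t text_offset;		/* image load offset, now always 0 */
	uint64_t image_size;		/* effective image size, little-endian */
	uint64_t flags;			/* informative flags, little-endian */
	uint64_t res2, res3, res4;	/* reserved */
	uint32_t magic;			/* "ARM\x64" == 0x644d5241 */
	uint32_t res5;			/* offset to the PE header when CONFIG_EFI */
};
```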
.. | .. |
109 | 96 | * primary lowlevel boot path: |
110 | 97 | * |
111 | 98 | * Register Scope Purpose |
112 | | - * x21 stext() .. start_kernel() FDT pointer passed at boot in x0 |
113 | | - * x23 stext() .. start_kernel() physical misalignment/KASLR offset |
114 | | - * x28 __create_page_tables() callee preserved temp register |
115 | | - * x19/x20 __primary_switch() callee preserved temp registers |
116 | | - * x24 __primary_switch() .. relocate_kernel() |
117 | | - * current RELR displacement |
| 99 | + * x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0 |
| 100 | + * x23 primary_entry() .. start_kernel() physical misalignment/KASLR offset |
| 101 | + * x28 __create_page_tables() callee preserved temp register |
| 102 | + * x19/x20 __primary_switch() callee preserved temp registers |
| 103 | + * x24 __primary_switch() .. relocate_kernel() current RELR displacement |
118 | 104 | */ |
119 | | -ENTRY(stext) |
| 105 | +SYM_CODE_START(primary_entry) |
120 | 106 | bl preserve_boot_args |
121 | | - bl el2_setup // Drop to EL1, w0=cpu_boot_mode |
| 107 | + bl init_kernel_el // w0=cpu_boot_mode |
122 | 108 | adrp x23, __PHYS_OFFSET |
123 | 109 | and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0 |
124 | 110 | bl set_cpu_boot_mode_flag |
.. | .. |
131 | 117 | */ |
132 | 118 | bl __cpu_setup // initialise processor |
133 | 119 | b __primary_switch |
134 | | -ENDPROC(stext) |
| 120 | +SYM_CODE_END(primary_entry) |
135 | 121 | |
136 | 122 | /* |
137 | 123 | * Preserve the arguments passed by the bootloader in x0 .. x3 |
138 | 124 | */ |
139 | | -preserve_boot_args: |
| 125 | +SYM_CODE_START_LOCAL(preserve_boot_args) |
140 | 126 | mov x21, x0 // x21=FDT |
141 | 127 | |
142 | 128 | adr_l x0, boot_args // record the contents of |
.. | .. |
148 | 134 | |
149 | 135 | mov x1, #0x20 // 4 x 8 bytes |
150 | 136 | b __inval_dcache_area // tail call |
151 | | -ENDPROC(preserve_boot_args) |
| 137 | +SYM_CODE_END(preserve_boot_args) |
152 | 138 | |
153 | 139 | /* |
154 | 140 | * Macro to create a table entry to the next page. |
.. | .. |
287 | 273 | * - first few MB of the kernel linear mapping to jump to once the MMU has |
288 | 274 | * been enabled |
289 | 275 | */ |
290 | | -__create_page_tables: |
| 276 | +SYM_FUNC_START_LOCAL(__create_page_tables) |
291 | 277 | mov x28, lr |
292 | 278 | |
293 | 279 | /* |
294 | | - * Invalidate the idmap and swapper page tables to avoid potential |
295 | | - * dirty cache lines being evicted. |
| 280 | + * Invalidate the init page tables to avoid potential dirty cache lines |
| 281 | + * being evicted. Other page tables are allocated in rodata as part of |
| 282 | + * the kernel image, and thus are clean to the PoC per the boot |
| 283 | + * protocol. |
296 | 284 | */ |
297 | | - adrp x0, idmap_pg_dir |
298 | | - adrp x1, swapper_pg_end |
| 285 | + adrp x0, init_pg_dir |
| 286 | + adrp x1, init_pg_end |
299 | 287 | sub x1, x1, x0 |
300 | 288 | bl __inval_dcache_area |
301 | 289 | |
302 | 290 | /* |
303 | | - * Clear the idmap and swapper page tables. |
| 291 | + * Clear the init page tables. |
304 | 292 | */ |
305 | | - adrp x0, idmap_pg_dir |
306 | | - adrp x1, swapper_pg_end |
| 293 | + adrp x0, init_pg_dir |
| 294 | + adrp x1, init_pg_end |
307 | 295 | sub x1, x1, x0 |
308 | 296 | 1: stp xzr, xzr, [x0], #16 |
309 | 297 | stp xzr, xzr, [x0], #16 |
.. | .. |
320 | 308 | adrp x0, idmap_pg_dir |
321 | 309 | adrp x3, __idmap_text_start // __pa(__idmap_text_start) |
322 | 310 | |
| 311 | +#ifdef CONFIG_ARM64_VA_BITS_52 |
| 312 | + mrs_s x6, SYS_ID_AA64MMFR2_EL1 |
| 313 | + and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT) |
| 314 | + mov x5, #52 |
| 315 | + cbnz x6, 1f |
| 316 | +#endif |
| 317 | + mov x5, #VA_BITS_MIN |
| 318 | +1: |
| 319 | + adr_l x6, vabits_actual |
| 320 | + str x5, [x6] |
| 321 | + dmb sy |
| 322 | + dc ivac, x6 // Invalidate potentially stale cache line |
| 323 | + |
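The block just added probes ID_AA64MMFR2_EL1 to pick between 52-bit and the minimum VA size before any page tables are built, and publishes the result in vabits_actual. Roughly the same decision in C, as a sketch only (read_sysreg_s and the field constants are the kernel's usual sysreg helpers; the real code must stay in assembly and must also clean the variable to the PoC with dc ivac because the D-cache is still off):

```c
extern u64 vabits_actual;

/* Sketch: choose vabits_actual the way the assembly above does. */
static void probe_vabits(void)
{
	u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

	if (mmfr2 & (0xfUL << ID_AA64MMFR2_LVA_SHIFT))	/* LVA implemented */
		vabits_actual = 52;
	else
		vabits_actual = VA_BITS_MIN;	/* 48 when 52-bit VA is configured */
}
```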
323 | 324 | /* |
324 | 325 | * VA_BITS may be too small to allow for an ID mapping to be created |
325 | 326 | * that covers system RAM if that is located sufficiently high in the |
.. | .. |
334 | 335 | */ |
335 | 336 | adrp x5, __idmap_text_end |
336 | 337 | clz x5, x5 |
337 | | - cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? |
| 338 | + cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough? |
338 | 339 | b.ge 1f // .. then skip VA range extension |
339 | 340 | |
340 | 341 | adr_l x6, idmap_t0sz |
.. | .. |
377 | 378 | /* |
378 | 379 | * Map the kernel image (starting with PHYS_OFFSET). |
379 | 380 | */ |
380 | | - adrp x0, swapper_pg_dir |
381 | | - mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text) |
| 381 | + adrp x0, init_pg_dir |
| 382 | + mov_q x5, KIMAGE_VADDR // compile time __va(_text) |
382 | 383 | add x5, x5, x23 // add KASLR displacement |
383 | 384 | mov x4, PTRS_PER_PGD |
384 | 385 | adrp x6, _end // runtime __pa(_end) |
.. | .. |
390 | 391 | |
391 | 392 | /* |
392 | 393 | * Since the page tables have been populated with non-cacheable |
393 | | - * accesses (MMU disabled), invalidate the idmap and swapper page |
394 | | - * tables again to remove any speculatively loaded cache lines. |
| 394 | + * accesses (MMU disabled), invalidate those tables again to |
| 395 | + * remove any speculatively loaded cache lines. |
395 | 396 | */ |
396 | | - adrp x0, idmap_pg_dir |
397 | | - adrp x1, swapper_pg_end |
398 | | - sub x1, x1, x0 |
399 | 397 | dmb sy |
| 398 | + |
| 399 | + adrp x0, idmap_pg_dir |
| 400 | + adrp x1, idmap_pg_end |
| 401 | + sub x1, x1, x0 |
| 402 | + bl __inval_dcache_area |
| 403 | + |
| 404 | + adrp x0, init_pg_dir |
| 405 | + adrp x1, init_pg_end |
| 406 | + sub x1, x1, x0 |
400 | 407 | bl __inval_dcache_area |
401 | 408 | |
402 | 409 | ret x28 |
403 | | -ENDPROC(__create_page_tables) |
404 | | - .ltorg |
| 410 | +SYM_FUNC_END(__create_page_tables) |
405 | 411 | |
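The reworked comment above states the recurring early-boot rule: page tables written while the MMU (and therefore data caching) is off must not be shadowed by stale or speculatively allocated cache lines once translation is enabled, hence the dmb plus invalidate-by-VA over each table range. A minimal sketch of that pattern, with the range-invalidation helper left deliberately hypothetical:

```c
/* Hypothetical stand-in for a 'dc ivac' loop over a range (this file
 * tail-calls __inval_dcache_area for the real thing). */
extern void dcache_inval_range(void *start, size_t size);

/* Sketch of the rule described above. */
static void publish_mmu_off_writes(void *start, size_t size)
{
	dmb(sy);			/* order the non-cacheable table writes */
	dcache_inval_range(start, size);/* drop stale/speculative lines to the PoC */
}
```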
406 | 412 | /* |
407 | 413 | * The following fragment of code is executed with the MMU enabled. |
408 | 414 | * |
409 | 415 | * x0 = __PHYS_OFFSET |
410 | 416 | */ |
411 | | -__primary_switched: |
| 417 | +SYM_FUNC_START_LOCAL(__primary_switched) |
412 | 418 | adrp x4, init_thread_union |
413 | 419 | add sp, x4, #THREAD_SIZE |
414 | 420 | adr_l x5, init_task |
.. | .. |
422 | 428 | mov x29, sp |
423 | 429 | |
424 | 430 | #ifdef CONFIG_SHADOW_CALL_STACK |
425 | | - adr_l x18, init_shadow_call_stack // Set shadow call stack |
| 431 | + adr_l scs_sp, init_shadow_call_stack // Set shadow call stack |
426 | 432 | #endif |
427 | 433 | |
428 | 434 | str_l x21, __fdt_pointer, x5 // Save FDT pointer |
.. | .. |
439 | 445 | bl __pi_memset |
440 | 446 | dsb ishst // Make zero page visible to PTW |
441 | 447 | |
442 | | -#ifdef CONFIG_KASAN |
| 448 | +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) |
443 | 449 | bl kasan_early_init |
444 | 450 | #endif |
| 451 | + mov x0, x21 // pass FDT address in x0 |
| 452 | + bl early_fdt_map // Try mapping the FDT early |
| 453 | + bl init_feature_override // Parse cpu feature overrides |
445 | 454 | #ifdef CONFIG_RANDOMIZE_BASE |
446 | 455 | tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized? |
447 | 456 | b.ne 0f |
448 | | - mov x0, x21 // pass FDT address in x0 |
449 | 457 | bl kaslr_early_init // parse FDT for KASLR options |
450 | 458 | cbz x0, 0f // KASLR disabled? just proceed |
451 | 459 | orr x23, x23, x0 // record KASLR offset |
.. | .. |
453 | 461 | ret // to __primary_switch() |
454 | 462 | 0: |
455 | 463 | #endif |
| 464 | + bl switch_to_vhe // Prefer VHE if possible |
456 | 465 | add sp, sp, #16 |
457 | 466 | mov x29, #0 |
458 | 467 | mov x30, #0 |
459 | 468 | b start_kernel |
460 | | -ENDPROC(__primary_switched) |
| 469 | +SYM_FUNC_END(__primary_switched) |
| 470 | + |
| 471 | + .pushsection ".rodata", "a" |
| 472 | +SYM_DATA_START(kimage_vaddr) |
| 473 | + .quad _text |
| 474 | +SYM_DATA_END(kimage_vaddr) |
| 475 | +EXPORT_SYMBOL(kimage_vaddr) |
| 476 | + .popsection |
461 | 477 | |
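kimage_vaddr now lives in .rodata as a plain `.quad _text`. Because that word gets an ordinary RELATIVE relocation, once __relocate_kernel has run it holds the runtime (randomized) address of _text, which is what lets generic code recover the KASLR offset, roughly as asm/memory.h does (sketch):

```c
extern u64 kimage_vaddr;	/* the .quad emitted above, fixed up by relocation */

static inline unsigned long kaslr_offset(void)
{
	/* runtime VA of _text minus its link-time VA */
	return kimage_vaddr - KIMAGE_VADDR;
}
```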
462 | 478 | /* |
463 | 479 | * end early head section, begin head code that is also used for |
.. | .. |
465 | 481 | */ |
466 | 482 | .section ".idmap.text","awx" |
467 | 483 | |
468 | | -ENTRY(kimage_vaddr) |
469 | | - .quad _text - TEXT_OFFSET |
470 | | - |
471 | 484 | /* |
472 | | - * If we're fortunate enough to boot at EL2, ensure that the world is |
473 | | - * sane before dropping to EL1. |
| 485 | + * Starting from EL2 or EL1, configure the CPU to execute at the highest |
| 486 | + * reachable EL supported by the kernel in a chosen default state. If dropping |
| 487 | + * from EL2 to EL1, configure EL2 before configuring EL1. |
| 488 | + * |
| 489 | + * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if |
| 490 | + * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET. |
474 | 491 | * |
475 | 492 | * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if |
476 | 493 | * booted in EL1 or EL2 respectively. |
477 | 494 | */ |
478 | | -ENTRY(el2_setup) |
479 | | - msr SPsel, #1 // We want to use SP_EL{1,2} |
| 495 | +SYM_FUNC_START(init_kernel_el) |
| 496 | + mov_q x0, INIT_SCTLR_EL1_MMU_OFF |
| 497 | + msr sctlr_el1, x0 |
| 498 | + |
480 | 499 | mrs x0, CurrentEL |
481 | 500 | cmp x0, #CurrentEL_EL2 |
482 | | - b.eq 1f |
483 | | - mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) |
484 | | - msr sctlr_el1, x0 |
485 | | - mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 |
| 501 | + b.eq init_el2 |
| 502 | + |
| 503 | +SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) |
486 | 504 | isb |
487 | | - ret |
| 505 | + mov_q x0, INIT_PSTATE_EL1 |
| 506 | + msr spsr_el1, x0 |
| 507 | + msr elr_el1, lr |
| 508 | + mov w0, #BOOT_CPU_MODE_EL1 |
| 509 | + eret |
488 | 510 | |
489 | | -1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) |
490 | | - msr sctlr_el2, x0 |
491 | | - |
492 | | -#ifdef CONFIG_ARM64_VHE |
493 | | - /* |
494 | | - * Check for VHE being present. For the rest of the EL2 setup, |
495 | | - * x2 being non-zero indicates that we do have VHE, and that the |
496 | | - * kernel is intended to run at EL2. |
497 | | - */ |
498 | | - mrs x2, id_aa64mmfr1_el1 |
499 | | - ubfx x2, x2, #8, #4 |
500 | | -#else |
501 | | - mov x2, xzr |
502 | | -#endif |
503 | | - |
504 | | - /* Hyp configuration. */ |
| 511 | +SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) |
505 | 512 | mov_q x0, HCR_HOST_NVHE_FLAGS |
506 | | - cbz x2, set_hcr |
507 | | - mov_q x0, HCR_HOST_VHE_FLAGS |
508 | | -set_hcr: |
509 | 513 | msr hcr_el2, x0 |
510 | 514 | isb |
511 | 515 | |
512 | | - /* |
513 | | - * Allow Non-secure EL1 and EL0 to access physical timer and counter. |
514 | | - * This is not necessary for VHE, since the host kernel runs in EL2, |
515 | | - * and EL0 accesses are configured in the later stage of boot process. |
516 | | - * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout |
517 | | - * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined |
518 | | - * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1 |
519 | | - * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in |
520 | | - * EL2. |
521 | | - */ |
522 | | - cbnz x2, 1f |
523 | | - mrs x0, cnthctl_el2 |
524 | | - orr x0, x0, #3 // Enable EL1 physical timers |
525 | | - msr cnthctl_el2, x0 |
526 | | -1: |
527 | | - msr cntvoff_el2, xzr // Clear virtual offset |
528 | | - |
529 | | -#ifdef CONFIG_ARM_GIC_V3 |
530 | | - /* GICv3 system register access */ |
531 | | - mrs x0, id_aa64pfr0_el1 |
532 | | - ubfx x0, x0, #24, #4 |
533 | | - cbz x0, 3f |
534 | | - |
535 | | - mrs_s x0, SYS_ICC_SRE_EL2 |
536 | | - orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 |
537 | | - orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 |
538 | | - msr_s SYS_ICC_SRE_EL2, x0 |
539 | | - isb // Make sure SRE is now set |
540 | | - mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back, |
541 | | - tbz x0, #0, 3f // and check that it sticks |
542 | | - msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults |
543 | | - |
544 | | -3: |
545 | | -#endif |
546 | | - |
547 | | - /* Populate ID registers. */ |
548 | | - mrs x0, midr_el1 |
549 | | - mrs x1, mpidr_el1 |
550 | | - msr vpidr_el2, x0 |
551 | | - msr vmpidr_el2, x1 |
552 | | - |
553 | | -#ifdef CONFIG_COMPAT |
554 | | - msr hstr_el2, xzr // Disable CP15 traps to EL2 |
555 | | -#endif |
556 | | - |
557 | | - /* EL2 debug */ |
558 | | - mrs x1, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer |
559 | | - sbfx x0, x1, #8, #4 |
560 | | - cmp x0, #1 |
561 | | - b.lt 4f // Skip if no PMU present |
562 | | - mrs x0, pmcr_el0 // Disable debug access traps |
563 | | - ubfx x0, x0, #11, #5 // to EL2 and allow access to |
564 | | -4: |
565 | | - csel x3, xzr, x0, lt // all PMU counters from EL1 |
566 | | - |
567 | | - /* Statistical profiling */ |
568 | | - ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer |
569 | | - cbz x0, 7f // Skip if SPE not present |
570 | | - cbnz x2, 6f // VHE? |
571 | | - mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2, |
572 | | - and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT) |
573 | | - cbnz x4, 5f // then permit sampling of physical |
574 | | - mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \ |
575 | | - 1 << SYS_PMSCR_EL2_PA_SHIFT) |
576 | | - msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter |
577 | | -5: |
578 | | - mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) |
579 | | - orr x3, x3, x1 // If we don't have VHE, then |
580 | | - b 7f // use EL1&0 translation. |
581 | | -6: // For VHE, use EL2 translation |
582 | | - orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1 |
583 | | -7: |
584 | | - msr mdcr_el2, x3 // Configure debug traps |
585 | | - |
586 | | - /* LORegions */ |
587 | | - mrs x1, id_aa64mmfr1_el1 |
588 | | - ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4 |
589 | | - cbz x0, 1f |
590 | | - msr_s SYS_LORC_EL1, xzr |
591 | | -1: |
592 | | - |
593 | | - /* Stage-2 translation */ |
594 | | - msr vttbr_el2, xzr |
595 | | - |
596 | | - cbz x2, install_el2_stub |
597 | | - |
598 | | - mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 |
599 | | - isb |
600 | | - ret |
601 | | - |
602 | | -install_el2_stub: |
603 | | - /* |
604 | | - * When VHE is not in use, early init of EL2 and EL1 needs to be |
605 | | - * done here. |
606 | | - * When VHE _is_ in use, EL1 will not be used in the host and |
607 | | - * requires no configuration, and all non-hyp-specific EL2 setup |
608 | | - * will be done via the _EL1 system register aliases in __cpu_setup. |
609 | | - */ |
610 | | - mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) |
611 | | - msr sctlr_el1, x0 |
612 | | - |
613 | | - /* Coprocessor traps. */ |
614 | | - mov x0, #0x33ff |
615 | | - msr cptr_el2, x0 // Disable copro. traps to EL2 |
616 | | - |
617 | | - /* SVE register access */ |
618 | | - mrs x1, id_aa64pfr0_el1 |
619 | | - ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4 |
620 | | - cbz x1, 7f |
621 | | - |
622 | | - bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps |
623 | | - msr cptr_el2, x0 // Disable copro. traps to EL2 |
624 | | - isb |
625 | | - mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector |
626 | | - msr_s SYS_ZCR_EL2, x1 // length for EL1. |
| 516 | + init_el2_state |
627 | 517 | |
628 | 518 | /* Hypervisor stub */ |
629 | | -7: adr_l x0, __hyp_stub_vectors |
| 519 | + adr_l x0, __hyp_stub_vectors |
630 | 520 | msr vbar_el2, x0 |
| 521 | + isb |
631 | 522 | |
632 | | - /* spsr */ |
633 | | - mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ |
634 | | - PSR_MODE_EL1h) |
635 | | - msr spsr_el2, x0 |
636 | 523 | msr elr_el2, lr |
637 | | - mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 |
| 524 | + mov w0, #BOOT_CPU_MODE_EL2 |
638 | 525 | eret |
639 | | -ENDPROC(el2_setup) |
| 526 | +SYM_FUNC_END(init_kernel_el) |
640 | 527 | |
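Compared with el2_setup, the routine above no longer decides about VHE at all: it first forces SCTLR_EL1 into a known MMU-off state, funnels every piece of EL2 register setup through the init_el2_state macro from the newly included <asm/el2_setup.h>, and leaves by ERET rather than RET; the VHE switch is deferred to the switch_to_vhe calls added elsewhere in this patch. A hypothetical C-level statement of the contract the boot paths rely on (sketch; the two constants are the asm/virt.h values, and the prototype is illustrative since the routine can only exist in assembly):

```c
#define BOOT_CPU_MODE_EL1	(0xe11)		/* from asm/virt.h */
#define BOOT_CPU_MODE_EL2	(0xe12)

/*
 * Illustrative prototype: entered at EL1 or EL2 with a valid lr, returns
 * (via ERET) to its caller and reports in w0 which mode the CPU booted in.
 */
unsigned int init_kernel_el(void);
```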
641 | 528 | /* |
642 | 529 | * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed |
643 | 530 | * in w0. See arch/arm64/include/asm/virt.h for more info. |
644 | 531 | */ |
645 | | -set_cpu_boot_mode_flag: |
| 532 | +SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) |
646 | 533 | adr_l x1, __boot_cpu_mode |
647 | 534 | cmp w0, #BOOT_CPU_MODE_EL2 |
648 | 535 | b.ne 1f |
.. | .. |
651 | 538 | dmb sy |
652 | 539 | dc ivac, x1 // Invalidate potentially stale cache line |
653 | 540 | ret |
654 | | -ENDPROC(set_cpu_boot_mode_flag) |
| 541 | +SYM_FUNC_END(set_cpu_boot_mode_flag) |
655 | 542 | |
656 | 543 | /* |
657 | 544 | * These values are written with the MMU off, but read with the MMU on. |
.. | .. |
667 | 554 | * This is not in .bss, because we set it sufficiently early that the boot-time |
668 | 555 | * zeroing of .bss would clobber it. |
669 | 556 | */ |
670 | | -ENTRY(__boot_cpu_mode) |
| 557 | +SYM_DATA_START(__boot_cpu_mode) |
671 | 558 | .long BOOT_CPU_MODE_EL2 |
672 | 559 | .long BOOT_CPU_MODE_EL1 |
| 560 | +SYM_DATA_END(__boot_cpu_mode) |
673 | 561 | /* |
674 | 562 | * The booting CPU updates the failed status @__early_cpu_boot_status, |
675 | 563 | * with MMU turned off. |
676 | 564 | */ |
677 | | -ENTRY(__early_cpu_boot_status) |
| 565 | +SYM_DATA_START(__early_cpu_boot_status) |
678 | 566 | .quad 0 |
| 567 | +SYM_DATA_END(__early_cpu_boot_status) |
679 | 568 | |
680 | 569 | .popsection |
681 | 570 | |
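For context, the two words of __boot_cpu_mode start out as { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 }; set_cpu_boot_mode_flag has CPUs that entered at EL2 write the second slot and CPUs that entered at EL1 write the first. Generic code then only compares the two, roughly as asm/virt.h does (sketch):

```c
extern u32 __boot_cpu_mode[2];

/* Every CPU, including the boot CPU, came up at EL2. */
static inline bool is_hyp_mode_available(void)
{
	return __boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
	       __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2;
}

/* Some CPUs booted at EL2 while others booted at EL1. */
static inline bool is_hyp_mode_mismatched(void)
{
	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
```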
.. | .. |
683 | 572 | * This provides a "holding pen" for platforms to hold all secondary |
684 | 573 | * cores are held until we're ready for them to initialise. |
685 | 574 | */ |
686 | | -ENTRY(secondary_holding_pen) |
687 | | - bl el2_setup // Drop to EL1, w0=cpu_boot_mode |
| 575 | +SYM_FUNC_START(secondary_holding_pen) |
| 576 | + bl init_kernel_el // w0=cpu_boot_mode |
688 | 577 | bl set_cpu_boot_mode_flag |
689 | 578 | mrs x0, mpidr_el1 |
690 | 579 | mov_q x1, MPIDR_HWID_BITMASK |
.. | .. |
695 | 584 | b.eq secondary_startup |
696 | 585 | wfe |
697 | 586 | b pen |
698 | | -ENDPROC(secondary_holding_pen) |
| 587 | +SYM_FUNC_END(secondary_holding_pen) |
699 | 588 | |
700 | 589 | /* |
701 | 590 | * Secondary entry point that jumps straight into the kernel. Only to |
702 | 591 | * be used where CPUs are brought online dynamically by the kernel. |
703 | 592 | */ |
704 | | -ENTRY(secondary_entry) |
705 | | - bl el2_setup // Drop to EL1 |
| 593 | +SYM_FUNC_START(secondary_entry) |
| 594 | + bl init_kernel_el // w0=cpu_boot_mode |
706 | 595 | bl set_cpu_boot_mode_flag |
707 | 596 | b secondary_startup |
708 | | -ENDPROC(secondary_entry) |
| 597 | +SYM_FUNC_END(secondary_entry) |
709 | 598 | |
710 | | -secondary_startup: |
| 599 | +SYM_FUNC_START_LOCAL(secondary_startup) |
711 | 600 | /* |
712 | 601 | * Common entry point for secondary CPUs. |
713 | 602 | */ |
| 603 | + bl switch_to_vhe |
714 | 604 | bl __cpu_secondary_check52bitva |
715 | 605 | bl __cpu_setup // initialise processor |
| 606 | + adrp x1, swapper_pg_dir |
716 | 607 | bl __enable_mmu |
717 | 608 | ldr x8, =__secondary_switched |
718 | 609 | br x8 |
719 | | -ENDPROC(secondary_startup) |
| 610 | +SYM_FUNC_END(secondary_startup) |
720 | 611 | |
721 | | -__secondary_switched: |
| 612 | +SYM_FUNC_START_LOCAL(__secondary_switched) |
722 | 613 | adr_l x5, vectors |
723 | 614 | msr vbar_el1, x5 |
724 | 615 | isb |
725 | 616 | |
726 | 617 | adr_l x0, secondary_data |
727 | 618 | ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack |
| 619 | + cbz x1, __secondary_too_slow |
728 | 620 | mov sp, x1 |
729 | 621 | ldr x2, [x0, #CPU_BOOT_TASK] |
| 622 | + cbz x2, __secondary_too_slow |
730 | 623 | msr sp_el0, x2 |
731 | | -#ifdef CONFIG_SHADOW_CALL_STACK |
732 | | - ldr x18, [x2, #TSK_TI_SCS] // set shadow call stack |
733 | | - str xzr, [x2, #TSK_TI_SCS] // limit visibility of saved SCS |
734 | | -#endif |
| 624 | + scs_load_current |
735 | 625 | mov x29, #0 |
736 | 626 | mov x30, #0 |
| 627 | + |
| 628 | +#ifdef CONFIG_ARM64_PTR_AUTH |
| 629 | + ptrauth_keys_init_cpu x2, x3, x4, x5 |
| 630 | +#endif |
| 631 | + |
737 | 632 | b secondary_start_kernel |
738 | | -ENDPROC(__secondary_switched) |
| 633 | +SYM_FUNC_END(__secondary_switched) |
| 634 | + |
| 635 | +SYM_FUNC_START_LOCAL(__secondary_too_slow) |
| 636 | + wfe |
| 637 | + wfi |
| 638 | + b __secondary_too_slow |
| 639 | +SYM_FUNC_END(__secondary_too_slow) |
739 | 640 | |
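The two new cbz checks guard the handoff area the primary CPU fills in before releasing a secondary: a CPU that gets here and still sees NULLs was let loose too early (for example a spin-table core released before its stack and task were published), so it parks in __secondary_too_slow instead of dereferencing garbage. A sketch of the structure being read, showing only the fields used here (CPU_BOOT_STACK and CPU_BOOT_TASK are their asm-offsets):

```c
/* Sketch after asm/smp.h; fields beyond the two read above are omitted. */
struct secondary_data {
	void *stack;			/* initial stack pointer for the secondary */
	struct task_struct *task;	/* idle task, installed in sp_el0 */
	/* ... */
};
extern struct secondary_data secondary_data;
```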
740 | 641 | /* |
741 | 642 | * The booting CPU updates the failed status @__early_cpu_boot_status, |
.. | .. |
759 | 660 | * Enable the MMU. |
760 | 661 | * |
761 | 662 | * x0 = SCTLR_EL1 value for turning on the MMU. |
| 663 | + * x1 = TTBR1_EL1 value |
762 | 664 | * |
763 | 665 | * Returns to the caller via x30/lr. This requires the caller to be covered |
764 | 666 | * by the .idmap.text section. |
.. | .. |
766 | 668 | * Checks if the selected granule size is supported by the CPU. |
767 | 669 | * If it isn't, park the CPU |
768 | 670 | */ |
769 | | -ENTRY(__enable_mmu) |
770 | | - mrs x1, ID_AA64MMFR0_EL1 |
771 | | - ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 |
772 | | - cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED |
773 | | - b.ne __no_granule_support |
774 | | - update_early_cpu_boot_status 0, x1, x2 |
775 | | - adrp x1, idmap_pg_dir |
776 | | - adrp x2, swapper_pg_dir |
777 | | - phys_to_ttbr x3, x1 |
778 | | - phys_to_ttbr x4, x2 |
779 | | - msr ttbr0_el1, x3 // load TTBR0 |
780 | | - msr ttbr1_el1, x4 // load TTBR1 |
| 671 | +SYM_FUNC_START(__enable_mmu) |
| 672 | + mrs x2, ID_AA64MMFR0_EL1 |
| 673 | + ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4 |
| 674 | + cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN |
| 675 | + b.lt __no_granule_support |
| 676 | + cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX |
| 677 | + b.gt __no_granule_support |
| 678 | + update_early_cpu_boot_status 0, x2, x3 |
| 679 | + adrp x2, idmap_pg_dir |
| 680 | + phys_to_ttbr x1, x1 |
| 681 | + phys_to_ttbr x2, x2 |
| 682 | + msr ttbr0_el1, x2 // load TTBR0 |
| 683 | + offset_ttbr1 x1, x3 |
| 684 | + msr ttbr1_el1, x1 // load TTBR1 |
781 | 685 | isb |
782 | | - msr sctlr_el1, x0 |
783 | | - isb |
784 | | - /* |
785 | | - * Invalidate the local I-cache so that any instructions fetched |
786 | | - * speculatively from the PoC are discarded, since they may have |
787 | | - * been dynamically patched at the PoU. |
788 | | - */ |
789 | | - ic iallu |
790 | | - dsb nsh |
791 | | - isb |
792 | | - ret |
793 | | -ENDPROC(__enable_mmu) |
794 | 686 | |
795 | | -ENTRY(__cpu_secondary_check52bitva) |
796 | | -#ifdef CONFIG_ARM64_52BIT_VA |
797 | | - ldr_l x0, vabits_user |
| 687 | + set_sctlr_el1 x0 |
| 688 | + |
| 689 | + ret |
| 690 | +SYM_FUNC_END(__enable_mmu) |
| 691 | + |
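With the hunk above, __enable_mmu no longer hard-codes swapper_pg_dir: the caller now passes the TTBR1 root in x1 (init_pg_dir on the primary boot path, swapper_pg_dir from secondary_startup), while TTBR0 always receives idmap_pg_dir. Expressed as a hypothetical C prototype, purely for illustration:

```c
#include <linux/types.h>

/*
 * Illustration only: the real routine is assembly, lives in .idmap.text and
 * returns through x30 while the mapping changes underneath it.
 *
 *   sctlr: SCTLR_EL1 value that turns the MMU on (x0)
 *   ttbr1: physical address of the pgd to install in TTBR1_EL1 (x1)
 */
void __enable_mmu(u64 sctlr, phys_addr_t ttbr1);
```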
| 692 | +SYM_FUNC_START(__cpu_secondary_check52bitva) |
| 693 | +#ifdef CONFIG_ARM64_VA_BITS_52 |
| 694 | + ldr_l x0, vabits_actual |
798 | 695 | cmp x0, #52 |
799 | 696 | b.ne 2f |
800 | 697 | |
.. | .. |
802 | 699 | and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT) |
803 | 700 | cbnz x0, 2f |
804 | 701 | |
805 | | - adr_l x0, va52mismatch |
806 | | - mov w1, #1 |
807 | | - strb w1, [x0] |
808 | | - dmb sy |
809 | | - dc ivac, x0 // Invalidate potentially stale cache line |
810 | | - |
811 | | - update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x0, x1 |
| 702 | + update_early_cpu_boot_status \ |
| 703 | + CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1 |
812 | 704 | 1: wfe |
813 | 705 | wfi |
814 | 706 | b 1b |
815 | 707 | |
816 | 708 | #endif |
817 | 709 | 2: ret |
818 | | -ENDPROC(__cpu_secondary_check52bitva) |
| 710 | +SYM_FUNC_END(__cpu_secondary_check52bitva) |
819 | 711 | |
820 | | -__no_granule_support: |
| 712 | +SYM_FUNC_START_LOCAL(__no_granule_support) |
821 | 713 | /* Indicate that this CPU can't boot and is stuck in the kernel */ |
822 | | - update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2 |
| 714 | + update_early_cpu_boot_status \ |
| 715 | + CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2 |
823 | 716 | 1: |
824 | 717 | wfe |
825 | 718 | wfi |
826 | 719 | b 1b |
827 | | -ENDPROC(__no_granule_support) |
| 720 | +SYM_FUNC_END(__no_granule_support) |
828 | 721 | |
829 | 722 | #ifdef CONFIG_RELOCATABLE |
830 | | -__relocate_kernel: |
| 723 | +SYM_FUNC_START_LOCAL(__relocate_kernel) |
831 | 724 | /* |
832 | 725 | * Iterate over each entry in the relocation table, and apply the |
833 | 726 | * relocations in place. |
.. | .. |
929 | 822 | #endif |
930 | 823 | ret |
931 | 824 | |
932 | | -ENDPROC(__relocate_kernel) |
| 825 | +SYM_FUNC_END(__relocate_kernel) |
933 | 826 | #endif |
934 | 827 | |
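For readers who have not met RELA processing before, the loop inside __relocate_kernel (elided in the hunk above) boils down to the following C sketch. It covers only the plain R_AARCH64_RELATIVE case and ignores the CONFIG_RELR packed encoding the same function also handles; names and types are simplified:

```c
#include <stdint.h>

#define R_AARCH64_RELATIVE	1027

typedef struct {
	uint64_t r_offset;	/* link-time address of the 64-bit word to patch */
	uint64_t r_info;	/* relocation type in the low 32 bits */
	int64_t  r_addend;	/* link-time value that word should contain */
} Elf64_Rela;

/* Apply RELATIVE relocations after the image moved by 'disp' bytes (the
 * KASLR displacement): both the patched location and the value it receives
 * shift by the same amount. */
static void relocate_image(Elf64_Rela *rela, Elf64_Rela *end, uint64_t disp)
{
	for (; rela < end; rela++) {
		if ((uint32_t)rela->r_info != R_AARCH64_RELATIVE)
			continue;
		*(uint64_t *)(rela->r_offset + disp) = rela->r_addend + disp;
	}
}
```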
935 | | -__primary_switch: |
| 828 | +SYM_FUNC_START_LOCAL(__primary_switch) |
936 | 829 | #ifdef CONFIG_RANDOMIZE_BASE |
937 | 830 | mov x19, x0 // preserve new SCTLR_EL1 value |
938 | 831 | mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value |
939 | 832 | #endif |
940 | 833 | |
| 834 | + adrp x1, init_pg_dir |
941 | 835 | bl __enable_mmu |
942 | 836 | #ifdef CONFIG_RELOCATABLE |
943 | 837 | #ifdef CONFIG_RELR |
.. | .. |
963 | 857 | dsb nsh |
964 | 858 | isb |
965 | 859 | |
966 | | - msr sctlr_el1, x19 // re-enable the MMU |
967 | | - isb |
968 | | - ic iallu // flush instructions fetched |
969 | | - dsb nsh // via old mapping |
970 | | - isb |
| 860 | + set_sctlr_el1 x19 // re-enable the MMU |
971 | 861 | |
972 | 862 | bl __relocate_kernel |
973 | 863 | #endif |
.. | .. |
975 | 865 | ldr x8, =__primary_switched |
976 | 866 | adrp x0, __PHYS_OFFSET |
977 | 867 | br x8 |
978 | | -ENDPROC(__primary_switch) |
| 868 | +SYM_FUNC_END(__primary_switch) |
---|