```diff
@@ -1,90 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
+
+#include <asm/cputype.h>
 
 #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
 #define USER_ASID_BIT 48
 #define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
 #define TTBR_ASID_MASK (UL(0xffff) << 48)
 
```
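A quick illustration of the layout these constants describe may help: the 16-bit hardware ASID occupies bits [63:48] of the translation table base register, and `USER_ASID_FLAG` is bit 0 of that field. The standalone sketch below is plain user-space C, not kernel code; the `ttbr` and `asid` values are invented, and the kernel/user pairing via the low ASID bit is assumed from the KPTI trampoline convention. It simply shows how the masks carve up a TTBR value on an LP64 target.

```c
#include <stdio.h>

#define UL(x)          ((unsigned long)(x))
#define USER_ASID_BIT  48
#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK (UL(0xffff) << 48)

int main(void)
{
	unsigned long ttbr = 0x40000000UL; /* hypothetical table base, ASID field clear */
	unsigned long asid = 0x42;         /* hypothetical ASID, low bit clear */

	/* Place the ASID in bits [63:48] of the TTBR value. */
	unsigned long kernel_ttbr = (ttbr & ~TTBR_ASID_MASK) | (asid << USER_ASID_BIT);

	/* Toggle to the paired "user" ASID by setting bit 0 of the ASID field. */
	unsigned long user_ttbr = kernel_ttbr | USER_ASID_FLAG;

	printf("kernel: ttbr=%#lx asid=%#lx\n", kernel_ttbr,
	       (kernel_ttbr & TTBR_ASID_MASK) >> USER_ASID_BIT);
	printf("user:   ttbr=%#lx asid=%#lx\n", user_ttbr,
	       (user_ttbr & TTBR_ASID_MASK) >> USER_ASID_BIT);
	return 0;
}
```

Run as written, this prints ASID 0x42 for the kernel view and 0x43 for the user view, i.e. the two values that differ only in the bit selected by `USER_ASID_FLAG`.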
```diff
-#define BP_HARDEN_EL2_SLOTS 4
-
 #ifndef __ASSEMBLY__
+
+#include <linux/refcount.h>
 
 typedef struct {
         atomic64_t id;
+#ifdef CONFIG_COMPAT
+        void *sigpage;
+#endif
+        refcount_t pinned;
         void *vdso;
         unsigned long flags;
 } mm_context_t;
 
```
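The new `pinned` refcount exists so that a caller which shares this mm's page tables with a device (for example an SMMU doing shared virtual addressing) can keep the ASID from being recycled at rollover. The pinning itself is done by helpers in arch/arm64/mm/context.c; the sketch below only shows the intended usage pattern and assumes helpers named `arm64_mm_context_get()`/`arm64_mm_context_put()` with roughly this behaviour, so treat the names, return convention and error handling as assumptions rather than a documented API.

```c
/*
 * Usage sketch only (assumed helper names and semantics): pin the mm's
 * ASID while a hypothetical device walks its page tables, then release
 * the pin once the device has been quiesced.
 */
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <asm/mmu_context.h>

static int hypothetical_bind_mm(struct mm_struct *mm)
{
	unsigned long asid;

	asid = arm64_mm_context_get(mm); /* bumps mm->context.pinned */
	if (!asid)
		return -ENOSPC;          /* no pinnable ASID left */

	/* ... program the device with 'asid' and the mm's pgd ... */
	return 0;
}

static void hypothetical_unbind_mm(struct mm_struct *mm)
{
	/* ... stop the device from using the ASID ... */
	arm64_mm_context_put(mm);        /* drops the pin; rollover may now recycle it */
}
```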
```diff
 /*
- * This macro is only used by the TLBI code, which cannot race with an
- * ASID change and therefore doesn't need to reload the counter using
- * atomic64_read.
+ * We use atomic64_read() here because the ASID for an 'mm_struct' can
+ * be reallocated when scheduling one of its threads following a
+ * rollover event (see new_context() and flush_context()). In this case,
+ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
+ * may use a stale ASID. This is fine in principle as the new ASID is
+ * guaranteed to be clean in the TLB, but the TLBI routines have to take
+ * care to handle the following race:
+ *
+ *    CPU 0                      CPU 1                        CPU 2
+ *
+ *    // ptep_clear_flush(mm)
+ *    xchg_relaxed(pte, 0)
+ *    DSB ISHST
+ *    old = ASID(mm)
+ *         |                     <rollover>
+ *         |                     new = new_context(mm)
+ *         \-------------------> atomic_set(mm->context.id, new)
+ *                               cpu_switch_mm(mm)
+ *                                                            // Hardware walk of pte using new ASID
+ *                                                            TLBI(old)
+ *
+ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
+ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
+ * written by CPU 0.
  */
-#define ASID(mm) ((mm)->context.id.counter & 0xffff)
+#define ASID(mm) (atomic64_read(&(mm)->context.id) & 0xffff)
 
```
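The new comment describes exactly the discipline the TLB maintenance code follows: publish the PTE update with `DSB ISHST` first, and only then sample `ASID(mm)`. As a reminder of what that looks like in practice, here is a condensed sketch in the spirit of `flush_tlb_mm()` from `asm/tlbflush.h`; it is simplified for illustration and is not a verbatim copy of the real helper.

```c
/*
 * Condensed illustration of the ordering described in the comment above,
 * loosely following flush_tlb_mm() in arch/arm64/include/asm/tlbflush.h.
 * Simplified; not a drop-in replacement for the real helper.
 */
#include <linux/mm_types.h>
#include <asm/tlbflush.h>

static inline void example_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);                          /* make the cleared PTE visible first... */
	asid = __TLBI_VADDR(0, ASID(mm));    /* ...then sample the (possibly stale) ASID */
	__tlbi(aside1is, asid);              /* invalidate all entries tagged with it */
	__tlbi_user(aside1is, asid);         /* repeat for the paired user ASID under KPTI */
	dsb(ish);                            /* wait for the invalidation to finish */
}
```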
```diff
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-        return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
-               cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+        return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
```
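This predicate is how the rest of the mm code stays KPTI-agnostic. For example, the `__tlbi_user()` helper used in the sketch above only repeats an invalidation for the paired user ASID when kernel mappings really are hidden from EL0. A paraphrase of that pattern follows; the authoritative macro lives in `asm/tlbflush.h`, so treat this as an illustrative rename rather than the kernel's definition.

```c
/*
 * Paraphrase of the __tlbi_user() pattern from asm/tlbflush.h: only
 * issue the extra invalidation for the user ASID when KPTI is in
 * effect, i.e. when kernel mappings are unmapped at EL0.
 */
#define example_tlbi_user(op, arg)				\
do {								\
	if (arm64_kernel_unmapped_at_el0())			\
		__tlbi(op, (arg) | USER_ASID_FLAG);		\
} while (0)
```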
```diff
-typedef void (*bp_hardening_cb_t)(void);
-
-struct bp_hardening_data {
-        int hyp_vectors_slot;
-        bp_hardening_cb_t fn;
-};
-
-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
-     defined(CONFIG_HARDEN_EL2_VECTORS))
-extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
-extern atomic_t arm64_el2_vector_last_slot;
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
-
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-        return this_cpu_ptr(&bp_hardening_data);
-}
-
-static inline void arm64_apply_bp_hardening(void)
-{
-        struct bp_hardening_data *d;
-
-        if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
-                return;
-
-        d = arm64_get_bp_hardening_data();
-        if (d->fn)
-                d->fn();
-}
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-        return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void) { }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-
+extern void arm64_memblock_init(void);
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
@@ -94,6 +69,10 @@
                         pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
+extern bool kaslr_requires_kpti(void);
+
+#define INIT_MM_CONTEXT(name) \
+        .pgd = init_pg_dir,
 
 #endif /* !__ASSEMBLY__ */
 #endif
```
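One note on the `INIT_MM_CONTEXT()` addition: the generic `init_mm` definition in `mm/init-mm.c` initializes `.pgd = swapper_pg_dir` and expands this macro at the end of its initializer list, so the later designated initializer takes precedence and `init_mm` comes up using `init_pg_dir` instead. A trimmed-down sketch of that interaction follows; most fields of the real initializer are omitted here, so it is only meant to show why the macro body is a bare `.pgd = ...,` fragment.

```c
/*
 * Trimmed-down sketch of how mm/init-mm.c consumes INIT_MM_CONTEXT();
 * the real initializer has many more fields.
 */
#include <linux/list.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>

struct mm_struct init_mm = {
	.pgd      = swapper_pg_dir,              /* generic default */
	.mm_users = ATOMIC_INIT(2),
	.mm_count = ATOMIC_INIT(1),
	.mmlist   = LIST_HEAD_INIT(init_mm.mmlist),
	/*
	 * On arm64 this expands to ".pgd = init_pg_dir," and, being the
	 * later designated initializer, overrides the default above.
	 */
	INIT_MM_CONTEXT(init_mm)
};
```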