forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 a36159eec6ca17402b0e146b86efaf76568dc353
kernel/arch/arm64/include/asm/mmu.h
@@ -1,90 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
+
+#include <asm/cputype.h>
 
 #define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
 #define USER_ASID_BIT	48
 #define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
 #define TTBR_ASID_MASK	(UL(0xffff) << 48)
 
-#define BP_HARDEN_EL2_SLOTS 4
-
 #ifndef __ASSEMBLY__
+
+#include <linux/refcount.h>
 
 typedef struct {
 	atomic64_t	id;
+#ifdef CONFIG_COMPAT
+	void		*sigpage;
+#endif
+	refcount_t	pinned;
 	void		*vdso;
 	unsigned long	flags;
 } mm_context_t;
 
 /*
- * This macro is only used by the TLBI code, which cannot race with an
- * ASID change and therefore doesn't need to reload the counter using
- * atomic64_read.
+ * We use atomic64_read() here because the ASID for an 'mm_struct' can
+ * be reallocated when scheduling one of its threads following a
+ * rollover event (see new_context() and flush_context()). In this case,
+ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
+ * may use a stale ASID. This is fine in principle as the new ASID is
+ * guaranteed to be clean in the TLB, but the TLBI routines have to take
+ * care to handle the following race:
+ *
+ *    CPU 0                    CPU 1                          CPU 2
+ *
+ *    // ptep_clear_flush(mm)
+ *    xchg_relaxed(pte, 0)
+ *    DSB ISHST
+ *    old = ASID(mm)
+ *         |                                                  <rollover>
+ *         |                                                  new = new_context(mm)
+ *         \-----------------> atomic_set(mm->context.id, new)
+ *                             cpu_switch_mm(mm)
+ *                             // Hardware walk of pte using new ASID
+ *    TLBI(old)
+ *
+ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
+ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
+ * written by CPU 0.
 */
-#define ASID(mm)	((mm)->context.id.counter & 0xffff)
+#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
 
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
-	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
-typedef void (*bp_hardening_cb_t)(void);
-
-struct bp_hardening_data {
-	int			hyp_vectors_slot;
-	bp_hardening_cb_t	fn;
-};
-
-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
-     defined(CONFIG_HARDEN_EL2_VECTORS))
-extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
-extern atomic_t arm64_el2_vector_last_slot;
-#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
-
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-	return this_cpu_ptr(&bp_hardening_data);
-}
-
-static inline void arm64_apply_bp_hardening(void)
-{
-	struct bp_hardening_data *d;
-
-	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
-		return;
-
-	d = arm64_get_bp_hardening_data();
-	if (d->fn)
-		d->fn();
-}
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-	return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)	{ }
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
-
+extern void arm64_memblock_init(void);
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
@@ -94,6 +69,10 @@
 			       pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
+extern bool kaslr_requires_kpti(void);
+
+#define INIT_MM_CONTEXT(name)	\
+	.pgd = init_pg_dir,
 
 #endif	/* !__ASSEMBLY__ */
 #endif
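
Note on the ASID() change above: the macro now reloads mm->context.id with atomic64_read() instead of peeking at id.counter, because a rollover on another CPU can republish the field while a TLBI is in flight. The sketch below is a hypothetical userspace analogue, not kernel code: ctx_t and CTX_ASID are made-up names, the "generation in the upper bits" layout is only illustrative, and C11 atomics stand in for the kernel's atomic64_read(). It just shows that reloading the counter atomically on every use lets a reader pick up a freshly published ASID from the low 16 bits.

/* Hypothetical userspace analogue of the ASID() macro; not kernel code. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	atomic_uint_fast64_t id;	/* illustrative: generation << 16 | 16-bit ASID */
} ctx_t;

/* Reload the counter atomically on every use, as the new ASID() does. */
#define CTX_ASID(ctx)	((unsigned int)(atomic_load(&(ctx)->id) & 0xffff))

int main(void)
{
	ctx_t ctx;

	atomic_store(&ctx.id, ((uint64_t)3 << 16) | 0x2a);	/* generation 3, ASID 0x2a */
	printf("ASID = 0x%x\n", CTX_ASID(&ctx));		/* prints 0x2a */

	/* A "rollover" elsewhere republishes the field; the next atomic
	 * load observes the new ASID rather than a cached stale value. */
	atomic_store(&ctx.id, ((uint64_t)4 << 16) | 0x07);
	printf("ASID = 0x%x\n", CTX_ASID(&ctx));		/* prints 0x7 */
	return 0;
}

Building it with something like "cc -std=c11 asid_demo.c" (file name assumed) is enough to try it out.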