.. | .. |
---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
---|
1 | 2 | /* |
---|
2 | 3 | * Based on arch/arm/include/asm/mmu_context.h |
---|
3 | 4 | * |
---|
4 | 5 | * Copyright (C) 1996 Russell King. |
---|
5 | 6 | * Copyright (C) 2012 ARM Ltd. |
---|
6 | | - * |
---|
7 | | - * This program is free software; you can redistribute it and/or modify |
---|
8 | | - * it under the terms of the GNU General Public License version 2 as |
---|
9 | | - * published by the Free Software Foundation. |
---|
10 | | - * |
---|
11 | | - * This program is distributed in the hope that it will be useful, |
---|
12 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
13 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
14 | | - * GNU General Public License for more details. |
---|
15 | | - * |
---|
16 | | - * You should have received a copy of the GNU General Public License |
---|
17 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
---|
18 | 7 | */ |
---|
19 | 8 | #ifndef __ASM_MMU_CONTEXT_H |
---|
20 | 9 | #define __ASM_MMU_CONTEXT_H |
---|
.. | .. |
---|
25 | 14 | #include <linux/sched.h> |
---|
26 | 15 | #include <linux/sched/hotplug.h> |
---|
27 | 16 | #include <linux/mm_types.h> |
---|
| 17 | +#include <linux/pgtable.h> |
---|
28 | 18 | |
---|
29 | 19 | #include <asm/cacheflush.h> |
---|
30 | 20 | #include <asm/cpufeature.h> |
---|
31 | 21 | #include <asm/proc-fns.h> |
---|
32 | 22 | #include <asm-generic/mm_hooks.h> |
---|
33 | 23 | #include <asm/cputype.h> |
---|
34 | | -#include <asm/pgtable.h> |
---|
35 | 24 | #include <asm/sysreg.h> |
---|
36 | 25 | #include <asm/tlbflush.h> |
---|
| 26 | + |
---|
| 27 | +extern bool rodata_full; |
---|
37 | 28 | |
---|
38 | 29 | static inline void contextidr_thread_switch(struct task_struct *next) |
---|
39 | 30 | { |
---|
.. | .. |
---|
45 | 36 | } |
---|
46 | 37 | |
---|
47 | 38 | /* |
---|
48 | | - * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. |
---|
| 39 | + * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0. |
---|
49 | 40 | */ |
---|
50 | 41 | static inline void cpu_set_reserved_ttbr0(void) |
---|
51 | 42 | { |
---|
52 | | - unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page)); |
---|
| 43 | + unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); |
---|
53 | 44 | |
---|
54 | 45 | write_sysreg(ttbr, ttbr0_el1); |
---|
55 | 46 | isb(); |
---|
56 | 47 | } |
---|
| 48 | + |
---|
| 49 | +void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); |
---|
57 | 50 | |
---|
58 | 51 | static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) |
---|
59 | 52 | { |
---|
.. | .. |
---|
72 | 65 | |
---|
73 | 66 | static inline bool __cpu_uses_extended_idmap(void) |
---|
74 | 67 | { |
---|
75 | | - return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)); |
---|
| 68 | + return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual)); |
---|
76 | 69 | } |
---|
77 | 70 | |
---|
78 | 71 | /* |
---|
.. | .. |
---|
101 | 94 | isb(); |
---|
102 | 95 | } |
---|
103 | 96 | |
---|
/* Program T0SZ for the default kernel VA size (derived from vabits_actual). */
#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
/* Program T0SZ for the idmap (idmap_t0sz may differ from the default). */
#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz)
---|
106 | 99 | |
---|
107 | 100 | /* |
---|
.. | .. |
---|
147 | 140 | extern ttbr_replace_func idmap_cpu_replace_ttbr1; |
---|
148 | 141 | ttbr_replace_func *replace_phys; |
---|
149 | 142 | |
---|
150 | | - phys_addr_t pgd_phys = virt_to_phys(pgdp); |
---|
| 143 | + /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */ |
---|
| 144 | + phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp)); |
---|
| 145 | + |
---|
| 146 | + if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) { |
---|
| 147 | + /* |
---|
| 148 | + * cpu_replace_ttbr1() is used when there's a boot CPU |
---|
| 149 | + * up (i.e. cpufeature framework is not up yet) and |
---|
| 150 | + * latter only when we enable CNP via cpufeature's |
---|
| 151 | + * enable() callback. |
---|
| 152 | + * Also we rely on the cpu_hwcap bit being set before |
---|
| 153 | + * calling the enable() function. |
---|
| 154 | + */ |
---|
| 155 | + ttbr1 |= TTBR_CNP_BIT; |
---|
| 156 | + } |
---|
151 | 157 | |
---|
152 | 158 | replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1); |
---|
153 | 159 | |
---|
154 | 160 | cpu_install_idmap(); |
---|
155 | | - replace_phys(pgd_phys); |
---|
| 161 | + replace_phys(ttbr1); |
---|
156 | 162 | cpu_uninstall_idmap(); |
---|
157 | 163 | } |
---|
158 | 164 | |
---|
.. | .. |
---|
166 | 172 | * take CPU migration into account. |
---|
167 | 173 | */ |
---|
/* No per-mm teardown is required here; destroy_context() is a no-op. */
#define destroy_context(mm) do { } while(0)

/*
 * Out-of-line context check/switch used by __switch_mm() below.
 * NOTE(review): implementation not visible in this header — presumably it
 * allocates/validates the mm's ASID (mm->context.id); confirm in mm/context.c.
 */
void check_and_switch_context(struct mm_struct *mm);
---|
170 | 176 | |
---|
171 | | -#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) |
---|
| 177 | +static inline int |
---|
| 178 | +init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
---|
| 179 | +{ |
---|
| 180 | + atomic64_set(&mm->context.id, 0); |
---|
| 181 | + refcount_set(&mm->context.pinned, 0); |
---|
| 182 | + return 0; |
---|
| 183 | +} |
---|
172 | 184 | |
---|
173 | 185 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
---|
174 | 186 | static inline void update_saved_ttbr0(struct task_struct *tsk, |
---|
.. | .. |
---|
180 | 192 | return; |
---|
181 | 193 | |
---|
182 | 194 | if (mm == &init_mm) |
---|
183 | | - ttbr = __pa_symbol(empty_zero_page); |
---|
| 195 | + ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); |
---|
184 | 196 | else |
---|
185 | | - ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; |
---|
| 197 | + ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48; |
---|
186 | 198 | |
---|
187 | 199 | WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); |
---|
188 | 200 | } |
---|
.. | .. |
---|
205 | 217 | |
---|
206 | 218 | static inline void __switch_mm(struct mm_struct *next) |
---|
207 | 219 | { |
---|
208 | | - unsigned int cpu = smp_processor_id(); |
---|
209 | | - |
---|
210 | 220 | /* |
---|
211 | 221 | * init_mm.pgd does not contain any user mappings and it is always |
---|
212 | 222 | * active for kernel addresses in TTBR1. Just set the reserved TTBR0. |
---|
.. | .. |
---|
216 | 226 | return; |
---|
217 | 227 | } |
---|
218 | 228 | |
---|
219 | | - check_and_switch_context(next, cpu); |
---|
| 229 | + check_and_switch_context(next); |
---|
220 | 230 | } |
---|
221 | 231 | |
---|
222 | 232 | static inline void |
---|
.. | .. |
---|
238 | 248 | #define deactivate_mm(tsk,mm) do { } while (0) |
---|
239 | 249 | #define activate_mm(prev,next) switch_mm(prev, next, current) |
---|
240 | 250 | |
---|
| 251 | +static inline const struct cpumask * |
---|
| 252 | +task_cpu_possible_mask(struct task_struct *p) |
---|
| 253 | +{ |
---|
| 254 | + if (!static_branch_unlikely(&arm64_mismatched_32bit_el0)) |
---|
| 255 | + return cpu_possible_mask; |
---|
| 256 | + |
---|
| 257 | + if (!is_compat_thread(task_thread_info(p))) |
---|
| 258 | + return cpu_possible_mask; |
---|
| 259 | + |
---|
| 260 | + return system_32bit_el0_cpumask(); |
---|
| 261 | +} |
---|
| 262 | +#define task_cpu_possible_mask task_cpu_possible_mask |
---|
| 263 | + |
---|
241 | 264 | void verify_cpu_asid_bits(void); |
---|
242 | 265 | void post_ttbr_update_workaround(void); |
---|
243 | 266 | |
---|
| 267 | +unsigned long arm64_mm_context_get(struct mm_struct *mm); |
---|
| 268 | +void arm64_mm_context_put(struct mm_struct *mm); |
---|
| 269 | + |
---|
244 | 270 | #endif /* !__ASSEMBLY__ */ |
---|
245 | 271 | |
---|
246 | 272 | #endif /* !__ASM_MMU_CONTEXT_H */ |
---|