forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-01-04
Commit: 1543e317f1da31b75942316931e8f491a8920811
File:   kernel/arch/arm64/include/asm/mmu_context.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/mmu_context.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_MMU_CONTEXT_H
 #define __ASM_MMU_CONTEXT_H
@@ -25,15 +14,17 @@
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
 #include <linux/mm_types.h>
+#include <linux/pgtable.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
-#include <asm/pgtable.h>
 #include <asm/sysreg.h>
 #include <asm/tlbflush.h>
+
+extern bool rodata_full;
 
 static inline void contextidr_thread_switch(struct task_struct *next)
 {
@@ -45,15 +36,17 @@
 }
 
 /*
- * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
+ * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
+	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 
 	write_sysreg(ttbr, ttbr0_el1);
 	isb();
 }
+
+void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
 
 static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
 {
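Note: reserved_pg_dir is a dedicated, permanently empty page table; pointing TTBR0 at it guarantees that no user-side walk can produce a translation, and it frees empty_zero_page from doubling as the reserved table. The conventional pairing with a TLB flush looks roughly like this (a sketch using helpers from this header, where "next" is a hypothetical struct mm_struct *; this hunk itself only renames the table):

	cpu_set_reserved_ttbr0();	/* TTBR0 -> reserved_pg_dir: all user walks fault */
	local_flush_tlb_all();		/* drop entries tagged for the outgoing table */
	cpu_switch_mm(next->pgd, next);	/* install the new user page tables */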
@@ -72,7 +65,7 @@
 
 static inline bool __cpu_uses_extended_idmap(void)
 {
-	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
+	return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual));
 }
 
 /*
@@ -101,7 +94,7 @@
 	isb();
 }
 
-#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
 #define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)
 
 /*
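Note: both TCR_T0SZ(VA_BITS) sites now use vabits_actual, the VA width probed at boot, so a kernel built for 52-bit VAs still programs the correct T0SZ on 48-bit-only hardware. For reference, the encoding (as defined in asm/pgtable-hwdef.h in kernels of this vintage) is T0SZ = 64 - va_bits:

	#define TCR_T0SZ(x)	((UL(64) - (x)) << TCR_T0SZ_OFFSET)
	/* vabits_actual == 48  ->  T0SZ == 16 */
	/* vabits_actual == 52  ->  T0SZ == 12 (FEAT_LVA) */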
@@ -147,12 +140,25 @@
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
 
-	phys_addr_t pgd_phys = virt_to_phys(pgdp);
+	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
+	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
+
+	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
+		/*
+		 * cpu_replace_ttbr1() is used when there's a boot CPU
+		 * up (i.e. cpufeature framework is not up yet) and
+		 * latter only when we enable CNP via cpufeature's
+		 * enable() callback.
+		 * Also we rely on the cpu_hwcap bit being set before
+		 * calling the enable() function.
+		 */
+		ttbr1 |= TTBR_CNP_BIT;
+	}
 
 	replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1);
 
 	cpu_install_idmap();
-	replace_phys(pgd_phys);
+	replace_phys(ttbr1);
 	cpu_uninstall_idmap();
 }
 
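Note: TTBR_CNP_BIT is bit 0 of the TTBR ("Common not Private": CPUs that set it may share TLB entries for the same tables, which is why the pgd is required to be the shared swapper_pg_dir). The in-diff comment about phys_to_ttbr() matters because with 52-bit PA the table's PA[51:48] are carried in TTBR bits [5:2], so the conversion must happen before CnP is OR-ed in. Sketch of the value being assembled, per the Arm ARM TTBR1_EL1 layout:

	/* bit  0     : CnP
	 * bits 47:1  : BADDR (with 52-bit PA, bits [5:2] hold PA[51:48])
	 * bits 63:48 : ASID (cf. "ASID(mm) << 48" later in this file)
	 */
	ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
	if (system_supports_cnp())
		ttbr1 |= TTBR_CNP_BIT;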
@@ -166,9 +172,15 @@
  * take CPU migration into account.
  */
 #define destroy_context(mm)		do { } while(0)
-void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+void check_and_switch_context(struct mm_struct *mm);
 
-#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	atomic64_set(&mm->context.id, 0);
+	refcount_set(&mm->context.pinned, 0);
+	return 0;
+}
 
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void update_saved_ttbr0(struct task_struct *tsk,
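Note: init_new_context() is promoted from a macro to a proper inline function because there are now two fields to initialise: context.pinned backs the ASID-pinning API declared near the end of this file. For orientation, the mm_context_t involved looks like this in 5.10-era arm64 (paraphrased from asm/mmu.h; treat the exact layout as an assumption):

	typedef struct {
		atomic64_t	id;	/* rollover generation + 16-bit ASID in the low bits */
		refcount_t	pinned;	/* >0: ASID must survive an ASID-space rollover */
		void		*vdso;
		unsigned long	flags;
	} mm_context_t;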
@@ -180,9 +192,9 @@
 		return;
 
 	if (mm == &init_mm)
-		ttbr = __pa_symbol(empty_zero_page);
+		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 	else
-		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
 
 	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
@@ -205,8 +217,6 @@
 
 static inline void __switch_mm(struct mm_struct *next)
 {
-	unsigned int cpu = smp_processor_id();
-
 	/*
 	 * init_mm.pgd does not contain any user mappings and it is always
 	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -216,7 +226,7 @@
 		return;
 	}
 
-	check_and_switch_context(next, cpu);
+	check_and_switch_context(next);
 }
 
 static inline void
@@ -238,9 +248,25 @@
 #define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(prev,next)	switch_mm(prev, next, current)
 
+static inline const struct cpumask *
+task_cpu_possible_mask(struct task_struct *p)
+{
+	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+		return cpu_possible_mask;
+
+	if (!is_compat_thread(task_thread_info(p)))
+		return cpu_possible_mask;
+
+	return system_32bit_el0_cpumask();
+}
+#define task_cpu_possible_mask	task_cpu_possible_mask
+
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);
 
+unsigned long arm64_mm_context_get(struct mm_struct *mm);
+void arm64_mm_context_put(struct mm_struct *mm);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* !__ASM_MMU_CONTEXT_H */
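Note on the trailing additions: task_cpu_possible_mask() restricts where a compat (32-bit) task may run on systems where only some CPUs implement 32-bit EL0, and arm64_mm_context_get()/arm64_mm_context_put() pin an mm's ASID so it is not recycled while an external agent (e.g. an SMMU doing shared virtual addressing) still holds it. A hedged usage sketch of the pinning pair, assuming the upstream convention that a return of 0 means the pin failed:

	unsigned long asid = arm64_mm_context_get(mm);	/* pin the ASID, bump the pin count */
	if (!asid)
		return -ENOSPC;	/* assumption: 0 signals no pinnable ASID was available */
	/* ... program the ASID into device translation state ... */
	arm64_mm_context_put(mm);	/* unpin once the device no longer uses it */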