2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/mmu_context.h
@@ -9,34 +9,23 @@
 
 #include <trace/events/tlb.h>
 
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
-#include <asm/mpx.h>
+#include <asm/debugreg.h>
 
 extern atomic64_t last_mm_ctx_id;
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
 static inline void paravirt_activate_mm(struct mm_struct *prev,
                                         struct mm_struct *next)
 {
 }
-#endif /* !CONFIG_PARAVIRT */
+#endif /* !CONFIG_PARAVIRT_XXL */
 
 #ifdef CONFIG_PERF_EVENTS
-
+DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
-
-static inline void load_mm_cr4(struct mm_struct *mm)
-{
-        if (static_branch_unlikely(&rdpmc_always_available_key) ||
-            atomic_read(&mm->context.perf_rdpmc_allowed))
-                cr4_set_bits(X86_CR4_PCE);
-        else
-                cr4_clear_bits(X86_CR4_PCE);
-}
-#else
-static inline void load_mm_cr4(struct mm_struct *mm) {}
+void cr4_update_pce(void *ignored);
 #endif
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
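
Note: the inline load_mm_cr4() is gone from this header; CR4.PCE is now driven by the out-of-line cr4_update_pce(void *ignored), whose unused argument lets it match smp_call_func_t and run as an IPI callback. A minimal caller-side sketch under that assumption (the wrapper name refresh_pce_on_mm_cpus() is illustrative, not taken from this diff):

#include <linux/smp.h>
#include <linux/mm_types.h>
#include <asm/mmu_context.h>

/* Re-evaluate CR4.PCE on every CPU currently running @mm. */
static void refresh_pce_on_mm_cpus(struct mm_struct *mm)
{
        /* cr4_update_pce() takes a dummy pointer so it can run as an IPI callback. */
        on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, true);
}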
@@ -66,14 +55,6 @@
         int slot;
 };
 
-/* This is a multiple of PAGE_SIZE. */
-#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
-
-static inline void *ldt_slot_va(int slot)
-{
-        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
-}
-
 /*
  * Used for LDT copy/destruction.
  */
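
Note: LDT_SLOT_STRIDE and ldt_slot_va() no longer live in this header, but the arithmetic behind the removed "/* This is a multiple of PAGE_SIZE. */" comment follows from the uapi/asm/ldt.h constants. Shown here as a worked example, not as code present in either version of the file:

/*
 * LDT_SLOT_STRIDE = LDT_ENTRIES * LDT_ENTRY_SIZE
 *                 = 8192 entries * 8 bytes = 64 KiB (a multiple of PAGE_SIZE)
 *
 * so the PTI alias for slot 0 sits at LDT_BASE_ADDR and the alias for
 * slot 1 at LDT_BASE_ADDR + 0x10000.
 */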
@@ -96,87 +77,21 @@
 static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
 #endif
 
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+extern void load_mm_ldt(struct mm_struct *mm);
+extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
+#else
 static inline void load_mm_ldt(struct mm_struct *mm)
 {
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
-        struct ldt_struct *ldt;
-
-        /* READ_ONCE synchronizes with smp_store_release */
-        ldt = READ_ONCE(mm->context.ldt);
-
-        /*
-         * Any change to mm->context.ldt is followed by an IPI to all
-         * CPUs with the mm active. The LDT will not be freed until
-         * after the IPI is handled by all such CPUs. This means that,
-         * if the ldt_struct changes before we return, the values we see
-         * will be safe, and the new values will be loaded before we run
-         * any user code.
-         *
-         * NB: don't try to convert this to use RCU without extreme care.
-         * We would still need IRQs off, because we don't want to change
-         * the local LDT after an IPI loaded a newer value than the one
-         * that we can see.
-         */
-
-        if (unlikely(ldt)) {
-                if (static_cpu_has(X86_FEATURE_PTI)) {
-                        if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
-                                /*
-                                 * Whoops -- either the new LDT isn't mapped
-                                 * (if slot == -1) or is mapped into a bogus
-                                 * slot (if slot > 1).
-                                 */
-                                clear_LDT();
-                                return;
-                        }
-
-                        /*
-                         * If page table isolation is enabled, ldt->entries
-                         * will not be mapped in the userspace pagetables.
-                         * Tell the CPU to access the LDT through the alias
-                         * at ldt_slot_va(ldt->slot).
-                         */
-                        set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
-                } else {
-                        set_ldt(ldt->entries, ldt->nr_entries);
-                }
-        } else {
-                clear_LDT();
-        }
-#else
         clear_LDT();
-#endif
 }
-
 static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
 {
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
-        /*
-         * Load the LDT if either the old or new mm had an LDT.
-         *
-         * An mm will never go from having an LDT to not having an LDT. Two
-         * mms never share an LDT, so we don't gain anything by checking to
-         * see whether the LDT changed. There's also no guarantee that
-         * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
-         * then prev->context.ldt will also be non-NULL.
-         *
-         * If we really cared, we could optimize the case where prev == next
-         * and we're exiting lazy mode. Most of the time, if this happens,
-         * we don't actually need to reload LDTR, but modify_ldt() is mostly
-         * used by legacy code and emulators where we don't need this level of
-         * performance.
-         *
-         * This uses | instead of || because it generates better code.
-         */
-        if (unlikely((unsigned long)prev->context.ldt |
-                     (unsigned long)next->context.ldt))
-                load_mm_ldt(next);
-#endif
-
         DEBUG_LOCKS_WARN_ON(preemptible());
 }
+#endif
 
-void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
 /*
  * Init a new mm. Used on mm copies, like at fork()
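
Note: load_mm_ldt() and switch_ldt() keep their old semantics but move out of line; only the !CONFIG_MODIFY_LDT_SYSCALL stubs stay in the header. The removed comment still defines the contract: the reader uses READ_ONCE() on mm->context.ldt, so any writer must publish the new LDT with smp_store_release() and then IPI every CPU running the mm before the old ldt_struct may be freed. A sketch of that writer side, with helper names (install_new_ldt, reload_ldt_on_cpu) that are illustrative only:

#include <linux/smp.h>
#include <linux/mm_types.h>
#include <asm/barrier.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/* IPI callback: reload LDTR if this CPU is currently running the target mm. */
static void reload_ldt_on_cpu(void *info)
{
        struct mm_struct *mm = info;

        if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
                load_mm_ldt(mm);
}

static void install_new_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
        /* Pairs with the READ_ONCE() in load_mm_ldt(). */
        smp_store_release(&mm->context.ldt, ldt);

        /* The old ldt_struct must not be freed until every such CPU has run this. */
        on_each_cpu_mask(mm_cpumask(mm), reload_ldt_on_cpu, mm, true);
}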
@@ -271,34 +186,9 @@
 }
 #endif
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-                struct vm_area_struct *vma)
+static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
+                              unsigned long end)
 {
-        mpx_mm_init(mm);
-}
-
-static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-                              unsigned long start, unsigned long end)
-{
-        /*
-         * mpx_notify_unmap() goes and reads a rarely-hot
-         * cacheline in the mm_struct. That can be expensive
-         * enough to be seen in profiles.
-         *
-         * The mpx_notify_unmap() call and its contents have been
-         * observed to affect munmap() performance on hardware
-         * where MPX is not present.
-         *
-         * The unlikely() optimizes for the fast case: no MPX
-         * in the CPU, or no MPX use in the process. Even if
-         * we get this wrong (in the unlikely event that MPX
-         * is widely enabled on some system) the overhead of
-         * MPX itself (reading bounds tables) is expected to
-         * overwhelm the overhead of getting this unlikely()
-         * consistently wrong.
-         */
-        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
-                mpx_notify_unmap(mm, vma, start, end);
 }
 
 /*
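
Note: with MPX support removed, arch_bprm_mm_init() disappears and arch_unmap() shrinks to an empty stub that no longer takes a VMA. A small caller-side sketch of the new shape (the surrounding function is a simplified illustration of the generic munmap path, not actual mm/ code):

#include <linux/mm_types.h>
#include <asm/mmu_context.h>

static void notify_arch_of_unmap(struct mm_struct *mm,
                                 unsigned long start, unsigned long end)
{
        /* No VMA argument any more; on x86 this is now a no-op. */
        arch_unmap(mm, start, end);
}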
@@ -310,21 +200,6 @@
  * So do not enforce things if the VMA is not from the current
  * mm, or if we are in a kernel thread.
  */
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
-{
-        if (!current->mm)
-                return true;
-        /*
-         * Should PKRU be enforced on the access to this VMA? If
-         * the VMA is from another process, then PKRU has no
-         * relevance and should not be enforced.
-         */
-        if (current->mm != vma->vm_mm)
-                return true;
-
-        return false;
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                 bool write, bool execute, bool foreign)
 {
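
Note: vma_is_foreign() leaves this header, but the policy in the surviving comment (do not enforce protection keys for VMAs that are not from the current mm, or when running in a kernel thread) still has to be computed somewhere to feed the 'foreign' argument of arch_vma_access_permitted(). A generic restatement of that predicate, under the assumption that it now lives in common mm code rather than here:

#include <linux/mm_types.h>
#include <linux/sched.h>

/* True if @vma does not belong to the task performing the access. */
static inline bool vma_is_foreign_sketch(struct vm_area_struct *vma)
{
        /* Kernel threads have no mm, so every VMA is foreign to them. */
        if (!current->mm)
                return true;

        /* PKRU only governs accesses to the current process's own VMAs. */
        return current->mm != vma->vm_mm;
}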
@@ -337,23 +212,6 @@
         return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
-/*
- * This can be used from process context to figure out what the value of
- * CR3 is without needing to do a (slow) __read_cr3().
- *
- * It's intended to be used for code like KVM that sneakily changes CR3
- * and needs to restore it. It needs to be used very carefully.
- */
-static inline unsigned long __get_current_cr3_fast(void)
-{
-        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
-                this_cpu_read(cpu_tlbstate.loaded_mm_asid));
-
-        /* For now, be very restrictive about when this can be called. */
-        VM_WARN_ON(in_nmi() || preemptible());
-
-        VM_BUG_ON(cr3 != __read_cr3());
-        return cr3;
-}
+unsigned long __get_current_cr3_fast(void);
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
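
Note: __get_current_cr3_fast() is now only declared here, but the deleted comment still spells out its contract: process context, preemption disabled, not from NMI, and intended for code that temporarily installs a different CR3 and must restore the kernel's value without a slow __read_cr3(). A hedged save/restore sketch (the wrapper name and the temporary CR3 value are illustrative):

#include <asm/mmu_context.h>
#include <asm/special_insns.h>

static void run_with_temporary_cr3(unsigned long temp_cr3)
{
        /* Must be called with preemption disabled and outside NMI context. */
        unsigned long saved_cr3 = __get_current_cr3_fast();

        native_write_cr3(temp_cr3);
        /* ... work that needs the temporary page tables goes here ... */
        native_write_cr3(saved_cr3);
}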