@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 1994 Linus Torvalds
  *
@@ -24,17 +25,7 @@
  */
 union fpregs_state init_fpstate __read_mostly;
 
-/*
- * Track whether the kernel is using the FPU state
- * currently.
- *
- * This flag is used:
- *
- *   - by IRQ context code to potentially use the FPU
- *     if it's unused.
- *
- *   - to debug kernel_fpu_begin()/end() correctness
- */
+/* Track in-kernel FPU usage */
 static DEFINE_PER_CPU(bool, in_kernel_fpu);
 
 /*
@@ -42,111 +33,116 @@
  */
 DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 
-static void kernel_fpu_disable(void)
-{
-	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
-	this_cpu_write(in_kernel_fpu, true);
-}
-
-static void kernel_fpu_enable(void)
-{
-	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
-	this_cpu_write(in_kernel_fpu, false);
-}
-
-static bool kernel_fpu_disabled(void)
-{
-	return this_cpu_read(in_kernel_fpu);
-}
-
-static bool interrupted_kernel_fpu_idle(void)
-{
-	return !kernel_fpu_disabled();
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static bool interrupted_user_mode(void)
-{
-	struct pt_regs *regs = get_irq_regs();
-	return regs && user_mode(regs);
-}
-
 /*
  * Can we use the FPU in kernel mode with the
  * whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
  */
 bool irq_fpu_usable(void)
 {
-	return !in_interrupt() ||
-		interrupted_user_mode() ||
-		interrupted_kernel_fpu_idle();
+	if (WARN_ON_ONCE(in_nmi()))
+		return false;
+
+	/* In kernel FPU usage already active? */
+	if (this_cpu_read(in_kernel_fpu))
+		return false;
+
+	/*
+	 * When not in NMI or hard interrupt context, FPU can be used in:
+	 *
+	 * - Task context except from within fpregs_lock()'ed critical
+	 *   regions.
+	 *
+	 * - Soft interrupt processing context which cannot happen
+	 *   while in a fpregs_lock()'ed critical region.
	 */
+	if (!in_irq())
+		return true;
+
+	/*
+	 * In hard interrupt context it's safe when soft interrupts
+	 * are enabled, which means the interrupt did not hit in
+	 * a fpregs_lock()'ed critical region.
+	 */
+	return !softirq_count();
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
-static void __kernel_fpu_begin(void)
+/*
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact and we can
+ * keep registers active.
+ *
+ * The legacy FNSAVE instruction cleared all FPU state
+ * unconditionally, so registers are essentially destroyed.
+ * Modern FPU state can be kept in registers, if there are
+ * no pending FP exceptions.
+ */
+int copy_fpregs_to_fpstate(struct fpu *fpu)
 {
-	struct fpu *fpu = &current->thread.fpu;
+	if (likely(use_xsave())) {
+		copy_xregs_to_kernel(&fpu->state.xsave);
+
+		/*
+		 * AVX512 state is tracked here because its use is
+		 * known to slow the max clock speed of the core.
+		 */
+		if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
+			fpu->avx512_timestamp = jiffies;
+		return 1;
+	}
+
+	if (likely(use_fxsr())) {
+		copy_fxregs_to_kernel(fpu);
+		return 1;
+	}
+
+	/*
+	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
+	 * so we have to mark them inactive:
+	 */
+	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
+
+	return 0;
+}
+EXPORT_SYMBOL(copy_fpregs_to_fpstate);
+
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
+{
+	preempt_disable();
 
 	WARN_ON_FPU(!irq_fpu_usable());
+	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
 
-	kernel_fpu_disable();
+	this_cpu_write(in_kernel_fpu, true);
 
-	if (fpu->initialized) {
+	if (!(current->flags & PF_KTHREAD) &&
+	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
+		set_thread_flag(TIF_NEED_FPU_LOAD);
 		/*
 		 * Ignore return value -- we don't care if reg state
 		 * is clobbered.
 		 */
-		copy_fpregs_to_fpstate(fpu);
-	} else {
-		__cpu_invalidate_fpregs_state();
+		copy_fpregs_to_fpstate(&current->thread.fpu);
 	}
+	__cpu_invalidate_fpregs_state();
+
+	/* Put sane initial values into the control registers. */
+	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
+		ldmxcsr(MXCSR_DEFAULT);
+
+	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
+		asm volatile ("fninit");
 }
-
-static void __kernel_fpu_end(void)
-{
-	struct fpu *fpu = &current->thread.fpu;
-
-	if (fpu->initialized)
-		copy_kernel_to_fpregs(&fpu->state);
-
-	kernel_fpu_enable();
-}
-
-void kernel_fpu_begin(void)
-{
-	preempt_disable();
-	__kernel_fpu_begin();
-}
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 
 void kernel_fpu_end(void)
 {
-	__kernel_fpu_end();
+	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
+
+	this_cpu_write(in_kernel_fpu, false);
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_end);
-
-void kernel_fpu_resched(void)
-{
-	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
-
-	if (should_resched(PREEMPT_OFFSET)) {
-		kernel_fpu_end();
-		cond_resched();
-		kernel_fpu_begin();
-	}
-}
-EXPORT_SYMBOL_GPL(kernel_fpu_resched);
 
 /*
  * Save the FPU state (mark it for reload if necessary):
@@ -157,17 +153,18 @@
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	preempt_disable();
+	fpregs_lock();
 	trace_x86_fpu_before_save(fpu);
-	if (fpu->initialized) {
+
+	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
 			copy_kernel_to_fpregs(&fpu->state);
 		}
 	}
+
 	trace_x86_fpu_after_save(fpu);
-	preempt_enable();
+	fpregs_unlock();
 }
-EXPORT_SYMBOL_GPL(fpu__save);
 
 /*
  * Legacy x87 fpstate state init:
@@ -198,11 +195,14 @@
 }
 EXPORT_SYMBOL_GPL(fpstate_init);
 
-int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+int fpu__copy(struct task_struct *dst, struct task_struct *src)
 {
+	struct fpu *dst_fpu = &dst->thread.fpu;
+	struct fpu *src_fpu = &src->thread.fpu;
+
 	dst_fpu->last_cpu = -1;
 
-	if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
+	if (!static_cpu_has(X86_FEATURE_FPU))
 		return 0;
 
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -214,16 +214,23 @@
 	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
 
 	/*
-	 * Save current FPU registers directly into the child
-	 * FPU context, without any memory-to-memory copying.
+	 * If the FPU registers are not current just memcpy() the state.
+	 * Otherwise save current FPU registers directly into the child's FPU
+	 * context, without any memory-to-memory copying.
 	 *
 	 * ( The function 'fails' in the FNSAVE case, which destroys
-	 *   register contents so we have to copy them back. )
+	 *   register contents so we have to load them back. )
 	 */
-	if (!copy_fpregs_to_fpstate(dst_fpu)) {
-		memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
-		copy_kernel_to_fpregs(&src_fpu->state);
-	}
+	fpregs_lock();
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
+
+	else if (!copy_fpregs_to_fpstate(dst_fpu))
+		copy_kernel_to_fpregs(&dst_fpu->state);
+
+	fpregs_unlock();
+
+	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
 
 	trace_x86_fpu_copy_src(src_fpu);
 	trace_x86_fpu_copy_dst(dst_fpu);
@@ -235,20 +242,14 @@
  * Activate the current task's in-memory FPU context,
  * if it has not been used before:
  */
-void fpu__initialize(struct fpu *fpu)
+static void fpu__initialize(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	if (!fpu->initialized) {
-		fpstate_init(&fpu->state);
-		trace_x86_fpu_init_state(fpu);
-
-		trace_x86_fpu_activate_state(fpu);
-		/* Safe to do for the current task: */
-		fpu->initialized = 1;
-	}
+	set_thread_flag(TIF_NEED_FPU_LOAD);
+	fpstate_init(&fpu->state);
+	trace_x86_fpu_init_state(fpu);
 }
-EXPORT_SYMBOL_GPL(fpu__initialize);
 
 /*
  * This function must be called before we read a task's fpstate.
@@ -260,32 +261,20 @@
  *
  * - or it's called for stopped tasks (ptrace), in which case the
  *   registers were already saved by the context-switch code when
- *   the task scheduled out - we only have to initialize the registers
- *   if they've never been initialized.
+ *   the task scheduled out.
  *
  * If the task has used the FPU before then save it.
  */
 void fpu__prepare_read(struct fpu *fpu)
 {
-	if (fpu == &current->thread.fpu) {
+	if (fpu == &current->thread.fpu)
 		fpu__save(fpu);
-	} else {
-		if (!fpu->initialized) {
-			fpstate_init(&fpu->state);
-			trace_x86_fpu_init_state(fpu);
-
-			trace_x86_fpu_activate_state(fpu);
-			/* Safe to do for current and for stopped child tasks: */
-			fpu->initialized = 1;
-		}
-	}
 }
 
 /*
  * This function must be called before we write a task's fpstate.
  *
- * If the task has used the FPU before then invalidate any cached FPU registers.
- * If the task has not used the FPU before then initialize its fpstate.
+ * Invalidate any cached FPU registers.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
@@ -302,42 +291,9 @@
	 */
 	WARN_ON_FPU(fpu == &current->thread.fpu);
 
-	if (fpu->initialized) {
-		/* Invalidate any cached state: */
-		__fpu_invalidate_fpregs_state(fpu);
-	} else {
-		fpstate_init(&fpu->state);
-		trace_x86_fpu_init_state(fpu);
-
-		trace_x86_fpu_activate_state(fpu);
-		/* Safe to do for stopped child tasks: */
-		fpu->initialized = 1;
-	}
+	/* Invalidate any cached state: */
+	__fpu_invalidate_fpregs_state(fpu);
 }
-
-/*
- * 'fpu__restore()' is called to copy FPU registers from
- * the FPU fpstate to the live hw registers and to activate
- * access to the hardware registers, so that FPU instructions
- * can be used afterwards.
- *
- * Must be called with kernel preemption disabled (for example
- * with local interrupts disabled, as it is in the case of
- * do_device_not_available()).
- */
-void fpu__restore(struct fpu *fpu)
-{
-	fpu__initialize(fpu);
-
-	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
-	kernel_fpu_disable();
-	trace_x86_fpu_before_restore(fpu);
-	fpregs_activate(fpu);
-	copy_kernel_to_fpregs(&fpu->state);
-	trace_x86_fpu_after_restore(fpu);
-	kernel_fpu_enable();
-}
-EXPORT_SYMBOL_GPL(fpu__restore);
 
 /*
  * Drops current FPU state: deactivates the fpregs and
@@ -353,16 +309,12 @@
 	preempt_disable();
 
 	if (fpu == &current->thread.fpu) {
-		if (fpu->initialized) {
-			/* Ignore delayed exceptions from user space */
-			asm volatile("1: fwait\n"
-				     "2:\n"
-				     _ASM_EXTABLE(1b, 2b));
-			fpregs_deactivate(fpu);
-		}
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
+		fpregs_deactivate(fpu);
 	}
-
-	fpu->initialized = 0;
 
 	trace_x86_fpu_dropped(fpu);
 
@@ -370,13 +322,13 @@
 }
 
 /*
- * Clear FPU registers by setting them up from
- * the init fpstate:
+ * Clear FPU registers by setting them up from the init fpstate.
+ * Caller must do fpregs_[un]lock() around it.
 */
-static inline void copy_init_fpstate_to_fpregs(void)
+static inline void copy_init_fpstate_to_fpregs(u64 features_mask)
 {
 	if (use_xsave())
-		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
+		copy_kernel_to_xregs(&init_fpstate.xsave, features_mask);
 	else if (static_cpu_has(X86_FEATURE_FXSR))
 		copy_kernel_to_fxregs(&init_fpstate.fxsave);
 	else
@@ -392,24 +344,82 @@
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
-void fpu__clear(struct fpu *fpu)
+static void fpu__clear(struct fpu *fpu, bool user_only)
 {
-	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	fpu__drop(fpu);
-
-	/*
-	 * Make sure fpstate is cleared and initialized.
-	 */
-	if (static_cpu_has(X86_FEATURE_FPU)) {
-		preempt_disable();
+	if (!static_cpu_has(X86_FEATURE_FPU)) {
+		fpu__drop(fpu);
 		fpu__initialize(fpu);
-		user_fpu_begin();
-		copy_init_fpstate_to_fpregs();
-		preempt_enable();
+		return;
 	}
+
+	fpregs_lock();
+
+	if (user_only) {
+		if (!fpregs_state_valid(fpu, smp_processor_id()) &&
+		    xfeatures_mask_supervisor())
+			copy_kernel_to_xregs(&fpu->state.xsave,
+					     xfeatures_mask_supervisor());
+		copy_init_fpstate_to_fpregs(xfeatures_mask_user());
+	} else {
+		copy_init_fpstate_to_fpregs(xfeatures_mask_all);
+	}
+
+	fpregs_mark_activate();
+	fpregs_unlock();
 }
 
+void fpu__clear_user_states(struct fpu *fpu)
+{
+	fpu__clear(fpu, true);
+}
+
+void fpu__clear_all(struct fpu *fpu)
+{
+	fpu__clear(fpu, false);
+}
+
+/*
+ * Load FPU context before returning to userspace.
+ */
+void switch_fpu_return(void)
+{
+	if (!static_cpu_has(X86_FEATURE_FPU))
+		return;
+
+	__fpregs_load_activate();
+}
+EXPORT_SYMBOL_GPL(switch_fpu_return);
+
+#ifdef CONFIG_X86_DEBUG_FPU
+/*
+ * If current FPU state according to its tracking (loaded FPU context on this
+ * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
+ * loaded on return to userland.
+ */
+void fpregs_assert_state_consistent(void)
+{
+	struct fpu *fpu = &current->thread.fpu;
+
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		return;
+
+	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
+}
+EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
+#endif
+
+void fpregs_mark_activate(void)
+{
+	struct fpu *fpu = &current->thread.fpu;
+
+	fpregs_activate(fpu);
+	fpu->last_cpu = smp_processor_id();
+	clear_thread_flag(TIF_NEED_FPU_LOAD);
+}
+EXPORT_SYMBOL_GPL(fpregs_mark_activate);
+
 /*
 * x87 math exception handling:
 */
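
For readers of the patch, a minimal caller-side sketch of the new kernel_fpu_begin_mask()/kernel_fpu_end() API follows. Only irq_fpu_usable(), kernel_fpu_begin_mask(), kernel_fpu_end() and the KFPU_MXCSR flag come from the code above; the helper name, its SSE body and the fallback path are hypothetical illustration, not part of the patch.

#include <linux/string.h>
#include <asm/fpu/api.h>

/* Hypothetical helper: zero a buffer, using SIMD only when it is safe. */
static void example_simd_memzero(void *dst, size_t len)
{
	/* Fall back to a plain memset() when the FPU cannot be used here. */
	if (!irq_fpu_usable()) {
		memset(dst, 0, len);
		return;
	}

	/* SSE-only code: request sane MXCSR defaults, skip the x87 fninit. */
	kernel_fpu_begin_mask(KFPU_MXCSR);
	/* ... SSE/AVX-accelerated clearing of dst would go here ... */
	kernel_fpu_end();
}

Passing only KFPU_MXCSR mirrors the begin_mask() design above: callers that never touch x87 state can avoid the cost of fninit, while kernel_fpu_begin() remains the conservative default.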