.. | .. |
---|
15 | 15 | |
---|
16 | 16 | #include <linux/hardirq.h> |
---|
17 | 17 | #include <linux/pkeys.h> |
---|
| 18 | +#include <linux/cpuhotplug.h> |
---|
18 | 19 | |
---|
19 | 20 | #define CREATE_TRACE_POINTS |
---|
20 | 21 | #include <asm/trace/fpu.h> |
---|
.. | .. |
---|
76 | 77 | */ |
---|
77 | 78 | bool irq_fpu_usable(void) |
---|
78 | 79 | { |
---|
79 | | - return !in_interrupt() || |
---|
80 | | - interrupted_user_mode() || |
---|
81 | | - interrupted_kernel_fpu_idle(); |
---|
| 80 | + return running_inband() && |
---|
| 81 | + (!in_interrupt() || |
---|
| 82 | + interrupted_user_mode() || |
---|
| 83 | + interrupted_kernel_fpu_idle()); |
---|
82 | 84 | } |
---|
83 | 85 | EXPORT_SYMBOL(irq_fpu_usable); |
---|
84 | 86 | |
---|
.. | .. |
---|
123 | 125 | |
---|
124 | 126 | void kernel_fpu_begin_mask(unsigned int kfpu_mask) |
---|
125 | 127 | { |
---|
| 128 | + unsigned long flags; |
---|
| 129 | + |
---|
126 | 130 | preempt_disable(); |
---|
127 | 131 | |
---|
128 | 132 | WARN_ON_FPU(!irq_fpu_usable()); |
---|
129 | 133 | WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); |
---|
| 134 | + |
---|
| 135 | + flags = hard_cond_local_irq_save(); |
---|
130 | 136 | |
---|
131 | 137 | this_cpu_write(in_kernel_fpu, true); |
---|
132 | 138 | |
---|
.. | .. |
---|
139 | 145 | */ |
---|
140 | 146 | copy_fpregs_to_fpstate(&current->thread.fpu); |
---|
141 | 147 | } |
---|
| 148 | + |
---|
142 | 149 | __cpu_invalidate_fpregs_state(); |
---|
143 | 150 | |
---|
144 | 151 | /* Put sane initial values into the control registers. */ |
---|
.. | .. |
---|
147 | 154 | |
---|
148 | 155 | if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU)) |
---|
149 | 156 | asm volatile ("fninit"); |
---|
| 157 | + |
---|
| 158 | + hard_cond_local_irq_restore(flags); |
---|
150 | 159 | } |
---|
151 | 160 | EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); |
---|
152 | 161 | |
---|
.. | .. |
---|
166 | 175 | */ |
---|
167 | 176 | void fpu__save(struct fpu *fpu) |
---|
168 | 177 | { |
---|
| 178 | + unsigned long flags; |
---|
| 179 | + |
---|
169 | 180 | WARN_ON_FPU(fpu != &current->thread.fpu); |
---|
170 | 181 | |
---|
171 | | - fpregs_lock(); |
---|
| 182 | + flags = fpregs_lock(); |
---|
172 | 183 | trace_x86_fpu_before_save(fpu); |
---|
173 | 184 | |
---|
174 | 185 | if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { |
---|
.. | .. |
---|
178 | 189 | } |
---|
179 | 190 | |
---|
180 | 191 | trace_x86_fpu_after_save(fpu); |
---|
181 | | - fpregs_unlock(); |
---|
| 192 | + fpregs_unlock(flags); |
---|
182 | 193 | } |
---|
183 | 194 | |
---|
184 | 195 | /* |
---|
.. | .. |
---|
214 | 225 | { |
---|
215 | 226 | struct fpu *dst_fpu = &dst->thread.fpu; |
---|
216 | 227 | struct fpu *src_fpu = &src->thread.fpu; |
---|
| 228 | + unsigned long flags; |
---|
217 | 229 | |
---|
218 | 230 | dst_fpu->last_cpu = -1; |
---|
219 | 231 | |
---|
.. | .. |
---|
236 | 248 | * ( The function 'fails' in the FNSAVE case, which destroys |
---|
237 | 249 | * register contents so we have to load them back. ) |
---|
238 | 250 | */ |
---|
239 | | - fpregs_lock(); |
---|
| 251 | + flags = fpregs_lock(); |
---|
240 | 252 | if (test_thread_flag(TIF_NEED_FPU_LOAD)) |
---|
241 | 253 | memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size); |
---|
242 | 254 | |
---|
243 | 255 | else if (!copy_fpregs_to_fpstate(dst_fpu)) |
---|
244 | 256 | copy_kernel_to_fpregs(&dst_fpu->state); |
---|
245 | 257 | |
---|
246 | | - fpregs_unlock(); |
---|
| 258 | + fpregs_unlock(flags); |
---|
247 | 259 | |
---|
248 | 260 | set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD); |
---|
249 | 261 | |
---|
.. | .. |
---|
321 | 333 | */ |
---|
322 | 334 | void fpu__drop(struct fpu *fpu) |
---|
323 | 335 | { |
---|
324 | | - preempt_disable(); |
---|
| 336 | + unsigned long flags; |
---|
| 337 | + |
---|
| 338 | + flags = hard_preempt_disable(); |
---|
325 | 339 | |
---|
326 | 340 | if (fpu == ¤t->thread.fpu) { |
---|
327 | 341 | /* Ignore delayed exceptions from user space */ |
---|
.. | .. |
---|
333 | 347 | |
---|
334 | 348 | trace_x86_fpu_dropped(fpu); |
---|
335 | 349 | |
---|
336 | | - preempt_enable(); |
---|
| 350 | + hard_preempt_enable(flags); |
---|
337 | 351 | } |
---|
338 | 352 | |
---|
339 | 353 | /* |
---|
.. | .. |
---|
361 | 375 | */ |
---|
362 | 376 | static void fpu__clear(struct fpu *fpu, bool user_only) |
---|
363 | 377 | { |
---|
| 378 | + unsigned long flags; |
---|
| 379 | + |
---|
364 | 380 | WARN_ON_FPU(fpu != &current->thread.fpu); |
---|
365 | 381 | |
---|
366 | 382 | if (!static_cpu_has(X86_FEATURE_FPU)) { |
---|
| 383 | + flags = hard_cond_local_irq_save(); |
---|
367 | 384 | fpu__drop(fpu); |
---|
368 | 385 | fpu__initialize(fpu); |
---|
| 386 | + hard_cond_local_irq_restore(flags); |
---|
369 | 387 | return; |
---|
370 | 388 | } |
---|
371 | 389 | |
---|
372 | | - fpregs_lock(); |
---|
| 390 | + flags = fpregs_lock(); |
---|
373 | 391 | |
---|
374 | 392 | if (user_only) { |
---|
375 | 393 | if (!fpregs_state_valid(fpu, smp_processor_id()) && |
---|
.. | .. |
---|
382 | 400 | } |
---|
383 | 401 | |
---|
384 | 402 | fpregs_mark_activate(); |
---|
385 | | - fpregs_unlock(); |
---|
| 403 | + fpregs_unlock(flags); |
---|
386 | 404 | } |
---|
387 | 405 | |
---|
388 | 406 | void fpu__clear_user_states(struct fpu *fpu) |
---|
.. | .. |
---|
400 | 418 | */ |
---|
401 | 419 | void switch_fpu_return(void) |
---|
402 | 420 | { |
---|
| 421 | + unsigned long flags; |
---|
| 422 | + |
---|
403 | 423 | if (!static_cpu_has(X86_FEATURE_FPU)) |
---|
404 | 424 | return; |
---|
405 | 425 | |
---|
| 426 | + flags = hard_cond_local_irq_save(); |
---|
406 | 427 | __fpregs_load_activate(); |
---|
| 428 | + hard_cond_local_irq_restore(flags); |
---|
407 | 429 | } |
---|
408 | 430 | EXPORT_SYMBOL_GPL(switch_fpu_return); |
---|
409 | 431 | |
---|
.. | .. |
---|
503 | 525 | */ |
---|
504 | 526 | return 0; |
---|
505 | 527 | } |
---|
| 528 | + |
---|
| 529 | +#ifdef CONFIG_DOVETAIL |
---|
| 530 | + |
---|
| 531 | +/* |
---|
| 532 | + * Holds the in-kernel fpu state when preempted by a task running on |
---|
| 533 | + * the out-of-band stage. |
---|
| 534 | + */ |
---|
| 535 | +static DEFINE_PER_CPU(struct fpu *, in_kernel_fpstate); |
---|
| 536 | + |
---|
| 537 | +static int fpu__init_kernel_fpstate(unsigned int cpu) |
---|
| 538 | +{ |
---|
| 539 | + struct fpu *fpu; |
---|
| 540 | + |
---|
| 541 | + fpu = kzalloc(sizeof(*fpu) + fpu_kernel_xstate_size, GFP_KERNEL); |
---|
| 542 | + if (fpu == NULL) |
---|
| 543 | + return -ENOMEM; |
---|
| 544 | + |
---|
| 545 | + this_cpu_write(in_kernel_fpstate, fpu); |
---|
| 546 | + fpstate_init(&fpu->state); |
---|
| 547 | + |
---|
| 548 | + return 0; |
---|
| 549 | +} |
---|
| 550 | + |
---|
| 551 | +static int fpu__drop_kernel_fpstate(unsigned int cpu) |
---|
| 552 | +{ |
---|
| 553 | + struct fpu *fpu = this_cpu_read(in_kernel_fpstate); |
---|
| 554 | + |
---|
| 555 | + kfree(fpu); |
---|
| 556 | + |
---|
| 557 | + return 0; |
---|
| 558 | +} |
---|
| 559 | + |
---|
| 560 | +void fpu__suspend_inband(void) |
---|
| 561 | +{ |
---|
| 562 | + struct fpu *kfpu = this_cpu_read(in_kernel_fpstate); |
---|
| 563 | + struct task_struct *tsk = current; |
---|
| 564 | + |
---|
| 565 | + if (kernel_fpu_disabled()) { |
---|
| 566 | + copy_fpregs_to_fpstate(kfpu); |
---|
| 567 | + __cpu_invalidate_fpregs_state(); |
---|
| 568 | + oob_fpu_set_preempt(&tsk->thread.fpu); |
---|
| 569 | + } |
---|
| 570 | +} |
---|
| 571 | + |
---|
| 572 | +void fpu__resume_inband(void) |
---|
| 573 | +{ |
---|
| 574 | + struct fpu *kfpu = this_cpu_read(in_kernel_fpstate); |
---|
| 575 | + struct task_struct *tsk = current; |
---|
| 576 | + |
---|
| 577 | + if (oob_fpu_preempted(&tsk->thread.fpu)) { |
---|
| 578 | + copy_kernel_to_fpregs(&kfpu->state); |
---|
| 579 | + __cpu_invalidate_fpregs_state(); |
---|
| 580 | + oob_fpu_clear_preempt(&tsk->thread.fpu); |
---|
| 581 | + } else if (!(tsk->flags & PF_KTHREAD) && |
---|
| 582 | + test_thread_flag(TIF_NEED_FPU_LOAD)) |
---|
| 583 | + switch_fpu_return(); |
---|
| 584 | +} |
---|
| 585 | + |
---|
| 586 | +static void __init fpu__init_dovetail(void) |
---|
| 587 | +{ |
---|
| 588 | + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, |
---|
| 589 | + "platform/x86/dovetail:online", |
---|
| 590 | + fpu__init_kernel_fpstate, fpu__drop_kernel_fpstate); |
---|
| 591 | +} |
---|
| 592 | +core_initcall(fpu__init_dovetail); |
---|
| 593 | + |
---|
| 594 | +#endif |
---|