@@ -28,7 +28,6 @@
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 extern void fpregs_mark_activate(void);
-extern void kernel_fpu_resched(void);
 
 /* Code that is unaware of kernel_fpu_begin_mask() can use this */
 static inline void kernel_fpu_begin(void)
@@ -41,32 +40,17 @@
  * A context switch will (and softirq might) save CPU's FPU registers to
  * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
  * a random state.
- *
- * local_bh_disable() protects against both preemption and soft interrupts
- * on !RT kernels.
- *
- * On RT kernels local_bh_disable() is not sufficient because it only
- * serializes soft interrupt related sections via a local lock, but stays
- * preemptible. Disabling preemption is the right choice here as bottom
- * half processing is always in thread context on RT kernels so it
- * implicitly prevents bottom half processing as well.
- *
- * Disabling preemption also serializes against kernel_fpu_begin().
  */
 static inline void fpregs_lock(void)
 {
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_bh_disable();
-	else
-		preempt_disable();
+	preempt_disable();
+	local_bh_disable();
 }
 
 static inline void fpregs_unlock(void)
 {
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_bh_enable();
-	else
-		preempt_enable();
+	local_bh_enable();
+	preempt_enable();
 }
 
 #ifdef CONFIG_X86_DEBUG_FPU