2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/x86/include/asm/fpu/api.h
@@ -10,18 +10,75 @@
 
 #ifndef _ASM_X86_FPU_API_H
 #define _ASM_X86_FPU_API_H
+#include <linux/bottom_half.h>
 
 /*
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
  * disables preemption so be careful if you intend to use it for long periods
  * of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
  * irq_fpu_usable() if it is possible.
  */
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387	_BITUL(0)	/* 387 state will be initialized */
+#define KFPU_MXCSR	_BITUL(1)	/* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
 extern void kernel_fpu_end(void);
-extern void kernel_fpu_resched(void);
 extern bool irq_fpu_usable(void);
+extern void fpregs_mark_activate(void);
+extern void kernel_fpu_resched(void);
+
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
+/*
+ * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
+ * A context switch will (and softirq might) save CPU's FPU registers to
+ * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
+ * a random state.
+ *
+ * local_bh_disable() protects against both preemption and soft interrupts
+ * on !RT kernels.
+ *
+ * On RT kernels local_bh_disable() is not sufficient because it only
+ * serializes soft interrupt related sections via a local lock, but stays
+ * preemptible. Disabling preemption is the right choice here as bottom
+ * half processing is always in thread context on RT kernels so it
+ * implicitly prevents bottom half processing as well.
+ *
+ * Disabling preemption also serializes against kernel_fpu_begin().
+ */
+static inline void fpregs_lock(void)
+{
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_disable();
+	else
+		preempt_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_enable();
+	else
+		preempt_enable();
+}
+
+#ifdef CONFIG_X86_DEBUG_FPU
+extern void fpregs_assert_state_consistent(void);
+#else
+static inline void fpregs_assert_state_consistent(void) { }
+#endif
+
+/*
+ * Load the task FPU state before returning to userspace.
+ */
+extern void switch_fpu_return(void);
 
 /*
  * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
@@ -32,4 +89,12 @@
  */
 extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
 
+/*
+ * Tasks that are not using SVA have mm->pasid set to zero to note that they
+ * will not have the valid bit set in MSR_IA32_PASID while they are running.
+ */
+#define PASID_DISABLED	0
+
+static inline void update_pasid(void) { }
+
 #endif /* _ASM_X86_FPU_API_H */
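
For context, this is roughly how kernel code is expected to consume the interface declared above. The sketch below is not part of the patch; the function name and its body are hypothetical. It shows the calling convention the header comment describes: a caller that may run in irq/softirq context checks irq_fpu_usable() first, and keeps the begin/end region short because preemption is disabled throughout. A caller that only touches SSE/AVX state could call kernel_fpu_begin_mask(KFPU_MXCSR) instead of kernel_fpu_begin() to skip the 387 initialization.

#include <linux/types.h>
#include <asm/fpu/api.h>

/*
 * Hypothetical example, not part of the patch: the intended calling
 * convention for the kernel-mode FPU API declared above.
 */
static bool try_simd_fill(void *dst, size_t len)
{
	/* In irq/softirq context the FPU may already be in use. */
	if (!irq_fpu_usable())
		return false;	/* caller falls back to a scalar path */

	kernel_fpu_begin();	/* disables preemption, inits 387 + MXCSR */
	/* ... SSE/AVX instructions may be used on dst here ... */
	kernel_fpu_end();
	return true;
}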
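
Likewise, a minimal sketch of the fpregs_lock() pattern described in the new comment block; the function is hypothetical and the actual state edit is elided. Between fpregs_lock() and fpregs_unlock(), neither a context switch nor (on !RT kernels) softirq processing can save the FPU registers into fpu->state behind the caller's back.

#include <asm/fpu/api.h>

/*
 * Hypothetical example, not part of the patch: guarding an edit of the
 * current task's FPU state. fpregs_lock() maps to local_bh_disable()
 * on !RT kernels and to preempt_disable() on RT kernels, as defined
 * above, so the same call site is correct on both preemption models.
 */
static void edit_current_fpstate(void)
{
	fpregs_lock();
	/* ... edit the CPU's FPU registers or current's fpu->state ... */
	fpregs_unlock();
}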