2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/include/asm/fpu/api.h
@@ -10,17 +10,59 @@
 
 #ifndef _ASM_X86_FPU_API_H
 #define _ASM_X86_FPU_API_H
+#include <linux/bottom_half.h>
 
 /*
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
  * disables preemption so be careful if you intend to use it for long periods
  * of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
  * irq_fpu_usable() if it is possible.
  */
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387	_BITUL(0)	/* 387 state will be initialized */
+#define KFPU_MXCSR	_BITUL(1)	/* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
+extern void fpregs_mark_activate(void);
+
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
+/*
+ * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
+ * A context switch will (and softirq might) save CPU's FPU registers to
+ * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
+ * a random state.
+ */
+static inline void fpregs_lock(void)
+{
+	preempt_disable();
+	local_bh_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+	local_bh_enable();
+	preempt_enable();
+}
+
+#ifdef CONFIG_X86_DEBUG_FPU
+extern void fpregs_assert_state_consistent(void);
+#else
+static inline void fpregs_assert_state_consistent(void) { }
+#endif
+
+/*
+ * Load the task FPU state before returning to userspace.
+ */
+extern void switch_fpu_return(void);
 
 /*
  * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
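A usage sketch for the API in the hunk above (not part of the patch; xor_block() and xor_block_fpu() are hypothetical stand-ins): callers that may run in irq/softirq context check irq_fpu_usable() first, then bracket the FPU work with kernel_fpu_begin()/kernel_fpu_end() and keep the region short, since preemption is disabled throughout.

#include <linux/types.h>
#include <asm/fpu/api.h>

/* Stand-in for real SSE/AVX code; plain C so the sketch stays self-contained. */
static void xor_block(u8 *dst, const u8 *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] ^= src[i];
}

static void xor_block_fpu(u8 *dst, const u8 *src, size_t len)
{
	/* In irq/softirq context the FPU may already be in use. */
	if (!irq_fpu_usable()) {
		xor_block(dst, src, len);
		return;
	}

	kernel_fpu_begin();	/* disables preemption, inits 387 + MXCSR */
	xor_block(dst, src, len);
	kernel_fpu_end();
}

Code that only executes SSE/AVX instructions can call kernel_fpu_begin_mask(KFPU_MXCSR) instead of kernel_fpu_begin() to skip the legacy 387 initialization. fpregs_lock()/fpregs_unlock() serve the other direction: they protect edits to current's fpu->state (or the live registers) from being clobbered by a context switch or softirq, i.e.

	fpregs_lock();
	/* edit current->thread.fpu state or the live FPU registers here */
	fpregs_unlock();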
@@ -31,4 +73,12 @@
  */
 extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
 
+/*
+ * Tasks that are not using SVA have mm->pasid set to zero to note that they
+ * will not have the valid bit set in MSR_IA32_PASID while they are running.
+ */
+#define PASID_DISABLED	0
+
+static inline void update_pasid(void) { }
+
 #endif /* _ASM_X86_FPU_API_H */
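And a sketch of probing xfeatures with cpu_has_xfeatures() from the second hunk (the XFEATURE_MASK_* constants come from <asm/fpu/xstate.h>; my_driver_init() is a hypothetical init function):

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>

static bool have_avx;

static int __init my_driver_init(void)
{
	const char *missing;

	/* Nonzero only when the CPU and kernel support both SSE and YMM state. */
	have_avx = cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
				     &missing);
	if (!have_avx)
		pr_info("AVX path disabled, missing feature: %s\n", missing);

	return 0;
}

On failure cpu_has_xfeatures() reports the first missing feature through its second argument, so a driver can log exactly why its accelerated path stays disabled.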