2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/scs.h
@@ -9,48 +9,59 @@
 #define _LINUX_SCS_H
 
 #include <linux/gfp.h>
+#include <linux/poison.h>
 #include <linux/sched.h>
-#include <asm/page.h>
+#include <linux/sizes.h>
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 
-/*
- * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
- * architecture) provided ~40% safety margin on stack usage while keeping
- * memory allocation overhead reasonable.
- */
-#define SCS_SIZE		1024UL
-#define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)
+#define SCS_ORDER		0
+#define SCS_SIZE		(PAGE_SIZE << SCS_ORDER)
+#define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)
 
-/*
- * A random number outside the kernel's virtual address space to mark the
- * end of the shadow stack.
- */
-#define SCS_END_MAGIC		0xaf0194819b1635f6UL
+/* An illegal pointer value to mark the end of the shadow stack. */
+#define SCS_END_MAGIC		(0x5f6UL + POISON_POINTER_DELTA)
 
-#define task_scs(tsk)		(task_thread_info(tsk)->shadow_call_stack)
+#define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
+#define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)
 
-static inline void task_set_scs(struct task_struct *tsk, void *s)
+void *scs_alloc(int node);
+void scs_free(void *s);
+void scs_init(void);
+int scs_prepare(struct task_struct *tsk, int node);
+void scs_release(struct task_struct *tsk);
+
+static inline void scs_task_reset(struct task_struct *tsk)
 {
-	task_scs(tsk) = s;
+	/*
+	 * Reset the shadow stack to the base address in case the task
+	 * is reused.
+	 */
+	task_scs_sp(tsk) = task_scs(tsk);
 }
 
-extern void scs_init(void);
-extern void scs_task_reset(struct task_struct *tsk);
-extern int scs_prepare(struct task_struct *tsk, int node);
-extern bool scs_corrupted(struct task_struct *tsk);
-extern void scs_release(struct task_struct *tsk);
+static inline unsigned long *__scs_magic(void *s)
+{
+	return (unsigned long *)(s + SCS_SIZE) - 1;
+}
+
+static inline bool task_scs_end_corrupted(struct task_struct *tsk)
+{
+	unsigned long *magic = __scs_magic(task_scs(tsk));
+	unsigned long sz = task_scs_sp(tsk) - task_scs(tsk);
+
+	return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
+}
 
 #else /* CONFIG_SHADOW_CALL_STACK */
 
-#define task_scs(tsk)		NULL
-
-static inline void task_set_scs(struct task_struct *tsk, void *s) {}
+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
 static inline void scs_init(void) {}
 static inline void scs_task_reset(struct task_struct *tsk) {}
 static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
-static inline bool scs_corrupted(struct task_struct *tsk) { return false; }
 static inline void scs_release(struct task_struct *tsk) {}
+static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
 
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
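For readers unfamiliar with the end-marker scheme this hunk introduces, below is a minimal, self-contained userspace sketch of the same idea: plant SCS_END_MAGIC in the last slot of the shadow stack at allocation time, and treat the stack as corrupted once that slot no longer holds the magic or the stack pointer has run off the end, mirroring __scs_magic() and task_scs_end_corrupted(). It is not part of the patch; the scs_demo_* names, the fixed 4 KiB size, and the plain calloc() allocation are assumptions made purely for illustration.

/*
 * Standalone mock of the shadow-call-stack end-marker check from the
 * patch above. Not kernel code: DEMO_SCS_SIZE stands in for
 * PAGE_SIZE << SCS_ORDER and DEMO_SCS_END_MAGIC for
 * 0x5f6UL + POISON_POINTER_DELTA.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_SCS_SIZE		4096UL
#define DEMO_SCS_END_MAGIC	0x5f6UL

/* Last unsigned long slot of the shadow stack, mirroring __scs_magic(). */
static unsigned long *scs_demo_magic(void *base)
{
	return (unsigned long *)((char *)base + DEMO_SCS_SIZE) - 1;
}

/* Allocate a zeroed shadow stack and plant the end marker, like scs_alloc(). */
static void *scs_demo_alloc(void)
{
	void *base = calloc(1, DEMO_SCS_SIZE);

	if (base)
		*scs_demo_magic(base) = DEMO_SCS_END_MAGIC;
	return base;
}

/*
 * Corruption check mirroring task_scs_end_corrupted(): the stack is bad if
 * the saved pointer has run past the usable area or the magic was clobbered.
 */
static bool scs_demo_end_corrupted(void *base, void *sp)
{
	unsigned long sz = (unsigned long)((char *)sp - (char *)base);

	return sz >= DEMO_SCS_SIZE - 1 ||
	       *scs_demo_magic(base) != DEMO_SCS_END_MAGIC;
}

int main(void)
{
	void *base = scs_demo_alloc();
	void *sp = base;			/* "scs_sp" starts at the base */

	printf("fresh stack corrupted?    %d\n", scs_demo_end_corrupted(base, sp));

	*scs_demo_magic(base) = 0;		/* simulate an overflow smashing the marker */
	printf("after overwrite corrupted? %d\n", scs_demo_end_corrupted(base, sp));

	free(base);
	return 0;
}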