2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/include/asm/stackprotector.h
@@ -13,7 +13,7 @@
  * On x86_64, %gs is shared by percpu area and stack canary. All
  * percpu symbols are zero based and %gs points to the base of percpu
  * area. The first occupant of the percpu area is always
- * irq_stack_union which contains stack_canary at offset 40. Userland
+ * fixed_percpu_data which contains stack_canary at offset 40. Userland
  * %gs is always saved and restored on kernel entry and exit using
  * swapgs, so stack protector doesn't add any complexity there.
  *
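
For orientation, a rough sketch of the percpu layout the comment above describes; the authoritative definition lives in arch/x86/include/asm/processor.h, so treat this as a paraphrase of that type rather than a copy of it:

/* Sketch of the first occupant of the x86_64 percpu area. */
struct fixed_percpu_data {
	/*
	 * GCC-compiled stack-protector code reads the canary from the
	 * hardcoded address %gs:40, so 40 bytes of padding keep
	 * stack_canary at exactly that offset.
	 */
	char		gs_base[40];
	unsigned long	stack_canary;
};
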
@@ -69,7 +69,7 @@
 	u64 tsc;

 #ifdef CONFIG_X86_64
-	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+	BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
 #endif
 	/*
 	 * We both use the random pool and the current TSC as a source
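
The BUILD_BUG_ON above turns the offset-40 requirement into a build failure rather than a runtime surprise. Outside the kernel, the same idea can be written with C11's _Static_assert; struct demo below is a hypothetical stand-in, not the kernel type:

#include <stddef.h>	/* offsetof */

struct demo {
	char pad[40];
	unsigned long canary;
};

/* Refuses to compile if the canary ever drifts away from offset 40. */
_Static_assert(offsetof(struct demo, canary) == 40,
	       "canary must sit at the hardcoded %gs:40 slot");
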
@@ -84,9 +84,18 @@

 	current->stack_canary = canary;
 #ifdef CONFIG_X86_64
-	this_cpu_write(irq_stack_union.stack_canary, canary);
+	this_cpu_write(fixed_percpu_data.stack_canary, canary);
 #else
 	this_cpu_write(stack_canary.canary, canary);
+#endif
+}
+
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_X86_64
+	per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
+#else
+	per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
+#endif
+}
+
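
Note the pairing in this hunk: boot_init_stack_canary() uses this_cpu_write() because a CPU seeds its own canary, while the new cpu_init_stack_canary() uses per_cpu(..., cpu) because the boot CPU seeds another CPU's percpu area on its behalf. A hypothetical bringup-side caller, not part of this patch, assuming the kernel-internal idle_thread_get() helper from kernel/smpboot.c:

#include <linux/err.h>
#include <linux/sched.h>

static int example_bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);

	if (IS_ERR(idle))
		return PTR_ERR(idle);

	/*
	 * The target CPU's percpu canary must match the canary stored
	 * in its idle task before that CPU runs any stack-protected
	 * function, or the first canary check would panic.
	 */
	cpu_init_stack_canary(cpu, idle);

	return 0;	/* arch code would then actually start the CPU */
}
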
@@ -119,6 +128,9 @@
 static inline void setup_stack_canary_segment(int cpu)
 { }

+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{ }
+
 static inline void load_stack_canary_segment(void)
 {
 #ifdef CONFIG_X86_32