2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/include/asm/stackprotector.h
@@ -13,7 +13,7 @@
  * On x86_64, %gs is shared by percpu area and stack canary. All
  * percpu symbols are zero based and %gs points to the base of percpu
  * area. The first occupant of the percpu area is always
- * irq_stack_union which contains stack_canary at offset 40. Userland
+ * fixed_percpu_data which contains stack_canary at offset 40. Userland
  * %gs is always saved and restored on kernel entry and exit using
  * swapgs, so stack protector doesn't add any complexity there.
  *
@@ -65,34 +65,37 @@
  */
 static __always_inline void boot_init_stack_canary(void)
 {
-	u64 uninitialized_var(canary);
+	u64 canary;
 	u64 tsc;
 
 #ifdef CONFIG_X86_64
-	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+	BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
 #endif
 	/*
 	 * We both use the random pool and the current TSC as a source
 	 * of randomness. The TSC only matters for very early init,
 	 * there it already has some randomness on most systems. Later
 	 * on during the bootup the random pool has true entropy too.
-	 * For preempt-rt we need to weaken the randomness a bit, as
-	 * we can't call into the random generator from atomic context
-	 * due to locking constraints. We just leave canary
-	 * uninitialized and use the TSC based randomness on top of it.
 	 */
-#ifndef CONFIG_PREEMPT_RT_FULL
 	get_random_bytes(&canary, sizeof(canary));
-#endif
 	tsc = rdtsc();
 	canary += tsc + (tsc << 32UL);
 	canary &= CANARY_MASK;
 
 	current->stack_canary = canary;
 #ifdef CONFIG_X86_64
-	this_cpu_write(irq_stack_union.stack_canary, canary);
+	this_cpu_write(fixed_percpu_data.stack_canary, canary);
 #else
 	this_cpu_write(stack_canary.canary, canary);
+#endif
+}
+
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_X86_64
+	per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
+#else
+	per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
 #endif
 }
 
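A note on the arithmetic in the hunk above: get_random_bytes() seeds the canary from the kernel's entropy pool, the TSC is then folded into both 32-bit halves of the value so that very early in boot, when the pool may still be weak, the canary still varies per boot, and CANARY_MASK keeps a zero byte in the canary so it embeds a NUL string terminator. A minimal user-space sketch of the same mixing, with getrandom(2) and __rdtsc() standing in for get_random_bytes()/rdtsc(), and a CANARY_MASK value assumed to match the little-endian 64-bit kernel definition:

/*
 * Illustrative user-space sketch, not kernel code: getrandom(2) and
 * __rdtsc() stand in for get_random_bytes()/rdtsc(), and CANARY_MASK
 * is an assumed value (low byte cleared so the canary contains a NUL
 * terminator, stopping string-based overflows).
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/random.h>
#include <x86intrin.h>

#define CANARY_MASK 0xffffffffffffff00UL	/* assumed value */

int main(void)
{
	uint64_t canary = 0, tsc;

	/* entropy-pool analogue of get_random_bytes() */
	if (getrandom(&canary, sizeof(canary), 0) < 0)
		return 1;

	/* fold the TSC into both halves, as the patch does */
	tsc = __rdtsc();
	canary += tsc + (tsc << 32UL);
	canary &= CANARY_MASK;

	printf("canary: %#018llx\n", (unsigned long long)canary);
	return 0;
}

Compiled with gcc on an x86-64 box, the printed value changes on every run while the low byte stays zero.
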
@@ -125,6 +128,9 @@
 static inline void setup_stack_canary_segment(int cpu)
 { }
 
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{ }
+
 static inline void load_stack_canary_segment(void)
 {
 #ifdef CONFIG_X86_32
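
The BUILD_BUG_ON() in the second hunk pins the layout that the compiler bakes into every protected function: on x86-64 the kernel's stack protector reads the canary from %gs:40, so whichever structure sits first in the per-cpu area (irq_stack_union before this change, fixed_percpu_data after) must keep stack_canary exactly 40 bytes in. A hypothetical user-space mirror of that invariant, with the struct and field names modeled on the kernel's fixed_percpu_data but purely illustrative:

#include <stddef.h>

/*
 * Hypothetical mirror of the kernel's fixed_percpu_data layout; the
 * names are illustrative. The 40-byte gs_base member forces
 * stack_canary to offset 40, matching the %gs:40 loads the compiler
 * emits for stack-protected kernel functions on x86-64.
 */
struct fixed_percpu_data_demo {
	char		gs_base[40];
	unsigned long	stack_canary;
};

/* compile-time analogue of the patch's BUILD_BUG_ON() */
_Static_assert(offsetof(struct fixed_percpu_data_demo, stack_canary) == 40,
	       "stack canary must live at offset 40 (%gs:40)");

int main(void)
{
	return 0;	/* nothing to do at run time; the check is static */
}

The new cpu_init_stack_canary() helper, meanwhile, copies the idle task's canary into the target CPU's per-cpu area so a secondary CPU starts out with a valid %gs:40 value, and the empty stub in the last hunk keeps callers unconditional when stack protector support is compiled out.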