@@ -13,7 +13,7 @@
  * On x86_64, %gs is shared by percpu area and stack canary. All
  * percpu symbols are zero based and %gs points to the base of percpu
  * area. The first occupant of the percpu area is always
- * irq_stack_union which contains stack_canary at offset 40. Userland
+ * fixed_percpu_data which contains stack_canary at offset 40. Userland
  * %gs is always saved and restored on kernel entry and exit using
  * swapgs, so stack protector doesn't add any complexity there.
  *
@@ -65,11 +65,11 @@
  */
 static __always_inline void boot_init_stack_canary(void)
 {
-	u64 uninitialized_var(canary);
+	u64 canary = 0;
 	u64 tsc;
 
 #ifdef CONFIG_X86_64
-	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+	BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
 #endif
 	/*
 	 * We both use the random pool and the current TSC as a source
@@ -81,7 +81,7 @@
 	 * due to locking constraints. We just leave canary
 	 * uninitialized and use the TSC based randomness on top of it.
 	 */
-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifndef CONFIG_PREEMPT_RT
 	get_random_bytes(&canary, sizeof(canary));
 #endif
 	tsc = rdtsc();
@@ -90,9 +90,18 @@
 
 	current->stack_canary = canary;
 #ifdef CONFIG_X86_64
-	this_cpu_write(irq_stack_union.stack_canary, canary);
+	this_cpu_write(fixed_percpu_data.stack_canary, canary);
 #else
 	this_cpu_write(stack_canary.canary, canary);
+#endif
+}
+
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_X86_64
+	per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
+#else
+	per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
 #endif
 }
 
@@ -125,6 +134,9 @@
 static inline void setup_stack_canary_segment(int cpu)
 { }
 
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{ }
+
 static inline void load_stack_canary_segment(void)
 {
 #ifdef CONFIG_X86_32