@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Context tracking: Probe on high level context boundaries such as kernel
  * and userspace. This includes syscalls and exceptions entry/exit.
@@ -24,13 +25,13 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/context_tracking.h>
 
-DEFINE_STATIC_KEY_FALSE(context_tracking_enabled);
-EXPORT_SYMBOL_GPL(context_tracking_enabled);
+DEFINE_STATIC_KEY_FALSE(context_tracking_key);
+EXPORT_SYMBOL_GPL(context_tracking_key);
 
 DEFINE_PER_CPU(struct context_tracking, context_tracking);
 EXPORT_SYMBOL_GPL(context_tracking);
 
-static bool context_tracking_recursion_enter(void)
+static noinstr bool context_tracking_recursion_enter(void)
 {
 	int recursion;
 
@@ -44,7 +45,7 @@
 	return false;
 }
 
-static void context_tracking_recursion_exit(void)
+static __always_inline void context_tracking_recursion_exit(void)
 {
 	__this_cpu_dec(context_tracking.recursion);
 }
@@ -58,7 +59,7 @@
  * instructions to execute won't use any RCU read side critical section
  * because this function sets RCU in extended quiescent state.
  */
-void __context_tracking_enter(enum ctx_state state)
+void noinstr __context_tracking_enter(enum ctx_state state)
 {
 	/* Kernel threads aren't supposed to go to userspace */
 	WARN_ON_ONCE(!current->mm);
@@ -76,8 +77,10 @@
 			 * on the tick.
 			 */
 			if (state == CONTEXT_USER) {
+				instrumentation_begin();
 				trace_user_enter(0);
 				vtime_user_enter(current);
+				instrumentation_end();
 			}
 			rcu_user_enter();
 		}
@@ -98,7 +101,6 @@
 	}
 	context_tracking_recursion_exit();
 }
-NOKPROBE_SYMBOL(__context_tracking_enter);
 EXPORT_SYMBOL_GPL(__context_tracking_enter);
 
 void context_tracking_enter(enum ctx_state state)
@@ -141,7 +143,7 @@
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void __context_tracking_exit(enum ctx_state state)
+void noinstr __context_tracking_exit(enum ctx_state state)
 {
 	if (!context_tracking_recursion_enter())
 		return;
@@ -154,14 +156,16 @@
 			 */
 			rcu_user_exit();
 			if (state == CONTEXT_USER) {
+				instrumentation_begin();
 				vtime_user_exit(current);
 				trace_user_exit(0);
+				instrumentation_end();
 			}
 		}
 		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
 	}
 	context_tracking_recursion_exit();
 }
-NOKPROBE_SYMBOL(__context_tracking_exit);
 EXPORT_SYMBOL_GPL(__context_tracking_exit);
 
 void context_tracking_exit(enum ctx_state state)
@@ -191,17 +194,19 @@
 
 	if (!per_cpu(context_tracking.active, cpu)) {
 		per_cpu(context_tracking.active, cpu) = true;
-		static_branch_inc(&context_tracking_enabled);
+		static_branch_inc(&context_tracking_key);
 	}
 
 	if (initialized)
 		return;
 
+#ifdef CONFIG_HAVE_TIF_NOHZ
 	/*
 	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
 	 * This assumes that init is the only task at this early boot stage.
	 */
 	set_tsk_thread_flag(&init_task, TIF_NOHZ);
+#endif
 	WARN_ON_ONCE(!tasklist_empty());
 
 	initialized = true;
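For reference, the recurring pattern in the hunks above is that functions on the kernel entry/exit path are marked noinstr so they stay free of instrumentation, while the pieces that are allowed to be instrumented (tracepoints, vtime accounting) are explicitly bracketed by instrumentation_begin()/instrumentation_end(). A minimal sketch of that shape follows; the function name is hypothetical and the includes assume the usual in-kernel build environment, not anything introduced by this patch.

#include <linux/compiler.h>		/* noinstr */
#include <linux/instrumentation.h>	/* instrumentation_begin()/instrumentation_end() */
#include <linux/types.h>

/* Illustrative sketch only; not part of the patch above. */
static void noinstr example_entry_path(bool user)
{
	if (user) {
		instrumentation_begin();
		/* instrumentable work (tracepoints, accounting) goes here */
		instrumentation_end();
	}
	/* everything outside the begin/end pair must remain non-instrumentable */
}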