2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Context tracking: Probe on high level context boundaries such as kernel
  * and userspace. This includes syscalls and exceptions entry/exit.
@@ -24,13 +25,13 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/context_tracking.h>
 
-DEFINE_STATIC_KEY_FALSE(context_tracking_enabled);
-EXPORT_SYMBOL_GPL(context_tracking_enabled);
+DEFINE_STATIC_KEY_FALSE(context_tracking_key);
+EXPORT_SYMBOL_GPL(context_tracking_key);
 
 DEFINE_PER_CPU(struct context_tracking, context_tracking);
 EXPORT_SYMBOL_GPL(context_tracking);
 
-static bool context_tracking_recursion_enter(void)
+static noinstr bool context_tracking_recursion_enter(void)
 {
 	int recursion;
 
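(A minimal consumer-side sketch, not part of this diff: the rename frees the
old symbol name for a wrapper helper, roughly as it appears in
include/linux/context_tracking_state.h of this era.)

	static __always_inline bool context_tracking_enabled(void)
	{
		/* Fast path: a patched-out jump label until a CPU enables tracking */
		return static_branch_unlikely(&context_tracking_key);
	}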
@@ -44,7 +45,7 @@
 	return false;
 }
 
-static void context_tracking_recursion_exit(void)
+static __always_inline void context_tracking_recursion_exit(void)
 {
 	__this_cpu_dec(context_tracking.recursion);
 }
@@ -58,7 +59,7 @@
  * instructions to execute won't use any RCU read side critical section
  * because this function sets RCU in extended quiescent state.
  */
-void __context_tracking_enter(enum ctx_state state)
+void noinstr __context_tracking_enter(enum ctx_state state)
 {
 	/* Kernel threads aren't supposed to go to userspace */
 	WARN_ON_ONCE(!current->mm);
@@ -76,8 +77,10 @@
 			 * on the tick.
 			 */
 			if (state == CONTEXT_USER) {
+				instrumentation_begin();
 				trace_user_enter(0);
 				vtime_user_enter(current);
+				instrumentation_end();
 			}
 			rcu_user_enter();
 		}
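(A sketch of the general pattern applied above, with a hypothetical function
name: a noinstr function may only call instrumentable code such as
tracepoints or vtime accounting inside an explicitly marked region, which
objtool validates at build time.)

	void noinstr example_entry_path(void)
	{
		/* non-instrumentable low-level entry work ... */
		instrumentation_begin();
		/* tracepoints, vtime accounting etc. are permitted here */
		instrumentation_end();
		/* ... back on the non-instrumentable path */
	}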
@@ -98,7 +101,6 @@
 	}
 	context_tracking_recursion_exit();
 }
-NOKPROBE_SYMBOL(__context_tracking_enter);
 EXPORT_SYMBOL_GPL(__context_tracking_enter);
 
 void context_tracking_enter(enum ctx_state state)
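(The dropped NOKPROBE_SYMBOL() is redundant once the function is noinstr:
noinstr places the function in the .noinstr.text section, which is already
covered by the kprobes blacklist. A simplified, approximate form of the
definition from <linux/compiler_types.h> of this era:)

	#define noinstr \
		noinline notrace __attribute__((__section__(".noinstr.text")))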
@@ -141,7 +143,7 @@
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void __context_tracking_exit(enum ctx_state state)
+void noinstr __context_tracking_exit(enum ctx_state state)
 {
 	if (!context_tracking_recursion_enter())
 		return;
@@ -154,15 +156,16 @@
 			 */
 			rcu_user_exit();
 			if (state == CONTEXT_USER) {
+				instrumentation_begin();
 				vtime_user_exit(current);
 				trace_user_exit(0);
+				instrumentation_end();
 			}
 		}
 		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
 	}
 	context_tracking_recursion_exit();
 }
-NOKPROBE_SYMBOL(__context_tracking_exit);
 EXPORT_SYMBOL_GPL(__context_tracking_exit);
 
 void context_tracking_exit(enum ctx_state state)
@@ -191,17 +194,19 @@
 
 	if (!per_cpu(context_tracking.active, cpu)) {
 		per_cpu(context_tracking.active, cpu) = true;
-		static_branch_inc(&context_tracking_enabled);
+		static_branch_inc(&context_tracking_key);
 	}
 
 	if (initialized)
 		return;
 
+#ifdef CONFIG_HAVE_TIF_NOHZ
 	/*
 	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
 	 * This assumes that init is the only task at this early boot stage.
 	 */
 	set_tsk_thread_flag(&init_task, TIF_NOHZ);
+#endif
 	WARN_ON_ONCE(!tasklist_empty());
 
 	initialized = true;
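(A sketch of a typical gated call site, assuming the user_enter() helper
shape from include/linux/context_tracking.h of this era: the static key
keeps the whole path a patched-out branch until the first CPU enables
tracking via the static_branch_inc() above.)

	static inline void user_enter(void)
	{
		if (context_tracking_enabled())
			context_tracking_enter(CONTEXT_USER);
	}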