2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/tracepoint.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (C) 2008-2014 Mathieu Desnoyers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -28,11 +15,56 @@
 #include <linux/sched/task.h>
 #include <linux/static_key.h>

+enum tp_func_state {
+        TP_FUNC_0,
+        TP_FUNC_1,
+        TP_FUNC_2,
+        TP_FUNC_N,
+};
+
 extern tracepoint_ptr_t __start___tracepoints_ptrs[];
 extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

 DEFINE_SRCU(tracepoint_srcu);
 EXPORT_SYMBOL_GPL(tracepoint_srcu);
+
+enum tp_transition_sync {
+        TP_TRANSITION_SYNC_1_0_1,
+        TP_TRANSITION_SYNC_N_2_1,
+
+        _NR_TP_TRANSITION_SYNC,
+};
+
+struct tp_transition_snapshot {
+        unsigned long rcu;
+        unsigned long srcu;
+        bool ongoing;
+};
+
+/* Protected by tracepoints_mutex */
+static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
+
+static void tp_rcu_get_state(enum tp_transition_sync sync)
+{
+        struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+        /* Keep the latest get_state snapshot. */
+        snapshot->rcu = get_state_synchronize_rcu();
+        snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
+        snapshot->ongoing = true;
+}
+
+static void tp_rcu_cond_sync(enum tp_transition_sync sync)
+{
+        struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+        if (!snapshot->ongoing)
+                return;
+        cond_synchronize_rcu(snapshot->rcu);
+        if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
+                synchronize_srcu(&tracepoint_srcu);
+        snapshot->ongoing = false;
+}

 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
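
The tp_rcu_get_state()/tp_rcu_cond_sync() pair added above is built on the kernel's polled grace-period API: the snapshot records "a grace period starting now" without blocking, and the conditional sync later blocks only if that grace period has not already elapsed on its own. A minimal sketch of the same pattern in isolation (the my_* names are hypothetical):

#include <linux/rcupdate.h>
#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);

static unsigned long my_rcu_cookie;
static unsigned long my_srcu_cookie;

static void my_snapshot(void)
{
        /* Record the current grace-period state; neither call blocks. */
        my_rcu_cookie = get_state_synchronize_rcu();
        my_srcu_cookie = start_poll_synchronize_srcu(&my_srcu);
}

static void my_cond_sync(void)
{
        /* Block only if the recorded RCU grace period is still pending. */
        cond_synchronize_rcu(my_rcu_cookie);
        /* Same idea for SRCU: poll first, fall back to a full wait. */
        if (!poll_state_synchronize_srcu(&my_srcu, my_srcu_cookie))
                synchronize_srcu(&my_srcu);
}

In the common case where registrations are far apart in time, the grace period has long expired by the time the next transition happens, so the sync side costs almost nothing.
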
@@ -63,7 +95,7 @@
  */
 struct tp_probes {
         struct rcu_head rcu;
-        struct tracepoint_func probes[0];
+        struct tracepoint_func probes[];
 };

 /* Called in removal of a func but failed to allocate a new tp_funcs */
@@ -74,8 +106,8 @@

 static inline void *allocate_probes(int count)
 {
-        struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
-                        + sizeof(struct tp_probes), GFP_KERNEL);
+        struct tp_probes *p = kmalloc(struct_size(p, probes, count),
+                                      GFP_KERNEL);
         return p == NULL ? NULL : p->probes;
 }

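
These two hunks modernize the flexible array: `probes[0]` becomes a proper C99 flexible array member, and the open-coded size arithmetic becomes struct_size(), which computes sizeof(*p) plus count elements and saturates to SIZE_MAX on overflow so kmalloc() fails cleanly instead of under-allocating. A standalone sketch of the idiom (struct names are hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
        u64 key;
        u64 val;
};

struct item_array {
        int nr;
        struct item items[];    /* flexible array member, as in tp_probes */
};

static struct item_array *alloc_items(int count)
{
        struct item_array *p;

        /*
         * struct_size(p, items, count) == sizeof(*p) + count * sizeof(p->items[0]),
         * saturating on overflow. p is only used inside sizeof, so using it
         * before initialization is fine.
         */
        p = kmalloc(struct_size(p, items, count), GFP_KERNEL);
        if (p)
                p->nr = count;
        return p;
}
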
@@ -98,7 +130,7 @@
         while (early_probes) {
                 tmp = early_probes;
                 early_probes = tmp->next;
-                call_rcu_sched(tmp, rcu_free_old_probes);
+                call_rcu(tmp, rcu_free_old_probes);
         }

         return 0;
@@ -129,7 +161,7 @@
                  * cover both cases. So let us chain the SRCU and sched RCU
                  * callbacks to wait for both grace periods.
                  */
-                call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
+                call_rcu(&tp_probes->rcu, rcu_free_old_probes);
         }
 }

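
These two hunks are mechanical: the RCU flavors were consolidated upstream, so a plain call_rcu() grace period now also covers preempt-disabled readers and the removed call_rcu_sched() API is replaced one for one. The callback chaining the comment above refers to looks roughly like this in isolation (hypothetical my_* names): the RCU callback enqueues an SRCU callback, so the memory is freed only after readers of both flavors are done.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);

struct my_obj {
        struct rcu_head rcu;
        /* payload ... */
};

static void my_free_after_srcu(struct rcu_head *head)
{
        /* Both the RCU and the SRCU grace periods have now elapsed. */
        kfree(container_of(head, struct my_obj, rcu));
}

static void my_free_after_rcu(struct rcu_head *head)
{
        /* RCU readers are done; now wait out SRCU readers too. */
        call_srcu(&my_srcu, head, my_free_after_srcu);
}

static void my_release(struct my_obj *obj)
{
        call_rcu(&obj->rcu, my_free_after_rcu);
}
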
@@ -281,6 +313,32 @@
 }

 /*
+ * Count the number of functions (enum tp_func_state) in a tp_funcs array.
+ */
+static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
+{
+        if (!tp_funcs)
+                return TP_FUNC_0;
+        if (!tp_funcs[1].func)
+                return TP_FUNC_1;
+        if (!tp_funcs[2].func)
+                return TP_FUNC_2;
+        return TP_FUNC_N;       /* 3 or more */
+}
+
+static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
+{
+        void *func = tp->iterator;
+
+        /* Synthetic events do not have static call sites */
+        if (!tp->static_call_key)
+                return;
+        if (nr_func_state(tp_funcs) == TP_FUNC_1)
+                func = tp_funcs[0].func;
+        __static_call_update(tp->static_call_key, tp->static_call_tramp, func);
+}
+
+/*
  * Add the probe function to a tracepoint.
  */
 static int tracepoint_add_func(struct tracepoint *tp,
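
tracepoint_update_call() drives a static call: with exactly one registered probe the patched call site jumps straight to that probe, otherwise it falls back to the generic iterator. The public API this builds on can be sketched as follows (names are hypothetical; the code above uses the lower-level __static_call_update() because each tracepoint carries its own key/trampoline pair):

#include <linux/static_call.h>

static int my_iterator(int x)
{
        /* slow path: would walk a callback array */
        return x;
}

static int my_single_probe(int x)
{
        /* fast path: the only registered callback */
        return x + 1;
}

DEFINE_STATIC_CALL(my_call, my_iterator);

static void my_switch_to_single(void)
{
        /* Patches every static_call(my_call) site in place. */
        static_call_update(my_call, my_single_probe);
}

static int my_invoke(int x)
{
        return static_call(my_call)(x);
}
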
@@ -310,9 +368,42 @@
          * a pointer to it. This array is referenced by __DO_TRACE from
          * include/linux/tracepoint.h using rcu_dereference_sched().
          */
-        rcu_assign_pointer(tp->funcs, tp_funcs);
-        if (!static_key_enabled(&tp->key))
-                static_key_slow_inc(&tp->key);
+        switch (nr_func_state(tp_funcs)) {
+        case TP_FUNC_1:         /* 0->1 */
+                /*
+                 * Make sure new static func never uses old data after a
+                 * 1->0->1 transition sequence.
+                 */
+                tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
+                /* Set static call to first function */
+                tracepoint_update_call(tp, tp_funcs);
+                /* Both iterator and static call handle NULL tp->funcs */
+                rcu_assign_pointer(tp->funcs, tp_funcs);
+                static_key_enable(&tp->key);
+                break;
+        case TP_FUNC_2:         /* 1->2 */
+                /* Set iterator static call */
+                tracepoint_update_call(tp, tp_funcs);
+                /*
+                 * Iterator callback installed before updating tp->funcs.
+                 * Requires ordering between RCU assign/dereference and
+                 * static call update/call.
+                 */
+                fallthrough;
+        case TP_FUNC_N:         /* N->N+1 (N>1) */
+                rcu_assign_pointer(tp->funcs, tp_funcs);
+                /*
+                 * Make sure static func never uses incorrect data after a
+                 * N->...->2->1 (N>1) transition sequence.
+                 */
+                if (tp_funcs[0].data != old[0].data)
+                        tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+                break;
+        default:
+                WARN_ON_ONCE(1);
+                break;
+        }
+
         release_probes(old);
         return 0;
 }
@@ -338,15 +429,53 @@
                 /* Failed allocating new tp_funcs, replaced func with stub */
                 return 0;

-        if (!tp_funcs) {
+        switch (nr_func_state(tp_funcs)) {
+        case TP_FUNC_0:         /* 1->0 */
                 /* Removed last function */
                 if (tp->unregfunc && static_key_enabled(&tp->key))
                         tp->unregfunc();

-                if (static_key_enabled(&tp->key))
-                        static_key_slow_dec(&tp->key);
+                static_key_disable(&tp->key);
+                /* Set iterator static call */
+                tracepoint_update_call(tp, tp_funcs);
+                /* Both iterator and static call handle NULL tp->funcs */
+                rcu_assign_pointer(tp->funcs, NULL);
+                /*
+                 * Make sure new static func never uses old data after a
+                 * 1->0->1 transition sequence.
+                 */
+                tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
+                break;
+        case TP_FUNC_1:         /* 2->1 */
+                rcu_assign_pointer(tp->funcs, tp_funcs);
+                /*
+                 * Make sure static func never uses incorrect data after a
+                 * N->...->2->1 (N>2) transition sequence. If the first
+                 * element's data has changed, then force the synchronization
+                 * to prevent current readers that have loaded the old data
+                 * from calling the new function.
+                 */
+                if (tp_funcs[0].data != old[0].data)
+                        tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+                tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
+                /* Set static call to first function */
+                tracepoint_update_call(tp, tp_funcs);
+                break;
+        case TP_FUNC_2:         /* N->N-1 (N>2) */
+                fallthrough;
+        case TP_FUNC_N:
+                rcu_assign_pointer(tp->funcs, tp_funcs);
+                /*
+                 * Make sure static func never uses incorrect data after a
+                 * N->...->2->1 (N>2) transition sequence.
+                 */
+                if (tp_funcs[0].data != old[0].data)
+                        tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+                break;
+        default:
+                WARN_ON_ONCE(1);
+                break;
         }
-        rcu_assign_pointer(tp->funcs, tp_funcs);
         release_probes(old);
         return 0;
 }
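
Taken together, the two switch statements implement a small state machine keyed on the new probe count. For reference, derived directly from the cases above:

    transition   static call target            grace-period handling
    0 -> 1       single probe                  cond-sync SYNC_1_0_1 before install
    1 -> 2       iterator                      snapshot SYNC_N_2_1 if probes[0].data changed
    N -> N+1     iterator (unchanged)          snapshot SYNC_N_2_1 if probes[0].data changed
    1 -> 0       iterator, tp->funcs = NULL    snapshot SYNC_1_0_1 after disabling the key
    2 -> 1       single probe                  snapshot if data changed, then cond-sync SYNC_N_2_1
    N -> N-1     iterator (unchanged)          snapshot SYNC_N_2_1 if probes[0].data changed

The asymmetry is deliberate: the non-blocking snapshot is taken on the removal/downgrade side, and the potentially blocking cond-sync runs only on the next 0->1 or 2->1 upgrade, and only if the recorded grace period has not already elapsed.
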
@@ -609,7 +738,7 @@
         case MODULE_STATE_UNFORMED:
                 break;
         }
-        return ret;
+        return notifier_from_errno(ret);
 }

 static struct notifier_block tracepoint_module_nb = {
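
This hunk fixes the module-notifier return convention: notifier callbacks must return NOTIFY_* codes, not raw errnos. notifier_from_errno() folds a -Exxx value into NOTIFY_STOP_MASK so the chain stops and the caller can recover the original errno with notifier_to_errno(). The helpers in include/linux/notifier.h look roughly like this:

static inline int notifier_from_errno(int err)
{
        if (err)
                return NOTIFY_STOP_MASK | (NOTIFY_OK - err);

        return NOTIFY_OK;
}

static inline int notifier_to_errno(int ret)
{
        ret &= ~NOTIFY_STOP_MASK;
        return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}
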
@@ -678,3 +807,82 @@
         }
 }
 #endif
+
+#ifdef CONFIG_ANDROID_VENDOR_HOOKS
+
+static void *rvh_zalloc_funcs(int count)
+{
+        return kzalloc(sizeof(struct tracepoint_func) * count, GFP_KERNEL);
+}
+
+#define ANDROID_RVH_NR_PROBES_MAX       2
+static int rvh_func_add(struct tracepoint *tp, struct tracepoint_func *func)
+{
+        int i;
+
+        if (!static_key_enabled(&tp->key)) {
+                /* '+ 1' for the last NULL element */
+                tp->funcs = rvh_zalloc_funcs(ANDROID_RVH_NR_PROBES_MAX + 1);
+                if (!tp->funcs)
+                        return -ENOMEM;
+        }
+
+        for (i = 0; i < ANDROID_RVH_NR_PROBES_MAX; i++) {
+                if (!tp->funcs[i].func) {
+                        if (!static_key_enabled(&tp->key))
+                                tp->funcs[i].data = func->data;
+                        WRITE_ONCE(tp->funcs[i].func, func->func);
+
+                        return 0;
+                }
+        }
+
+        return -EBUSY;
+}
+
+static int android_rvh_add_func(struct tracepoint *tp, struct tracepoint_func *func)
+{
+        int ret;
+
+        if (tp->regfunc && !static_key_enabled(&tp->key)) {
+                ret = tp->regfunc();
+                if (ret < 0)
+                        return ret;
+        }
+
+        ret = rvh_func_add(tp, func);
+        if (ret)
+                return ret;
+        tracepoint_update_call(tp, tp->funcs);
+        static_key_enable(&tp->key);
+
+        return 0;
+}
+
+int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data)
+{
+        struct tracepoint_func tp_func;
+        int ret;
+
+        /*
+         * Once the static key has been flipped, the array may be read
+         * concurrently. Although __traceiter_*() always checks .func first,
+         * it doesn't enforce read->read dependencies, and we can't strongly
+         * guarantee it will see the correct .data for the second element
+         * without adding smp_load_acquire() in the fast path. But this is a
+         * corner case which is unlikely to be needed by anybody in practice,
+         * so let's just forbid it and keep the fast path clean.
+         */
+        if (WARN_ON(static_key_enabled(&tp->key) && data))
+                return -EINVAL;
+
+        mutex_lock(&tracepoints_mutex);
+        tp_func.func = probe;
+        tp_func.data = data;
+        ret = android_rvh_add_func(tp, &tp_func);
+        mutex_unlock(&tracepoints_mutex);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(android_rvh_probe_register);
+#endif
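
For context, android_rvh_probe_register() is the backend for Android's restricted vendor hooks ("rvh"); vendor modules normally reach it through the register_trace_android_rvh_<name>() wrapper generated by DECLARE_RESTRICTED_HOOK. A hedged usage sketch follows; the hook name, its header, and the probe signature are hypothetical:

#include <linux/sched.h>
#include <trace/hooks/sched.h>  /* assumed header declaring the hook */

/* First argument is the registration 'data' cookie, here unused. */
static void my_probe(void *unused, struct task_struct *p)
{
        /* Runs inline in the traced path; keep it short and don't sleep. */
}

static int __init my_vendor_init(void)
{
        /*
         * Restricted hooks support at most ANDROID_RVH_NR_PROBES_MAX
         * probes and expose no unregister path, so registration is
         * effectively permanent for the lifetime of the kernel.
         */
        return register_trace_android_rvh_example(my_probe, NULL);
}
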