2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/trace/trace_uprobe.c
@@ -7,6 +7,8 @@
  */
 #define pr_fmt(fmt) "trace_uprobe: " fmt
 
+#include <linux/security.h>
+#include <linux/ctype.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/uprobes.h>
@@ -14,7 +16,9 @@
 #include <linux/string.h>
 #include <linux/rculist.h>
 
+#include "trace_dynevent.h"
 #include "trace_probe.h"
+#include "trace_probe_tmpl.h"
 
 #define UPROBE_EVENT_SYSTEM "uprobes"
 
@@ -30,26 +34,54 @@
 #define DATAOF_TRACE_ENTRY(entry, is_return) \
 	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
 
-struct trace_uprobe_filter {
-	rwlock_t rwlock;
-	int nr_systemwide;
-	struct list_head perf_events;
+static int trace_uprobe_create(int argc, const char **argv);
+static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
+static int trace_uprobe_release(struct dyn_event *ev);
+static bool trace_uprobe_is_busy(struct dyn_event *ev);
+static bool trace_uprobe_match(const char *system, const char *event,
+			int argc, const char **argv, struct dyn_event *ev);
+
+static struct dyn_event_operations trace_uprobe_ops = {
+	.create = trace_uprobe_create,
+	.show = trace_uprobe_show,
+	.is_busy = trace_uprobe_is_busy,
+	.free = trace_uprobe_release,
+	.match = trace_uprobe_match,
 };
 
 /*
  * uprobe event core functions
  */
 struct trace_uprobe {
-	struct list_head list;
-	struct trace_uprobe_filter filter;
+	struct dyn_event devent;
 	struct uprobe_consumer consumer;
 	struct path path;
 	struct inode *inode;
 	char *filename;
 	unsigned long offset;
+	unsigned long ref_ctr_offset;
 	unsigned long nhit;
 	struct trace_probe tp;
 };
+
+static bool is_trace_uprobe(struct dyn_event *ev)
+{
+	return ev->ops == &trace_uprobe_ops;
+}
+
+static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
+{
+	return container_of(ev, struct trace_uprobe, devent);
+}
+
+/**
+ * for_each_trace_uprobe - iterate over the trace_uprobe list
+ * @pos: the struct trace_uprobe * for each entry
+ * @dpos: the struct dyn_event * to use as a loop cursor
+ */
+#define for_each_trace_uprobe(pos, dpos) \
+	for_each_dyn_event(dpos) \
+		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
 
 #define SIZEOF_TRACE_UPROBE(n) \
 	(offsetof(struct trace_uprobe, tp.args) + \
@@ -57,9 +89,6 @@
 
 static int register_uprobe_event(struct trace_uprobe *tu);
 static int unregister_uprobe_event(struct trace_uprobe *tu);
-
-static DEFINE_MUTEX(uprobe_lock);
-static LIST_HEAD(uprobe_list);
 
 struct uprobe_dispatch_data {
 	struct trace_uprobe *tu;
@@ -98,81 +127,84 @@
 /*
  * Uprobes-specific fetch functions
  */
-#define DEFINE_FETCH_stack(type) \
-static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
-					 void *offset, void *dest) \
-{ \
-	*(type *)dest = (type)get_user_stack_nth(regs, \
-					((unsigned long)offset)); \
-}
-DEFINE_BASIC_FETCH_FUNCS(stack)
-/* No string on the stack entry */
-#define fetch_stack_string NULL
-#define fetch_stack_string_size NULL
+static nokprobe_inline int
+probe_mem_read(void *dest, void *src, size_t size)
+{
+	void __user *vaddr = (void __force __user *)src;
 
-#define DEFINE_FETCH_memory(type) \
-static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
-					  void *addr, void *dest) \
-{ \
-	type retval; \
-	void __user *vaddr = (void __force __user *) addr; \
- \
-	if (copy_from_user(&retval, vaddr, sizeof(type))) \
-		*(type *)dest = 0; \
-	else \
-		*(type *) dest = retval; \
+	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
 }
-DEFINE_BASIC_FETCH_FUNCS(memory)
+
+static nokprobe_inline int
+probe_mem_read_user(void *dest, void *src, size_t size)
+{
+	return probe_mem_read(dest, src, size);
+}
+
 /*
  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
  * length and relative data location.
  */
-static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
-					    void *addr, void *dest)
+static nokprobe_inline int
+fetch_store_string(unsigned long addr, void *dest, void *base)
 {
 	long ret;
-	u32 rloc = *(u32 *)dest;
-	int maxlen = get_rloc_len(rloc);
-	u8 *dst = get_rloc_data(dest);
+	u32 loc = *(u32 *)dest;
+	int maxlen = get_loc_len(loc);
+	u8 *dst = get_loc_data(dest, base);
 	void __user *src = (void __force __user *) addr;
 
-	if (!maxlen)
-		return;
+	if (unlikely(!maxlen))
+		return -ENOMEM;
 
-	ret = strncpy_from_user(dst, src, maxlen);
-	if (ret == maxlen)
-		dst[ret - 1] = '\0';
-	else if (ret >= 0)
-		/*
-		 * Include the terminating null byte. In this case it
-		 * was copied by strncpy_from_user but not accounted
-		 * for in ret.
-		 */
-		ret++;
-
-	if (ret < 0) { /* Failed to fetch string */
-		((u8 *)get_rloc_data(dest))[0] = '\0';
-		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
-	} else {
-		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
+	if (addr == FETCH_TOKEN_COMM)
+		ret = strlcpy(dst, current->comm, maxlen);
+	else
+		ret = strncpy_from_user(dst, src, maxlen);
+	if (ret >= 0) {
+		if (ret == maxlen)
+			dst[ret - 1] = '\0';
+		else
+			/*
+			 * Include the terminating null byte. In this case it
+			 * was copied by strncpy_from_user but not accounted
+			 * for in ret.
+			 */
+			ret++;
+		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
 	}
+
+	return ret;
 }
 
-static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
-						 void *addr, void *dest)
+static nokprobe_inline int
+fetch_store_string_user(unsigned long addr, void *dest, void *base)
+{
+	return fetch_store_string(addr, dest, base);
+}
+
+/* Return the length of string -- including null terminal byte */
+static nokprobe_inline int
+fetch_store_strlen(unsigned long addr)
 {
 	int len;
 	void __user *vaddr = (void __force __user *) addr;
 
-	len = strnlen_user(vaddr, MAX_STRING_SIZE);
-
-	if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
-		*(u32 *)dest = 0;
+	if (addr == FETCH_TOKEN_COMM)
+		len = strlen(current->comm) + 1;
 	else
-		*(u32 *)dest = len;
+		len = strnlen_user(vaddr, MAX_STRING_SIZE);
+
+	return (len > MAX_STRING_SIZE) ? 0 : len;
 }
 
-static unsigned long translate_user_vaddr(void *file_offset)
+static nokprobe_inline int
+fetch_store_strlen_user(unsigned long addr)
+{
+	return fetch_store_strlen(addr);
+}
+
+static unsigned long translate_user_vaddr(unsigned long file_offset)
 {
 	unsigned long base_addr;
 	struct uprobe_dispatch_data *udd;
@@ -180,44 +212,50 @@
 	udd = (void *) current->utask->vaddr;
 
 	base_addr = udd->bp_addr - udd->tu->offset;
-	return base_addr + (unsigned long)file_offset;
+	return base_addr + file_offset;
 }
 
-#define DEFINE_FETCH_file_offset(type) \
-static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
-					       void *offset, void *dest)\
-{ \
-	void *vaddr = (void *)translate_user_vaddr(offset); \
- \
-	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \
+/* Note that we don't verify it, since the code does not come from user space */
+static int
+process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
+		   void *base)
+{
+	unsigned long val;
+
+	/* 1st stage: get value from context */
+	switch (code->op) {
+	case FETCH_OP_REG:
+		val = regs_get_register(regs, code->param);
+		break;
+	case FETCH_OP_STACK:
+		val = get_user_stack_nth(regs, code->param);
+		break;
+	case FETCH_OP_STACKP:
+		val = user_stack_pointer(regs);
+		break;
+	case FETCH_OP_RETVAL:
+		val = regs_return_value(regs);
+		break;
+	case FETCH_OP_IMM:
+		val = code->immediate;
+		break;
+	case FETCH_OP_COMM:
+		val = FETCH_TOKEN_COMM;
+		break;
+	case FETCH_OP_DATA:
+		val = (unsigned long)code->data;
+		break;
+	case FETCH_OP_FOFFS:
+		val = translate_user_vaddr(code->immediate);
+		break;
+	default:
+		return -EILSEQ;
+	}
+	code++;
+
+	return process_fetch_insn_bottom(code, val, dest, base);
 }
-DEFINE_BASIC_FETCH_FUNCS(file_offset)
-DEFINE_FETCH_file_offset(string)
-DEFINE_FETCH_file_offset(string_size)
-
-/* Fetch type information table */
-static const struct fetch_type uprobes_fetch_type_table[] = {
-	/* Special types */
-	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
-					sizeof(u32), 1, "__data_loc char[]"),
-	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
-					string_size, sizeof(u32), 0, "u32"),
-	/* Basic types */
-	ASSIGN_FETCH_TYPE(u8, u8, 0),
-	ASSIGN_FETCH_TYPE(u16, u16, 0),
-	ASSIGN_FETCH_TYPE(u32, u32, 0),
-	ASSIGN_FETCH_TYPE(u64, u64, 0),
-	ASSIGN_FETCH_TYPE(s8, u8, 1),
-	ASSIGN_FETCH_TYPE(s16, u16, 1),
-	ASSIGN_FETCH_TYPE(s32, u32, 1),
-	ASSIGN_FETCH_TYPE(s64, u64, 1),
-	ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
-	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
-	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
-	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
-
-	ASSIGN_FETCH_TYPE_END
-};
+NOKPROBE_SYMBOL(process_fetch_insn)
 
 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
 {
@@ -236,6 +274,63 @@
 	return tu->consumer.ret_handler != NULL;
 }
 
+static bool trace_uprobe_is_busy(struct dyn_event *ev)
+{
+	struct trace_uprobe *tu = to_trace_uprobe(ev);
+
+	return trace_probe_is_enabled(&tu->tp);
+}
+
+static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
+					    int argc, const char **argv)
+{
+	char buf[MAX_ARGSTR_LEN + 1];
+	int len;
+
+	if (!argc)
+		return true;
+
+	len = strlen(tu->filename);
+	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
+		return false;
+
+	if (tu->ref_ctr_offset == 0)
+		snprintf(buf, sizeof(buf), "0x%0*lx",
+			 (int)(sizeof(void *) * 2), tu->offset);
+	else
+		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
+			 (int)(sizeof(void *) * 2), tu->offset,
+			 tu->ref_ctr_offset);
+	if (strcmp(buf, &argv[0][len + 1]))
+		return false;
+
+	argc--; argv++;
+
+	return trace_probe_match_command_args(&tu->tp, argc, argv);
+}
+
+static bool trace_uprobe_match(const char *system, const char *event,
+			       int argc, const char **argv, struct dyn_event *ev)
+{
+	struct trace_uprobe *tu = to_trace_uprobe(ev);
+
+	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
+	    (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
+	    trace_uprobe_match_command_head(tu, argc, argv);
+}
+
+static nokprobe_inline struct trace_uprobe *
+trace_uprobe_primary_from_call(struct trace_event_call *call)
+{
+	struct trace_probe *tp;
+
+	tp = trace_probe_primary_from_call(call);
+	if (WARN_ON_ONCE(!tp))
+		return NULL;
+
+	return container_of(tp, struct trace_uprobe, tp);
+}
+
 /*
  * Allocate new trace_uprobe and initialize it (including uprobes).
  */
@@ -243,78 +338,151 @@
 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 {
 	struct trace_uprobe *tu;
-
-	if (!event || !is_good_name(event))
-		return ERR_PTR(-EINVAL);
-
-	if (!group || !is_good_name(group))
-		return ERR_PTR(-EINVAL);
+	int ret;
 
 	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
 	if (!tu)
 		return ERR_PTR(-ENOMEM);
 
-	tu->tp.call.class = &tu->tp.class;
-	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
-	if (!tu->tp.call.name)
+	ret = trace_probe_init(&tu->tp, event, group, true);
+	if (ret < 0)
 		goto error;
 
-	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
-	if (!tu->tp.class.system)
-		goto error;
-
-	INIT_LIST_HEAD(&tu->list);
-	INIT_LIST_HEAD(&tu->tp.files);
+	dyn_event_init(&tu->devent, &trace_uprobe_ops);
 	tu->consumer.handler = uprobe_dispatcher;
 	if (is_ret)
 		tu->consumer.ret_handler = uretprobe_dispatcher;
-	init_trace_uprobe_filter(&tu->filter);
+	init_trace_uprobe_filter(tu->tp.event->filter);
 	return tu;
 
 error:
-	kfree(tu->tp.call.name);
 	kfree(tu);
 
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(ret);
 }
 
 static void free_trace_uprobe(struct trace_uprobe *tu)
 {
-	int i;
-
-	for (i = 0; i < tu->tp.nr_args; i++)
-		traceprobe_free_probe_arg(&tu->tp.args[i]);
+	if (!tu)
+		return;
 
 	path_put(&tu->path);
-	kfree(tu->tp.call.class->system);
-	kfree(tu->tp.call.name);
+	trace_probe_cleanup(&tu->tp);
 	kfree(tu->filename);
 	kfree(tu);
 }
 
 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
 {
+	struct dyn_event *pos;
 	struct trace_uprobe *tu;
 
-	list_for_each_entry(tu, &uprobe_list, list)
-		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
-		    strcmp(tu->tp.call.class->system, group) == 0)
+	for_each_trace_uprobe(tu, pos)
+		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
+		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
 			return tu;
 
 	return NULL;
 }
 
-/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
+/* Unregister a trace_uprobe and probe_event */
 static int unregister_trace_uprobe(struct trace_uprobe *tu)
 {
 	int ret;
+
+	if (trace_probe_has_sibling(&tu->tp))
+		goto unreg;
 
 	ret = unregister_uprobe_event(tu);
 	if (ret)
 		return ret;
 
-	list_del(&tu->list);
+unreg:
+	dyn_event_remove(&tu->devent);
+	trace_probe_unlink(&tu->tp);
 	free_trace_uprobe(tu);
+	return 0;
+}
+
+static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
+					 struct trace_uprobe *comp)
+{
+	struct trace_probe_event *tpe = orig->tp.event;
+	struct trace_probe *pos;
+	struct inode *comp_inode = d_real_inode(comp->path.dentry);
+	int i;
+
+	list_for_each_entry(pos, &tpe->probes, list) {
+		orig = container_of(pos, struct trace_uprobe, tp);
+		if (comp_inode != d_real_inode(orig->path.dentry) ||
+		    comp->offset != orig->offset)
+			continue;
+
+		/*
+		 * trace_probe_compare_arg_type() ensured that nr_args and
+		 * each argument name and type are same. Let's compare comm.
+		 */
+		for (i = 0; i < orig->tp.nr_args; i++) {
+			if (strcmp(orig->tp.args[i].comm,
+				   comp->tp.args[i].comm))
+				break;
+		}
+
+		if (i == orig->tp.nr_args)
+			return true;
+	}
+
+	return false;
+}
+
+static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
+{
+	int ret;
+
+	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
+	if (ret) {
+		/* Note that argument starts index = 2 */
+		trace_probe_log_set_index(ret + 1);
+		trace_probe_log_err(0, DIFF_ARG_TYPE);
+		return -EEXIST;
+	}
+	if (trace_uprobe_has_same_uprobe(to, tu)) {
+		trace_probe_log_set_index(0);
+		trace_probe_log_err(0, SAME_PROBE);
+		return -EEXIST;
+	}
+
+	/* Append to existing event */
+	ret = trace_probe_append(&tu->tp, &to->tp);
+	if (!ret)
+		dyn_event_add(&tu->devent);
+
+	return ret;
+}
+
+/*
+ * Uprobe with multiple reference counter is not allowed. i.e.
+ * If inode and offset matches, reference counter offset *must*
+ * match as well. Though, there is one exception: If user is
+ * replacing old trace_uprobe with new one(same group/event),
+ * then we allow same uprobe with new reference counter as far
+ * as the new one does not conflict with any other existing
+ * ones.
+ */
+static int validate_ref_ctr_offset(struct trace_uprobe *new)
+{
+	struct dyn_event *pos;
+	struct trace_uprobe *tmp;
+	struct inode *new_inode = d_real_inode(new->path.dentry);
+
+	for_each_trace_uprobe(tmp, pos) {
+		if (new_inode == d_real_inode(tmp->path.dentry) &&
+		    new->offset == tmp->offset &&
+		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
+			pr_warn("Reference counter offset mismatch.");
+			return -EINVAL;
+		}
+	}
 	return 0;
 }
 
....@@ -324,136 +492,163 @@
324492 struct trace_uprobe *old_tu;
325493 int ret;
326494
327
- mutex_lock(&uprobe_lock);
495
+ mutex_lock(&event_mutex);
496
+
497
+ ret = validate_ref_ctr_offset(tu);
498
+ if (ret)
499
+ goto end;
328500
329501 /* register as an event */
330
- old_tu = find_probe_event(trace_event_name(&tu->tp.call),
331
- tu->tp.call.class->system);
502
+ old_tu = find_probe_event(trace_probe_name(&tu->tp),
503
+ trace_probe_group_name(&tu->tp));
332504 if (old_tu) {
333
- /* delete old event */
334
- ret = unregister_trace_uprobe(old_tu);
335
- if (ret)
336
- goto end;
505
+ if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
506
+ trace_probe_log_set_index(0);
507
+ trace_probe_log_err(0, DIFF_PROBE_TYPE);
508
+ ret = -EEXIST;
509
+ } else {
510
+ ret = append_trace_uprobe(tu, old_tu);
511
+ }
512
+ goto end;
337513 }
338514
339515 ret = register_uprobe_event(tu);
340516 if (ret) {
341
- pr_warn("Failed to register probe event(%d)\n", ret);
517
+ if (ret == -EEXIST) {
518
+ trace_probe_log_set_index(0);
519
+ trace_probe_log_err(0, EVENT_EXIST);
520
+ } else
521
+ pr_warn("Failed to register probe event(%d)\n", ret);
342522 goto end;
343523 }
344524
345
- list_add_tail(&tu->list, &uprobe_list);
525
+ dyn_event_add(&tu->devent);
346526
347527 end:
348
- mutex_unlock(&uprobe_lock);
528
+ mutex_unlock(&event_mutex);
349529
350530 return ret;
351531 }
352532
353533 /*
354534 * Argument syntax:
355
- * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
356
- *
357
- * - Remove uprobe: -:[GRP/]EVENT
535
+ * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
358536 */
359
-static int create_trace_uprobe(int argc, char **argv)
537
+static int trace_uprobe_create(int argc, const char **argv)
360538 {
361539 struct trace_uprobe *tu;
362
- char *arg, *event, *group, *filename;
540
+ const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
541
+ char *arg, *filename, *rctr, *rctr_end, *tmp;
363542 char buf[MAX_EVENT_NAME_LEN];
364543 struct path path;
365
- unsigned long offset;
366
- bool is_delete, is_return;
544
+ unsigned long offset, ref_ctr_offset;
545
+ bool is_return = false;
367546 int i, ret;
368547
369548 ret = 0;
370
- is_delete = false;
371
- is_return = false;
372
- event = NULL;
373
- group = NULL;
549
+ ref_ctr_offset = 0;
374550
375
- /* argc must be >= 1 */
376
- if (argv[0][0] == '-')
377
- is_delete = true;
378
- else if (argv[0][0] == 'r')
551
+ switch (argv[0][0]) {
552
+ case 'r':
379553 is_return = true;
380
- else if (argv[0][0] != 'p') {
381
- pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
382
- return -EINVAL;
554
+ break;
555
+ case 'p':
556
+ break;
557
+ default:
558
+ return -ECANCELED;
383559 }
384560
385
- if (argv[0][1] == ':') {
561
+ if (argc < 2)
562
+ return -ECANCELED;
563
+
564
+ if (argv[0][1] == ':')
386565 event = &argv[0][2];
387
- arg = strchr(event, '/');
388566
389
- if (arg) {
390
- group = event;
391
- event = arg + 1;
392
- event[-1] = '\0';
567
+ if (!strchr(argv[1], '/'))
568
+ return -ECANCELED;
393569
394
- if (strlen(group) == 0) {
395
- pr_info("Group name is not specified\n");
396
- return -EINVAL;
397
- }
398
- }
399
- if (strlen(event) == 0) {
400
- pr_info("Event name is not specified\n");
401
- return -EINVAL;
402
- }
403
- }
404
- if (!group)
405
- group = UPROBE_EVENT_SYSTEM;
570
+ filename = kstrdup(argv[1], GFP_KERNEL);
571
+ if (!filename)
572
+ return -ENOMEM;
406573
407
- if (is_delete) {
408
- int ret;
409
-
410
- if (!event) {
411
- pr_info("Delete command needs an event name.\n");
412
- return -EINVAL;
413
- }
414
- mutex_lock(&uprobe_lock);
415
- tu = find_probe_event(event, group);
416
-
417
- if (!tu) {
418
- mutex_unlock(&uprobe_lock);
419
- pr_info("Event %s/%s doesn't exist.\n", group, event);
420
- return -ENOENT;
421
- }
422
- /* delete an event */
423
- ret = unregister_trace_uprobe(tu);
424
- mutex_unlock(&uprobe_lock);
425
- return ret;
426
- }
427
-
428
- if (argc < 2) {
429
- pr_info("Probe point is not specified.\n");
430
- return -EINVAL;
431
- }
432574 /* Find the last occurrence, in case the path contains ':' too. */
433
- arg = strrchr(argv[1], ':');
434
- if (!arg)
435
- return -EINVAL;
575
+ arg = strrchr(filename, ':');
576
+ if (!arg || !isdigit(arg[1])) {
577
+ kfree(filename);
578
+ return -ECANCELED;
579
+ }
580
+
581
+ trace_probe_log_init("trace_uprobe", argc, argv);
582
+ trace_probe_log_set_index(1); /* filename is the 2nd argument */
436583
437584 *arg++ = '\0';
438
- filename = argv[1];
439585 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
440
- if (ret)
586
+ if (ret) {
587
+ trace_probe_log_err(0, FILE_NOT_FOUND);
588
+ kfree(filename);
589
+ trace_probe_log_clear();
441590 return ret;
442
-
591
+ }
443592 if (!d_is_reg(path.dentry)) {
593
+ trace_probe_log_err(0, NO_REGULAR_FILE);
444594 ret = -EINVAL;
445595 goto fail_address_parse;
446596 }
447597
448
- ret = kstrtoul(arg, 0, &offset);
449
- if (ret)
450
- goto fail_address_parse;
598
+ /* Parse reference counter offset if specified. */
599
+ rctr = strchr(arg, '(');
600
+ if (rctr) {
601
+ rctr_end = strchr(rctr, ')');
602
+ if (!rctr_end) {
603
+ ret = -EINVAL;
604
+ rctr_end = rctr + strlen(rctr);
605
+ trace_probe_log_err(rctr_end - filename,
606
+ REFCNT_OPEN_BRACE);
607
+ goto fail_address_parse;
608
+ } else if (rctr_end[1] != '\0') {
609
+ ret = -EINVAL;
610
+ trace_probe_log_err(rctr_end + 1 - filename,
611
+ BAD_REFCNT_SUFFIX);
612
+ goto fail_address_parse;
613
+ }
451614
452
- argc -= 2;
453
- argv += 2;
615
+ *rctr++ = '\0';
616
+ *rctr_end = '\0';
617
+ ret = kstrtoul(rctr, 0, &ref_ctr_offset);
618
+ if (ret) {
619
+ trace_probe_log_err(rctr - filename, BAD_REFCNT);
620
+ goto fail_address_parse;
621
+ }
622
+ }
623
+
624
+ /* Check if there is %return suffix */
625
+ tmp = strchr(arg, '%');
626
+ if (tmp) {
627
+ if (!strcmp(tmp, "%return")) {
628
+ *tmp = '\0';
629
+ is_return = true;
630
+ } else {
631
+ trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
632
+ ret = -EINVAL;
633
+ goto fail_address_parse;
634
+ }
635
+ }
636
+
637
+ /* Parse uprobe offset. */
638
+ ret = kstrtoul(arg, 0, &offset);
639
+ if (ret) {
640
+ trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
641
+ goto fail_address_parse;
642
+ }
454643
455644 /* setup a probe */
456
- if (!event) {
645
+ trace_probe_log_set_index(0);
646
+ if (event) {
647
+ ret = traceprobe_parse_event_name(&event, &group, buf,
648
+ event - argv[0]);
649
+ if (ret)
650
+ goto fail_address_parse;
651
+ } else {
457652 char *tail;
458653 char *ptr;
459654
....@@ -472,130 +667,90 @@
472667 kfree(tail);
473668 }
474669
670
+ argc -= 2;
671
+ argv += 2;
672
+
475673 tu = alloc_trace_uprobe(group, event, argc, is_return);
476674 if (IS_ERR(tu)) {
477
- pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
478675 ret = PTR_ERR(tu);
676
+ /* This must return -ENOMEM otherwise there is a bug */
677
+ WARN_ON_ONCE(ret != -ENOMEM);
479678 goto fail_address_parse;
480679 }
481680 tu->offset = offset;
681
+ tu->ref_ctr_offset = ref_ctr_offset;
482682 tu->path = path;
483
- tu->filename = kstrdup(filename, GFP_KERNEL);
484
-
485
- if (!tu->filename) {
486
- pr_info("Failed to allocate filename.\n");
487
- ret = -ENOMEM;
488
- goto error;
489
- }
683
+ tu->filename = filename;
490684
491685 /* parse arguments */
492
- ret = 0;
493686 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
494
- struct probe_arg *parg = &tu->tp.args[i];
495
-
496
- /* Increment count for freeing args in error case */
497
- tu->tp.nr_args++;
498
-
499
- /* Parse argument name */
500
- arg = strchr(argv[i], '=');
501
- if (arg) {
502
- *arg++ = '\0';
503
- parg->name = kstrdup(argv[i], GFP_KERNEL);
504
- } else {
505
- arg = argv[i];
506
- /* If argument name is omitted, set "argN" */
507
- snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
508
- parg->name = kstrdup(buf, GFP_KERNEL);
509
- }
510
-
511
- if (!parg->name) {
512
- pr_info("Failed to allocate argument[%d] name.\n", i);
687
+ tmp = kstrdup(argv[i], GFP_KERNEL);
688
+ if (!tmp) {
513689 ret = -ENOMEM;
514690 goto error;
515691 }
516692
517
- if (!is_good_name(parg->name)) {
518
- pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
519
- ret = -EINVAL;
693
+ trace_probe_log_set_index(i + 2);
694
+ ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
695
+ is_return ? TPARG_FL_RETURN : 0);
696
+ kfree(tmp);
697
+ if (ret)
520698 goto error;
521
- }
522
-
523
- if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
524
- pr_info("Argument[%d] name '%s' conflicts with "
525
- "another field.\n", i, argv[i]);
526
- ret = -EINVAL;
527
- goto error;
528
- }
529
-
530
- /* Parse fetch argument */
531
- ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
532
- is_return, false,
533
- uprobes_fetch_type_table);
534
- if (ret) {
535
- pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
536
- goto error;
537
- }
538699 }
539700
540
- ret = register_trace_uprobe(tu);
541
- if (ret)
701
+ ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
702
+ if (ret < 0)
542703 goto error;
543
- return 0;
704
+
705
+ ret = register_trace_uprobe(tu);
706
+ if (!ret)
707
+ goto out;
544708
545709 error:
546710 free_trace_uprobe(tu);
711
+out:
712
+ trace_probe_log_clear();
547713 return ret;
548714
549715 fail_address_parse:
716
+ trace_probe_log_clear();
550717 path_put(&path);
551
-
552
- pr_info("Failed to parse address or file.\n");
718
+ kfree(filename);
553719
554720 return ret;
555721 }
556722
557
-static int cleanup_all_probes(void)
723
+static int create_or_delete_trace_uprobe(int argc, char **argv)
558724 {
559
- struct trace_uprobe *tu;
560
- int ret = 0;
725
+ int ret;
561726
562
- mutex_lock(&uprobe_lock);
563
- while (!list_empty(&uprobe_list)) {
564
- tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
565
- ret = unregister_trace_uprobe(tu);
566
- if (ret)
567
- break;
568
- }
569
- mutex_unlock(&uprobe_lock);
570
- return ret;
727
+ if (argv[0][0] == '-')
728
+ return dyn_event_release(argc, argv, &trace_uprobe_ops);
729
+
730
+ ret = trace_uprobe_create(argc, (const char **)argv);
731
+ return ret == -ECANCELED ? -EINVAL : ret;
732
+}
733
+
734
+static int trace_uprobe_release(struct dyn_event *ev)
735
+{
736
+ struct trace_uprobe *tu = to_trace_uprobe(ev);
737
+
738
+ return unregister_trace_uprobe(tu);
571739 }
572740
573741 /* Probes listing interfaces */
574
-static void *probes_seq_start(struct seq_file *m, loff_t *pos)
742
+static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
575743 {
576
- mutex_lock(&uprobe_lock);
577
- return seq_list_start(&uprobe_list, *pos);
578
-}
579
-
580
-static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
581
-{
582
- return seq_list_next(v, &uprobe_list, pos);
583
-}
584
-
585
-static void probes_seq_stop(struct seq_file *m, void *v)
586
-{
587
- mutex_unlock(&uprobe_lock);
588
-}
589
-
590
-static int probes_seq_show(struct seq_file *m, void *v)
591
-{
592
- struct trace_uprobe *tu = v;
744
+ struct trace_uprobe *tu = to_trace_uprobe(ev);
593745 char c = is_ret_probe(tu) ? 'r' : 'p';
594746 int i;
595747
596
- seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
597
- trace_event_name(&tu->tp.call), tu->filename,
748
+ seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
749
+ trace_probe_name(&tu->tp), tu->filename,
598750 (int)(sizeof(void *) * 2), tu->offset);
751
+
752
+ if (tu->ref_ctr_offset)
753
+ seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
599754
600755 for (i = 0; i < tu->tp.nr_args; i++)
601756 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
....@@ -604,19 +759,33 @@
604759 return 0;
605760 }
606761
762
+static int probes_seq_show(struct seq_file *m, void *v)
763
+{
764
+ struct dyn_event *ev = v;
765
+
766
+ if (!is_trace_uprobe(ev))
767
+ return 0;
768
+
769
+ return trace_uprobe_show(m, ev);
770
+}
771
+
607772 static const struct seq_operations probes_seq_op = {
608
- .start = probes_seq_start,
609
- .next = probes_seq_next,
610
- .stop = probes_seq_stop,
611
- .show = probes_seq_show
773
+ .start = dyn_event_seq_start,
774
+ .next = dyn_event_seq_next,
775
+ .stop = dyn_event_seq_stop,
776
+ .show = probes_seq_show
612777 };
613778
614779 static int probes_open(struct inode *inode, struct file *file)
615780 {
616781 int ret;
617782
783
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
784
+ if (ret)
785
+ return ret;
786
+
618787 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
619
- ret = cleanup_all_probes();
788
+ ret = dyn_events_release_all(&trace_uprobe_ops);
620789 if (ret)
621790 return ret;
622791 }
@@ -627,7 +796,8 @@
 static ssize_t probes_write(struct file *file, const char __user *buffer,
 			    size_t count, loff_t *ppos)
 {
-	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
+	return trace_parse_run_command(file, buffer, count, ppos,
+				       create_or_delete_trace_uprobe);
 }
 
 static const struct file_operations uprobe_events_ops = {
....@@ -642,22 +812,33 @@
642812 /* Probes profiling interfaces */
643813 static int probes_profile_seq_show(struct seq_file *m, void *v)
644814 {
645
- struct trace_uprobe *tu = v;
815
+ struct dyn_event *ev = v;
816
+ struct trace_uprobe *tu;
646817
818
+ if (!is_trace_uprobe(ev))
819
+ return 0;
820
+
821
+ tu = to_trace_uprobe(ev);
647822 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
648
- trace_event_name(&tu->tp.call), tu->nhit);
823
+ trace_probe_name(&tu->tp), tu->nhit);
649824 return 0;
650825 }
651826
652827 static const struct seq_operations profile_seq_op = {
653
- .start = probes_seq_start,
654
- .next = probes_seq_next,
655
- .stop = probes_seq_stop,
828
+ .start = dyn_event_seq_start,
829
+ .next = dyn_event_seq_next,
830
+ .stop = dyn_event_seq_stop,
656831 .show = probes_profile_seq_show
657832 };
658833
659834 static int profile_open(struct inode *inode, struct file *file)
660835 {
836
+ int ret;
837
+
838
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
839
+ if (ret)
840
+ return ret;
841
+
661842 return seq_open(file, &profile_seq_op);
662843 }
663844
@@ -767,11 +948,11 @@
 				struct trace_event_file *trace_file)
 {
 	struct uprobe_trace_entry_head *entry;
+	struct trace_buffer *buffer;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
 	void *data;
 	int size, esize;
-	struct trace_event_call *call = &tu->tp.call;
+	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
 
 	WARN_ON(call != trace_file->event_call);
 
....@@ -813,7 +994,7 @@
813994 return 0;
814995
815996 rcu_read_lock();
816
- list_for_each_entry_rcu(link, &tu->tp.files, list)
997
+ trace_probe_for_each_link_rcu(link, &tu->tp)
817998 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
818999 rcu_read_unlock();
8191000
....@@ -827,7 +1008,7 @@
8271008 struct event_file_link *link;
8281009
8291010 rcu_read_lock();
830
- list_for_each_entry_rcu(link, &tu->tp.files, list)
1011
+ trace_probe_for_each_link_rcu(link, &tu->tp)
8311012 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
8321013 rcu_read_unlock();
8331014 }
....@@ -840,29 +1021,27 @@
8401021 struct trace_seq *s = &iter->seq;
8411022 struct trace_uprobe *tu;
8421023 u8 *data;
843
- int i;
8441024
8451025 entry = (struct uprobe_trace_entry_head *)iter->ent;
846
- tu = container_of(event, struct trace_uprobe, tp.call.event);
1026
+ tu = trace_uprobe_primary_from_call(
1027
+ container_of(event, struct trace_event_call, event));
1028
+ if (unlikely(!tu))
1029
+ goto out;
8471030
8481031 if (is_ret_probe(tu)) {
8491032 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
850
- trace_event_name(&tu->tp.call),
1033
+ trace_probe_name(&tu->tp),
8511034 entry->vaddr[1], entry->vaddr[0]);
8521035 data = DATAOF_TRACE_ENTRY(entry, true);
8531036 } else {
8541037 trace_seq_printf(s, "%s: (0x%lx)",
855
- trace_event_name(&tu->tp.call),
1038
+ trace_probe_name(&tu->tp),
8561039 entry->vaddr[0]);
8571040 data = DATAOF_TRACE_ENTRY(entry, false);
8581041 }
8591042
860
- for (i = 0; i < tu->tp.nr_args; i++) {
861
- struct probe_arg *parg = &tu->tp.args[i];
862
-
863
- if (!parg->type->print(s, parg->name, data + parg->offset, entry))
864
- goto out;
865
- }
1043
+ if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1044
+ goto out;
8661045
8671046 trace_seq_putc(s, '\n');
8681047
....@@ -874,34 +1053,73 @@
8741053 enum uprobe_filter_ctx ctx,
8751054 struct mm_struct *mm);
8761055
877
-static int
878
-probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
879
- filter_func_t filter)
1056
+static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
8801057 {
881
- bool enabled = trace_probe_is_enabled(&tu->tp);
882
- struct event_file_link *link = NULL;
8831058 int ret;
8841059
1060
+ tu->consumer.filter = filter;
1061
+ tu->inode = d_real_inode(tu->path.dentry);
1062
+
1063
+ if (tu->ref_ctr_offset)
1064
+ ret = uprobe_register_refctr(tu->inode, tu->offset,
1065
+ tu->ref_ctr_offset, &tu->consumer);
1066
+ else
1067
+ ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1068
+
1069
+ if (ret)
1070
+ tu->inode = NULL;
1071
+
1072
+ return ret;
1073
+}
1074
+
1075
+static void __probe_event_disable(struct trace_probe *tp)
1076
+{
1077
+ struct trace_probe *pos;
1078
+ struct trace_uprobe *tu;
1079
+
1080
+ tu = container_of(tp, struct trace_uprobe, tp);
1081
+ WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1082
+
1083
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1084
+ tu = container_of(pos, struct trace_uprobe, tp);
1085
+ if (!tu->inode)
1086
+ continue;
1087
+
1088
+ uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1089
+ tu->inode = NULL;
1090
+ }
1091
+}
1092
+
1093
+static int probe_event_enable(struct trace_event_call *call,
1094
+ struct trace_event_file *file, filter_func_t filter)
1095
+{
1096
+ struct trace_probe *pos, *tp;
1097
+ struct trace_uprobe *tu;
1098
+ bool enabled;
1099
+ int ret;
1100
+
1101
+ tp = trace_probe_primary_from_call(call);
1102
+ if (WARN_ON_ONCE(!tp))
1103
+ return -ENODEV;
1104
+ enabled = trace_probe_is_enabled(tp);
1105
+
1106
+ /* This may also change "enabled" state */
8851107 if (file) {
886
- if (tu->tp.flags & TP_FLAG_PROFILE)
1108
+ if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
8871109 return -EINTR;
8881110
889
- link = kmalloc(sizeof(*link), GFP_KERNEL);
890
- if (!link)
891
- return -ENOMEM;
892
-
893
- link->file = file;
894
- list_add_tail_rcu(&link->list, &tu->tp.files);
895
-
896
- tu->tp.flags |= TP_FLAG_TRACE;
1111
+ ret = trace_probe_add_file(tp, file);
1112
+ if (ret < 0)
1113
+ return ret;
8971114 } else {
898
- if (tu->tp.flags & TP_FLAG_TRACE)
1115
+ if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
8991116 return -EINTR;
9001117
901
- tu->tp.flags |= TP_FLAG_PROFILE;
1118
+ trace_probe_set_flag(tp, TP_FLAG_PROFILE);
9021119 }
9031120
904
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
1121
+ tu = container_of(tp, struct trace_uprobe, tp);
1122
+ WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
9051123
9061124 if (enabled)
9071125 return 0;
....@@ -910,11 +1128,14 @@
9101128 if (ret)
9111129 goto err_flags;
9121130
913
- tu->consumer.filter = filter;
914
- tu->inode = d_real_inode(tu->path.dentry);
915
- ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
916
- if (ret)
917
- goto err_buffer;
1131
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1132
+ tu = container_of(pos, struct trace_uprobe, tp);
1133
+ ret = trace_uprobe_enable(tu, filter);
1134
+ if (ret) {
1135
+ __probe_event_disable(tp);
1136
+ goto err_buffer;
1137
+ }
1138
+ }
9181139
9191140 return 0;
9201141
....@@ -922,52 +1143,48 @@
9221143 uprobe_buffer_disable();
9231144
9241145 err_flags:
925
- if (file) {
926
- list_del(&link->list);
927
- kfree(link);
928
- tu->tp.flags &= ~TP_FLAG_TRACE;
929
- } else {
930
- tu->tp.flags &= ~TP_FLAG_PROFILE;
931
- }
1146
+ if (file)
1147
+ trace_probe_remove_file(tp, file);
1148
+ else
1149
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1150
+
9321151 return ret;
9331152 }
9341153
935
-static void
936
-probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
1154
+static void probe_event_disable(struct trace_event_call *call,
1155
+ struct trace_event_file *file)
9371156 {
938
- if (!trace_probe_is_enabled(&tu->tp))
1157
+ struct trace_probe *tp;
1158
+
1159
+ tp = trace_probe_primary_from_call(call);
1160
+ if (WARN_ON_ONCE(!tp))
1161
+ return;
1162
+
1163
+ if (!trace_probe_is_enabled(tp))
9391164 return;
9401165
9411166 if (file) {
942
- struct event_file_link *link;
943
-
944
- link = find_event_file_link(&tu->tp, file);
945
- if (!link)
1167
+ if (trace_probe_remove_file(tp, file) < 0)
9461168 return;
9471169
948
- list_del_rcu(&link->list);
949
- /* synchronize with u{,ret}probe_trace_func */
950
- synchronize_rcu();
951
- kfree(link);
952
-
953
- if (!list_empty(&tu->tp.files))
1170
+ if (trace_probe_is_enabled(tp))
9541171 return;
955
- }
1172
+ } else
1173
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
9561174
957
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
958
-
959
- uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
960
- tu->inode = NULL;
961
- tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
962
-
1175
+ __probe_event_disable(tp);
9631176 uprobe_buffer_disable();
9641177 }
9651178
9661179 static int uprobe_event_define_fields(struct trace_event_call *event_call)
9671180 {
968
- int ret, i, size;
1181
+ int ret, size;
9691182 struct uprobe_trace_entry_head field;
970
- struct trace_uprobe *tu = event_call->data;
1183
+ struct trace_uprobe *tu;
1184
+
1185
+ tu = trace_uprobe_primary_from_call(event_call);
1186
+ if (unlikely(!tu))
1187
+ return -ENODEV;
9711188
9721189 if (is_ret_probe(tu)) {
9731190 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
....@@ -977,19 +1194,8 @@
9771194 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
9781195 size = SIZEOF_TRACE_ENTRY(false);
9791196 }
980
- /* Set argument names as fields */
981
- for (i = 0; i < tu->tp.nr_args; i++) {
982
- struct probe_arg *parg = &tu->tp.args[i];
9831197
984
- ret = trace_define_field(event_call, parg->type->fmttype,
985
- parg->name, size + parg->offset,
986
- parg->type->size, parg->type->is_signed,
987
- FILTER_OTHER);
988
-
989
- if (ret)
990
- return ret;
991
- }
992
- return 0;
1198
+ return traceprobe_define_arg_fields(event_call, size, &tu->tp);
9931199 }
9941200
9951201 #ifdef CONFIG_PERF_EVENTS
....@@ -1010,39 +1216,39 @@
10101216 }
10111217
10121218 static inline bool
1013
-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1219
+trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1220
+ struct perf_event *event)
10141221 {
1015
- return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1222
+ return __uprobe_perf_filter(filter, event->hw.target->mm);
10161223 }
10171224
1018
-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1225
+static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1226
+ struct perf_event *event)
10191227 {
10201228 bool done;
10211229
1022
- write_lock(&tu->filter.rwlock);
1230
+ write_lock(&filter->rwlock);
10231231 if (event->hw.target) {
10241232 list_del(&event->hw.tp_list);
1025
- done = tu->filter.nr_systemwide ||
1233
+ done = filter->nr_systemwide ||
10261234 (event->hw.target->flags & PF_EXITING) ||
1027
- uprobe_filter_event(tu, event);
1235
+ trace_uprobe_filter_event(filter, event);
10281236 } else {
1029
- tu->filter.nr_systemwide--;
1030
- done = tu->filter.nr_systemwide;
1237
+ filter->nr_systemwide--;
1238
+ done = filter->nr_systemwide;
10311239 }
1032
- write_unlock(&tu->filter.rwlock);
1240
+ write_unlock(&filter->rwlock);
10331241
1034
- if (!done)
1035
- return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1036
-
1037
- return 0;
1242
+ return done;
10381243 }
10391244
1040
-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1245
+/* This returns true if the filter always covers target mm */
1246
+static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1247
+ struct perf_event *event)
10411248 {
10421249 bool done;
1043
- int err;
10441250
1045
- write_lock(&tu->filter.rwlock);
1251
+ write_lock(&filter->rwlock);
10461252 if (event->hw.target) {
10471253 /*
10481254 * event->parent != NULL means copy_process(), we can avoid
....@@ -1052,35 +1258,84 @@
10521258 * attr.enable_on_exec means that exec/mmap will install the
10531259 * breakpoints we need.
10541260 */
1055
- done = tu->filter.nr_systemwide ||
1261
+ done = filter->nr_systemwide ||
10561262 event->parent || event->attr.enable_on_exec ||
1057
- uprobe_filter_event(tu, event);
1058
- list_add(&event->hw.tp_list, &tu->filter.perf_events);
1263
+ trace_uprobe_filter_event(filter, event);
1264
+ list_add(&event->hw.tp_list, &filter->perf_events);
10591265 } else {
1060
- done = tu->filter.nr_systemwide;
1061
- tu->filter.nr_systemwide++;
1266
+ done = filter->nr_systemwide;
1267
+ filter->nr_systemwide++;
10621268 }
1063
- write_unlock(&tu->filter.rwlock);
1269
+ write_unlock(&filter->rwlock);
10641270
1065
- err = 0;
1066
- if (!done) {
1067
- err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1068
- if (err)
1069
- uprobe_perf_close(tu, event);
1271
+ return done;
1272
+}
1273
+
1274
+static int uprobe_perf_close(struct trace_event_call *call,
1275
+ struct perf_event *event)
1276
+{
1277
+ struct trace_probe *pos, *tp;
1278
+ struct trace_uprobe *tu;
1279
+ int ret = 0;
1280
+
1281
+ tp = trace_probe_primary_from_call(call);
1282
+ if (WARN_ON_ONCE(!tp))
1283
+ return -ENODEV;
1284
+
1285
+ tu = container_of(tp, struct trace_uprobe, tp);
1286
+ if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1287
+ return 0;
1288
+
1289
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1290
+ tu = container_of(pos, struct trace_uprobe, tp);
1291
+ ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1292
+ if (ret)
1293
+ break;
10701294 }
1295
+
1296
+ return ret;
1297
+}
1298
+
1299
+static int uprobe_perf_open(struct trace_event_call *call,
1300
+ struct perf_event *event)
1301
+{
1302
+ struct trace_probe *pos, *tp;
1303
+ struct trace_uprobe *tu;
1304
+ int err = 0;
1305
+
1306
+ tp = trace_probe_primary_from_call(call);
1307
+ if (WARN_ON_ONCE(!tp))
1308
+ return -ENODEV;
1309
+
1310
+ tu = container_of(tp, struct trace_uprobe, tp);
1311
+ if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1312
+ return 0;
1313
+
1314
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1315
+ tu = container_of(pos, struct trace_uprobe, tp);
1316
+ err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1317
+ if (err) {
1318
+ uprobe_perf_close(call, event);
1319
+ break;
1320
+ }
1321
+ }
1322
+
10711323 return err;
10721324 }
10731325
10741326 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
10751327 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
10761328 {
1329
+ struct trace_uprobe_filter *filter;
10771330 struct trace_uprobe *tu;
10781331 int ret;
10791332
10801333 tu = container_of(uc, struct trace_uprobe, consumer);
1081
- read_lock(&tu->filter.rwlock);
1082
- ret = __uprobe_perf_filter(&tu->filter, mm);
1083
- read_unlock(&tu->filter.rwlock);
1334
+ filter = tu->tp.event->filter;
1335
+
1336
+ read_lock(&filter->rwlock);
1337
+ ret = __uprobe_perf_filter(filter, mm);
1338
+ read_unlock(&filter->rwlock);
10841339
10851340 return ret;
10861341 }
....@@ -1089,15 +1344,22 @@
10891344 unsigned long func, struct pt_regs *regs,
10901345 struct uprobe_cpu_buffer *ucb, int dsize)
10911346 {
1092
- struct trace_event_call *call = &tu->tp.call;
1347
+ struct trace_event_call *call = trace_probe_event_call(&tu->tp);
10931348 struct uprobe_trace_entry_head *entry;
10941349 struct hlist_head *head;
10951350 void *data;
10961351 int size, esize;
10971352 int rctx;
10981353
1099
- if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1100
- return;
1354
+ if (bpf_prog_array_valid(call)) {
1355
+ u32 ret;
1356
+
1357
+ preempt_disable();
1358
+ ret = trace_call_bpf(call, regs);
1359
+ preempt_enable();
1360
+ if (!ret)
1361
+ return;
1362
+ }
11011363
11021364 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
11031365
....@@ -1168,7 +1430,7 @@
11681430 if (perf_type_tracepoint)
11691431 tu = find_probe_event(pevent, group);
11701432 else
1171
- tu = event->tp_event->data;
1433
+ tu = trace_uprobe_primary_from_call(event->tp_event);
11721434 if (!tu)
11731435 return -EINVAL;
11741436
....@@ -1184,36 +1446,34 @@
11841446 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
11851447 void *data)
11861448 {
1187
- struct trace_uprobe *tu = event->data;
11881449 struct trace_event_file *file = data;
11891450
11901451 switch (type) {
11911452 case TRACE_REG_REGISTER:
1192
- return probe_event_enable(tu, file, NULL);
1453
+ return probe_event_enable(event, file, NULL);
11931454
11941455 case TRACE_REG_UNREGISTER:
1195
- probe_event_disable(tu, file);
1456
+ probe_event_disable(event, file);
11961457 return 0;
11971458
11981459 #ifdef CONFIG_PERF_EVENTS
11991460 case TRACE_REG_PERF_REGISTER:
1200
- return probe_event_enable(tu, NULL, uprobe_perf_filter);
1461
+ return probe_event_enable(event, NULL, uprobe_perf_filter);
12011462
12021463 case TRACE_REG_PERF_UNREGISTER:
1203
- probe_event_disable(tu, NULL);
1464
+ probe_event_disable(event, NULL);
12041465 return 0;
12051466
12061467 case TRACE_REG_PERF_OPEN:
1207
- return uprobe_perf_open(tu, data);
1468
+ return uprobe_perf_open(event, data);
12081469
12091470 case TRACE_REG_PERF_CLOSE:
1210
- return uprobe_perf_close(tu, data);
1471
+ return uprobe_perf_close(event, data);
12111472
12121473 #endif
12131474 default:
12141475 return 0;
12151476 }
1216
- return 0;
12171477 }
12181478
12191479 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
....@@ -1240,13 +1500,13 @@
12401500 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
12411501
12421502 ucb = uprobe_buffer_get();
1243
- store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1503
+ store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
12441504
1245
- if (tu->tp.flags & TP_FLAG_TRACE)
1505
+ if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
12461506 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
12471507
12481508 #ifdef CONFIG_PERF_EVENTS
1249
- if (tu->tp.flags & TP_FLAG_PROFILE)
1509
+ if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
12501510 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
12511511 #endif
12521512 uprobe_buffer_put(ucb);
....@@ -1275,13 +1535,13 @@
12751535 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
12761536
12771537 ucb = uprobe_buffer_get();
1278
- store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1538
+ store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
12791539
1280
- if (tu->tp.flags & TP_FLAG_TRACE)
1540
+ if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
12811541 uretprobe_trace_func(tu, func, regs, ucb, dsize);
12821542
12831543 #ifdef CONFIG_PERF_EVENTS
1284
- if (tu->tp.flags & TP_FLAG_PROFILE)
1544
+ if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
12851545 uretprobe_perf_func(tu, func, regs, ucb, dsize);
12861546 #endif
12871547 uprobe_buffer_put(ucb);
....@@ -1292,62 +1552,38 @@
12921552 .trace = print_uprobe_event
12931553 };
12941554
1295
-static inline void init_trace_event_call(struct trace_uprobe *tu,
1296
- struct trace_event_call *call)
1297
-{
1298
- INIT_LIST_HEAD(&call->class->fields);
1299
- call->event.funcs = &uprobe_funcs;
1300
- call->class->define_fields = uprobe_event_define_fields;
1555
+static struct trace_event_fields uprobe_fields_array[] = {
1556
+ { .type = TRACE_FUNCTION_TYPE,
1557
+ .define_fields = uprobe_event_define_fields },
1558
+ {}
1559
+};
13011560
1302
- call->flags = TRACE_EVENT_FL_UPROBE;
1561
+static inline void init_trace_event_call(struct trace_uprobe *tu)
1562
+{
1563
+ struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1564
+ call->event.funcs = &uprobe_funcs;
1565
+ call->class->fields_array = uprobe_fields_array;
1566
+
1567
+ call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
13031568 call->class->reg = trace_uprobe_register;
1304
- call->data = tu;
13051569 }
13061570
13071571 static int register_uprobe_event(struct trace_uprobe *tu)
13081572 {
1309
- struct trace_event_call *call = &tu->tp.call;
1310
- int ret = 0;
1573
+ init_trace_event_call(tu);
13111574
1312
- init_trace_event_call(tu, call);
1313
-
1314
- if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1315
- return -ENOMEM;
1316
-
1317
- ret = register_trace_event(&call->event);
1318
- if (!ret) {
1319
- kfree(call->print_fmt);
1320
- return -ENODEV;
1321
- }
1322
-
1323
- ret = trace_add_event_call(call);
1324
-
1325
- if (ret) {
1326
- pr_info("Failed to register uprobe event: %s\n",
1327
- trace_event_name(call));
1328
- kfree(call->print_fmt);
1329
- unregister_trace_event(&call->event);
1330
- }
1331
-
1332
- return ret;
1575
+ return trace_probe_register_event_call(&tu->tp);
13331576 }
13341577
13351578 static int unregister_uprobe_event(struct trace_uprobe *tu)
13361579 {
1337
- int ret;
1338
-
1339
- /* tu->event is unregistered in trace_remove_event_call() */
1340
- ret = trace_remove_event_call(&tu->tp.call);
1341
- if (ret)
1342
- return ret;
1343
- kfree(tu->tp.call.print_fmt);
1344
- tu->tp.call.print_fmt = NULL;
1345
- return 0;
1580
+ return trace_probe_unregister_event_call(&tu->tp);
13461581 }
13471582
13481583 #ifdef CONFIG_PERF_EVENTS
13491584 struct trace_event_call *
1350
-create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
1585
+create_local_trace_uprobe(char *name, unsigned long offs,
1586
+ unsigned long ref_ctr_offset, bool is_return)
13511587 {
13521588 struct trace_uprobe *tu;
13531589 struct path path;
....@@ -1363,7 +1599,7 @@
13631599 }
13641600
13651601 /*
1366
- * local trace_kprobes are not added to probe_list, so they are never
1602
+ * local trace_kprobes are not added to dyn_event, so they are never
13671603 * searched in find_trace_kprobe(). Therefore, there is no concern of
13681604 * duplicated name "DUMMY_EVENT" here.
13691605 */
@@ -1379,15 +1615,16 @@
 
 	tu->offset = offs;
 	tu->path = path;
+	tu->ref_ctr_offset = ref_ctr_offset;
 	tu->filename = kstrdup(name, GFP_KERNEL);
-	init_trace_event_call(tu, &tu->tp.call);
+	init_trace_event_call(tu);
 
-	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
+	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
 		ret = -ENOMEM;
 		goto error;
 	}
 
-	return &tu->tp.call;
+	return trace_probe_event_call(&tu->tp);
 error:
 	free_trace_uprobe(tu);
 	return ERR_PTR(ret);
@@ -1397,10 +1634,7 @@
 {
 	struct trace_uprobe *tu;
 
-	tu = container_of(event_call, struct trace_uprobe, tp.call);
-
-	kfree(tu->tp.call.print_fmt);
-	tu->tp.call.print_fmt = NULL;
+	tu = trace_uprobe_primary_from_call(event_call);
 
 	free_trace_uprobe(tu);
 }
@@ -1409,16 +1643,20 @@
 /* Make a trace interface for controling probe points */
 static __init int init_uprobe_trace(void)
 {
-	struct dentry *d_tracer;
+	int ret;
 
-	d_tracer = tracing_init_dentry();
-	if (IS_ERR(d_tracer))
+	ret = dyn_event_register(&trace_uprobe_ops);
+	if (ret)
+		return ret;
+
+	ret = tracing_init_dentry();
+	if (ret)
 		return 0;
 
-	trace_create_file("uprobe_events", 0644, d_tracer,
+	trace_create_file("uprobe_events", 0644, NULL,
 			  NULL, &uprobe_events_ops);
 	/* Profile interface */
-	trace_create_file("uprobe_profile", 0444, d_tracer,
+	trace_create_file("uprobe_profile", 0444, NULL,
 			  NULL, &uprobe_profile_ops);
 	return 0;
 }