forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/kernel/trace/trace_uprobe.c
@@ -7,6 +7,8 @@
  */
 #define pr_fmt(fmt) "trace_uprobe: " fmt

+#include <linux/security.h>
+#include <linux/ctype.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/uprobes.h>
@@ -14,7 +16,9 @@
 #include <linux/string.h>
 #include <linux/rculist.h>

+#include "trace_dynevent.h"
 #include "trace_probe.h"
+#include "trace_probe_tmpl.h"

 #define UPROBE_EVENT_SYSTEM "uprobes"

@@ -30,26 +34,54 @@
3034 #define DATAOF_TRACE_ENTRY(entry, is_return) \
3135 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
3236
33
-struct trace_uprobe_filter {
34
- rwlock_t rwlock;
35
- int nr_systemwide;
36
- struct list_head perf_events;
37
+static int trace_uprobe_create(int argc, const char **argv);
38
+static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
39
+static int trace_uprobe_release(struct dyn_event *ev);
40
+static bool trace_uprobe_is_busy(struct dyn_event *ev);
41
+static bool trace_uprobe_match(const char *system, const char *event,
42
+ int argc, const char **argv, struct dyn_event *ev);
43
+
44
+static struct dyn_event_operations trace_uprobe_ops = {
45
+ .create = trace_uprobe_create,
46
+ .show = trace_uprobe_show,
47
+ .is_busy = trace_uprobe_is_busy,
48
+ .free = trace_uprobe_release,
49
+ .match = trace_uprobe_match,
3750 };
3851
3952 /*
4053 * uprobe event core functions
4154 */
4255 struct trace_uprobe {
43
- struct list_head list;
44
- struct trace_uprobe_filter filter;
56
+ struct dyn_event devent;
4557 struct uprobe_consumer consumer;
4658 struct path path;
4759 struct inode *inode;
4860 char *filename;
4961 unsigned long offset;
62
+ unsigned long ref_ctr_offset;
5063 unsigned long nhit;
5164 struct trace_probe tp;
5265 };
66
+
67
+static bool is_trace_uprobe(struct dyn_event *ev)
68
+{
69
+ return ev->ops == &trace_uprobe_ops;
70
+}
71
+
72
+static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
73
+{
74
+ return container_of(ev, struct trace_uprobe, devent);
75
+}
76
+
77
+/**
78
+ * for_each_trace_uprobe - iterate over the trace_uprobe list
79
+ * @pos: the struct trace_uprobe * for each entry
80
+ * @dpos: the struct dyn_event * to use as a loop cursor
81
+ */
82
+#define for_each_trace_uprobe(pos, dpos) \
83
+ for_each_dyn_event(dpos) \
84
+ if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
5385
5486 #define SIZEOF_TRACE_UPROBE(n) \
5587 (offsetof(struct trace_uprobe, tp.args) + \
@@ -57,9 +89,6 @@

 static int register_uprobe_event(struct trace_uprobe *tu);
 static int unregister_uprobe_event(struct trace_uprobe *tu);
-
-static DEFINE_MUTEX(uprobe_lock);
-static LIST_HEAD(uprobe_list);

 struct uprobe_dispatch_data {
	struct trace_uprobe *tu;
@@ -98,81 +127,84 @@
98127 /*
99128 * Uprobes-specific fetch functions
100129 */
101
-#define DEFINE_FETCH_stack(type) \
102
-static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
103
- void *offset, void *dest) \
104
-{ \
105
- *(type *)dest = (type)get_user_stack_nth(regs, \
106
- ((unsigned long)offset)); \
107
-}
108
-DEFINE_BASIC_FETCH_FUNCS(stack)
109
-/* No string on the stack entry */
110
-#define fetch_stack_string NULL
111
-#define fetch_stack_string_size NULL
130
+static nokprobe_inline int
131
+probe_mem_read(void *dest, void *src, size_t size)
132
+{
133
+ void __user *vaddr = (void __force __user *)src;
112134
113
-#define DEFINE_FETCH_memory(type) \
114
-static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
115
- void *addr, void *dest) \
116
-{ \
117
- type retval; \
118
- void __user *vaddr = (void __force __user *) addr; \
119
- \
120
- if (copy_from_user(&retval, vaddr, sizeof(type))) \
121
- *(type *)dest = 0; \
122
- else \
123
- *(type *) dest = retval; \
135
+ return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
124136 }
125
-DEFINE_BASIC_FETCH_FUNCS(memory)
137
+
138
+static nokprobe_inline int
139
+probe_mem_read_user(void *dest, void *src, size_t size)
140
+{
141
+ return probe_mem_read(dest, src, size);
142
+}
143
+
126144 /*
127145 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
128146 * length and relative data location.
129147 */
130
-static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
131
- void *addr, void *dest)
148
+static nokprobe_inline int
149
+fetch_store_string(unsigned long addr, void *dest, void *base)
132150 {
133151 long ret;
134
- u32 rloc = *(u32 *)dest;
135
- int maxlen = get_rloc_len(rloc);
136
- u8 *dst = get_rloc_data(dest);
152
+ u32 loc = *(u32 *)dest;
153
+ int maxlen = get_loc_len(loc);
154
+ u8 *dst = get_loc_data(dest, base);
137155 void __user *src = (void __force __user *) addr;
138156
139
- if (!maxlen)
140
- return;
157
+ if (unlikely(!maxlen))
158
+ return -ENOMEM;
141159
142
- ret = strncpy_from_user(dst, src, maxlen);
143
- if (ret == maxlen)
144
- dst[ret - 1] = '\0';
145
- else if (ret >= 0)
146
- /*
147
- * Include the terminating null byte. In this case it
148
- * was copied by strncpy_from_user but not accounted
149
- * for in ret.
150
- */
151
- ret++;
152
-
153
- if (ret < 0) { /* Failed to fetch string */
154
- ((u8 *)get_rloc_data(dest))[0] = '\0';
155
- *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
156
- } else {
157
- *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
160
+ if (addr == FETCH_TOKEN_COMM)
161
+ ret = strlcpy(dst, current->comm, maxlen);
162
+ else
163
+ ret = strncpy_from_user(dst, src, maxlen);
164
+ if (ret >= 0) {
165
+ if (ret == maxlen)
166
+ dst[ret - 1] = '\0';
167
+ else
168
+ /*
169
+ * Include the terminating null byte. In this case it
170
+ * was copied by strncpy_from_user but not accounted
171
+ * for in ret.
172
+ */
173
+ ret++;
174
+ *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
158175 }
176
+
177
+ return ret;
159178 }
160179
161
-static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
162
- void *addr, void *dest)
180
+static nokprobe_inline int
181
+fetch_store_string_user(unsigned long addr, void *dest, void *base)
182
+{
183
+ return fetch_store_string(addr, dest, base);
184
+}
185
+
186
+/* Return the length of string -- including null terminal byte */
187
+static nokprobe_inline int
188
+fetch_store_strlen(unsigned long addr)
163189 {
164190 int len;
165191 void __user *vaddr = (void __force __user *) addr;
166192
167
- len = strnlen_user(vaddr, MAX_STRING_SIZE);
168
-
169
- if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
170
- *(u32 *)dest = 0;
193
+ if (addr == FETCH_TOKEN_COMM)
194
+ len = strlen(current->comm) + 1;
171195 else
172
- *(u32 *)dest = len;
196
+ len = strnlen_user(vaddr, MAX_STRING_SIZE);
197
+
198
+ return (len > MAX_STRING_SIZE) ? 0 : len;
173199 }
174200
175
-static unsigned long translate_user_vaddr(void *file_offset)
201
+static nokprobe_inline int
202
+fetch_store_strlen_user(unsigned long addr)
203
+{
204
+ return fetch_store_strlen(addr);
205
+}
206
+
207
+static unsigned long translate_user_vaddr(unsigned long file_offset)
176208 {
177209 unsigned long base_addr;
178210 struct uprobe_dispatch_data *udd;
@@ -180,44 +212,51 @@
180212 udd = (void *) current->utask->vaddr;
181213
182214 base_addr = udd->bp_addr - udd->tu->offset;
183
- return base_addr + (unsigned long)file_offset;
215
+ return base_addr + file_offset;
184216 }
185217
186
-#define DEFINE_FETCH_file_offset(type) \
187
-static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
188
- void *offset, void *dest)\
189
-{ \
190
- void *vaddr = (void *)translate_user_vaddr(offset); \
191
- \
192
- FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \
218
+/* Note that we don't verify it, since the code does not come from user space */
219
+static int
220
+process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
221
+ void *base)
222
+{
223
+ struct pt_regs *regs = rec;
224
+ unsigned long val;
225
+
226
+ /* 1st stage: get value from context */
227
+ switch (code->op) {
228
+ case FETCH_OP_REG:
229
+ val = regs_get_register(regs, code->param);
230
+ break;
231
+ case FETCH_OP_STACK:
232
+ val = get_user_stack_nth(regs, code->param);
233
+ break;
234
+ case FETCH_OP_STACKP:
235
+ val = user_stack_pointer(regs);
236
+ break;
237
+ case FETCH_OP_RETVAL:
238
+ val = regs_return_value(regs);
239
+ break;
240
+ case FETCH_OP_IMM:
241
+ val = code->immediate;
242
+ break;
243
+ case FETCH_OP_COMM:
244
+ val = FETCH_TOKEN_COMM;
245
+ break;
246
+ case FETCH_OP_DATA:
247
+ val = (unsigned long)code->data;
248
+ break;
249
+ case FETCH_OP_FOFFS:
250
+ val = translate_user_vaddr(code->immediate);
251
+ break;
252
+ default:
253
+ return -EILSEQ;
254
+ }
255
+ code++;
256
+
257
+ return process_fetch_insn_bottom(code, val, dest, base);
193258 }
194
-DEFINE_BASIC_FETCH_FUNCS(file_offset)
195
-DEFINE_FETCH_file_offset(string)
196
-DEFINE_FETCH_file_offset(string_size)
197
-
198
-/* Fetch type information table */
199
-static const struct fetch_type uprobes_fetch_type_table[] = {
200
- /* Special types */
201
- [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
202
- sizeof(u32), 1, "__data_loc char[]"),
203
- [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
204
- string_size, sizeof(u32), 0, "u32"),
205
- /* Basic types */
206
- ASSIGN_FETCH_TYPE(u8, u8, 0),
207
- ASSIGN_FETCH_TYPE(u16, u16, 0),
208
- ASSIGN_FETCH_TYPE(u32, u32, 0),
209
- ASSIGN_FETCH_TYPE(u64, u64, 0),
210
- ASSIGN_FETCH_TYPE(s8, u8, 1),
211
- ASSIGN_FETCH_TYPE(s16, u16, 1),
212
- ASSIGN_FETCH_TYPE(s32, u32, 1),
213
- ASSIGN_FETCH_TYPE(s64, u64, 1),
214
- ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
215
- ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
216
- ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
217
- ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
218
-
219
- ASSIGN_FETCH_TYPE_END
220
-};
259
+NOKPROBE_SYMBOL(process_fetch_insn)
221260
222261 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
223262 {
@@ -236,6 +275,63 @@
236275 return tu->consumer.ret_handler != NULL;
237276 }
238277
278
+static bool trace_uprobe_is_busy(struct dyn_event *ev)
279
+{
280
+ struct trace_uprobe *tu = to_trace_uprobe(ev);
281
+
282
+ return trace_probe_is_enabled(&tu->tp);
283
+}
284
+
285
+static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
286
+ int argc, const char **argv)
287
+{
288
+ char buf[MAX_ARGSTR_LEN + 1];
289
+ int len;
290
+
291
+ if (!argc)
292
+ return true;
293
+
294
+ len = strlen(tu->filename);
295
+ if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
296
+ return false;
297
+
298
+ if (tu->ref_ctr_offset == 0)
299
+ snprintf(buf, sizeof(buf), "0x%0*lx",
300
+ (int)(sizeof(void *) * 2), tu->offset);
301
+ else
302
+ snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
303
+ (int)(sizeof(void *) * 2), tu->offset,
304
+ tu->ref_ctr_offset);
305
+ if (strcmp(buf, &argv[0][len + 1]))
306
+ return false;
307
+
308
+ argc--; argv++;
309
+
310
+ return trace_probe_match_command_args(&tu->tp, argc, argv);
311
+}
312
+
313
+static bool trace_uprobe_match(const char *system, const char *event,
314
+ int argc, const char **argv, struct dyn_event *ev)
315
+{
316
+ struct trace_uprobe *tu = to_trace_uprobe(ev);
317
+
318
+ return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
319
+ (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
320
+ trace_uprobe_match_command_head(tu, argc, argv);
321
+}
322
+
323
+static nokprobe_inline struct trace_uprobe *
324
+trace_uprobe_primary_from_call(struct trace_event_call *call)
325
+{
326
+ struct trace_probe *tp;
327
+
328
+ tp = trace_probe_primary_from_call(call);
329
+ if (WARN_ON_ONCE(!tp))
330
+ return NULL;
331
+
332
+ return container_of(tp, struct trace_uprobe, tp);
333
+}
334
+
239335 /*
240336 * Allocate new trace_uprobe and initialize it (including uprobes).
241337 */
@@ -243,78 +339,151 @@
243339 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
244340 {
245341 struct trace_uprobe *tu;
246
-
247
- if (!event || !is_good_name(event))
248
- return ERR_PTR(-EINVAL);
249
-
250
- if (!group || !is_good_name(group))
251
- return ERR_PTR(-EINVAL);
342
+ int ret;
252343
253344 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
254345 if (!tu)
255346 return ERR_PTR(-ENOMEM);
256347
257
- tu->tp.call.class = &tu->tp.class;
258
- tu->tp.call.name = kstrdup(event, GFP_KERNEL);
259
- if (!tu->tp.call.name)
348
+ ret = trace_probe_init(&tu->tp, event, group, true);
349
+ if (ret < 0)
260350 goto error;
261351
262
- tu->tp.class.system = kstrdup(group, GFP_KERNEL);
263
- if (!tu->tp.class.system)
264
- goto error;
265
-
266
- INIT_LIST_HEAD(&tu->list);
267
- INIT_LIST_HEAD(&tu->tp.files);
352
+ dyn_event_init(&tu->devent, &trace_uprobe_ops);
268353 tu->consumer.handler = uprobe_dispatcher;
269354 if (is_ret)
270355 tu->consumer.ret_handler = uretprobe_dispatcher;
271
- init_trace_uprobe_filter(&tu->filter);
356
+ init_trace_uprobe_filter(tu->tp.event->filter);
272357 return tu;
273358
274359 error:
275
- kfree(tu->tp.call.name);
276360 kfree(tu);
277361
278
- return ERR_PTR(-ENOMEM);
362
+ return ERR_PTR(ret);
279363 }
280364
281365 static void free_trace_uprobe(struct trace_uprobe *tu)
282366 {
283
- int i;
284
-
285
- for (i = 0; i < tu->tp.nr_args; i++)
286
- traceprobe_free_probe_arg(&tu->tp.args[i]);
367
+ if (!tu)
368
+ return;
287369
288370 path_put(&tu->path);
289
- kfree(tu->tp.call.class->system);
290
- kfree(tu->tp.call.name);
371
+ trace_probe_cleanup(&tu->tp);
291372 kfree(tu->filename);
292373 kfree(tu);
293374 }
294375
295376 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
296377 {
378
+ struct dyn_event *pos;
297379 struct trace_uprobe *tu;
298380
299
- list_for_each_entry(tu, &uprobe_list, list)
300
- if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
301
- strcmp(tu->tp.call.class->system, group) == 0)
381
+ for_each_trace_uprobe(tu, pos)
382
+ if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
383
+ strcmp(trace_probe_group_name(&tu->tp), group) == 0)
302384 return tu;
303385
304386 return NULL;
305387 }
306388
307
-/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
389
+/* Unregister a trace_uprobe and probe_event */
308390 static int unregister_trace_uprobe(struct trace_uprobe *tu)
309391 {
310392 int ret;
393
+
394
+ if (trace_probe_has_sibling(&tu->tp))
395
+ goto unreg;
311396
312397 ret = unregister_uprobe_event(tu);
313398 if (ret)
314399 return ret;
315400
316
- list_del(&tu->list);
401
+unreg:
402
+ dyn_event_remove(&tu->devent);
403
+ trace_probe_unlink(&tu->tp);
317404 free_trace_uprobe(tu);
405
+ return 0;
406
+}
407
+
408
+static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
409
+ struct trace_uprobe *comp)
410
+{
411
+ struct trace_probe_event *tpe = orig->tp.event;
412
+ struct trace_probe *pos;
413
+ struct inode *comp_inode = d_real_inode(comp->path.dentry);
414
+ int i;
415
+
416
+ list_for_each_entry(pos, &tpe->probes, list) {
417
+ orig = container_of(pos, struct trace_uprobe, tp);
418
+ if (comp_inode != d_real_inode(orig->path.dentry) ||
419
+ comp->offset != orig->offset)
420
+ continue;
421
+
422
+ /*
423
+ * trace_probe_compare_arg_type() ensured that nr_args and
424
+ * each argument name and type are same. Let's compare comm.
425
+ */
426
+ for (i = 0; i < orig->tp.nr_args; i++) {
427
+ if (strcmp(orig->tp.args[i].comm,
428
+ comp->tp.args[i].comm))
429
+ break;
430
+ }
431
+
432
+ if (i == orig->tp.nr_args)
433
+ return true;
434
+ }
435
+
436
+ return false;
437
+}
438
+
439
+static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
440
+{
441
+ int ret;
442
+
443
+ ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
444
+ if (ret) {
445
+ /* Note that argument starts index = 2 */
446
+ trace_probe_log_set_index(ret + 1);
447
+ trace_probe_log_err(0, DIFF_ARG_TYPE);
448
+ return -EEXIST;
449
+ }
450
+ if (trace_uprobe_has_same_uprobe(to, tu)) {
451
+ trace_probe_log_set_index(0);
452
+ trace_probe_log_err(0, SAME_PROBE);
453
+ return -EEXIST;
454
+ }
455
+
456
+ /* Append to existing event */
457
+ ret = trace_probe_append(&tu->tp, &to->tp);
458
+ if (!ret)
459
+ dyn_event_add(&tu->devent);
460
+
461
+ return ret;
462
+}
463
+
464
+/*
465
+ * Uprobe with multiple reference counter is not allowed. i.e.
466
+ * If inode and offset matches, reference counter offset *must*
467
+ * match as well. Though, there is one exception: If user is
468
+ * replacing old trace_uprobe with new one(same group/event),
469
+ * then we allow same uprobe with new reference counter as far
470
+ * as the new one does not conflict with any other existing
471
+ * ones.
472
+ */
473
+static int validate_ref_ctr_offset(struct trace_uprobe *new)
474
+{
475
+ struct dyn_event *pos;
476
+ struct trace_uprobe *tmp;
477
+ struct inode *new_inode = d_real_inode(new->path.dentry);
478
+
479
+ for_each_trace_uprobe(tmp, pos) {
480
+ if (new_inode == d_real_inode(tmp->path.dentry) &&
481
+ new->offset == tmp->offset &&
482
+ new->ref_ctr_offset != tmp->ref_ctr_offset) {
483
+ pr_warn("Reference counter offset mismatch.");
484
+ return -EINVAL;
485
+ }
486
+ }
318487 return 0;
319488 }
320489
@@ -324,136 +493,163 @@
324493 struct trace_uprobe *old_tu;
325494 int ret;
326495
327
- mutex_lock(&uprobe_lock);
496
+ mutex_lock(&event_mutex);
497
+
498
+ ret = validate_ref_ctr_offset(tu);
499
+ if (ret)
500
+ goto end;
328501
329502 /* register as an event */
330
- old_tu = find_probe_event(trace_event_name(&tu->tp.call),
331
- tu->tp.call.class->system);
503
+ old_tu = find_probe_event(trace_probe_name(&tu->tp),
504
+ trace_probe_group_name(&tu->tp));
332505 if (old_tu) {
333
- /* delete old event */
334
- ret = unregister_trace_uprobe(old_tu);
335
- if (ret)
336
- goto end;
506
+ if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
507
+ trace_probe_log_set_index(0);
508
+ trace_probe_log_err(0, DIFF_PROBE_TYPE);
509
+ ret = -EEXIST;
510
+ } else {
511
+ ret = append_trace_uprobe(tu, old_tu);
512
+ }
513
+ goto end;
337514 }
338515
339516 ret = register_uprobe_event(tu);
340517 if (ret) {
341
- pr_warn("Failed to register probe event(%d)\n", ret);
518
+ if (ret == -EEXIST) {
519
+ trace_probe_log_set_index(0);
520
+ trace_probe_log_err(0, EVENT_EXIST);
521
+ } else
522
+ pr_warn("Failed to register probe event(%d)\n", ret);
342523 goto end;
343524 }
344525
345
- list_add_tail(&tu->list, &uprobe_list);
526
+ dyn_event_add(&tu->devent);
346527
347528 end:
348
- mutex_unlock(&uprobe_lock);
529
+ mutex_unlock(&event_mutex);
349530
350531 return ret;
351532 }
352533
353534 /*
354535 * Argument syntax:
355
- * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
356
- *
357
- * - Remove uprobe: -:[GRP/]EVENT
536
+ * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
358537 */
359
-static int create_trace_uprobe(int argc, char **argv)
538
+static int trace_uprobe_create(int argc, const char **argv)
360539 {
361540 struct trace_uprobe *tu;
362
- char *arg, *event, *group, *filename;
541
+ const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
542
+ char *arg, *filename, *rctr, *rctr_end, *tmp;
363543 char buf[MAX_EVENT_NAME_LEN];
364544 struct path path;
365
- unsigned long offset;
366
- bool is_delete, is_return;
545
+ unsigned long offset, ref_ctr_offset;
546
+ bool is_return = false;
367547 int i, ret;
368548
369549 ret = 0;
370
- is_delete = false;
371
- is_return = false;
372
- event = NULL;
373
- group = NULL;
550
+ ref_ctr_offset = 0;
374551
375
- /* argc must be >= 1 */
376
- if (argv[0][0] == '-')
377
- is_delete = true;
378
- else if (argv[0][0] == 'r')
552
+ switch (argv[0][0]) {
553
+ case 'r':
379554 is_return = true;
380
- else if (argv[0][0] != 'p') {
381
- pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
382
- return -EINVAL;
555
+ break;
556
+ case 'p':
557
+ break;
558
+ default:
559
+ return -ECANCELED;
383560 }
384561
385
- if (argv[0][1] == ':') {
562
+ if (argc < 2)
563
+ return -ECANCELED;
564
+
565
+ if (argv[0][1] == ':')
386566 event = &argv[0][2];
387
- arg = strchr(event, '/');
388567
389
- if (arg) {
390
- group = event;
391
- event = arg + 1;
392
- event[-1] = '\0';
568
+ if (!strchr(argv[1], '/'))
569
+ return -ECANCELED;
393570
394
- if (strlen(group) == 0) {
395
- pr_info("Group name is not specified\n");
396
- return -EINVAL;
397
- }
398
- }
399
- if (strlen(event) == 0) {
400
- pr_info("Event name is not specified\n");
401
- return -EINVAL;
402
- }
403
- }
404
- if (!group)
405
- group = UPROBE_EVENT_SYSTEM;
571
+ filename = kstrdup(argv[1], GFP_KERNEL);
572
+ if (!filename)
573
+ return -ENOMEM;
406574
407
- if (is_delete) {
408
- int ret;
409
-
410
- if (!event) {
411
- pr_info("Delete command needs an event name.\n");
412
- return -EINVAL;
413
- }
414
- mutex_lock(&uprobe_lock);
415
- tu = find_probe_event(event, group);
416
-
417
- if (!tu) {
418
- mutex_unlock(&uprobe_lock);
419
- pr_info("Event %s/%s doesn't exist.\n", group, event);
420
- return -ENOENT;
421
- }
422
- /* delete an event */
423
- ret = unregister_trace_uprobe(tu);
424
- mutex_unlock(&uprobe_lock);
425
- return ret;
426
- }
427
-
428
- if (argc < 2) {
429
- pr_info("Probe point is not specified.\n");
430
- return -EINVAL;
431
- }
432575 /* Find the last occurrence, in case the path contains ':' too. */
433
- arg = strrchr(argv[1], ':');
434
- if (!arg)
435
- return -EINVAL;
576
+ arg = strrchr(filename, ':');
577
+ if (!arg || !isdigit(arg[1])) {
578
+ kfree(filename);
579
+ return -ECANCELED;
580
+ }
581
+
582
+ trace_probe_log_init("trace_uprobe", argc, argv);
583
+ trace_probe_log_set_index(1); /* filename is the 2nd argument */
436584
437585 *arg++ = '\0';
438
- filename = argv[1];
439586 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
440
- if (ret)
587
+ if (ret) {
588
+ trace_probe_log_err(0, FILE_NOT_FOUND);
589
+ kfree(filename);
590
+ trace_probe_log_clear();
441591 return ret;
442
-
592
+ }
443593 if (!d_is_reg(path.dentry)) {
594
+ trace_probe_log_err(0, NO_REGULAR_FILE);
444595 ret = -EINVAL;
445596 goto fail_address_parse;
446597 }
447598
448
- ret = kstrtoul(arg, 0, &offset);
449
- if (ret)
450
- goto fail_address_parse;
599
+ /* Parse reference counter offset if specified. */
600
+ rctr = strchr(arg, '(');
601
+ if (rctr) {
602
+ rctr_end = strchr(rctr, ')');
603
+ if (!rctr_end) {
604
+ ret = -EINVAL;
605
+ rctr_end = rctr + strlen(rctr);
606
+ trace_probe_log_err(rctr_end - filename,
607
+ REFCNT_OPEN_BRACE);
608
+ goto fail_address_parse;
609
+ } else if (rctr_end[1] != '\0') {
610
+ ret = -EINVAL;
611
+ trace_probe_log_err(rctr_end + 1 - filename,
612
+ BAD_REFCNT_SUFFIX);
613
+ goto fail_address_parse;
614
+ }
451615
452
- argc -= 2;
453
- argv += 2;
616
+ *rctr++ = '\0';
617
+ *rctr_end = '\0';
618
+ ret = kstrtoul(rctr, 0, &ref_ctr_offset);
619
+ if (ret) {
620
+ trace_probe_log_err(rctr - filename, BAD_REFCNT);
621
+ goto fail_address_parse;
622
+ }
623
+ }
624
+
625
+ /* Check if there is %return suffix */
626
+ tmp = strchr(arg, '%');
627
+ if (tmp) {
628
+ if (!strcmp(tmp, "%return")) {
629
+ *tmp = '\0';
630
+ is_return = true;
631
+ } else {
632
+ trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
633
+ ret = -EINVAL;
634
+ goto fail_address_parse;
635
+ }
636
+ }
637
+
638
+ /* Parse uprobe offset. */
639
+ ret = kstrtoul(arg, 0, &offset);
640
+ if (ret) {
641
+ trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
642
+ goto fail_address_parse;
643
+ }
454644
455645 /* setup a probe */
456
- if (!event) {
646
+ trace_probe_log_set_index(0);
647
+ if (event) {
648
+ ret = traceprobe_parse_event_name(&event, &group, buf,
649
+ event - argv[0]);
650
+ if (ret)
651
+ goto fail_address_parse;
652
+ } else {
457653 char *tail;
458654 char *ptr;
459655
@@ -472,130 +668,90 @@
472668 kfree(tail);
473669 }
474670
671
+ argc -= 2;
672
+ argv += 2;
673
+
475674 tu = alloc_trace_uprobe(group, event, argc, is_return);
476675 if (IS_ERR(tu)) {
477
- pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
478676 ret = PTR_ERR(tu);
677
+ /* This must return -ENOMEM otherwise there is a bug */
678
+ WARN_ON_ONCE(ret != -ENOMEM);
479679 goto fail_address_parse;
480680 }
481681 tu->offset = offset;
682
+ tu->ref_ctr_offset = ref_ctr_offset;
482683 tu->path = path;
483
- tu->filename = kstrdup(filename, GFP_KERNEL);
484
-
485
- if (!tu->filename) {
486
- pr_info("Failed to allocate filename.\n");
487
- ret = -ENOMEM;
488
- goto error;
489
- }
684
+ tu->filename = filename;
490685
491686 /* parse arguments */
492
- ret = 0;
493687 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
494
- struct probe_arg *parg = &tu->tp.args[i];
495
-
496
- /* Increment count for freeing args in error case */
497
- tu->tp.nr_args++;
498
-
499
- /* Parse argument name */
500
- arg = strchr(argv[i], '=');
501
- if (arg) {
502
- *arg++ = '\0';
503
- parg->name = kstrdup(argv[i], GFP_KERNEL);
504
- } else {
505
- arg = argv[i];
506
- /* If argument name is omitted, set "argN" */
507
- snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
508
- parg->name = kstrdup(buf, GFP_KERNEL);
509
- }
510
-
511
- if (!parg->name) {
512
- pr_info("Failed to allocate argument[%d] name.\n", i);
688
+ tmp = kstrdup(argv[i], GFP_KERNEL);
689
+ if (!tmp) {
513690 ret = -ENOMEM;
514691 goto error;
515692 }
516693
517
- if (!is_good_name(parg->name)) {
518
- pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
519
- ret = -EINVAL;
694
+ trace_probe_log_set_index(i + 2);
695
+ ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
696
+ is_return ? TPARG_FL_RETURN : 0);
697
+ kfree(tmp);
698
+ if (ret)
520699 goto error;
521
- }
522
-
523
- if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
524
- pr_info("Argument[%d] name '%s' conflicts with "
525
- "another field.\n", i, argv[i]);
526
- ret = -EINVAL;
527
- goto error;
528
- }
529
-
530
- /* Parse fetch argument */
531
- ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
532
- is_return, false,
533
- uprobes_fetch_type_table);
534
- if (ret) {
535
- pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
536
- goto error;
537
- }
538700 }
539701
540
- ret = register_trace_uprobe(tu);
541
- if (ret)
702
+ ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
703
+ if (ret < 0)
542704 goto error;
543
- return 0;
705
+
706
+ ret = register_trace_uprobe(tu);
707
+ if (!ret)
708
+ goto out;
544709
545710 error:
546711 free_trace_uprobe(tu);
712
+out:
713
+ trace_probe_log_clear();
547714 return ret;
548715
549716 fail_address_parse:
717
+ trace_probe_log_clear();
550718 path_put(&path);
551
-
552
- pr_info("Failed to parse address or file.\n");
719
+ kfree(filename);
553720
554721 return ret;
555722 }
556723
557
-static int cleanup_all_probes(void)
724
+static int create_or_delete_trace_uprobe(int argc, char **argv)
558725 {
559
- struct trace_uprobe *tu;
560
- int ret = 0;
726
+ int ret;
561727
562
- mutex_lock(&uprobe_lock);
563
- while (!list_empty(&uprobe_list)) {
564
- tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
565
- ret = unregister_trace_uprobe(tu);
566
- if (ret)
567
- break;
568
- }
569
- mutex_unlock(&uprobe_lock);
570
- return ret;
728
+ if (argv[0][0] == '-')
729
+ return dyn_event_release(argc, argv, &trace_uprobe_ops);
730
+
731
+ ret = trace_uprobe_create(argc, (const char **)argv);
732
+ return ret == -ECANCELED ? -EINVAL : ret;
733
+}
734
+
735
+static int trace_uprobe_release(struct dyn_event *ev)
736
+{
737
+ struct trace_uprobe *tu = to_trace_uprobe(ev);
738
+
739
+ return unregister_trace_uprobe(tu);
571740 }
572741
573742 /* Probes listing interfaces */
574
-static void *probes_seq_start(struct seq_file *m, loff_t *pos)
743
+static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
575744 {
576
- mutex_lock(&uprobe_lock);
577
- return seq_list_start(&uprobe_list, *pos);
578
-}
579
-
580
-static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
581
-{
582
- return seq_list_next(v, &uprobe_list, pos);
583
-}
584
-
585
-static void probes_seq_stop(struct seq_file *m, void *v)
586
-{
587
- mutex_unlock(&uprobe_lock);
588
-}
589
-
590
-static int probes_seq_show(struct seq_file *m, void *v)
591
-{
592
- struct trace_uprobe *tu = v;
745
+ struct trace_uprobe *tu = to_trace_uprobe(ev);
593746 char c = is_ret_probe(tu) ? 'r' : 'p';
594747 int i;
595748
596
- seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
597
- trace_event_name(&tu->tp.call), tu->filename,
749
+ seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
750
+ trace_probe_name(&tu->tp), tu->filename,
598751 (int)(sizeof(void *) * 2), tu->offset);
752
+
753
+ if (tu->ref_ctr_offset)
754
+ seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
599755
600756 for (i = 0; i < tu->tp.nr_args; i++)
601757 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
@@ -604,19 +760,33 @@
	return 0;
 }

+static int probes_seq_show(struct seq_file *m, void *v)
+{
+	struct dyn_event *ev = v;
+
+	if (!is_trace_uprobe(ev))
+		return 0;
+
+	return trace_uprobe_show(m, ev);
+}
+
 static const struct seq_operations probes_seq_op = {
-	.start = probes_seq_start,
-	.next = probes_seq_next,
-	.stop = probes_seq_stop,
-	.show = probes_seq_show
+	.start = dyn_event_seq_start,
+	.next = dyn_event_seq_next,
+	.stop = dyn_event_seq_stop,
+	.show = probes_seq_show
 };

 static int probes_open(struct inode *inode, struct file *file)
 {
	int ret;

+	ret = security_locked_down(LOCKDOWN_TRACEFS);
+	if (ret)
+		return ret;
+
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
-		ret = cleanup_all_probes();
+		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}
@@ -627,7 +797,8 @@
 static ssize_t probes_write(struct file *file, const char __user *buffer,
			size_t count, loff_t *ppos)
 {
-	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
+	return trace_parse_run_command(file, buffer, count, ppos,
+				create_or_delete_trace_uprobe);
 }

 static const struct file_operations uprobe_events_ops = {
@@ -642,22 +813,33 @@
 /* Probes profiling interfaces */
 static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
-	struct trace_uprobe *tu = v;
+	struct dyn_event *ev = v;
+	struct trace_uprobe *tu;

+	if (!is_trace_uprobe(ev))
+		return 0;
+
+	tu = to_trace_uprobe(ev);
	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
-			trace_event_name(&tu->tp.call), tu->nhit);
+			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
 }

 static const struct seq_operations profile_seq_op = {
-	.start = probes_seq_start,
-	.next = probes_seq_next,
-	.stop = probes_seq_stop,
+	.start = dyn_event_seq_start,
+	.next = dyn_event_seq_next,
+	.stop = dyn_event_seq_stop,
	.show = probes_profile_seq_show
 };

 static int profile_open(struct inode *inode, struct file *file)
 {
+	int ret;
+
+	ret = security_locked_down(LOCKDOWN_TRACEFS);
+	if (ret)
+		return ret;
+
	return seq_open(file, &profile_seq_op);
 }

@@ -767,11 +949,11 @@
				struct trace_event_file *trace_file)
 {
	struct uprobe_trace_entry_head *entry;
+	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
	void *data;
	int size, esize;
-	struct trace_event_call *call = &tu->tp.call;
+	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

@@ -813,7 +995,7 @@
		return 0;

	rcu_read_lock();
-	list_for_each_entry_rcu(link, &tu->tp.files, list)
+	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

@@ -827,7 +1009,7 @@
	struct event_file_link *link;

	rcu_read_lock();
-	list_for_each_entry_rcu(link, &tu->tp.files, list)
+	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
 }
@@ -840,29 +1022,27 @@
8401022 struct trace_seq *s = &iter->seq;
8411023 struct trace_uprobe *tu;
8421024 u8 *data;
843
- int i;
8441025
8451026 entry = (struct uprobe_trace_entry_head *)iter->ent;
846
- tu = container_of(event, struct trace_uprobe, tp.call.event);
1027
+ tu = trace_uprobe_primary_from_call(
1028
+ container_of(event, struct trace_event_call, event));
1029
+ if (unlikely(!tu))
1030
+ goto out;
8471031
8481032 if (is_ret_probe(tu)) {
8491033 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
850
- trace_event_name(&tu->tp.call),
1034
+ trace_probe_name(&tu->tp),
8511035 entry->vaddr[1], entry->vaddr[0]);
8521036 data = DATAOF_TRACE_ENTRY(entry, true);
8531037 } else {
8541038 trace_seq_printf(s, "%s: (0x%lx)",
855
- trace_event_name(&tu->tp.call),
1039
+ trace_probe_name(&tu->tp),
8561040 entry->vaddr[0]);
8571041 data = DATAOF_TRACE_ENTRY(entry, false);
8581042 }
8591043
860
- for (i = 0; i < tu->tp.nr_args; i++) {
861
- struct probe_arg *parg = &tu->tp.args[i];
862
-
863
- if (!parg->type->print(s, parg->name, data + parg->offset, entry))
864
- goto out;
865
- }
1044
+ if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1045
+ goto out;
8661046
8671047 trace_seq_putc(s, '\n');
8681048
@@ -874,34 +1054,73 @@
8741054 enum uprobe_filter_ctx ctx,
8751055 struct mm_struct *mm);
8761056
877
-static int
878
-probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
879
- filter_func_t filter)
1057
+static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
8801058 {
881
- bool enabled = trace_probe_is_enabled(&tu->tp);
882
- struct event_file_link *link = NULL;
8831059 int ret;
8841060
1061
+ tu->consumer.filter = filter;
1062
+ tu->inode = d_real_inode(tu->path.dentry);
1063
+
1064
+ if (tu->ref_ctr_offset)
1065
+ ret = uprobe_register_refctr(tu->inode, tu->offset,
1066
+ tu->ref_ctr_offset, &tu->consumer);
1067
+ else
1068
+ ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1069
+
1070
+ if (ret)
1071
+ tu->inode = NULL;
1072
+
1073
+ return ret;
1074
+}
1075
+
1076
+static void __probe_event_disable(struct trace_probe *tp)
1077
+{
1078
+ struct trace_probe *pos;
1079
+ struct trace_uprobe *tu;
1080
+
1081
+ tu = container_of(tp, struct trace_uprobe, tp);
1082
+ WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1083
+
1084
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1085
+ tu = container_of(pos, struct trace_uprobe, tp);
1086
+ if (!tu->inode)
1087
+ continue;
1088
+
1089
+ uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1090
+ tu->inode = NULL;
1091
+ }
1092
+}
1093
+
1094
+static int probe_event_enable(struct trace_event_call *call,
1095
+ struct trace_event_file *file, filter_func_t filter)
1096
+{
1097
+ struct trace_probe *pos, *tp;
1098
+ struct trace_uprobe *tu;
1099
+ bool enabled;
1100
+ int ret;
1101
+
1102
+ tp = trace_probe_primary_from_call(call);
1103
+ if (WARN_ON_ONCE(!tp))
1104
+ return -ENODEV;
1105
+ enabled = trace_probe_is_enabled(tp);
1106
+
1107
+ /* This may also change "enabled" state */
8851108 if (file) {
886
- if (tu->tp.flags & TP_FLAG_PROFILE)
1109
+ if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
8871110 return -EINTR;
8881111
889
- link = kmalloc(sizeof(*link), GFP_KERNEL);
890
- if (!link)
891
- return -ENOMEM;
892
-
893
- link->file = file;
894
- list_add_tail_rcu(&link->list, &tu->tp.files);
895
-
896
- tu->tp.flags |= TP_FLAG_TRACE;
1112
+ ret = trace_probe_add_file(tp, file);
1113
+ if (ret < 0)
1114
+ return ret;
8971115 } else {
898
- if (tu->tp.flags & TP_FLAG_TRACE)
1116
+ if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
8991117 return -EINTR;
9001118
901
- tu->tp.flags |= TP_FLAG_PROFILE;
1119
+ trace_probe_set_flag(tp, TP_FLAG_PROFILE);
9021120 }
9031121
904
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
1122
+ tu = container_of(tp, struct trace_uprobe, tp);
1123
+ WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
9051124
9061125 if (enabled)
9071126 return 0;
@@ -910,11 +1129,14 @@
	if (ret)
		goto err_flags;

-	tu->consumer.filter = filter;
-	tu->inode = d_real_inode(tu->path.dentry);
-	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
-	if (ret)
-		goto err_buffer;
+	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+		tu = container_of(pos, struct trace_uprobe, tp);
+		ret = trace_uprobe_enable(tu, filter);
+		if (ret) {
+			__probe_event_disable(tp);
+			goto err_buffer;
+		}
+	}

	return 0;

@@ -922,52 +1144,48 @@
9221144 uprobe_buffer_disable();
9231145
9241146 err_flags:
925
- if (file) {
926
- list_del(&link->list);
927
- kfree(link);
928
- tu->tp.flags &= ~TP_FLAG_TRACE;
929
- } else {
930
- tu->tp.flags &= ~TP_FLAG_PROFILE;
931
- }
1147
+ if (file)
1148
+ trace_probe_remove_file(tp, file);
1149
+ else
1150
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1151
+
9321152 return ret;
9331153 }
9341154
935
-static void
936
-probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
1155
+static void probe_event_disable(struct trace_event_call *call,
1156
+ struct trace_event_file *file)
9371157 {
938
- if (!trace_probe_is_enabled(&tu->tp))
1158
+ struct trace_probe *tp;
1159
+
1160
+ tp = trace_probe_primary_from_call(call);
1161
+ if (WARN_ON_ONCE(!tp))
1162
+ return;
1163
+
1164
+ if (!trace_probe_is_enabled(tp))
9391165 return;
9401166
9411167 if (file) {
942
- struct event_file_link *link;
943
-
944
- link = find_event_file_link(&tu->tp, file);
945
- if (!link)
1168
+ if (trace_probe_remove_file(tp, file) < 0)
9461169 return;
9471170
948
- list_del_rcu(&link->list);
949
- /* synchronize with u{,ret}probe_trace_func */
950
- synchronize_rcu();
951
- kfree(link);
952
-
953
- if (!list_empty(&tu->tp.files))
1171
+ if (trace_probe_is_enabled(tp))
9541172 return;
955
- }
1173
+ } else
1174
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
9561175
957
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
958
-
959
- uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
960
- tu->inode = NULL;
961
- tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
962
-
1176
+ __probe_event_disable(tp);
9631177 uprobe_buffer_disable();
9641178 }
9651179
9661180 static int uprobe_event_define_fields(struct trace_event_call *event_call)
9671181 {
968
- int ret, i, size;
1182
+ int ret, size;
9691183 struct uprobe_trace_entry_head field;
970
- struct trace_uprobe *tu = event_call->data;
1184
+ struct trace_uprobe *tu;
1185
+
1186
+ tu = trace_uprobe_primary_from_call(event_call);
1187
+ if (unlikely(!tu))
1188
+ return -ENODEV;
9711189
9721190 if (is_ret_probe(tu)) {
9731191 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
@@ -977,19 +1195,8 @@
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
-	/* Set argument names as fields */
-	for (i = 0; i < tu->tp.nr_args; i++) {
-		struct probe_arg *parg = &tu->tp.args[i];

-		ret = trace_define_field(event_call, parg->type->fmttype,
-					parg->name, size + parg->offset,
-					parg->type->size, parg->type->is_signed,
-					FILTER_OTHER);
-
-		if (ret)
-			return ret;
-	}
-	return 0;
+	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
 }

 #ifdef CONFIG_PERF_EVENTS
@@ -1010,39 +1217,39 @@
10101217 }
10111218
10121219 static inline bool
1013
-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1220
+trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1221
+ struct perf_event *event)
10141222 {
1015
- return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1223
+ return __uprobe_perf_filter(filter, event->hw.target->mm);
10161224 }
10171225
1018
-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1226
+static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1227
+ struct perf_event *event)
10191228 {
10201229 bool done;
10211230
1022
- write_lock(&tu->filter.rwlock);
1231
+ write_lock(&filter->rwlock);
10231232 if (event->hw.target) {
10241233 list_del(&event->hw.tp_list);
1025
- done = tu->filter.nr_systemwide ||
1234
+ done = filter->nr_systemwide ||
10261235 (event->hw.target->flags & PF_EXITING) ||
1027
- uprobe_filter_event(tu, event);
1236
+ trace_uprobe_filter_event(filter, event);
10281237 } else {
1029
- tu->filter.nr_systemwide--;
1030
- done = tu->filter.nr_systemwide;
1238
+ filter->nr_systemwide--;
1239
+ done = filter->nr_systemwide;
10311240 }
1032
- write_unlock(&tu->filter.rwlock);
1241
+ write_unlock(&filter->rwlock);
10331242
1034
- if (!done)
1035
- return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1036
-
1037
- return 0;
1243
+ return done;
10381244 }
10391245
1040
-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1246
+/* This returns true if the filter always covers target mm */
1247
+static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1248
+ struct perf_event *event)
10411249 {
10421250 bool done;
1043
- int err;
10441251
1045
- write_lock(&tu->filter.rwlock);
1252
+ write_lock(&filter->rwlock);
10461253 if (event->hw.target) {
10471254 /*
10481255 * event->parent != NULL means copy_process(), we can avoid
@@ -1052,35 +1259,84 @@
10521259 * attr.enable_on_exec means that exec/mmap will install the
10531260 * breakpoints we need.
10541261 */
1055
- done = tu->filter.nr_systemwide ||
1262
+ done = filter->nr_systemwide ||
10561263 event->parent || event->attr.enable_on_exec ||
1057
- uprobe_filter_event(tu, event);
1058
- list_add(&event->hw.tp_list, &tu->filter.perf_events);
1264
+ trace_uprobe_filter_event(filter, event);
1265
+ list_add(&event->hw.tp_list, &filter->perf_events);
10591266 } else {
1060
- done = tu->filter.nr_systemwide;
1061
- tu->filter.nr_systemwide++;
1267
+ done = filter->nr_systemwide;
1268
+ filter->nr_systemwide++;
10621269 }
1063
- write_unlock(&tu->filter.rwlock);
1270
+ write_unlock(&filter->rwlock);
10641271
1065
- err = 0;
1066
- if (!done) {
1067
- err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1068
- if (err)
1069
- uprobe_perf_close(tu, event);
1272
+ return done;
1273
+}
1274
+
1275
+static int uprobe_perf_close(struct trace_event_call *call,
1276
+ struct perf_event *event)
1277
+{
1278
+ struct trace_probe *pos, *tp;
1279
+ struct trace_uprobe *tu;
1280
+ int ret = 0;
1281
+
1282
+ tp = trace_probe_primary_from_call(call);
1283
+ if (WARN_ON_ONCE(!tp))
1284
+ return -ENODEV;
1285
+
1286
+ tu = container_of(tp, struct trace_uprobe, tp);
1287
+ if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1288
+ return 0;
1289
+
1290
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1291
+ tu = container_of(pos, struct trace_uprobe, tp);
1292
+ ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1293
+ if (ret)
1294
+ break;
10701295 }
1296
+
1297
+ return ret;
1298
+}
1299
+
1300
+static int uprobe_perf_open(struct trace_event_call *call,
1301
+ struct perf_event *event)
1302
+{
1303
+ struct trace_probe *pos, *tp;
1304
+ struct trace_uprobe *tu;
1305
+ int err = 0;
1306
+
1307
+ tp = trace_probe_primary_from_call(call);
1308
+ if (WARN_ON_ONCE(!tp))
1309
+ return -ENODEV;
1310
+
1311
+ tu = container_of(tp, struct trace_uprobe, tp);
1312
+ if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1313
+ return 0;
1314
+
1315
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1316
+ tu = container_of(pos, struct trace_uprobe, tp);
1317
+ err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1318
+ if (err) {
1319
+ uprobe_perf_close(call, event);
1320
+ break;
1321
+ }
1322
+ }
1323
+
10711324 return err;
10721325 }
10731326
10741327 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
10751328 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
10761329 {
1330
+ struct trace_uprobe_filter *filter;
10771331 struct trace_uprobe *tu;
10781332 int ret;
10791333
10801334 tu = container_of(uc, struct trace_uprobe, consumer);
1081
- read_lock(&tu->filter.rwlock);
1082
- ret = __uprobe_perf_filter(&tu->filter, mm);
1083
- read_unlock(&tu->filter.rwlock);
1335
+ filter = tu->tp.event->filter;
1336
+
1337
+ read_lock(&filter->rwlock);
1338
+ ret = __uprobe_perf_filter(filter, mm);
1339
+ read_unlock(&filter->rwlock);
10841340
10851341 return ret;
10861342 }
@@ -1089,15 +1345,22 @@
			unsigned long func, struct pt_regs *regs,
			struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	struct trace_event_call *call = &tu->tp.call;
+	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

-	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
-		return;
+	if (bpf_prog_array_valid(call)) {
+		u32 ret;
+
+		preempt_disable();
+		ret = trace_call_bpf(call, regs);
+		preempt_enable();
+		if (!ret)
+			return;
+	}

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

@@ -1159,7 +1422,7 @@

 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
-			bool perf_type_tracepoint)
+			u64 *probe_addr, bool perf_type_tracepoint)
 {
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
@@ -1168,7 +1431,7 @@
	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
-		tu = event->tp_event->data;
+		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

@@ -1176,6 +1439,7 @@
				: BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
+	*probe_addr = 0;
	return 0;
 }
 #endif /* CONFIG_PERF_EVENTS */
@@ -1184,36 +1448,34 @@
11841448 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
11851449 void *data)
11861450 {
1187
- struct trace_uprobe *tu = event->data;
11881451 struct trace_event_file *file = data;
11891452
11901453 switch (type) {
11911454 case TRACE_REG_REGISTER:
1192
- return probe_event_enable(tu, file, NULL);
1455
+ return probe_event_enable(event, file, NULL);
11931456
11941457 case TRACE_REG_UNREGISTER:
1195
- probe_event_disable(tu, file);
1458
+ probe_event_disable(event, file);
11961459 return 0;
11971460
11981461 #ifdef CONFIG_PERF_EVENTS
11991462 case TRACE_REG_PERF_REGISTER:
1200
- return probe_event_enable(tu, NULL, uprobe_perf_filter);
1463
+ return probe_event_enable(event, NULL, uprobe_perf_filter);
12011464
12021465 case TRACE_REG_PERF_UNREGISTER:
1203
- probe_event_disable(tu, NULL);
1466
+ probe_event_disable(event, NULL);
12041467 return 0;
12051468
12061469 case TRACE_REG_PERF_OPEN:
1207
- return uprobe_perf_open(tu, data);
1470
+ return uprobe_perf_open(event, data);
12081471
12091472 case TRACE_REG_PERF_CLOSE:
1210
- return uprobe_perf_close(tu, data);
1473
+ return uprobe_perf_close(event, data);
12111474
12121475 #endif
12131476 default:
12141477 return 0;
12151478 }
1216
- return 0;
12171479 }
12181480
12191481 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
@@ -1240,13 +1502,13 @@
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
-	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

-	if (tu->tp.flags & TP_FLAG_TRACE)
+	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

 #ifdef CONFIG_PERF_EVENTS
-	if (tu->tp.flags & TP_FLAG_PROFILE)
+	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
 #endif
	uprobe_buffer_put(ucb);
@@ -1275,13 +1537,13 @@
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
-	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

-	if (tu->tp.flags & TP_FLAG_TRACE)
+	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

 #ifdef CONFIG_PERF_EVENTS
-	if (tu->tp.flags & TP_FLAG_PROFILE)
+	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
 #endif
	uprobe_buffer_put(ucb);
@@ -1292,62 +1554,38 @@
12921554 .trace = print_uprobe_event
12931555 };
12941556
1295
-static inline void init_trace_event_call(struct trace_uprobe *tu,
1296
- struct trace_event_call *call)
1297
-{
1298
- INIT_LIST_HEAD(&call->class->fields);
1299
- call->event.funcs = &uprobe_funcs;
1300
- call->class->define_fields = uprobe_event_define_fields;
1557
+static struct trace_event_fields uprobe_fields_array[] = {
1558
+ { .type = TRACE_FUNCTION_TYPE,
1559
+ .define_fields = uprobe_event_define_fields },
1560
+ {}
1561
+};
13011562
1302
- call->flags = TRACE_EVENT_FL_UPROBE;
1563
+static inline void init_trace_event_call(struct trace_uprobe *tu)
1564
+{
1565
+ struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1566
+ call->event.funcs = &uprobe_funcs;
1567
+ call->class->fields_array = uprobe_fields_array;
1568
+
1569
+ call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
13031570 call->class->reg = trace_uprobe_register;
1304
- call->data = tu;
13051571 }
13061572
13071573 static int register_uprobe_event(struct trace_uprobe *tu)
13081574 {
1309
- struct trace_event_call *call = &tu->tp.call;
1310
- int ret = 0;
1575
+ init_trace_event_call(tu);
13111576
1312
- init_trace_event_call(tu, call);
1313
-
1314
- if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1315
- return -ENOMEM;
1316
-
1317
- ret = register_trace_event(&call->event);
1318
- if (!ret) {
1319
- kfree(call->print_fmt);
1320
- return -ENODEV;
1321
- }
1322
-
1323
- ret = trace_add_event_call(call);
1324
-
1325
- if (ret) {
1326
- pr_info("Failed to register uprobe event: %s\n",
1327
- trace_event_name(call));
1328
- kfree(call->print_fmt);
1329
- unregister_trace_event(&call->event);
1330
- }
1331
-
1332
- return ret;
1577
+ return trace_probe_register_event_call(&tu->tp);
13331578 }
13341579
13351580 static int unregister_uprobe_event(struct trace_uprobe *tu)
13361581 {
1337
- int ret;
1338
-
1339
- /* tu->event is unregistered in trace_remove_event_call() */
1340
- ret = trace_remove_event_call(&tu->tp.call);
1341
- if (ret)
1342
- return ret;
1343
- kfree(tu->tp.call.print_fmt);
1344
- tu->tp.call.print_fmt = NULL;
1345
- return 0;
1582
+ return trace_probe_unregister_event_call(&tu->tp);
13461583 }
13471584
13481585 #ifdef CONFIG_PERF_EVENTS
13491586 struct trace_event_call *
1350
-create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
1587
+create_local_trace_uprobe(char *name, unsigned long offs,
1588
+ unsigned long ref_ctr_offset, bool is_return)
13511589 {
13521590 struct trace_uprobe *tu;
13531591 struct path path;
@@ -1363,7 +1601,7 @@
	}

	/*
-	 * local trace_kprobes are not added to probe_list, so they are never
+	 * local trace_kprobes are not added to dyn_event, so they are never
	 * searched in find_trace_kprobe(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
@@ -1379,15 +1617,16 @@

	tu->offset = offs;
	tu->path = path;
+	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
-	init_trace_event_call(tu, &tu->tp.call);
+	init_trace_event_call(tu);

-	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
+	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

-	return &tu->tp.call;
+	return trace_probe_event_call(&tu->tp);
 error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
@@ -1397,10 +1636,7 @@
 {
	struct trace_uprobe *tu;

-	tu = container_of(event_call, struct trace_uprobe, tp.call);
-
-	kfree(tu->tp.call.print_fmt);
-	tu->tp.call.print_fmt = NULL;
+	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
 }
@@ -1409,16 +1645,20 @@
 /* Make a trace interface for controling probe points */
 static __init int init_uprobe_trace(void)
 {
-	struct dentry *d_tracer;
+	int ret;

-	d_tracer = tracing_init_dentry();
-	if (IS_ERR(d_tracer))
+	ret = dyn_event_register(&trace_uprobe_ops);
+	if (ret)
+		return ret;
+
+	ret = tracing_init_dentry();
+	if (ret)
		return 0;

-	trace_create_file("uprobe_events", 0644, d_tracer,
+	trace_create_file("uprobe_events", 0644, NULL,
			NULL, &uprobe_events_ops);
	/* Profile interface */
-	trace_create_file("uprobe_profile", 0444, d_tracer,
+	trace_create_file("uprobe_profile", 0444, NULL,
			NULL, &uprobe_profile_ops);
	return 0;
 }