2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/include/linux/trace_events.h
@@ -11,7 +11,7 @@
 #include <linux/tracepoint.h>
 
 struct trace_array;
-struct trace_buffer;
+struct array_buffer;
 struct tracer;
 struct dentry;
 struct bpf_prog;
@@ -45,6 +45,11 @@
 				     const void *buf, int count,
 				     size_t el_size);
 
+const char *
+trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
+			 int prefix_type, int rowsize, int groupsize,
+			 const void *buf, size_t len, bool ascii);
+
 struct trace_iterator;
 struct trace_event;
 
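The hunk above exports trace_print_hex_dump_seq(), which writes a formatted hex dump into a trace_seq. Below is a minimal sketch of how an event's output callback might use it; the payload struct (my_hex_entry) and callback name are hypothetical, only the helper's signature comes from this patch, and DUMP_PREFIX_OFFSET is the existing constant from <linux/printk.h>.

/* Hypothetical output callback; my_hex_entry and my_hex_output are
 * illustrative names, not part of this patch. */
static enum print_line_t my_hex_output(struct trace_iterator *iter,
				       int flags, struct trace_event *event)
{
	struct my_hex_entry *field = (struct my_hex_entry *)iter->ent;
	struct trace_seq *s = &iter->seq;

	/* Emit "raw: <offset>: xx xx ..." lines for the payload. */
	trace_print_hex_dump_seq(s, "raw: ", DUMP_PREFIX_OFFSET,
				 16, 1, field->buf, sizeof(field->buf),
				 false);

	return trace_handle_return(s);
}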
@@ -62,8 +67,6 @@
 	unsigned char		flags;
 	unsigned char		preempt_count;
 	int			pid;
-	unsigned char		migrate_disable;
-	unsigned char		preempt_lazy_count;
 };
 
 #define TRACE_EVENT_TYPE_MAX						\
@@ -76,12 +79,14 @@
 struct trace_iterator {
 	struct trace_array	*tr;
 	struct tracer		*trace;
-	struct trace_buffer	*trace_buffer;
+	struct array_buffer	*array_buffer;
 	void			*private;
 	int			cpu_file;
 	struct mutex		mutex;
 	struct ring_buffer_iter	**buffer_iter;
 	unsigned long		iter_flags;
+	void			*temp;	/* temp holder */
+	unsigned int		temp_size;
 
 	/* trace_seq for __print_flags() and __print_symbolic() etc. */
 	struct trace_seq	tmp_seq;
@@ -144,12 +149,13 @@
 enum print_line_t trace_handle_return(struct trace_seq *s);
 
 void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned short type,
 				  unsigned long flags,
 				  int pc);
 struct trace_event_file;
 
 struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
 				struct trace_event_file *trace_file,
 				int type, unsigned long len,
 				unsigned long flags, int pc);
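tracing_generic_entry_update() now takes the event type as its second argument, so callers fill in the common fields in one step instead of patching entry->type afterwards. A hedged sketch of a caller under the new signature; the surrounding reserve logic, TRACE_FN, and struct ftrace_entry live in the tracing core (kernel/trace/), not in this header.

/* Sketch only: fill the common fields of a function-trace record
 * under the new four-argument signature. */
struct ftrace_entry *entry = ring_buffer_event_data(event);

tracing_generic_entry_update(&entry->ent, TRACE_FN, flags, pc);
entry->ip	 = ip;
entry->parent_ip = parent_ip;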
@@ -188,6 +194,22 @@
 
 struct trace_event_call;
 
+#define TRACE_FUNCTION_TYPE ((const char *)~0UL)
+
+struct trace_event_fields {
+	const char *type;
+	union {
+		struct {
+			const char *name;
+			const int  size;
+			const int  align;
+			const int  is_signed;
+			const int  filter_type;
+		};
+		int (*define_fields)(struct trace_event_call *);
+	};
+};
+
 struct trace_event_class {
 	const char		*system;
 	void			*probe;
@@ -196,7 +218,7 @@
 #endif
 	int			(*reg)(struct trace_event_call *event,
 				       enum trace_reg type, void *data);
-	int			(*define_fields)(struct trace_event_call *);
+	struct trace_event_fields *fields_array;
 	struct list_head	*(*get_fields)(struct trace_event_call *);
 	struct list_head	fields;
 	int			(*raw_init)(struct trace_event_call *);
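Field definitions move from a per-class ->define_fields() callback to a static ->fields_array table terminated by an empty entry (TRACE_FUNCTION_TYPE marks entries that still defer to a callback). A hypothetical sketch of a class converted to the new layout; the payload struct, names, and use of the existing FILTER_OTHER filter type are illustrative, not taken from this patch.

/* Hypothetical event payload; only the trace_event_fields /
 * fields_array usage reflects this patch. */
struct my_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			ret;
};

static struct trace_event_fields my_event_fields[] = {
	{ .type = "unsigned long", .name = "ip",
	  .size = sizeof(unsigned long),
	  .align = __alignof__(unsigned long),
	  .is_signed = 0, .filter_type = FILTER_OTHER },
	{ .type = "int", .name = "ret",
	  .size = sizeof(int), .align = __alignof__(int),
	  .is_signed = 1, .filter_type = FILTER_OTHER },
	{}	/* terminating entry */
};

static struct trace_event_class my_event_class = {
	.system		= "my_subsys",
	.fields_array	= my_event_fields,
};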
@@ -206,12 +228,13 @@
 			      enum trace_reg type, void *data);
 
 struct trace_event_buffer {
-	struct ring_buffer		*buffer;
+	struct trace_buffer		*buffer;
 	struct ring_buffer_event	*event;
 	struct trace_event_file		*trace_file;
 	void				*entry;
 	unsigned long			flags;
 	int				pc;
+	struct pt_regs			*regs;
 };
 
 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
@@ -319,6 +342,14 @@
 	return call->name;
 }
 
+static inline struct list_head *
+trace_get_fields(struct trace_event_call *event_call)
+{
+	if (!event_call->class->get_fields)
+		return &event_call->class->fields;
+	return event_call->class->get_fields(event_call);
+}
+
 struct trace_array;
 struct trace_subsystem_dir;
 
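trace_get_fields() hides whether a class lists its fields directly or resolves them through ->get_fields(). A hedged usage sketch; struct ftrace_event_field and its link/name/type members come from kernel/trace/trace.h, not from this header.

/* Sketch: walk every field of an event call, whichever way the
 * class exposes its field list. */
struct ftrace_event_field *field;
struct list_head *head = trace_get_fields(call);

list_for_each_entry(field, head, link)
	pr_info("field %s (%s)\n", field->name, field->type);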
@@ -335,6 +366,128 @@
 	EVENT_FILE_FL_PID_FILTER_BIT,
 	EVENT_FILE_FL_WAS_ENABLED_BIT,
 };
+
+extern struct trace_event_file *trace_get_event_file(const char *instance,
+						     const char *system,
+						     const char *event);
+extern void trace_put_event_file(struct trace_event_file *file);
+
+#define MAX_DYNEVENT_CMD_LEN	(2048)
+
+enum dynevent_type {
+	DYNEVENT_TYPE_SYNTH = 1,
+	DYNEVENT_TYPE_KPROBE,
+	DYNEVENT_TYPE_NONE,
+};
+
+struct dynevent_cmd;
+
+typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
+
+struct dynevent_cmd {
+	struct seq_buf		seq;
+	const char		*event_name;
+	unsigned int		n_fields;
+	enum dynevent_type	type;
+	dynevent_create_fn_t	run_command;
+	void			*private_data;
+};
+
+extern int dynevent_create(struct dynevent_cmd *cmd);
+
+extern int synth_event_delete(const char *name);
+
+extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
+				 char *buf, int maxlen);
+
+extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
+				       const char *name,
+				       struct module *mod, ...);
+
+#define synth_event_gen_cmd_start(cmd, name, mod, ...)	\
+	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
+
+struct synth_field_desc {
+	const char *type;
+	const char *name;
+};
+
+extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
+					   const char *name,
+					   struct module *mod,
+					   struct synth_field_desc *fields,
+					   unsigned int n_fields);
+extern int synth_event_create(const char *name,
+			      struct synth_field_desc *fields,
+			      unsigned int n_fields, struct module *mod);
+
+extern int synth_event_add_field(struct dynevent_cmd *cmd,
+				 const char *type,
+				 const char *name);
+extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
+				     const char *type_name);
+extern int synth_event_add_fields(struct dynevent_cmd *cmd,
+				  struct synth_field_desc *fields,
+				  unsigned int n_fields);
+
+#define synth_event_gen_cmd_end(cmd)	\
+	dynevent_create(cmd)
+
+struct synth_event;
+
+struct synth_event_trace_state {
+	struct trace_event_buffer fbuffer;
+	struct synth_trace_event *entry;
+	struct trace_buffer *buffer;
+	struct synth_event *event;
+	unsigned int cur_field;
+	unsigned int n_u64;
+	bool disabled;
+	bool add_next;
+	bool add_name;
+};
+
+extern int synth_event_trace(struct trace_event_file *file,
+			     unsigned int n_vals, ...);
+extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+				   unsigned int n_vals);
+extern int synth_event_trace_start(struct trace_event_file *file,
+				   struct synth_event_trace_state *trace_state);
+extern int synth_event_add_next_val(u64 val,
+				    struct synth_event_trace_state *trace_state);
+extern int synth_event_add_val(const char *field_name, u64 val,
+			       struct synth_event_trace_state *trace_state);
+extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
+
+extern int kprobe_event_delete(const char *name);
+
+extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
+				  char *buf, int maxlen);
+
+#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
+	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
+
+#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
+	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
+
+extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
+					bool kretprobe,
+					const char *name,
+					const char *loc, ...);
+
+#define kprobe_event_add_fields(cmd, ...)	\
+	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
+
+#define kprobe_event_add_field(cmd, field)	\
+	__kprobe_event_add_fields(cmd, field, NULL)
+
+extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+
+#define kprobe_event_gen_cmd_end(cmd)		\
+	dynevent_create(cmd)
+
+#define kretprobe_event_gen_cmd_end(cmd)	\
+	dynevent_create(cmd)
 
 /*
  * Event file flags:
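The hunk above adds the in-kernel APIs for synthetic events and for generating kprobe events from kernel code. First, a hedged sketch of the synthetic-event side: create an event, look up its trace_event_file, emit one record, and drop the file reference. The event name, field list, and error handling are illustrative; the calls are assumed to behave as their declarations above suggest, and the event must be enabled for the record to actually be written.

/* Sketch: a "block_lat" synthetic event with two fields, traced once
 * from module code. Names are illustrative. */
static struct synth_field_desc block_lat_fields[] = {
	{ .type = "pid_t",	.name = "pid" },
	{ .type = "u64",	.name = "lat_ns" },
};

static int __init my_synth_init(void)
{
	struct trace_event_file *file;
	int ret;

	ret = synth_event_create("block_lat", block_lat_fields,
				 ARRAY_SIZE(block_lat_fields), THIS_MODULE);
	if (ret)
		return ret;

	/* NULL instance means the top-level trace buffer. */
	file = trace_get_event_file(NULL, "synthetic", "block_lat");
	if (IS_ERR(file)) {
		synth_event_delete("block_lat");
		return PTR_ERR(file);
	}

	/* Values are passed in field-definition order. */
	ret = synth_event_trace(file, 2, (u64)current->pid, (u64)1000);

	trace_put_event_file(file);
	return ret;
}

And a similarly hedged sketch of the kprobe command generator, building the equivalent of "p:myprobe do_sys_open dfd=%ax filename=%dx"; the probe name, location, and field specifiers are illustrative.

/* Sketch: build and register a kprobe event from a module. */
static int __init my_kprobe_gen_init(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
					 "dfd=%ax", "filename=%dx");
	if (!ret)
		ret = kprobe_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}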
@@ -418,7 +571,7 @@
 
 #define PERF_MAX_TRACE_SIZE	2048
 
-#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
+#define MAX_FILTER_STR_VAL	256U	/* Should handle KSYM_SYMBOL_LEN */
 
 enum event_trigger_type {
 	ETT_NONE		= (0),
@@ -473,7 +626,8 @@
 int perf_event_query_prog_array(struct perf_event *event, void __user *info);
 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
-struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
+struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
+void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
 			    u32 *fd_type, const char **buf,
 			    u64 *probe_offset, u64 *probe_addr);
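bpf_find_raw_tracepoint() is replaced by a get/put pair so the raw tracepoint (and any module providing it) stays pinned while a BPF program is attached to it. A hedged sketch of the expected calling pattern; the surrounding attach logic is illustrative.

/* Sketch: look up the tracepoint, attach, and drop the reference
 * again if the attach fails. */
struct bpf_raw_event_map *btp;
int err;

btp = bpf_get_raw_tracepoint(tp_name);
if (!btp)
	return -ENOENT;

err = bpf_probe_register(btp, prog);
if (err)
	bpf_put_raw_tracepoint(btp);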
@@ -504,9 +658,12 @@
 {
 	return -EOPNOTSUPP;
 }
-static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
 {
 	return NULL;
+}
+static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
+{
 }
 static inline int bpf_get_perf_event_info(const struct perf_event *event,
 					   u32 *prog_id, u32 *fd_type,
@@ -531,16 +688,16 @@
 extern int trace_define_field(struct trace_event_call *call, const char *type,
 			      const char *name, int offset, int size,
 			      int is_signed, int filter_type);
-extern int trace_add_event_call_nolock(struct trace_event_call *call);
-extern int trace_remove_event_call_nolock(struct trace_event_call *call);
 extern int trace_add_event_call(struct trace_event_call *call);
 extern int trace_remove_event_call(struct trace_event_call *call);
 extern int trace_event_get_offsets(struct trace_event_call *call);
 
 #define is_signed_type(type)	(((type)(-1)) < (type)1)
 
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
 int trace_set_clr_event(const char *system, const char *event, int set);
-
+int trace_array_set_clr_event(struct trace_array *tr, const char *system,
+			      const char *event, bool enable);
 /*
  * The double __builtin_constant_p is because gcc will give us an error
  * if we try to allocate the static variable to fmt if it is not a
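trace_array_set_clr_event() lets kernel code enable or disable an event inside a specific trace instance rather than only the top-level buffer, while ftrace_set_clr_event() takes a "system:event"-style string. A hedged sketch, assuming the caller already holds a valid struct trace_array pointer tr (for example from trace_array_get_by_name(), which is not part of this diff).

/* Sketch: enable sched:sched_switch in the instance behind 'tr',
 * run the workload of interest, then disable it again. */
int ret;

ret = trace_array_set_clr_event(tr, "sched", "sched_switch", true);
if (ret)
	return ret;
/* ... workload ... */
ret = trace_array_set_clr_event(tr, "sched", "sched_switch", false);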
@@ -552,7 +709,7 @@
 		tracing_record_cmdline(current);	\
 		if (__builtin_constant_p(fmt)) {	\
 			static const char *trace_printk_fmt	\
-				__attribute__((section("__trace_printk_fmt"))) = \
+				__section("__trace_printk_fmt") = \
 				__builtin_constant_p(fmt) ? fmt : NULL; \
 							\
 			__trace_bprintk(ip, trace_printk_fmt, ##args);	\
@@ -579,11 +736,13 @@
 			       bool perf_type_tracepoint);
 #endif
 #ifdef CONFIG_UPROBE_EVENTS
-extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe);
+extern int perf_uprobe_init(struct perf_event *event,
+			    unsigned long ref_ctr_offset, bool is_retprobe);
 extern void perf_uprobe_destroy(struct perf_event *event);
 extern int bpf_get_uprobe_info(const struct perf_event *event,
 			       u32 *fd_type, const char **filename,
-			       u64 *probe_offset, bool perf_type_tracepoint);
+			       u64 *probe_offset, u64 *probe_addr,
+			       bool perf_type_tracepoint);
 #endif
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);