2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/samples/bpf/trace_event_user.c
@@ -1,30 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2016 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
  */
 #include <stdio.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <stdbool.h>
 #include <string.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <sys/ioctl.h>
 #include <linux/perf_event.h>
 #include <linux/bpf.h>
 #include <signal.h>
-#include <assert.h>
 #include <errno.h>
 #include <sys/resource.h>
-#include "libbpf.h"
-#include "bpf_load.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
 #include "perf-sys.h"
 #include "trace_helpers.h"
 
 #define SAMPLE_FREQ 50
 
+static int pid;
+/* counts, stackmap */
+static int map_fd[2];
+struct bpf_program *prog;
 static bool sys_read_seen, sys_write_seen;
 
 static void print_ksym(__u64 addr)
@@ -34,6 +31,11 @@
 	if (!addr)
 		return;
 	sym = ksym_search(addr);
+	if (!sym) {
+		printf("ksym not found. Is kallsyms loaded?\n");
+		return;
+	}
+
 	printf("%s;", sym->name);
 	if (!strstr(sym->name, "sys_read"))
 		sys_read_seen = true;
@@ -89,10 +91,10 @@
 	}
 }
 
-static void int_exit(int sig)
+static void err_exit(int err)
 {
-	kill(0, SIGKILL);
-	exit(0);
+	kill(pid, SIGKILL);
+	exit(err);
 }
 
 static void print_stacks(void)
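
The renamed helper doubles as the SIGINT/SIGTERM handler installed in main() and as the direct error path; that works because its void (*)(int) signature matches sighandler_t, so the argument is either a signal number or the accumulated error code. A minimal standalone sketch of that dual use (it assumes a child pid recorded by fork(), as the patch does later in main()):

#include <signal.h>
#include <stdlib.h>

static int pid;			/* child running read_trace_pipe(), set by fork() */

/* Reached both via signal(SIGINT/SIGTERM, err_exit) and via a direct
 * err_exit(error) call; either way the argument becomes the exit status.
 */
static void err_exit(int err)
{
	kill(pid, SIGKILL);	/* stop the trace-pipe reader child */
	exit(err);
}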
@@ -100,7 +102,7 @@
 	struct key_t key = {}, next_key;
 	__u64 value;
 	__u32 stackid = 0, next_id;
-	int fd = map_fd[0], stack_map = map_fd[1];
+	int error = 1, fd = map_fd[0], stack_map = map_fd[1];
 
 	sys_read_seen = sys_write_seen = false;
 	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
@@ -112,7 +114,7 @@
 	printf("\n");
 	if (!sys_read_seen || !sys_write_seen) {
 		printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
-		int_exit(0);
+		err_exit(error);
 	}
 
 	/* clear stack map */
@@ -134,43 +136,52 @@
 
 static void test_perf_event_all_cpu(struct perf_event_attr *attr)
 {
-	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
-	int *pmu_fd = malloc(nr_cpus * sizeof(int));
-	int i, error = 0;
+	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	struct bpf_link **links = calloc(nr_cpus, sizeof(struct bpf_link *));
+	int i, pmu_fd, error = 1;
+
+	if (!links) {
+		printf("malloc of links failed\n");
+		goto err;
+	}
 
 	/* system wide perf event, no need to inherit */
 	attr->inherit = 0;
 
 	/* open perf_event on all cpus */
 	for (i = 0; i < nr_cpus; i++) {
-		pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
-		if (pmu_fd[i] < 0) {
+		pmu_fd = sys_perf_event_open(attr, -1, i, -1, 0);
+		if (pmu_fd < 0) {
 			printf("sys_perf_event_open failed\n");
-			error = 1;
 			goto all_cpu_err;
 		}
-		assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
-		assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
+		links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
+		if (libbpf_get_error(links[i])) {
+			printf("bpf_program__attach_perf_event failed\n");
+			links[i] = NULL;
+			close(pmu_fd);
+			goto all_cpu_err;
+		}
 	}
 
-	if (generate_load() < 0) {
-		error = 1;
+	if (generate_load() < 0)
 		goto all_cpu_err;
-	}
+
 	print_stacks();
+	error = 0;
 all_cpu_err:
-	for (i--; i >= 0; i--) {
-		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
-		close(pmu_fd[i]);
-	}
-	free(pmu_fd);
+	for (i--; i >= 0; i--)
+		bpf_link__destroy(links[i]);
+err:
+	free(links);
 	if (error)
-		int_exit(0);
+		err_exit(error);
 }
 
 static void test_perf_event_task(struct perf_event_attr *attr)
 {
-	int pmu_fd, error = 0;
+	struct bpf_link *link = NULL;
+	int pmu_fd, error = 1;
 
 	/* per task perf event, enable inherit so the "dd ..." command can be traced properly.
 	 * Enabling inherit will cause bpf_perf_prog_read_time helper failure.
@@ -181,21 +192,25 @@
 	pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
 	if (pmu_fd < 0) {
 		printf("sys_perf_event_open failed\n");
-		int_exit(0);
-	}
-	assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
-	assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
-
-	if (generate_load() < 0) {
-		error = 1;
 		goto err;
 	}
+	link = bpf_program__attach_perf_event(prog, pmu_fd);
+	if (libbpf_get_error(link)) {
+		printf("bpf_program__attach_perf_event failed\n");
+		link = NULL;
+		close(pmu_fd);
+		goto err;
+	}
+
+	if (generate_load() < 0)
+		goto err;
+
 	print_stacks();
+	error = 0;
 err:
-	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-	close(pmu_fd);
+	bpf_link__destroy(link);
 	if (error)
-		int_exit(0);
+		err_exit(error);
 }
 
 static void test_bpf_perf_event(void)
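
Both conversions above land on the same libbpf attach pattern: open the perf event, hand the fd to bpf_program__attach_perf_event(), and later let bpf_link__destroy() tear the attachment down instead of the old PERF_EVENT_IOC_SET_BPF/PERF_EVENT_IOC_ENABLE ioctls. A minimal standalone sketch of that pattern (attach_sampling_event() is a hypothetical helper name, and the raw perf_event_open syscall stands in for the sample's perf-sys.h wrapper):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>

/* Attach an already-loaded program to one sampling event on one CPU and
 * return the bpf_link that represents the attachment (NULL on failure).
 */
static struct bpf_link *attach_sampling_event(struct bpf_program *prog,
					      struct perf_event_attr *attr,
					      int cpu)
{
	struct bpf_link *link;
	int pmu_fd;

	/* system-wide event on this CPU: pid = -1, group_fd = -1 */
	pmu_fd = syscall(__NR_perf_event_open, attr, -1, cpu, -1, 0);
	if (pmu_fd < 0) {
		printf("perf_event_open failed on cpu %d\n", cpu);
		return NULL;
	}

	link = bpf_program__attach_perf_event(prog, pmu_fd);
	if (libbpf_get_error(link)) {
		/* on attach failure the caller still owns the fd */
		printf("bpf_program__attach_perf_event failed\n");
		close(pmu_fd);
		return NULL;
	}

	/* a later bpf_link__destroy(link) detaches the program and releases
	 * the perf event, replacing the explicit DISABLE ioctl + close()
	 */
	return link;
}

test_perf_event_all_cpu() keeps one such link per CPU in the calloc'd links array, so the cleanup loop only has to destroy each link rather than disabling and closing every fd by hand.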
@@ -280,29 +295,60 @@
 int main(int argc, char **argv)
 {
 	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	struct bpf_object *obj = NULL;
 	char filename[256];
+	int error = 1;
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
 	setrlimit(RLIMIT_MEMLOCK, &r);
 
-	signal(SIGINT, int_exit);
-	signal(SIGTERM, int_exit);
+	signal(SIGINT, err_exit);
+	signal(SIGTERM, err_exit);
 
 	if (load_kallsyms()) {
 		printf("failed to process /proc/kallsyms\n");
-		return 1;
+		goto cleanup;
 	}
 
-	if (load_bpf_file(filename)) {
-		printf("%s", bpf_log_buf);
-		return 2;
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj)) {
+		printf("opening BPF object file failed\n");
+		obj = NULL;
+		goto cleanup;
 	}
 
-	if (fork() == 0) {
+	prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+	if (!prog) {
+		printf("finding a prog in obj file failed\n");
+		goto cleanup;
+	}
+
+	/* load BPF program */
+	if (bpf_object__load(obj)) {
+		printf("loading BPF object file failed\n");
+		goto cleanup;
+	}
+
+	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
+	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
+	if (map_fd[0] < 0 || map_fd[1] < 0) {
+		printf("finding a counts/stackmap map in obj file failed\n");
+		goto cleanup;
+	}
+
+	pid = fork();
+	if (pid == 0) {
		read_trace_pipe();
 		return 0;
+	} else if (pid == -1) {
+		printf("couldn't spawn process\n");
+		goto cleanup;
 	}
+
 	test_bpf_perf_event();
-	int_exit(0);
-	return 0;
+	error = 0;
+
+cleanup:
+	bpf_object__close(obj);
+	err_exit(error);
 }
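
The main() conversion above follows the usual skeleton-less libbpf flow: open the object file, look up the program by name, load it (which runs the verifier and creates the maps), then resolve the map fds by name. A condensed standalone sketch of that flow under the same assumptions as this sample (an object file built as trace_event_kern.o containing a program named bpf_prog1 and maps named counts and stackmap; setup_bpf() is a hypothetical helper name):

#include <bpf/libbpf.h>

/* Everything main() now does before fork(), gathered in one helper. */
static int setup_bpf(struct bpf_object **pobj, struct bpf_program **pprog,
		     int *counts_fd, int *stackmap_fd)
{
	struct bpf_object *obj;

	obj = bpf_object__open_file("trace_event_kern.o", NULL);
	if (libbpf_get_error(obj))
		return -1;

	/* programs are addressable by their C function name once the object is open */
	*pprog = bpf_object__find_program_by_name(obj, "bpf_prog1");
	if (!*pprog)
		goto out;

	/* verification and map creation happen at load time */
	if (bpf_object__load(obj))
		goto out;

	*counts_fd = bpf_object__find_map_fd_by_name(obj, "counts");
	*stackmap_fd = bpf_object__find_map_fd_by_name(obj, "stackmap");
	if (*counts_fd < 0 || *stackmap_fd < 0)
		goto out;

	*pobj = obj;
	return 0;
out:
	bpf_object__close(obj);
	return -1;
}

In the patch itself the same calls are inlined in main() and every failure funnels to the cleanup: label, so bpf_object__close() and err_exit() run on all paths.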