2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/tools/testing/selftests/bpf/test_progs.c
@@ -1,396 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
  */
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <time.h>
-
-#include <linux/types.h>
-typedef __u16 __sum16;
-#include <arpa/inet.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/filter.h>
-#include <linux/perf_event.h>
-#include <linux/unistd.h>
-
-#include <sys/ioctl.h>
-#include <sys/wait.h>
-#include <sys/types.h>
-#include <fcntl.h>
-
-#include <linux/bpf.h>
-#include <linux/err.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "test_iptunnel_common.h"
-#include "bpf_util.h"
-#include "bpf_endian.h"
+#define _GNU_SOURCE
+#include "test_progs.h"
+#include "cgroup_helpers.h"
 #include "bpf_rlimit.h"
-#include "trace_helpers.h"
+#include <argp.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <string.h>
+#include <execinfo.h> /* backtrace */

-static int error_cnt, pass_cnt;
-static bool jit_enabled;
+#define EXIT_NO_TEST 2
+#define EXIT_ERR_SETUP_INFRA 3

-#define MAGIC_BYTES 123
+/* defined in test_progs.h */
+struct test_env env = {};

-/* ipv4 test vector */
-static struct {
-	struct ethhdr eth;
-	struct iphdr iph;
-	struct tcphdr tcp;
-} __packed pkt_v4 = {
-	.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
-	.iph.ihl = 5,
-	.iph.protocol = 6,
-	.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
-	.tcp.urg_ptr = 123,
+struct prog_test_def {
+	const char *test_name;
+	int test_num;
+	void (*run_test)(void);
+	bool force_log;
+	int error_cnt;
+	int skip_cnt;
+	bool tested;
+	bool need_cgroup_cleanup;
+
+	char *subtest_name;
+	int subtest_num;
+
+	/* store counts before subtest started */
+	int old_error_cnt;
 };

-/* ipv6 test vector */
-static struct {
-	struct ethhdr eth;
-	struct ipv6hdr iph;
-	struct tcphdr tcp;
-} __packed pkt_v6 = {
-	.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
-	.iph.nexthdr = 6,
-	.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
-	.tcp.urg_ptr = 123,
-};
+/* Override C runtime library's usleep() implementation to ensure nanosleep()
+ * is always called. Usleep is frequently used in selftests as a way to
+ * trigger kprobe and tracepoints.
+ */
+int usleep(useconds_t usec)
+{
+	struct timespec ts = {
+		.tv_sec = usec / 1000000,
+		.tv_nsec = (usec % 1000000) * 1000,
+	};

-#define CHECK(condition, tag, format...) ({ \
-	int __ret = !!(condition); \
-	if (__ret) { \
-		error_cnt++; \
-		printf("%s:FAIL:%s ", __func__, tag); \
-		printf(format); \
-	} else { \
-		pass_cnt++; \
-		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
-	} \
-	__ret; \
-})
+	return syscall(__NR_nanosleep, &ts, NULL);
+}

-static int bpf_find_map(const char *test, struct bpf_object *obj,
-			const char *name)
+static bool should_run(struct test_selector *sel, int num, const char *name)
+{
+	int i;
+
+	for (i = 0; i < sel->blacklist.cnt; i++) {
+		if (strstr(name, sel->blacklist.strs[i]))
+			return false;
+	}
+
+	for (i = 0; i < sel->whitelist.cnt; i++) {
+		if (strstr(name, sel->whitelist.strs[i]))
+			return true;
+	}
+
+	if (!sel->whitelist.cnt && !sel->num_set)
+		return true;
+
+	return num < sel->num_set_len && sel->num_set[num];
+}
+
+static void dump_test_log(const struct prog_test_def *test, bool failed)
+{
+	if (stdout == env.stdout)
+		return;
+
+	fflush(stdout); /* exports env.log_buf & env.log_cnt */
+
+	if (env.verbosity > VERBOSE_NONE || test->force_log || failed) {
+		if (env.log_cnt) {
+			env.log_buf[env.log_cnt] = '\0';
+			fprintf(env.stdout, "%s", env.log_buf);
+			if (env.log_buf[env.log_cnt - 1] != '\n')
+				fprintf(env.stdout, "\n");
+		}
+	}
+
+	fseeko(stdout, 0, SEEK_SET); /* rewind */
+}
+
+static void skip_account(void)
+{
+	if (env.test->skip_cnt) {
+		env.skip_cnt++;
+		env.test->skip_cnt = 0;
+	}
+}
+
+static void stdio_restore(void);
+
+/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
+ * it after each test/sub-test.
+ */
+static void reset_affinity() {
+
+	cpu_set_t cpuset;
+	int i, err;
+
+	CPU_ZERO(&cpuset);
+	for (i = 0; i < env.nr_cpus; i++)
+		CPU_SET(i, &cpuset);
+
+	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
+	if (err < 0) {
+		stdio_restore();
+		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
+		exit(EXIT_ERR_SETUP_INFRA);
+	}
+	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
+	if (err < 0) {
+		stdio_restore();
+		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
+		exit(EXIT_ERR_SETUP_INFRA);
+	}
+}
+
+static void save_netns(void)
+{
+	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+	if (env.saved_netns_fd == -1) {
+		perror("open(/proc/self/ns/net)");
+		exit(EXIT_ERR_SETUP_INFRA);
+	}
+}
+
+static void restore_netns(void)
+{
+	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
+		stdio_restore();
+		perror("setns(CLONE_NEWNS)");
+		exit(EXIT_ERR_SETUP_INFRA);
+	}
+}
+
+void test__end_subtest()
+{
+	struct prog_test_def *test = env.test;
+	int sub_error_cnt = test->error_cnt - test->old_error_cnt;
+
+	if (sub_error_cnt)
+		env.fail_cnt++;
+	else
+		env.sub_succ_cnt++;
+	skip_account();
+
+	dump_test_log(test, sub_error_cnt);
+
+	fprintf(env.stdout, "#%d/%d %s:%s\n",
+		test->test_num, test->subtest_num,
+		test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
+
+	free(test->subtest_name);
+	test->subtest_name = NULL;
+}
+
+bool test__start_subtest(const char *name)
+{
+	struct prog_test_def *test = env.test;
+
+	if (test->subtest_name)
+		test__end_subtest();
+
+	test->subtest_num++;
+
+	if (!name || !name[0]) {
+		fprintf(env.stderr,
+			"Subtest #%d didn't provide sub-test name!\n",
+			test->subtest_num);
+		return false;
+	}
+
+	if (!should_run(&env.subtest_selector, test->subtest_num, name))
+		return false;
+
+	test->subtest_name = strdup(name);
+	if (!test->subtest_name) {
+		fprintf(env.stderr,
+			"Subtest #%d: failed to copy subtest name!\n",
+			test->subtest_num);
+		return false;
+	}
+	env.test->old_error_cnt = env.test->error_cnt;
+
+	return true;
+}
+
+void test__force_log() {
+	env.test->force_log = true;
+}
+
+void test__skip(void)
+{
+	env.test->skip_cnt++;
+}
+
+void test__fail(void)
+{
+	env.test->error_cnt++;
+}
+
+int test__join_cgroup(const char *path)
+{
+	int fd;
+
+	if (!env.test->need_cgroup_cleanup) {
+		if (setup_cgroup_environment()) {
+			fprintf(stderr,
+				"#%d %s: Failed to setup cgroup environment\n",
+				env.test->test_num, env.test->test_name);
+			return -1;
+		}
+
+		env.test->need_cgroup_cleanup = true;
+	}
+
+	fd = create_and_get_cgroup(path);
+	if (fd < 0) {
+		fprintf(stderr,
+			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
+			env.test->test_num, env.test->test_name, path, errno);
+		return fd;
+	}
+
+	if (join_cgroup(path)) {
+		fprintf(stderr,
+			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
+			env.test->test_num, env.test->test_name, path, errno);
+		return -1;
+	}
+
+	return fd;
+}
+
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
 {
 	struct bpf_map *map;

 	map = bpf_object__find_map_by_name(obj, name);
 	if (!map) {
-		printf("%s:FAIL:map '%s' not found\n", test, name);
-		error_cnt++;
+		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
+		test__fail();
 		return -1;
 	}
 	return bpf_map__fd(map);
-}
-
-static void test_pkt_access(void)
-{
-	const char *file = "./test_pkt_access.o";
-	struct bpf_object *obj;
-	__u32 duration, retval;
-	int err, prog_fd;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
-				NULL, NULL, &retval, &duration);
-	CHECK(err || errno || retval, "ipv4",
-	      "err %d errno %d retval %d duration %d\n",
-	      err, errno, retval, duration);
-
-	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
-				NULL, NULL, &retval, &duration);
-	CHECK(err || errno || retval, "ipv6",
-	      "err %d errno %d retval %d duration %d\n",
-	      err, errno, retval, duration);
-	bpf_object__close(obj);
-}
-
-static void test_xdp(void)
-{
-	struct vip key4 = {.protocol = 6, .family = AF_INET};
-	struct vip key6 = {.protocol = 6, .family = AF_INET6};
-	struct iptnl_info value4 = {.family = AF_INET};
-	struct iptnl_info value6 = {.family = AF_INET6};
-	const char *file = "./test_xdp.o";
-	struct bpf_object *obj;
-	char buf[128];
-	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
-	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
-	__u32 duration, retval, size;
-	int err, prog_fd, map_fd;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &key4, &value4, 0);
-	bpf_map_update_elem(map_fd, &key6, &value6, 0);
-
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-
-	CHECK(err || errno || retval != XDP_TX || size != 74 ||
-	      iph->protocol != IPPROTO_IPIP, "ipv4",
-	      "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
-
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
-				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != XDP_TX || size != 114 ||
-	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
-	      "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
-out:
-	bpf_object__close(obj);
-}
-
-static void test_xdp_adjust_tail(void)
-{
-	const char *file = "./test_adjust_tail.o";
-	struct bpf_object *obj;
-	char buf[128];
-	__u32 duration, retval, size;
-	int err, prog_fd;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-
-	CHECK(err || errno || retval != XDP_DROP,
-	      "ipv4", "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
-
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
-				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != XDP_TX || size != 54,
-	      "ipv6", "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
-	bpf_object__close(obj);
-}
-
-
-
-#define MAGIC_VAL 0x1234
-#define NUM_ITER 100000
-#define VIP_NUM 5
-
-static void test_l4lb(const char *file)
-{
-	unsigned int nr_cpus = bpf_num_possible_cpus();
-	struct vip key = {.protocol = 6};
-	struct vip_meta {
-		__u32 flags;
-		__u32 vip_num;
-	} value = {.vip_num = VIP_NUM};
-	__u32 stats_key = VIP_NUM;
-	struct vip_stats {
-		__u64 bytes;
-		__u64 pkts;
-	} stats[nr_cpus];
-	struct real_definition {
-		union {
-			__be32 dst;
-			__be32 dstv6[4];
-		};
-		__u8 flags;
-	} real_def = {.dst = MAGIC_VAL};
-	__u32 ch_key = 11, real_num = 3;
-	__u32 duration, retval, size;
-	int err, i, prog_fd, map_fd;
-	__u64 bytes = 0, pkts = 0;
-	struct bpf_object *obj;
-	char buf[128];
-	u32 *magic = (u32 *)buf;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	map_fd = bpf_find_map(__func__, obj, "vip_map");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &key, &value, 0);
-
-	map_fd = bpf_find_map(__func__, obj, "ch_rings");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
-	map_fd = bpf_find_map(__func__, obj, "reals");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
-	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
-	      *magic != MAGIC_VAL, "ipv4",
-	      "err %d errno %d retval %d size %d magic %x\n",
-	      err, errno, retval, size, *magic);
-
-	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
-				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
-	      *magic != MAGIC_VAL, "ipv6",
-	      "err %d errno %d retval %d size %d magic %x\n",
-	      err, errno, retval, size, *magic);
-
-	map_fd = bpf_find_map(__func__, obj, "stats");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_lookup_elem(map_fd, &stats_key, stats);
-	for (i = 0; i < nr_cpus; i++) {
-		bytes += stats[i].bytes;
-		pkts += stats[i].pkts;
-	}
-	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
-		error_cnt++;
-		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
-	}
-out:
-	bpf_object__close(obj);
-}
-
-static void test_l4lb_all(void)
-{
-	const char *file1 = "./test_l4lb.o";
-	const char *file2 = "./test_l4lb_noinline.o";
-
-	test_l4lb(file1);
-	test_l4lb(file2);
-}
-
-static void test_xdp_noinline(void)
-{
-	const char *file = "./test_xdp_noinline.o";
-	unsigned int nr_cpus = bpf_num_possible_cpus();
-	struct vip key = {.protocol = 6};
-	struct vip_meta {
-		__u32 flags;
-		__u32 vip_num;
-	} value = {.vip_num = VIP_NUM};
-	__u32 stats_key = VIP_NUM;
-	struct vip_stats {
-		__u64 bytes;
-		__u64 pkts;
-	} stats[nr_cpus];
-	struct real_definition {
-		union {
-			__be32 dst;
-			__be32 dstv6[4];
-		};
-		__u8 flags;
-	} real_def = {.dst = MAGIC_VAL};
-	__u32 ch_key = 11, real_num = 3;
-	__u32 duration, retval, size;
-	int err, i, prog_fd, map_fd;
-	__u64 bytes = 0, pkts = 0;
-	struct bpf_object *obj;
-	char buf[128];
-	u32 *magic = (u32 *)buf;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	map_fd = bpf_find_map(__func__, obj, "vip_map");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &key, &value, 0);
-
-	map_fd = bpf_find_map(__func__, obj, "ch_rings");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
-	map_fd = bpf_find_map(__func__, obj, "reals");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
-	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != 1 || size != 54 ||
-	      *magic != MAGIC_VAL, "ipv4",
-	      "err %d errno %d retval %d size %d magic %x\n",
-	      err, errno, retval, size, *magic);
-
-	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
-				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != 1 || size != 74 ||
-	      *magic != MAGIC_VAL, "ipv6",
-	      "err %d errno %d retval %d size %d magic %x\n",
-	      err, errno, retval, size, *magic);
-
-	map_fd = bpf_find_map(__func__, obj, "stats");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_lookup_elem(map_fd, &stats_key, stats);
-	for (i = 0; i < nr_cpus; i++) {
-		bytes += stats[i].bytes;
-		pkts += stats[i].pkts;
-	}
-	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
-		error_cnt++;
-		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
-	}
-out:
-	bpf_object__close(obj);
-}
-
-static void test_tcp_estats(void)
-{
-	const char *file = "./test_tcp_estats.o";
-	int err, prog_fd;
-	struct bpf_object *obj;
-	__u32 duration = 0;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
-	CHECK(err, "", "err %d errno %d\n", err, errno);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	bpf_object__close(obj);
-}
-
-static inline __u64 ptr_to_u64(const void *ptr)
-{
-	return (__u64) (unsigned long) ptr;
 }

 static bool is_jit_enabled(void)
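The usleep() override added in the hunk above works because a non-static definition in the test binary takes link-time precedence over libc's, and issuing the raw nanosleep syscall guarantees that a kprobe attached to that syscall actually fires. A minimal standalone sketch of the same technique (the main() driver here is an illustrative assumption, not part of the patch):

/* sketch: override libc usleep() so tracing always enters __NR_nanosleep */
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int usleep(useconds_t usec)
{
	struct timespec ts = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};

	/* bypass libc's implementation, which may use a different syscall */
	return syscall(__NR_nanosleep, &ts, NULL);
}

int main(void)
{
	usleep(1); /* a kprobe on the nanosleep syscall would fire here */
	return 0;
}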
@@ -411,475 +272,7 @@
 	return enabled;
 }

-static void test_bpf_obj_id(void)
-{
-	const __u64 array_magic_value = 0xfaceb00c;
-	const __u32 array_key = 0;
-	const int nr_iters = 2;
-	const char *file = "./test_obj_id.o";
-	const char *expected_prog_name = "test_obj_id";
-	const char *expected_map_name = "test_map_id";
-	const __u64 nsec_per_sec = 1000000000;
-
-	struct bpf_object *objs[nr_iters];
-	int prog_fds[nr_iters], map_fds[nr_iters];
-	/* +1 to test for the info_len returned by kernel */
-	struct bpf_prog_info prog_infos[nr_iters + 1];
-	struct bpf_map_info map_infos[nr_iters + 1];
-	/* Each prog only uses one map. +1 to test nr_map_ids
-	 * returned by kernel.
-	 */
-	__u32 map_ids[nr_iters + 1];
-	char jited_insns[128], xlated_insns[128], zeros[128];
-	__u32 i, next_id, info_len, nr_id_found, duration = 0;
-	struct timespec real_time_ts, boot_time_ts;
-	int err = 0;
-	__u64 array_value;
-	uid_t my_uid = getuid();
-	time_t now, load_time;
-
-	err = bpf_prog_get_fd_by_id(0);
-	CHECK(err >= 0 || errno != ENOENT,
-	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
-
-	err = bpf_map_get_fd_by_id(0);
-	CHECK(err >= 0 || errno != ENOENT,
-	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
-
-	for (i = 0; i < nr_iters; i++)
-		objs[i] = NULL;
-
-	/* Check bpf_obj_get_info_by_fd() */
-	bzero(zeros, sizeof(zeros));
-	for (i = 0; i < nr_iters; i++) {
-		now = time(NULL);
-		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
-				    &objs[i], &prog_fds[i]);
-		/* test_obj_id.o is a dumb prog. It should never fail
-		 * to load.
-		 */
-		if (err)
-			error_cnt++;
-		assert(!err);
-
-		/* Insert a magic value to the map */
-		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
-		assert(map_fds[i] >= 0);
-		err = bpf_map_update_elem(map_fds[i], &array_key,
-					  &array_magic_value, 0);
-		assert(!err);
-
-		/* Check getting map info */
-		info_len = sizeof(struct bpf_map_info) * 2;
-		bzero(&map_infos[i], info_len);
-		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
-					     &info_len);
-		if (CHECK(err ||
-			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
-			  map_infos[i].key_size != sizeof(__u32) ||
-			  map_infos[i].value_size != sizeof(__u64) ||
-			  map_infos[i].max_entries != 1 ||
-			  map_infos[i].map_flags != 0 ||
-			  info_len != sizeof(struct bpf_map_info) ||
-			  strcmp((char *)map_infos[i].name, expected_map_name),
-			  "get-map-info(fd)",
-			  "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
-			  err, errno,
-			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
-			  info_len, sizeof(struct bpf_map_info),
-			  map_infos[i].key_size,
-			  map_infos[i].value_size,
-			  map_infos[i].max_entries,
-			  map_infos[i].map_flags,
-			  map_infos[i].name, expected_map_name))
-			goto done;
-
-		/* Check getting prog info */
-		info_len = sizeof(struct bpf_prog_info) * 2;
-		bzero(&prog_infos[i], info_len);
-		bzero(jited_insns, sizeof(jited_insns));
-		bzero(xlated_insns, sizeof(xlated_insns));
-		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
-		prog_infos[i].jited_prog_len = sizeof(jited_insns);
-		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
-		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
-		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
-		prog_infos[i].nr_map_ids = 2;
-		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
-		assert(!err);
-		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
-		assert(!err);
-		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
-					     &info_len);
-		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
-			+ (prog_infos[i].load_time / nsec_per_sec);
-		if (CHECK(err ||
-			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
-			  info_len != sizeof(struct bpf_prog_info) ||
-			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
-			  (jit_enabled &&
-			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
-			  !prog_infos[i].xlated_prog_len ||
-			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
-			  load_time < now - 60 || load_time > now + 60 ||
-			  prog_infos[i].created_by_uid != my_uid ||
-			  prog_infos[i].nr_map_ids != 1 ||
-			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
-			  strcmp((char *)prog_infos[i].name, expected_prog_name),
-			  "get-prog-info(fd)",
-			  "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
-			  err, errno, i,
-			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
-			  info_len, sizeof(struct bpf_prog_info),
-			  jit_enabled,
-			  prog_infos[i].jited_prog_len,
-			  prog_infos[i].xlated_prog_len,
-			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
-			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
-			  load_time, now,
-			  prog_infos[i].created_by_uid, my_uid,
-			  prog_infos[i].nr_map_ids, 1,
-			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
-			  prog_infos[i].name, expected_prog_name))
-			goto done;
-	}
-
-	/* Check bpf_prog_get_next_id() */
-	nr_id_found = 0;
-	next_id = 0;
-	while (!bpf_prog_get_next_id(next_id, &next_id)) {
-		struct bpf_prog_info prog_info = {};
-		__u32 saved_map_id;
-		int prog_fd;
-
-		info_len = sizeof(prog_info);
-
-		prog_fd = bpf_prog_get_fd_by_id(next_id);
-		if (prog_fd < 0 && errno == ENOENT)
-			/* The bpf_prog is in the dead row */
-			continue;
-		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
-			  "prog_fd %d next_id %d errno %d\n",
-			  prog_fd, next_id, errno))
-			break;
-
-		for (i = 0; i < nr_iters; i++)
-			if (prog_infos[i].id == next_id)
-				break;
-
-		if (i == nr_iters)
-			continue;
-
-		nr_id_found++;
-
-		/* Negative test:
-		 * prog_info.nr_map_ids = 1
-		 * prog_info.map_ids = NULL
-		 */
-		prog_info.nr_map_ids = 1;
-		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
-		if (CHECK(!err || errno != EFAULT,
-			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
-			  err, errno, EFAULT))
-			break;
-		bzero(&prog_info, sizeof(prog_info));
-		info_len = sizeof(prog_info);
-
-		saved_map_id = *(int *)(prog_infos[i].map_ids);
-		prog_info.map_ids = prog_infos[i].map_ids;
-		prog_info.nr_map_ids = 2;
-		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
-		prog_infos[i].jited_prog_insns = 0;
-		prog_infos[i].xlated_prog_insns = 0;
-		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
-		      memcmp(&prog_info, &prog_infos[i], info_len) ||
-		      *(int *)prog_info.map_ids != saved_map_id,
-		      "get-prog-info(next_id->fd)",
-		      "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
-		      err, errno, info_len, sizeof(struct bpf_prog_info),
-		      memcmp(&prog_info, &prog_infos[i], info_len),
-		      *(int *)prog_info.map_ids, saved_map_id);
-		close(prog_fd);
-	}
-	CHECK(nr_id_found != nr_iters,
-	      "check total prog id found by get_next_id",
-	      "nr_id_found %u(%u)\n",
-	      nr_id_found, nr_iters);
-
-	/* Check bpf_map_get_next_id() */
-	nr_id_found = 0;
-	next_id = 0;
-	while (!bpf_map_get_next_id(next_id, &next_id)) {
-		struct bpf_map_info map_info = {};
-		int map_fd;
-
-		info_len = sizeof(map_info);
-
-		map_fd = bpf_map_get_fd_by_id(next_id);
-		if (map_fd < 0 && errno == ENOENT)
-			/* The bpf_map is in the dead row */
-			continue;
-		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
-			  "map_fd %d next_id %u errno %d\n",
-			  map_fd, next_id, errno))
-			break;
-
-		for (i = 0; i < nr_iters; i++)
-			if (map_infos[i].id == next_id)
-				break;
-
-		if (i == nr_iters)
-			continue;
-
-		nr_id_found++;
-
-		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
-		assert(!err);
-
-		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
-		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
-		      memcmp(&map_info, &map_infos[i], info_len) ||
-		      array_value != array_magic_value,
-		      "check get-map-info(next_id->fd)",
-		      "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
-		      err, errno, info_len, sizeof(struct bpf_map_info),
-		      memcmp(&map_info, &map_infos[i], info_len),
-		      array_value, array_magic_value);
-
-		close(map_fd);
-	}
-	CHECK(nr_id_found != nr_iters,
-	      "check total map id found by get_next_id",
-	      "nr_id_found %u(%u)\n",
-	      nr_id_found, nr_iters);
-
-done:
-	for (i = 0; i < nr_iters; i++)
-		bpf_object__close(objs[i]);
-}
-
-static void test_pkt_md_access(void)
-{
-	const char *file = "./test_pkt_md_access.o";
-	struct bpf_object *obj;
-	__u32 duration, retval;
-	int err, prog_fd;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
-				NULL, NULL, &retval, &duration);
-	CHECK(err || retval, "",
-	      "err %d errno %d retval %d duration %d\n",
-	      err, errno, retval, duration);
-
-	bpf_object__close(obj);
-}
-
-static void test_obj_name(void)
-{
-	struct {
-		const char *name;
-		int success;
-		int expected_errno;
-	} tests[] = {
-		{ "", 1, 0 },
-		{ "_123456789ABCDE", 1, 0 },
-		{ "_123456789ABCDEF", 0, EINVAL },
-		{ "_123456789ABCD\n", 0, EINVAL },
-	};
-	struct bpf_insn prog[] = {
-		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-	};
-	__u32 duration = 0;
-	int i;
-
-	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
-		size_t name_len = strlen(tests[i].name) + 1;
-		union bpf_attr attr;
-		size_t ncopy;
-		int fd;
-
-		/* test different attr.prog_name during BPF_PROG_LOAD */
-		ncopy = name_len < sizeof(attr.prog_name) ?
-			name_len : sizeof(attr.prog_name);
-		bzero(&attr, sizeof(attr));
-		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
-		attr.insn_cnt = 2;
-		attr.insns = ptr_to_u64(prog);
-		attr.license = ptr_to_u64("");
-		memcpy(attr.prog_name, tests[i].name, ncopy);
-
-		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
-		CHECK((tests[i].success && fd < 0) ||
-		      (!tests[i].success && fd != -1) ||
-		      (!tests[i].success && errno != tests[i].expected_errno),
-		      "check-bpf-prog-name",
-		      "fd %d(%d) errno %d(%d)\n",
-		      fd, tests[i].success, errno, tests[i].expected_errno);
-
-		if (fd != -1)
-			close(fd);
-
-		/* test different attr.map_name during BPF_MAP_CREATE */
-		ncopy = name_len < sizeof(attr.map_name) ?
-			name_len : sizeof(attr.map_name);
-		bzero(&attr, sizeof(attr));
-		attr.map_type = BPF_MAP_TYPE_ARRAY;
-		attr.key_size = 4;
-		attr.value_size = 4;
-		attr.max_entries = 1;
-		attr.map_flags = 0;
-		memcpy(attr.map_name, tests[i].name, ncopy);
-		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
-		CHECK((tests[i].success && fd < 0) ||
-		      (!tests[i].success && fd != -1) ||
-		      (!tests[i].success && errno != tests[i].expected_errno),
-		      "check-bpf-map-name",
-		      "fd %d(%d) errno %d(%d)\n",
-		      fd, tests[i].success, errno, tests[i].expected_errno);
-
-		if (fd != -1)
-			close(fd);
-	}
-}
-
-static void test_tp_attach_query(void)
-{
-	const int num_progs = 3;
-	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
-	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
-	const char *file = "./test_tracepoint.o";
-	struct perf_event_query_bpf *query;
-	struct perf_event_attr attr = {};
-	struct bpf_object *obj[num_progs];
-	struct bpf_prog_info prog_info;
-	char buf[256];
-
-	snprintf(buf, sizeof(buf),
-		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
-	efd = open(buf, O_RDONLY, 0);
-	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
-		return;
-	bytes = read(efd, buf, sizeof(buf));
-	close(efd);
-	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
-		  "read", "bytes %d errno %d\n", bytes, errno))
-		return;
-
-	attr.config = strtol(buf, NULL, 0);
-	attr.type = PERF_TYPE_TRACEPOINT;
-	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
-	attr.sample_period = 1;
-	attr.wakeup_events = 1;
-
-	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
-	for (i = 0; i < num_progs; i++) {
-		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
-				    &prog_fd[i]);
-		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
-			goto cleanup1;
-
-		bzero(&prog_info, sizeof(prog_info));
-		prog_info.jited_prog_len = 0;
-		prog_info.xlated_prog_len = 0;
-		prog_info.nr_map_ids = 0;
-		info_len = sizeof(prog_info);
-		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
-		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
-			  err, errno))
-			goto cleanup1;
-		saved_prog_ids[i] = prog_info.id;
-
-		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-				    0 /* cpu 0 */, -1 /* group id */,
-				    0 /* flags */);
-		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
-			  pmu_fd[i], errno))
-			goto cleanup2;
-		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
-		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
-			  err, errno))
-			goto cleanup3;
-
-		if (i == 0) {
-			/* check NULL prog array query */
-			query->ids_len = num_progs;
-			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
-			if (CHECK(err || query->prog_cnt != 0,
-				  "perf_event_ioc_query_bpf",
-				  "err %d errno %d query->prog_cnt %u\n",
-				  err, errno, query->prog_cnt))
-				goto cleanup3;
-		}
-
-		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
-		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
-			  err, errno))
-			goto cleanup3;
-
-		if (i == 1) {
-			/* try to get # of programs only */
-			query->ids_len = 0;
-			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
-			if (CHECK(err || query->prog_cnt != 2,
-				  "perf_event_ioc_query_bpf",
-				  "err %d errno %d query->prog_cnt %u\n",
-				  err, errno, query->prog_cnt))
-				goto cleanup3;
-
-			/* try a few negative tests */
-			/* invalid query pointer */
-			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
-				    (struct perf_event_query_bpf *)0x1);
-			if (CHECK(!err || errno != EFAULT,
-				  "perf_event_ioc_query_bpf",
-				  "err %d errno %d\n", err, errno))
-				goto cleanup3;
-
-			/* no enough space */
-			query->ids_len = 1;
-			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
-			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
-				  "perf_event_ioc_query_bpf",
-				  "err %d errno %d query->prog_cnt %u\n",
-				  err, errno, query->prog_cnt))
-				goto cleanup3;
-		}
-
-		query->ids_len = num_progs;
-		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
-		if (CHECK(err || query->prog_cnt != (i + 1),
-			  "perf_event_ioc_query_bpf",
-			  "err %d errno %d query->prog_cnt %u\n",
-			  err, errno, query->prog_cnt))
-			goto cleanup3;
-		for (j = 0; j < i + 1; j++)
-			if (CHECK(saved_prog_ids[j] != query->ids[j],
-				  "perf_event_ioc_query_bpf",
-				  "#%d saved_prog_id %x query prog_id %x\n",
-				  j, saved_prog_ids[j], query->ids[j]))
-				goto cleanup3;
-	}
-
-	i = num_progs - 1;
-	for (; i >= 0; i--) {
-	cleanup3:
-		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
-	cleanup2:
-		close(pmu_fd[i]);
-	cleanup1:
-		bpf_object__close(obj[i]);
-	}
-	free(query);
-}
-
-static int compare_map_keys(int map1_fd, int map2_fd)
+int compare_map_keys(int map1_fd, int map2_fd)
 {
 	__u32 key, next_key;
 	char val_buf[PERF_MAX_STACK_DEPTH *
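compare_map_keys(), made non-static here so the tests relocated to prog_tests/ can share it, is built on the standard bpf_map_get_next_key() iteration idiom: passing NULL as the previous key yields the first key, each later call yields the key after the one passed in, and the call fails once the map is exhausted. A hedged sketch of that idiom (count_map_keys is a hypothetical helper, not part of the patch):

#include <bpf/bpf.h>
#include <linux/types.h>

/* count the keys currently present in an arbitrary BPF map */
static int count_map_keys(int map_fd)
{
	__u32 key, next_key;
	int cnt = 0;

	/* NULL "previous key" asks the kernel for the first key */
	if (bpf_map_get_next_key(map_fd, NULL, &key))
		return 0; /* empty map */
	cnt++;
	while (!bpf_map_get_next_key(map_fd, &key, &next_key)) {
		key = next_key;
		cnt++;
	}
	return cnt;
}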
@@ -906,7 +299,7 @@
 	return 0;
 }

-static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
 {
 	__u32 key, next_key, *cur_key_p, *next_key_p;
 	char *val_buf1, *val_buf2;
@@ -942,165 +335,7 @@
 	return err;
 }

-static void test_stacktrace_map()
-{
-	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
-	const char *file = "./test_stacktrace_map.o";
-	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
-	struct perf_event_attr attr = {};
-	__u32 key, val, duration = 0;
-	struct bpf_object *obj;
-	char buf[256];
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
-	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
-		return;
-
-	/* Get the ID for the sched/sched_switch tracepoint */
-	snprintf(buf, sizeof(buf),
-		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
-	efd = open(buf, O_RDONLY, 0);
-	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
-		goto close_prog;
-
-	bytes = read(efd, buf, sizeof(buf));
-	close(efd);
-	if (bytes <= 0 || bytes >= sizeof(buf))
-		goto close_prog;
-
-	/* Open the perf event and attach bpf progrram */
-	attr.config = strtol(buf, NULL, 0);
-	attr.type = PERF_TYPE_TRACEPOINT;
-	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
-	attr.sample_period = 1;
-	attr.wakeup_events = 1;
-	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-			 0 /* cpu 0 */, -1 /* group id */,
-			 0 /* flags */);
-	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
-		  pmu_fd, errno))
-		goto close_prog;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-	if (err)
-		goto disable_pmu;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-	if (err)
-		goto disable_pmu;
-
-	/* find map fds */
-	control_map_fd = bpf_find_map(__func__, obj, "control_map");
-	if (control_map_fd < 0)
-		goto disable_pmu;
-
-	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
-	if (stackid_hmap_fd < 0)
-		goto disable_pmu;
-
-	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
-	if (stackmap_fd < 0)
-		goto disable_pmu;
-
-	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
-	if (stack_amap_fd < 0)
-		goto disable_pmu;
-
-	/* give some time for bpf program run */
-	sleep(1);
-
-	/* disable stack trace collection */
-	key = 0;
-	val = 1;
-	bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
-	/* for every element in stackid_hmap, we can find a corresponding one
-	 * in stackmap, and vise versa.
-	 */
-	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
-	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
-		  "err %d errno %d\n", err, errno))
-		goto disable_pmu_noerr;
-
-	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
-	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
-		  "err %d errno %d\n", err, errno))
-		goto disable_pmu_noerr;
-
-	stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
-	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
-	if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
-		  "err %d errno %d\n", err, errno))
-		goto disable_pmu_noerr;
-
-	goto disable_pmu_noerr;
-disable_pmu:
-	error_cnt++;
-disable_pmu_noerr:
-	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-	close(pmu_fd);
-close_prog:
-	bpf_object__close(obj);
-}
-
-static void test_stacktrace_map_raw_tp()
-{
-	int control_map_fd, stackid_hmap_fd, stackmap_fd;
-	const char *file = "./test_stacktrace_map.o";
-	int efd, err, prog_fd;
-	__u32 key, val, duration = 0;
-	struct bpf_object *obj;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
-	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
-		return;
-
-	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
-	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
-		goto close_prog;
-
-	/* find map fds */
-	control_map_fd = bpf_find_map(__func__, obj, "control_map");
-	if (control_map_fd < 0)
-		goto close_prog;
-
-	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
-	if (stackid_hmap_fd < 0)
-		goto close_prog;
-
-	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
-	if (stackmap_fd < 0)
-		goto close_prog;
-
-	/* give some time for bpf program run */
-	sleep(1);
-
-	/* disable stack trace collection */
-	key = 0;
-	val = 1;
-	bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
-	/* for every element in stackid_hmap, we can find a corresponding one
-	 * in stackmap, and vise versa.
-	 */
-	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
-	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
-		  "err %d errno %d\n", err, errno))
-		goto close_prog;
-
-	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
-	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
-		  "err %d errno %d\n", err, errno))
-		goto close_prog;
-
-	goto close_prog_noerr;
-close_prog:
-	error_cnt++;
-close_prog_noerr:
-	bpf_object__close(obj);
-}
-
-static int extract_build_id(char *build_id, size_t size)
+int extract_build_id(char *build_id, size_t size)
 {
 	FILE *fp;
 	char *line = NULL;
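extract_build_id(), likewise made non-static above so the relocated stacktrace tests can call it, shells out to readelf and parses the build ID of a test binary; only the first lines of its body are visible in this hunk. A hedged sketch of that general approach, where the command, the parsing, and the read_build_id name are all illustrative assumptions rather than the function's exact body:

#include <stdio.h>
#include <string.h>

static int read_build_id(const char *binary, char *id, size_t size)
{
	char cmd[256], line[512];
	FILE *fp;

	/* "readelf -n" dumps the notes, including a "Build ID: <hex>" line */
	snprintf(cmd, sizeof(cmd), "readelf -n %s 2>/dev/null", binary);
	fp = popen(cmd, "r");
	if (!fp)
		return -1;

	while (fgets(line, sizeof(line), fp)) {
		char *p = strstr(line, "Build ID: ");

		if (!p)
			continue;
		strncpy(id, p + strlen("Build ID: "), size - 1);
		id[size - 1] = '\0';
		id[strcspn(id, "\n")] = '\0'; /* strip trailing newline */
		pclose(fp);
		return 0;
	}
	pclose(fp);
	return -1;
}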
....@@ -1125,632 +360,392 @@
1125360 return -1;
1126361 }
1127362
1128
-static void test_stacktrace_build_id(void)
1129
-{
1130
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
1131
- const char *file = "./test_stacktrace_build_id.o";
1132
- int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
1133
- struct perf_event_attr attr = {};
1134
- __u32 key, previous_key, val, duration = 0;
1135
- struct bpf_object *obj;
1136
- char buf[256];
1137
- int i, j;
1138
- struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1139
- int build_id_matches = 0;
1140
- int retry = 1;
363
+/* extern declarations for test funcs */
364
+#define DEFINE_TEST(name) extern void test_##name(void);
365
+#include <prog_tests/tests.h>
366
+#undef DEFINE_TEST
1141367
1142
-retry:
1143
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
1144
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
1145
- goto out;
368
+static struct prog_test_def prog_test_defs[] = {
369
+#define DEFINE_TEST(name) { \
370
+ .test_name = #name, \
371
+ .run_test = &test_##name, \
372
+},
373
+#include <prog_tests/tests.h>
374
+#undef DEFINE_TEST
375
+};
376
+const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
1146377
1147
- /* Get the ID for the sched/sched_switch tracepoint */
1148
- snprintf(buf, sizeof(buf),
1149
- "/sys/kernel/debug/tracing/events/random/urandom_read/id");
1150
- efd = open(buf, O_RDONLY, 0);
1151
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
1152
- goto close_prog;
378
+const char *argp_program_version = "test_progs 0.1";
379
+const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
380
+const char argp_program_doc[] = "BPF selftests test runner";
1153381
1154
- bytes = read(efd, buf, sizeof(buf));
1155
- close(efd);
1156
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
1157
- "read", "bytes %d errno %d\n", bytes, errno))
1158
- goto close_prog;
1159
-
1160
- /* Open the perf event and attach bpf progrram */
1161
- attr.config = strtol(buf, NULL, 0);
1162
- attr.type = PERF_TYPE_TRACEPOINT;
1163
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
1164
- attr.sample_period = 1;
1165
- attr.wakeup_events = 1;
1166
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
1167
- 0 /* cpu 0 */, -1 /* group id */,
1168
- 0 /* flags */);
1169
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
1170
- pmu_fd, errno))
1171
- goto close_prog;
1172
-
1173
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1174
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
1175
- err, errno))
1176
- goto close_pmu;
1177
-
1178
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
1179
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
1180
- err, errno))
1181
- goto disable_pmu;
1182
-
1183
- /* find map fds */
1184
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
1185
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
1186
- "err %d errno %d\n", err, errno))
1187
- goto disable_pmu;
1188
-
1189
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
1190
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
1191
- "err %d errno %d\n", err, errno))
1192
- goto disable_pmu;
1193
-
1194
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
1195
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
1196
- err, errno))
1197
- goto disable_pmu;
1198
-
1199
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
1200
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
1201
- "err %d errno %d\n", err, errno))
1202
- goto disable_pmu;
1203
-
1204
- assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
1205
- == 0);
1206
- assert(system("./urandom_read") == 0);
1207
- /* disable stack trace collection */
1208
- key = 0;
1209
- val = 1;
1210
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
1211
-
1212
- /* for every element in stackid_hmap, we can find a corresponding one
1213
- * in stackmap, and vise versa.
1214
- */
1215
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
1216
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
1217
- "err %d errno %d\n", err, errno))
1218
- goto disable_pmu;
1219
-
1220
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
1221
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
1222
- "err %d errno %d\n", err, errno))
1223
- goto disable_pmu;
1224
-
1225
- err = extract_build_id(buf, 256);
1226
-
1227
- if (CHECK(err, "get build_id with readelf",
1228
- "err %d errno %d\n", err, errno))
1229
- goto disable_pmu;
1230
-
1231
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
1232
- if (CHECK(err, "get_next_key from stackmap",
1233
- "err %d, errno %d\n", err, errno))
1234
- goto disable_pmu;
1235
-
1236
- do {
1237
- char build_id[64];
1238
-
1239
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
1240
- if (CHECK(err, "lookup_elem from stackmap",
1241
- "err %d, errno %d\n", err, errno))
1242
- goto disable_pmu;
1243
- for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
1244
- if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
1245
- id_offs[i].offset != 0) {
1246
- for (j = 0; j < 20; ++j)
1247
- sprintf(build_id + 2 * j, "%02x",
1248
- id_offs[i].build_id[j] & 0xff);
1249
- if (strstr(buf, build_id) != NULL)
1250
- build_id_matches = 1;
1251
- }
1252
- previous_key = key;
1253
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1254
-
1255
- /* stack_map_get_build_id_offset() is racy and sometimes can return
1256
- * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
1257
- * try it one more time.
1258
- */
1259
- if (build_id_matches < 1 && retry--) {
1260
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1261
- close(pmu_fd);
1262
- bpf_object__close(obj);
1263
- printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
1264
- __func__);
1265
- goto retry;
1266
- }
1267
-
1268
- if (CHECK(build_id_matches < 1, "build id match",
1269
- "Didn't find expected build ID from the map\n"))
1270
- goto disable_pmu;
1271
-
1272
- stack_trace_len = PERF_MAX_STACK_DEPTH
1273
- * sizeof(struct bpf_stack_build_id);
1274
- err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
1275
- CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
1276
- "err %d errno %d\n", err, errno);
1277
-
1278
-disable_pmu:
1279
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1280
-
1281
-close_pmu:
1282
- close(pmu_fd);
1283
-
1284
-close_prog:
1285
- bpf_object__close(obj);
1286
-
1287
-out:
1288
- return;
1289
-}
1290
-
1291
-static void test_stacktrace_build_id_nmi(void)
1292
-{
1293
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
1294
- const char *file = "./test_stacktrace_build_id.o";
1295
- int err, pmu_fd, prog_fd;
1296
- struct perf_event_attr attr = {
1297
- .sample_freq = 5000,
1298
- .freq = 1,
1299
- .type = PERF_TYPE_HARDWARE,
1300
- .config = PERF_COUNT_HW_CPU_CYCLES,
1301
- };
1302
- __u32 key, previous_key, val, duration = 0;
1303
- struct bpf_object *obj;
1304
- char buf[256];
1305
- int i, j;
1306
- struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1307
- int build_id_matches = 0;
1308
- int retry = 1;
1309
-
1310
-retry:
1311
- err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
1312
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
1313
- return;
1314
-
1315
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
1316
- 0 /* cpu 0 */, -1 /* group id */,
1317
- 0 /* flags */);
1318
- if (CHECK(pmu_fd < 0, "perf_event_open",
1319
- "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
1320
- pmu_fd, errno))
1321
- goto close_prog;
1322
-
1323
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1324
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
1325
- err, errno))
1326
- goto close_pmu;
1327
-
1328
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
1329
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
1330
- err, errno))
1331
- goto disable_pmu;
1332
-
1333
- /* find map fds */
1334
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
1335
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
1336
- "err %d errno %d\n", err, errno))
1337
- goto disable_pmu;
1338
-
1339
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
1340
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
1341
- "err %d errno %d\n", err, errno))
1342
- goto disable_pmu;
1343
-
1344
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
1345
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
1346
- err, errno))
1347
- goto disable_pmu;
1348
-
1349
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
1350
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
1351
- "err %d errno %d\n", err, errno))
1352
- goto disable_pmu;
1353
-
1354
- assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
1355
- == 0);
1356
- assert(system("taskset 0x1 ./urandom_read 100000") == 0);
1357
- /* disable stack trace collection */
1358
- key = 0;
1359
- val = 1;
1360
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
1361
-
1362
- /* for every element in stackid_hmap, we can find a corresponding one
1363
- * in stackmap, and vise versa.
1364
- */
1365
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
1366
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
1367
- "err %d errno %d\n", err, errno))
1368
- goto disable_pmu;
1369
-
1370
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
1371
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
1372
- "err %d errno %d\n", err, errno))
1373
- goto disable_pmu;
1374
-
1375
- err = extract_build_id(buf, 256);
1376
-
1377
- if (CHECK(err, "get build_id with readelf",
1378
- "err %d errno %d\n", err, errno))
1379
- goto disable_pmu;
1380
-
1381
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
1382
- if (CHECK(err, "get_next_key from stackmap",
1383
- "err %d, errno %d\n", err, errno))
1384
- goto disable_pmu;
1385
-
1386
- do {
1387
- char build_id[64];
1388
-
1389
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
1390
- if (CHECK(err, "lookup_elem from stackmap",
1391
- "err %d, errno %d\n", err, errno))
1392
- goto disable_pmu;
1393
- for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
1394
- if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
1395
- id_offs[i].offset != 0) {
1396
- for (j = 0; j < 20; ++j)
1397
- sprintf(build_id + 2 * j, "%02x",
1398
- id_offs[i].build_id[j] & 0xff);
1399
- if (strstr(buf, build_id) != NULL)
1400
- build_id_matches = 1;
1401
- }
1402
- previous_key = key;
1403
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1404
-
1405
- /* stack_map_get_build_id_offset() is racy and sometimes can return
1406
- * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
1407
- * try it one more time.
1408
- */
1409
- if (build_id_matches < 1 && retry--) {
1410
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1411
- close(pmu_fd);
1412
- bpf_object__close(obj);
1413
- printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
1414
- __func__);
1415
- goto retry;
1416
- }
1417
-
1418
- if (CHECK(build_id_matches < 1, "build id match",
1419
- "Didn't find expected build ID from the map\n"))
1420
- goto disable_pmu;
1421
-
1422
- /*
1423
- * We intentionally skip compare_stack_ips(). This is because we
1424
- * only support one in_nmi() ips-to-build_id translation per cpu
1425
- * at any time, thus stack_amap here will always fallback to
1426
- * BPF_STACK_BUILD_ID_IP;
1427
- */
1428
-
1429
-disable_pmu:
1430
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1431
-
1432
-close_pmu:
1433
- close(pmu_fd);
1434
-
1435
-close_prog:
1436
- bpf_object__close(obj);
1437
-}
1438
-
1439
-#define MAX_CNT_RAWTP 10ull
1440
-#define MAX_STACK_RAWTP 100
1441
-struct get_stack_trace_t {
1442
- int pid;
1443
- int kern_stack_size;
1444
- int user_stack_size;
1445
- int user_stack_buildid_size;
1446
- __u64 kern_stack[MAX_STACK_RAWTP];
1447
- __u64 user_stack[MAX_STACK_RAWTP];
1448
- struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
382
+enum ARG_KEYS {
383
+ ARG_TEST_NUM = 'n',
384
+ ARG_TEST_NAME = 't',
385
+ ARG_TEST_NAME_BLACKLIST = 'b',
386
+ ARG_VERIFIER_STATS = 's',
387
+ ARG_VERBOSE = 'v',
388
+ ARG_GET_TEST_CNT = 'c',
389
+ ARG_LIST_TEST_NAMES = 'l',
1449390 };
1450391
1451
-static int get_stack_print_output(void *data, int size)
392
+static const struct argp_option opts[] = {
393
+ { "num", ARG_TEST_NUM, "NUM", 0,
394
+ "Run test number NUM only " },
395
+ { "name", ARG_TEST_NAME, "NAMES", 0,
396
+ "Run tests with names containing any string from NAMES list" },
397
+ { "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
398
+ "Don't run tests with names containing any string from NAMES list" },
399
+ { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
400
+ "Output verifier statistics", },
401
+ { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
402
+ "Verbose output (use -vv or -vvv for progressively verbose output)" },
403
+ { "count", ARG_GET_TEST_CNT, NULL, 0,
404
+ "Get number of selected top-level tests " },
405
+ { "list", ARG_LIST_TEST_NAMES, NULL, 0,
406
+ "List test names that would run (without running them) " },
407
+ {},
408
+};
409
+
410
+static int libbpf_print_fn(enum libbpf_print_level level,
411
+ const char *format, va_list args)
1452412 {
1453
- bool good_kern_stack = false, good_user_stack = false;
1454
- const char *nonjit_func = "___bpf_prog_run";
1455
- struct get_stack_trace_t *e = data;
1456
- int i, num_stack;
1457
- static __u64 cnt;
1458
- struct ksym *ks;
413
+ if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
414
+ return 0;
415
+ vfprintf(stdout, format, args);
416
+ return 0;
417
+}
1459418
1460
- cnt++;
419
+static void free_str_set(const struct str_set *set)
420
+{
421
+ int i;
1461422
1462
- if (size < sizeof(struct get_stack_trace_t)) {
1463
- __u64 *raw_data = data;
1464
- bool found = false;
423
+ if (!set)
424
+ return;
1465425
1466
- num_stack = size / sizeof(__u64);
1467
- /* If jit is enabled, we do not have a good way to
1468
- * verify the sanity of the kernel stack. So we
1469
- * just assume it is good if the stack is not empty.
1470
- * This could be improved in the future.
1471
- */
1472
- if (jit_enabled) {
1473
- found = num_stack > 0;
1474
- } else {
1475
- for (i = 0; i < num_stack; i++) {
1476
- ks = ksym_search(raw_data[i]);
1477
- if (strcmp(ks->name, nonjit_func) == 0) {
1478
- found = true;
1479
- break;
1480
- }
1481
- }
1482
- }
1483
- if (found) {
1484
- good_kern_stack = true;
1485
- good_user_stack = true;
1486
- }
1487
- } else {
1488
- num_stack = e->kern_stack_size / sizeof(__u64);
1489
- if (jit_enabled) {
1490
- good_kern_stack = num_stack > 0;
1491
- } else {
1492
- for (i = 0; i < num_stack; i++) {
1493
- ks = ksym_search(e->kern_stack[i]);
1494
- if (strcmp(ks->name, nonjit_func) == 0) {
1495
- good_kern_stack = true;
1496
- break;
1497
- }
1498
- }
1499
- }
1500
- if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
1501
- good_user_stack = true;
426
+ for (i = 0; i < set->cnt; i++)
427
+ free((void *)set->strs[i]);
428
+ free(set->strs);
429
+}
430
+
431
+static int parse_str_list(const char *s, struct str_set *set)
432
+{
433
+ char *input, *state = NULL, *next, **tmp, **strs = NULL;
434
+ int cnt = 0;
435
+
436
+ input = strdup(s);
437
+ if (!input)
438
+ return -ENOMEM;
439
+
440
+ set->cnt = 0;
441
+ set->strs = NULL;
442
+
443
+ while ((next = strtok_r(state ? NULL : input, ",", &state))) {
444
+ tmp = realloc(strs, sizeof(*strs) * (cnt + 1));
445
+ if (!tmp)
446
+ goto err;
447
+ strs = tmp;
448
+
449
+ strs[cnt] = strdup(next);
450
+ if (!strs[cnt])
451
+ goto err;
452
+
453
+ cnt++;
1502454 }
1503
- if (!good_kern_stack || !good_user_stack)
1504
- return LIBBPF_PERF_EVENT_ERROR;
1505455
1506
- if (cnt == MAX_CNT_RAWTP)
1507
- return LIBBPF_PERF_EVENT_DONE;
1508
-
1509
- return LIBBPF_PERF_EVENT_CONT;
456
+ set->cnt = cnt;
457
+ set->strs = (const char **)strs;
458
+ free(input);
459
+ return 0;
460
+err:
461
+ free(strs);
462
+ free(input);
463
+ return -ENOMEM;
1510464 }

-static void test_get_stack_raw_tp(void)
-{
- const char *file = "./test_get_stack_rawtp.o";
- int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
- struct perf_event_attr attr = {};
- struct timespec tv = {0, 10};
- __u32 key = 0, duration = 0;
- struct bpf_object *obj;
+extern int extra_prog_load_log_flags;

- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ struct test_env *env = state->input;
+
+ switch (key) {
+ case ARG_TEST_NUM: {
+ char *subtest_str = strchr(arg, '/');
+
+ if (subtest_str) {
+ *subtest_str = '\0';
+ if (parse_num_list(subtest_str + 1,
+ &env->subtest_selector.num_set,
+ &env->subtest_selector.num_set_len)) {
+ fprintf(stderr,
+ "Failed to parse subtest numbers.\n");
+ return -EINVAL;
+ }
+ }
+ if (parse_num_list(arg, &env->test_selector.num_set,
+ &env->test_selector.num_set_len)) {
+ fprintf(stderr, "Failed to parse test numbers.\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ case ARG_TEST_NAME: {
+ char *subtest_str = strchr(arg, '/');
+
+ if (subtest_str) {
+ *subtest_str = '\0';
+ if (parse_str_list(subtest_str + 1,
+ &env->subtest_selector.whitelist))
+ return -ENOMEM;
+ }
+ if (parse_str_list(arg, &env->test_selector.whitelist))
+ return -ENOMEM;
+ break;
+ }
+ case ARG_TEST_NAME_BLACKLIST: {
+ char *subtest_str = strchr(arg, '/');
+
+ if (subtest_str) {
+ *subtest_str = '\0';
+ if (parse_str_list(subtest_str + 1,
+ &env->subtest_selector.blacklist))
+ return -ENOMEM;
+ }
+ if (parse_str_list(arg, &env->test_selector.blacklist))
+ return -ENOMEM;
+ break;
+ }
+ case ARG_VERIFIER_STATS:
+ env->verifier_stats = true;
+ break;
+ case ARG_VERBOSE:
+ env->verbosity = VERBOSE_NORMAL;
+ if (arg) {
+ if (strcmp(arg, "v") == 0) {
+ env->verbosity = VERBOSE_VERY;
+ extra_prog_load_log_flags = 1;
+ } else if (strcmp(arg, "vv") == 0) {
+ env->verbosity = VERBOSE_SUPER;
+ extra_prog_load_log_flags = 2;
+ } else {
+ fprintf(stderr,
+ "Unrecognized verbosity setting ('%s'), only -v, -vv, and -vvv are supported\n",
+ arg);
+ return -EINVAL;
+ }
+ }
+ break;
+ case ARG_GET_TEST_CNT:
+ env->get_test_cnt = true;
+ break;
+ case ARG_LIST_TEST_NAMES:
+ env->list_test_names = true;
+ break;
+ case ARGP_KEY_ARG:
+ argp_usage(state);
+ break;
+ case ARGP_KEY_END:
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
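/* Worked example of the selector grammar handled above: text before a '/'
 * selects top-level tests, text after it selects subtests within them.
 * parse_num_list() is defined elsewhere in this file; it is assumed here to
 * accept comma-separated numbers and ranges. Test/subtest names below are
 * made up for illustration:
 *
 *	ARG_TEST_NUM  "7"          -> run test #7, all subtests
 *	ARG_TEST_NUM  "7/3"        -> run only subtest #3 of test #7
 *	ARG_TEST_NAME "tc/ingress" -> whitelist tests matching "tc" and,
 *	                              within them, subtests matching "ingress"
 */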
+
+static void stdio_hijack(void)
+{
+#ifdef __GLIBC__
+ env.stdout = stdout;
+ env.stderr = stderr;
+
+ if (env.verbosity > VERBOSE_NONE) {
+ /* nothing to do, output to stdout by default */
+ return;
+ }
+
+ /* stdout and stderr -> buffer */
+ fflush(stdout);
+
+ stdout = open_memstream(&env.log_buf, &env.log_cnt);
+ if (!stdout) {
+ stdout = env.stdout;
+ perror("open_memstream");
+ return;
+ }
+
+ stderr = stdout;
+#endif
+}
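/* Self-contained sketch of the POSIX open_memstream() technique used above:
 * every write to the returned FILE* grows a malloc'ed buffer, and buf/len
 * are finalized by fclose() (or fflush()).
 */
#if 0	/* illustration only */
static void memstream_demo(void)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *f = open_memstream(&buf, &len);

	if (!f)
		return;
	fprintf(f, "captured line\n");
	fclose(f);		/* buf and len are now valid */
	printf("%zu bytes captured: %s", len, buf);
	free(buf);
}
#endif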
+
+static void stdio_restore(void)
+{
+#ifdef __GLIBC__
+ if (stdout == env.stdout)
 return;

- efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
+ fclose(stdout);
+ free(env.log_buf);

- perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
- if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
- perfmap_fd, errno))
- goto close_prog;
+ env.log_buf = NULL;
+ env.log_cnt = 0;

- err = load_kallsyms();
- if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.type = PERF_TYPE_SOFTWARE;
- attr.config = PERF_COUNT_SW_BPF_OUTPUT;
- pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
- -1/*group_fd*/, 0);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
- errno))
- goto close_prog;
-
- err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
- if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
- errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
- err, errno))
- goto close_prog;
-
- err = perf_event_mmap(pmu_fd);
- if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- /* trigger some syscall action */
- for (i = 0; i < MAX_CNT_RAWTP; i++)
- nanosleep(&tv, NULL);
-
- err = perf_event_poller(pmu_fd, get_stack_print_output);
- if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
+ stdout = env.stdout;
+ stderr = env.stderr;
+#endif
 }
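/* Note on the pair above: stdio_hijack() points stderr at the same
 * memstream as stdout, so stdio_restore() must fclose() that stream exactly
 * once, and the early return covers runs where hijacking was skipped
 * (verbose mode) or open_memstream() failed.
 */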

-static void test_task_fd_query_rawtp(void)
+/*
+ * Determine if test_progs is running as a "flavored" test runner and switch
+ * into corresponding sub-directory to load correct BPF objects.
+ *
+ * This is done by looking at executable name. If it contains "-flavor"
+ * suffix, then we are running as a flavored test runner.
+ */
+int cd_flavor_subdir(const char *exec_name)
 {
- const char *file = "./test_get_stack_rawtp.o";
- __u64 probe_offset, probe_addr;
- __u32 len, prog_id, fd_type;
- struct bpf_object *obj;
- int efd, err, prog_fd;
- __u32 duration = 0;
- char buf[256];
+ /* General form of argv[0] passed here is:
+ * some/path/to/test_progs[-flavor], where -flavor part is optional.
+ * First cut out "test_progs[-flavor]" part, then extract "flavor"
+ * part, if it's there.
+ */
+ const char *flavor = strrchr(exec_name, '/');

- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
+ if (!flavor)
+ return 0;
+ flavor++;
+ flavor = strrchr(flavor, '-');
+ if (!flavor)
+ return 0;
+ flavor++;
+ if (env.verbosity > VERBOSE_NONE)
+ fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);

- efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- /* query (getpid(), efd) */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
- errno))
- goto close_prog;
-
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- strcmp(buf, "sys_enter") == 0;
- if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
- fd_type, buf))
- goto close_prog;
-
- /* test zero len */
- len = 0;
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
- err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter");
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- /* test empty buffer */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
- err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter");
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- /* test smaller buffer */
- len = 3;
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
- "err %d errno %d\n", err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter") &&
- strcmp(buf, "sy") == 0;
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
+ return chdir(flavor);
 }
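/* Example of the flavor convention above: an executable named, say,
 * "test_progs-no_alu32" chdir()s into "no_alu32/", so that BPF objects
 * opened via relative paths resolve to that flavor's build of the same
 * files.
 */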

-static void test_task_fd_query_tp_core(const char *probe_name,
- const char *tp_name)
+#define MAX_BACKTRACE_SZ 128
+void crash_handler(int signum)
 {
- const char *file = "./test_tracepoint.o";
- int err, bytes, efd, prog_fd, pmu_fd;
- struct perf_event_attr attr = {};
- __u64 probe_offset, probe_addr;
- __u32 len, prog_id, fd_type;
- struct bpf_object *obj;
- __u32 duration = 0;
- char buf[256];
+ void *bt[MAX_BACKTRACE_SZ];
+ size_t sz;

- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
- goto close_prog;
+ sz = backtrace(bt, ARRAY_SIZE(bt));

- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/%s/id", probe_name);
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
- "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
+ if (env.test)
+ dump_test_log(env.test, true);
+ if (env.stdout)
+ stdio_restore();

- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- /* query (getpid(), pmu_fd) */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
- if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
- fd_type, buf))
- goto close_pmu;
-
- close(pmu_fd);
- goto close_prog_noerr;
-
-close_pmu:
- close(pmu_fd);
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
+ fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
+ backtrace_symbols_fd(bt, sz, STDERR_FILENO);
 }
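/* Standalone sketch of the <execinfo.h> technique used by crash_handler()
 * above. backtrace_symbols_fd() writes straight to a file descriptor
 * instead of calling malloc() like backtrace_symbols() does, which makes it
 * the safer choice inside a signal handler; SA_RESETHAND in main() below
 * ensures a fault inside the handler falls through to the default action
 * rather than recursing. Resolving symbol names typically requires linking
 * with -rdynamic.
 */
#if 0	/* illustration only */
static void print_stack(void)
{
	void *frames[64];
	int n = backtrace(frames, 64);

	backtrace_symbols_fd(frames, n, STDERR_FILENO);
}
#endif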

-static void test_task_fd_query_tp(void)
+int main(int argc, char **argv)
 {
- test_task_fd_query_tp_core("sched/sched_switch",
- "sched_switch");
- test_task_fd_query_tp_core("syscalls/sys_enter_read",
- "sys_enter_read");
-}
+ static const struct argp argp = {
+ .options = opts,
+ .parser = parse_arg,
+ .doc = argp_program_doc,
+ };
+ struct sigaction sigact = {
+ .sa_handler = crash_handler,
+ .sa_flags = SA_RESETHAND,
+ };
+ int err, i;

-int main(void)
-{
- jit_enabled = is_jit_enabled();
+ sigaction(SIGSEGV, &sigact, NULL);

- test_pkt_access();
- test_xdp();
- test_xdp_adjust_tail();
- test_l4lb_all();
- test_xdp_noinline();
- test_tcp_estats();
- test_bpf_obj_id();
- test_pkt_md_access();
- test_obj_name();
- test_tp_attach_query();
- test_stacktrace_map();
- test_stacktrace_build_id();
- test_stacktrace_build_id_nmi();
- test_stacktrace_map_raw_tp();
- test_get_stack_raw_tp();
- test_task_fd_query_rawtp();
- test_task_fd_query_tp();
+ err = argp_parse(&argp, argc, argv, 0, NULL, &env);
+ if (err)
+ return err;

- printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
- return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
+ err = cd_flavor_subdir(argv[0]);
+ if (err)
+ return err;
+
+ libbpf_set_print(libbpf_print_fn);
+
+ srand(time(NULL));
+
+ env.jit_enabled = is_jit_enabled();
+ env.nr_cpus = libbpf_num_possible_cpus();
+ if (env.nr_cpus < 0) {
+ fprintf(stderr, "Failed to get number of CPUs: %d!\n",
+ env.nr_cpus);
+ return -1;
+ }
+
+ save_netns();
+ stdio_hijack();
+ for (i = 0; i < prog_test_cnt; i++) {
+ struct prog_test_def *test = &prog_test_defs[i];
+
+ env.test = test;
+ test->test_num = i + 1;
+
+ if (!should_run(&env.test_selector,
+ test->test_num, test->test_name))
+ continue;
+
+ if (env.get_test_cnt) {
+ env.succ_cnt++;
+ continue;
+ }
+
+ if (env.list_test_names) {
+ fprintf(env.stdout, "%s\n", test->test_name);
+ env.succ_cnt++;
+ continue;
+ }
+
+ test->run_test();
+ /* ensure last sub-test is finalized properly */
+ if (test->subtest_name)
+ test__end_subtest();
+
+ test->tested = true;
+ if (test->error_cnt)
+ env.fail_cnt++;
+ else
+ env.succ_cnt++;
+ skip_account();
+
+ dump_test_log(test, test->error_cnt);
+
+ fprintf(env.stdout, "#%d %s:%s\n",
+ test->test_num, test->test_name,
+ test->error_cnt ? "FAIL" : "OK");
+
+ reset_affinity();
+ restore_netns();
+ if (test->need_cgroup_cleanup)
+ cleanup_cgroup_environment();
+ }
+ stdio_restore();
+
+ if (env.get_test_cnt) {
+ printf("%d\n", env.succ_cnt);
+ goto out;
+ }
+
+ if (env.list_test_names)
+ goto out;
+
+ fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
+ env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+
+out:
+ free_str_set(&env.test_selector.blacklist);
+ free_str_set(&env.test_selector.whitelist);
+ free(env.test_selector.num_set);
+ free_str_set(&env.subtest_selector.blacklist);
+ free_str_set(&env.subtest_selector.whitelist);
+ free(env.subtest_selector.num_set);
+ close(env.saved_netns_fd);
+
+ if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
+ return EXIT_NO_TEST;
+
+ return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
 }
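/* Exit-code contract implemented by main() above:
 *	EXIT_SUCCESS	every selected test passed (skips allowed)
 *	EXIT_FAILURE	at least one selected test failed
 *	EXIT_NO_TEST	the selectors matched no tests at all
 * Setup failures (argument parsing, flavor chdir, CPU-count query) return
 * early with their own non-zero status.
 */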