forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/tools/testing/selftests/bpf/test_verifier.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Testsuite for eBPF verifier
  *
  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
  * Copyright (c) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
  */

 #include <endian.h>
@@ -22,6 +20,7 @@
 #include <stdbool.h>
 #include <sched.h>
 #include <limits.h>
+#include <assert.h>

 #include <sys/capability.h>

@@ -30,8 +29,10 @@
 #include <linux/bpf_perf_event.h>
 #include <linux/bpf.h>
 #include <linux/if_ether.h>
+#include <linux/btf.h>

 #include <bpf/bpf.h>
+#include <bpf/libbpf.h>

 #ifdef HAVE_GENHDR
 # include "autoconf.h"
@@ -43,11 +44,14 @@
 #include "bpf_rlimit.h"
 #include "bpf_rand.h"
 #include "bpf_util.h"
+#include "test_btf.h"
 #include "../../../include/linux/filter.h"

 #define MAX_INSNS	BPF_MAXINSNS
+#define MAX_TEST_INSNS	1000000
 #define MAX_FIXUPS	8
-#define MAX_NR_MAPS	8
+#define MAX_NR_MAPS	21
+#define MAX_TEST_RUNS	8
 #define POINTER_VALUE	0xcafe4all
 #define TEST_DATA_LEN	64

@@ -56,30 +60,62 @@

 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
 static bool unpriv_disabled = false;
+static int skips;
+static bool verbose = false;

 struct bpf_test {
 	const char *descr;
 	struct bpf_insn	insns[MAX_INSNS];
-	int fixup_map1[MAX_FIXUPS];
-	int fixup_map2[MAX_FIXUPS];
-	int fixup_map3[MAX_FIXUPS];
-	int fixup_map4[MAX_FIXUPS];
+	struct bpf_insn	*fill_insns;
+	int fixup_map_hash_8b[MAX_FIXUPS];
+	int fixup_map_hash_48b[MAX_FIXUPS];
+	int fixup_map_hash_16b[MAX_FIXUPS];
+	int fixup_map_array_48b[MAX_FIXUPS];
+	int fixup_map_sockmap[MAX_FIXUPS];
+	int fixup_map_sockhash[MAX_FIXUPS];
+	int fixup_map_xskmap[MAX_FIXUPS];
+	int fixup_map_stacktrace[MAX_FIXUPS];
 	int fixup_prog1[MAX_FIXUPS];
 	int fixup_prog2[MAX_FIXUPS];
 	int fixup_map_in_map[MAX_FIXUPS];
 	int fixup_cgroup_storage[MAX_FIXUPS];
+	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
+	int fixup_map_spin_lock[MAX_FIXUPS];
+	int fixup_map_array_ro[MAX_FIXUPS];
+	int fixup_map_array_wo[MAX_FIXUPS];
+	int fixup_map_array_small[MAX_FIXUPS];
+	int fixup_sk_storage_map[MAX_FIXUPS];
+	int fixup_map_event_output[MAX_FIXUPS];
+	int fixup_map_reuseport_array[MAX_FIXUPS];
+	int fixup_map_ringbuf[MAX_FIXUPS];
 	const char *errstr;
 	const char *errstr_unpriv;
-	uint32_t retval, retval_unpriv;
+	uint32_t insn_processed;
+	int prog_len;
 	enum {
 		UNDEF,
 		ACCEPT,
-		REJECT
+		REJECT,
+		VERBOSE_ACCEPT,
 	} result, result_unpriv;
 	enum bpf_prog_type prog_type;
 	uint8_t flags;
-	__u8 data[TEST_DATA_LEN];
 	void (*fill_helper)(struct bpf_test *self);
+	int runs;
+#define bpf_testdata_struct_t					\
+	struct {						\
+		uint32_t retval, retval_unpriv;			\
+		union {						\
+			__u8 data[TEST_DATA_LEN];		\
+			__u64 data64[TEST_DATA_LEN / 8];	\
+		};						\
+	}
+	union {
+		bpf_testdata_struct_t;
+		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
+	};
+	enum bpf_attach_type expected_attach_type;
+	const char *kfunc;
 };

 /* Note we want this to be 64 bit aligned so that the end of our array is
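Two things are worth noting about the new layout. First, the bump of MAX_NR_MAPS from 8 to 21 matches the 21 fixup_* map arrays a test can now carry. Second, the anonymous union lets a test either keep the familiar single retval/data pair or, by setting .runs, supply one retval/data pair per test run. A minimal illustrative entry (not taken from this patch; the description string and values are made up) could look like this:

	{
		"example: per-run retvals (illustrative only)",
		.insns = {
		/* bounds-checked direct packet read of the first 8 bytes */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
		/* return the first 8 packet bytes, i.e. data64[0] */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
		BPF_EXIT_INSN(),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.runs = 2,
		.retvals = {
			{ .retval = 42, .data64 = { 42, } },
			{ .retval = 7,  .data64 = { 7, } },
		},
	},

Each run feeds the corresponding .data64 buffer to the program and checks its return value against the matching .retval.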
@@ -99,49 +135,61 @@

 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
 {
-	/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
 #define PUSH_CNT 51
-	unsigned int len = BPF_MAXINSNS;
-	struct bpf_insn *insn = self->insns;
+	/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
+	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
+	struct bpf_insn *insn = self->fill_insns;
 	int i = 0, j, k = 0;

 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
 loop:
 	for (j = 0; j < PUSH_CNT; j++) {
 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
+		/* jump to error label */
+		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
 		i++;
 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push),
-		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
 		i++;
 	}

 	for (j = 0; j < PUSH_CNT; j++) {
 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
+		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
 		i++;
 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop),
-		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
 		i++;
 	}
 	if (++k < 5)
 		goto loop;

-	for (; i < len - 1; i++)
-		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
+	for (; i < len - 3; i++)
+		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
+	insn[len - 3] = BPF_JMP_A(1);
+	/* error label */
+	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
 	insn[len - 1] = BPF_EXIT_INSN();
+	self->prog_len = len;
 }

 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
-	struct bpf_insn *insn = self->insns;
-	unsigned int len = BPF_MAXINSNS;
+	struct bpf_insn *insn = self->fill_insns;
+	/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
+	 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
+	 * to extend the error value of the inlined ld_abs sequence which then
+	 * contains 7 insns. so, set the dividend to 7 so the testcase could
+	 * work on all arches.
+	 */
+	unsigned int len = (1 << 15) / 7;
 	int i = 0;

 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
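The size arithmetic above is what forces the move away from the fixed insns[] array: the BPF jump offset is a signed 16-bit field, so every forward jump to the error label has to stay under 1 << 15 instructions, while the generated programs now grow past BPF_MAXINSNS and no longer fit in insns[MAX_INSNS]. Working the constants through (illustrative arithmetic, not part of the diff):

	/*
	 * bpf_fill_ld_abs_vlan_push_pop:
	 *	len = (1 << 15) - PUSH_CNT * 2 * 5 * 6
	 *	    = 32768 - 51 * 60 = 29708 instructions
	 * bpf_fill_jump_around_ld_abs:
	 *	len = (1 << 15) / 7 = 4681 instructions
	 * Both exceed BPF_MAXINSNS (4096), which is why they are emitted into
	 * the new fill_insns buffer and sized via prog_len, and both stay far
	 * below MAX_TEST_INSNS (1000000).
	 */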
@@ -151,11 +199,12 @@
 	while (i < len - 1)
 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
 	insn[i] = BPF_EXIT_INSN();
+	self->prog_len = i + 1;
 }

 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
 {
-	struct bpf_insn *insn = self->insns;
+	struct bpf_insn *insn = self->fill_insns;
 	uint64_t res = 0;
 	int i = 0;

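None of the hunks shown here touch the runner itself, so it is easy to miss that fill_insns and prog_len need matching support where the program gets loaded. A rough sketch of what that consumer side has to do (assumed code; the actual do_test_fixup()/do_test_single() changes are not part of this excerpt, only probe_filter_length() and the struct fields come from the file itself):

	/* Sketch: prefer the generated buffer and its explicit length when a
	 * fill helper ran, otherwise fall back to the static insns[] array.
	 */
	static void prepare_prog(struct bpf_test *test,
				 struct bpf_insn **prog, int *prog_len)
	{
		if (test->fill_helper) {
			test->fill_insns = calloc(MAX_TEST_INSNS,
						  sizeof(struct bpf_insn));
			test->fill_helper(test);	/* writes insns, sets prog_len */
		}
		if (test->fill_insns) {
			*prog = test->fill_insns;
			*prog_len = test->prog_len;
		} else {
			*prog = test->insns;
			*prog_len = probe_filter_length(test->insns);
		}
	}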
@@ -173,12609 +222,139 @@
 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
 	insn[i] = BPF_EXIT_INSN();
+	self->prog_len = i + 1;
 	res ^= (res >> 32);
 	self->retval = (uint32_t)res;
 }

-static struct bpf_test tests[] = {
181
- {
182
- "add+sub+mul",
183
- .insns = {
184
- BPF_MOV64_IMM(BPF_REG_1, 1),
185
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
186
- BPF_MOV64_IMM(BPF_REG_2, 3),
187
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
188
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
189
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
190
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
191
- BPF_EXIT_INSN(),
192
- },
193
- .result = ACCEPT,
194
- .retval = -3,
195
- },
196
- {
197
- "DIV32 by 0, zero check 1",
198
- .insns = {
199
- BPF_MOV32_IMM(BPF_REG_0, 42),
200
- BPF_MOV32_IMM(BPF_REG_1, 0),
201
- BPF_MOV32_IMM(BPF_REG_2, 1),
202
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
203
- BPF_EXIT_INSN(),
204
- },
205
- .result = ACCEPT,
206
- .retval = 42,
207
- },
208
- {
209
- "DIV32 by 0, zero check 2",
210
- .insns = {
211
- BPF_MOV32_IMM(BPF_REG_0, 42),
212
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
213
- BPF_MOV32_IMM(BPF_REG_2, 1),
214
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
215
- BPF_EXIT_INSN(),
216
- },
217
- .result = ACCEPT,
218
- .retval = 42,
219
- },
220
- {
221
- "DIV64 by 0, zero check",
222
- .insns = {
223
- BPF_MOV32_IMM(BPF_REG_0, 42),
224
- BPF_MOV32_IMM(BPF_REG_1, 0),
225
- BPF_MOV32_IMM(BPF_REG_2, 1),
226
- BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
227
- BPF_EXIT_INSN(),
228
- },
229
- .result = ACCEPT,
230
- .retval = 42,
231
- },
232
- {
233
- "MOD32 by 0, zero check 1",
234
- .insns = {
235
- BPF_MOV32_IMM(BPF_REG_0, 42),
236
- BPF_MOV32_IMM(BPF_REG_1, 0),
237
- BPF_MOV32_IMM(BPF_REG_2, 1),
238
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
239
- BPF_EXIT_INSN(),
240
- },
241
- .result = ACCEPT,
242
- .retval = 42,
243
- },
244
- {
245
- "MOD32 by 0, zero check 2",
246
- .insns = {
247
- BPF_MOV32_IMM(BPF_REG_0, 42),
248
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
249
- BPF_MOV32_IMM(BPF_REG_2, 1),
250
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
251
- BPF_EXIT_INSN(),
252
- },
253
- .result = ACCEPT,
254
- .retval = 42,
255
- },
256
- {
257
- "MOD64 by 0, zero check",
258
- .insns = {
259
- BPF_MOV32_IMM(BPF_REG_0, 42),
260
- BPF_MOV32_IMM(BPF_REG_1, 0),
261
- BPF_MOV32_IMM(BPF_REG_2, 1),
262
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
263
- BPF_EXIT_INSN(),
264
- },
265
- .result = ACCEPT,
266
- .retval = 42,
267
- },
268
- {
269
- "DIV32 by 0, zero check ok, cls",
270
- .insns = {
271
- BPF_MOV32_IMM(BPF_REG_0, 42),
272
- BPF_MOV32_IMM(BPF_REG_1, 2),
273
- BPF_MOV32_IMM(BPF_REG_2, 16),
274
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
275
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
276
- BPF_EXIT_INSN(),
277
- },
278
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
279
- .result = ACCEPT,
280
- .retval = 8,
281
- },
282
- {
283
- "DIV32 by 0, zero check 1, cls",
284
- .insns = {
285
- BPF_MOV32_IMM(BPF_REG_1, 0),
286
- BPF_MOV32_IMM(BPF_REG_0, 1),
287
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
288
- BPF_EXIT_INSN(),
289
- },
290
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
291
- .result = ACCEPT,
292
- .retval = 0,
293
- },
294
- {
295
- "DIV32 by 0, zero check 2, cls",
296
- .insns = {
297
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
298
- BPF_MOV32_IMM(BPF_REG_0, 1),
299
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
300
- BPF_EXIT_INSN(),
301
- },
302
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
303
- .result = ACCEPT,
304
- .retval = 0,
305
- },
306
- {
307
- "DIV64 by 0, zero check, cls",
308
- .insns = {
309
- BPF_MOV32_IMM(BPF_REG_1, 0),
310
- BPF_MOV32_IMM(BPF_REG_0, 1),
311
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
312
- BPF_EXIT_INSN(),
313
- },
314
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
315
- .result = ACCEPT,
316
- .retval = 0,
317
- },
318
- {
319
- "MOD32 by 0, zero check ok, cls",
320
- .insns = {
321
- BPF_MOV32_IMM(BPF_REG_0, 42),
322
- BPF_MOV32_IMM(BPF_REG_1, 3),
323
- BPF_MOV32_IMM(BPF_REG_2, 5),
324
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
325
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
326
- BPF_EXIT_INSN(),
327
- },
328
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
329
- .result = ACCEPT,
330
- .retval = 2,
331
- },
332
- {
333
- "MOD32 by 0, zero check 1, cls",
334
- .insns = {
335
- BPF_MOV32_IMM(BPF_REG_1, 0),
336
- BPF_MOV32_IMM(BPF_REG_0, 1),
337
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
338
- BPF_EXIT_INSN(),
339
- },
340
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
341
- .result = ACCEPT,
342
- .retval = 1,
343
- },
344
- {
345
- "MOD32 by 0, zero check 2, cls",
346
- .insns = {
347
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
348
- BPF_MOV32_IMM(BPF_REG_0, 1),
349
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
350
- BPF_EXIT_INSN(),
351
- },
352
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
353
- .result = ACCEPT,
354
- .retval = 1,
355
- },
356
- {
357
- "MOD64 by 0, zero check 1, cls",
358
- .insns = {
359
- BPF_MOV32_IMM(BPF_REG_1, 0),
360
- BPF_MOV32_IMM(BPF_REG_0, 2),
361
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
362
- BPF_EXIT_INSN(),
363
- },
364
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
365
- .result = ACCEPT,
366
- .retval = 2,
367
- },
368
- {
369
- "MOD64 by 0, zero check 2, cls",
370
- .insns = {
371
- BPF_MOV32_IMM(BPF_REG_1, 0),
372
- BPF_MOV32_IMM(BPF_REG_0, -1),
373
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
374
- BPF_EXIT_INSN(),
375
- },
376
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
377
- .result = ACCEPT,
378
- .retval = -1,
379
- },
380
- /* Just make sure that JITs used udiv/umod as otherwise we get
381
- * an exception from INT_MIN/-1 overflow similarly as with div
382
- * by zero.
+#define MAX_JMP_SEQ 8192
+
+/* test the sequence of 8k jumps */
+static void bpf_fill_scale1(struct bpf_test *self)
+{
+	struct bpf_insn *insn = self->fill_insns;
+	int i = 0, k = 0;
+
+	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+	/* test to check that the long sequence of jumps is acceptable */
+	while (k++ < MAX_JMP_SEQ) {
+		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+					 BPF_FUNC_get_prandom_u32);
+		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
+		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
+		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
+					-8 * (k % 64 + 1));
+	}
+	/* is_state_visited() doesn't allocate state for pruning for every jump.
+	 * Hence multiply jmps by 4 to accommodate that heuristic
 	 */
- {
385
- "DIV32 overflow, check 1",
386
- .insns = {
387
- BPF_MOV32_IMM(BPF_REG_1, -1),
388
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
389
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
390
- BPF_EXIT_INSN(),
391
- },
392
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
393
- .result = ACCEPT,
394
- .retval = 0,
395
- },
396
- {
397
- "DIV32 overflow, check 2",
398
- .insns = {
399
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
400
- BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
401
- BPF_EXIT_INSN(),
402
- },
403
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
404
- .result = ACCEPT,
405
- .retval = 0,
406
- },
407
- {
408
- "DIV64 overflow, check 1",
409
- .insns = {
410
- BPF_MOV64_IMM(BPF_REG_1, -1),
411
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
412
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
413
- BPF_EXIT_INSN(),
414
- },
415
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
416
- .result = ACCEPT,
417
- .retval = 0,
418
- },
419
- {
420
- "DIV64 overflow, check 2",
421
- .insns = {
422
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
423
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
424
- BPF_EXIT_INSN(),
425
- },
426
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
427
- .result = ACCEPT,
428
- .retval = 0,
429
- },
430
- {
431
- "MOD32 overflow, check 1",
432
- .insns = {
433
- BPF_MOV32_IMM(BPF_REG_1, -1),
434
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
435
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
436
- BPF_EXIT_INSN(),
437
- },
438
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
439
- .result = ACCEPT,
440
- .retval = INT_MIN,
441
- },
442
- {
443
- "MOD32 overflow, check 2",
444
- .insns = {
445
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
446
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
447
- BPF_EXIT_INSN(),
448
- },
449
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
450
- .result = ACCEPT,
451
- .retval = INT_MIN,
452
- },
453
- {
454
- "MOD64 overflow, check 1",
455
- .insns = {
456
- BPF_MOV64_IMM(BPF_REG_1, -1),
457
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
458
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
459
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
460
- BPF_MOV32_IMM(BPF_REG_0, 0),
461
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
462
- BPF_MOV32_IMM(BPF_REG_0, 1),
463
- BPF_EXIT_INSN(),
464
- },
465
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
466
- .result = ACCEPT,
467
- .retval = 1,
468
- },
469
- {
470
- "MOD64 overflow, check 2",
471
- .insns = {
472
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
473
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
474
- BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
475
- BPF_MOV32_IMM(BPF_REG_0, 0),
476
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
477
- BPF_MOV32_IMM(BPF_REG_0, 1),
478
- BPF_EXIT_INSN(),
479
- },
480
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
481
- .result = ACCEPT,
482
- .retval = 1,
483
- },
484
- {
485
- "xor32 zero extend check",
486
- .insns = {
487
- BPF_MOV32_IMM(BPF_REG_2, -1),
488
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
489
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
490
- BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
491
- BPF_MOV32_IMM(BPF_REG_0, 2),
492
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
493
- BPF_MOV32_IMM(BPF_REG_0, 1),
494
- BPF_EXIT_INSN(),
495
- },
496
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
497
- .result = ACCEPT,
498
- .retval = 1,
499
- },
500
- {
501
- "empty prog",
502
- .insns = {
503
- },
504
- .errstr = "unknown opcode 00",
505
- .result = REJECT,
506
- },
507
- {
508
- "only exit insn",
509
- .insns = {
510
- BPF_EXIT_INSN(),
511
- },
512
- .errstr = "R0 !read_ok",
513
- .result = REJECT,
514
- },
515
- {
516
- "unreachable",
517
- .insns = {
518
- BPF_EXIT_INSN(),
519
- BPF_EXIT_INSN(),
520
- },
521
- .errstr = "unreachable",
522
- .result = REJECT,
523
- },
524
- {
525
- "unreachable2",
526
- .insns = {
527
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
528
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
529
- BPF_EXIT_INSN(),
530
- },
531
- .errstr = "unreachable",
532
- .result = REJECT,
533
- },
534
- {
535
- "out of range jump",
536
- .insns = {
537
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
538
- BPF_EXIT_INSN(),
539
- },
540
- .errstr = "jump out of range",
541
- .result = REJECT,
542
- },
543
- {
544
- "out of range jump2",
545
- .insns = {
546
- BPF_JMP_IMM(BPF_JA, 0, 0, -2),
547
- BPF_EXIT_INSN(),
548
- },
549
- .errstr = "jump out of range",
550
- .result = REJECT,
551
- },
552
- {
553
- "test1 ld_imm64",
554
- .insns = {
555
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
556
- BPF_LD_IMM64(BPF_REG_0, 0),
557
- BPF_LD_IMM64(BPF_REG_0, 0),
558
- BPF_LD_IMM64(BPF_REG_0, 1),
559
- BPF_LD_IMM64(BPF_REG_0, 1),
560
- BPF_MOV64_IMM(BPF_REG_0, 2),
561
- BPF_EXIT_INSN(),
562
- },
563
- .errstr = "invalid BPF_LD_IMM insn",
564
- .errstr_unpriv = "R1 pointer comparison",
565
- .result = REJECT,
566
- },
567
- {
568
- "test2 ld_imm64",
569
- .insns = {
570
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
571
- BPF_LD_IMM64(BPF_REG_0, 0),
572
- BPF_LD_IMM64(BPF_REG_0, 0),
573
- BPF_LD_IMM64(BPF_REG_0, 1),
574
- BPF_LD_IMM64(BPF_REG_0, 1),
575
- BPF_EXIT_INSN(),
576
- },
577
- .errstr = "invalid BPF_LD_IMM insn",
578
- .errstr_unpriv = "R1 pointer comparison",
579
- .result = REJECT,
580
- },
581
- {
582
- "test3 ld_imm64",
583
- .insns = {
584
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
585
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
586
- BPF_LD_IMM64(BPF_REG_0, 0),
587
- BPF_LD_IMM64(BPF_REG_0, 0),
588
- BPF_LD_IMM64(BPF_REG_0, 1),
589
- BPF_LD_IMM64(BPF_REG_0, 1),
590
- BPF_EXIT_INSN(),
591
- },
592
- .errstr = "invalid bpf_ld_imm64 insn",
593
- .result = REJECT,
594
- },
595
- {
596
- "test4 ld_imm64",
597
- .insns = {
598
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
599
- BPF_EXIT_INSN(),
600
- },
601
- .errstr = "invalid bpf_ld_imm64 insn",
602
- .result = REJECT,
603
- },
604
- {
605
- "test5 ld_imm64",
606
- .insns = {
607
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
608
- },
609
- .errstr = "invalid bpf_ld_imm64 insn",
610
- .result = REJECT,
611
- },
612
- {
613
- "test6 ld_imm64",
614
- .insns = {
615
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
616
- BPF_RAW_INSN(0, 0, 0, 0, 0),
617
- BPF_EXIT_INSN(),
618
- },
619
- .result = ACCEPT,
620
- },
621
- {
622
- "test7 ld_imm64",
623
- .insns = {
624
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
625
- BPF_RAW_INSN(0, 0, 0, 0, 1),
626
- BPF_EXIT_INSN(),
627
- },
628
- .result = ACCEPT,
629
- .retval = 1,
630
- },
631
- {
632
- "test8 ld_imm64",
633
- .insns = {
634
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
635
- BPF_RAW_INSN(0, 0, 0, 0, 1),
636
- BPF_EXIT_INSN(),
637
- },
638
- .errstr = "uses reserved fields",
639
- .result = REJECT,
640
- },
641
- {
642
- "test9 ld_imm64",
643
- .insns = {
644
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
645
- BPF_RAW_INSN(0, 0, 0, 1, 1),
646
- BPF_EXIT_INSN(),
647
- },
648
- .errstr = "invalid bpf_ld_imm64 insn",
649
- .result = REJECT,
650
- },
651
- {
652
- "test10 ld_imm64",
653
- .insns = {
654
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
655
- BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
656
- BPF_EXIT_INSN(),
657
- },
658
- .errstr = "invalid bpf_ld_imm64 insn",
659
- .result = REJECT,
660
- },
661
- {
662
- "test11 ld_imm64",
663
- .insns = {
664
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
665
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
666
- BPF_EXIT_INSN(),
667
- },
668
- .errstr = "invalid bpf_ld_imm64 insn",
669
- .result = REJECT,
670
- },
671
- {
672
- "test12 ld_imm64",
673
- .insns = {
674
- BPF_MOV64_IMM(BPF_REG_1, 0),
675
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
676
- BPF_RAW_INSN(0, 0, 0, 0, 1),
677
- BPF_EXIT_INSN(),
678
- },
679
- .errstr = "not pointing to valid bpf_map",
680
- .result = REJECT,
681
- },
682
- {
683
- "test13 ld_imm64",
684
- .insns = {
685
- BPF_MOV64_IMM(BPF_REG_1, 0),
686
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
687
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
688
- BPF_EXIT_INSN(),
689
- },
690
- .errstr = "invalid bpf_ld_imm64 insn",
691
- .result = REJECT,
692
- },
693
- {
694
- "arsh32 on imm",
695
- .insns = {
696
- BPF_MOV64_IMM(BPF_REG_0, 1),
697
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
698
- BPF_EXIT_INSN(),
699
- },
700
- .result = REJECT,
701
- .errstr = "unknown opcode c4",
702
- },
703
- {
704
- "arsh32 on reg",
705
- .insns = {
706
- BPF_MOV64_IMM(BPF_REG_0, 1),
707
- BPF_MOV64_IMM(BPF_REG_1, 5),
708
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
709
- BPF_EXIT_INSN(),
710
- },
711
- .result = REJECT,
712
- .errstr = "unknown opcode cc",
713
- },
714
- {
715
- "arsh64 on imm",
716
- .insns = {
717
- BPF_MOV64_IMM(BPF_REG_0, 1),
718
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
719
- BPF_EXIT_INSN(),
720
- },
721
- .result = ACCEPT,
722
- },
723
- {
724
- "arsh64 on reg",
725
- .insns = {
726
- BPF_MOV64_IMM(BPF_REG_0, 1),
727
- BPF_MOV64_IMM(BPF_REG_1, 5),
728
- BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
729
- BPF_EXIT_INSN(),
730
- },
731
- .result = ACCEPT,
732
- },
733
- {
734
- "no bpf_exit",
735
- .insns = {
736
- BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
737
- },
738
- .errstr = "not an exit",
739
- .result = REJECT,
740
- },
741
- {
742
- "loop (back-edge)",
743
- .insns = {
744
- BPF_JMP_IMM(BPF_JA, 0, 0, -1),
745
- BPF_EXIT_INSN(),
746
- },
747
- .errstr = "back-edge",
748
- .result = REJECT,
749
- },
750
- {
751
- "loop2 (back-edge)",
752
- .insns = {
753
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
754
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
755
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
756
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
757
- BPF_EXIT_INSN(),
758
- },
759
- .errstr = "back-edge",
760
- .result = REJECT,
761
- },
762
- {
763
- "conditional loop",
764
- .insns = {
765
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
766
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
767
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
768
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
769
- BPF_EXIT_INSN(),
770
- },
771
- .errstr = "back-edge",
772
- .result = REJECT,
773
- },
774
- {
775
- "read uninitialized register",
776
- .insns = {
777
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
778
- BPF_EXIT_INSN(),
779
- },
780
- .errstr = "R2 !read_ok",
781
- .result = REJECT,
782
- },
783
- {
784
- "read invalid register",
785
- .insns = {
786
- BPF_MOV64_REG(BPF_REG_0, -1),
787
- BPF_EXIT_INSN(),
788
- },
789
- .errstr = "R15 is invalid",
790
- .result = REJECT,
791
- },
792
- {
793
- "program doesn't init R0 before exit",
794
- .insns = {
795
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
796
- BPF_EXIT_INSN(),
797
- },
798
- .errstr = "R0 !read_ok",
799
- .result = REJECT,
800
- },
801
- {
802
- "program doesn't init R0 before exit in all branches",
803
- .insns = {
804
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
805
- BPF_MOV64_IMM(BPF_REG_0, 1),
806
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
807
- BPF_EXIT_INSN(),
808
- },
809
- .errstr = "R0 !read_ok",
810
- .errstr_unpriv = "R1 pointer comparison",
811
- .result = REJECT,
812
- },
813
- {
814
- "stack out of bounds",
815
- .insns = {
816
- BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
817
- BPF_EXIT_INSN(),
818
- },
819
- .errstr = "invalid stack",
820
- .result = REJECT,
821
- },
822
- {
823
- "invalid call insn1",
824
- .insns = {
825
- BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
826
- BPF_EXIT_INSN(),
827
- },
828
- .errstr = "unknown opcode 8d",
829
- .result = REJECT,
830
- },
831
- {
832
- "invalid call insn2",
833
- .insns = {
834
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
835
- BPF_EXIT_INSN(),
836
- },
837
- .errstr = "BPF_CALL uses reserved",
838
- .result = REJECT,
839
- },
840
- {
841
- "invalid function call",
842
- .insns = {
843
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
844
- BPF_EXIT_INSN(),
845
- },
846
- .errstr = "invalid func unknown#1234567",
847
- .result = REJECT,
848
- },
849
- {
850
- "uninitialized stack1",
851
- .insns = {
852
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
853
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
854
- BPF_LD_MAP_FD(BPF_REG_1, 0),
855
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
856
- BPF_FUNC_map_lookup_elem),
857
- BPF_EXIT_INSN(),
858
- },
859
- .fixup_map1 = { 2 },
860
- .errstr = "invalid indirect read from stack",
861
- .result = REJECT,
862
- },
863
- {
864
- "uninitialized stack2",
865
- .insns = {
866
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
867
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
868
- BPF_EXIT_INSN(),
869
- },
870
- .errstr = "invalid read from stack",
871
- .result = REJECT,
872
- },
873
- {
874
- "invalid fp arithmetic",
875
- /* If this gets ever changed, make sure JITs can deal with it. */
876
- .insns = {
877
- BPF_MOV64_IMM(BPF_REG_0, 0),
878
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
879
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
880
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
881
- BPF_EXIT_INSN(),
882
- },
883
- .errstr = "R1 subtraction from stack pointer",
884
- .result = REJECT,
885
- },
886
- {
887
- "non-invalid fp arithmetic",
888
- .insns = {
889
- BPF_MOV64_IMM(BPF_REG_0, 0),
890
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
891
- BPF_EXIT_INSN(),
892
- },
893
- .result = ACCEPT,
894
- },
895
- {
896
- "invalid argument register",
897
- .insns = {
898
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
899
- BPF_FUNC_get_cgroup_classid),
900
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
901
- BPF_FUNC_get_cgroup_classid),
902
- BPF_EXIT_INSN(),
903
- },
904
- .errstr = "R1 !read_ok",
905
- .result = REJECT,
906
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
907
- },
908
- {
909
- "non-invalid argument register",
910
- .insns = {
911
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
912
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
913
- BPF_FUNC_get_cgroup_classid),
914
- BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
915
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
916
- BPF_FUNC_get_cgroup_classid),
917
- BPF_EXIT_INSN(),
918
- },
919
- .result = ACCEPT,
920
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
921
- },
922
- {
923
- "check valid spill/fill",
924
- .insns = {
925
- /* spill R1(ctx) into stack */
926
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
927
- /* fill it back into R2 */
928
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
929
- /* should be able to access R0 = *(R2 + 8) */
930
- /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
931
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
932
- BPF_EXIT_INSN(),
933
- },
934
- .errstr_unpriv = "R0 leaks addr",
935
- .result = ACCEPT,
936
- .result_unpriv = REJECT,
937
- .retval = POINTER_VALUE,
938
- },
939
- {
940
- "check valid spill/fill, skb mark",
941
- .insns = {
942
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
943
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
944
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
945
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
946
- offsetof(struct __sk_buff, mark)),
947
- BPF_EXIT_INSN(),
948
- },
949
- .result = ACCEPT,
950
- .result_unpriv = ACCEPT,
951
- },
952
- {
953
- "check corrupted spill/fill",
954
- .insns = {
955
- /* spill R1(ctx) into stack */
956
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
957
- /* mess up with R1 pointer on stack */
958
- BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
959
- /* fill back into R0 is fine for priv.
960
- * R0 now becomes SCALAR_VALUE.
961
- */
962
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
963
- /* Load from R0 should fail. */
964
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
965
- BPF_EXIT_INSN(),
966
- },
967
- .errstr_unpriv = "attempt to corrupt spilled",
968
- .errstr = "R0 invalid mem access 'inv",
969
- .result = REJECT,
970
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
971
- },
972
- {
973
- "check corrupted spill/fill, LSB",
974
- .insns = {
975
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
976
- BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
977
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
978
- BPF_EXIT_INSN(),
979
- },
980
- .errstr_unpriv = "attempt to corrupt spilled",
981
- .result_unpriv = REJECT,
982
- .result = ACCEPT,
983
- .retval = POINTER_VALUE,
984
- },
985
- {
986
- "check corrupted spill/fill, MSB",
987
- .insns = {
988
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
989
- BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
990
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
991
- BPF_EXIT_INSN(),
992
- },
993
- .errstr_unpriv = "attempt to corrupt spilled",
994
- .result_unpriv = REJECT,
995
- .result = ACCEPT,
996
- .retval = POINTER_VALUE,
997
- },
998
- {
999
- "invalid src register in STX",
1000
- .insns = {
1001
- BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
1002
- BPF_EXIT_INSN(),
1003
- },
1004
- .errstr = "R15 is invalid",
1005
- .result = REJECT,
1006
- },
1007
- {
1008
- "invalid dst register in STX",
1009
- .insns = {
1010
- BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1011
- BPF_EXIT_INSN(),
1012
- },
1013
- .errstr = "R14 is invalid",
1014
- .result = REJECT,
1015
- },
1016
- {
1017
- "invalid dst register in ST",
1018
- .insns = {
1019
- BPF_ST_MEM(BPF_B, 14, -1, -1),
1020
- BPF_EXIT_INSN(),
1021
- },
1022
- .errstr = "R14 is invalid",
1023
- .result = REJECT,
1024
- },
1025
- {
1026
- "invalid src register in LDX",
1027
- .insns = {
1028
- BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1029
- BPF_EXIT_INSN(),
1030
- },
1031
- .errstr = "R12 is invalid",
1032
- .result = REJECT,
1033
- },
1034
- {
1035
- "invalid dst register in LDX",
1036
- .insns = {
1037
- BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1038
- BPF_EXIT_INSN(),
1039
- },
1040
- .errstr = "R11 is invalid",
1041
- .result = REJECT,
1042
- },
1043
- {
1044
- "junk insn",
1045
- .insns = {
1046
- BPF_RAW_INSN(0, 0, 0, 0, 0),
1047
- BPF_EXIT_INSN(),
1048
- },
1049
- .errstr = "unknown opcode 00",
1050
- .result = REJECT,
1051
- },
1052
- {
1053
- "junk insn2",
1054
- .insns = {
1055
- BPF_RAW_INSN(1, 0, 0, 0, 0),
1056
- BPF_EXIT_INSN(),
1057
- },
1058
- .errstr = "BPF_LDX uses reserved fields",
1059
- .result = REJECT,
1060
- },
1061
- {
1062
- "junk insn3",
1063
- .insns = {
1064
- BPF_RAW_INSN(-1, 0, 0, 0, 0),
1065
- BPF_EXIT_INSN(),
1066
- },
1067
- .errstr = "unknown opcode ff",
1068
- .result = REJECT,
1069
- },
1070
- {
1071
- "junk insn4",
1072
- .insns = {
1073
- BPF_RAW_INSN(-1, -1, -1, -1, -1),
1074
- BPF_EXIT_INSN(),
1075
- },
1076
- .errstr = "unknown opcode ff",
1077
- .result = REJECT,
1078
- },
1079
- {
1080
- "junk insn5",
1081
- .insns = {
1082
- BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1083
- BPF_EXIT_INSN(),
1084
- },
1085
- .errstr = "BPF_ALU uses reserved fields",
1086
- .result = REJECT,
1087
- },
1088
- {
1089
- "misaligned read from stack",
1090
- .insns = {
1091
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1092
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1093
- BPF_EXIT_INSN(),
1094
- },
1095
- .errstr = "misaligned stack access",
1096
- .result = REJECT,
1097
- },
1098
- {
1099
- "invalid map_fd for function call",
1100
- .insns = {
1101
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1102
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1103
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1104
- BPF_LD_MAP_FD(BPF_REG_1, 0),
1105
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1106
- BPF_FUNC_map_delete_elem),
1107
- BPF_EXIT_INSN(),
1108
- },
1109
- .errstr = "fd 0 is not pointing to valid bpf_map",
1110
- .result = REJECT,
1111
- },
1112
- {
1113
- "don't check return value before access",
1114
- .insns = {
1115
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1116
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1117
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1118
- BPF_LD_MAP_FD(BPF_REG_1, 0),
1119
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1120
- BPF_FUNC_map_lookup_elem),
1121
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1122
- BPF_EXIT_INSN(),
1123
- },
1124
- .fixup_map1 = { 3 },
1125
- .errstr = "R0 invalid mem access 'map_value_or_null'",
1126
- .result = REJECT,
1127
- },
1128
- {
1129
- "access memory with incorrect alignment",
1130
- .insns = {
1131
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1132
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1133
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1134
- BPF_LD_MAP_FD(BPF_REG_1, 0),
1135
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1136
- BPF_FUNC_map_lookup_elem),
1137
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1138
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1139
- BPF_EXIT_INSN(),
1140
- },
1141
- .fixup_map1 = { 3 },
1142
- .errstr = "misaligned value access",
1143
- .result = REJECT,
1144
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1145
- },
1146
- {
1147
- "sometimes access memory with incorrect alignment",
1148
- .insns = {
1149
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1150
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1151
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1152
- BPF_LD_MAP_FD(BPF_REG_1, 0),
1153
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1154
- BPF_FUNC_map_lookup_elem),
1155
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1156
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1157
- BPF_EXIT_INSN(),
1158
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1159
- BPF_EXIT_INSN(),
1160
- },
1161
- .fixup_map1 = { 3 },
1162
- .errstr = "R0 invalid mem access",
1163
- .errstr_unpriv = "R0 leaks addr",
1164
- .result = REJECT,
1165
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1166
- },
1167
- {
1168
- "jump test 1",
1169
- .insns = {
1170
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1171
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1172
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1173
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1174
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1175
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1176
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1177
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1178
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1179
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1180
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1181
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1182
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1183
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1184
- BPF_MOV64_IMM(BPF_REG_0, 0),
1185
- BPF_EXIT_INSN(),
1186
- },
1187
- .errstr_unpriv = "R1 pointer comparison",
1188
- .result_unpriv = REJECT,
1189
- .result = ACCEPT,
1190
- },
1191
- {
1192
- "jump test 2",
1193
- .insns = {
1194
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1195
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1196
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1197
- BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1198
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1199
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1200
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1201
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1202
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1203
- BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1204
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1205
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1206
- BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1207
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1208
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1209
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1210
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1211
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1212
- BPF_MOV64_IMM(BPF_REG_0, 0),
1213
- BPF_EXIT_INSN(),
1214
- },
1215
- .errstr_unpriv = "R1 pointer comparison",
1216
- .result_unpriv = REJECT,
1217
- .result = ACCEPT,
1218
- },
1219
- {
1220
- "jump test 3",
1221
- .insns = {
1222
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1223
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1224
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1225
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1226
- BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1227
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1228
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1229
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1230
- BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1231
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1232
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1233
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1234
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1235
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1236
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1237
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1238
- BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1239
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1240
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1241
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1242
- BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1243
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1244
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1245
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1246
- BPF_LD_MAP_FD(BPF_REG_1, 0),
1247
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1248
- BPF_FUNC_map_delete_elem),
1249
- BPF_EXIT_INSN(),
1250
- },
1251
- .fixup_map1 = { 24 },
1252
- .errstr_unpriv = "R1 pointer comparison",
1253
- .result_unpriv = REJECT,
1254
- .result = ACCEPT,
1255
- .retval = -ENOENT,
1256
- },
1257
- {
1258
- "jump test 4",
1259
- .insns = {
1260
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1261
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1262
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1263
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1264
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1265
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1266
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1267
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1268
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1269
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1270
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1271
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1272
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1273
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1274
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1275
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1276
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1277
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1278
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1279
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1280
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1281
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1282
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1283
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1284
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1285
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1286
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1287
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1288
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1289
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1290
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1291
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1292
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1293
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1294
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1295
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1296
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1297
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1298
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1299
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1300
- BPF_MOV64_IMM(BPF_REG_0, 0),
1301
- BPF_EXIT_INSN(),
1302
- },
1303
- .errstr_unpriv = "R1 pointer comparison",
1304
- .result_unpriv = REJECT,
1305
- .result = ACCEPT,
1306
- },
1307
- {
1308
- "jump test 5",
1309
- .insns = {
1310
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1311
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1312
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1313
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1314
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1315
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1316
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1317
- BPF_MOV64_IMM(BPF_REG_0, 0),
1318
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1319
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1320
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1321
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1322
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1323
- BPF_MOV64_IMM(BPF_REG_0, 0),
1324
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1325
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1326
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1327
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1328
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1329
- BPF_MOV64_IMM(BPF_REG_0, 0),
1330
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1331
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1332
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1333
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1334
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1335
- BPF_MOV64_IMM(BPF_REG_0, 0),
1336
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1337
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1338
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1339
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1340
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1341
- BPF_MOV64_IMM(BPF_REG_0, 0),
1342
- BPF_EXIT_INSN(),
1343
- },
1344
- .errstr_unpriv = "R1 pointer comparison",
1345
- .result_unpriv = REJECT,
1346
- .result = ACCEPT,
1347
- },
1348
- {
1349
- "access skb fields ok",
1350
- .insns = {
1351
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1352
- offsetof(struct __sk_buff, len)),
1353
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1354
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1355
- offsetof(struct __sk_buff, mark)),
1356
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1357
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1358
- offsetof(struct __sk_buff, pkt_type)),
1359
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1360
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1361
- offsetof(struct __sk_buff, queue_mapping)),
1362
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1363
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1364
- offsetof(struct __sk_buff, protocol)),
1365
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1366
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1367
- offsetof(struct __sk_buff, vlan_present)),
1368
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1369
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1370
- offsetof(struct __sk_buff, vlan_tci)),
1371
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1372
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1373
- offsetof(struct __sk_buff, napi_id)),
1374
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1375
- BPF_EXIT_INSN(),
1376
- },
1377
- .result = ACCEPT,
1378
- },
1379
- {
1380
- "access skb fields bad1",
1381
- .insns = {
1382
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1383
- BPF_EXIT_INSN(),
1384
- },
1385
- .errstr = "invalid bpf_context access",
1386
- .result = REJECT,
1387
- },
1388
- {
1389
- "access skb fields bad2",
1390
- .insns = {
1391
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1392
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1393
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1394
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1395
- BPF_LD_MAP_FD(BPF_REG_1, 0),
1396
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1397
- BPF_FUNC_map_lookup_elem),
1398
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1399
- BPF_EXIT_INSN(),
1400
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1401
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1402
- offsetof(struct __sk_buff, pkt_type)),
1403
- BPF_EXIT_INSN(),
1404
- },
1405
- .fixup_map1 = { 4 },
1406
- .errstr = "different pointers",
1407
- .errstr_unpriv = "R1 pointer comparison",
1408
- .result = REJECT,
1409
- },
1410
- {
1411
- "access skb fields bad3",
1412
- .insns = {
1413
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1414
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1415
- offsetof(struct __sk_buff, pkt_type)),
1416
- BPF_EXIT_INSN(),
1417
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1418
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1419
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1420
- BPF_LD_MAP_FD(BPF_REG_1, 0),
1421
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1422
- BPF_FUNC_map_lookup_elem),
1423
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1424
- BPF_EXIT_INSN(),
1425
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1426
- BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1427
- },
1428
- .fixup_map1 = { 6 },
1429
- .errstr = "different pointers",
1430
- .errstr_unpriv = "R1 pointer comparison",
1431
- .result = REJECT,
1432
- },
1433
- {
1434
- "access skb fields bad4",
1435
- .insns = {
1436
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1437
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1438
- offsetof(struct __sk_buff, len)),
1439
- BPF_MOV64_IMM(BPF_REG_0, 0),
1440
- BPF_EXIT_INSN(),
1441
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1442
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1443
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1444
- BPF_LD_MAP_FD(BPF_REG_1, 0),
1445
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1446
- BPF_FUNC_map_lookup_elem),
1447
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1448
- BPF_EXIT_INSN(),
1449
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1450
- BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1451
- },
1452
- .fixup_map1 = { 7 },
1453
- .errstr = "different pointers",
1454
- .errstr_unpriv = "R1 pointer comparison",
1455
- .result = REJECT,
1456
- },
1457
- {
1458
- "invalid access __sk_buff family",
1459
- .insns = {
1460
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1461
- offsetof(struct __sk_buff, family)),
1462
- BPF_EXIT_INSN(),
1463
- },
1464
- .errstr = "invalid bpf_context access",
1465
- .result = REJECT,
1466
- },
1467
- {
1468
- "invalid access __sk_buff remote_ip4",
1469
- .insns = {
1470
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1471
- offsetof(struct __sk_buff, remote_ip4)),
1472
- BPF_EXIT_INSN(),
1473
- },
1474
- .errstr = "invalid bpf_context access",
1475
- .result = REJECT,
1476
- },
1477
- {
1478
- "invalid access __sk_buff local_ip4",
1479
- .insns = {
1480
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1481
- offsetof(struct __sk_buff, local_ip4)),
1482
- BPF_EXIT_INSN(),
1483
- },
1484
- .errstr = "invalid bpf_context access",
1485
- .result = REJECT,
1486
- },
1487
- {
1488
- "invalid access __sk_buff remote_ip6",
1489
- .insns = {
1490
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1491
- offsetof(struct __sk_buff, remote_ip6)),
1492
- BPF_EXIT_INSN(),
1493
- },
1494
- .errstr = "invalid bpf_context access",
1495
- .result = REJECT,
1496
- },
1497
- {
1498
- "invalid access __sk_buff local_ip6",
1499
- .insns = {
1500
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1501
- offsetof(struct __sk_buff, local_ip6)),
1502
- BPF_EXIT_INSN(),
1503
- },
1504
- .errstr = "invalid bpf_context access",
1505
- .result = REJECT,
1506
- },
1507
- {
1508
- "invalid access __sk_buff remote_port",
1509
- .insns = {
1510
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1511
- offsetof(struct __sk_buff, remote_port)),
1512
- BPF_EXIT_INSN(),
1513
- },
1514
- .errstr = "invalid bpf_context access",
1515
- .result = REJECT,
1516
- },
1517
- {
1518
- "invalid access __sk_buff remote_port",
1519
- .insns = {
1520
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1521
- offsetof(struct __sk_buff, local_port)),
1522
- BPF_EXIT_INSN(),
1523
- },
1524
- .errstr = "invalid bpf_context access",
1525
- .result = REJECT,
1526
- },
1527
- {
1528
- "valid access __sk_buff family",
1529
- .insns = {
1530
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1531
- offsetof(struct __sk_buff, family)),
1532
- BPF_EXIT_INSN(),
1533
- },
1534
- .result = ACCEPT,
1535
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1536
- },
1537
- {
1538
- "valid access __sk_buff remote_ip4",
1539
- .insns = {
1540
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1541
- offsetof(struct __sk_buff, remote_ip4)),
1542
- BPF_EXIT_INSN(),
1543
- },
1544
- .result = ACCEPT,
1545
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1546
- },
1547
- {
1548
- "valid access __sk_buff local_ip4",
1549
- .insns = {
1550
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1551
- offsetof(struct __sk_buff, local_ip4)),
1552
- BPF_EXIT_INSN(),
1553
- },
1554
- .result = ACCEPT,
1555
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1556
- },
1557
- {
1558
- "valid access __sk_buff remote_ip6",
1559
- .insns = {
1560
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1561
- offsetof(struct __sk_buff, remote_ip6[0])),
1562
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1563
- offsetof(struct __sk_buff, remote_ip6[1])),
1564
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1565
- offsetof(struct __sk_buff, remote_ip6[2])),
1566
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1567
- offsetof(struct __sk_buff, remote_ip6[3])),
1568
- BPF_EXIT_INSN(),
1569
- },
1570
- .result = ACCEPT,
1571
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1572
- },
1573
- {
1574
- "valid access __sk_buff local_ip6",
1575
- .insns = {
1576
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1577
- offsetof(struct __sk_buff, local_ip6[0])),
1578
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1579
- offsetof(struct __sk_buff, local_ip6[1])),
1580
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1581
- offsetof(struct __sk_buff, local_ip6[2])),
1582
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1583
- offsetof(struct __sk_buff, local_ip6[3])),
1584
- BPF_EXIT_INSN(),
1585
- },
1586
- .result = ACCEPT,
1587
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1588
- },
1589
- {
1590
- "valid access __sk_buff remote_port",
1591
- .insns = {
1592
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1593
- offsetof(struct __sk_buff, remote_port)),
1594
- BPF_EXIT_INSN(),
1595
- },
1596
- .result = ACCEPT,
1597
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1598
- },
1599
- {
1600
- "valid access __sk_buff remote_port",
1601
- .insns = {
1602
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1603
- offsetof(struct __sk_buff, local_port)),
1604
- BPF_EXIT_INSN(),
1605
- },
1606
- .result = ACCEPT,
1607
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1608
- },
1609
- {
1610
- "invalid access of tc_classid for SK_SKB",
1611
- .insns = {
1612
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1613
- offsetof(struct __sk_buff, tc_classid)),
1614
- BPF_EXIT_INSN(),
1615
- },
1616
- .result = REJECT,
1617
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1618
- .errstr = "invalid bpf_context access",
1619
- },
1620
- {
1621
- "invalid access of skb->mark for SK_SKB",
1622
- .insns = {
1623
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1624
- offsetof(struct __sk_buff, mark)),
1625
- BPF_EXIT_INSN(),
1626
- },
1627
- .result = REJECT,
1628
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1629
- .errstr = "invalid bpf_context access",
1630
- },
1631
- {
1632
- "check skb->mark is not writeable by SK_SKB",
1633
- .insns = {
1634
- BPF_MOV64_IMM(BPF_REG_0, 0),
1635
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1636
- offsetof(struct __sk_buff, mark)),
1637
- BPF_EXIT_INSN(),
1638
- },
1639
- .result = REJECT,
1640
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1641
- .errstr = "invalid bpf_context access",
1642
- },
1643
- {
1644
- "check skb->tc_index is writeable by SK_SKB",
1645
- .insns = {
1646
- BPF_MOV64_IMM(BPF_REG_0, 0),
1647
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1648
- offsetof(struct __sk_buff, tc_index)),
1649
- BPF_EXIT_INSN(),
1650
- },
1651
- .result = ACCEPT,
1652
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1653
- },
1654
- {
1655
- "check skb->priority is writeable by SK_SKB",
1656
- .insns = {
1657
- BPF_MOV64_IMM(BPF_REG_0, 0),
1658
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1659
- offsetof(struct __sk_buff, priority)),
1660
- BPF_EXIT_INSN(),
1661
- },
1662
- .result = ACCEPT,
1663
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1664
- },
1665
- {
1666
- "direct packet read for SK_SKB",
1667
- .insns = {
1668
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1669
- offsetof(struct __sk_buff, data)),
1670
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1671
- offsetof(struct __sk_buff, data_end)),
1672
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1673
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1674
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1675
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1676
- BPF_MOV64_IMM(BPF_REG_0, 0),
1677
- BPF_EXIT_INSN(),
1678
- },
1679
- .result = ACCEPT,
1680
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1681
- },
1682
- {
1683
- "direct packet write for SK_SKB",
1684
- .insns = {
1685
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1686
- offsetof(struct __sk_buff, data)),
1687
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1688
- offsetof(struct __sk_buff, data_end)),
1689
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1690
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1691
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1692
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1693
- BPF_MOV64_IMM(BPF_REG_0, 0),
1694
- BPF_EXIT_INSN(),
1695
- },
1696
- .result = ACCEPT,
1697
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1698
- },
1699
- {
1700
- "overlapping checks for direct packet access SK_SKB",
1701
- .insns = {
1702
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1703
- offsetof(struct __sk_buff, data)),
1704
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1705
- offsetof(struct __sk_buff, data_end)),
1706
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1707
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1708
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1709
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1710
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1711
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1712
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1713
- BPF_MOV64_IMM(BPF_REG_0, 0),
1714
- BPF_EXIT_INSN(),
1715
- },
1716
- .result = ACCEPT,
1717
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1718
- },
1719
- {
1720
- "valid access family in SK_MSG",
1721
- .insns = {
1722
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1723
- offsetof(struct sk_msg_md, family)),
1724
- BPF_EXIT_INSN(),
1725
- },
1726
- .result = ACCEPT,
1727
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1728
- },
1729
- {
1730
- "valid access remote_ip4 in SK_MSG",
1731
- .insns = {
1732
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1733
- offsetof(struct sk_msg_md, remote_ip4)),
1734
- BPF_EXIT_INSN(),
1735
- },
1736
- .result = ACCEPT,
1737
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1738
- },
1739
- {
1740
- "valid access local_ip4 in SK_MSG",
1741
- .insns = {
1742
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1743
- offsetof(struct sk_msg_md, local_ip4)),
1744
- BPF_EXIT_INSN(),
1745
- },
1746
- .result = ACCEPT,
1747
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1748
- },
1749
- {
1750
- "valid access remote_port in SK_MSG",
1751
- .insns = {
1752
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1753
- offsetof(struct sk_msg_md, remote_port)),
1754
- BPF_EXIT_INSN(),
1755
- },
1756
- .result = ACCEPT,
1757
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1758
- },
1759
- {
1760
- "valid access local_port in SK_MSG",
1761
- .insns = {
1762
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1763
- offsetof(struct sk_msg_md, local_port)),
1764
- BPF_EXIT_INSN(),
1765
- },
1766
- .result = ACCEPT,
1767
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1768
- },
1769
- {
1770
- "valid access remote_ip6 in SK_MSG",
1771
- .insns = {
1772
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1773
- offsetof(struct sk_msg_md, remote_ip6[0])),
1774
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1775
- offsetof(struct sk_msg_md, remote_ip6[1])),
1776
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1777
- offsetof(struct sk_msg_md, remote_ip6[2])),
1778
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1779
- offsetof(struct sk_msg_md, remote_ip6[3])),
1780
- BPF_EXIT_INSN(),
1781
- },
1782
- .result = ACCEPT,
1783
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1784
- },
1785
- {
1786
- "valid access local_ip6 in SK_MSG",
1787
- .insns = {
1788
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1789
- offsetof(struct sk_msg_md, local_ip6[0])),
1790
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1791
- offsetof(struct sk_msg_md, local_ip6[1])),
1792
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1793
- offsetof(struct sk_msg_md, local_ip6[2])),
1794
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1795
- offsetof(struct sk_msg_md, local_ip6[3])),
1796
- BPF_EXIT_INSN(),
1797
- },
1798
- .result = ACCEPT,
1799
- .prog_type = BPF_PROG_TYPE_SK_SKB,
1800
- },
1801
- {
1802
- "invalid 64B read of family in SK_MSG",
1803
- .insns = {
1804
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1805
- offsetof(struct sk_msg_md, family)),
1806
- BPF_EXIT_INSN(),
1807
- },
1808
- .errstr = "invalid bpf_context access",
1809
- .result = REJECT,
1810
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1811
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1812
- },
1813
- {
1814
- "invalid read past end of SK_MSG",
1815
- .insns = {
1816
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1817
- offsetof(struct sk_msg_md, local_port) + 4),
1818
- BPF_EXIT_INSN(),
1819
- },
1820
- .errstr = "R0 !read_ok",
1821
- .result = REJECT,
1822
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1823
- },
1824
- {
1825
- "invalid read offset in SK_MSG",
1826
- .insns = {
1827
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1828
- offsetof(struct sk_msg_md, family) + 1),
1829
- BPF_EXIT_INSN(),
1830
- },
1831
- .errstr = "invalid bpf_context access",
1832
- .result = REJECT,
1833
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1834
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1835
- },
1836
- {
1837
- "direct packet read for SK_MSG",
1838
- .insns = {
1839
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1840
- offsetof(struct sk_msg_md, data)),
1841
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1842
- offsetof(struct sk_msg_md, data_end)),
1843
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1844
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1845
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1846
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1847
- BPF_MOV64_IMM(BPF_REG_0, 0),
1848
- BPF_EXIT_INSN(),
1849
- },
1850
- .result = ACCEPT,
1851
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1852
- },
1853
- {
1854
- "direct packet write for SK_MSG",
1855
- .insns = {
1856
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1857
- offsetof(struct sk_msg_md, data)),
1858
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1859
- offsetof(struct sk_msg_md, data_end)),
1860
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1861
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1862
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1863
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1864
- BPF_MOV64_IMM(BPF_REG_0, 0),
1865
- BPF_EXIT_INSN(),
1866
- },
1867
- .result = ACCEPT,
1868
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1869
- },
1870
- {
1871
- "overlapping checks for direct packet access SK_MSG",
1872
- .insns = {
1873
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1874
- offsetof(struct sk_msg_md, data)),
1875
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1876
- offsetof(struct sk_msg_md, data_end)),
1877
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1878
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1879
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1880
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1881
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1882
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1883
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1884
- BPF_MOV64_IMM(BPF_REG_0, 0),
1885
- BPF_EXIT_INSN(),
1886
- },
1887
- .result = ACCEPT,
1888
- .prog_type = BPF_PROG_TYPE_SK_MSG,
1889
- },
1890
- {
1891
- "check skb->mark is not writeable by sockets",
1892
- .insns = {
1893
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1894
- offsetof(struct __sk_buff, mark)),
1895
- BPF_EXIT_INSN(),
1896
- },
1897
- .errstr = "invalid bpf_context access",
1898
- .errstr_unpriv = "R1 leaks addr",
1899
- .result = REJECT,
1900
- },
1901
- {
1902
- "check skb->tc_index is not writeable by sockets",
1903
- .insns = {
1904
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1905
- offsetof(struct __sk_buff, tc_index)),
1906
- BPF_EXIT_INSN(),
1907
- },
1908
- .errstr = "invalid bpf_context access",
1909
- .errstr_unpriv = "R1 leaks addr",
1910
- .result = REJECT,
1911
- },
1912
- {
1913
- "check cb access: byte",
1914
- .insns = {
1915
- BPF_MOV64_IMM(BPF_REG_0, 0),
1916
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1917
- offsetof(struct __sk_buff, cb[0])),
1918
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1919
- offsetof(struct __sk_buff, cb[0]) + 1),
1920
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1921
- offsetof(struct __sk_buff, cb[0]) + 2),
1922
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1923
- offsetof(struct __sk_buff, cb[0]) + 3),
1924
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1925
- offsetof(struct __sk_buff, cb[1])),
1926
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1927
- offsetof(struct __sk_buff, cb[1]) + 1),
1928
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1929
- offsetof(struct __sk_buff, cb[1]) + 2),
1930
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1931
- offsetof(struct __sk_buff, cb[1]) + 3),
1932
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1933
- offsetof(struct __sk_buff, cb[2])),
1934
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1935
- offsetof(struct __sk_buff, cb[2]) + 1),
1936
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1937
- offsetof(struct __sk_buff, cb[2]) + 2),
1938
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1939
- offsetof(struct __sk_buff, cb[2]) + 3),
1940
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1941
- offsetof(struct __sk_buff, cb[3])),
1942
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1943
- offsetof(struct __sk_buff, cb[3]) + 1),
1944
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1945
- offsetof(struct __sk_buff, cb[3]) + 2),
1946
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1947
- offsetof(struct __sk_buff, cb[3]) + 3),
1948
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1949
- offsetof(struct __sk_buff, cb[4])),
1950
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1951
- offsetof(struct __sk_buff, cb[4]) + 1),
1952
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1953
- offsetof(struct __sk_buff, cb[4]) + 2),
1954
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1955
- offsetof(struct __sk_buff, cb[4]) + 3),
1956
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1957
- offsetof(struct __sk_buff, cb[0])),
1958
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1959
- offsetof(struct __sk_buff, cb[0]) + 1),
1960
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1961
- offsetof(struct __sk_buff, cb[0]) + 2),
1962
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1963
- offsetof(struct __sk_buff, cb[0]) + 3),
1964
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1965
- offsetof(struct __sk_buff, cb[1])),
1966
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1967
- offsetof(struct __sk_buff, cb[1]) + 1),
1968
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1969
- offsetof(struct __sk_buff, cb[1]) + 2),
1970
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1971
- offsetof(struct __sk_buff, cb[1]) + 3),
1972
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1973
- offsetof(struct __sk_buff, cb[2])),
1974
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1975
- offsetof(struct __sk_buff, cb[2]) + 1),
1976
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1977
- offsetof(struct __sk_buff, cb[2]) + 2),
1978
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1979
- offsetof(struct __sk_buff, cb[2]) + 3),
1980
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1981
- offsetof(struct __sk_buff, cb[3])),
1982
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1983
- offsetof(struct __sk_buff, cb[3]) + 1),
1984
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1985
- offsetof(struct __sk_buff, cb[3]) + 2),
1986
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1987
- offsetof(struct __sk_buff, cb[3]) + 3),
1988
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1989
- offsetof(struct __sk_buff, cb[4])),
1990
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1991
- offsetof(struct __sk_buff, cb[4]) + 1),
1992
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1993
- offsetof(struct __sk_buff, cb[4]) + 2),
1994
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1995
- offsetof(struct __sk_buff, cb[4]) + 3),
1996
- BPF_EXIT_INSN(),
1997
- },
1998
- .result = ACCEPT,
1999
- },
2000
- {
2001
- "__sk_buff->hash, offset 0, byte store not permitted",
2002
- .insns = {
2003
- BPF_MOV64_IMM(BPF_REG_0, 0),
2004
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2005
- offsetof(struct __sk_buff, hash)),
2006
- BPF_EXIT_INSN(),
2007
- },
2008
- .errstr = "invalid bpf_context access",
2009
- .result = REJECT,
2010
- },
2011
- {
2012
- "__sk_buff->tc_index, offset 3, byte store not permitted",
2013
- .insns = {
2014
- BPF_MOV64_IMM(BPF_REG_0, 0),
2015
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2016
- offsetof(struct __sk_buff, tc_index) + 3),
2017
- BPF_EXIT_INSN(),
2018
- },
2019
- .errstr = "invalid bpf_context access",
2020
- .result = REJECT,
2021
- },
2022
- {
2023
- "check skb->hash byte load permitted",
2024
- .insns = {
2025
- BPF_MOV64_IMM(BPF_REG_0, 0),
2026
-#if __BYTE_ORDER == __LITTLE_ENDIAN
2027
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2028
- offsetof(struct __sk_buff, hash)),
2029
-#else
2030
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2031
- offsetof(struct __sk_buff, hash) + 3),
2032
-#endif
2033
- BPF_EXIT_INSN(),
2034
- },
2035
- .result = ACCEPT,
2036
- },
2037
- {
2038
- "check skb->hash byte load permitted 1",
2039
- .insns = {
2040
- BPF_MOV64_IMM(BPF_REG_0, 0),
2041
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2042
- offsetof(struct __sk_buff, hash) + 1),
2043
- BPF_EXIT_INSN(),
2044
- },
2045
- .result = ACCEPT,
2046
- },
2047
- {
2048
- "check skb->hash byte load permitted 2",
2049
- .insns = {
2050
- BPF_MOV64_IMM(BPF_REG_0, 0),
2051
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2052
- offsetof(struct __sk_buff, hash) + 2),
2053
- BPF_EXIT_INSN(),
2054
- },
2055
- .result = ACCEPT,
2056
- },
2057
- {
2058
- "check skb->hash byte load permitted 3",
2059
- .insns = {
2060
- BPF_MOV64_IMM(BPF_REG_0, 0),
2061
-#if __BYTE_ORDER == __LITTLE_ENDIAN
2062
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2063
- offsetof(struct __sk_buff, hash) + 3),
2064
-#else
2065
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2066
- offsetof(struct __sk_buff, hash)),
2067
-#endif
2068
- BPF_EXIT_INSN(),
2069
- },
2070
- .result = ACCEPT,
2071
- },
2072
- {
2073
- "check cb access: byte, wrong type",
2074
- .insns = {
2075
- BPF_MOV64_IMM(BPF_REG_0, 0),
2076
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2077
- offsetof(struct __sk_buff, cb[0])),
2078
- BPF_EXIT_INSN(),
2079
- },
2080
- .errstr = "invalid bpf_context access",
2081
- .result = REJECT,
2082
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2083
- },
2084
- {
2085
- "check cb access: half",
2086
- .insns = {
2087
- BPF_MOV64_IMM(BPF_REG_0, 0),
2088
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2089
- offsetof(struct __sk_buff, cb[0])),
2090
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2091
- offsetof(struct __sk_buff, cb[0]) + 2),
2092
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2093
- offsetof(struct __sk_buff, cb[1])),
2094
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2095
- offsetof(struct __sk_buff, cb[1]) + 2),
2096
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2097
- offsetof(struct __sk_buff, cb[2])),
2098
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2099
- offsetof(struct __sk_buff, cb[2]) + 2),
2100
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2101
- offsetof(struct __sk_buff, cb[3])),
2102
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2103
- offsetof(struct __sk_buff, cb[3]) + 2),
2104
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2105
- offsetof(struct __sk_buff, cb[4])),
2106
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2107
- offsetof(struct __sk_buff, cb[4]) + 2),
2108
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2109
- offsetof(struct __sk_buff, cb[0])),
2110
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2111
- offsetof(struct __sk_buff, cb[0]) + 2),
2112
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2113
- offsetof(struct __sk_buff, cb[1])),
2114
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2115
- offsetof(struct __sk_buff, cb[1]) + 2),
2116
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2117
- offsetof(struct __sk_buff, cb[2])),
2118
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2119
- offsetof(struct __sk_buff, cb[2]) + 2),
2120
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2121
- offsetof(struct __sk_buff, cb[3])),
2122
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2123
- offsetof(struct __sk_buff, cb[3]) + 2),
2124
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2125
- offsetof(struct __sk_buff, cb[4])),
2126
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2127
- offsetof(struct __sk_buff, cb[4]) + 2),
2128
- BPF_EXIT_INSN(),
2129
- },
2130
- .result = ACCEPT,
2131
- },
2132
- {
2133
- "check cb access: half, unaligned",
2134
- .insns = {
2135
- BPF_MOV64_IMM(BPF_REG_0, 0),
2136
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2137
- offsetof(struct __sk_buff, cb[0]) + 1),
2138
- BPF_EXIT_INSN(),
2139
- },
2140
- .errstr = "misaligned context access",
2141
- .result = REJECT,
2142
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2143
- },
2144
- {
2145
- "check __sk_buff->hash, offset 0, half store not permitted",
2146
- .insns = {
2147
- BPF_MOV64_IMM(BPF_REG_0, 0),
2148
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2149
- offsetof(struct __sk_buff, hash)),
2150
- BPF_EXIT_INSN(),
2151
- },
2152
- .errstr = "invalid bpf_context access",
2153
- .result = REJECT,
2154
- },
2155
- {
2156
- "check __sk_buff->tc_index, offset 2, half store not permitted",
2157
- .insns = {
2158
- BPF_MOV64_IMM(BPF_REG_0, 0),
2159
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2160
- offsetof(struct __sk_buff, tc_index) + 2),
2161
- BPF_EXIT_INSN(),
2162
- },
2163
- .errstr = "invalid bpf_context access",
2164
- .result = REJECT,
2165
- },
2166
- {
2167
- "check skb->hash half load permitted",
2168
- .insns = {
2169
- BPF_MOV64_IMM(BPF_REG_0, 0),
2170
-#if __BYTE_ORDER == __LITTLE_ENDIAN
2171
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2172
- offsetof(struct __sk_buff, hash)),
2173
-#else
2174
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2175
- offsetof(struct __sk_buff, hash) + 2),
2176
-#endif
2177
- BPF_EXIT_INSN(),
2178
- },
2179
- .result = ACCEPT,
2180
- },
2181
- {
2182
- "check skb->hash half load permitted 2",
2183
- .insns = {
2184
- BPF_MOV64_IMM(BPF_REG_0, 0),
2185
-#if __BYTE_ORDER == __LITTLE_ENDIAN
2186
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2187
- offsetof(struct __sk_buff, hash) + 2),
2188
-#else
2189
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2190
- offsetof(struct __sk_buff, hash)),
2191
-#endif
2192
- BPF_EXIT_INSN(),
2193
- },
2194
- .result = ACCEPT,
2195
- },
2196
- {
2197
- "check skb->hash half load not permitted, unaligned 1",
2198
- .insns = {
2199
- BPF_MOV64_IMM(BPF_REG_0, 0),
2200
-#if __BYTE_ORDER == __LITTLE_ENDIAN
2201
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2202
- offsetof(struct __sk_buff, hash) + 1),
2203
-#else
2204
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2205
- offsetof(struct __sk_buff, hash) + 3),
2206
-#endif
2207
- BPF_EXIT_INSN(),
2208
- },
2209
- .errstr = "invalid bpf_context access",
2210
- .result = REJECT,
2211
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2212
- },
2213
- {
2214
- "check skb->hash half load not permitted, unaligned 3",
2215
- .insns = {
2216
- BPF_MOV64_IMM(BPF_REG_0, 0),
2217
-#if __BYTE_ORDER == __LITTLE_ENDIAN
2218
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2219
- offsetof(struct __sk_buff, hash) + 3),
2220
-#else
2221
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2222
- offsetof(struct __sk_buff, hash) + 1),
2223
-#endif
2224
- BPF_EXIT_INSN(),
2225
- },
2226
- .errstr = "invalid bpf_context access",
2227
- .result = REJECT,
2228
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2229
- },
2230
- {
2231
- "check cb access: half, wrong type",
2232
- .insns = {
2233
- BPF_MOV64_IMM(BPF_REG_0, 0),
2234
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2235
- offsetof(struct __sk_buff, cb[0])),
2236
- BPF_EXIT_INSN(),
2237
- },
2238
- .errstr = "invalid bpf_context access",
2239
- .result = REJECT,
2240
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2241
- },
2242
- {
2243
- "check cb access: word",
2244
- .insns = {
2245
- BPF_MOV64_IMM(BPF_REG_0, 0),
2246
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2247
- offsetof(struct __sk_buff, cb[0])),
2248
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2249
- offsetof(struct __sk_buff, cb[1])),
2250
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2251
- offsetof(struct __sk_buff, cb[2])),
2252
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2253
- offsetof(struct __sk_buff, cb[3])),
2254
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2255
- offsetof(struct __sk_buff, cb[4])),
2256
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2257
- offsetof(struct __sk_buff, cb[0])),
2258
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2259
- offsetof(struct __sk_buff, cb[1])),
2260
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2261
- offsetof(struct __sk_buff, cb[2])),
2262
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2263
- offsetof(struct __sk_buff, cb[3])),
2264
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2265
- offsetof(struct __sk_buff, cb[4])),
2266
- BPF_EXIT_INSN(),
2267
- },
2268
- .result = ACCEPT,
2269
- },
2270
- {
2271
- "check cb access: word, unaligned 1",
2272
- .insns = {
2273
- BPF_MOV64_IMM(BPF_REG_0, 0),
2274
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2275
- offsetof(struct __sk_buff, cb[0]) + 2),
2276
- BPF_EXIT_INSN(),
2277
- },
2278
- .errstr = "misaligned context access",
2279
- .result = REJECT,
2280
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2281
- },
2282
- {
2283
- "check cb access: word, unaligned 2",
2284
- .insns = {
2285
- BPF_MOV64_IMM(BPF_REG_0, 0),
2286
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2287
- offsetof(struct __sk_buff, cb[4]) + 1),
2288
- BPF_EXIT_INSN(),
2289
- },
2290
- .errstr = "misaligned context access",
2291
- .result = REJECT,
2292
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2293
- },
2294
- {
2295
- "check cb access: word, unaligned 3",
2296
- .insns = {
2297
- BPF_MOV64_IMM(BPF_REG_0, 0),
2298
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2299
- offsetof(struct __sk_buff, cb[4]) + 2),
2300
- BPF_EXIT_INSN(),
2301
- },
2302
- .errstr = "misaligned context access",
2303
- .result = REJECT,
2304
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2305
- },
2306
- {
2307
- "check cb access: word, unaligned 4",
2308
- .insns = {
2309
- BPF_MOV64_IMM(BPF_REG_0, 0),
2310
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2311
- offsetof(struct __sk_buff, cb[4]) + 3),
2312
- BPF_EXIT_INSN(),
2313
- },
2314
- .errstr = "misaligned context access",
2315
- .result = REJECT,
2316
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2317
- },
2318
- {
2319
- "check cb access: double",
2320
- .insns = {
2321
- BPF_MOV64_IMM(BPF_REG_0, 0),
2322
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2323
- offsetof(struct __sk_buff, cb[0])),
2324
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2325
- offsetof(struct __sk_buff, cb[2])),
2326
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2327
- offsetof(struct __sk_buff, cb[0])),
2328
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2329
- offsetof(struct __sk_buff, cb[2])),
2330
- BPF_EXIT_INSN(),
2331
- },
2332
- .result = ACCEPT,
2333
- },
2334
- {
2335
- "check cb access: double, unaligned 1",
2336
- .insns = {
2337
- BPF_MOV64_IMM(BPF_REG_0, 0),
2338
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2339
- offsetof(struct __sk_buff, cb[1])),
2340
- BPF_EXIT_INSN(),
2341
- },
2342
- .errstr = "misaligned context access",
2343
- .result = REJECT,
2344
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2345
- },
2346
- {
2347
- "check cb access: double, unaligned 2",
2348
- .insns = {
2349
- BPF_MOV64_IMM(BPF_REG_0, 0),
2350
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2351
- offsetof(struct __sk_buff, cb[3])),
2352
- BPF_EXIT_INSN(),
2353
- },
2354
- .errstr = "misaligned context access",
2355
- .result = REJECT,
2356
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2357
- },
2358
- {
2359
- "check cb access: double, oob 1",
2360
- .insns = {
2361
- BPF_MOV64_IMM(BPF_REG_0, 0),
2362
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2363
- offsetof(struct __sk_buff, cb[4])),
2364
- BPF_EXIT_INSN(),
2365
- },
2366
- .errstr = "invalid bpf_context access",
2367
- .result = REJECT,
2368
- },
2369
- {
2370
- "check cb access: double, oob 2",
2371
- .insns = {
2372
- BPF_MOV64_IMM(BPF_REG_0, 0),
2373
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2374
- offsetof(struct __sk_buff, cb[4])),
2375
- BPF_EXIT_INSN(),
2376
- },
2377
- .errstr = "invalid bpf_context access",
2378
- .result = REJECT,
2379
- },
2380
- {
2381
- "check __sk_buff->ifindex dw store not permitted",
2382
- .insns = {
2383
- BPF_MOV64_IMM(BPF_REG_0, 0),
2384
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2385
- offsetof(struct __sk_buff, ifindex)),
2386
- BPF_EXIT_INSN(),
2387
- },
2388
- .errstr = "invalid bpf_context access",
2389
- .result = REJECT,
2390
- },
2391
- {
2392
- "check __sk_buff->ifindex dw load not permitted",
2393
- .insns = {
2394
- BPF_MOV64_IMM(BPF_REG_0, 0),
2395
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2396
- offsetof(struct __sk_buff, ifindex)),
2397
- BPF_EXIT_INSN(),
2398
- },
2399
- .errstr = "invalid bpf_context access",
2400
- .result = REJECT,
2401
- },
2402
- {
2403
- "check cb access: double, wrong type",
2404
- .insns = {
2405
- BPF_MOV64_IMM(BPF_REG_0, 0),
2406
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2407
- offsetof(struct __sk_buff, cb[0])),
2408
- BPF_EXIT_INSN(),
2409
- },
2410
- .errstr = "invalid bpf_context access",
2411
- .result = REJECT,
2412
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2413
- },
2414
- {
2415
- "check out of range skb->cb access",
2416
- .insns = {
2417
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2418
- offsetof(struct __sk_buff, cb[0]) + 256),
2419
- BPF_EXIT_INSN(),
2420
- },
2421
- .errstr = "invalid bpf_context access",
2422
- .errstr_unpriv = "",
2423
- .result = REJECT,
2424
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2425
- },
2426
- {
2427
- "write skb fields from socket prog",
2428
- .insns = {
2429
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2430
- offsetof(struct __sk_buff, cb[4])),
2431
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2432
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2433
- offsetof(struct __sk_buff, mark)),
2434
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2435
- offsetof(struct __sk_buff, tc_index)),
2436
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2437
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2438
- offsetof(struct __sk_buff, cb[0])),
2439
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2440
- offsetof(struct __sk_buff, cb[2])),
2441
- BPF_EXIT_INSN(),
2442
- },
2443
- .result = ACCEPT,
2444
- .errstr_unpriv = "R1 leaks addr",
2445
- .result_unpriv = REJECT,
2446
- },
2447
- {
2448
- "write skb fields from tc_cls_act prog",
2449
- .insns = {
2450
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2451
- offsetof(struct __sk_buff, cb[0])),
2452
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2453
- offsetof(struct __sk_buff, mark)),
2454
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2455
- offsetof(struct __sk_buff, tc_index)),
2456
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2457
- offsetof(struct __sk_buff, tc_index)),
2458
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2459
- offsetof(struct __sk_buff, cb[3])),
2460
- BPF_EXIT_INSN(),
2461
- },
2462
- .errstr_unpriv = "",
2463
- .result_unpriv = REJECT,
2464
- .result = ACCEPT,
2465
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2466
- },
2467
- {
2468
- "PTR_TO_STACK store/load",
2469
- .insns = {
2470
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2471
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2472
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2473
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2474
- BPF_EXIT_INSN(),
2475
- },
2476
- .result = ACCEPT,
2477
- .retval = 0xfaceb00c,
2478
- },
2479
- {
2480
- "PTR_TO_STACK store/load - bad alignment on off",
2481
- .insns = {
2482
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2483
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2484
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2485
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2486
- BPF_EXIT_INSN(),
2487
- },
2488
- .result = REJECT,
2489
- .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2490
- },
2491
- {
2492
- "PTR_TO_STACK store/load - bad alignment on reg",
2493
- .insns = {
2494
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2495
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2496
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2497
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2498
- BPF_EXIT_INSN(),
2499
- },
2500
- .result = REJECT,
2501
- .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2502
- },
2503
- {
2504
- "PTR_TO_STACK store/load - out of bounds low",
2505
- .insns = {
2506
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2507
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2508
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2509
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2510
- BPF_EXIT_INSN(),
2511
- },
2512
- .result = REJECT,
2513
- .errstr = "invalid stack off=-79992 size=8",
2514
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2515
- },
2516
- {
2517
- "PTR_TO_STACK store/load - out of bounds high",
2518
- .insns = {
2519
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2520
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2521
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2522
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2523
- BPF_EXIT_INSN(),
2524
- },
2525
- .result = REJECT,
2526
- .errstr = "invalid stack off=0 size=8",
2527
- },
2528
- {
2529
- "unpriv: return pointer",
2530
- .insns = {
2531
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2532
- BPF_EXIT_INSN(),
2533
- },
2534
- .result = ACCEPT,
2535
- .result_unpriv = REJECT,
2536
- .errstr_unpriv = "R0 leaks addr",
2537
- .retval = POINTER_VALUE,
2538
- },
2539
- {
2540
- "unpriv: add const to pointer",
2541
- .insns = {
2542
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2543
- BPF_MOV64_IMM(BPF_REG_0, 0),
2544
- BPF_EXIT_INSN(),
2545
- },
2546
- .result = ACCEPT,
2547
- },
2548
- {
2549
- "unpriv: add pointer to pointer",
2550
- .insns = {
2551
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2552
- BPF_MOV64_IMM(BPF_REG_0, 0),
2553
- BPF_EXIT_INSN(),
2554
- },
2555
- .result = REJECT,
2556
- .errstr = "R1 pointer += pointer",
2557
- },
2558
- {
2559
- "unpriv: neg pointer",
2560
- .insns = {
2561
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2562
- BPF_MOV64_IMM(BPF_REG_0, 0),
2563
- BPF_EXIT_INSN(),
2564
- },
2565
- .result = ACCEPT,
2566
- .result_unpriv = REJECT,
2567
- .errstr_unpriv = "R1 pointer arithmetic",
2568
- },
2569
- {
2570
- "unpriv: cmp pointer with const",
2571
- .insns = {
2572
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2573
- BPF_MOV64_IMM(BPF_REG_0, 0),
2574
- BPF_EXIT_INSN(),
2575
- },
2576
- .result = ACCEPT,
2577
- .result_unpriv = REJECT,
2578
- .errstr_unpriv = "R1 pointer comparison",
2579
- },
2580
- {
2581
- "unpriv: cmp pointer with pointer",
2582
- .insns = {
2583
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2584
- BPF_MOV64_IMM(BPF_REG_0, 0),
2585
- BPF_EXIT_INSN(),
2586
- },
2587
- .result = ACCEPT,
2588
- .result_unpriv = REJECT,
2589
- .errstr_unpriv = "R10 pointer comparison",
2590
- },
2591
- {
2592
- "unpriv: check that printk is disallowed",
2593
- .insns = {
2594
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2595
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2596
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2597
- BPF_MOV64_IMM(BPF_REG_2, 8),
2598
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2599
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2600
- BPF_FUNC_trace_printk),
2601
- BPF_MOV64_IMM(BPF_REG_0, 0),
2602
- BPF_EXIT_INSN(),
2603
- },
2604
- .errstr_unpriv = "unknown func bpf_trace_printk#6",
2605
- .result_unpriv = REJECT,
2606
- .result = ACCEPT,
2607
- },
2608
- {
2609
- "unpriv: pass pointer to helper function",
2610
- .insns = {
2611
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2612
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2613
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2614
- BPF_LD_MAP_FD(BPF_REG_1, 0),
2615
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2616
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2617
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2618
- BPF_FUNC_map_update_elem),
2619
- BPF_MOV64_IMM(BPF_REG_0, 0),
2620
- BPF_EXIT_INSN(),
2621
- },
2622
- .fixup_map1 = { 3 },
2623
- .errstr_unpriv = "R4 leaks addr",
2624
- .result_unpriv = REJECT,
2625
- .result = ACCEPT,
2626
- },
2627
- {
2628
- "unpriv: indirectly pass pointer on stack to helper function",
2629
- .insns = {
2630
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2631
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2632
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2633
- BPF_LD_MAP_FD(BPF_REG_1, 0),
2634
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2635
- BPF_FUNC_map_lookup_elem),
2636
- BPF_MOV64_IMM(BPF_REG_0, 0),
2637
- BPF_EXIT_INSN(),
2638
- },
2639
- .fixup_map1 = { 3 },
2640
- .errstr = "invalid indirect read from stack off -8+0 size 8",
2641
- .result = REJECT,
2642
- },
2643
- {
2644
- "unpriv: mangle pointer on stack 1",
2645
- .insns = {
2646
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2647
- BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2648
- BPF_MOV64_IMM(BPF_REG_0, 0),
2649
- BPF_EXIT_INSN(),
2650
- },
2651
- .errstr_unpriv = "attempt to corrupt spilled",
2652
- .result_unpriv = REJECT,
2653
- .result = ACCEPT,
2654
- },
2655
- {
2656
- "unpriv: mangle pointer on stack 2",
2657
- .insns = {
2658
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2659
- BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2660
- BPF_MOV64_IMM(BPF_REG_0, 0),
2661
- BPF_EXIT_INSN(),
2662
- },
2663
- .errstr_unpriv = "attempt to corrupt spilled",
2664
- .result_unpriv = REJECT,
2665
- .result = ACCEPT,
2666
- },
2667
- {
2668
- "unpriv: read pointer from stack in small chunks",
2669
- .insns = {
2670
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2671
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2672
- BPF_MOV64_IMM(BPF_REG_0, 0),
2673
- BPF_EXIT_INSN(),
2674
- },
2675
- .errstr = "invalid size",
2676
- .result = REJECT,
2677
- },
2678
- {
2679
- "unpriv: write pointer into ctx",
2680
- .insns = {
2681
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2682
- BPF_MOV64_IMM(BPF_REG_0, 0),
2683
- BPF_EXIT_INSN(),
2684
- },
2685
- .errstr_unpriv = "R1 leaks addr",
2686
- .result_unpriv = REJECT,
2687
- .errstr = "invalid bpf_context access",
2688
- .result = REJECT,
2689
- },
2690
- {
2691
- "unpriv: spill/fill of ctx",
2692
- .insns = {
2693
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2694
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2695
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2696
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2697
- BPF_MOV64_IMM(BPF_REG_0, 0),
2698
- BPF_EXIT_INSN(),
2699
- },
2700
- .result = ACCEPT,
2701
- },
2702
- {
2703
- "unpriv: spill/fill of ctx 2",
2704
- .insns = {
2705
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2706
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2707
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2708
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2709
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2710
- BPF_FUNC_get_hash_recalc),
2711
- BPF_MOV64_IMM(BPF_REG_0, 0),
2712
- BPF_EXIT_INSN(),
2713
- },
2714
- .result = ACCEPT,
2715
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2716
- },
2717
- {
2718
- "unpriv: spill/fill of ctx 3",
2719
- .insns = {
2720
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2721
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2722
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2723
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2724
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2725
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2726
- BPF_FUNC_get_hash_recalc),
2727
- BPF_EXIT_INSN(),
2728
- },
2729
- .result = REJECT,
2730
- .errstr = "R1 type=fp expected=ctx",
2731
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2732
- },
2733
- {
2734
- "unpriv: spill/fill of ctx 4",
2735
- .insns = {
2736
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2737
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2738
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2739
- BPF_MOV64_IMM(BPF_REG_0, 1),
2740
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2741
- BPF_REG_0, -8, 0),
2742
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2743
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2744
- BPF_FUNC_get_hash_recalc),
2745
- BPF_EXIT_INSN(),
2746
- },
2747
- .result = REJECT,
2748
- .errstr = "R1 type=inv expected=ctx",
2749
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2750
- },
2751
- {
2752
- "unpriv: spill/fill of different pointers stx",
2753
- .insns = {
2754
- BPF_MOV64_IMM(BPF_REG_3, 42),
2755
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2756
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2757
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2758
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2759
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2760
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2761
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2762
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2763
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2764
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2765
- offsetof(struct __sk_buff, mark)),
2766
- BPF_MOV64_IMM(BPF_REG_0, 0),
2767
- BPF_EXIT_INSN(),
2768
- },
2769
- .result = REJECT,
2770
- .errstr = "same insn cannot be used with different pointers",
2771
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2772
- },
2773
- {
2774
- "unpriv: spill/fill of different pointers ldx",
2775
- .insns = {
2776
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2777
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2778
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2779
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2780
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2781
- -(__s32)offsetof(struct bpf_perf_event_data,
2782
- sample_period) - 8),
2783
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2784
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2785
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2786
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2787
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2788
- offsetof(struct bpf_perf_event_data,
2789
- sample_period)),
2790
- BPF_MOV64_IMM(BPF_REG_0, 0),
2791
- BPF_EXIT_INSN(),
2792
- },
2793
- .result = REJECT,
2794
- .errstr = "same insn cannot be used with different pointers",
2795
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2796
- },
2797
- {
2798
- "unpriv: write pointer into map elem value",
2799
- .insns = {
2800
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2801
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2802
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2803
- BPF_LD_MAP_FD(BPF_REG_1, 0),
2804
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2805
- BPF_FUNC_map_lookup_elem),
2806
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2807
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2808
- BPF_EXIT_INSN(),
2809
- },
2810
- .fixup_map1 = { 3 },
2811
- .errstr_unpriv = "R0 leaks addr",
2812
- .result_unpriv = REJECT,
2813
- .result = ACCEPT,
2814
- },
2815
- {
2816
- "alu32: mov u32 const",
2817
- .insns = {
2818
- BPF_MOV32_IMM(BPF_REG_7, 0),
2819
- BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
2820
- BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
2821
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2822
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
2823
- BPF_EXIT_INSN(),
2824
- },
2825
- .errstr_unpriv = "R7 invalid mem access 'inv'",
2826
- .result_unpriv = REJECT,
2827
- .result = ACCEPT,
2828
- .retval = 0,
2829
- },
2830
- {
2831
- "unpriv: partial copy of pointer",
2832
- .insns = {
2833
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2834
- BPF_MOV64_IMM(BPF_REG_0, 0),
2835
- BPF_EXIT_INSN(),
2836
- },
2837
- .errstr_unpriv = "R10 partial copy",
2838
- .result_unpriv = REJECT,
2839
- .result = ACCEPT,
2840
- },
2841
- {
2842
- "unpriv: pass pointer to tail_call",
2843
- .insns = {
2844
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2845
- BPF_LD_MAP_FD(BPF_REG_2, 0),
2846
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2847
- BPF_FUNC_tail_call),
2848
- BPF_MOV64_IMM(BPF_REG_0, 0),
2849
- BPF_EXIT_INSN(),
2850
- },
2851
- .fixup_prog1 = { 1 },
2852
- .errstr_unpriv = "R3 leaks addr into helper",
2853
- .result_unpriv = REJECT,
2854
- .result = ACCEPT,
2855
- },
2856
- {
2857
- "unpriv: cmp map pointer with zero",
2858
- .insns = {
2859
- BPF_MOV64_IMM(BPF_REG_1, 0),
2860
- BPF_LD_MAP_FD(BPF_REG_1, 0),
2861
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2862
- BPF_MOV64_IMM(BPF_REG_0, 0),
2863
- BPF_EXIT_INSN(),
2864
- },
2865
- .fixup_map1 = { 1 },
2866
- .errstr_unpriv = "R1 pointer comparison",
2867
- .result_unpriv = REJECT,
2868
- .result = ACCEPT,
2869
- },
2870
- {
2871
- "unpriv: write into frame pointer",
2872
- .insns = {
2873
- BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2874
- BPF_MOV64_IMM(BPF_REG_0, 0),
2875
- BPF_EXIT_INSN(),
2876
- },
2877
- .errstr = "frame pointer is read only",
2878
- .result = REJECT,
2879
- },
2880
- {
2881
- "unpriv: spill/fill frame pointer",
2882
- .insns = {
2883
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2884
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2885
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2886
- BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2887
- BPF_MOV64_IMM(BPF_REG_0, 0),
2888
- BPF_EXIT_INSN(),
2889
- },
2890
- .errstr = "frame pointer is read only",
2891
- .result = REJECT,
2892
- },
2893
- {
2894
- "unpriv: cmp of frame pointer",
2895
- .insns = {
2896
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2897
- BPF_MOV64_IMM(BPF_REG_0, 0),
2898
- BPF_EXIT_INSN(),
2899
- },
2900
- .errstr_unpriv = "R10 pointer comparison",
2901
- .result_unpriv = REJECT,
2902
- .result = ACCEPT,
2903
- },
2904
- {
2905
- "unpriv: adding of fp, reg",
2906
- .insns = {
2907
- BPF_MOV64_IMM(BPF_REG_0, 0),
2908
- BPF_MOV64_IMM(BPF_REG_1, 0),
2909
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2910
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2911
- BPF_EXIT_INSN(),
2912
- },
2913
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2914
- .result_unpriv = REJECT,
2915
- .result = ACCEPT,
2916
- },
2917
- {
2918
- "unpriv: adding of fp, imm",
2919
- .insns = {
2920
- BPF_MOV64_IMM(BPF_REG_0, 0),
2921
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2922
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
2923
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2924
- BPF_EXIT_INSN(),
2925
- },
2926
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2927
- .result_unpriv = REJECT,
2928
- .result = ACCEPT,
2929
- },
2930
- {
2931
- "unpriv: cmp of stack pointer",
2932
- .insns = {
2933
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2934
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2935
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2936
- BPF_MOV64_IMM(BPF_REG_0, 0),
2937
- BPF_EXIT_INSN(),
2938
- },
2939
- .errstr_unpriv = "R2 pointer comparison",
2940
- .result_unpriv = REJECT,
2941
- .result = ACCEPT,
2942
- },
2943
- {
2944
- "runtime/jit: tail_call within bounds, prog once",
2945
- .insns = {
2946
- BPF_MOV64_IMM(BPF_REG_3, 0),
2947
- BPF_LD_MAP_FD(BPF_REG_2, 0),
2948
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2949
- BPF_FUNC_tail_call),
2950
- BPF_MOV64_IMM(BPF_REG_0, 1),
2951
- BPF_EXIT_INSN(),
2952
- },
2953
- .fixup_prog1 = { 1 },
2954
- .result = ACCEPT,
2955
- .retval = 42,
2956
- },
2957
- {
2958
- "runtime/jit: tail_call within bounds, prog loop",
2959
- .insns = {
2960
- BPF_MOV64_IMM(BPF_REG_3, 1),
2961
- BPF_LD_MAP_FD(BPF_REG_2, 0),
2962
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2963
- BPF_FUNC_tail_call),
2964
- BPF_MOV64_IMM(BPF_REG_0, 1),
2965
- BPF_EXIT_INSN(),
2966
- },
2967
- .fixup_prog1 = { 1 },
2968
- .result = ACCEPT,
2969
- .retval = 41,
2970
- },
2971
- {
2972
- "runtime/jit: tail_call within bounds, no prog",
2973
- .insns = {
2974
- BPF_MOV64_IMM(BPF_REG_3, 2),
2975
- BPF_LD_MAP_FD(BPF_REG_2, 0),
2976
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2977
- BPF_FUNC_tail_call),
2978
- BPF_MOV64_IMM(BPF_REG_0, 1),
2979
- BPF_EXIT_INSN(),
2980
- },
2981
- .fixup_prog1 = { 1 },
2982
- .result = ACCEPT,
2983
- .retval = 1,
2984
- },
2985
- {
2986
- "runtime/jit: tail_call out of bounds",
2987
- .insns = {
2988
- BPF_MOV64_IMM(BPF_REG_3, 256),
2989
- BPF_LD_MAP_FD(BPF_REG_2, 0),
2990
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2991
- BPF_FUNC_tail_call),
2992
- BPF_MOV64_IMM(BPF_REG_0, 2),
2993
- BPF_EXIT_INSN(),
2994
- },
2995
- .fixup_prog1 = { 1 },
2996
- .result = ACCEPT,
2997
- .retval = 2,
2998
- },
2999
- {
3000
- "runtime/jit: pass negative index to tail_call",
3001
- .insns = {
3002
- BPF_MOV64_IMM(BPF_REG_3, -1),
3003
- BPF_LD_MAP_FD(BPF_REG_2, 0),
3004
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3005
- BPF_FUNC_tail_call),
3006
- BPF_MOV64_IMM(BPF_REG_0, 2),
3007
- BPF_EXIT_INSN(),
3008
- },
3009
- .fixup_prog1 = { 1 },
3010
- .result = ACCEPT,
3011
- .retval = 2,
3012
- },
3013
- {
3014
- "runtime/jit: pass > 32bit index to tail_call",
3015
- .insns = {
3016
- BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3017
- BPF_LD_MAP_FD(BPF_REG_2, 0),
3018
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3019
- BPF_FUNC_tail_call),
3020
- BPF_MOV64_IMM(BPF_REG_0, 2),
3021
- BPF_EXIT_INSN(),
3022
- },
3023
- .fixup_prog1 = { 2 },
3024
- .result = ACCEPT,
3025
- .retval = 42,
3026
- /* Verifier rewrite for unpriv skips tail call here. */
3027
- .retval_unpriv = 2,
3028
- },
3029
- {
3030
- "stack pointer arithmetic",
3031
- .insns = {
3032
- BPF_MOV64_IMM(BPF_REG_1, 4),
3033
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3034
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3035
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3036
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3037
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3038
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3039
- BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3040
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3041
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3042
- BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3043
- BPF_MOV64_IMM(BPF_REG_0, 0),
3044
- BPF_EXIT_INSN(),
3045
- },
3046
- .result = ACCEPT,
3047
- },
3048
- {
3049
- "raw_stack: no skb_load_bytes",
3050
- .insns = {
3051
- BPF_MOV64_IMM(BPF_REG_2, 4),
3052
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3053
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3054
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3055
- BPF_MOV64_IMM(BPF_REG_4, 8),
3056
- /* Call to skb_load_bytes() omitted. */
3057
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3058
- BPF_EXIT_INSN(),
3059
- },
3060
- .result = REJECT,
3061
- .errstr = "invalid read from stack off -8+0 size 8",
3062
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3063
- },
3064
- {
3065
- "raw_stack: skb_load_bytes, negative len",
3066
- .insns = {
3067
- BPF_MOV64_IMM(BPF_REG_2, 4),
3068
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3069
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3070
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3071
- BPF_MOV64_IMM(BPF_REG_4, -8),
3072
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3073
- BPF_FUNC_skb_load_bytes),
3074
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3075
- BPF_EXIT_INSN(),
3076
- },
3077
- .result = REJECT,
3078
- .errstr = "R4 min value is negative",
3079
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3080
- },
3081
- {
3082
- "raw_stack: skb_load_bytes, negative len 2",
3083
- .insns = {
3084
- BPF_MOV64_IMM(BPF_REG_2, 4),
3085
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3086
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3087
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3088
- BPF_MOV64_IMM(BPF_REG_4, ~0),
3089
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3090
- BPF_FUNC_skb_load_bytes),
3091
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3092
- BPF_EXIT_INSN(),
3093
- },
3094
- .result = REJECT,
3095
- .errstr = "R4 min value is negative",
3096
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3097
- },
3098
- {
3099
- "raw_stack: skb_load_bytes, zero len",
3100
- .insns = {
3101
- BPF_MOV64_IMM(BPF_REG_2, 4),
3102
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3103
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3104
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3105
- BPF_MOV64_IMM(BPF_REG_4, 0),
3106
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3107
- BPF_FUNC_skb_load_bytes),
3108
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3109
- BPF_EXIT_INSN(),
3110
- },
3111
- .result = REJECT,
3112
- .errstr = "invalid stack type R3",
3113
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3114
- },
3115
- {
3116
- "raw_stack: skb_load_bytes, no init",
3117
- .insns = {
3118
- BPF_MOV64_IMM(BPF_REG_2, 4),
3119
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3120
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3121
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3122
- BPF_MOV64_IMM(BPF_REG_4, 8),
3123
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3124
- BPF_FUNC_skb_load_bytes),
3125
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3126
- BPF_EXIT_INSN(),
3127
- },
3128
- .result = ACCEPT,
3129
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3130
- },
3131
- {
3132
- "raw_stack: skb_load_bytes, init",
3133
- .insns = {
3134
- BPF_MOV64_IMM(BPF_REG_2, 4),
3135
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3136
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3137
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3138
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3139
- BPF_MOV64_IMM(BPF_REG_4, 8),
3140
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3141
- BPF_FUNC_skb_load_bytes),
3142
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3143
- BPF_EXIT_INSN(),
3144
- },
3145
- .result = ACCEPT,
3146
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3147
- },
3148
- {
3149
- "raw_stack: skb_load_bytes, spilled regs around bounds",
3150
- .insns = {
3151
- BPF_MOV64_IMM(BPF_REG_2, 4),
3152
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3153
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3154
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3155
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3156
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3157
- BPF_MOV64_IMM(BPF_REG_4, 8),
3158
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3159
- BPF_FUNC_skb_load_bytes),
3160
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3161
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3162
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3163
- offsetof(struct __sk_buff, mark)),
3164
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3165
- offsetof(struct __sk_buff, priority)),
3166
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3167
- BPF_EXIT_INSN(),
3168
- },
3169
- .result = ACCEPT,
3170
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3171
- },
3172
- {
3173
- "raw_stack: skb_load_bytes, spilled regs corruption",
3174
- .insns = {
3175
- BPF_MOV64_IMM(BPF_REG_2, 4),
3176
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3177
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3178
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3179
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3180
- BPF_MOV64_IMM(BPF_REG_4, 8),
3181
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3182
- BPF_FUNC_skb_load_bytes),
3183
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3184
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3185
- offsetof(struct __sk_buff, mark)),
3186
- BPF_EXIT_INSN(),
3187
- },
3188
- .result = REJECT,
3189
- .errstr = "R0 invalid mem access 'inv'",
3190
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3191
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3192
- },
3193
- {
3194
- "raw_stack: skb_load_bytes, spilled regs corruption 2",
3195
- .insns = {
3196
- BPF_MOV64_IMM(BPF_REG_2, 4),
3197
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3198
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3199
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3200
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3201
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3202
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3203
- BPF_MOV64_IMM(BPF_REG_4, 8),
3204
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3205
- BPF_FUNC_skb_load_bytes),
3206
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3207
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3208
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3209
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3210
- offsetof(struct __sk_buff, mark)),
3211
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3212
- offsetof(struct __sk_buff, priority)),
3213
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3214
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3215
- offsetof(struct __sk_buff, pkt_type)),
3216
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3217
- BPF_EXIT_INSN(),
3218
- },
3219
- .result = REJECT,
3220
- .errstr = "R3 invalid mem access 'inv'",
3221
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3222
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3223
- },
3224
- {
3225
- "raw_stack: skb_load_bytes, spilled regs + data",
3226
- .insns = {
3227
- BPF_MOV64_IMM(BPF_REG_2, 4),
3228
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3229
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3230
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3231
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3232
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3233
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3234
- BPF_MOV64_IMM(BPF_REG_4, 8),
3235
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3236
- BPF_FUNC_skb_load_bytes),
3237
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3238
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3239
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3240
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3241
- offsetof(struct __sk_buff, mark)),
3242
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3243
- offsetof(struct __sk_buff, priority)),
3244
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3245
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3246
- BPF_EXIT_INSN(),
3247
- },
3248
- .result = ACCEPT,
3249
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3250
- },
3251
- {
3252
- "raw_stack: skb_load_bytes, invalid access 1",
3253
- .insns = {
3254
- BPF_MOV64_IMM(BPF_REG_2, 4),
3255
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3256
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3257
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3258
- BPF_MOV64_IMM(BPF_REG_4, 8),
3259
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3260
- BPF_FUNC_skb_load_bytes),
3261
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3262
- BPF_EXIT_INSN(),
3263
- },
3264
- .result = REJECT,
3265
- .errstr = "invalid stack type R3 off=-513 access_size=8",
3266
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3267
- },
3268
- {
3269
- "raw_stack: skb_load_bytes, invalid access 2",
3270
- .insns = {
3271
- BPF_MOV64_IMM(BPF_REG_2, 4),
3272
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3273
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3274
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3275
- BPF_MOV64_IMM(BPF_REG_4, 8),
3276
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3277
- BPF_FUNC_skb_load_bytes),
3278
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3279
- BPF_EXIT_INSN(),
3280
- },
3281
- .result = REJECT,
3282
- .errstr = "invalid stack type R3 off=-1 access_size=8",
3283
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3284
- },
3285
- {
3286
- "raw_stack: skb_load_bytes, invalid access 3",
3287
- .insns = {
3288
- BPF_MOV64_IMM(BPF_REG_2, 4),
3289
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3290
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3291
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3292
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3293
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3294
- BPF_FUNC_skb_load_bytes),
3295
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3296
- BPF_EXIT_INSN(),
3297
- },
3298
- .result = REJECT,
3299
- .errstr = "R4 min value is negative",
3300
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3301
- },
3302
- {
3303
- "raw_stack: skb_load_bytes, invalid access 4",
3304
- .insns = {
3305
- BPF_MOV64_IMM(BPF_REG_2, 4),
3306
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3307
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3308
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3309
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3310
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3311
- BPF_FUNC_skb_load_bytes),
3312
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3313
- BPF_EXIT_INSN(),
3314
- },
3315
- .result = REJECT,
3316
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3317
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3318
- },
3319
- {
3320
- "raw_stack: skb_load_bytes, invalid access 5",
3321
- .insns = {
3322
- BPF_MOV64_IMM(BPF_REG_2, 4),
3323
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3324
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3325
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3326
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3327
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3328
- BPF_FUNC_skb_load_bytes),
3329
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3330
- BPF_EXIT_INSN(),
3331
- },
3332
- .result = REJECT,
3333
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3334
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3335
- },
3336
- {
3337
- "raw_stack: skb_load_bytes, invalid access 6",
3338
- .insns = {
3339
- BPF_MOV64_IMM(BPF_REG_2, 4),
3340
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3341
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3342
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3343
- BPF_MOV64_IMM(BPF_REG_4, 0),
3344
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3345
- BPF_FUNC_skb_load_bytes),
3346
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3347
- BPF_EXIT_INSN(),
3348
- },
3349
- .result = REJECT,
3350
- .errstr = "invalid stack type R3 off=-512 access_size=0",
3351
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3352
- },
3353
- {
3354
- "raw_stack: skb_load_bytes, large access",
3355
- .insns = {
3356
- BPF_MOV64_IMM(BPF_REG_2, 4),
3357
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3358
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3359
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3360
- BPF_MOV64_IMM(BPF_REG_4, 512),
3361
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3362
- BPF_FUNC_skb_load_bytes),
3363
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3364
- BPF_EXIT_INSN(),
3365
- },
3366
- .result = ACCEPT,
3367
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3368
- },
3369
- {
3370
- "context stores via ST",
3371
- .insns = {
3372
- BPF_MOV64_IMM(BPF_REG_0, 0),
3373
- BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3374
- BPF_EXIT_INSN(),
3375
- },
3376
- .errstr = "BPF_ST stores into R1 context is not allowed",
3377
- .result = REJECT,
3378
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3379
- },
3380
- {
3381
- "context stores via XADD",
3382
- .insns = {
3383
- BPF_MOV64_IMM(BPF_REG_0, 0),
3384
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3385
- BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3386
- BPF_EXIT_INSN(),
3387
- },
3388
- .errstr = "BPF_XADD stores into R1 context is not allowed",
3389
- .result = REJECT,
3390
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3391
- },
3392
- {
3393
- "direct packet access: test1",
3394
- .insns = {
3395
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3396
- offsetof(struct __sk_buff, data)),
3397
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3398
- offsetof(struct __sk_buff, data_end)),
3399
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3400
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3401
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3402
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3403
- BPF_MOV64_IMM(BPF_REG_0, 0),
3404
- BPF_EXIT_INSN(),
3405
- },
3406
- .result = ACCEPT,
3407
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3408
- },
3409
- {
3410
- "direct packet access: test2",
3411
- .insns = {
3412
- BPF_MOV64_IMM(BPF_REG_0, 1),
3413
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3414
- offsetof(struct __sk_buff, data_end)),
3415
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3416
- offsetof(struct __sk_buff, data)),
3417
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3418
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3419
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3420
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3421
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3422
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3423
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3424
- offsetof(struct __sk_buff, data)),
3425
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3426
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3427
- offsetof(struct __sk_buff, len)),
3428
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3429
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3430
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3431
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3432
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3433
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3434
- offsetof(struct __sk_buff, data_end)),
3435
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3436
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3437
- BPF_MOV64_IMM(BPF_REG_0, 0),
3438
- BPF_EXIT_INSN(),
3439
- },
3440
- .result = ACCEPT,
3441
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3442
- },
3443
- {
3444
- "direct packet access: test3",
3445
- .insns = {
3446
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3447
- offsetof(struct __sk_buff, data)),
3448
- BPF_MOV64_IMM(BPF_REG_0, 0),
3449
- BPF_EXIT_INSN(),
3450
- },
3451
- .errstr = "invalid bpf_context access off=76",
3452
- .result = REJECT,
3453
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3454
- },
3455
- {
3456
- "direct packet access: test4 (write)",
3457
- .insns = {
3458
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3459
- offsetof(struct __sk_buff, data)),
3460
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3461
- offsetof(struct __sk_buff, data_end)),
3462
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3463
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3464
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3465
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3466
- BPF_MOV64_IMM(BPF_REG_0, 0),
3467
- BPF_EXIT_INSN(),
3468
- },
3469
- .result = ACCEPT,
3470
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3471
- },
3472
- {
3473
- "direct packet access: test5 (pkt_end >= reg, good access)",
3474
- .insns = {
3475
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3476
- offsetof(struct __sk_buff, data)),
3477
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3478
- offsetof(struct __sk_buff, data_end)),
3479
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3480
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3481
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3482
- BPF_MOV64_IMM(BPF_REG_0, 1),
3483
- BPF_EXIT_INSN(),
3484
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3485
- BPF_MOV64_IMM(BPF_REG_0, 0),
3486
- BPF_EXIT_INSN(),
3487
- },
3488
- .result = ACCEPT,
3489
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3490
- },
3491
- {
3492
- "direct packet access: test6 (pkt_end >= reg, bad access)",
3493
- .insns = {
3494
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3495
- offsetof(struct __sk_buff, data)),
3496
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3497
- offsetof(struct __sk_buff, data_end)),
3498
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3499
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3500
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3501
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3502
- BPF_MOV64_IMM(BPF_REG_0, 1),
3503
- BPF_EXIT_INSN(),
3504
- BPF_MOV64_IMM(BPF_REG_0, 0),
3505
- BPF_EXIT_INSN(),
3506
- },
3507
- .errstr = "invalid access to packet",
3508
- .result = REJECT,
3509
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3510
- },
3511
- {
3512
- "direct packet access: test7 (pkt_end >= reg, both accesses)",
3513
- .insns = {
3514
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3515
- offsetof(struct __sk_buff, data)),
3516
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3517
- offsetof(struct __sk_buff, data_end)),
3518
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3519
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3520
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3521
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3522
- BPF_MOV64_IMM(BPF_REG_0, 1),
3523
- BPF_EXIT_INSN(),
3524
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3525
- BPF_MOV64_IMM(BPF_REG_0, 0),
3526
- BPF_EXIT_INSN(),
3527
- },
3528
- .errstr = "invalid access to packet",
3529
- .result = REJECT,
3530
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3531
- },
3532
- {
3533
- "direct packet access: test8 (double test, variant 1)",
3534
- .insns = {
3535
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3536
- offsetof(struct __sk_buff, data)),
3537
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3538
- offsetof(struct __sk_buff, data_end)),
3539
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3540
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3541
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3542
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3543
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3544
- BPF_MOV64_IMM(BPF_REG_0, 1),
3545
- BPF_EXIT_INSN(),
3546
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3547
- BPF_MOV64_IMM(BPF_REG_0, 0),
3548
- BPF_EXIT_INSN(),
3549
- },
3550
- .result = ACCEPT,
3551
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3552
- },
3553
- {
3554
- "direct packet access: test9 (double test, variant 2)",
3555
- .insns = {
3556
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3557
- offsetof(struct __sk_buff, data)),
3558
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3559
- offsetof(struct __sk_buff, data_end)),
3560
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3561
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3562
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3563
- BPF_MOV64_IMM(BPF_REG_0, 1),
3564
- BPF_EXIT_INSN(),
3565
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3566
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3567
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3568
- BPF_MOV64_IMM(BPF_REG_0, 0),
3569
- BPF_EXIT_INSN(),
3570
- },
3571
- .result = ACCEPT,
3572
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3573
- },
3574
- {
3575
- "direct packet access: test10 (write invalid)",
3576
- .insns = {
3577
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3578
- offsetof(struct __sk_buff, data)),
3579
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3580
- offsetof(struct __sk_buff, data_end)),
3581
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3582
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3583
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3584
- BPF_MOV64_IMM(BPF_REG_0, 0),
3585
- BPF_EXIT_INSN(),
3586
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3587
- BPF_MOV64_IMM(BPF_REG_0, 0),
3588
- BPF_EXIT_INSN(),
3589
- },
3590
- .errstr = "invalid access to packet",
3591
- .result = REJECT,
3592
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3593
- },
3594
- {
3595
- "direct packet access: test11 (shift, good access)",
3596
- .insns = {
3597
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3598
- offsetof(struct __sk_buff, data)),
3599
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3600
- offsetof(struct __sk_buff, data_end)),
3601
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3602
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3603
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3604
- BPF_MOV64_IMM(BPF_REG_3, 144),
3605
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3606
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3607
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3608
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3609
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3610
- BPF_MOV64_IMM(BPF_REG_0, 1),
3611
- BPF_EXIT_INSN(),
3612
- BPF_MOV64_IMM(BPF_REG_0, 0),
3613
- BPF_EXIT_INSN(),
3614
- },
3615
- .result = ACCEPT,
3616
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3617
- .retval = 1,
3618
- },
3619
- {
3620
- "direct packet access: test12 (and, good access)",
3621
- .insns = {
3622
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3623
- offsetof(struct __sk_buff, data)),
3624
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3625
- offsetof(struct __sk_buff, data_end)),
3626
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3627
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3628
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3629
- BPF_MOV64_IMM(BPF_REG_3, 144),
3630
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3631
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3632
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3633
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3634
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3635
- BPF_MOV64_IMM(BPF_REG_0, 1),
3636
- BPF_EXIT_INSN(),
3637
- BPF_MOV64_IMM(BPF_REG_0, 0),
3638
- BPF_EXIT_INSN(),
3639
- },
3640
- .result = ACCEPT,
3641
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3642
- .retval = 1,
3643
- },
3644
- {
3645
- "direct packet access: test13 (branches, good access)",
3646
- .insns = {
3647
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3648
- offsetof(struct __sk_buff, data)),
3649
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3650
- offsetof(struct __sk_buff, data_end)),
3651
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3652
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3653
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3654
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3655
- offsetof(struct __sk_buff, mark)),
3656
- BPF_MOV64_IMM(BPF_REG_4, 1),
3657
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3658
- BPF_MOV64_IMM(BPF_REG_3, 14),
3659
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3660
- BPF_MOV64_IMM(BPF_REG_3, 24),
3661
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3662
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3663
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3664
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3665
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3666
- BPF_MOV64_IMM(BPF_REG_0, 1),
3667
- BPF_EXIT_INSN(),
3668
- BPF_MOV64_IMM(BPF_REG_0, 0),
3669
- BPF_EXIT_INSN(),
3670
- },
3671
- .result = ACCEPT,
3672
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3673
- .retval = 1,
3674
- },
3675
- {
3676
- "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3677
- .insns = {
3678
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3679
- offsetof(struct __sk_buff, data)),
3680
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3681
- offsetof(struct __sk_buff, data_end)),
3682
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3683
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3684
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3685
- BPF_MOV64_IMM(BPF_REG_5, 12),
3686
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3687
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3688
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3689
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3690
- BPF_MOV64_IMM(BPF_REG_0, 1),
3691
- BPF_EXIT_INSN(),
3692
- BPF_MOV64_IMM(BPF_REG_0, 0),
3693
- BPF_EXIT_INSN(),
3694
- },
3695
- .result = ACCEPT,
3696
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3697
- .retval = 1,
3698
- },
3699
- {
3700
- "direct packet access: test15 (spill with xadd)",
3701
- .insns = {
3702
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3703
- offsetof(struct __sk_buff, data)),
3704
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3705
- offsetof(struct __sk_buff, data_end)),
3706
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3707
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3708
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3709
- BPF_MOV64_IMM(BPF_REG_5, 4096),
3710
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3711
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3712
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3713
- BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3714
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3715
- BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3716
- BPF_MOV64_IMM(BPF_REG_0, 0),
3717
- BPF_EXIT_INSN(),
3718
- },
3719
- .errstr = "R2 invalid mem access 'inv'",
3720
- .result = REJECT,
3721
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3722
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3723
- },
3724
- {
3725
- "direct packet access: test16 (arith on data_end)",
3726
- .insns = {
3727
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3728
- offsetof(struct __sk_buff, data)),
3729
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3730
- offsetof(struct __sk_buff, data_end)),
3731
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3732
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3733
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3734
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3735
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3736
- BPF_MOV64_IMM(BPF_REG_0, 0),
3737
- BPF_EXIT_INSN(),
3738
- },
3739
- .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
3740
- .result = REJECT,
3741
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3742
- },
3743
- {
3744
- "direct packet access: test17 (pruning, alignment)",
3745
- .insns = {
3746
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3747
- offsetof(struct __sk_buff, data)),
3748
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3749
- offsetof(struct __sk_buff, data_end)),
3750
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3751
- offsetof(struct __sk_buff, mark)),
3752
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3753
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3754
- BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3755
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3756
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3757
- BPF_MOV64_IMM(BPF_REG_0, 0),
3758
- BPF_EXIT_INSN(),
3759
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3760
- BPF_JMP_A(-6),
3761
- },
3762
- .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3763
- .result = REJECT,
3764
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3765
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3766
- },
3767
- {
3768
- "direct packet access: test18 (imm += pkt_ptr, 1)",
3769
- .insns = {
3770
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3771
- offsetof(struct __sk_buff, data)),
3772
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3773
- offsetof(struct __sk_buff, data_end)),
3774
- BPF_MOV64_IMM(BPF_REG_0, 8),
3775
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3776
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3777
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3778
- BPF_MOV64_IMM(BPF_REG_0, 0),
3779
- BPF_EXIT_INSN(),
3780
- },
3781
- .result = ACCEPT,
3782
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3783
- },
3784
- {
3785
- "direct packet access: test19 (imm += pkt_ptr, 2)",
3786
- .insns = {
3787
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3788
- offsetof(struct __sk_buff, data)),
3789
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3790
- offsetof(struct __sk_buff, data_end)),
3791
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3792
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3793
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3794
- BPF_MOV64_IMM(BPF_REG_4, 4),
3795
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3796
- BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3797
- BPF_MOV64_IMM(BPF_REG_0, 0),
3798
- BPF_EXIT_INSN(),
3799
- },
3800
- .result = ACCEPT,
3801
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3802
- },
3803
- {
3804
- "direct packet access: test20 (x += pkt_ptr, 1)",
3805
- .insns = {
3806
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3807
- offsetof(struct __sk_buff, data)),
3808
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3809
- offsetof(struct __sk_buff, data_end)),
3810
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3811
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3812
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3813
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3814
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3815
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3816
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3817
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3818
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3819
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3820
- BPF_MOV64_IMM(BPF_REG_0, 0),
3821
- BPF_EXIT_INSN(),
3822
- },
3823
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3824
- .result = ACCEPT,
3825
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3826
- },
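test20 through test24 add a variable amount to the packet pointer. The accepted variants bound the scalar first (here with an AND mask) and re-check the result against data_end; a rough restricted-C equivalent, under the same assumptions as the earlier sketch, looks like:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int pkt_var_offset(struct __sk_buff *skb)
{
	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	__u32 off = skb->len & 0x7fff;	/* mask gives the verifier an upper bound */

	/* The bounded offset is added to the data pointer, never to
	 * data_end, and the sum is re-checked before the access.
	 */
	__u8 *p = (__u8 *)data + off;

	if ((void *)(p + 1) > data_end)
		return 0;
	return *p;
}

char _license[] SEC("license") = "GPL";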
3827
- {
3828
- "direct packet access: test21 (x += pkt_ptr, 2)",
3829
- .insns = {
3830
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3831
- offsetof(struct __sk_buff, data)),
3832
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3833
- offsetof(struct __sk_buff, data_end)),
3834
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3835
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3836
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3837
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3838
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3839
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3840
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3841
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3842
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3843
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3844
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3845
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3846
- BPF_MOV64_IMM(BPF_REG_0, 0),
3847
- BPF_EXIT_INSN(),
3848
- },
3849
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3850
- .result = ACCEPT,
3851
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3852
- },
3853
- {
3854
- "direct packet access: test22 (x += pkt_ptr, 3)",
3855
- .insns = {
3856
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3857
- offsetof(struct __sk_buff, data)),
3858
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3859
- offsetof(struct __sk_buff, data_end)),
3860
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3861
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3862
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3863
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3864
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3865
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3866
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3867
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3868
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3869
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3870
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3871
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3872
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3873
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3874
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3875
- BPF_MOV64_IMM(BPF_REG_2, 1),
3876
- BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3877
- BPF_MOV64_IMM(BPF_REG_0, 0),
3878
- BPF_EXIT_INSN(),
3879
- },
3880
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3881
- .result = ACCEPT,
3882
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3883
- },
3884
- {
3885
- "direct packet access: test23 (x += pkt_ptr, 4)",
3886
- .insns = {
3887
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3888
- offsetof(struct __sk_buff, data)),
3889
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3890
- offsetof(struct __sk_buff, data_end)),
3891
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3892
- offsetof(struct __sk_buff, mark)),
3893
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3894
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3895
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3896
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3897
- BPF_MOV64_IMM(BPF_REG_0, 31),
3898
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3899
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3900
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3901
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3902
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3903
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3904
- BPF_MOV64_IMM(BPF_REG_0, 0),
3905
- BPF_EXIT_INSN(),
3906
- },
3907
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3908
- .result = REJECT,
3909
- .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3910
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3911
- },
3912
- {
3913
- "direct packet access: test24 (x += pkt_ptr, 5)",
3914
- .insns = {
3915
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3916
- offsetof(struct __sk_buff, data)),
3917
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3918
- offsetof(struct __sk_buff, data_end)),
3919
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3920
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3921
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3922
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3923
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3924
- BPF_MOV64_IMM(BPF_REG_0, 64),
3925
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3926
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3927
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3928
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3929
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3930
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3931
- BPF_MOV64_IMM(BPF_REG_0, 0),
3932
- BPF_EXIT_INSN(),
3933
- },
3934
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3935
- .result = ACCEPT,
3936
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3937
- },
3938
- {
3939
- "direct packet access: test25 (marking on <, good access)",
3940
- .insns = {
3941
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3942
- offsetof(struct __sk_buff, data)),
3943
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3944
- offsetof(struct __sk_buff, data_end)),
3945
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3946
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3947
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3948
- BPF_MOV64_IMM(BPF_REG_0, 0),
3949
- BPF_EXIT_INSN(),
3950
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3951
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3952
- },
3953
- .result = ACCEPT,
3954
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3955
- },
3956
- {
3957
- "direct packet access: test26 (marking on <, bad access)",
3958
- .insns = {
3959
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3960
- offsetof(struct __sk_buff, data)),
3961
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3962
- offsetof(struct __sk_buff, data_end)),
3963
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3964
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3965
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3966
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3967
- BPF_MOV64_IMM(BPF_REG_0, 0),
3968
- BPF_EXIT_INSN(),
3969
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3970
- },
3971
- .result = REJECT,
3972
- .errstr = "invalid access to packet",
3973
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3974
- },
3975
- {
3976
- "direct packet access: test27 (marking on <=, good access)",
3977
- .insns = {
3978
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3979
- offsetof(struct __sk_buff, data)),
3980
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3981
- offsetof(struct __sk_buff, data_end)),
3982
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3983
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3984
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3985
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3986
- BPF_MOV64_IMM(BPF_REG_0, 1),
3987
- BPF_EXIT_INSN(),
3988
- },
3989
- .result = ACCEPT,
3990
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3991
- .retval = 1,
3992
- },
3993
- {
3994
- "direct packet access: test28 (marking on <=, bad access)",
3995
- .insns = {
3996
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3997
- offsetof(struct __sk_buff, data)),
3998
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3999
- offsetof(struct __sk_buff, data_end)),
4000
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4001
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4002
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4003
- BPF_MOV64_IMM(BPF_REG_0, 1),
4004
- BPF_EXIT_INSN(),
4005
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4006
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4007
- },
4008
- .result = REJECT,
4009
- .errstr = "invalid access to packet",
4010
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4011
- },
4012
- {
4013
- "helper access to packet: test1, valid packet_ptr range",
4014
- .insns = {
4015
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4016
- offsetof(struct xdp_md, data)),
4017
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4018
- offsetof(struct xdp_md, data_end)),
4019
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4020
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4021
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4022
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4023
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4024
- BPF_MOV64_IMM(BPF_REG_4, 0),
4025
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4026
- BPF_FUNC_map_update_elem),
4027
- BPF_MOV64_IMM(BPF_REG_0, 0),
4028
- BPF_EXIT_INSN(),
4029
- },
4030
- .fixup_map1 = { 5 },
4031
- .result_unpriv = ACCEPT,
4032
- .result = ACCEPT,
4033
- .prog_type = BPF_PROG_TYPE_XDP,
4034
- },
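The XDP helper test above passes a packet pointer straight into bpf_map_update_elem() once enough bytes are proven in bounds; the "unchecked packet_ptr" case that follows is the matching REJECT. A hedged restricted-C sketch of the same idea (BTF-style map definition from bpf_helpers.h; map and function names are invented):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, __u64);
	__type(value, __u64);
} pkt_keyed_map SEC(".maps");

SEC("xdp")
int xdp_pkt_key(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* The helper only accepts a packet pointer once key_size (and here
	 * also value_size) bytes are known to be in bounds.
	 */
	if (data + sizeof(__u64) > data_end)
		return XDP_PASS;

	bpf_map_update_elem(&pkt_keyed_map, data, data, BPF_ANY);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";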
4035
- {
4036
- "helper access to packet: test2, unchecked packet_ptr",
4037
- .insns = {
4038
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4039
- offsetof(struct xdp_md, data)),
4040
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4041
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4042
- BPF_FUNC_map_lookup_elem),
4043
- BPF_MOV64_IMM(BPF_REG_0, 0),
4044
- BPF_EXIT_INSN(),
4045
- },
4046
- .fixup_map1 = { 1 },
4047
- .result = REJECT,
4048
- .errstr = "invalid access to packet",
4049
- .prog_type = BPF_PROG_TYPE_XDP,
4050
- },
4051
- {
4052
- "helper access to packet: test3, variable add",
4053
- .insns = {
4054
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4055
- offsetof(struct xdp_md, data)),
4056
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4057
- offsetof(struct xdp_md, data_end)),
4058
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4059
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4060
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4061
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4062
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4063
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4064
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4065
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4066
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4067
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4068
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4069
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4070
- BPF_FUNC_map_lookup_elem),
4071
- BPF_MOV64_IMM(BPF_REG_0, 0),
4072
- BPF_EXIT_INSN(),
4073
- },
4074
- .fixup_map1 = { 11 },
4075
- .result = ACCEPT,
4076
- .prog_type = BPF_PROG_TYPE_XDP,
4077
- },
4078
- {
4079
- "helper access to packet: test4, packet_ptr with bad range",
4080
- .insns = {
4081
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4082
- offsetof(struct xdp_md, data)),
4083
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4084
- offsetof(struct xdp_md, data_end)),
4085
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4086
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4087
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4088
- BPF_MOV64_IMM(BPF_REG_0, 0),
4089
- BPF_EXIT_INSN(),
4090
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4091
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4092
- BPF_FUNC_map_lookup_elem),
4093
- BPF_MOV64_IMM(BPF_REG_0, 0),
4094
- BPF_EXIT_INSN(),
4095
- },
4096
- .fixup_map1 = { 7 },
4097
- .result = REJECT,
4098
- .errstr = "invalid access to packet",
4099
- .prog_type = BPF_PROG_TYPE_XDP,
4100
- },
4101
- {
4102
- "helper access to packet: test5, packet_ptr with too short range",
4103
- .insns = {
4104
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4105
- offsetof(struct xdp_md, data)),
4106
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4107
- offsetof(struct xdp_md, data_end)),
4108
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4109
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4110
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4111
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4112
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4113
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4114
- BPF_FUNC_map_lookup_elem),
4115
- BPF_MOV64_IMM(BPF_REG_0, 0),
4116
- BPF_EXIT_INSN(),
4117
- },
4118
- .fixup_map1 = { 6 },
4119
- .result = REJECT,
4120
- .errstr = "invalid access to packet",
4121
- .prog_type = BPF_PROG_TYPE_XDP,
4122
- },
4123
- {
4124
- "helper access to packet: test6, cls valid packet_ptr range",
4125
- .insns = {
4126
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4127
- offsetof(struct __sk_buff, data)),
4128
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4129
- offsetof(struct __sk_buff, data_end)),
4130
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4131
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4132
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4133
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4134
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4135
- BPF_MOV64_IMM(BPF_REG_4, 0),
4136
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4137
- BPF_FUNC_map_update_elem),
4138
- BPF_MOV64_IMM(BPF_REG_0, 0),
4139
- BPF_EXIT_INSN(),
4140
- },
4141
- .fixup_map1 = { 5 },
4142
- .result = ACCEPT,
4143
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4144
- },
4145
- {
4146
- "helper access to packet: test7, cls unchecked packet_ptr",
4147
- .insns = {
4148
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4149
- offsetof(struct __sk_buff, data)),
4150
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4151
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4152
- BPF_FUNC_map_lookup_elem),
4153
- BPF_MOV64_IMM(BPF_REG_0, 0),
4154
- BPF_EXIT_INSN(),
4155
- },
4156
- .fixup_map1 = { 1 },
4157
- .result = REJECT,
4158
- .errstr = "invalid access to packet",
4159
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4160
- },
4161
- {
4162
- "helper access to packet: test8, cls variable add",
4163
- .insns = {
4164
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4165
- offsetof(struct __sk_buff, data)),
4166
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4167
- offsetof(struct __sk_buff, data_end)),
4168
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4169
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4170
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4171
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4172
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4173
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4174
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4175
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4176
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4177
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4178
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4179
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4180
- BPF_FUNC_map_lookup_elem),
4181
- BPF_MOV64_IMM(BPF_REG_0, 0),
4182
- BPF_EXIT_INSN(),
4183
- },
4184
- .fixup_map1 = { 11 },
4185
- .result = ACCEPT,
4186
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4187
- },
4188
- {
4189
- "helper access to packet: test9, cls packet_ptr with bad range",
4190
- .insns = {
4191
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4192
- offsetof(struct __sk_buff, data)),
4193
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4194
- offsetof(struct __sk_buff, data_end)),
4195
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4196
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4197
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4198
- BPF_MOV64_IMM(BPF_REG_0, 0),
4199
- BPF_EXIT_INSN(),
4200
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4201
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4202
- BPF_FUNC_map_lookup_elem),
4203
- BPF_MOV64_IMM(BPF_REG_0, 0),
4204
- BPF_EXIT_INSN(),
4205
- },
4206
- .fixup_map1 = { 7 },
4207
- .result = REJECT,
4208
- .errstr = "invalid access to packet",
4209
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4210
- },
4211
- {
4212
- "helper access to packet: test10, cls packet_ptr with too short range",
4213
- .insns = {
4214
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4215
- offsetof(struct __sk_buff, data)),
4216
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4217
- offsetof(struct __sk_buff, data_end)),
4218
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4219
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4220
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4221
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4222
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4223
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4224
- BPF_FUNC_map_lookup_elem),
4225
- BPF_MOV64_IMM(BPF_REG_0, 0),
4226
- BPF_EXIT_INSN(),
4227
- },
4228
- .fixup_map1 = { 6 },
4229
- .result = REJECT,
4230
- .errstr = "invalid access to packet",
4231
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4232
- },
4233
- {
4234
- "helper access to packet: test11, cls unsuitable helper 1",
4235
- .insns = {
4236
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4237
- offsetof(struct __sk_buff, data)),
4238
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4239
- offsetof(struct __sk_buff, data_end)),
4240
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4241
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4242
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4243
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4244
- BPF_MOV64_IMM(BPF_REG_2, 0),
4245
- BPF_MOV64_IMM(BPF_REG_4, 42),
4246
- BPF_MOV64_IMM(BPF_REG_5, 0),
4247
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4248
- BPF_FUNC_skb_store_bytes),
4249
- BPF_MOV64_IMM(BPF_REG_0, 0),
4250
- BPF_EXIT_INSN(),
4251
- },
4252
- .result = REJECT,
4253
- .errstr = "helper access to the packet",
4254
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4255
- },
4256
- {
4257
- "helper access to packet: test12, cls unsuitable helper 2",
4258
- .insns = {
4259
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4260
- offsetof(struct __sk_buff, data)),
4261
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4262
- offsetof(struct __sk_buff, data_end)),
4263
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4264
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4265
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4266
- BPF_MOV64_IMM(BPF_REG_2, 0),
4267
- BPF_MOV64_IMM(BPF_REG_4, 4),
4268
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4269
- BPF_FUNC_skb_load_bytes),
4270
- BPF_MOV64_IMM(BPF_REG_0, 0),
4271
- BPF_EXIT_INSN(),
4272
- },
4273
- .result = REJECT,
4274
- .errstr = "helper access to the packet",
4275
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4276
- },
4277
- {
4278
- "helper access to packet: test13, cls helper ok",
4279
- .insns = {
4280
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4281
- offsetof(struct __sk_buff, data)),
4282
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4283
- offsetof(struct __sk_buff, data_end)),
4284
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4285
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4286
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4287
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4288
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4289
- BPF_MOV64_IMM(BPF_REG_2, 4),
4290
- BPF_MOV64_IMM(BPF_REG_3, 0),
4291
- BPF_MOV64_IMM(BPF_REG_4, 0),
4292
- BPF_MOV64_IMM(BPF_REG_5, 0),
4293
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4294
- BPF_FUNC_csum_diff),
4295
- BPF_MOV64_IMM(BPF_REG_0, 0),
4296
- BPF_EXIT_INSN(),
4297
- },
4298
- .result = ACCEPT,
4299
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4300
- },
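Helpers that take a memory argument, such as bpf_csum_diff() here, may be handed a packet pointer only for a length that has already been proven in bounds. A minimal restricted-C sketch of the accepted shape (same assumptions as the earlier examples):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int csum_over_packet(struct __sk_buff *skb)
{
	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	if (data + 8 > data_end)
		return 0;

	/* 4 bytes requested, 8 bytes proven: fine.  Asking for more than
	 * was checked, or for a negative/unbounded length, matches the
	 * "fail range" REJECT cases that follow.
	 */
	return bpf_csum_diff(data, 4, 0, 0, 0) != 0;
}

char _license[] SEC("license") = "GPL";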
4301
- {
4302
- "helper access to packet: test14, cls helper ok sub",
4303
- .insns = {
4304
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4305
- offsetof(struct __sk_buff, data)),
4306
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4307
- offsetof(struct __sk_buff, data_end)),
4308
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4309
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4310
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4311
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4312
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4313
- BPF_MOV64_IMM(BPF_REG_2, 4),
4314
- BPF_MOV64_IMM(BPF_REG_3, 0),
4315
- BPF_MOV64_IMM(BPF_REG_4, 0),
4316
- BPF_MOV64_IMM(BPF_REG_5, 0),
4317
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4318
- BPF_FUNC_csum_diff),
4319
- BPF_MOV64_IMM(BPF_REG_0, 0),
4320
- BPF_EXIT_INSN(),
4321
- },
4322
- .result = ACCEPT,
4323
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4324
- },
4325
- {
4326
- "helper access to packet: test15, cls helper fail sub",
4327
- .insns = {
4328
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4329
- offsetof(struct __sk_buff, data)),
4330
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4331
- offsetof(struct __sk_buff, data_end)),
4332
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4333
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4334
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4335
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4336
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4337
- BPF_MOV64_IMM(BPF_REG_2, 4),
4338
- BPF_MOV64_IMM(BPF_REG_3, 0),
4339
- BPF_MOV64_IMM(BPF_REG_4, 0),
4340
- BPF_MOV64_IMM(BPF_REG_5, 0),
4341
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4342
- BPF_FUNC_csum_diff),
4343
- BPF_MOV64_IMM(BPF_REG_0, 0),
4344
- BPF_EXIT_INSN(),
4345
- },
4346
- .result = REJECT,
4347
- .errstr = "invalid access to packet",
4348
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4349
- },
4350
- {
4351
- "helper access to packet: test16, cls helper fail range 1",
4352
- .insns = {
4353
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4354
- offsetof(struct __sk_buff, data)),
4355
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4356
- offsetof(struct __sk_buff, data_end)),
4357
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4358
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4359
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4360
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4361
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4362
- BPF_MOV64_IMM(BPF_REG_2, 8),
4363
- BPF_MOV64_IMM(BPF_REG_3, 0),
4364
- BPF_MOV64_IMM(BPF_REG_4, 0),
4365
- BPF_MOV64_IMM(BPF_REG_5, 0),
4366
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4367
- BPF_FUNC_csum_diff),
4368
- BPF_MOV64_IMM(BPF_REG_0, 0),
4369
- BPF_EXIT_INSN(),
4370
- },
4371
- .result = REJECT,
4372
- .errstr = "invalid access to packet",
4373
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4374
- },
4375
- {
4376
- "helper access to packet: test17, cls helper fail range 2",
4377
- .insns = {
4378
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4379
- offsetof(struct __sk_buff, data)),
4380
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4381
- offsetof(struct __sk_buff, data_end)),
4382
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4383
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4384
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4385
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4386
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4387
- BPF_MOV64_IMM(BPF_REG_2, -9),
4388
- BPF_MOV64_IMM(BPF_REG_3, 0),
4389
- BPF_MOV64_IMM(BPF_REG_4, 0),
4390
- BPF_MOV64_IMM(BPF_REG_5, 0),
4391
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4392
- BPF_FUNC_csum_diff),
4393
- BPF_MOV64_IMM(BPF_REG_0, 0),
4394
- BPF_EXIT_INSN(),
4395
- },
4396
- .result = REJECT,
4397
- .errstr = "R2 min value is negative",
4398
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4399
- },
4400
- {
4401
- "helper access to packet: test18, cls helper fail range 3",
4402
- .insns = {
4403
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4404
- offsetof(struct __sk_buff, data)),
4405
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4406
- offsetof(struct __sk_buff, data_end)),
4407
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4408
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4409
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4410
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4411
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4412
- BPF_MOV64_IMM(BPF_REG_2, ~0),
4413
- BPF_MOV64_IMM(BPF_REG_3, 0),
4414
- BPF_MOV64_IMM(BPF_REG_4, 0),
4415
- BPF_MOV64_IMM(BPF_REG_5, 0),
4416
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4417
- BPF_FUNC_csum_diff),
4418
- BPF_MOV64_IMM(BPF_REG_0, 0),
4419
- BPF_EXIT_INSN(),
4420
- },
4421
- .result = REJECT,
4422
- .errstr = "R2 min value is negative",
4423
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4424
- },
4425
- {
4426
- "helper access to packet: test19, cls helper range zero",
4427
- .insns = {
4428
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4429
- offsetof(struct __sk_buff, data)),
4430
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4431
- offsetof(struct __sk_buff, data_end)),
4432
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4433
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4434
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4435
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4436
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4437
- BPF_MOV64_IMM(BPF_REG_2, 0),
4438
- BPF_MOV64_IMM(BPF_REG_3, 0),
4439
- BPF_MOV64_IMM(BPF_REG_4, 0),
4440
- BPF_MOV64_IMM(BPF_REG_5, 0),
4441
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4442
- BPF_FUNC_csum_diff),
4443
- BPF_MOV64_IMM(BPF_REG_0, 0),
4444
- BPF_EXIT_INSN(),
4445
- },
4446
- .result = ACCEPT,
4447
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4448
- },
4449
- {
4450
- "helper access to packet: test20, pkt end as input",
4451
- .insns = {
4452
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4453
- offsetof(struct __sk_buff, data)),
4454
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4455
- offsetof(struct __sk_buff, data_end)),
4456
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4457
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4458
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4459
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4460
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4461
- BPF_MOV64_IMM(BPF_REG_2, 4),
4462
- BPF_MOV64_IMM(BPF_REG_3, 0),
4463
- BPF_MOV64_IMM(BPF_REG_4, 0),
4464
- BPF_MOV64_IMM(BPF_REG_5, 0),
4465
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4466
- BPF_FUNC_csum_diff),
4467
- BPF_MOV64_IMM(BPF_REG_0, 0),
4468
- BPF_EXIT_INSN(),
4469
- },
4470
- .result = REJECT,
4471
- .errstr = "R1 type=pkt_end expected=fp",
4472
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4473
- },
4474
- {
4475
- "helper access to packet: test21, wrong reg",
4476
- .insns = {
4477
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4478
- offsetof(struct __sk_buff, data)),
4479
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4480
- offsetof(struct __sk_buff, data_end)),
4481
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4482
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4483
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4484
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4485
- BPF_MOV64_IMM(BPF_REG_2, 4),
4486
- BPF_MOV64_IMM(BPF_REG_3, 0),
4487
- BPF_MOV64_IMM(BPF_REG_4, 0),
4488
- BPF_MOV64_IMM(BPF_REG_5, 0),
4489
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4490
- BPF_FUNC_csum_diff),
4491
- BPF_MOV64_IMM(BPF_REG_0, 0),
4492
- BPF_EXIT_INSN(),
4493
- },
4494
- .result = REJECT,
4495
- .errstr = "invalid access to packet",
4496
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4497
- },
4498
- {
4499
- "valid map access into an array with a constant",
4500
- .insns = {
4501
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4502
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4503
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4504
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4505
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4506
- BPF_FUNC_map_lookup_elem),
4507
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4508
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4509
- offsetof(struct test_val, foo)),
4510
- BPF_EXIT_INSN(),
4511
- },
4512
- .fixup_map2 = { 3 },
4513
- .errstr_unpriv = "R0 leaks addr",
4514
- .result_unpriv = REJECT,
4515
- .result = ACCEPT,
4516
- },
4517
- {
4518
- "valid map access into an array with a register",
4519
- .insns = {
4520
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4521
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4522
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4523
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4524
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4525
- BPF_FUNC_map_lookup_elem),
4526
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4527
- BPF_MOV64_IMM(BPF_REG_1, 4),
4528
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4529
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4530
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4531
- offsetof(struct test_val, foo)),
4532
- BPF_EXIT_INSN(),
4533
- },
4534
- .fixup_map2 = { 3 },
4535
- .errstr_unpriv = "R0 leaks addr",
4536
- .result_unpriv = REJECT,
4537
- .result = ACCEPT,
4538
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4539
- },
4540
- {
4541
- "valid map access into an array with a variable",
4542
- .insns = {
4543
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4544
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4545
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4546
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4547
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4548
- BPF_FUNC_map_lookup_elem),
4549
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4550
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4551
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4552
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4553
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4554
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4555
- offsetof(struct test_val, foo)),
4556
- BPF_EXIT_INSN(),
4557
- },
4558
- .fixup_map2 = { 3 },
4559
- .errstr_unpriv = "R0 leaks addr",
4560
- .result_unpriv = REJECT,
4561
- .result = ACCEPT,
4562
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4563
- },
4564
- {
4565
- "valid map access into an array with a signed variable",
4566
- .insns = {
4567
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4568
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4569
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4570
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4571
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4572
- BPF_FUNC_map_lookup_elem),
4573
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4574
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4575
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4576
- BPF_MOV32_IMM(BPF_REG_1, 0),
4577
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4578
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4579
- BPF_MOV32_IMM(BPF_REG_1, 0),
4580
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4581
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4582
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4583
- offsetof(struct test_val, foo)),
4584
- BPF_EXIT_INSN(),
4585
- },
4586
- .fixup_map2 = { 3 },
4587
- .errstr_unpriv = "R0 leaks addr",
4588
- .result_unpriv = REJECT,
4589
- .result = ACCEPT,
4590
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4591
- },
4592
- {
4593
- "invalid map access into an array with a constant",
4594
- .insns = {
4595
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4596
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4597
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4598
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4599
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4600
- BPF_FUNC_map_lookup_elem),
4601
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4602
- BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4603
- offsetof(struct test_val, foo)),
4604
- BPF_EXIT_INSN(),
4605
- },
4606
- .fixup_map2 = { 3 },
4607
- .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4608
- .result = REJECT,
4609
- },
4610
- {
4611
- "invalid map access into an array with a register",
4612
- .insns = {
4613
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4614
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4615
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4616
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4617
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4618
- BPF_FUNC_map_lookup_elem),
4619
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4620
- BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4621
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4622
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4623
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4624
- offsetof(struct test_val, foo)),
4625
- BPF_EXIT_INSN(),
4626
- },
4627
- .fixup_map2 = { 3 },
4628
- .errstr = "R0 min value is outside of the array range",
4629
- .result = REJECT,
4630
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4631
- },
4632
- {
4633
- "invalid map access into an array with a variable",
4634
- .insns = {
4635
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4636
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4637
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4638
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4639
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4640
- BPF_FUNC_map_lookup_elem),
4641
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4642
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4643
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4644
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4645
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4646
- offsetof(struct test_val, foo)),
4647
- BPF_EXIT_INSN(),
4648
- },
4649
- .fixup_map2 = { 3 },
4650
- .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4651
- .result = REJECT,
4652
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4653
- },
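The array-access tests use a 48-byte map value (a 4-byte index plus an 11-entry int array, matching value_size=48 in the error strings). In restricted C, the accepted "variable index" shape is roughly the following sketch (the struct layout and map definition here are reconstructed for illustration, not taken from this file):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];	/* 4 + 11 * 4 = 48 bytes */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

SEC("classifier")
int bounded_index(struct __sk_buff *skb)
{
	long long key = 0;
	struct test_val *val = bpf_map_lookup_elem(&map_hash_48b, &key);
	unsigned int i;

	if (!val)
		return 0;

	i = val->index;
	/* Without this upper bound the verifier reports "R0 unbounded
	 * memory access"; the unsigned compare also pins the lower bound.
	 */
	if (i >= MAX_ENTRIES)
		return 0;
	return val->foo[i];
}

char _license[] SEC("license") = "GPL";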
4654
- {
4655
- "invalid map access into an array with no floor check",
4656
- .insns = {
4657
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4658
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4659
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4660
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4661
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4662
- BPF_FUNC_map_lookup_elem),
4663
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4664
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4665
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4666
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4667
- BPF_MOV32_IMM(BPF_REG_1, 0),
4668
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4669
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4670
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4671
- offsetof(struct test_val, foo)),
4672
- BPF_EXIT_INSN(),
4673
- },
4674
- .fixup_map2 = { 3 },
4675
- .errstr_unpriv = "R0 leaks addr",
4676
- .errstr = "R0 unbounded memory access",
4677
- .result_unpriv = REJECT,
4678
- .result = REJECT,
4679
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4680
- },
4681
- {
4682
- "invalid map access into an array with a invalid max check",
4683
- .insns = {
4684
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4685
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4686
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4687
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4688
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4689
- BPF_FUNC_map_lookup_elem),
4690
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4691
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4692
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4693
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4694
- BPF_MOV32_IMM(BPF_REG_1, 0),
4695
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4696
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4697
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4698
- offsetof(struct test_val, foo)),
4699
- BPF_EXIT_INSN(),
4700
- },
4701
- .fixup_map2 = { 3 },
4702
- .errstr_unpriv = "R0 leaks addr",
4703
- .errstr = "invalid access to map value, value_size=48 off=44 size=8",
4704
- .result_unpriv = REJECT,
4705
- .result = REJECT,
4706
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4707
- },
4708
- {
4709
- "invalid map access into an array with a invalid max check",
4710
- .insns = {
4711
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4712
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4713
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4714
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4715
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4716
- BPF_FUNC_map_lookup_elem),
4717
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4718
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4719
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4720
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4721
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4722
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4723
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4724
- BPF_FUNC_map_lookup_elem),
4725
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4726
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4727
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4728
- offsetof(struct test_val, foo)),
4729
- BPF_EXIT_INSN(),
4730
- },
4731
- .fixup_map2 = { 3, 11 },
4732
- .errstr = "R0 pointer += pointer",
4733
- .result = REJECT,
4734
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4735
- },
4736
- {
4737
- "valid cgroup storage access",
4738
- .insns = {
4739
- BPF_MOV64_IMM(BPF_REG_2, 0),
4740
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4741
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4742
- BPF_FUNC_get_local_storage),
4743
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4744
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4745
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4746
- BPF_EXIT_INSN(),
4747
- },
4748
- .fixup_cgroup_storage = { 1 },
4749
- .result = ACCEPT,
4750
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4751
- },
4752
- {
4753
- "invalid cgroup storage access 1",
4754
- .insns = {
4755
- BPF_MOV64_IMM(BPF_REG_2, 0),
4756
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4757
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4758
- BPF_FUNC_get_local_storage),
4759
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4760
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4761
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4762
- BPF_EXIT_INSN(),
4763
- },
4764
- .fixup_map1 = { 1 },
4765
- .result = REJECT,
4766
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
4767
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4768
- },
4769
- {
4770
- "invalid cgroup storage access 2",
4771
- .insns = {
4772
- BPF_MOV64_IMM(BPF_REG_2, 0),
4773
- BPF_LD_MAP_FD(BPF_REG_1, 1),
4774
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4775
- BPF_FUNC_get_local_storage),
4776
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4777
- BPF_EXIT_INSN(),
4778
- },
4779
- .result = REJECT,
4780
- .errstr = "fd 1 is not pointing to valid bpf_map",
4781
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4782
- },
4783
- {
4784
- "invalid per-cgroup storage access 3",
4785
- .insns = {
4786
- BPF_MOV64_IMM(BPF_REG_2, 0),
4787
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4788
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4789
- BPF_FUNC_get_local_storage),
4790
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
4791
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4792
- BPF_MOV64_IMM(BPF_REG_0, 0),
4793
- BPF_EXIT_INSN(),
4794
- },
4795
- .fixup_cgroup_storage = { 1 },
4796
- .result = REJECT,
4797
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
4798
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4799
- },
4800
- {
4801
- "invalid cgroup storage access 4",
4802
- .insns = {
4803
- BPF_MOV64_IMM(BPF_REG_2, 0),
4804
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4805
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4806
- BPF_FUNC_get_local_storage),
4807
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
4808
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4809
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4810
- BPF_EXIT_INSN(),
4811
- },
4812
- .fixup_cgroup_storage = { 1 },
4813
- .result = REJECT,
4814
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
4815
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4816
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4817
- },
4818
- {
4819
- "invalid cgroup storage access 5",
4820
- .insns = {
4821
- BPF_MOV64_IMM(BPF_REG_2, 7),
4822
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4823
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4824
- BPF_FUNC_get_local_storage),
4825
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4826
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4827
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4828
- BPF_EXIT_INSN(),
4829
- },
4830
- .fixup_cgroup_storage = { 1 },
4831
- .result = REJECT,
4832
- .errstr = "get_local_storage() doesn't support non-zero flags",
4833
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4834
- },
4835
- {
4836
- "invalid cgroup storage access 6",
4837
- .insns = {
4838
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
4839
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4840
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4841
- BPF_FUNC_get_local_storage),
4842
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4843
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4844
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4845
- BPF_EXIT_INSN(),
4846
- },
4847
- .fixup_cgroup_storage = { 1 },
4848
- .result = REJECT,
4849
- .errstr = "get_local_storage() doesn't support non-zero flags",
4850
- .errstr_unpriv = "R2 leaks addr into helper function",
4851
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4852
- },
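The cgroup storage tests revolve around bpf_get_local_storage(): the map must really be a cgroup storage map, the flags argument must be zero, and accesses must stay inside the value. A hedged restricted-C sketch (map, section, and function names assumed, not from this file):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} cg_storage SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	/* Non-zero flags or a non-cgroup-storage map fd are the REJECT
	 * cases above; the returned pointer is a plain map value, so no
	 * NULL check is needed before use.
	 */
	__u64 *counter = bpf_get_local_storage(&cg_storage, 0);

	__sync_fetch_and_add(counter, 1);
	return 1;	/* let the packet through */
}

char _license[] SEC("license") = "GPL";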
4853
- {
4854
- "multiple registers share map_lookup_elem result",
4855
- .insns = {
4856
- BPF_MOV64_IMM(BPF_REG_1, 10),
4857
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4858
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4859
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4860
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4861
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4862
- BPF_FUNC_map_lookup_elem),
4863
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4864
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4865
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4866
- BPF_EXIT_INSN(),
4867
- },
4868
- .fixup_map1 = { 4 },
4869
- .result = ACCEPT,
4870
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
4871
- },
4872
- {
4873
- "alu ops on ptr_to_map_value_or_null, 1",
4874
- .insns = {
4875
- BPF_MOV64_IMM(BPF_REG_1, 10),
4876
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4877
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4878
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4879
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4880
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4881
- BPF_FUNC_map_lookup_elem),
4882
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4883
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4884
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4885
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4886
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4887
- BPF_EXIT_INSN(),
4888
- },
4889
- .fixup_map1 = { 4 },
4890
- .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4891
- .result = REJECT,
4892
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
4893
- },
4894
- {
4895
- "alu ops on ptr_to_map_value_or_null, 2",
4896
- .insns = {
4897
- BPF_MOV64_IMM(BPF_REG_1, 10),
4898
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4899
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4900
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4901
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4902
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4903
- BPF_FUNC_map_lookup_elem),
4904
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4905
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4906
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4907
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4908
- BPF_EXIT_INSN(),
4909
- },
4910
- .fixup_map1 = { 4 },
4911
- .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4912
- .result = REJECT,
4913
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
4914
- },
4915
- {
4916
- "alu ops on ptr_to_map_value_or_null, 3",
4917
- .insns = {
4918
- BPF_MOV64_IMM(BPF_REG_1, 10),
4919
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4920
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4921
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4922
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4923
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4924
- BPF_FUNC_map_lookup_elem),
4925
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4926
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4927
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4928
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4929
- BPF_EXIT_INSN(),
4930
- },
4931
- .fixup_map1 = { 4 },
4932
- .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4933
- .result = REJECT,
4934
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
4935
- },
4936
- {
4937
- "invalid memory access with multiple map_lookup_elem calls",
4938
- .insns = {
4939
- BPF_MOV64_IMM(BPF_REG_1, 10),
4940
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4941
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4942
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4943
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4944
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4945
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4946
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4947
- BPF_FUNC_map_lookup_elem),
4948
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4949
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4950
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4951
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4952
- BPF_FUNC_map_lookup_elem),
4953
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4954
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4955
- BPF_EXIT_INSN(),
4956
- },
4957
- .fixup_map1 = { 4 },
4958
- .result = REJECT,
4959
- .errstr = "R4 !read_ok",
4960
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
4961
- },
4962
- {
4963
- "valid indirect map_lookup_elem access with 2nd lookup in branch",
4964
- .insns = {
4965
- BPF_MOV64_IMM(BPF_REG_1, 10),
4966
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4967
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4968
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4969
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4970
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4971
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4972
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4973
- BPF_FUNC_map_lookup_elem),
4974
- BPF_MOV64_IMM(BPF_REG_2, 10),
4975
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4976
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4977
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4978
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4979
- BPF_FUNC_map_lookup_elem),
4980
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4981
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4982
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4983
- BPF_EXIT_INSN(),
4984
- },
4985
- .fixup_map1 = { 4 },
4986
- .result = ACCEPT,
4987
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
4988
- },
4989
- {
4990
- "invalid map access from else condition",
4991
- .insns = {
4992
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4993
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4994
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4995
- BPF_LD_MAP_FD(BPF_REG_1, 0),
4996
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4997
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4998
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4999
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5000
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5001
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5002
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5003
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5004
- BPF_EXIT_INSN(),
5005
- },
5006
- .fixup_map2 = { 3 },
5007
- .errstr = "R0 unbounded memory access",
5008
- .result = REJECT,
5009
- .errstr_unpriv = "R0 leaks addr",
5010
- .result_unpriv = REJECT,
5011
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5012
- },
5013
- {
5014
- "constant register |= constant should keep constant type",
5015
- .insns = {
5016
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5017
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5018
- BPF_MOV64_IMM(BPF_REG_2, 34),
5019
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5020
- BPF_MOV64_IMM(BPF_REG_3, 0),
5021
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5022
- BPF_EXIT_INSN(),
5023
- },
5024
- .result = ACCEPT,
5025
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5026
- },
5027
- {
5028
- "constant register |= constant should not bypass stack boundary checks",
5029
- .insns = {
5030
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5031
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5032
- BPF_MOV64_IMM(BPF_REG_2, 34),
5033
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5034
- BPF_MOV64_IMM(BPF_REG_3, 0),
5035
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5036
- BPF_EXIT_INSN(),
5037
- },
5038
- .errstr = "invalid stack type R1 off=-48 access_size=58",
5039
- .result = REJECT,
5040
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5041
- },
5042
- {
5043
- "constant register |= constant register should keep constant type",
5044
- .insns = {
5045
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5046
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5047
- BPF_MOV64_IMM(BPF_REG_2, 34),
5048
- BPF_MOV64_IMM(BPF_REG_4, 13),
5049
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5050
- BPF_MOV64_IMM(BPF_REG_3, 0),
5051
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5052
- BPF_EXIT_INSN(),
5053
- },
5054
- .result = ACCEPT,
5055
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5056
- },
5057
- {
5058
- "constant register |= constant register should not bypass stack boundary checks",
5059
- .insns = {
5060
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5061
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5062
- BPF_MOV64_IMM(BPF_REG_2, 34),
5063
- BPF_MOV64_IMM(BPF_REG_4, 24),
5064
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5065
- BPF_MOV64_IMM(BPF_REG_3, 0),
5066
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5067
- BPF_EXIT_INSN(),
5068
- },
5069
- .errstr = "invalid stack type R1 off=-48 access_size=58",
5070
- .result = REJECT,
5071
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5072
- },
5073
- {
5074
- "invalid direct packet write for LWT_IN",
5075
- .insns = {
5076
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5077
- offsetof(struct __sk_buff, data)),
5078
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5079
- offsetof(struct __sk_buff, data_end)),
5080
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5081
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5082
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5083
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5084
- BPF_MOV64_IMM(BPF_REG_0, 0),
5085
- BPF_EXIT_INSN(),
5086
- },
5087
- .errstr = "cannot write into packet",
5088
- .result = REJECT,
5089
- .prog_type = BPF_PROG_TYPE_LWT_IN,
5090
- },
5091
- {
5092
- "invalid direct packet write for LWT_OUT",
5093
- .insns = {
5094
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5095
- offsetof(struct __sk_buff, data)),
5096
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5097
- offsetof(struct __sk_buff, data_end)),
5098
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5099
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5100
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5101
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5102
- BPF_MOV64_IMM(BPF_REG_0, 0),
5103
- BPF_EXIT_INSN(),
5104
- },
5105
- .errstr = "cannot write into packet",
5106
- .result = REJECT,
5107
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
5108
- },
5109
- {
5110
- "direct packet write for LWT_XMIT",
5111
- .insns = {
5112
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5113
- offsetof(struct __sk_buff, data)),
5114
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5115
- offsetof(struct __sk_buff, data_end)),
5116
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5117
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5118
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5119
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5120
- BPF_MOV64_IMM(BPF_REG_0, 0),
5121
- BPF_EXIT_INSN(),
5122
- },
5123
- .result = ACCEPT,
5124
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5125
- },
5126
- {
5127
- "direct packet read for LWT_IN",
5128
- .insns = {
5129
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5130
- offsetof(struct __sk_buff, data)),
5131
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5132
- offsetof(struct __sk_buff, data_end)),
5133
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5134
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5135
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5136
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5137
- BPF_MOV64_IMM(BPF_REG_0, 0),
5138
- BPF_EXIT_INSN(),
5139
- },
5140
- .result = ACCEPT,
5141
- .prog_type = BPF_PROG_TYPE_LWT_IN,
5142
- },
5143
- {
5144
- "direct packet read for LWT_OUT",
5145
- .insns = {
5146
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5147
- offsetof(struct __sk_buff, data)),
5148
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5149
- offsetof(struct __sk_buff, data_end)),
5150
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5151
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5152
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5153
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5154
- BPF_MOV64_IMM(BPF_REG_0, 0),
5155
- BPF_EXIT_INSN(),
5156
- },
5157
- .result = ACCEPT,
5158
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
5159
- },
5160
- {
5161
- "direct packet read for LWT_XMIT",
5162
- .insns = {
5163
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5164
- offsetof(struct __sk_buff, data)),
5165
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5166
- offsetof(struct __sk_buff, data_end)),
5167
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5168
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5169
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5170
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5171
- BPF_MOV64_IMM(BPF_REG_0, 0),
5172
- BPF_EXIT_INSN(),
5173
- },
5174
- .result = ACCEPT,
5175
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5176
- },
5177
- {
5178
- "overlapping checks for direct packet access",
5179
- .insns = {
5180
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5181
- offsetof(struct __sk_buff, data)),
5182
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5183
- offsetof(struct __sk_buff, data_end)),
5184
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5185
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5186
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5187
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5188
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5189
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5190
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5191
- BPF_MOV64_IMM(BPF_REG_0, 0),
5192
- BPF_EXIT_INSN(),
5193
- },
5194
- .result = ACCEPT,
5195
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5196
- },
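The direct packet access tests above all encode the same guard: copy data, advance the copy by the access size, and compare it against data_end before touching the packet; whether the access may be a write then depends only on the program type (allowed for LWT_XMIT, rejected for LWT_IN/LWT_OUT). A rough C-level equivalent of the accepted LWT_XMIT shape, as a sketch only (a real BPF object would carry a section annotation such as SEC("lwt_xmit") and be built with clang -target bpf):

#include <linux/bpf.h>

int lwt_xmit_prog(struct __sk_buff *skb)
{
	unsigned char *data = (unsigned char *)(long)skb->data;
	unsigned char *data_end = (unsigned char *)(long)skb->data_end;

	/* Mirrors BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1): bail out
	 * unless the first 8 bytes are provably inside the packet.
	 */
	if (data + 8 > data_end)
		return 0;

	data[0] = 0;	/* packet write: ACCEPT for LWT_XMIT, "cannot write into packet" otherwise */
	return 0;
}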
5197
- {
5198
- "make headroom for LWT_XMIT",
5199
- .insns = {
5200
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5201
- BPF_MOV64_IMM(BPF_REG_2, 34),
5202
- BPF_MOV64_IMM(BPF_REG_3, 0),
5203
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5204
- /* split for s390 to succeed */
5205
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5206
- BPF_MOV64_IMM(BPF_REG_2, 42),
5207
- BPF_MOV64_IMM(BPF_REG_3, 0),
5208
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5209
- BPF_MOV64_IMM(BPF_REG_0, 0),
5210
- BPF_EXIT_INSN(),
5211
- },
5212
- .result = ACCEPT,
5213
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5214
- },
5215
- {
5216
- "invalid access of tc_classid for LWT_IN",
5217
- .insns = {
5218
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5219
- offsetof(struct __sk_buff, tc_classid)),
5220
- BPF_EXIT_INSN(),
5221
- },
5222
- .result = REJECT,
5223
- .errstr = "invalid bpf_context access",
5224
- },
5225
- {
5226
- "invalid access of tc_classid for LWT_OUT",
5227
- .insns = {
5228
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5229
- offsetof(struct __sk_buff, tc_classid)),
5230
- BPF_EXIT_INSN(),
5231
- },
5232
- .result = REJECT,
5233
- .errstr = "invalid bpf_context access",
5234
- },
5235
- {
5236
- "invalid access of tc_classid for LWT_XMIT",
5237
- .insns = {
5238
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5239
- offsetof(struct __sk_buff, tc_classid)),
5240
- BPF_EXIT_INSN(),
5241
- },
5242
- .result = REJECT,
5243
- .errstr = "invalid bpf_context access",
5244
- },
5245
- {
5246
- "leak pointer into ctx 1",
5247
- .insns = {
5248
- BPF_MOV64_IMM(BPF_REG_0, 0),
5249
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5250
- offsetof(struct __sk_buff, cb[0])),
5251
- BPF_LD_MAP_FD(BPF_REG_2, 0),
5252
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5253
- offsetof(struct __sk_buff, cb[0])),
5254
- BPF_EXIT_INSN(),
5255
- },
5256
- .fixup_map1 = { 2 },
5257
- .errstr_unpriv = "R2 leaks addr into mem",
5258
- .result_unpriv = REJECT,
5259
- .result = REJECT,
5260
- .errstr = "BPF_XADD stores into R1 context is not allowed",
5261
- },
5262
- {
5263
- "leak pointer into ctx 2",
5264
- .insns = {
5265
- BPF_MOV64_IMM(BPF_REG_0, 0),
5266
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5267
- offsetof(struct __sk_buff, cb[0])),
5268
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5269
- offsetof(struct __sk_buff, cb[0])),
5270
- BPF_EXIT_INSN(),
5271
- },
5272
- .errstr_unpriv = "R10 leaks addr into mem",
5273
- .result_unpriv = REJECT,
5274
- .result = REJECT,
5275
- .errstr = "BPF_XADD stores into R1 context is not allowed",
5276
- },
5277
- {
5278
- "leak pointer into ctx 3",
5279
- .insns = {
5280
- BPF_MOV64_IMM(BPF_REG_0, 0),
5281
- BPF_LD_MAP_FD(BPF_REG_2, 0),
5282
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5283
- offsetof(struct __sk_buff, cb[0])),
5284
- BPF_EXIT_INSN(),
5285
- },
5286
- .fixup_map1 = { 1 },
5287
- .errstr_unpriv = "R2 leaks addr into ctx",
5288
- .result_unpriv = REJECT,
5289
- .result = ACCEPT,
5290
- },
5291
- {
5292
- "leak pointer into map val",
5293
- .insns = {
5294
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5295
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5296
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5297
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5298
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5299
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5300
- BPF_FUNC_map_lookup_elem),
5301
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5302
- BPF_MOV64_IMM(BPF_REG_3, 0),
5303
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5304
- BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5305
- BPF_MOV64_IMM(BPF_REG_0, 0),
5306
- BPF_EXIT_INSN(),
5307
- },
5308
- .fixup_map1 = { 4 },
5309
- .errstr_unpriv = "R6 leaks addr into mem",
5310
- .result_unpriv = REJECT,
5311
- .result = ACCEPT,
5312
- },
5313
- {
5314
- "helper access to map: full range",
5315
- .insns = {
5316
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5317
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5318
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5319
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5320
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5321
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5322
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5323
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5324
- BPF_MOV64_IMM(BPF_REG_3, 0),
5325
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5326
- BPF_EXIT_INSN(),
5327
- },
5328
- .fixup_map2 = { 3 },
5329
- .result = ACCEPT,
5330
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5331
- },
5332
- {
5333
- "helper access to map: partial range",
5334
- .insns = {
5335
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5336
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5337
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5338
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5339
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5340
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5341
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5342
- BPF_MOV64_IMM(BPF_REG_2, 8),
5343
- BPF_MOV64_IMM(BPF_REG_3, 0),
5344
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5345
- BPF_EXIT_INSN(),
5346
- },
5347
- .fixup_map2 = { 3 },
5348
- .result = ACCEPT,
5349
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5350
- },
5351
- {
5352
- "helper access to map: empty range",
5353
- .insns = {
5354
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5355
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5356
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5357
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5358
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5359
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5360
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5361
- BPF_MOV64_IMM(BPF_REG_2, 0),
5362
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5363
- BPF_EXIT_INSN(),
5364
- },
5365
- .fixup_map2 = { 3 },
5366
- .errstr = "invalid access to map value, value_size=48 off=0 size=0",
5367
- .result = REJECT,
5368
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5369
- },
5370
- {
5371
- "helper access to map: out-of-bound range",
5372
- .insns = {
5373
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5374
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5375
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5376
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5377
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5378
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5379
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5380
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5381
- BPF_MOV64_IMM(BPF_REG_3, 0),
5382
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5383
- BPF_EXIT_INSN(),
5384
- },
5385
- .fixup_map2 = { 3 },
5386
- .errstr = "invalid access to map value, value_size=48 off=0 size=56",
5387
- .result = REJECT,
5388
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5389
- },
5390
- {
5391
- "helper access to map: negative range",
5392
- .insns = {
5393
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5394
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5395
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5396
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5397
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5398
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5399
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5400
- BPF_MOV64_IMM(BPF_REG_2, -8),
5401
- BPF_MOV64_IMM(BPF_REG_3, 0),
5402
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5403
- BPF_EXIT_INSN(),
5404
- },
5405
- .fixup_map2 = { 3 },
5406
- .errstr = "R2 min value is negative",
5407
- .result = REJECT,
5408
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5409
- },
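The five tests above hand a pointer to a 48-byte map value to bpf_probe_read() as the destination and vary only the size in R2: anything in [1, 48] is accepted, while 0, 56 (sizeof + 8) and negative sizes trip the messages quoted in .errstr. The 48 and the off=4 seen in later errstr strings come from the value layout used for fixup_map2; the sketch below restates that layout and the accepted calls (the helper declaration follows the classic bpf_helpers.h call-by-id style; the adjusted-map tests that follow reuse the same layout starting at offsetof(struct test_val, foo) == 4):

/* Value layout behind fixup_map2: 48 bytes, foo[] at offset 4 -- hence
 * "value_size=48" and "off=4" in the errstr strings around here.
 */
struct test_val {
	unsigned int index;
	int foo[11];		/* MAX_ENTRIES == 11 in this file */
};

/* As in bpf_helpers.h: helper invoked by id (BPF_FUNC_probe_read == 4). */
static long (*bpf_probe_read)(void *dst, unsigned int size, const void *src) =
	(void *) 4;

static void demo(struct test_val *val)
{
	/* src is NULL here, as in the tests: only the verifier-side
	 * destination/size check is being exercised.
	 */
	bpf_probe_read(val, sizeof(*val), 0);		/* full range: ACCEPT       */
	bpf_probe_read(val->foo, sizeof(*val) - 4, 0);	/* adjusted by +4: ACCEPT   */
	/* sizeof(*val) + 8, 0 or a negative size reproduce the REJECT cases. */
}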
5410
- {
5411
- "helper access to adjusted map (via const imm): full range",
5412
- .insns = {
5413
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5414
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5415
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5416
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5417
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5418
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5419
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5420
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5421
- offsetof(struct test_val, foo)),
5422
- BPF_MOV64_IMM(BPF_REG_2,
5423
- sizeof(struct test_val) -
5424
- offsetof(struct test_val, foo)),
5425
- BPF_MOV64_IMM(BPF_REG_3, 0),
5426
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5427
- BPF_EXIT_INSN(),
5428
- },
5429
- .fixup_map2 = { 3 },
5430
- .result = ACCEPT,
5431
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5432
- },
5433
- {
5434
- "helper access to adjusted map (via const imm): partial range",
5435
- .insns = {
5436
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5437
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5438
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5439
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5440
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5441
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5442
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5443
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5444
- offsetof(struct test_val, foo)),
5445
- BPF_MOV64_IMM(BPF_REG_2, 8),
5446
- BPF_MOV64_IMM(BPF_REG_3, 0),
5447
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5448
- BPF_EXIT_INSN(),
5449
- },
5450
- .fixup_map2 = { 3 },
5451
- .result = ACCEPT,
5452
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5453
- },
5454
- {
5455
- "helper access to adjusted map (via const imm): empty range",
5456
- .insns = {
5457
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5458
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5459
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5460
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5461
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5462
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5463
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5464
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5465
- offsetof(struct test_val, foo)),
5466
- BPF_MOV64_IMM(BPF_REG_2, 0),
5467
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5468
- BPF_EXIT_INSN(),
5469
- },
5470
- .fixup_map2 = { 3 },
5471
- .errstr = "invalid access to map value, value_size=48 off=4 size=0",
5472
- .result = REJECT,
5473
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5474
- },
5475
- {
5476
- "helper access to adjusted map (via const imm): out-of-bound range",
5477
- .insns = {
5478
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5479
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5480
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5481
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5482
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5483
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5484
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5485
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5486
- offsetof(struct test_val, foo)),
5487
- BPF_MOV64_IMM(BPF_REG_2,
5488
- sizeof(struct test_val) -
5489
- offsetof(struct test_val, foo) + 8),
5490
- BPF_MOV64_IMM(BPF_REG_3, 0),
5491
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5492
- BPF_EXIT_INSN(),
5493
- },
5494
- .fixup_map2 = { 3 },
5495
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5496
- .result = REJECT,
5497
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5498
- },
5499
- {
5500
- "helper access to adjusted map (via const imm): negative range (> adjustment)",
5501
- .insns = {
5502
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5503
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5504
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5505
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5506
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5507
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5508
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5509
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5510
- offsetof(struct test_val, foo)),
5511
- BPF_MOV64_IMM(BPF_REG_2, -8),
5512
- BPF_MOV64_IMM(BPF_REG_3, 0),
5513
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5514
- BPF_EXIT_INSN(),
5515
- },
5516
- .fixup_map2 = { 3 },
5517
- .errstr = "R2 min value is negative",
5518
- .result = REJECT,
5519
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5520
- },
5521
- {
5522
- "helper access to adjusted map (via const imm): negative range (< adjustment)",
5523
- .insns = {
5524
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5525
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5526
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5527
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5528
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5529
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5530
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5531
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5532
- offsetof(struct test_val, foo)),
5533
- BPF_MOV64_IMM(BPF_REG_2, -1),
5534
- BPF_MOV64_IMM(BPF_REG_3, 0),
5535
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5536
- BPF_EXIT_INSN(),
5537
- },
5538
- .fixup_map2 = { 3 },
5539
- .errstr = "R2 min value is negative",
5540
- .result = REJECT,
5541
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5542
- },
5543
- {
5544
- "helper access to adjusted map (via const reg): full range",
5545
- .insns = {
5546
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5547
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5548
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5549
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5550
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5551
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5552
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5553
- BPF_MOV64_IMM(BPF_REG_3,
5554
- offsetof(struct test_val, foo)),
5555
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5556
- BPF_MOV64_IMM(BPF_REG_2,
5557
- sizeof(struct test_val) -
5558
- offsetof(struct test_val, foo)),
5559
- BPF_MOV64_IMM(BPF_REG_3, 0),
5560
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5561
- BPF_EXIT_INSN(),
5562
- },
5563
- .fixup_map2 = { 3 },
5564
- .result = ACCEPT,
5565
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5566
- },
5567
- {
5568
- "helper access to adjusted map (via const reg): partial range",
5569
- .insns = {
5570
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5571
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5572
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5573
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5574
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5575
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5576
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5577
- BPF_MOV64_IMM(BPF_REG_3,
5578
- offsetof(struct test_val, foo)),
5579
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5580
- BPF_MOV64_IMM(BPF_REG_2, 8),
5581
- BPF_MOV64_IMM(BPF_REG_3, 0),
5582
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5583
- BPF_EXIT_INSN(),
5584
- },
5585
- .fixup_map2 = { 3 },
5586
- .result = ACCEPT,
5587
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5588
- },
5589
- {
5590
- "helper access to adjusted map (via const reg): empty range",
5591
- .insns = {
5592
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5593
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5594
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5595
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5596
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5597
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5598
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5599
- BPF_MOV64_IMM(BPF_REG_3, 0),
5600
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5601
- BPF_MOV64_IMM(BPF_REG_2, 0),
5602
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5603
- BPF_EXIT_INSN(),
5604
- },
5605
- .fixup_map2 = { 3 },
5606
- .errstr = "R1 min value is outside of the array range",
5607
- .result = REJECT,
5608
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5609
- },
5610
- {
5611
- "helper access to adjusted map (via const reg): out-of-bound range",
5612
- .insns = {
5613
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5614
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5615
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5616
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5617
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5618
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5619
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5620
- BPF_MOV64_IMM(BPF_REG_3,
5621
- offsetof(struct test_val, foo)),
5622
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5623
- BPF_MOV64_IMM(BPF_REG_2,
5624
- sizeof(struct test_val) -
5625
- offsetof(struct test_val, foo) + 8),
5626
- BPF_MOV64_IMM(BPF_REG_3, 0),
5627
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5628
- BPF_EXIT_INSN(),
5629
- },
5630
- .fixup_map2 = { 3 },
5631
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5632
- .result = REJECT,
5633
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5634
- },
5635
- {
5636
- "helper access to adjusted map (via const reg): negative range (> adjustment)",
5637
- .insns = {
5638
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5639
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5640
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5641
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5642
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5643
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5644
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5645
- BPF_MOV64_IMM(BPF_REG_3,
5646
- offsetof(struct test_val, foo)),
5647
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5648
- BPF_MOV64_IMM(BPF_REG_2, -8),
5649
- BPF_MOV64_IMM(BPF_REG_3, 0),
5650
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5651
- BPF_EXIT_INSN(),
5652
- },
5653
- .fixup_map2 = { 3 },
5654
- .errstr = "R2 min value is negative",
5655
- .result = REJECT,
5656
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5657
- },
5658
- {
5659
- "helper access to adjusted map (via const reg): negative range (< adjustment)",
5660
- .insns = {
5661
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5662
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5663
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5664
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5665
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5666
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5667
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5668
- BPF_MOV64_IMM(BPF_REG_3,
5669
- offsetof(struct test_val, foo)),
5670
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5671
- BPF_MOV64_IMM(BPF_REG_2, -1),
5672
- BPF_MOV64_IMM(BPF_REG_3, 0),
5673
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5674
- BPF_EXIT_INSN(),
5675
- },
5676
- .fixup_map2 = { 3 },
5677
- .errstr = "R2 min value is negative",
5678
- .result = REJECT,
5679
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5680
- },
5681
- {
5682
- "helper access to adjusted map (via variable): full range",
5683
- .insns = {
5684
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5685
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5686
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5687
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5688
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5689
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5690
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5691
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5692
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5693
- offsetof(struct test_val, foo), 4),
5694
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5695
- BPF_MOV64_IMM(BPF_REG_2,
5696
- sizeof(struct test_val) -
5697
- offsetof(struct test_val, foo)),
5698
- BPF_MOV64_IMM(BPF_REG_3, 0),
5699
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5700
- BPF_EXIT_INSN(),
5701
- },
5702
- .fixup_map2 = { 3 },
5703
- .result = ACCEPT,
5704
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5705
- },
5706
- {
5707
- "helper access to adjusted map (via variable): partial range",
5708
- .insns = {
5709
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5710
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5711
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5712
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5713
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5714
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5715
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5716
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5717
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5718
- offsetof(struct test_val, foo), 4),
5719
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5720
- BPF_MOV64_IMM(BPF_REG_2, 8),
5721
- BPF_MOV64_IMM(BPF_REG_3, 0),
5722
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5723
- BPF_EXIT_INSN(),
5724
- },
5725
- .fixup_map2 = { 3 },
5726
- .result = ACCEPT,
5727
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5728
- },
5729
- {
5730
- "helper access to adjusted map (via variable): empty range",
5731
- .insns = {
5732
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5733
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5734
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5735
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5736
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5737
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5738
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5739
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5740
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5741
- offsetof(struct test_val, foo), 3),
5742
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5743
- BPF_MOV64_IMM(BPF_REG_2, 0),
5744
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5745
- BPF_EXIT_INSN(),
5746
- },
5747
- .fixup_map2 = { 3 },
5748
- .errstr = "R1 min value is outside of the array range",
5749
- .result = REJECT,
5750
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5751
- },
5752
- {
5753
- "helper access to adjusted map (via variable): no max check",
5754
- .insns = {
5755
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5756
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5757
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5758
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5759
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5760
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5761
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5762
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5763
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5764
- BPF_MOV64_IMM(BPF_REG_2, 1),
5765
- BPF_MOV64_IMM(BPF_REG_3, 0),
5766
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5767
- BPF_EXIT_INSN(),
5768
- },
5769
- .fixup_map2 = { 3 },
5770
- .errstr = "R1 unbounded memory access",
5771
- .result = REJECT,
5772
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5773
- },
5774
- {
5775
- "helper access to adjusted map (via variable): wrong max check",
5776
- .insns = {
5777
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5778
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5779
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5780
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5781
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5782
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5783
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5784
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5785
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5786
- offsetof(struct test_val, foo), 4),
5787
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5788
- BPF_MOV64_IMM(BPF_REG_2,
5789
- sizeof(struct test_val) -
5790
- offsetof(struct test_val, foo) + 1),
5791
- BPF_MOV64_IMM(BPF_REG_3, 0),
5792
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
5793
- BPF_EXIT_INSN(),
5794
- },
5795
- .fixup_map2 = { 3 },
5796
- .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5797
- .result = REJECT,
5798
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5799
- },
5800
- {
5801
- "helper access to map: bounds check using <, good access",
5802
- .insns = {
5803
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5804
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5805
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5806
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5807
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5808
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5809
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5810
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5811
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5812
- BPF_MOV64_IMM(BPF_REG_0, 0),
5813
- BPF_EXIT_INSN(),
5814
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5815
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5816
- BPF_MOV64_IMM(BPF_REG_0, 0),
5817
- BPF_EXIT_INSN(),
5818
- },
5819
- .fixup_map2 = { 3 },
5820
- .result = ACCEPT,
5821
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5822
- },
5823
- {
5824
- "helper access to map: bounds check using <, bad access",
5825
- .insns = {
5826
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5827
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5828
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5829
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5830
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5831
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5832
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5833
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5834
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5835
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5836
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5837
- BPF_MOV64_IMM(BPF_REG_0, 0),
5838
- BPF_EXIT_INSN(),
5839
- BPF_MOV64_IMM(BPF_REG_0, 0),
5840
- BPF_EXIT_INSN(),
5841
- },
5842
- .fixup_map2 = { 3 },
5843
- .result = REJECT,
5844
- .errstr = "R1 unbounded memory access",
5845
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5846
- },
5847
- {
5848
- "helper access to map: bounds check using <=, good access",
5849
- .insns = {
5850
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5851
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5852
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5853
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5854
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5855
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5856
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5857
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5858
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5859
- BPF_MOV64_IMM(BPF_REG_0, 0),
5860
- BPF_EXIT_INSN(),
5861
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5862
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5863
- BPF_MOV64_IMM(BPF_REG_0, 0),
5864
- BPF_EXIT_INSN(),
5865
- },
5866
- .fixup_map2 = { 3 },
5867
- .result = ACCEPT,
5868
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5869
- },
5870
- {
5871
- "helper access to map: bounds check using <=, bad access",
5872
- .insns = {
5873
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5874
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5875
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5876
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5877
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5878
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5879
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5880
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5881
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5882
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5883
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5884
- BPF_MOV64_IMM(BPF_REG_0, 0),
5885
- BPF_EXIT_INSN(),
5886
- BPF_MOV64_IMM(BPF_REG_0, 0),
5887
- BPF_EXIT_INSN(),
5888
- },
5889
- .fixup_map2 = { 3 },
5890
- .result = REJECT,
5891
- .errstr = "R1 unbounded memory access",
5892
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5893
- },
5894
- {
5895
- "helper access to map: bounds check using s<, good access",
5896
- .insns = {
5897
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5898
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5899
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5900
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5901
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5902
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5903
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5904
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5905
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5906
- BPF_MOV64_IMM(BPF_REG_0, 0),
5907
- BPF_EXIT_INSN(),
5908
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5909
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5910
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5911
- BPF_MOV64_IMM(BPF_REG_0, 0),
5912
- BPF_EXIT_INSN(),
5913
- },
5914
- .fixup_map2 = { 3 },
5915
- .result = ACCEPT,
5916
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5917
- },
5918
- {
5919
- "helper access to map: bounds check using s<, good access 2",
5920
- .insns = {
5921
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5922
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5923
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5924
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5925
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5926
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5927
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5928
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5929
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5930
- BPF_MOV64_IMM(BPF_REG_0, 0),
5931
- BPF_EXIT_INSN(),
5932
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5933
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5934
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5935
- BPF_MOV64_IMM(BPF_REG_0, 0),
5936
- BPF_EXIT_INSN(),
5937
- },
5938
- .fixup_map2 = { 3 },
5939
- .result = ACCEPT,
5940
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5941
- },
5942
- {
5943
- "helper access to map: bounds check using s<, bad access",
5944
- .insns = {
5945
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5946
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5947
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5948
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5949
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5950
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5951
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5952
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5953
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5954
- BPF_MOV64_IMM(BPF_REG_0, 0),
5955
- BPF_EXIT_INSN(),
5956
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5957
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5958
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5959
- BPF_MOV64_IMM(BPF_REG_0, 0),
5960
- BPF_EXIT_INSN(),
5961
- },
5962
- .fixup_map2 = { 3 },
5963
- .result = REJECT,
5964
- .errstr = "R1 min value is negative",
5965
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5966
- },
5967
- {
5968
- "helper access to map: bounds check using s<=, good access",
5969
- .insns = {
5970
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5971
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5972
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5973
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5974
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5975
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5976
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5977
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5978
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5979
- BPF_MOV64_IMM(BPF_REG_0, 0),
5980
- BPF_EXIT_INSN(),
5981
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5982
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5983
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5984
- BPF_MOV64_IMM(BPF_REG_0, 0),
5985
- BPF_EXIT_INSN(),
5986
- },
5987
- .fixup_map2 = { 3 },
5988
- .result = ACCEPT,
5989
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5990
- },
5991
- {
5992
- "helper access to map: bounds check using s<=, good access 2",
5993
- .insns = {
5994
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5995
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5996
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5997
- BPF_LD_MAP_FD(BPF_REG_1, 0),
5998
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5999
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6000
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6001
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6002
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6003
- BPF_MOV64_IMM(BPF_REG_0, 0),
6004
- BPF_EXIT_INSN(),
6005
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6006
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6007
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6008
- BPF_MOV64_IMM(BPF_REG_0, 0),
6009
- BPF_EXIT_INSN(),
6010
- },
6011
- .fixup_map2 = { 3 },
6012
- .result = ACCEPT,
6013
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6014
- },
6015
- {
6016
- "helper access to map: bounds check using s<=, bad access",
6017
- .insns = {
6018
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6019
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6020
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6021
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6022
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6023
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6024
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6025
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6026
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6027
- BPF_MOV64_IMM(BPF_REG_0, 0),
6028
- BPF_EXIT_INSN(),
6029
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6030
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6031
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6032
- BPF_MOV64_IMM(BPF_REG_0, 0),
6033
- BPF_EXIT_INSN(),
6034
- },
6035
- .fixup_map2 = { 3 },
6036
- .result = REJECT,
6037
- .errstr = "R1 min value is negative",
6038
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6039
- },
6040
- {
6041
- "map lookup helper access to map",
6042
- .insns = {
6043
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6044
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6045
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6046
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6047
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6048
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6049
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6050
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6051
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6052
- BPF_EXIT_INSN(),
6053
- },
6054
- .fixup_map3 = { 3, 8 },
6055
- .result = ACCEPT,
6056
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6057
- },
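From here on the second map (fixup_map3) stores 16-byte values whose second half is reached through offsetof(struct other_val, bar); that layout is what produces the value_size=16 messages with off=12 (sizeof - 4), off=9 and off=-4 in the tests that follow. For reference, a sketch of the layout as test_verifier.c defines it (the name of the first field is incidental; only its 8-byte size matters for the offsets):

/* 16-byte value used via fixup_map3: bar starts at offset 8, so an
 * 8-byte access at offset 8 just fits, while offsets 9, 12 or -4 fall
 * outside the value and are rejected.
 */
struct other_val {
	long long foo;		/* bytes 0..7  */
	long long bar;		/* bytes 8..15 */
};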
6058
- {
6059
- "map update helper access to map",
6060
- .insns = {
6061
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6062
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6063
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6064
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6065
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6066
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6067
- BPF_MOV64_IMM(BPF_REG_4, 0),
6068
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6069
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6070
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6071
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6072
- BPF_EXIT_INSN(),
6073
- },
6074
- .fixup_map3 = { 3, 10 },
6075
- .result = ACCEPT,
6076
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6077
- },
6078
- {
6079
- "map update helper access to map: wrong size",
6080
- .insns = {
6081
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6082
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6083
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6084
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6085
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6086
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6087
- BPF_MOV64_IMM(BPF_REG_4, 0),
6088
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6089
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6090
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6091
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6092
- BPF_EXIT_INSN(),
6093
- },
6094
- .fixup_map1 = { 3 },
6095
- .fixup_map3 = { 10 },
6096
- .result = REJECT,
6097
- .errstr = "invalid access to map value, value_size=8 off=0 size=16",
6098
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6099
- },
6100
- {
6101
- "map helper access to adjusted map (via const imm)",
6102
- .insns = {
6103
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6104
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6105
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6106
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6107
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6108
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6109
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6110
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6111
- offsetof(struct other_val, bar)),
6112
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6113
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6114
- BPF_EXIT_INSN(),
6115
- },
6116
- .fixup_map3 = { 3, 9 },
6117
- .result = ACCEPT,
6118
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6119
- },
6120
- {
6121
- "map helper access to adjusted map (via const imm): out-of-bound 1",
6122
- .insns = {
6123
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6124
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6125
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6126
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6127
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6128
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6129
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6130
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6131
- sizeof(struct other_val) - 4),
6132
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6133
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6134
- BPF_EXIT_INSN(),
6135
- },
6136
- .fixup_map3 = { 3, 9 },
6137
- .result = REJECT,
6138
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6139
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6140
- },
6141
- {
6142
- "map helper access to adjusted map (via const imm): out-of-bound 2",
6143
- .insns = {
6144
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6145
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6146
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6147
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6148
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6149
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6150
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6151
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6152
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6153
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6154
- BPF_EXIT_INSN(),
6155
- },
6156
- .fixup_map3 = { 3, 9 },
6157
- .result = REJECT,
6158
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6159
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6160
- },
6161
- {
6162
- "map helper access to adjusted map (via const reg)",
6163
- .insns = {
6164
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6165
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6166
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6167
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6168
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6169
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6170
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6171
- BPF_MOV64_IMM(BPF_REG_3,
6172
- offsetof(struct other_val, bar)),
6173
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6174
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6175
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6176
- BPF_EXIT_INSN(),
6177
- },
6178
- .fixup_map3 = { 3, 10 },
6179
- .result = ACCEPT,
6180
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6181
- },
6182
- {
6183
- "map helper access to adjusted map (via const reg): out-of-bound 1",
6184
- .insns = {
6185
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6186
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6187
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6188
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6189
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6190
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6191
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6192
- BPF_MOV64_IMM(BPF_REG_3,
6193
- sizeof(struct other_val) - 4),
6194
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6195
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6196
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6197
- BPF_EXIT_INSN(),
6198
- },
6199
- .fixup_map3 = { 3, 10 },
6200
- .result = REJECT,
6201
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6202
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6203
- },
6204
- {
6205
- "map helper access to adjusted map (via const reg): out-of-bound 2",
6206
- .insns = {
6207
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6208
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6209
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6210
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6211
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6212
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6213
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6214
- BPF_MOV64_IMM(BPF_REG_3, -4),
6215
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6216
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6217
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6218
- BPF_EXIT_INSN(),
6219
- },
6220
- .fixup_map3 = { 3, 10 },
6221
- .result = REJECT,
6222
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6223
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6224
- },
6225
- {
6226
- "map helper access to adjusted map (via variable)",
6227
- .insns = {
6228
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6229
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6230
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6231
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6232
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6233
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6234
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6235
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6236
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6237
- offsetof(struct other_val, bar), 4),
6238
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6239
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6240
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6241
- BPF_EXIT_INSN(),
6242
- },
6243
- .fixup_map3 = { 3, 11 },
6244
- .result = ACCEPT,
6245
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6246
- },
6247
- {
6248
- "map helper access to adjusted map (via variable): no max check",
6249
- .insns = {
6250
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6251
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6252
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6253
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6254
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6255
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6256
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6257
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6258
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6259
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6260
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6261
- BPF_EXIT_INSN(),
6262
- },
6263
- .fixup_map3 = { 3, 10 },
6264
- .result = REJECT,
6265
- .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
6266
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6267
- },
6268
- {
6269
- "map helper access to adjusted map (via variable): wrong max check",
6270
- .insns = {
6271
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6272
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6273
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6274
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6275
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6276
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6277
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6278
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6279
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6280
- offsetof(struct other_val, bar) + 1, 4),
6281
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6282
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6283
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6284
- BPF_EXIT_INSN(),
6285
- },
6286
- .fixup_map3 = { 3, 11 },
6287
- .result = REJECT,
6288
- .errstr = "invalid access to map value, value_size=16 off=9 size=8",
6289
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6290
- },
6291
- {
6292
- "map element value is preserved across register spilling",
6293
- .insns = {
6294
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6295
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6296
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6297
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6298
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6299
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6300
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6301
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6302
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6303
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6304
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6305
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6306
- BPF_EXIT_INSN(),
6307
- },
6308
- .fixup_map2 = { 3 },
6309
- .errstr_unpriv = "R0 leaks addr",
6310
- .result = ACCEPT,
6311
- .result_unpriv = REJECT,
6312
- },
6313
- {
6314
- "map element value or null is marked on register spilling",
6315
- .insns = {
6316
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6317
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6318
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6319
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6320
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6321
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6322
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
6323
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6324
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6325
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6326
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6327
- BPF_EXIT_INSN(),
6328
- },
6329
- .fixup_map2 = { 3 },
6330
- .errstr_unpriv = "R0 leaks addr",
6331
- .result = ACCEPT,
6332
- .result_unpriv = REJECT,
6333
- },
6334
- {
6335
- "map element value store of cleared call register",
6336
- .insns = {
6337
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6338
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6339
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6340
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6341
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6342
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
6343
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
6344
- BPF_EXIT_INSN(),
6345
- },
6346
- .fixup_map2 = { 3 },
6347
- .errstr_unpriv = "R1 !read_ok",
6348
- .errstr = "R1 !read_ok",
6349
- .result = REJECT,
6350
- .result_unpriv = REJECT,
6351
- },
6352
- {
6353
- "map element value with unaligned store",
6354
- .insns = {
6355
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6356
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6357
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6358
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6359
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6360
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
6361
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6362
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6363
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
6364
- BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
6365
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6366
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
6367
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
6368
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
6369
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
6370
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
6371
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
6372
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
6373
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
6374
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
6375
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
6376
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
6377
- BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
6378
- BPF_EXIT_INSN(),
6379
- },
6380
- .fixup_map2 = { 3 },
6381
- .errstr_unpriv = "R0 leaks addr",
6382
- .result = ACCEPT,
6383
- .result_unpriv = REJECT,
6384
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6385
- },
6386
- {
6387
- "map element value with unaligned load",
6388
- .insns = {
6389
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6390
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6391
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6392
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6393
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6394
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6395
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6396
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
6397
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6398
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6399
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
6400
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6401
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
6402
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
6403
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
6404
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6405
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
6406
- BPF_EXIT_INSN(),
6407
- },
6408
- .fixup_map2 = { 3 },
6409
- .errstr_unpriv = "R0 leaks addr",
6410
- .result = ACCEPT,
6411
- .result_unpriv = REJECT,
6412
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6413
- },
6414
- {
6415
- "map element value illegal alu op, 1",
6416
- .insns = {
6417
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6418
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6419
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6420
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6421
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6422
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6423
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
6424
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6425
- BPF_EXIT_INSN(),
6426
- },
6427
- .fixup_map2 = { 3 },
6428
- .errstr = "R0 bitwise operator &= on pointer",
6429
- .result = REJECT,
6430
- },
6431
- {
6432
- "map element value illegal alu op, 2",
6433
- .insns = {
6434
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6435
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6436
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6437
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6438
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6439
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6440
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
6441
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6442
- BPF_EXIT_INSN(),
6443
- },
6444
- .fixup_map2 = { 3 },
6445
- .errstr = "R0 32-bit pointer arithmetic prohibited",
6446
- .result = REJECT,
6447
- },
6448
- {
6449
- "map element value illegal alu op, 3",
6450
- .insns = {
6451
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6452
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6453
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6454
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6455
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6456
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6457
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
6458
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6459
- BPF_EXIT_INSN(),
6460
- },
6461
- .fixup_map2 = { 3 },
6462
- .errstr = "R0 pointer arithmetic with /= operator",
6463
- .result = REJECT,
6464
- },
6465
- {
6466
- "map element value illegal alu op, 4",
6467
- .insns = {
6468
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6469
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6470
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6471
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6472
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6473
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6474
- BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
6475
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6476
- BPF_EXIT_INSN(),
6477
- },
6478
- .fixup_map2 = { 3 },
6479
- .errstr_unpriv = "R0 pointer arithmetic prohibited",
6480
- .errstr = "invalid mem access 'inv'",
6481
- .result = REJECT,
6482
- .result_unpriv = REJECT,
6483
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6484
- },
6485
- {
6486
- "map element value illegal alu op, 5",
6487
- .insns = {
6488
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6489
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6490
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6491
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6492
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6493
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6494
- BPF_MOV64_IMM(BPF_REG_3, 4096),
6495
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6496
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6497
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6498
- BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
6499
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
6500
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6501
- BPF_EXIT_INSN(),
6502
- },
6503
- .fixup_map2 = { 3 },
6504
- .errstr = "R0 invalid mem access 'inv'",
6505
- .result = REJECT,
6506
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6507
- },
6508
- {
6509
- "map element value is preserved across register spilling",
6510
- .insns = {
6511
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6512
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6513
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6514
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6515
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6516
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6517
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
6518
- offsetof(struct test_val, foo)),
6519
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6520
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6521
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6522
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6523
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6524
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6525
- BPF_EXIT_INSN(),
6526
- },
6527
- .fixup_map2 = { 3 },
6528
- .errstr_unpriv = "R0 leaks addr",
6529
- .result = ACCEPT,
6530
- .result_unpriv = REJECT,
6531
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6532
- },
6533
- {
6534
- "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
6535
- .insns = {
6536
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6537
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6538
- BPF_MOV64_IMM(BPF_REG_0, 0),
6539
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6540
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6541
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6542
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6543
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6544
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6545
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6546
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6547
- BPF_MOV64_IMM(BPF_REG_2, 16),
6548
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6549
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6550
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6551
- BPF_MOV64_IMM(BPF_REG_4, 0),
6552
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6553
- BPF_MOV64_IMM(BPF_REG_3, 0),
6554
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6555
- BPF_MOV64_IMM(BPF_REG_0, 0),
6556
- BPF_EXIT_INSN(),
6557
- },
6558
- .result = ACCEPT,
6559
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6560
- },
6561
- {
6562
- "helper access to variable memory: stack, bitwise AND, zero included",
6563
- .insns = {
6564
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6565
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6566
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6567
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6568
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6569
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6570
- BPF_MOV64_IMM(BPF_REG_3, 0),
6571
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6572
- BPF_EXIT_INSN(),
6573
- },
6574
- .errstr = "invalid indirect read from stack off -64+0 size 64",
6575
- .result = REJECT,
6576
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6577
- },
6578
- {
6579
- "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
6580
- .insns = {
6581
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6582
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6583
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6584
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6585
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6586
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
6587
- BPF_MOV64_IMM(BPF_REG_4, 0),
6588
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6589
- BPF_MOV64_IMM(BPF_REG_3, 0),
6590
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6591
- BPF_MOV64_IMM(BPF_REG_0, 0),
6592
- BPF_EXIT_INSN(),
6593
- },
6594
- .errstr = "invalid stack type R1 off=-64 access_size=65",
6595
- .result = REJECT,
6596
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6597
- },
6598
- {
6599
- "helper access to variable memory: stack, JMP, correct bounds",
6600
- .insns = {
6601
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6602
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6603
- BPF_MOV64_IMM(BPF_REG_0, 0),
6604
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6605
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6606
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6607
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6608
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6609
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6610
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6611
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6612
- BPF_MOV64_IMM(BPF_REG_2, 16),
6613
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6614
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6615
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
6616
- BPF_MOV64_IMM(BPF_REG_4, 0),
6617
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6618
- BPF_MOV64_IMM(BPF_REG_3, 0),
6619
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6620
- BPF_MOV64_IMM(BPF_REG_0, 0),
6621
- BPF_EXIT_INSN(),
6622
- },
6623
- .result = ACCEPT,
6624
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6625
- },
6626
- {
6627
- "helper access to variable memory: stack, JMP (signed), correct bounds",
6628
- .insns = {
6629
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6630
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6631
- BPF_MOV64_IMM(BPF_REG_0, 0),
6632
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6633
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6634
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6635
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6636
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6637
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6638
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6639
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6640
- BPF_MOV64_IMM(BPF_REG_2, 16),
6641
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6642
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6643
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
6644
- BPF_MOV64_IMM(BPF_REG_4, 0),
6645
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6646
- BPF_MOV64_IMM(BPF_REG_3, 0),
6647
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6648
- BPF_MOV64_IMM(BPF_REG_0, 0),
6649
- BPF_EXIT_INSN(),
6650
- },
6651
- .result = ACCEPT,
6652
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6653
- },
6654
- {
6655
- "helper access to variable memory: stack, JMP, bounds + offset",
6656
- .insns = {
6657
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6658
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6659
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6660
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6661
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6662
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
6663
- BPF_MOV64_IMM(BPF_REG_4, 0),
6664
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
6665
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6666
- BPF_MOV64_IMM(BPF_REG_3, 0),
6667
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6668
- BPF_MOV64_IMM(BPF_REG_0, 0),
6669
- BPF_EXIT_INSN(),
6670
- },
6671
- .errstr = "invalid stack type R1 off=-64 access_size=65",
6672
- .result = REJECT,
6673
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6674
- },
6675
- {
6676
- "helper access to variable memory: stack, JMP, wrong max",
6677
- .insns = {
6678
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6679
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6680
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6681
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6682
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6683
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
6684
- BPF_MOV64_IMM(BPF_REG_4, 0),
6685
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6686
- BPF_MOV64_IMM(BPF_REG_3, 0),
6687
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6688
- BPF_MOV64_IMM(BPF_REG_0, 0),
6689
- BPF_EXIT_INSN(),
6690
- },
6691
- .errstr = "invalid stack type R1 off=-64 access_size=65",
6692
- .result = REJECT,
6693
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6694
- },
6695
- {
6696
- "helper access to variable memory: stack, JMP, no max check",
6697
- .insns = {
6698
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6699
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6700
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6701
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6702
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6703
- BPF_MOV64_IMM(BPF_REG_4, 0),
6704
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6705
- BPF_MOV64_IMM(BPF_REG_3, 0),
6706
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6707
- BPF_MOV64_IMM(BPF_REG_0, 0),
6708
- BPF_EXIT_INSN(),
6709
- },
6710
- /* because max wasn't checked, signed min is negative */
6711
- .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
6712
- .result = REJECT,
6713
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6714
- },
6715
- {
6716
- "helper access to variable memory: stack, JMP, no min check",
6717
- .insns = {
6718
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6719
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6720
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6721
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6722
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6723
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
6724
- BPF_MOV64_IMM(BPF_REG_3, 0),
6725
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6726
- BPF_MOV64_IMM(BPF_REG_0, 0),
6727
- BPF_EXIT_INSN(),
6728
- },
6729
- .errstr = "invalid indirect read from stack off -64+0 size 64",
6730
- .result = REJECT,
6731
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6732
- },
6733
- {
6734
- "helper access to variable memory: stack, JMP (signed), no min check",
6735
- .insns = {
6736
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6737
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6738
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6739
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6740
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6741
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
6742
- BPF_MOV64_IMM(BPF_REG_3, 0),
6743
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6744
- BPF_MOV64_IMM(BPF_REG_0, 0),
6745
- BPF_EXIT_INSN(),
6746
- },
6747
- .errstr = "R2 min value is negative",
6748
- .result = REJECT,
6749
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6750
- },
6751
- {
6752
- "helper access to variable memory: map, JMP, correct bounds",
6753
- .insns = {
6754
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6755
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6756
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6757
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6758
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6759
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6760
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6761
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6762
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6763
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6764
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6765
- sizeof(struct test_val), 4),
6766
- BPF_MOV64_IMM(BPF_REG_4, 0),
6767
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6768
- BPF_MOV64_IMM(BPF_REG_3, 0),
6769
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6770
- BPF_MOV64_IMM(BPF_REG_0, 0),
6771
- BPF_EXIT_INSN(),
6772
- },
6773
- .fixup_map2 = { 3 },
6774
- .result = ACCEPT,
6775
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6776
- },
6777
- {
6778
- "helper access to variable memory: map, JMP, wrong max",
6779
- .insns = {
6780
- BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
6781
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6782
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6783
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6784
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6785
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6786
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6787
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6788
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
6789
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6790
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6791
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6792
- sizeof(struct test_val) + 1, 4),
6793
- BPF_MOV64_IMM(BPF_REG_4, 0),
6794
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6795
- BPF_MOV64_IMM(BPF_REG_3, 0),
6796
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6797
- BPF_MOV64_IMM(BPF_REG_0, 0),
6798
- BPF_EXIT_INSN(),
6799
- },
6800
- .fixup_map2 = { 4 },
6801
- .errstr = "invalid access to map value, value_size=48 off=0 size=49",
6802
- .result = REJECT,
6803
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6804
- },
6805
- {
6806
- "helper access to variable memory: map adjusted, JMP, correct bounds",
6807
- .insns = {
6808
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6809
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6810
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6811
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6812
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6813
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6814
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6815
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6816
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6817
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6818
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6819
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6820
- sizeof(struct test_val) - 20, 4),
6821
- BPF_MOV64_IMM(BPF_REG_4, 0),
6822
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6823
- BPF_MOV64_IMM(BPF_REG_3, 0),
6824
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6825
- BPF_MOV64_IMM(BPF_REG_0, 0),
6826
- BPF_EXIT_INSN(),
6827
- },
6828
- .fixup_map2 = { 3 },
6829
- .result = ACCEPT,
6830
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6831
- },
6832
- {
6833
- "helper access to variable memory: map adjusted, JMP, wrong max",
6834
- .insns = {
6835
- BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
6836
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6837
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6838
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6839
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6840
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6841
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6842
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6843
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6844
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
6845
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6846
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6847
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6848
- sizeof(struct test_val) - 19, 4),
6849
- BPF_MOV64_IMM(BPF_REG_4, 0),
6850
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6851
- BPF_MOV64_IMM(BPF_REG_3, 0),
6852
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
6853
- BPF_MOV64_IMM(BPF_REG_0, 0),
6854
- BPF_EXIT_INSN(),
6855
- },
6856
- .fixup_map2 = { 4 },
6857
- .errstr = "R1 min value is outside of the array range",
6858
- .result = REJECT,
6859
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6860
- },
6861
- {
6862
- "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6863
- .insns = {
6864
- BPF_MOV64_IMM(BPF_REG_1, 0),
6865
- BPF_MOV64_IMM(BPF_REG_2, 0),
6866
- BPF_MOV64_IMM(BPF_REG_3, 0),
6867
- BPF_MOV64_IMM(BPF_REG_4, 0),
6868
- BPF_MOV64_IMM(BPF_REG_5, 0),
6869
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6870
- BPF_EXIT_INSN(),
6871
- },
6872
- .result = ACCEPT,
6873
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6874
- },
6875
- {
6876
- "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6877
- .insns = {
6878
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6879
- BPF_MOV64_IMM(BPF_REG_1, 0),
6880
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6881
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6882
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6883
- BPF_MOV64_IMM(BPF_REG_3, 0),
6884
- BPF_MOV64_IMM(BPF_REG_4, 0),
6885
- BPF_MOV64_IMM(BPF_REG_5, 0),
6886
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6887
- BPF_EXIT_INSN(),
6888
- },
6889
- .errstr = "R1 type=inv expected=fp",
6890
- .result = REJECT,
6891
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6892
- },
6893
- {
6894
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6895
- .insns = {
6896
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6897
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6898
- BPF_MOV64_IMM(BPF_REG_2, 0),
6899
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6900
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6901
- BPF_MOV64_IMM(BPF_REG_3, 0),
6902
- BPF_MOV64_IMM(BPF_REG_4, 0),
6903
- BPF_MOV64_IMM(BPF_REG_5, 0),
6904
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6905
- BPF_EXIT_INSN(),
6906
- },
6907
- .result = ACCEPT,
6908
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6909
- },
6910
- {
6911
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6912
- .insns = {
6913
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6914
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6915
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6916
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6917
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6918
- BPF_FUNC_map_lookup_elem),
6919
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6920
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6921
- BPF_MOV64_IMM(BPF_REG_2, 0),
6922
- BPF_MOV64_IMM(BPF_REG_3, 0),
6923
- BPF_MOV64_IMM(BPF_REG_4, 0),
6924
- BPF_MOV64_IMM(BPF_REG_5, 0),
6925
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6926
- BPF_EXIT_INSN(),
6927
- },
6928
- .fixup_map1 = { 3 },
6929
- .result = ACCEPT,
6930
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6931
- },
6932
- {
6933
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6934
- .insns = {
6935
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6936
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6937
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6938
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6939
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6940
- BPF_FUNC_map_lookup_elem),
6941
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6942
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6943
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6944
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6945
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6946
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6947
- BPF_MOV64_IMM(BPF_REG_3, 0),
6948
- BPF_MOV64_IMM(BPF_REG_4, 0),
6949
- BPF_MOV64_IMM(BPF_REG_5, 0),
6950
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6951
- BPF_EXIT_INSN(),
6952
- },
6953
- .fixup_map1 = { 3 },
6954
- .result = ACCEPT,
6955
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6956
- },
6957
- {
6958
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6959
- .insns = {
6960
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6961
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6962
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6963
- BPF_LD_MAP_FD(BPF_REG_1, 0),
6964
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6965
- BPF_FUNC_map_lookup_elem),
6966
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6967
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6968
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6969
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6970
- BPF_MOV64_IMM(BPF_REG_3, 0),
6971
- BPF_MOV64_IMM(BPF_REG_4, 0),
6972
- BPF_MOV64_IMM(BPF_REG_5, 0),
6973
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6974
- BPF_EXIT_INSN(),
6975
- },
6976
- .fixup_map1 = { 3 },
6977
- .result = ACCEPT,
6978
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6979
- },
6980
- {
6981
- "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
6982
- .insns = {
6983
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6984
- offsetof(struct __sk_buff, data)),
6985
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6986
- offsetof(struct __sk_buff, data_end)),
6987
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6988
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6989
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6990
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6991
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6992
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6993
- BPF_MOV64_IMM(BPF_REG_3, 0),
6994
- BPF_MOV64_IMM(BPF_REG_4, 0),
6995
- BPF_MOV64_IMM(BPF_REG_5, 0),
6996
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6997
- BPF_EXIT_INSN(),
6998
- },
6999
- .result = ACCEPT,
7000
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7001
- .retval = 0 /* csum_diff of 64-byte packet */,
7002
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7003
- },
7004
- {
7005
- "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7006
- .insns = {
7007
- BPF_MOV64_IMM(BPF_REG_1, 0),
7008
- BPF_MOV64_IMM(BPF_REG_2, 0),
7009
- BPF_MOV64_IMM(BPF_REG_3, 0),
7010
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
7011
- BPF_EXIT_INSN(),
7012
- },
7013
- .errstr = "R1 type=inv expected=fp",
7014
- .result = REJECT,
7015
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7016
- },
7017
- {
7018
- "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7019
- .insns = {
7020
- BPF_MOV64_IMM(BPF_REG_1, 0),
7021
- BPF_MOV64_IMM(BPF_REG_2, 1),
7022
- BPF_MOV64_IMM(BPF_REG_3, 0),
7023
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
7024
- BPF_EXIT_INSN(),
7025
- },
7026
- .errstr = "R1 type=inv expected=fp",
7027
- .result = REJECT,
7028
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7029
- },
7030
- {
7031
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7032
- .insns = {
7033
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7034
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7035
- BPF_MOV64_IMM(BPF_REG_2, 0),
7036
- BPF_MOV64_IMM(BPF_REG_3, 0),
7037
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
7038
- BPF_EXIT_INSN(),
7039
- },
7040
- .result = ACCEPT,
7041
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7042
- },
7043
- {
7044
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7045
- .insns = {
7046
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7047
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7048
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7049
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7050
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7051
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7052
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7053
- BPF_MOV64_IMM(BPF_REG_2, 0),
7054
- BPF_MOV64_IMM(BPF_REG_3, 0),
7055
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
7056
- BPF_EXIT_INSN(),
7057
- },
7058
- .fixup_map1 = { 3 },
7059
- .result = ACCEPT,
7060
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7061
- },
7062
- {
7063
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7064
- .insns = {
7065
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7066
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7067
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7068
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7069
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7070
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7071
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7072
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7073
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7074
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7075
- BPF_MOV64_IMM(BPF_REG_3, 0),
7076
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
7077
- BPF_EXIT_INSN(),
7078
- },
7079
- .fixup_map1 = { 3 },
7080
- .result = ACCEPT,
7081
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7082
- },
7083
- {
7084
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7085
- .insns = {
7086
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7087
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7088
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7089
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7090
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7091
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7092
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7093
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7094
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7095
- BPF_MOV64_IMM(BPF_REG_3, 0),
7096
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
7097
- BPF_EXIT_INSN(),
7098
- },
7099
- .fixup_map1 = { 3 },
7100
- .result = ACCEPT,
7101
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7102
- },
7103
- {
7104
- "helper access to variable memory: 8 bytes leak",
7105
- .insns = {
7106
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
7107
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7108
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7109
- BPF_MOV64_IMM(BPF_REG_0, 0),
7110
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7111
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7112
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7113
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7114
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7115
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7116
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7117
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7118
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7119
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7120
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7121
- BPF_MOV64_IMM(BPF_REG_3, 0),
7122
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
7123
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7124
- BPF_EXIT_INSN(),
7125
- },
7126
- .errstr = "invalid indirect read from stack off -64+32 size 64",
7127
- .result = REJECT,
7128
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7129
- },
7130
- {
7131
- "helper access to variable memory: 8 bytes no leak (init memory)",
7132
- .insns = {
7133
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7134
- BPF_MOV64_IMM(BPF_REG_0, 0),
7135
- BPF_MOV64_IMM(BPF_REG_0, 0),
7136
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7137
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7138
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7139
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7140
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7141
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7142
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7143
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7144
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7145
- BPF_MOV64_IMM(BPF_REG_2, 0),
7146
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7147
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7148
- BPF_MOV64_IMM(BPF_REG_3, 0),
7149
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
7150
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7151
- BPF_EXIT_INSN(),
7152
- },
7153
- .result = ACCEPT,
7154
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7155
- },
7156
- {
7157
- "invalid and of negative number",
7158
- .insns = {
7159
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7160
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7161
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7162
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7163
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7164
- BPF_FUNC_map_lookup_elem),
7165
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7166
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7167
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7168
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7169
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7170
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7171
- offsetof(struct test_val, foo)),
7172
- BPF_EXIT_INSN(),
7173
- },
7174
- .fixup_map2 = { 3 },
7175
- .errstr = "R0 max value is outside of the array range",
7176
- .result = REJECT,
7177
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7178
- },
7179
- {
7180
- "invalid range check",
7181
- .insns = {
7182
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7183
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7184
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7185
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7186
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7187
- BPF_FUNC_map_lookup_elem),
7188
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7189
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7190
- BPF_MOV64_IMM(BPF_REG_9, 1),
7191
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7192
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7193
- BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7194
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7195
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7196
- BPF_MOV32_IMM(BPF_REG_3, 1),
7197
- BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7198
- BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7199
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7200
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7201
- BPF_MOV64_REG(BPF_REG_0, 0),
7202
- BPF_EXIT_INSN(),
7203
- },
7204
- .fixup_map2 = { 3 },
7205
- .errstr = "R0 max value is outside of the array range",
7206
- .result = REJECT,
7207
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7208
- },
7209
- {
7210
- "map in map access",
7211
- .insns = {
7212
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7213
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7214
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7215
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7216
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7217
- BPF_FUNC_map_lookup_elem),
7218
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7219
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7220
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7221
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7222
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7223
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7224
- BPF_FUNC_map_lookup_elem),
7225
- BPF_MOV64_IMM(BPF_REG_0, 0),
7226
- BPF_EXIT_INSN(),
7227
- },
7228
- .fixup_map_in_map = { 3 },
7229
- .result = ACCEPT,
7230
- },
7231
- {
7232
- "invalid inner map pointer",
7233
- .insns = {
7234
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7235
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7236
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7237
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7238
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7239
- BPF_FUNC_map_lookup_elem),
7240
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7241
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7242
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7243
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7244
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7245
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7246
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7247
- BPF_FUNC_map_lookup_elem),
7248
- BPF_MOV64_IMM(BPF_REG_0, 0),
7249
- BPF_EXIT_INSN(),
7250
- },
7251
- .fixup_map_in_map = { 3 },
7252
- .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
7253
- .result = REJECT,
7254
- },
7255
- {
7256
- "forgot null checking on the inner map pointer",
7257
- .insns = {
7258
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7259
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7260
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7261
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7262
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7263
- BPF_FUNC_map_lookup_elem),
7264
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7265
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7266
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7267
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7268
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7269
- BPF_FUNC_map_lookup_elem),
7270
- BPF_MOV64_IMM(BPF_REG_0, 0),
7271
- BPF_EXIT_INSN(),
7272
- },
7273
- .fixup_map_in_map = { 3 },
7274
- .errstr = "R1 type=map_value_or_null expected=map_ptr",
7275
- .result = REJECT,
7276
- },
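
The three map-in-map cases above lean on an outer map whose values are themselves map fds; the .fixup_map_in_map = { 3 } entry marks the BPF_LD_MAP_FD slot the harness patches with such an fd. A rough user-space sketch of that fixture, assuming the legacy bpf_create_map()/bpf_create_map_in_map() wrappers in this tree's libbpf (illustrative only, not part of the patch, and root is required):

#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int main(void)
{
	/* Inner map: what the second lookup above ultimately dereferences. */
	int inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	/* Outer map: its values are inner-map fds; this fd is what gets
	 * patched into the LD_MAP_FD slot of the test programs. */
	int outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
					     "test_in_map", sizeof(int),
					     inner_fd, 1, 0);

	if (inner_fd < 0 || outer_fd < 0) {
		perror("map creation");
		return 1;
	}
	printf("outer map-in-map fd: %d\n", outer_fd);
	close(inner_fd);
	close(outer_fd);
	return 0;
}
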
7277
- {
7278
- "ld_abs: check calling conv, r1",
7279
- .insns = {
7280
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7281
- BPF_MOV64_IMM(BPF_REG_1, 0),
7282
- BPF_LD_ABS(BPF_W, -0x200000),
7283
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7284
- BPF_EXIT_INSN(),
7285
- },
7286
- .errstr = "R1 !read_ok",
7287
- .result = REJECT,
7288
- },
7289
- {
7290
- "ld_abs: check calling conv, r2",
7291
- .insns = {
7292
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7293
- BPF_MOV64_IMM(BPF_REG_2, 0),
7294
- BPF_LD_ABS(BPF_W, -0x200000),
7295
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7296
- BPF_EXIT_INSN(),
7297
- },
7298
- .errstr = "R2 !read_ok",
7299
- .result = REJECT,
7300
- },
7301
- {
7302
- "ld_abs: check calling conv, r3",
7303
- .insns = {
7304
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7305
- BPF_MOV64_IMM(BPF_REG_3, 0),
7306
- BPF_LD_ABS(BPF_W, -0x200000),
7307
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7308
- BPF_EXIT_INSN(),
7309
- },
7310
- .errstr = "R3 !read_ok",
7311
- .result = REJECT,
7312
- },
7313
- {
7314
- "ld_abs: check calling conv, r4",
7315
- .insns = {
7316
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7317
- BPF_MOV64_IMM(BPF_REG_4, 0),
7318
- BPF_LD_ABS(BPF_W, -0x200000),
7319
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7320
- BPF_EXIT_INSN(),
7321
- },
7322
- .errstr = "R4 !read_ok",
7323
- .result = REJECT,
7324
- },
7325
- {
7326
- "ld_abs: check calling conv, r5",
7327
- .insns = {
7328
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7329
- BPF_MOV64_IMM(BPF_REG_5, 0),
7330
- BPF_LD_ABS(BPF_W, -0x200000),
7331
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7332
- BPF_EXIT_INSN(),
7333
- },
7334
- .errstr = "R5 !read_ok",
7335
- .result = REJECT,
7336
- },
7337
- {
7338
- "ld_abs: check calling conv, r7",
7339
- .insns = {
7340
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7341
- BPF_MOV64_IMM(BPF_REG_7, 0),
7342
- BPF_LD_ABS(BPF_W, -0x200000),
7343
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7344
- BPF_EXIT_INSN(),
7345
- },
7346
- .result = ACCEPT,
7347
- },
7348
- {
7349
- "ld_abs: tests on r6 and skb data reload helper",
7350
- .insns = {
7351
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7352
- BPF_LD_ABS(BPF_B, 0),
7353
- BPF_LD_ABS(BPF_H, 0),
7354
- BPF_LD_ABS(BPF_W, 0),
7355
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
7356
- BPF_MOV64_IMM(BPF_REG_6, 0),
7357
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
7358
- BPF_MOV64_IMM(BPF_REG_2, 1),
7359
- BPF_MOV64_IMM(BPF_REG_3, 2),
7360
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7361
- BPF_FUNC_skb_vlan_push),
7362
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
7363
- BPF_LD_ABS(BPF_B, 0),
7364
- BPF_LD_ABS(BPF_H, 0),
7365
- BPF_LD_ABS(BPF_W, 0),
7366
- BPF_MOV64_IMM(BPF_REG_0, 42),
7367
- BPF_EXIT_INSN(),
7368
- },
7369
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7370
- .result = ACCEPT,
7371
- .retval = 42 /* ultimate return value */,
7372
- },
7373
- {
7374
- "ld_ind: check calling conv, r1",
7375
- .insns = {
7376
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7377
- BPF_MOV64_IMM(BPF_REG_1, 1),
7378
- BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
7379
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7380
- BPF_EXIT_INSN(),
7381
- },
7382
- .errstr = "R1 !read_ok",
7383
- .result = REJECT,
7384
- },
7385
- {
7386
- "ld_ind: check calling conv, r2",
7387
- .insns = {
7388
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7389
- BPF_MOV64_IMM(BPF_REG_2, 1),
7390
- BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
7391
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7392
- BPF_EXIT_INSN(),
7393
- },
7394
- .errstr = "R2 !read_ok",
7395
- .result = REJECT,
7396
- },
7397
- {
7398
- "ld_ind: check calling conv, r3",
7399
- .insns = {
7400
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7401
- BPF_MOV64_IMM(BPF_REG_3, 1),
7402
- BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
7403
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7404
- BPF_EXIT_INSN(),
7405
- },
7406
- .errstr = "R3 !read_ok",
7407
- .result = REJECT,
7408
- },
7409
- {
7410
- "ld_ind: check calling conv, r4",
7411
- .insns = {
7412
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7413
- BPF_MOV64_IMM(BPF_REG_4, 1),
7414
- BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
7415
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7416
- BPF_EXIT_INSN(),
7417
- },
7418
- .errstr = "R4 !read_ok",
7419
- .result = REJECT,
7420
- },
7421
- {
7422
- "ld_ind: check calling conv, r5",
7423
- .insns = {
7424
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7425
- BPF_MOV64_IMM(BPF_REG_5, 1),
7426
- BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
7427
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7428
- BPF_EXIT_INSN(),
7429
- },
7430
- .errstr = "R5 !read_ok",
7431
- .result = REJECT,
7432
- },
7433
- {
7434
- "ld_ind: check calling conv, r7",
7435
- .insns = {
7436
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7437
- BPF_MOV64_IMM(BPF_REG_7, 1),
7438
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
7439
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7440
- BPF_EXIT_INSN(),
7441
- },
7442
- .result = ACCEPT,
7443
- .retval = 1,
7444
- },
7445
- {
7446
- "check bpf_perf_event_data->sample_period byte load permitted",
7447
- .insns = {
7448
- BPF_MOV64_IMM(BPF_REG_0, 0),
7449
-#if __BYTE_ORDER == __LITTLE_ENDIAN
7450
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7451
- offsetof(struct bpf_perf_event_data, sample_period)),
7452
-#else
7453
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7454
- offsetof(struct bpf_perf_event_data, sample_period) + 7),
7455
-#endif
7456
- BPF_EXIT_INSN(),
7457
- },
7458
- .result = ACCEPT,
7459
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7460
- },
7461
- {
7462
- "check bpf_perf_event_data->sample_period half load permitted",
7463
- .insns = {
7464
- BPF_MOV64_IMM(BPF_REG_0, 0),
7465
-#if __BYTE_ORDER == __LITTLE_ENDIAN
7466
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7467
- offsetof(struct bpf_perf_event_data, sample_period)),
7468
-#else
7469
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7470
- offsetof(struct bpf_perf_event_data, sample_period) + 6),
7471
-#endif
7472
- BPF_EXIT_INSN(),
7473
- },
7474
- .result = ACCEPT,
7475
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7476
- },
7477
- {
7478
- "check bpf_perf_event_data->sample_period word load permitted",
7479
- .insns = {
7480
- BPF_MOV64_IMM(BPF_REG_0, 0),
7481
-#if __BYTE_ORDER == __LITTLE_ENDIAN
7482
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7483
- offsetof(struct bpf_perf_event_data, sample_period)),
7484
-#else
7485
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7486
- offsetof(struct bpf_perf_event_data, sample_period) + 4),
7487
-#endif
7488
- BPF_EXIT_INSN(),
7489
- },
7490
- .result = ACCEPT,
7491
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7492
- },
7493
- {
7494
- "check bpf_perf_event_data->sample_period dword load permitted",
7495
- .insns = {
7496
- BPF_MOV64_IMM(BPF_REG_0, 0),
7497
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
7498
- offsetof(struct bpf_perf_event_data, sample_period)),
7499
- BPF_EXIT_INSN(),
7500
- },
7501
- .result = ACCEPT,
7502
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7503
- },
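
The four sample_period cases above check that 1-, 2-, 4- and 8-byte reads of the 64-bit field are all permitted; the #if __BYTE_ORDER arms only shift the offset so the same low-order bytes are read on big-endian hosts. A tiny host-side check of that offset arithmetic (illustrative only, not part of the patch; assumes the uapi headers are available, as the harness itself includes <linux/bpf_perf_event.h>):

#include <stdio.h>
#include <stddef.h>
#include <linux/bpf_perf_event.h>

int main(void)
{
	size_t off = offsetof(struct bpf_perf_event_data, sample_period);

	/* On big-endian targets the low byte, half and word of the 8-byte
	 * field sit at +7, +6 and +4 -- the adjustments used above. */
	printf("byte load at %zu, half at %zu, word at %zu, dword at %zu\n",
	       off + 7, off + 6, off + 4, off);
	return 0;
}
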
7504
- {
7505
- "check skb->data half load not permitted",
7506
- .insns = {
7507
- BPF_MOV64_IMM(BPF_REG_0, 0),
7508
-#if __BYTE_ORDER == __LITTLE_ENDIAN
7509
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7510
- offsetof(struct __sk_buff, data)),
7511
-#else
7512
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7513
- offsetof(struct __sk_buff, data) + 2),
7514
-#endif
7515
- BPF_EXIT_INSN(),
7516
- },
7517
- .result = REJECT,
7518
- .errstr = "invalid bpf_context access",
7519
- },
7520
- {
7521
- "check skb->tc_classid half load not permitted for lwt prog",
7522
- .insns = {
7523
- BPF_MOV64_IMM(BPF_REG_0, 0),
7524
-#if __BYTE_ORDER == __LITTLE_ENDIAN
7525
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7526
- offsetof(struct __sk_buff, tc_classid)),
7527
-#else
7528
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7529
- offsetof(struct __sk_buff, tc_classid) + 2),
7530
-#endif
7531
- BPF_EXIT_INSN(),
7532
- },
7533
- .result = REJECT,
7534
- .errstr = "invalid bpf_context access",
7535
- .prog_type = BPF_PROG_TYPE_LWT_IN,
7536
- },
7537
- {
7538
- "bounds checks mixing signed and unsigned, positive bounds",
7539
- .insns = {
7540
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7541
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7542
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7543
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7544
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7545
- BPF_FUNC_map_lookup_elem),
7546
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7547
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7548
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7549
- BPF_MOV64_IMM(BPF_REG_2, 2),
7550
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
7551
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
7552
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7553
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7554
- BPF_MOV64_IMM(BPF_REG_0, 0),
7555
- BPF_EXIT_INSN(),
7556
- },
7557
- .fixup_map1 = { 3 },
7558
- .errstr = "unbounded min value",
7559
- .result = REJECT,
7560
- },
7561
- {
7562
- "bounds checks mixing signed and unsigned",
7563
- .insns = {
7564
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7565
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7566
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7567
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7568
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7569
- BPF_FUNC_map_lookup_elem),
7570
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7571
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7572
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7573
- BPF_MOV64_IMM(BPF_REG_2, -1),
7574
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7575
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7576
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7577
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7578
- BPF_MOV64_IMM(BPF_REG_0, 0),
7579
- BPF_EXIT_INSN(),
7580
- },
7581
- .fixup_map1 = { 3 },
7582
- .errstr = "unbounded min value",
7583
- .result = REJECT,
7584
- },
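
The "mixing signed and unsigned" family that follows fails for one reason: an unsigned JGT against -1 (or a similarly huge constant) excludes nothing, so a negative value such as the -8 the test stores at fp-16 still reaches the signed JSGT check, and from the signed side alone the minimum stays unbounded. The same counterexample in plain C (illustrative only, not part of the patch; the comments refer to the registers of the case above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t r1 = -8;	/* the value the test stores at fp-16 */
	int64_t r2 = -1;

	/* BPF_JGT is an unsigned compare: r1 > (u64)-1 can never be true,
	 * so this branch excludes nothing. */
	if ((uint64_t)r1 > (uint64_t)r2)
		return 0;
	/* BPF_JSGT r1, 1 is signed: -8 is not greater than 1, so the
	 * "in range" path is reached with a negative index. */
	if (r1 > 1)
		return 0;
	printf("r1 = %lld would be added to the map value pointer\n",
	       (long long)r1);
	return 0;
}
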
7585
- {
7586
- "bounds checks mixing signed and unsigned, variant 2",
7587
- .insns = {
7588
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7589
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7590
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7591
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7592
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7593
- BPF_FUNC_map_lookup_elem),
7594
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7595
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7596
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7597
- BPF_MOV64_IMM(BPF_REG_2, -1),
7598
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7599
- BPF_MOV64_IMM(BPF_REG_8, 0),
7600
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
7601
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7602
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7603
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7604
- BPF_MOV64_IMM(BPF_REG_0, 0),
7605
- BPF_EXIT_INSN(),
7606
- },
7607
- .fixup_map1 = { 3 },
7608
- .errstr = "unbounded min value",
7609
- .result = REJECT,
7610
- },
7611
- {
7612
- "bounds checks mixing signed and unsigned, variant 3",
7613
- .insns = {
7614
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7615
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7616
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7617
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7618
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7619
- BPF_FUNC_map_lookup_elem),
7620
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7621
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7622
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7623
- BPF_MOV64_IMM(BPF_REG_2, -1),
7624
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
7625
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
7626
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7627
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7628
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7629
- BPF_MOV64_IMM(BPF_REG_0, 0),
7630
- BPF_EXIT_INSN(),
7631
- },
7632
- .fixup_map1 = { 3 },
7633
- .errstr = "unbounded min value",
7634
- .result = REJECT,
7635
- },
7636
- {
7637
- "bounds checks mixing signed and unsigned, variant 4",
7638
- .insns = {
7639
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7640
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7641
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7642
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7643
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7644
- BPF_FUNC_map_lookup_elem),
7645
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7646
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7647
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7648
- BPF_MOV64_IMM(BPF_REG_2, 1),
7649
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
7650
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7651
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7652
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7653
- BPF_MOV64_IMM(BPF_REG_0, 0),
7654
- BPF_EXIT_INSN(),
7655
- },
7656
- .fixup_map1 = { 3 },
7657
- .result = ACCEPT,
7658
- },
7659
- {
7660
- "bounds checks mixing signed and unsigned, variant 5",
7661
- .insns = {
7662
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7663
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7664
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7665
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7666
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7667
- BPF_FUNC_map_lookup_elem),
7668
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7669
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7670
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7671
- BPF_MOV64_IMM(BPF_REG_2, -1),
7672
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7673
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
7674
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
7675
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7676
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7677
- BPF_MOV64_IMM(BPF_REG_0, 0),
7678
- BPF_EXIT_INSN(),
7679
- },
7680
- .fixup_map1 = { 3 },
7681
- .errstr = "unbounded min value",
7682
- .result = REJECT,
7683
- },
7684
- {
7685
- "bounds checks mixing signed and unsigned, variant 6",
7686
- .insns = {
7687
- BPF_MOV64_IMM(BPF_REG_2, 0),
7688
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
7689
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
7690
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7691
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
7692
- BPF_MOV64_IMM(BPF_REG_6, -1),
7693
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
7694
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
7695
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7696
- BPF_MOV64_IMM(BPF_REG_5, 0),
7697
- BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
7698
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7699
- BPF_FUNC_skb_load_bytes),
7700
- BPF_MOV64_IMM(BPF_REG_0, 0),
7701
- BPF_EXIT_INSN(),
7702
- },
7703
- .errstr = "R4 min value is negative, either use unsigned",
7704
- .result = REJECT,
7705
- },
7706
- {
7707
- "bounds checks mixing signed and unsigned, variant 7",
7708
- .insns = {
7709
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7710
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7711
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7712
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7713
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7714
- BPF_FUNC_map_lookup_elem),
7715
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7716
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7717
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7718
- BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
7719
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7720
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7721
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7722
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7723
- BPF_MOV64_IMM(BPF_REG_0, 0),
7724
- BPF_EXIT_INSN(),
7725
- },
7726
- .fixup_map1 = { 3 },
7727
- .result = ACCEPT,
7728
- },
7729
- {
7730
- "bounds checks mixing signed and unsigned, variant 8",
7731
- .insns = {
7732
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7733
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7734
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7735
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7736
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7737
- BPF_FUNC_map_lookup_elem),
7738
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7739
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7740
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7741
- BPF_MOV64_IMM(BPF_REG_2, -1),
7742
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7743
- BPF_MOV64_IMM(BPF_REG_0, 0),
7744
- BPF_EXIT_INSN(),
7745
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7746
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7747
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7748
- BPF_MOV64_IMM(BPF_REG_0, 0),
7749
- BPF_EXIT_INSN(),
7750
- },
7751
- .fixup_map1 = { 3 },
7752
- .errstr = "unbounded min value",
7753
- .result = REJECT,
7754
- },
7755
- {
7756
- "bounds checks mixing signed and unsigned, variant 9",
7757
- .insns = {
7758
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7759
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7760
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7761
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7762
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7763
- BPF_FUNC_map_lookup_elem),
7764
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7765
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7766
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7767
- BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
7768
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7769
- BPF_MOV64_IMM(BPF_REG_0, 0),
7770
- BPF_EXIT_INSN(),
7771
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7772
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7773
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7774
- BPF_MOV64_IMM(BPF_REG_0, 0),
7775
- BPF_EXIT_INSN(),
7776
- },
7777
- .fixup_map1 = { 3 },
7778
- .result = ACCEPT,
7779
- },
7780
- {
7781
- "bounds checks mixing signed and unsigned, variant 10",
7782
- .insns = {
7783
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7784
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7785
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7786
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7787
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7788
- BPF_FUNC_map_lookup_elem),
7789
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7790
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7791
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7792
- BPF_MOV64_IMM(BPF_REG_2, 0),
7793
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7794
- BPF_MOV64_IMM(BPF_REG_0, 0),
7795
- BPF_EXIT_INSN(),
7796
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7797
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7798
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7799
- BPF_MOV64_IMM(BPF_REG_0, 0),
7800
- BPF_EXIT_INSN(),
7801
- },
7802
- .fixup_map1 = { 3 },
7803
- .errstr = "unbounded min value",
7804
- .result = REJECT,
7805
- },
7806
- {
7807
- "bounds checks mixing signed and unsigned, variant 11",
7808
- .insns = {
7809
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7810
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7811
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7812
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7813
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7814
- BPF_FUNC_map_lookup_elem),
7815
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7816
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7817
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7818
- BPF_MOV64_IMM(BPF_REG_2, -1),
7819
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7820
- /* Dead branch. */
7821
- BPF_MOV64_IMM(BPF_REG_0, 0),
7822
- BPF_EXIT_INSN(),
7823
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7824
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7825
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7826
- BPF_MOV64_IMM(BPF_REG_0, 0),
7827
- BPF_EXIT_INSN(),
7828
- },
7829
- .fixup_map1 = { 3 },
7830
- .errstr = "unbounded min value",
7831
- .result = REJECT,
7832
- },
7833
- {
7834
- "bounds checks mixing signed and unsigned, variant 12",
7835
- .insns = {
7836
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7837
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7838
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7839
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7840
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7841
- BPF_FUNC_map_lookup_elem),
7842
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7843
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7844
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7845
- BPF_MOV64_IMM(BPF_REG_2, -6),
7846
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7847
- BPF_MOV64_IMM(BPF_REG_0, 0),
7848
- BPF_EXIT_INSN(),
7849
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7850
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7851
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7852
- BPF_MOV64_IMM(BPF_REG_0, 0),
7853
- BPF_EXIT_INSN(),
7854
- },
7855
- .fixup_map1 = { 3 },
7856
- .errstr = "unbounded min value",
7857
- .result = REJECT,
7858
- },
7859
- {
7860
- "bounds checks mixing signed and unsigned, variant 13",
7861
- .insns = {
7862
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7863
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7864
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7865
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7866
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7867
- BPF_FUNC_map_lookup_elem),
7868
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7869
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7870
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7871
- BPF_MOV64_IMM(BPF_REG_2, 2),
7872
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7873
- BPF_MOV64_IMM(BPF_REG_7, 1),
7874
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7875
- BPF_MOV64_IMM(BPF_REG_0, 0),
7876
- BPF_EXIT_INSN(),
7877
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7878
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7879
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7880
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7881
- BPF_MOV64_IMM(BPF_REG_0, 0),
7882
- BPF_EXIT_INSN(),
7883
- },
7884
- .fixup_map1 = { 3 },
7885
- .errstr = "unbounded min value",
7886
- .result = REJECT,
7887
- },
7888
- {
7889
- "bounds checks mixing signed and unsigned, variant 14",
7890
- .insns = {
7891
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7892
- offsetof(struct __sk_buff, mark)),
7893
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7894
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7895
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7896
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7897
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7898
- BPF_FUNC_map_lookup_elem),
7899
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7900
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7901
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7902
- BPF_MOV64_IMM(BPF_REG_2, -1),
7903
- BPF_MOV64_IMM(BPF_REG_8, 2),
7904
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7905
- BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7906
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7907
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7908
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7909
- BPF_MOV64_IMM(BPF_REG_0, 0),
7910
- BPF_EXIT_INSN(),
7911
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7912
- BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7913
- },
7914
- .fixup_map1 = { 4 },
7915
- .errstr = "unbounded min value",
7916
- .result = REJECT,
7917
- },
7918
- {
7919
- "bounds checks mixing signed and unsigned, variant 15",
7920
- .insns = {
7921
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7922
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7923
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7924
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7925
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7926
- BPF_FUNC_map_lookup_elem),
7927
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7928
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7929
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7930
- BPF_MOV64_IMM(BPF_REG_2, -6),
7931
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7932
- BPF_MOV64_IMM(BPF_REG_0, 0),
7933
- BPF_EXIT_INSN(),
7934
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7935
- BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7936
- BPF_MOV64_IMM(BPF_REG_0, 0),
7937
- BPF_EXIT_INSN(),
7938
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7939
- BPF_MOV64_IMM(BPF_REG_0, 0),
7940
- BPF_EXIT_INSN(),
7941
- },
7942
- .fixup_map1 = { 3 },
7943
- .errstr = "unbounded min value",
7944
- .result = REJECT,
7945
- .result_unpriv = REJECT,
7946
- },
7947
- {
7948
- "subtraction bounds (map value) variant 1",
7949
- .insns = {
7950
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7951
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7952
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7953
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7954
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7955
- BPF_FUNC_map_lookup_elem),
7956
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7957
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7958
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7959
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7960
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7961
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7962
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7963
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7964
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7965
- BPF_EXIT_INSN(),
7966
- BPF_MOV64_IMM(BPF_REG_0, 0),
7967
- BPF_EXIT_INSN(),
7968
- },
7969
- .fixup_map1 = { 3 },
7970
- .errstr = "R0 max value is outside of the array range",
7971
- .result = REJECT,
7972
- },
7973
- {
7974
- "subtraction bounds (map value) variant 2",
7975
- .insns = {
7976
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7977
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7978
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7979
- BPF_LD_MAP_FD(BPF_REG_1, 0),
7980
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7981
- BPF_FUNC_map_lookup_elem),
7982
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7983
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7984
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7985
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7986
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7987
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7988
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7989
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7990
- BPF_EXIT_INSN(),
7991
- BPF_MOV64_IMM(BPF_REG_0, 0),
7992
- BPF_EXIT_INSN(),
7993
- },
7994
- .fixup_map1 = { 3 },
7995
- .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7996
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
7997
- .result = REJECT,
7998
- },
7999
- {
8000
- "bounds check based on zero-extended MOV",
8001
- .insns = {
8002
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8003
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8004
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8005
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8006
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8007
- BPF_FUNC_map_lookup_elem),
8008
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8009
- /* r2 = 0x0000'0000'ffff'ffff */
8010
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8011
- /* r2 = 0 */
8012
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8013
- /* no-op */
8014
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8015
- /* access at offset 0 */
8016
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8017
- /* exit */
8018
- BPF_MOV64_IMM(BPF_REG_0, 0),
8019
- BPF_EXIT_INSN(),
8020
- },
8021
- .fixup_map1 = { 3 },
8022
- .result = ACCEPT
8023
- },
8024
- {
8025
- "bounds check based on sign-extended MOV. test1",
8026
- .insns = {
8027
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8028
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8029
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8030
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8031
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8032
- BPF_FUNC_map_lookup_elem),
8033
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8034
- /* r2 = 0xffff'ffff'ffff'ffff */
8035
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8036
- /* r2 = 0xffff'ffff */
8037
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8038
- /* r0 = <oob pointer> */
8039
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8040
- /* access to OOB pointer */
8041
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8042
- /* exit */
8043
- BPF_MOV64_IMM(BPF_REG_0, 0),
8044
- BPF_EXIT_INSN(),
8045
- },
8046
- .fixup_map1 = { 3 },
8047
- .errstr = "map_value pointer and 4294967295",
8048
- .result = REJECT
8049
- },
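
The two cases above hinge on how MOV immediates extend: the 32-bit ALU move zero-extends, so the later right shift by 32 yields exactly 0 and the pointer is unchanged (ACCEPT), while BPF_MOV64_IMM sign-extends its 32-bit immediate, so the same shift leaves 0xffffffff and the pointer arithmetic goes out of bounds (the REJECT in test1). The same arithmetic in plain C (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* BPF_MOV32_IMM zero-extends: upper 32 bits become 0. */
	uint64_t zext = (uint32_t)0xffffffff;
	/* BPF_MOV64_IMM sign-extends its 32-bit immediate. */
	uint64_t sext = (uint64_t)(int64_t)(int32_t)0xffffffff;

	printf("zero-extended >> 32 = %#llx, sign-extended >> 32 = %#llx\n",
	       (unsigned long long)(zext >> 32),
	       (unsigned long long)(sext >> 32));
	return 0;
}
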
8050
- {
8051
- "bounds check based on sign-extended MOV. test2",
8052
- .insns = {
8053
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8054
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8055
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8056
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8057
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8058
- BPF_FUNC_map_lookup_elem),
8059
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8060
- /* r2 = 0xffff'ffff'ffff'ffff */
8061
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8062
- /* r2 = 0xfff'ffff */
8063
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8064
- /* r0 = <oob pointer> */
8065
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8066
- /* access to OOB pointer */
8067
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8068
- /* exit */
8069
- BPF_MOV64_IMM(BPF_REG_0, 0),
8070
- BPF_EXIT_INSN(),
8071
- },
8072
- .fixup_map1 = { 3 },
8073
- .errstr = "R0 min value is outside of the array range",
8074
- .result = REJECT
8075
- },
8076
- {
8077
- "bounds check based on reg_off + var_off + insn_off. test1",
8078
- .insns = {
8079
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8080
- offsetof(struct __sk_buff, mark)),
8081
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8082
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8083
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8084
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8085
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8086
- BPF_FUNC_map_lookup_elem),
8087
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8088
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8089
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8090
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8091
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8092
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8093
- BPF_MOV64_IMM(BPF_REG_0, 0),
8094
- BPF_EXIT_INSN(),
8095
- },
8096
- .fixup_map1 = { 4 },
8097
- .errstr = "value_size=8 off=1073741825",
8098
- .result = REJECT,
8099
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8100
- },
8101
- {
8102
- "bounds check based on reg_off + var_off + insn_off. test2",
8103
- .insns = {
8104
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8105
- offsetof(struct __sk_buff, mark)),
8106
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8107
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8108
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8109
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8110
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8111
- BPF_FUNC_map_lookup_elem),
8112
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8113
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8114
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8115
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8116
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8117
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8118
- BPF_MOV64_IMM(BPF_REG_0, 0),
8119
- BPF_EXIT_INSN(),
8120
- },
8121
- .fixup_map1 = { 4 },
8122
- .errstr = "value 1073741823",
8123
- .result = REJECT,
8124
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8125
- },
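For the two reg_off + var_off + insn_off tests above, the constants named in the errstr strings are plain sums of the immediates used; a quick user-space check (not part of the selftest):

#include <stdio.h>

int main(void)
{
        long long step29 = (1LL << 29) - 1;  /* 536870911, added twice in test1 */
        long long step30 = (1LL << 30) - 1;  /* 1073741823, the value in test2's errstr */

        /* test1: two constant adds plus the load's insn offset of 3 */
        printf("%lld\n", step29 + step29 + 3);  /* 1073741825, the off in test1's errstr */
        printf("%lld\n", step30);
        return 0;
}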
8126
- {
8127
- "bounds check after truncation of non-boundary-crossing range",
8128
- .insns = {
8129
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8130
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8131
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8132
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8133
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8134
- BPF_FUNC_map_lookup_elem),
8135
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8136
- /* r1 = [0x00, 0xff] */
8137
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8138
- BPF_MOV64_IMM(BPF_REG_2, 1),
8139
- /* r2 = 0x10'0000'0000 */
8140
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8141
- /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8142
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8143
- /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8144
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8145
- /* r1 = [0x00, 0xff] */
8146
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8147
- /* r1 = 0 */
8148
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8149
- /* no-op */
8150
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8151
- /* access at offset 0 */
8152
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8153
- /* exit */
8154
- BPF_MOV64_IMM(BPF_REG_0, 0),
8155
- BPF_EXIT_INSN(),
8156
- },
8157
- .fixup_map1 = { 3 },
8158
- .result = ACCEPT
8159
- },
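The ACCEPT above relies on the 32-bit SUB both undoing the earlier 64-bit add and zero-extending the result, so r1 falls back into [0x00, 0xff]. A small user-space replay of that arithmetic at both ends of the range (illustration only; a 32-bit eBPF ALU op is modelled here as truncate-then-zero-extend):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        for (uint64_t v = 0; v <= 0xff; v += 0xff) {   /* both ends of [0x00, 0xff] */
                uint64_t r1 = v;

                r1 += 1ULL << 36;                            /* r2 = 1 << 36, then ADD */
                r1 += 0x7fffffff;
                r1 = (uint32_t)((uint32_t)r1 - 0x7fffffffu); /* ALU32 SUB, zero-extended */
                r1 >>= 8;
                printf("v=%#llx -> r1=%llu\n",
                       (unsigned long long)v, (unsigned long long)r1); /* always 0 */
        }
        return 0;
}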
8160
- {
8161
- "bounds check after truncation of boundary-crossing range (1)",
8162
- .insns = {
8163
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8164
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8165
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8166
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8167
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8168
- BPF_FUNC_map_lookup_elem),
8169
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8170
- /* r1 = [0x00, 0xff] */
8171
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8172
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8173
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8174
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8175
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
8176
- * [0x0000'0000, 0x0000'007f]
8177
- */
8178
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8179
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8180
- /* r1 = [0x00, 0xff] or
8181
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8182
- */
8183
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8184
- /* r1 = 0 or
8185
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8186
- */
8187
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8188
- /* no-op or OOB pointer computation */
8189
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8190
- /* potentially OOB access */
8191
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8192
- /* exit */
8193
- BPF_MOV64_IMM(BPF_REG_0, 0),
8194
- BPF_EXIT_INSN(),
8195
- },
8196
- .fixup_map1 = { 3 },
8197
- /* not actually fully unbounded, but the bound is very high */
8198
- .errstr = "R0 unbounded memory access",
8199
- .result = REJECT
8200
- },
8201
- {
8202
- "bounds check after truncation of boundary-crossing range (2)",
8203
- .insns = {
8204
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8205
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8206
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8207
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8208
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8209
- BPF_FUNC_map_lookup_elem),
8210
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8211
- /* r1 = [0x00, 0xff] */
8212
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8213
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8214
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8215
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8216
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
8217
- * [0x0000'0000, 0x0000'007f]
8218
- * difference to previous test: truncation via MOV32
8219
- * instead of ALU32.
8220
- */
8221
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8222
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8223
- /* r1 = [0x00, 0xff] or
8224
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8225
- */
8226
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8227
- /* r1 = 0 or
8228
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8229
- */
8230
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8231
- /* no-op or OOB pointer computation */
8232
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8233
- /* potentially OOB access */
8234
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8235
- /* exit */
8236
- BPF_MOV64_IMM(BPF_REG_0, 0),
8237
- BPF_EXIT_INSN(),
8238
- },
8239
- .fixup_map1 = { 3 },
8240
- /* not actually fully unbounded, but the bound is very high */
8241
- .errstr = "R0 unbounded memory access",
8242
- .result = REJECT
8243
- },
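For the two boundary-crossing variants above, the point is that [0x00, 0xff] plus twice 0xffffff80 >> 1 straddles 2^32, so the 32-bit truncation (ALU32 in (1), MOV32 in (2)) leaves only a very wide bound and the verifier reports unbounded access. The range endpoints, computed in user space for reference:

#include <stdio.h>

int main(void)
{
        unsigned long long step = 0xffffff80u >> 1;  /* 0x7fffffc0, added twice */
        unsigned long long lo = 0x00 + 2 * step;     /* 0xffffff80 */
        unsigned long long hi = 0xff + 2 * step;     /* 0x10000007f, past 2^32 */

        printf("lo=%#llx hi=%#llx\n", lo, hi);
        return 0;
}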
8244
- {
8245
- "bounds check after wrapping 32-bit addition",
8246
- .insns = {
8247
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8248
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8249
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8250
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8251
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8252
- BPF_FUNC_map_lookup_elem),
8253
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8254
- /* r1 = 0x7fff'ffff */
8255
- BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8256
- /* r1 = 0xffff'fffe */
8257
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8258
- /* r1 = 0 */
8259
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8260
- /* no-op */
8261
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8262
- /* access at offset 0 */
8263
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8264
- /* exit */
8265
- BPF_MOV64_IMM(BPF_REG_0, 0),
8266
- BPF_EXIT_INSN(),
8267
- },
8268
- .fixup_map1 = { 3 },
8269
- .result = ACCEPT
8270
- },
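The wrapping-addition test above is accepted because the final 32-bit add folds 0xfffffffe + 2 back to exactly 0. A user-space replay (32-bit eBPF ALU modelled as truncate-then-zero-extend):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t r1 = 0x7fffffff;

        r1 += 0x7fffffff;           /* 0xfffffffe */
        r1 = (uint32_t)(r1 + 2);    /* 32-bit add wraps to 0 */
        printf("%llu\n", (unsigned long long)r1);  /* 0 */
        return 0;
}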
8271
- {
8272
- "bounds check after shift with oversized count operand",
8273
- .insns = {
8274
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8275
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8276
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8277
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8278
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8279
- BPF_FUNC_map_lookup_elem),
8280
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8281
- BPF_MOV64_IMM(BPF_REG_2, 32),
8282
- BPF_MOV64_IMM(BPF_REG_1, 1),
8283
- /* r1 = (u32)1 << (u32)32 = ? */
8284
- BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
8285
- /* r1 = [0x0000, 0xffff] */
8286
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
8287
- /* computes unknown pointer, potentially OOB */
8288
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8289
- /* potentially OOB access */
8290
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8291
- /* exit */
8292
- BPF_MOV64_IMM(BPF_REG_0, 0),
8293
- BPF_EXIT_INSN(),
8294
- },
8295
- .fixup_map1 = { 3 },
8296
- .errstr = "R0 max value is outside of the array range",
8297
- .result = REJECT
8298
- },
8299
- {
8300
- "bounds check after right shift of maybe-negative number",
8301
- .insns = {
8302
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8303
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8304
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8305
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8306
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8307
- BPF_FUNC_map_lookup_elem),
8308
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8309
- /* r1 = [0x00, 0xff] */
8310
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8311
- /* r1 = [-0x01, 0xfe] */
8312
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
8313
- /* r1 = 0 or 0xff'ffff'ffff'ffff */
8314
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8315
- /* r1 = 0 or 0xffff'ffff'ffff */
8316
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8317
- /* computes unknown pointer, potentially OOB */
8318
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8319
- /* potentially OOB access */
8320
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8321
- /* exit */
8322
- BPF_MOV64_IMM(BPF_REG_0, 0),
8323
- BPF_EXIT_INSN(),
8324
- },
8325
- .fixup_map1 = { 3 },
8326
- .errstr = "R0 unbounded memory access",
8327
- .result = REJECT
8328
- },
8329
- {
8330
- "bounds check map access with off+size signed 32bit overflow. test1",
8331
- .insns = {
8332
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8333
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8334
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8335
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8336
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8337
- BPF_FUNC_map_lookup_elem),
8338
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8339
- BPF_EXIT_INSN(),
8340
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
8341
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8342
- BPF_JMP_A(0),
8343
- BPF_EXIT_INSN(),
8344
- },
8345
- .fixup_map1 = { 3 },
8346
- .errstr = "map_value pointer and 2147483646",
8347
- .result = REJECT
8348
- },
8349
- {
8350
- "bounds check map access with off+size signed 32bit overflow. test2",
8351
- .insns = {
8352
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8353
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8354
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8355
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8356
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8357
- BPF_FUNC_map_lookup_elem),
8358
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8359
- BPF_EXIT_INSN(),
8360
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8361
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8362
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8363
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8364
- BPF_JMP_A(0),
8365
- BPF_EXIT_INSN(),
8366
- },
8367
- .fixup_map1 = { 3 },
8368
- .errstr = "pointer offset 1073741822",
8369
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
8370
- .result = REJECT
8371
- },
8372
- {
8373
- "bounds check map access with off+size signed 32bit overflow. test3",
8374
- .insns = {
8375
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8376
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8377
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8378
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8379
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8380
- BPF_FUNC_map_lookup_elem),
8381
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8382
- BPF_EXIT_INSN(),
8383
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8384
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8385
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8386
- BPF_JMP_A(0),
8387
- BPF_EXIT_INSN(),
8388
- },
8389
- .fixup_map1 = { 3 },
8390
- .errstr = "pointer offset -1073741822",
8391
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
8392
- .result = REJECT
8393
- },
8394
- {
8395
- "bounds check map access with off+size signed 32bit overflow. test4",
8396
- .insns = {
8397
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8398
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8399
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8400
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8401
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8402
- BPF_FUNC_map_lookup_elem),
8403
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8404
- BPF_EXIT_INSN(),
8405
- BPF_MOV64_IMM(BPF_REG_1, 1000000),
8406
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
8407
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8408
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8409
- BPF_JMP_A(0),
8410
- BPF_EXIT_INSN(),
8411
- },
8412
- .fixup_map1 = { 3 },
8413
- .errstr = "map_value pointer and 1000000000000",
8414
- .result = REJECT
8415
- },
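The offsets quoted in the errstr strings of overflow tests 2, 3 and 4 above are plain products and sums of the immediates, checked in user space for reference:

#include <stdio.h>

int main(void)
{
        printf("%lld\n", 2LL * 0x1fffffff);       /*  1073741822, test2 */
        printf("%lld\n", -2LL * 0x1fffffff);      /* -1073741822, test3 */
        printf("%lld\n", 1000000LL * 1000000LL);  /*  1000000000000, test4 */
        return 0;
}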
8416
- {
8417
- "pointer/scalar confusion in state equality check (way 1)",
8418
- .insns = {
8419
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8420
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8421
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8422
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8423
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8424
- BPF_FUNC_map_lookup_elem),
8425
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8426
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8427
- BPF_JMP_A(1),
8428
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8429
- BPF_JMP_A(0),
8430
- BPF_EXIT_INSN(),
8431
- },
8432
- .fixup_map1 = { 3 },
8433
- .result = ACCEPT,
8434
- .retval = POINTER_VALUE,
8435
- .result_unpriv = REJECT,
8436
- .errstr_unpriv = "R0 leaks addr as return value"
8437
- },
8438
- {
8439
- "pointer/scalar confusion in state equality check (way 2)",
8440
- .insns = {
8441
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8442
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8443
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8444
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8445
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8446
- BPF_FUNC_map_lookup_elem),
8447
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
8448
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8449
- BPF_JMP_A(1),
8450
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8451
- BPF_EXIT_INSN(),
8452
- },
8453
- .fixup_map1 = { 3 },
8454
- .result = ACCEPT,
8455
- .retval = POINTER_VALUE,
8456
- .result_unpriv = REJECT,
8457
- .errstr_unpriv = "R0 leaks addr as return value"
8458
- },
8459
- {
8460
- "variable-offset ctx access",
8461
- .insns = {
8462
- /* Get an unknown value */
8463
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8464
- /* Make it small and 4-byte aligned */
8465
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8466
- /* add it to skb. We now have either &skb->len or
8467
- * &skb->pkt_type, but we don't know which
8468
- */
8469
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8470
- /* dereference it */
8471
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8472
- BPF_EXIT_INSN(),
8473
- },
8474
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
8475
- .result = REJECT,
8476
- .prog_type = BPF_PROG_TYPE_LWT_IN,
8477
- },
8478
- {
8479
- "variable-offset stack access",
8480
- .insns = {
8481
- /* Fill the top 8 bytes of the stack */
8482
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8483
- /* Get an unknown value */
8484
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8485
- /* Make it small and 4-byte aligned */
8486
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8487
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8488
- /* add it to fp. We now have either fp-4 or fp-8, but
8489
- * we don't know which
8490
- */
8491
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8492
- /* dereference it */
8493
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
8494
- BPF_EXIT_INSN(),
8495
- },
8496
- .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
8497
- .result = REJECT,
8498
- .prog_type = BPF_PROG_TYPE_LWT_IN,
8499
- },
8500
- {
8501
- "indirect variable-offset stack access, out of bound",
8502
- .insns = {
8503
- /* Fill the top 8 bytes of the stack */
8504
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8505
- /* Get an unknown value */
8506
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8507
- /* Make it small and 4-byte aligned */
8508
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8509
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8510
- /* add it to fp. We now have either fp-4 or fp-8, but
8511
- * we don't know which
8512
- */
8513
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8514
- /* dereference it indirectly */
8515
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8516
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8517
- BPF_FUNC_map_lookup_elem),
8518
- BPF_MOV64_IMM(BPF_REG_0, 0),
8519
- BPF_EXIT_INSN(),
8520
- },
8521
- .fixup_map1 = { 5 },
8522
- .errstr = "invalid stack type R2 var_off",
8523
- .result = REJECT,
8524
- .prog_type = BPF_PROG_TYPE_LWT_IN,
8525
- },
8526
- {
8527
- "indirect variable-offset stack access, max_off+size > max_initialized",
8528
- .insns = {
8529
- /* Fill only the second from top 8 bytes of the stack. */
8530
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
8531
- /* Get an unknown value. */
8532
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8533
- /* Make it small and 4-byte aligned. */
8534
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8535
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
8536
- /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
8537
- * which. fp-12 size 8 is partially uninitialized stack.
8538
- */
8539
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8540
- /* Dereference it indirectly. */
8541
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8542
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8543
- BPF_MOV64_IMM(BPF_REG_0, 0),
8544
- BPF_EXIT_INSN(),
8545
- },
8546
- .fixup_map1 = { 5 },
8547
- .errstr = "invalid indirect read from stack var_off",
8548
- .result = REJECT,
8549
- .prog_type = BPF_PROG_TYPE_LWT_IN,
8550
- },
8551
- {
8552
- "indirect variable-offset stack access, min_off < min_initialized",
8553
- .insns = {
8554
- /* Fill only the top 8 bytes of the stack. */
8555
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8556
- /* Get an unknown value */
8557
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8558
- /* Make it small and 4-byte aligned. */
8559
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8560
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
8561
- /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
8562
- * which. fp-16 size 8 is partially uninitialized stack.
8563
- */
8564
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8565
- /* Dereference it indirectly. */
8566
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8567
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8568
- BPF_MOV64_IMM(BPF_REG_0, 0),
8569
- BPF_EXIT_INSN(),
8570
- },
8571
- .fixup_map1 = { 5 },
8572
- .errstr = "invalid indirect read from stack var_off",
8573
- .result = REJECT,
8574
- .prog_type = BPF_PROG_TYPE_LWT_IN,
8575
- },
8576
- {
8577
- "indirect variable-offset stack access, ok",
8578
- .insns = {
8579
- /* Fill the top 16 bytes of the stack. */
8580
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
8581
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8582
- /* Get an unknown value. */
8583
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8584
- /* Make it small and 4-byte aligned. */
8585
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8586
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
8587
- /* Add it to fp. We now have either fp-12 or fp-16, we don't know
8588
- * which, but either way it points to initialized stack.
8589
- */
8590
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8591
- /* Dereference it indirectly. */
8592
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8593
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8594
- BPF_MOV64_IMM(BPF_REG_0, 0),
8595
- BPF_EXIT_INSN(),
8596
- },
8597
- .fixup_map1 = { 6 },
8598
- .result = ACCEPT,
8599
- .prog_type = BPF_PROG_TYPE_LWT_IN,
8600
- },
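The three indirect variable-offset tests above all reduce the unknown value to (x & 4) - 16, i.e. the key pointer is either fp-16 or fp-12; which of the two 8-byte windows is initialized decides ACCEPT versus REJECT. The candidate offsets, enumerated in user space:

#include <stdio.h>

int main(void)
{
        for (int x = 0; x <= 7; x++)                         /* any unknown low bits */
                printf("x=%d -> fp%+d\n", x, (x & 4) - 16);  /* fp-16 or fp-12 */
        return 0;
}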
8601
- {
8602
- "direct stack access with 32-bit wraparound. test1",
8603
- .insns = {
8604
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8605
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8606
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8607
- BPF_MOV32_IMM(BPF_REG_0, 0),
8608
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8609
- BPF_EXIT_INSN()
8610
- },
8611
- .errstr = "fp pointer and 2147483647",
8612
- .result = REJECT
8613
- },
8614
- {
8615
- "direct stack access with 32-bit wraparound. test2",
8616
- .insns = {
8617
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8618
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8619
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8620
- BPF_MOV32_IMM(BPF_REG_0, 0),
8621
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8622
- BPF_EXIT_INSN()
8623
- },
8624
- .errstr = "fp pointer and 1073741823",
8625
- .result = REJECT
8626
- },
8627
- {
8628
- "direct stack access with 32-bit wraparound. test3",
8629
- .insns = {
8630
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8631
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8632
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8633
- BPF_MOV32_IMM(BPF_REG_0, 0),
8634
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8635
- BPF_EXIT_INSN()
8636
- },
8637
- .errstr = "fp pointer offset 1073741822",
8638
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
8639
- .result = REJECT
8640
- },
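The constants reported by the three direct stack wraparound tests above are again just the immediates and their sum, reproduced here for reference:

#include <stdio.h>

int main(void)
{
        printf("%lld\n", 0x7fffffffLL);       /* 2147483647, test1 */
        printf("%lld\n", 0x3fffffffLL);       /* 1073741823, test2 */
        printf("%lld\n", 2LL * 0x1fffffff);   /* 1073741822, test3 */
        return 0;
}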
8641
- {
8642
- "liveness pruning and write screening",
8643
- .insns = {
8644
- /* Get an unknown value */
8645
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8646
- /* branch conditions teach us nothing about R2 */
8647
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8648
- BPF_MOV64_IMM(BPF_REG_0, 0),
8649
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8650
- BPF_MOV64_IMM(BPF_REG_0, 0),
8651
- BPF_EXIT_INSN(),
8652
- },
8653
- .errstr = "R0 !read_ok",
8654
- .result = REJECT,
8655
- .prog_type = BPF_PROG_TYPE_LWT_IN,
8656
- },
8657
- {
8658
- "varlen_map_value_access pruning",
8659
- .insns = {
8660
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8661
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8662
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8663
- BPF_LD_MAP_FD(BPF_REG_1, 0),
8664
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8665
- BPF_FUNC_map_lookup_elem),
8666
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8667
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8668
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
8669
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
8670
- BPF_MOV32_IMM(BPF_REG_1, 0),
8671
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
8672
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8673
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
8674
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
8675
- offsetof(struct test_val, foo)),
8676
- BPF_EXIT_INSN(),
8677
- },
8678
- .fixup_map2 = { 3 },
8679
- .errstr_unpriv = "R0 leaks addr",
8680
- .errstr = "R0 unbounded memory access",
8681
- .result_unpriv = REJECT,
8682
- .result = REJECT,
8683
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8684
- },
8685
- {
8686
- "invalid 64-bit BPF_END",
8687
- .insns = {
8688
- BPF_MOV32_IMM(BPF_REG_0, 0),
8689
- {
8690
- .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
8691
- .dst_reg = BPF_REG_0,
8692
- .src_reg = 0,
8693
- .off = 0,
8694
- .imm = 32,
8695
- },
8696
- BPF_EXIT_INSN(),
8697
- },
8698
- .errstr = "unknown opcode d7",
8699
- .result = REJECT,
8700
- },
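The hand-built instruction in the BPF_END test above encodes to opcode 0xd7, which is exactly what the errstr names; this can be checked against the UAPI header from user space:

#include <stdio.h>
#include <linux/bpf.h>

int main(void)
{
        /* BPF_ALU64 (0x07) | BPF_END (0xd0) | BPF_TO_LE (0x00) */
        printf("opcode %#x\n", BPF_ALU64 | BPF_END | BPF_TO_LE);  /* 0xd7 */
        return 0;
}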
8701
- {
8702
- "XDP, using ifindex from netdev",
8703
- .insns = {
8704
- BPF_MOV64_IMM(BPF_REG_0, 0),
8705
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8706
- offsetof(struct xdp_md, ingress_ifindex)),
8707
- BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
8708
- BPF_MOV64_IMM(BPF_REG_0, 1),
8709
- BPF_EXIT_INSN(),
8710
- },
8711
- .result = ACCEPT,
8712
- .prog_type = BPF_PROG_TYPE_XDP,
8713
- .retval = 1,
8714
- },
8715
- {
8716
- "meta access, test1",
8717
- .insns = {
8718
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8719
- offsetof(struct xdp_md, data_meta)),
8720
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8721
- offsetof(struct xdp_md, data)),
8722
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8723
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8724
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8725
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8726
- BPF_MOV64_IMM(BPF_REG_0, 0),
8727
- BPF_EXIT_INSN(),
8728
- },
8729
- .result = ACCEPT,
8730
- .prog_type = BPF_PROG_TYPE_XDP,
8731
- },
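"meta access, test1" above is the instruction-level form of the usual restricted-C metadata bounds check. A hypothetical standalone XDP program with the same shape (a sketch assuming libbpf's bpf_helpers.h; it is not part of this file):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int read_meta(struct xdp_md *ctx)
{
        void *meta = (void *)(long)ctx->data_meta;
        void *data = (void *)(long)ctx->data;

        /* Prove meta + 8 <= data before touching the metadata area. */
        if (meta + 8 > data)
                return XDP_PASS;

        return ((volatile __u8 *)meta)[0] ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";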
8732
- {
8733
- "meta access, test2",
8734
- .insns = {
8735
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8736
- offsetof(struct xdp_md, data_meta)),
8737
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8738
- offsetof(struct xdp_md, data)),
8739
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8740
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
8741
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8742
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8743
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8744
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8745
- BPF_MOV64_IMM(BPF_REG_0, 0),
8746
- BPF_EXIT_INSN(),
8747
- },
8748
- .result = REJECT,
8749
- .errstr = "invalid access to packet, off=-8",
8750
- .prog_type = BPF_PROG_TYPE_XDP,
8751
- },
8752
- {
8753
- "meta access, test3",
8754
- .insns = {
8755
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8756
- offsetof(struct xdp_md, data_meta)),
8757
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8758
- offsetof(struct xdp_md, data_end)),
8759
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8760
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8761
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8762
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8763
- BPF_MOV64_IMM(BPF_REG_0, 0),
8764
- BPF_EXIT_INSN(),
8765
- },
8766
- .result = REJECT,
8767
- .errstr = "invalid access to packet",
8768
- .prog_type = BPF_PROG_TYPE_XDP,
8769
- },
8770
- {
8771
- "meta access, test4",
8772
- .insns = {
8773
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8774
- offsetof(struct xdp_md, data_meta)),
8775
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8776
- offsetof(struct xdp_md, data_end)),
8777
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8778
- offsetof(struct xdp_md, data)),
8779
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8780
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8781
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8782
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8783
- BPF_MOV64_IMM(BPF_REG_0, 0),
8784
- BPF_EXIT_INSN(),
8785
- },
8786
- .result = REJECT,
8787
- .errstr = "invalid access to packet",
8788
- .prog_type = BPF_PROG_TYPE_XDP,
8789
- },
8790
- {
8791
- "meta access, test5",
8792
- .insns = {
8793
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8794
- offsetof(struct xdp_md, data_meta)),
8795
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8796
- offsetof(struct xdp_md, data)),
8797
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8798
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8799
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
8800
- BPF_MOV64_IMM(BPF_REG_2, -8),
8801
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8802
- BPF_FUNC_xdp_adjust_meta),
8803
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8804
- BPF_MOV64_IMM(BPF_REG_0, 0),
8805
- BPF_EXIT_INSN(),
8806
- },
8807
- .result = REJECT,
8808
- .errstr = "R3 !read_ok",
8809
- .prog_type = BPF_PROG_TYPE_XDP,
8810
- },
8811
- {
8812
- "meta access, test6",
8813
- .insns = {
8814
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8815
- offsetof(struct xdp_md, data_meta)),
8816
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8817
- offsetof(struct xdp_md, data)),
8818
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8819
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8820
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8821
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8822
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
8823
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8824
- BPF_MOV64_IMM(BPF_REG_0, 0),
8825
- BPF_EXIT_INSN(),
8826
- },
8827
- .result = REJECT,
8828
- .errstr = "invalid access to packet",
8829
- .prog_type = BPF_PROG_TYPE_XDP,
8830
- },
8831
- {
8832
- "meta access, test7",
8833
- .insns = {
8834
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8835
- offsetof(struct xdp_md, data_meta)),
8836
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8837
- offsetof(struct xdp_md, data)),
8838
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8839
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8840
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8841
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8842
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8843
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8844
- BPF_MOV64_IMM(BPF_REG_0, 0),
8845
- BPF_EXIT_INSN(),
8846
- },
8847
- .result = ACCEPT,
8848
- .prog_type = BPF_PROG_TYPE_XDP,
8849
- },
8850
- {
8851
- "meta access, test8",
8852
- .insns = {
8853
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8854
- offsetof(struct xdp_md, data_meta)),
8855
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8856
- offsetof(struct xdp_md, data)),
8857
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8858
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8859
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8860
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8861
- BPF_MOV64_IMM(BPF_REG_0, 0),
8862
- BPF_EXIT_INSN(),
8863
- },
8864
- .result = ACCEPT,
8865
- .prog_type = BPF_PROG_TYPE_XDP,
8866
- },
8867
- {
8868
- "meta access, test9",
8869
- .insns = {
8870
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8871
- offsetof(struct xdp_md, data_meta)),
8872
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8873
- offsetof(struct xdp_md, data)),
8874
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8875
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8876
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8877
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8878
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8879
- BPF_MOV64_IMM(BPF_REG_0, 0),
8880
- BPF_EXIT_INSN(),
8881
- },
8882
- .result = REJECT,
8883
- .errstr = "invalid access to packet",
8884
- .prog_type = BPF_PROG_TYPE_XDP,
8885
- },
8886
- {
8887
- "meta access, test10",
8888
- .insns = {
8889
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8890
- offsetof(struct xdp_md, data_meta)),
8891
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8892
- offsetof(struct xdp_md, data)),
8893
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8894
- offsetof(struct xdp_md, data_end)),
8895
- BPF_MOV64_IMM(BPF_REG_5, 42),
8896
- BPF_MOV64_IMM(BPF_REG_6, 24),
8897
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8898
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8899
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8900
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8901
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
8902
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8903
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8904
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8905
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
8906
- BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
8907
- BPF_MOV64_IMM(BPF_REG_0, 0),
8908
- BPF_EXIT_INSN(),
8909
- },
8910
- .result = REJECT,
8911
- .errstr = "invalid access to packet",
8912
- .prog_type = BPF_PROG_TYPE_XDP,
8913
- },
8914
- {
8915
- "meta access, test11",
8916
- .insns = {
8917
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8918
- offsetof(struct xdp_md, data_meta)),
8919
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8920
- offsetof(struct xdp_md, data)),
8921
- BPF_MOV64_IMM(BPF_REG_5, 42),
8922
- BPF_MOV64_IMM(BPF_REG_6, 24),
8923
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8924
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8925
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8926
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8927
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
8928
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8929
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8930
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8931
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
8932
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8933
- BPF_MOV64_IMM(BPF_REG_0, 0),
8934
- BPF_EXIT_INSN(),
8935
- },
8936
- .result = ACCEPT,
8937
- .prog_type = BPF_PROG_TYPE_XDP,
8938
- },
8939
- {
8940
- "meta access, test12",
8941
- .insns = {
8942
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8943
- offsetof(struct xdp_md, data_meta)),
8944
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8945
- offsetof(struct xdp_md, data)),
8946
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8947
- offsetof(struct xdp_md, data_end)),
8948
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8949
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8950
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8951
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8952
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8953
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8954
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8955
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8956
- BPF_MOV64_IMM(BPF_REG_0, 0),
8957
- BPF_EXIT_INSN(),
8958
- },
8959
- .result = ACCEPT,
8960
- .prog_type = BPF_PROG_TYPE_XDP,
8961
- },
8962
- {
8963
- "arithmetic ops make PTR_TO_CTX unusable",
8964
- .insns = {
8965
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8966
- offsetof(struct __sk_buff, data) -
8967
- offsetof(struct __sk_buff, mark)),
8968
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8969
- offsetof(struct __sk_buff, mark)),
8970
- BPF_EXIT_INSN(),
8971
- },
8972
- .errstr = "dereference of modified ctx ptr",
8973
- .result = REJECT,
8974
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8975
- },
8976
- {
8977
- "pkt_end - pkt_start is allowed",
8978
- .insns = {
8979
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8980
- offsetof(struct __sk_buff, data_end)),
8981
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8982
- offsetof(struct __sk_buff, data)),
8983
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8984
- BPF_EXIT_INSN(),
8985
- },
8986
- .result = ACCEPT,
8987
- .retval = TEST_DATA_LEN,
8988
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8989
- },
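"pkt_end - pkt_start is allowed" above corresponds to returning the packet length from a classifier program; the difference of the two packet pointers is treated as an ordinary scalar. A hypothetical restricted-C sketch (assumes libbpf's bpf_helpers.h and the "classifier" section name; not part of this file):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int pkt_len(struct __sk_buff *skb)
{
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;

        return data_end - data;  /* scalar result, fine to return */
}

char _license[] SEC("license") = "GPL";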
8990
- {
8991
- "XDP pkt read, pkt_end mangling, bad access 1",
8992
- .insns = {
8993
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8994
- offsetof(struct xdp_md, data)),
8995
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8996
- offsetof(struct xdp_md, data_end)),
8997
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8998
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8999
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9000
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9001
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9002
- BPF_MOV64_IMM(BPF_REG_0, 0),
9003
- BPF_EXIT_INSN(),
9004
- },
9005
- .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
9006
- .result = REJECT,
9007
- .prog_type = BPF_PROG_TYPE_XDP,
9008
- },
9009
- {
9010
- "XDP pkt read, pkt_end mangling, bad access 2",
9011
- .insns = {
9012
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9013
- offsetof(struct xdp_md, data)),
9014
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9015
- offsetof(struct xdp_md, data_end)),
9016
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9017
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9018
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9019
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9020
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9021
- BPF_MOV64_IMM(BPF_REG_0, 0),
9022
- BPF_EXIT_INSN(),
9023
- },
9024
- .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
9025
- .result = REJECT,
9026
- .prog_type = BPF_PROG_TYPE_XDP,
9027
- },
9028
- {
9029
- "XDP pkt read, pkt_data' > pkt_end, good access",
9030
- .insns = {
9031
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9032
- offsetof(struct xdp_md, data)),
9033
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9034
- offsetof(struct xdp_md, data_end)),
9035
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9036
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9037
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9038
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9039
- BPF_MOV64_IMM(BPF_REG_0, 0),
9040
- BPF_EXIT_INSN(),
9041
- },
9042
- .result = ACCEPT,
9043
- .prog_type = BPF_PROG_TYPE_XDP,
9044
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9045
- },
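The long series of pkt_data'/pkt_end comparison tests that follows exercises every flavour of the same C-level pattern: a packet access is only legal on the branch where data + N <= data_end has been proven. A hypothetical XDP sketch of that pattern (assumes libbpf's bpf_helpers.h; not part of this file):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int read_first_8(struct xdp_md *ctx)
{
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;

        if (data + 8 > data_end)   /* mirrors the BPF_JGT r1, r3 check */
                return XDP_DROP;

        /* [data, data + 8) is now known to be inside the packet. */
        return *(volatile __u64 *)data ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";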
9046
- {
9047
- "XDP pkt read, pkt_data' > pkt_end, bad access 1",
9048
- .insns = {
9049
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9050
- offsetof(struct xdp_md, data)),
9051
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9052
- offsetof(struct xdp_md, data_end)),
9053
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9054
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9055
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9056
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9057
- BPF_MOV64_IMM(BPF_REG_0, 0),
9058
- BPF_EXIT_INSN(),
9059
- },
9060
- .errstr = "R1 offset is outside of the packet",
9061
- .result = REJECT,
9062
- .prog_type = BPF_PROG_TYPE_XDP,
9063
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9064
- },
9065
- {
9066
- "XDP pkt read, pkt_data' > pkt_end, bad access 2",
9067
- .insns = {
9068
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9069
- offsetof(struct xdp_md, data)),
9070
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9071
- offsetof(struct xdp_md, data_end)),
9072
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9073
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9074
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9075
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9076
- BPF_MOV64_IMM(BPF_REG_0, 0),
9077
- BPF_EXIT_INSN(),
9078
- },
9079
- .errstr = "R1 offset is outside of the packet",
9080
- .result = REJECT,
9081
- .prog_type = BPF_PROG_TYPE_XDP,
9082
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9083
- },
9084
- {
9085
- "XDP pkt read, pkt_end > pkt_data', good access",
9086
- .insns = {
9087
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9088
- offsetof(struct xdp_md, data)),
9089
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9090
- offsetof(struct xdp_md, data_end)),
9091
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9092
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9093
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9094
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9095
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9096
- BPF_MOV64_IMM(BPF_REG_0, 0),
9097
- BPF_EXIT_INSN(),
9098
- },
9099
- .result = ACCEPT,
9100
- .prog_type = BPF_PROG_TYPE_XDP,
9101
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9102
- },
9103
- {
9104
- "XDP pkt read, pkt_end > pkt_data', bad access 1",
9105
- .insns = {
9106
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9107
- offsetof(struct xdp_md, data)),
9108
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9109
- offsetof(struct xdp_md, data_end)),
9110
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9111
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9112
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9113
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9114
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9115
- BPF_MOV64_IMM(BPF_REG_0, 0),
9116
- BPF_EXIT_INSN(),
9117
- },
9118
- .errstr = "R1 offset is outside of the packet",
9119
- .result = REJECT,
9120
- .prog_type = BPF_PROG_TYPE_XDP,
9121
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9122
- },
9123
- {
9124
- "XDP pkt read, pkt_end > pkt_data', bad access 2",
9125
- .insns = {
9126
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9127
- offsetof(struct xdp_md, data)),
9128
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9129
- offsetof(struct xdp_md, data_end)),
9130
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9131
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9132
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9133
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9134
- BPF_MOV64_IMM(BPF_REG_0, 0),
9135
- BPF_EXIT_INSN(),
9136
- },
9137
- .errstr = "R1 offset is outside of the packet",
9138
- .result = REJECT,
9139
- .prog_type = BPF_PROG_TYPE_XDP,
9140
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9141
- },
9142
- {
9143
- "XDP pkt read, pkt_data' < pkt_end, good access",
9144
- .insns = {
9145
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9146
- offsetof(struct xdp_md, data)),
9147
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9148
- offsetof(struct xdp_md, data_end)),
9149
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9150
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9151
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9152
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9153
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9154
- BPF_MOV64_IMM(BPF_REG_0, 0),
9155
- BPF_EXIT_INSN(),
9156
- },
9157
- .result = ACCEPT,
9158
- .prog_type = BPF_PROG_TYPE_XDP,
9159
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9160
- },
9161
- {
9162
- "XDP pkt read, pkt_data' < pkt_end, bad access 1",
9163
- .insns = {
9164
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9165
- offsetof(struct xdp_md, data)),
9166
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9167
- offsetof(struct xdp_md, data_end)),
9168
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9169
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9170
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9171
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9172
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9173
- BPF_MOV64_IMM(BPF_REG_0, 0),
9174
- BPF_EXIT_INSN(),
9175
- },
9176
- .errstr = "R1 offset is outside of the packet",
9177
- .result = REJECT,
9178
- .prog_type = BPF_PROG_TYPE_XDP,
9179
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9180
- },
9181
- {
9182
- "XDP pkt read, pkt_data' < pkt_end, bad access 2",
9183
- .insns = {
9184
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9185
- offsetof(struct xdp_md, data)),
9186
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9187
- offsetof(struct xdp_md, data_end)),
9188
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9189
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9190
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9191
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9192
- BPF_MOV64_IMM(BPF_REG_0, 0),
9193
- BPF_EXIT_INSN(),
9194
- },
9195
- .errstr = "R1 offset is outside of the packet",
9196
- .result = REJECT,
9197
- .prog_type = BPF_PROG_TYPE_XDP,
9198
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9199
- },
9200
- {
9201
- "XDP pkt read, pkt_end < pkt_data', good access",
9202
- .insns = {
9203
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9204
- offsetof(struct xdp_md, data)),
9205
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9206
- offsetof(struct xdp_md, data_end)),
9207
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9208
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9209
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9210
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9211
- BPF_MOV64_IMM(BPF_REG_0, 0),
9212
- BPF_EXIT_INSN(),
9213
- },
9214
- .result = ACCEPT,
9215
- .prog_type = BPF_PROG_TYPE_XDP,
9216
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9217
- },
9218
- {
9219
- "XDP pkt read, pkt_end < pkt_data', bad access 1",
9220
- .insns = {
9221
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9222
- offsetof(struct xdp_md, data)),
9223
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9224
- offsetof(struct xdp_md, data_end)),
9225
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9226
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9227
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9228
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9229
- BPF_MOV64_IMM(BPF_REG_0, 0),
9230
- BPF_EXIT_INSN(),
9231
- },
9232
- .errstr = "R1 offset is outside of the packet",
9233
- .result = REJECT,
9234
- .prog_type = BPF_PROG_TYPE_XDP,
9235
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9236
- },
9237
- {
9238
- "XDP pkt read, pkt_end < pkt_data', bad access 2",
9239
- .insns = {
9240
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9241
- offsetof(struct xdp_md, data)),
9242
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9243
- offsetof(struct xdp_md, data_end)),
9244
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9245
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9246
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9247
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9248
- BPF_MOV64_IMM(BPF_REG_0, 0),
9249
- BPF_EXIT_INSN(),
9250
- },
9251
- .errstr = "R1 offset is outside of the packet",
9252
- .result = REJECT,
9253
- .prog_type = BPF_PROG_TYPE_XDP,
9254
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9255
- },
9256
- {
9257
- "XDP pkt read, pkt_data' >= pkt_end, good access",
9258
- .insns = {
9259
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9260
- offsetof(struct xdp_md, data)),
9261
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9262
- offsetof(struct xdp_md, data_end)),
9263
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9264
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9265
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9266
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9267
- BPF_MOV64_IMM(BPF_REG_0, 0),
9268
- BPF_EXIT_INSN(),
9269
- },
9270
- .result = ACCEPT,
9271
- .prog_type = BPF_PROG_TYPE_XDP,
9272
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9273
- },
9274
- {
9275
- "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9276
- .insns = {
9277
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9278
- offsetof(struct xdp_md, data)),
9279
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9280
- offsetof(struct xdp_md, data_end)),
9281
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9282
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9283
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9284
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9285
- BPF_MOV64_IMM(BPF_REG_0, 0),
9286
- BPF_EXIT_INSN(),
9287
- },
9288
- .errstr = "R1 offset is outside of the packet",
9289
- .result = REJECT,
9290
- .prog_type = BPF_PROG_TYPE_XDP,
9291
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9292
- },
9293
- {
9294
- "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9295
- .insns = {
9296
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9297
- offsetof(struct xdp_md, data)),
9298
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9299
- offsetof(struct xdp_md, data_end)),
9300
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9301
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9302
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9303
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9304
- BPF_MOV64_IMM(BPF_REG_0, 0),
9305
- BPF_EXIT_INSN(),
9306
- },
9307
- .errstr = "R1 offset is outside of the packet",
9308
- .result = REJECT,
9309
- .prog_type = BPF_PROG_TYPE_XDP,
9310
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9311
- },
9312
- {
9313
- "XDP pkt read, pkt_end >= pkt_data', good access",
9314
- .insns = {
9315
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9316
- offsetof(struct xdp_md, data)),
9317
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9318
- offsetof(struct xdp_md, data_end)),
9319
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9320
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9321
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9322
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9323
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9324
- BPF_MOV64_IMM(BPF_REG_0, 0),
9325
- BPF_EXIT_INSN(),
9326
- },
9327
- .result = ACCEPT,
9328
- .prog_type = BPF_PROG_TYPE_XDP,
9329
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9330
- },
9331
- {
9332
- "XDP pkt read, pkt_end >= pkt_data', bad access 1",
9333
- .insns = {
9334
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9335
- offsetof(struct xdp_md, data)),
9336
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9337
- offsetof(struct xdp_md, data_end)),
9338
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9339
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9340
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9341
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9342
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9343
- BPF_MOV64_IMM(BPF_REG_0, 0),
9344
- BPF_EXIT_INSN(),
9345
- },
9346
- .errstr = "R1 offset is outside of the packet",
9347
- .result = REJECT,
9348
- .prog_type = BPF_PROG_TYPE_XDP,
9349
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9350
- },
9351
- {
9352
- "XDP pkt read, pkt_end >= pkt_data', bad access 2",
9353
- .insns = {
9354
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9355
- offsetof(struct xdp_md, data)),
9356
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9357
- offsetof(struct xdp_md, data_end)),
9358
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9359
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9360
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9361
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9362
- BPF_MOV64_IMM(BPF_REG_0, 0),
9363
- BPF_EXIT_INSN(),
9364
- },
9365
- .errstr = "R1 offset is outside of the packet",
9366
- .result = REJECT,
9367
- .prog_type = BPF_PROG_TYPE_XDP,
9368
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9369
- },
9370
- {
9371
- "XDP pkt read, pkt_data' <= pkt_end, good access",
9372
- .insns = {
9373
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9374
- offsetof(struct xdp_md, data)),
9375
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9376
- offsetof(struct xdp_md, data_end)),
9377
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9378
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9379
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9380
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9381
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9382
- BPF_MOV64_IMM(BPF_REG_0, 0),
9383
- BPF_EXIT_INSN(),
9384
- },
9385
- .result = ACCEPT,
9386
- .prog_type = BPF_PROG_TYPE_XDP,
9387
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9388
- },
9389
- {
9390
- "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
9391
- .insns = {
9392
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9393
- offsetof(struct xdp_md, data)),
9394
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9395
- offsetof(struct xdp_md, data_end)),
9396
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9397
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9398
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9399
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9400
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9401
- BPF_MOV64_IMM(BPF_REG_0, 0),
9402
- BPF_EXIT_INSN(),
9403
- },
9404
- .errstr = "R1 offset is outside of the packet",
9405
- .result = REJECT,
9406
- .prog_type = BPF_PROG_TYPE_XDP,
9407
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9408
- },
9409
- {
9410
- "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
9411
- .insns = {
9412
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9413
- offsetof(struct xdp_md, data)),
9414
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9415
- offsetof(struct xdp_md, data_end)),
9416
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9417
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9418
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9419
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9420
- BPF_MOV64_IMM(BPF_REG_0, 0),
9421
- BPF_EXIT_INSN(),
9422
- },
9423
- .errstr = "R1 offset is outside of the packet",
9424
- .result = REJECT,
9425
- .prog_type = BPF_PROG_TYPE_XDP,
9426
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9427
- },
9428
- {
9429
- "XDP pkt read, pkt_end <= pkt_data', good access",
9430
- .insns = {
9431
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9432
- offsetof(struct xdp_md, data)),
9433
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9434
- offsetof(struct xdp_md, data_end)),
9435
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9436
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9437
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9438
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9439
- BPF_MOV64_IMM(BPF_REG_0, 0),
9440
- BPF_EXIT_INSN(),
9441
- },
9442
- .result = ACCEPT,
9443
- .prog_type = BPF_PROG_TYPE_XDP,
9444
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9445
- },
9446
- {
9447
- "XDP pkt read, pkt_end <= pkt_data', bad access 1",
9448
- .insns = {
9449
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9450
- offsetof(struct xdp_md, data)),
9451
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9452
- offsetof(struct xdp_md, data_end)),
9453
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9454
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9455
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9456
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9457
- BPF_MOV64_IMM(BPF_REG_0, 0),
9458
- BPF_EXIT_INSN(),
9459
- },
9460
- .errstr = "R1 offset is outside of the packet",
9461
- .result = REJECT,
9462
- .prog_type = BPF_PROG_TYPE_XDP,
9463
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9464
- },
9465
- {
9466
- "XDP pkt read, pkt_end <= pkt_data', bad access 2",
9467
- .insns = {
9468
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9469
- offsetof(struct xdp_md, data)),
9470
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9471
- offsetof(struct xdp_md, data_end)),
9472
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9473
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9474
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9475
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9476
- BPF_MOV64_IMM(BPF_REG_0, 0),
9477
- BPF_EXIT_INSN(),
9478
- },
9479
- .errstr = "R1 offset is outside of the packet",
9480
- .result = REJECT,
9481
- .prog_type = BPF_PROG_TYPE_XDP,
9482
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9483
- },
9484
- {
9485
- "XDP pkt read, pkt_meta' > pkt_data, good access",
9486
- .insns = {
9487
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9488
- offsetof(struct xdp_md, data_meta)),
9489
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9490
- offsetof(struct xdp_md, data)),
9491
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9492
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9493
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9494
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9495
- BPF_MOV64_IMM(BPF_REG_0, 0),
9496
- BPF_EXIT_INSN(),
9497
- },
9498
- .result = ACCEPT,
9499
- .prog_type = BPF_PROG_TYPE_XDP,
9500
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9501
- },
9502
- {
9503
- "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
9504
- .insns = {
9505
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9506
- offsetof(struct xdp_md, data_meta)),
9507
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9508
- offsetof(struct xdp_md, data)),
9509
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9510
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9511
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9512
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9513
- BPF_MOV64_IMM(BPF_REG_0, 0),
9514
- BPF_EXIT_INSN(),
9515
- },
9516
- .errstr = "R1 offset is outside of the packet",
9517
- .result = REJECT,
9518
- .prog_type = BPF_PROG_TYPE_XDP,
9519
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9520
- },
9521
- {
9522
- "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
9523
- .insns = {
9524
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9525
- offsetof(struct xdp_md, data_meta)),
9526
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9527
- offsetof(struct xdp_md, data)),
9528
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9529
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9530
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9531
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9532
- BPF_MOV64_IMM(BPF_REG_0, 0),
9533
- BPF_EXIT_INSN(),
9534
- },
9535
- .errstr = "R1 offset is outside of the packet",
9536
- .result = REJECT,
9537
- .prog_type = BPF_PROG_TYPE_XDP,
9538
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9539
- },
9540
- {
9541
- "XDP pkt read, pkt_data > pkt_meta', good access",
9542
- .insns = {
9543
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9544
- offsetof(struct xdp_md, data_meta)),
9545
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9546
- offsetof(struct xdp_md, data)),
9547
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9548
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9549
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9550
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9551
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9552
- BPF_MOV64_IMM(BPF_REG_0, 0),
9553
- BPF_EXIT_INSN(),
9554
- },
9555
- .result = ACCEPT,
9556
- .prog_type = BPF_PROG_TYPE_XDP,
9557
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9558
- },
9559
- {
9560
- "XDP pkt read, pkt_data > pkt_meta', bad access 1",
9561
- .insns = {
9562
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9563
- offsetof(struct xdp_md, data_meta)),
9564
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9565
- offsetof(struct xdp_md, data)),
9566
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9567
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9568
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9569
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9570
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9571
- BPF_MOV64_IMM(BPF_REG_0, 0),
9572
- BPF_EXIT_INSN(),
9573
- },
9574
- .errstr = "R1 offset is outside of the packet",
9575
- .result = REJECT,
9576
- .prog_type = BPF_PROG_TYPE_XDP,
9577
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9578
- },
9579
- {
9580
- "XDP pkt read, pkt_data > pkt_meta', bad access 2",
9581
- .insns = {
9582
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9583
- offsetof(struct xdp_md, data_meta)),
9584
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9585
- offsetof(struct xdp_md, data)),
9586
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9587
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9588
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9589
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9590
- BPF_MOV64_IMM(BPF_REG_0, 0),
9591
- BPF_EXIT_INSN(),
9592
- },
9593
- .errstr = "R1 offset is outside of the packet",
9594
- .result = REJECT,
9595
- .prog_type = BPF_PROG_TYPE_XDP,
9596
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9597
- },
9598
- {
9599
- "XDP pkt read, pkt_meta' < pkt_data, good access",
9600
- .insns = {
9601
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9602
- offsetof(struct xdp_md, data_meta)),
9603
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9604
- offsetof(struct xdp_md, data)),
9605
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9606
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9607
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9608
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9609
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9610
- BPF_MOV64_IMM(BPF_REG_0, 0),
9611
- BPF_EXIT_INSN(),
9612
- },
9613
- .result = ACCEPT,
9614
- .prog_type = BPF_PROG_TYPE_XDP,
9615
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9616
- },
9617
- {
9618
- "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
9619
- .insns = {
9620
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9621
- offsetof(struct xdp_md, data_meta)),
9622
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9623
- offsetof(struct xdp_md, data)),
9624
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9625
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9626
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9627
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9628
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9629
- BPF_MOV64_IMM(BPF_REG_0, 0),
9630
- BPF_EXIT_INSN(),
9631
- },
9632
- .errstr = "R1 offset is outside of the packet",
9633
- .result = REJECT,
9634
- .prog_type = BPF_PROG_TYPE_XDP,
9635
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9636
- },
9637
- {
9638
- "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
9639
- .insns = {
9640
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9641
- offsetof(struct xdp_md, data_meta)),
9642
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9643
- offsetof(struct xdp_md, data)),
9644
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9645
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9646
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9647
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9648
- BPF_MOV64_IMM(BPF_REG_0, 0),
9649
- BPF_EXIT_INSN(),
9650
- },
9651
- .errstr = "R1 offset is outside of the packet",
9652
- .result = REJECT,
9653
- .prog_type = BPF_PROG_TYPE_XDP,
9654
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9655
- },
9656
- {
9657
- "XDP pkt read, pkt_data < pkt_meta', good access",
9658
- .insns = {
9659
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9660
- offsetof(struct xdp_md, data_meta)),
9661
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9662
- offsetof(struct xdp_md, data)),
9663
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9664
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9665
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9666
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9667
- BPF_MOV64_IMM(BPF_REG_0, 0),
9668
- BPF_EXIT_INSN(),
9669
- },
9670
- .result = ACCEPT,
9671
- .prog_type = BPF_PROG_TYPE_XDP,
9672
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9673
- },
9674
- {
9675
- "XDP pkt read, pkt_data < pkt_meta', bad access 1",
9676
- .insns = {
9677
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9678
- offsetof(struct xdp_md, data_meta)),
9679
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9680
- offsetof(struct xdp_md, data)),
9681
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9682
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9683
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9684
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9685
- BPF_MOV64_IMM(BPF_REG_0, 0),
9686
- BPF_EXIT_INSN(),
9687
- },
9688
- .errstr = "R1 offset is outside of the packet",
9689
- .result = REJECT,
9690
- .prog_type = BPF_PROG_TYPE_XDP,
9691
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9692
- },
9693
- {
9694
- "XDP pkt read, pkt_data < pkt_meta', bad access 2",
9695
- .insns = {
9696
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9697
- offsetof(struct xdp_md, data_meta)),
9698
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9699
- offsetof(struct xdp_md, data)),
9700
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9701
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9702
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9703
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9704
- BPF_MOV64_IMM(BPF_REG_0, 0),
9705
- BPF_EXIT_INSN(),
9706
- },
9707
- .errstr = "R1 offset is outside of the packet",
9708
- .result = REJECT,
9709
- .prog_type = BPF_PROG_TYPE_XDP,
9710
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9711
- },
9712
- {
9713
- "XDP pkt read, pkt_meta' >= pkt_data, good access",
9714
- .insns = {
9715
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9716
- offsetof(struct xdp_md, data_meta)),
9717
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9718
- offsetof(struct xdp_md, data)),
9719
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9720
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9721
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9722
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9723
- BPF_MOV64_IMM(BPF_REG_0, 0),
9724
- BPF_EXIT_INSN(),
9725
- },
9726
- .result = ACCEPT,
9727
- .prog_type = BPF_PROG_TYPE_XDP,
9728
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9729
- },
9730
- {
9731
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
9732
- .insns = {
9733
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9734
- offsetof(struct xdp_md, data_meta)),
9735
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9736
- offsetof(struct xdp_md, data)),
9737
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9738
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9739
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9740
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9741
- BPF_MOV64_IMM(BPF_REG_0, 0),
9742
- BPF_EXIT_INSN(),
9743
- },
9744
- .errstr = "R1 offset is outside of the packet",
9745
- .result = REJECT,
9746
- .prog_type = BPF_PROG_TYPE_XDP,
9747
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9748
- },
9749
- {
9750
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
9751
- .insns = {
9752
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9753
- offsetof(struct xdp_md, data_meta)),
9754
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9755
- offsetof(struct xdp_md, data)),
9756
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9757
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9758
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9759
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9760
- BPF_MOV64_IMM(BPF_REG_0, 0),
9761
- BPF_EXIT_INSN(),
9762
- },
9763
- .errstr = "R1 offset is outside of the packet",
9764
- .result = REJECT,
9765
- .prog_type = BPF_PROG_TYPE_XDP,
9766
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9767
- },
9768
- {
9769
- "XDP pkt read, pkt_data >= pkt_meta', good access",
9770
- .insns = {
9771
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9772
- offsetof(struct xdp_md, data_meta)),
9773
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9774
- offsetof(struct xdp_md, data)),
9775
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9776
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9777
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9778
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9779
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9780
- BPF_MOV64_IMM(BPF_REG_0, 0),
9781
- BPF_EXIT_INSN(),
9782
- },
9783
- .result = ACCEPT,
9784
- .prog_type = BPF_PROG_TYPE_XDP,
9785
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9786
- },
9787
- {
9788
- "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
9789
- .insns = {
9790
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9791
- offsetof(struct xdp_md, data_meta)),
9792
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9793
- offsetof(struct xdp_md, data)),
9794
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9795
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9796
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9797
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9798
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9799
- BPF_MOV64_IMM(BPF_REG_0, 0),
9800
- BPF_EXIT_INSN(),
9801
- },
9802
- .errstr = "R1 offset is outside of the packet",
9803
- .result = REJECT,
9804
- .prog_type = BPF_PROG_TYPE_XDP,
9805
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9806
- },
9807
- {
9808
- "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
9809
- .insns = {
9810
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9811
- offsetof(struct xdp_md, data_meta)),
9812
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9813
- offsetof(struct xdp_md, data)),
9814
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9815
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9816
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9817
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9818
- BPF_MOV64_IMM(BPF_REG_0, 0),
9819
- BPF_EXIT_INSN(),
9820
- },
9821
- .errstr = "R1 offset is outside of the packet",
9822
- .result = REJECT,
9823
- .prog_type = BPF_PROG_TYPE_XDP,
9824
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9825
- },
9826
- {
9827
- "XDP pkt read, pkt_meta' <= pkt_data, good access",
9828
- .insns = {
9829
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9830
- offsetof(struct xdp_md, data_meta)),
9831
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9832
- offsetof(struct xdp_md, data)),
9833
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9834
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9835
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9836
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9837
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9838
- BPF_MOV64_IMM(BPF_REG_0, 0),
9839
- BPF_EXIT_INSN(),
9840
- },
9841
- .result = ACCEPT,
9842
- .prog_type = BPF_PROG_TYPE_XDP,
9843
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9844
- },
9845
- {
9846
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
9847
- .insns = {
9848
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9849
- offsetof(struct xdp_md, data_meta)),
9850
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9851
- offsetof(struct xdp_md, data)),
9852
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9853
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9854
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9855
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9856
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9857
- BPF_MOV64_IMM(BPF_REG_0, 0),
9858
- BPF_EXIT_INSN(),
9859
- },
9860
- .errstr = "R1 offset is outside of the packet",
9861
- .result = REJECT,
9862
- .prog_type = BPF_PROG_TYPE_XDP,
9863
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9864
- },
9865
- {
9866
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
9867
- .insns = {
9868
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9869
- offsetof(struct xdp_md, data_meta)),
9870
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9871
- offsetof(struct xdp_md, data)),
9872
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9873
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9874
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9875
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9876
- BPF_MOV64_IMM(BPF_REG_0, 0),
9877
- BPF_EXIT_INSN(),
9878
- },
9879
- .errstr = "R1 offset is outside of the packet",
9880
- .result = REJECT,
9881
- .prog_type = BPF_PROG_TYPE_XDP,
9882
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9883
- },
9884
- {
9885
- "XDP pkt read, pkt_data <= pkt_meta', good access",
9886
- .insns = {
9887
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9888
- offsetof(struct xdp_md, data_meta)),
9889
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9890
- offsetof(struct xdp_md, data)),
9891
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9892
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9893
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9894
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9895
- BPF_MOV64_IMM(BPF_REG_0, 0),
9896
- BPF_EXIT_INSN(),
9897
- },
9898
- .result = ACCEPT,
9899
- .prog_type = BPF_PROG_TYPE_XDP,
9900
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9901
- },
9902
- {
9903
- "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
9904
- .insns = {
9905
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9906
- offsetof(struct xdp_md, data_meta)),
9907
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9908
- offsetof(struct xdp_md, data)),
9909
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9910
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9911
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9912
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9913
- BPF_MOV64_IMM(BPF_REG_0, 0),
9914
- BPF_EXIT_INSN(),
9915
- },
9916
- .errstr = "R1 offset is outside of the packet",
9917
- .result = REJECT,
9918
- .prog_type = BPF_PROG_TYPE_XDP,
9919
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9920
- },
9921
- {
9922
- "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9923
- .insns = {
9924
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9925
- offsetof(struct xdp_md, data_meta)),
9926
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9927
- offsetof(struct xdp_md, data)),
9928
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9929
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9930
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9931
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9932
- BPF_MOV64_IMM(BPF_REG_0, 0),
9933
- BPF_EXIT_INSN(),
9934
- },
9935
- .errstr = "R1 offset is outside of the packet",
9936
- .result = REJECT,
9937
- .prog_type = BPF_PROG_TYPE_XDP,
9938
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9939
- },
9940
- {
9941
- "check deducing bounds from const, 1",
9942
- .insns = {
9943
- BPF_MOV64_IMM(BPF_REG_0, 1),
9944
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
9945
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9946
- BPF_EXIT_INSN(),
9947
- },
9948
- .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9949
- .errstr = "R0 tried to subtract pointer from scalar",
9950
- .result = REJECT,
9951
- },
9952
- {
9953
- "check deducing bounds from const, 2",
9954
- .insns = {
9955
- BPF_MOV64_IMM(BPF_REG_0, 1),
9956
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9957
- BPF_EXIT_INSN(),
9958
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
9959
- BPF_EXIT_INSN(),
9960
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9961
- BPF_EXIT_INSN(),
9962
- },
9963
- .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9964
- .result_unpriv = REJECT,
9965
- .result = ACCEPT,
9966
- .retval = 1,
9967
- },
9968
- {
9969
- "check deducing bounds from const, 3",
9970
- .insns = {
9971
- BPF_MOV64_IMM(BPF_REG_0, 0),
9972
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9973
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9974
- BPF_EXIT_INSN(),
9975
- },
9976
- .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9977
- .errstr = "R0 tried to subtract pointer from scalar",
9978
- .result = REJECT,
9979
- },
9980
- {
9981
- "check deducing bounds from const, 4",
9982
- .insns = {
9983
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9984
- BPF_MOV64_IMM(BPF_REG_0, 0),
9985
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9986
- BPF_EXIT_INSN(),
9987
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9988
- BPF_EXIT_INSN(),
9989
- BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
9990
- BPF_EXIT_INSN(),
9991
- },
9992
- .errstr_unpriv = "R6 has pointer with unsupported alu operation",
9993
- .result_unpriv = REJECT,
9994
- .result = ACCEPT,
9995
- },
9996
- {
9997
- "check deducing bounds from const, 5",
9998
- .insns = {
9999
- BPF_MOV64_IMM(BPF_REG_0, 0),
10000
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10001
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10002
- BPF_EXIT_INSN(),
10003
- },
10004
- .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10005
- .errstr = "R0 tried to subtract pointer from scalar",
10006
- .result = REJECT,
10007
- },
10008
- {
10009
- "check deducing bounds from const, 6",
10010
- .insns = {
10011
- BPF_MOV64_IMM(BPF_REG_0, 0),
10012
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10013
- BPF_EXIT_INSN(),
10014
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10015
- BPF_EXIT_INSN(),
10016
- },
10017
- .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10018
- .errstr = "R0 tried to subtract pointer from scalar",
10019
- .result = REJECT,
10020
- },
10021
- {
10022
- "check deducing bounds from const, 7",
10023
- .insns = {
10024
- BPF_MOV64_IMM(BPF_REG_0, ~0),
10025
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10026
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10027
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10028
- offsetof(struct __sk_buff, mark)),
10029
- BPF_EXIT_INSN(),
10030
- },
10031
- .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10032
- .errstr = "dereference of modified ctx ptr",
10033
- .result = REJECT,
10034
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10035
- },
10036
- {
10037
- "check deducing bounds from const, 8",
10038
- .insns = {
10039
- BPF_MOV64_IMM(BPF_REG_0, ~0),
10040
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10041
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10042
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10043
- offsetof(struct __sk_buff, mark)),
10044
- BPF_EXIT_INSN(),
10045
- },
10046
- .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10047
- .errstr = "dereference of modified ctx ptr",
10048
- .result = REJECT,
10049
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10050
- },
10051
- {
10052
- "check deducing bounds from const, 9",
10053
- .insns = {
10054
- BPF_MOV64_IMM(BPF_REG_0, 0),
10055
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10056
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10057
- BPF_EXIT_INSN(),
10058
- },
10059
- .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10060
- .errstr = "R0 tried to subtract pointer from scalar",
10061
- .result = REJECT,
10062
- },
10063
- {
10064
- "check deducing bounds from const, 10",
10065
- .insns = {
10066
- BPF_MOV64_IMM(BPF_REG_0, 0),
10067
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10068
- /* Marks reg as unknown. */
10069
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10070
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10071
- BPF_EXIT_INSN(),
10072
- },
10073
- .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10074
- .result = REJECT,
10075
- },
10076
- {
10077
- "bpf_exit with invalid return code. test1",
10078
- .insns = {
10079
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10080
- BPF_EXIT_INSN(),
10081
- },
10082
- .errstr = "R0 has value (0x0; 0xffffffff)",
10083
- .result = REJECT,
10084
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10085
- },
10086
- {
10087
- "bpf_exit with invalid return code. test2",
10088
- .insns = {
10089
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10090
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10091
- BPF_EXIT_INSN(),
10092
- },
10093
- .result = ACCEPT,
10094
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10095
- },
10096
- {
10097
- "bpf_exit with invalid return code. test3",
10098
- .insns = {
10099
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10100
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10101
- BPF_EXIT_INSN(),
10102
- },
10103
- .errstr = "R0 has value (0x0; 0x3)",
10104
- .result = REJECT,
10105
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10106
- },
10107
- {
10108
- "bpf_exit with invalid return code. test4",
10109
- .insns = {
10110
- BPF_MOV64_IMM(BPF_REG_0, 1),
10111
- BPF_EXIT_INSN(),
10112
- },
10113
- .result = ACCEPT,
10114
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10115
- },
10116
- {
10117
- "bpf_exit with invalid return code. test5",
10118
- .insns = {
10119
- BPF_MOV64_IMM(BPF_REG_0, 2),
10120
- BPF_EXIT_INSN(),
10121
- },
10122
- .errstr = "R0 has value (0x2; 0x0)",
10123
- .result = REJECT,
10124
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10125
- },
10126
- {
10127
- "bpf_exit with invalid return code. test6",
10128
- .insns = {
10129
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10130
- BPF_EXIT_INSN(),
10131
- },
10132
- .errstr = "R0 is not a known value (ctx)",
10133
- .result = REJECT,
10134
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10135
- },
10136
- {
10137
- "bpf_exit with invalid return code. test7",
10138
- .insns = {
10139
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10140
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10141
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10142
- BPF_EXIT_INSN(),
10143
- },
10144
- .errstr = "R0 has unknown scalar value",
10145
- .result = REJECT,
10146
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10147
- },
10148
- {
10149
- "calls: basic sanity",
10150
- .insns = {
10151
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10152
- BPF_MOV64_IMM(BPF_REG_0, 1),
10153
- BPF_EXIT_INSN(),
10154
- BPF_MOV64_IMM(BPF_REG_0, 2),
10155
- BPF_EXIT_INSN(),
10156
- },
10157
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10158
- .result = ACCEPT,
10159
- },
10160
- {
10161
- "calls: not on unpriviledged",
10162
- .insns = {
10163
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10164
- BPF_MOV64_IMM(BPF_REG_0, 1),
10165
- BPF_EXIT_INSN(),
10166
- BPF_MOV64_IMM(BPF_REG_0, 2),
10167
- BPF_EXIT_INSN(),
10168
- },
10169
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10170
- .result_unpriv = REJECT,
10171
- .result = ACCEPT,
10172
- .retval = 1,
10173
- },
10174
- {
10175
- "calls: div by 0 in subprog",
10176
- .insns = {
10177
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10178
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10179
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10180
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10181
- offsetof(struct __sk_buff, data_end)),
10182
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10183
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10184
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10185
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10186
- BPF_MOV64_IMM(BPF_REG_0, 1),
10187
- BPF_EXIT_INSN(),
10188
- BPF_MOV32_IMM(BPF_REG_2, 0),
10189
- BPF_MOV32_IMM(BPF_REG_3, 1),
10190
- BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10191
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10192
- offsetof(struct __sk_buff, data)),
10193
- BPF_EXIT_INSN(),
10194
- },
10195
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10196
- .result = ACCEPT,
10197
- .retval = 1,
10198
- },
10199
- {
10200
- "calls: multiple ret types in subprog 1",
10201
- .insns = {
10202
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10203
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10204
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10205
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10206
- offsetof(struct __sk_buff, data_end)),
10207
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10208
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10209
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10210
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10211
- BPF_MOV64_IMM(BPF_REG_0, 1),
10212
- BPF_EXIT_INSN(),
10213
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10214
- offsetof(struct __sk_buff, data)),
10215
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10216
- BPF_MOV32_IMM(BPF_REG_0, 42),
10217
- BPF_EXIT_INSN(),
10218
- },
10219
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10220
- .result = REJECT,
10221
- .errstr = "R0 invalid mem access 'inv'",
10222
- },
10223
- {
10224
- "calls: multiple ret types in subprog 2",
10225
- .insns = {
10226
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10227
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10228
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10229
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10230
- offsetof(struct __sk_buff, data_end)),
10231
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10232
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10233
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10234
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10235
- BPF_MOV64_IMM(BPF_REG_0, 1),
10236
- BPF_EXIT_INSN(),
10237
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10238
- offsetof(struct __sk_buff, data)),
10239
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10240
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10241
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10242
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10243
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10244
- BPF_LD_MAP_FD(BPF_REG_1, 0),
10245
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10246
- BPF_FUNC_map_lookup_elem),
10247
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10248
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10249
- offsetof(struct __sk_buff, data)),
10250
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10251
- BPF_EXIT_INSN(),
10252
- },
10253
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10254
- .fixup_map1 = { 16 },
10255
- .result = REJECT,
10256
- .errstr = "R0 min value is outside of the array range",
10257
- },
10258
- {
10259
- "calls: overlapping caller/callee",
10260
- .insns = {
10261
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10262
- BPF_MOV64_IMM(BPF_REG_0, 1),
10263
- BPF_EXIT_INSN(),
10264
- },
10265
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10266
- .errstr = "last insn is not an exit or jmp",
10267
- .result = REJECT,
10268
- },
10269
- {
10270
- "calls: wrong recursive calls",
10271
- .insns = {
10272
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10273
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10274
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10275
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10276
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10277
- BPF_MOV64_IMM(BPF_REG_0, 1),
10278
- BPF_EXIT_INSN(),
10279
- },
10280
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10281
- .errstr = "jump out of range",
10282
- .result = REJECT,
10283
- },
10284
- {
10285
- "calls: wrong src reg",
10286
- .insns = {
10287
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10288
- BPF_MOV64_IMM(BPF_REG_0, 1),
10289
- BPF_EXIT_INSN(),
10290
- },
10291
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10292
- .errstr = "BPF_CALL uses reserved fields",
10293
- .result = REJECT,
10294
- },
10295
- {
10296
- "calls: wrong off value",
10297
- .insns = {
10298
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10299
- BPF_MOV64_IMM(BPF_REG_0, 1),
10300
- BPF_EXIT_INSN(),
10301
- BPF_MOV64_IMM(BPF_REG_0, 2),
10302
- BPF_EXIT_INSN(),
10303
- },
10304
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10305
- .errstr = "BPF_CALL uses reserved fields",
10306
- .result = REJECT,
10307
- },
10308
- {
10309
- "calls: jump back loop",
10310
- .insns = {
10311
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10312
- BPF_MOV64_IMM(BPF_REG_0, 1),
10313
- BPF_EXIT_INSN(),
10314
- },
10315
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10316
- .errstr = "back-edge from insn 0 to 0",
10317
- .result = REJECT,
10318
- },
10319
- {
10320
- "calls: conditional call",
10321
- .insns = {
10322
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10323
- offsetof(struct __sk_buff, mark)),
10324
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10325
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10326
- BPF_MOV64_IMM(BPF_REG_0, 1),
10327
- BPF_EXIT_INSN(),
10328
- BPF_MOV64_IMM(BPF_REG_0, 2),
10329
- BPF_EXIT_INSN(),
10330
- },
10331
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10332
- .errstr = "jump out of range",
10333
- .result = REJECT,
10334
- },
10335
- {
10336
- "calls: conditional call 2",
10337
- .insns = {
10338
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10339
- offsetof(struct __sk_buff, mark)),
10340
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10341
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10342
- BPF_MOV64_IMM(BPF_REG_0, 1),
10343
- BPF_EXIT_INSN(),
10344
- BPF_MOV64_IMM(BPF_REG_0, 2),
10345
- BPF_EXIT_INSN(),
10346
- BPF_MOV64_IMM(BPF_REG_0, 3),
10347
- BPF_EXIT_INSN(),
10348
- },
10349
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10350
- .result = ACCEPT,
10351
- },
10352
- {
10353
- "calls: conditional call 3",
10354
- .insns = {
10355
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10356
- offsetof(struct __sk_buff, mark)),
10357
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10358
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10359
- BPF_MOV64_IMM(BPF_REG_0, 1),
10360
- BPF_EXIT_INSN(),
10361
- BPF_MOV64_IMM(BPF_REG_0, 1),
10362
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10363
- BPF_MOV64_IMM(BPF_REG_0, 3),
10364
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10365
- },
10366
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10367
- .errstr = "back-edge from insn",
10368
- .result = REJECT,
10369
- },
10370
- {
10371
- "calls: conditional call 4",
10372
- .insns = {
10373
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10374
- offsetof(struct __sk_buff, mark)),
10375
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10376
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10377
- BPF_MOV64_IMM(BPF_REG_0, 1),
10378
- BPF_EXIT_INSN(),
10379
- BPF_MOV64_IMM(BPF_REG_0, 1),
10380
- BPF_JMP_IMM(BPF_JA, 0, 0, -5),
10381
- BPF_MOV64_IMM(BPF_REG_0, 3),
10382
- BPF_EXIT_INSN(),
10383
- },
10384
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10385
- .result = ACCEPT,
10386
- },
10387
- {
10388
- "calls: conditional call 5",
10389
- .insns = {
10390
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10391
- offsetof(struct __sk_buff, mark)),
10392
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10393
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10394
- BPF_MOV64_IMM(BPF_REG_0, 1),
10395
- BPF_EXIT_INSN(),
10396
- BPF_MOV64_IMM(BPF_REG_0, 1),
10397
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10398
- BPF_MOV64_IMM(BPF_REG_0, 3),
10399
- BPF_EXIT_INSN(),
10400
- },
10401
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10402
- .errstr = "back-edge from insn",
10403
- .result = REJECT,
10404
- },
10405
- {
10406
- "calls: conditional call 6",
10407
- .insns = {
10408
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10409
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
10410
- BPF_EXIT_INSN(),
10411
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10412
- offsetof(struct __sk_buff, mark)),
10413
- BPF_EXIT_INSN(),
10414
- },
10415
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10416
- .errstr = "back-edge from insn",
10417
- .result = REJECT,
10418
- },
10419
- {
10420
- "calls: using r0 returned by callee",
10421
- .insns = {
10422
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10423
- BPF_EXIT_INSN(),
10424
- BPF_MOV64_IMM(BPF_REG_0, 2),
10425
- BPF_EXIT_INSN(),
10426
- },
10427
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10428
- .result = ACCEPT,
10429
- },
10430
- {
10431
- "calls: using uninit r0 from callee",
10432
- .insns = {
10433
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10434
- BPF_EXIT_INSN(),
10435
- BPF_EXIT_INSN(),
10436
- },
10437
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10438
- .errstr = "!read_ok",
10439
- .result = REJECT,
10440
- },
10441
- {
10442
- "calls: callee is using r1",
10443
- .insns = {
10444
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10445
- BPF_EXIT_INSN(),
10446
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10447
- offsetof(struct __sk_buff, len)),
10448
- BPF_EXIT_INSN(),
10449
- },
10450
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
10451
- .result = ACCEPT,
10452
- .retval = TEST_DATA_LEN,
10453
- },
10454
- {
10455
- "calls: callee using args1",
10456
- .insns = {
10457
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10458
- BPF_EXIT_INSN(),
10459
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10460
- BPF_EXIT_INSN(),
10461
- },
10462
- .errstr_unpriv = "allowed for root only",
10463
- .result_unpriv = REJECT,
10464
- .result = ACCEPT,
10465
- .retval = POINTER_VALUE,
10466
- },
10467
- {
10468
- "calls: callee using wrong args2",
10469
- .insns = {
10470
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10471
- BPF_EXIT_INSN(),
10472
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10473
- BPF_EXIT_INSN(),
10474
- },
10475
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10476
- .errstr = "R2 !read_ok",
10477
- .result = REJECT,
10478
- },
10479
- {
10480
- "calls: callee using two args",
10481
- .insns = {
10482
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10483
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
10484
- offsetof(struct __sk_buff, len)),
10485
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
10486
- offsetof(struct __sk_buff, len)),
10487
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10488
- BPF_EXIT_INSN(),
10489
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10490
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
10491
- BPF_EXIT_INSN(),
10492
- },
10493
- .errstr_unpriv = "allowed for root only",
10494
- .result_unpriv = REJECT,
10495
- .result = ACCEPT,
10496
- .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
10497
- },
10498
- {
10499
- "calls: callee changing pkt pointers",
10500
- .insns = {
10501
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
10502
- offsetof(struct xdp_md, data)),
10503
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
10504
- offsetof(struct xdp_md, data_end)),
10505
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
10506
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
10507
- BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
10508
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10509
- /* clear_all_pkt_pointers() has to walk all frames
10510
- * to make sure that pkt pointers in the caller
10511
- * are cleared when callee is calling a helper that
10512
- * adjusts packet size
10513
- */
10514
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10515
- BPF_MOV32_IMM(BPF_REG_0, 0),
10516
- BPF_EXIT_INSN(),
10517
- BPF_MOV64_IMM(BPF_REG_2, 0),
10518
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10519
- BPF_FUNC_xdp_adjust_head),
10520
- BPF_EXIT_INSN(),
10521
- },
10522
- .result = REJECT,
10523
- .errstr = "R6 invalid mem access 'inv'",
10524
- .prog_type = BPF_PROG_TYPE_XDP,
10525
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10526
- },
10527
- {
10528
- "calls: two calls with args",
10529
- .insns = {
10530
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10531
- BPF_EXIT_INSN(),
10532
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10533
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10534
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10535
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10536
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10537
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10538
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10539
- BPF_EXIT_INSN(),
10540
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10541
- offsetof(struct __sk_buff, len)),
10542
- BPF_EXIT_INSN(),
10543
- },
10544
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10545
- .result = ACCEPT,
10546
- .retval = TEST_DATA_LEN + TEST_DATA_LEN,
10547
- },
10548
- {
10549
- "calls: calls with stack arith",
10550
- .insns = {
10551
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10552
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10553
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10554
- BPF_EXIT_INSN(),
10555
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10556
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10557
- BPF_EXIT_INSN(),
10558
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10559
- BPF_MOV64_IMM(BPF_REG_0, 42),
10560
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10561
- BPF_EXIT_INSN(),
10562
- },
10563
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10564
- .result = ACCEPT,
10565
- .retval = 42,
10566
- },
10567
- {
10568
- "calls: calls with misaligned stack access",
10569
- .insns = {
10570
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10571
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10572
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10573
- BPF_EXIT_INSN(),
10574
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
10575
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10576
- BPF_EXIT_INSN(),
10577
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10578
- BPF_MOV64_IMM(BPF_REG_0, 42),
10579
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10580
- BPF_EXIT_INSN(),
10581
- },
10582
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10583
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
10584
- .errstr = "misaligned stack access",
10585
- .result = REJECT,
10586
- },
10587
- {
10588
- "calls: calls control flow, jump test",
10589
- .insns = {
10590
- BPF_MOV64_IMM(BPF_REG_0, 42),
10591
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10592
- BPF_MOV64_IMM(BPF_REG_0, 43),
10593
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10594
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10595
- BPF_EXIT_INSN(),
10596
- },
10597
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10598
- .result = ACCEPT,
10599
- .retval = 43,
10600
- },
10601
- {
10602
- "calls: calls control flow, jump test 2",
10603
- .insns = {
10604
- BPF_MOV64_IMM(BPF_REG_0, 42),
10605
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10606
- BPF_MOV64_IMM(BPF_REG_0, 43),
10607
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10608
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10609
- BPF_EXIT_INSN(),
10610
- },
10611
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10612
- .errstr = "jump out of range from insn 1 to 4",
10613
- .result = REJECT,
10614
- },
10615
- {
10616
- "calls: two calls with bad jump",
10617
- .insns = {
10618
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10619
- BPF_EXIT_INSN(),
10620
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10621
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10622
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10623
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10624
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10625
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10626
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10627
- BPF_EXIT_INSN(),
10628
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10629
- offsetof(struct __sk_buff, len)),
10630
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
10631
- BPF_EXIT_INSN(),
10632
- },
10633
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10634
- .errstr = "jump out of range from insn 11 to 9",
10635
- .result = REJECT,
10636
- },
10637
- {
10638
- "calls: recursive call. test1",
10639
- .insns = {
10640
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10641
- BPF_EXIT_INSN(),
10642
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10643
- BPF_EXIT_INSN(),
10644
- },
10645
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10646
- .errstr = "back-edge",
10647
- .result = REJECT,
10648
- },
10649
- {
10650
- "calls: recursive call. test2",
10651
- .insns = {
10652
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10653
- BPF_EXIT_INSN(),
10654
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10655
- BPF_EXIT_INSN(),
10656
- },
10657
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10658
- .errstr = "back-edge",
10659
- .result = REJECT,
10660
- },
10661
- {
10662
- "calls: unreachable code",
10663
- .insns = {
10664
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10665
- BPF_EXIT_INSN(),
10666
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10667
- BPF_EXIT_INSN(),
10668
- BPF_MOV64_IMM(BPF_REG_0, 0),
10669
- BPF_EXIT_INSN(),
10670
- BPF_MOV64_IMM(BPF_REG_0, 0),
10671
- BPF_EXIT_INSN(),
10672
- },
10673
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10674
- .errstr = "unreachable insn 6",
10675
- .result = REJECT,
10676
- },
10677
- {
10678
- "calls: invalid call",
10679
- .insns = {
10680
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10681
- BPF_EXIT_INSN(),
10682
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
10683
- BPF_EXIT_INSN(),
10684
- },
10685
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10686
- .errstr = "invalid destination",
10687
- .result = REJECT,
10688
- },
10689
- {
10690
- "calls: invalid call 2",
10691
- .insns = {
10692
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10693
- BPF_EXIT_INSN(),
10694
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
10695
- BPF_EXIT_INSN(),
10696
- },
10697
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10698
- .errstr = "invalid destination",
10699
- .result = REJECT,
10700
- },
10701
- {
10702
- "calls: jumping across function bodies. test1",
10703
- .insns = {
10704
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10705
- BPF_MOV64_IMM(BPF_REG_0, 0),
10706
- BPF_EXIT_INSN(),
10707
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
10708
- BPF_EXIT_INSN(),
10709
- },
10710
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10711
- .errstr = "jump out of range",
10712
- .result = REJECT,
10713
- },
10714
- {
10715
- "calls: jumping across function bodies. test2",
10716
- .insns = {
10717
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
10718
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10719
- BPF_MOV64_IMM(BPF_REG_0, 0),
10720
- BPF_EXIT_INSN(),
10721
- BPF_EXIT_INSN(),
10722
- },
10723
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10724
- .errstr = "jump out of range",
10725
- .result = REJECT,
10726
- },
10727
- {
10728
- "calls: call without exit",
10729
- .insns = {
10730
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10731
- BPF_EXIT_INSN(),
10732
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10733
- BPF_EXIT_INSN(),
10734
- BPF_MOV64_IMM(BPF_REG_0, 0),
10735
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
10736
- },
10737
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10738
- .errstr = "not an exit",
10739
- .result = REJECT,
10740
- },
10741
- {
10742
- "calls: call into middle of ld_imm64",
10743
- .insns = {
10744
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10745
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10746
- BPF_MOV64_IMM(BPF_REG_0, 0),
10747
- BPF_EXIT_INSN(),
10748
- BPF_LD_IMM64(BPF_REG_0, 0),
10749
- BPF_EXIT_INSN(),
10750
- },
10751
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10752
- .errstr = "last insn",
10753
- .result = REJECT,
10754
- },
10755
- {
10756
- "calls: call into middle of other call",
10757
- .insns = {
10758
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10759
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10760
- BPF_MOV64_IMM(BPF_REG_0, 0),
10761
- BPF_EXIT_INSN(),
10762
- BPF_MOV64_IMM(BPF_REG_0, 0),
10763
- BPF_MOV64_IMM(BPF_REG_0, 0),
10764
- BPF_EXIT_INSN(),
10765
- },
10766
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10767
- .errstr = "last insn",
10768
- .result = REJECT,
10769
- },
10770
- {
10771
- "calls: ld_abs with changing ctx data in callee",
10772
- .insns = {
10773
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10774
- BPF_LD_ABS(BPF_B, 0),
10775
- BPF_LD_ABS(BPF_H, 0),
10776
- BPF_LD_ABS(BPF_W, 0),
10777
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
10778
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10779
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
10780
- BPF_LD_ABS(BPF_B, 0),
10781
- BPF_LD_ABS(BPF_H, 0),
10782
- BPF_LD_ABS(BPF_W, 0),
10783
- BPF_EXIT_INSN(),
10784
- BPF_MOV64_IMM(BPF_REG_2, 1),
10785
- BPF_MOV64_IMM(BPF_REG_3, 2),
10786
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10787
- BPF_FUNC_skb_vlan_push),
10788
- BPF_EXIT_INSN(),
10789
- },
10790
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10791
- .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
10792
- .result = REJECT,
10793
- },
10794
- {
10795
- "calls: two calls with bad fallthrough",
10796
- .insns = {
10797
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10798
- BPF_EXIT_INSN(),
10799
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10800
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10801
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10802
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10803
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10804
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10805
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10806
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
10807
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10808
- offsetof(struct __sk_buff, len)),
10809
- BPF_EXIT_INSN(),
10810
- },
10811
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10812
- .errstr = "not an exit",
10813
- .result = REJECT,
10814
- },
10815
- {
10816
- "calls: two calls with stack read",
10817
- .insns = {
10818
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10819
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10820
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10821
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10822
- BPF_EXIT_INSN(),
10823
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10824
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10825
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10826
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10827
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10828
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10829
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10830
- BPF_EXIT_INSN(),
10831
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10832
- BPF_EXIT_INSN(),
10833
- },
10834
- .prog_type = BPF_PROG_TYPE_XDP,
10835
- .result = ACCEPT,
10836
- },
10837
- {
10838
- "calls: two calls with stack write",
10839
- .insns = {
10840
- /* main prog */
10841
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10842
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10843
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10844
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10845
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10846
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10847
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10848
- BPF_EXIT_INSN(),
251
+ while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
252
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
253
+ insn[i] = BPF_EXIT_INSN();
254
+ self->prog_len = i + 1;
255
+ self->retval = 42;
256
+}
10849257
10850
- /* subprog 1 */
10851
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10852
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10853
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
10854
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
10855
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10856
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10857
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
10858
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
10859
- /* write into stack frame of main prog */
10860
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10861
- BPF_EXIT_INSN(),
258
+/* test the sequence of 8k jumps in the innermost function (function depth 8) */
259
+static void bpf_fill_scale2(struct bpf_test *self)
260
+{
261
+ struct bpf_insn *insn = self->fill_insns;
262
+ int i = 0, k = 0;
10862263
10863
- /* subprog 2 */
10864
- /* read from stack frame of main prog */
10865
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10866
- BPF_EXIT_INSN(),
10867
- },
10868
- .prog_type = BPF_PROG_TYPE_XDP,
10869
- .result = ACCEPT,
10870
- },
10871
- {
10872
- "calls: stack overflow using two frames (pre-call access)",
10873
- .insns = {
10874
- /* prog 1 */
10875
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10876
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
10877
- BPF_EXIT_INSN(),
264
+#define FUNC_NEST 7
265
+ for (k = 0; k < FUNC_NEST; k++) {
266
+ insn[i++] = BPF_CALL_REL(1);
267
+ insn[i++] = BPF_EXIT_INSN();
268
+ }
269
+ insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
270
+ /* test to check that the long sequence of jumps is acceptable */
271
+ k = 0;
272
+ while (k++ < MAX_JMP_SEQ) {
273
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
274
+ BPF_FUNC_get_prandom_u32);
275
+ insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
276
+ insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
277
+ insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
278
+ -8 * (k % (64 - 4 * FUNC_NEST) + 1));
279
+ }
280
+ while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
281
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
282
+ insn[i] = BPF_EXIT_INSN();
283
+ self->prog_len = i + 1;
284
+ self->retval = 42;
285
+}
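For orientation, both scale generators above write into fill_insns and report the final length through prog_len. Below is a minimal caller-side sketch of how such a fill helper is presumably driven; the MAX_TEST_INSNS-sized allocation reflects harness code outside this hunk, and prepare_filled_test() is a hypothetical name used only for illustration.

/* Hedged sketch, not part of the diff: how a fill helper is assumed to be
 * invoked by the test harness. MAX_TEST_INSNS sizes the scratch buffer the
 * generators above index into.
 */
static void prepare_filled_test(struct bpf_test *test)
{
	if (!test->fill_helper)
		return;
	test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
	assert(test->fill_insns);
	test->fill_helper(test);	/* e.g. bpf_fill_scale() sets prog_len and retval */
}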
10878286
10879
- /* prog 2 */
10880
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10881
- BPF_MOV64_IMM(BPF_REG_0, 0),
10882
- BPF_EXIT_INSN(),
10883
- },
10884
- .prog_type = BPF_PROG_TYPE_XDP,
10885
- .errstr = "combined stack size",
10886
- .result = REJECT,
10887
- },
10888
- {
10889
- "calls: stack overflow using two frames (post-call access)",
10890
- .insns = {
10891
- /* prog 1 */
10892
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
10893
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10894
- BPF_EXIT_INSN(),
287
+static void bpf_fill_scale(struct bpf_test *self)
288
+{
289
+ switch (self->retval) {
290
+ case 1:
291
+ return bpf_fill_scale1(self);
292
+ case 2:
293
+ return bpf_fill_scale2(self);
294
+ default:
295
+ self->prog_len = 0;
296
+ break;
297
+ }
298
+}
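A hypothetical table entry showing how bpf_fill_scale() is meant to be wired is sketched below. The test name is illustrative; only the dispatch-on-.retval behaviour (and the fact that the generators then reset .retval to the expected runtime value 42) is taken from the code above.

/* Illustrative only: a bpf_test entry that exercises the generator above.
 * bpf_fill_scale() dispatches on .retval (1 -> bpf_fill_scale1,
 * 2 -> bpf_fill_scale2), fills .fill_insns / .prog_len itself and then
 * sets .retval to the value the generated program returns (42).
 */
{
	"scale: example entry (illustrative)",
	.insns = { },
	.fill_helper = bpf_fill_scale,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
	.retval = 2,	/* select bpf_fill_scale2() */
},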
10895299
10896
- /* prog 2 */
10897
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10898
- BPF_MOV64_IMM(BPF_REG_0, 0),
10899
- BPF_EXIT_INSN(),
10900
- },
10901
- .prog_type = BPF_PROG_TYPE_XDP,
10902
- .errstr = "combined stack size",
10903
- .result = REJECT,
10904
- },
10905
- {
10906
- "calls: stack depth check using three frames. test1",
10907
- .insns = {
10908
- /* main */
10909
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10910
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10911
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10912
- BPF_MOV64_IMM(BPF_REG_0, 0),
10913
- BPF_EXIT_INSN(),
10914
- /* A */
10915
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10916
- BPF_EXIT_INSN(),
10917
- /* B */
10918
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10919
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10920
- BPF_EXIT_INSN(),
10921
- },
10922
- .prog_type = BPF_PROG_TYPE_XDP,
10923
- /* stack_main=32, stack_A=256, stack_B=64
10924
- * and max(main+A, main+A+B) < 512
10925
- */
10926
- .result = ACCEPT,
10927
- },
10928
- {
10929
- "calls: stack depth check using three frames. test2",
10930
- .insns = {
10931
- /* main */
10932
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10933
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10934
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10935
- BPF_MOV64_IMM(BPF_REG_0, 0),
10936
- BPF_EXIT_INSN(),
10937
- /* A */
10938
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10939
- BPF_EXIT_INSN(),
10940
- /* B */
10941
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10942
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10943
- BPF_EXIT_INSN(),
10944
- },
10945
- .prog_type = BPF_PROG_TYPE_XDP,
10946
- /* stack_main=32, stack_A=64, stack_B=256
10947
- * and max(main+A, main+A+B) < 512
10948
- */
10949
- .result = ACCEPT,
10950
- },
10951
- {
10952
- "calls: stack depth check using three frames. test3",
10953
- .insns = {
10954
- /* main */
10955
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10956
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10957
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10958
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
10959
- BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
10960
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10961
- BPF_MOV64_IMM(BPF_REG_0, 0),
10962
- BPF_EXIT_INSN(),
10963
- /* A */
10964
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10965
- BPF_EXIT_INSN(),
10966
- BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10967
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10968
- /* B */
10969
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
10970
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
10971
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10972
- BPF_EXIT_INSN(),
10973
- },
10974
- .prog_type = BPF_PROG_TYPE_XDP,
10975
- /* stack_main=64, stack_A=224, stack_B=256
10976
- * and max(main+A, main+A+B) > 512
10977
- */
10978
- .errstr = "combined stack",
10979
- .result = REJECT,
10980
- },
10981
- {
10982
- "calls: stack depth check using three frames. test4",
10983
- /* void main(void) {
10984
- * func1(0);
10985
- * func1(1);
10986
- * func2(1);
10987
- * }
10988
- * void func1(int alloc_or_recurse) {
10989
- * if (alloc_or_recurse) {
10990
- * frame_pointer[-300] = 1;
10991
- * } else {
10992
- * func2(alloc_or_recurse);
10993
- * }
10994
- * }
10995
- * void func2(int alloc_or_recurse) {
10996
- * if (alloc_or_recurse) {
10997
- * frame_pointer[-300] = 1;
10998
- * }
10999
- * }
11000
- */
11001
- .insns = {
11002
- /* main */
11003
- BPF_MOV64_IMM(BPF_REG_1, 0),
11004
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11005
- BPF_MOV64_IMM(BPF_REG_1, 1),
11006
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11007
- BPF_MOV64_IMM(BPF_REG_1, 1),
11008
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11009
- BPF_MOV64_IMM(BPF_REG_0, 0),
11010
- BPF_EXIT_INSN(),
11011
- /* A */
11012
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11013
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11014
- BPF_EXIT_INSN(),
11015
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11016
- BPF_EXIT_INSN(),
11017
- /* B */
11018
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11019
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11020
- BPF_EXIT_INSN(),
11021
- },
11022
- .prog_type = BPF_PROG_TYPE_XDP,
11023
- .result = REJECT,
11024
- .errstr = "combined stack",
11025
- },
11026
- {
11027
- "calls: stack depth check using three frames. test5",
11028
- .insns = {
11029
- /* main */
11030
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11031
- BPF_EXIT_INSN(),
11032
- /* A */
11033
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11034
- BPF_EXIT_INSN(),
11035
- /* B */
11036
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11037
- BPF_EXIT_INSN(),
11038
- /* C */
11039
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11040
- BPF_EXIT_INSN(),
11041
- /* D */
11042
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11043
- BPF_EXIT_INSN(),
11044
- /* E */
11045
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11046
- BPF_EXIT_INSN(),
11047
- /* F */
11048
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11049
- BPF_EXIT_INSN(),
11050
- /* G */
11051
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11052
- BPF_EXIT_INSN(),
11053
- /* H */
11054
- BPF_MOV64_IMM(BPF_REG_0, 0),
11055
- BPF_EXIT_INSN(),
11056
- },
11057
- .prog_type = BPF_PROG_TYPE_XDP,
11058
- .errstr = "call stack",
11059
- .result = REJECT,
11060
- },
11061
- {
11062
- "calls: spill into caller stack frame",
11063
- .insns = {
11064
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11065
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11066
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11067
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11068
- BPF_EXIT_INSN(),
11069
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11070
- BPF_MOV64_IMM(BPF_REG_0, 0),
11071
- BPF_EXIT_INSN(),
11072
- },
11073
- .prog_type = BPF_PROG_TYPE_XDP,
11074
- .errstr = "cannot spill",
11075
- .result = REJECT,
11076
- },
11077
- {
11078
- "calls: write into caller stack frame",
11079
- .insns = {
11080
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11081
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11082
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11083
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11084
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11085
- BPF_EXIT_INSN(),
11086
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11087
- BPF_MOV64_IMM(BPF_REG_0, 0),
11088
- BPF_EXIT_INSN(),
11089
- },
11090
- .prog_type = BPF_PROG_TYPE_XDP,
11091
- .result = ACCEPT,
11092
- .retval = 42,
11093
- },
11094
- {
11095
- "calls: write into callee stack frame",
11096
- .insns = {
11097
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11098
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11099
- BPF_EXIT_INSN(),
11100
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11101
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11102
- BPF_EXIT_INSN(),
11103
- },
11104
- .prog_type = BPF_PROG_TYPE_XDP,
11105
- .errstr = "cannot return stack pointer",
11106
- .result = REJECT,
11107
- },
11108
- {
11109
- "calls: two calls with stack write and void return",
11110
- .insns = {
11111
- /* main prog */
11112
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11113
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11114
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11115
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11116
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11117
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11118
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11119
- BPF_EXIT_INSN(),
300
+/* BPF_SK_LOOKUP expands to 13 instructions; account for that when fixing up map offsets */
301
+#define BPF_SK_LOOKUP(func) \
302
+ /* struct bpf_sock_tuple tuple = {} */ \
303
+ BPF_MOV64_IMM(BPF_REG_2, 0), \
304
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
305
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
306
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
307
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
308
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
309
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
310
+ /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
311
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
312
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
313
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
314
+ BPF_MOV64_IMM(BPF_REG_4, 0), \
315
+ BPF_MOV64_IMM(BPF_REG_5, 0), \
316
+ BPF_EMIT_CALL(BPF_FUNC_ ## func)
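A hedged usage sketch follows: the macro is meant to be dropped into a test's .insns array, with the concrete lookup helper passed as the macro argument. The sk_lookup_tcp/sk_release pairing shown here is illustrative, not taken from this hunk.

/* Illustrative .insns fragment (not part of the diff): look up a TCP socket
 * and release it if one was found, so the verifier sees the acquired
 * reference being balanced before exit.
 */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),	/* if (sk == NULL) skip the release */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),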
11120317
11121
- /* subprog 1 */
11122
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11123
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11124
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11125
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11126
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11127
- BPF_EXIT_INSN(),
318
+/* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return
319
+ * value into 0 and does necessary preparation for direct packet access
320
+ * through r2. The allowed access range is 8 bytes.
321
+ */
322
+#define BPF_DIRECT_PKT_R2 \
323
+ BPF_MOV64_IMM(BPF_REG_0, 0), \
324
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
325
+ offsetof(struct __sk_buff, data)), \
326
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
327
+ offsetof(struct __sk_buff, data_end)), \
328
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
329
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
330
+ BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
331
+ BPF_EXIT_INSN()
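A hedged usage sketch: after BPF_DIRECT_PKT_R2 the verifier has proven [r2, r2+8) readable, so loads up to offset 7 pass while offset 8 would be rejected. The surrounding test fields are omitted here.

/* Illustrative .insns fragment (not part of the diff): read the last byte
 * of the 8-byte window that BPF_DIRECT_PKT_R2 validated through r2.
 */
	BPF_DIRECT_PKT_R2,
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 7),	/* in range: offsets 0..7 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),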
11128332
11129
- /* subprog 2 */
11130
- /* write into stack frame of main prog */
11131
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11132
- BPF_EXIT_INSN(), /* void return */
11133
- },
11134
- .prog_type = BPF_PROG_TYPE_XDP,
11135
- .result = ACCEPT,
11136
- },
11137
- {
11138
- "calls: ambiguous return value",
11139
- .insns = {
11140
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11141
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11142
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11143
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11144
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11145
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11146
- BPF_EXIT_INSN(),
11147
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11148
- BPF_MOV64_IMM(BPF_REG_0, 0),
11149
- BPF_EXIT_INSN(),
11150
- },
11151
- .errstr_unpriv = "allowed for root only",
11152
- .result_unpriv = REJECT,
11153
- .errstr = "R0 !read_ok",
11154
- .result = REJECT,
11155
- },
11156
- {
11157
- "calls: two calls that return map_value",
11158
- .insns = {
11159
- /* main prog */
11160
- /* pass fp-16, fp-8 into a function */
11161
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11162
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11163
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11164
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11165
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
333
+/* BPF_RAND_UEXT_R7 contains 4 instructions; it initializes R7 to a random
334
+ * positive u32 and zero-extends it to 64 bits.
335
+ */
336
+#define BPF_RAND_UEXT_R7 \
337
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
338
+ BPF_FUNC_get_prandom_u32), \
339
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
340
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
341
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
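The shift pair can be modelled in plain C to see why the result is always a non-negative value below 2^31; the helper name below is hypothetical and only mirrors the two ALU64 instructions above.

/* Hedged model of BPF_RAND_UEXT_R7 (not part of the diff): a left shift then
 * a logical right shift by 33 keeps only bits 0..30 of the random value,
 * so r7 always lands in [0, 0x7fffffff].
 */
static inline __u64 rand_uext_r7_model(__u64 prandom)
{
	__u64 r7 = prandom;

	r7 <<= 33;	/* BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33) */
	r7 >>= 33;	/* BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33) */
	return r7;
}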
11166342
11167
- /* fetch map_value_ptr from the stack of this function */
11168
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11169
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11170
- /* write into map value */
11171
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11172
- /* fetch second map_value_ptr from the stack */
11173
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11174
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11175
- /* write into map value */
11176
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11177
- BPF_MOV64_IMM(BPF_REG_0, 0),
11178
- BPF_EXIT_INSN(),
343
+/* BPF_RAND_SEXT_R7 contains 5 instructions; it initializes R7 to a random
344
+ * negative u32 and sign-extends it to 64 bits.
345
+ */
346
+#define BPF_RAND_SEXT_R7 \
347
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
348
+ BPF_FUNC_get_prandom_u32), \
349
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
350
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
351
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
352
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
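The same kind of C model for the signed variant: forcing bit 31 and then doing LSH 32 / ARSH 32 yields a sign-extended negative 64-bit value. The function name is hypothetical, and the model relies on arithmetic right shift of signed values, matching what BPF_ARSH does.

/* Hedged model of BPF_RAND_SEXT_R7 (not part of the diff): bit 31 is forced
 * on, then LSH 32 + ARSH 32 replicate it into the upper half, so the result
 * is always negative when read as a 64-bit signed value.
 */
static inline __s64 rand_sext_r7_model(__u64 prandom)
{
	__u64 r7 = prandom | 0x80000000;	/* BPF_ALU64_IMM(BPF_OR, ..., 0x80000000) */

	return (__s64)(r7 << 32) >> 32;		/* BPF_LSH 32 then BPF_ARSH 32 */
}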
11179353
11180
- /* subprog 1 */
11181
- /* call 3rd function twice */
11182
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11183
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11184
- /* first time with fp-8 */
11185
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11186
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11187
- /* second time with fp-16 */
11188
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11189
- BPF_EXIT_INSN(),
11190
-
11191
- /* subprog 2 */
11192
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11193
- /* lookup from map */
11194
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11195
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11196
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11197
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11198
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11199
- BPF_FUNC_map_lookup_elem),
11200
- /* write map_value_ptr into stack frame of main prog */
11201
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11202
- BPF_MOV64_IMM(BPF_REG_0, 0),
11203
- BPF_EXIT_INSN(), /* return 0 */
11204
- },
11205
- .prog_type = BPF_PROG_TYPE_XDP,
11206
- .fixup_map1 = { 23 },
11207
- .result = ACCEPT,
11208
- },
11209
- {
11210
- "calls: two calls that return map_value with bool condition",
11211
- .insns = {
11212
- /* main prog */
11213
- /* pass fp-16, fp-8 into a function */
11214
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11215
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11216
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11217
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11218
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11219
- BPF_MOV64_IMM(BPF_REG_0, 0),
11220
- BPF_EXIT_INSN(),
11221
-
11222
- /* subprog 1 */
11223
- /* call 3rd function twice */
11224
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11225
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11226
- /* first time with fp-8 */
11227
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11228
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11229
- /* fetch map_value_ptr from the stack of this function */
11230
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11231
- /* write into map value */
11232
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11233
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11234
- /* second time with fp-16 */
11235
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11236
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11237
- /* fetch secound map_value_ptr from the stack */
11238
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11239
- /* write into map value */
11240
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11241
- BPF_EXIT_INSN(),
11242
-
11243
- /* subprog 2 */
11244
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11245
- /* lookup from map */
11246
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11247
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11248
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11249
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11250
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11251
- BPF_FUNC_map_lookup_elem),
11252
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11253
- BPF_MOV64_IMM(BPF_REG_0, 0),
11254
- BPF_EXIT_INSN(), /* return 0 */
11255
- /* write map_value_ptr into stack frame of main prog */
11256
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11257
- BPF_MOV64_IMM(BPF_REG_0, 1),
11258
- BPF_EXIT_INSN(), /* return 1 */
11259
- },
11260
- .prog_type = BPF_PROG_TYPE_XDP,
11261
- .fixup_map1 = { 23 },
11262
- .result = ACCEPT,
11263
- },
11264
- {
11265
- "calls: two calls that return map_value with incorrect bool check",
11266
- .insns = {
11267
- /* main prog */
11268
- /* pass fp-16, fp-8 into a function */
11269
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11270
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11271
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11272
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11273
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11274
- BPF_MOV64_IMM(BPF_REG_0, 0),
11275
- BPF_EXIT_INSN(),
11276
-
11277
- /* subprog 1 */
11278
- /* call 3rd function twice */
11279
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11280
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11281
- /* first time with fp-8 */
11282
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11283
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11284
- /* fetch map_value_ptr from the stack of this function */
11285
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11286
- /* write into map value */
11287
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11288
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11289
- /* second time with fp-16 */
11290
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11291
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11292
- /* fetch secound map_value_ptr from the stack */
11293
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11294
- /* write into map value */
11295
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11296
- BPF_EXIT_INSN(),
11297
-
11298
- /* subprog 2 */
11299
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11300
- /* lookup from map */
11301
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11302
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11303
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11304
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11305
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11306
- BPF_FUNC_map_lookup_elem),
11307
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11308
- BPF_MOV64_IMM(BPF_REG_0, 0),
11309
- BPF_EXIT_INSN(), /* return 0 */
11310
- /* write map_value_ptr into stack frame of main prog */
11311
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11312
- BPF_MOV64_IMM(BPF_REG_0, 1),
11313
- BPF_EXIT_INSN(), /* return 1 */
11314
- },
11315
- .prog_type = BPF_PROG_TYPE_XDP,
11316
- .fixup_map1 = { 23 },
11317
- .result = REJECT,
11318
- .errstr = "invalid read from stack off -16+0 size 8",
11319
- },
11320
- {
11321
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11322
- .insns = {
11323
- /* main prog */
11324
- /* pass fp-16, fp-8 into a function */
11325
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11326
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11327
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11328
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11329
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11330
- BPF_MOV64_IMM(BPF_REG_0, 0),
11331
- BPF_EXIT_INSN(),
11332
-
11333
- /* subprog 1 */
11334
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11335
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11336
- /* 1st lookup from map */
11337
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11338
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11339
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11340
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11341
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11342
- BPF_FUNC_map_lookup_elem),
11343
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11344
- BPF_MOV64_IMM(BPF_REG_8, 0),
11345
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11346
- /* write map_value_ptr into stack frame of main prog at fp-8 */
11347
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11348
- BPF_MOV64_IMM(BPF_REG_8, 1),
11349
-
11350
- /* 2nd lookup from map */
11351
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11352
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11353
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11354
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11355
- BPF_FUNC_map_lookup_elem),
11356
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11357
- BPF_MOV64_IMM(BPF_REG_9, 0),
11358
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11359
- /* write map_value_ptr into stack frame of main prog at fp-16 */
11360
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11361
- BPF_MOV64_IMM(BPF_REG_9, 1),
11362
-
11363
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11364
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11365
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11366
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11367
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11368
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11369
- BPF_EXIT_INSN(),
11370
-
11371
- /* subprog 2 */
11372
- /* if arg2 == 1 do *arg1 = 0 */
11373
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11374
- /* fetch map_value_ptr from the stack of this function */
11375
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11376
- /* write into map value */
11377
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11378
-
11379
- /* if arg4 == 1 do *arg3 = 0 */
11380
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11381
- /* fetch map_value_ptr from the stack of this function */
11382
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11383
- /* write into map value */
11384
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11385
- BPF_EXIT_INSN(),
11386
- },
11387
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11388
- .fixup_map1 = { 12, 22 },
11389
- .result = REJECT,
11390
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11391
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11392
- },
11393
- {
11394
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
11395
- .insns = {
11396
- /* main prog */
11397
- /* pass fp-16, fp-8 into a function */
11398
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11399
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11400
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11401
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11402
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11403
- BPF_MOV64_IMM(BPF_REG_0, 0),
11404
- BPF_EXIT_INSN(),
11405
-
11406
- /* subprog 1 */
11407
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11408
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11409
- /* 1st lookup from map */
11410
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11411
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11412
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11413
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11414
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11415
- BPF_FUNC_map_lookup_elem),
11416
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11417
- BPF_MOV64_IMM(BPF_REG_8, 0),
11418
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11419
- /* write map_value_ptr into stack frame of main prog at fp-8 */
11420
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11421
- BPF_MOV64_IMM(BPF_REG_8, 1),
11422
-
11423
- /* 2nd lookup from map */
11424
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11425
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11426
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11427
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11428
- BPF_FUNC_map_lookup_elem),
11429
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11430
- BPF_MOV64_IMM(BPF_REG_9, 0),
11431
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11432
- /* write map_value_ptr into stack frame of main prog at fp-16 */
11433
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11434
- BPF_MOV64_IMM(BPF_REG_9, 1),
11435
-
11436
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11437
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11438
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11439
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11440
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11441
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11442
- BPF_EXIT_INSN(),
11443
-
11444
- /* subprog 2 */
11445
- /* if arg2 == 1 do *arg1 = 0 */
11446
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11447
- /* fetch map_value_ptr from the stack of this function */
11448
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11449
- /* write into map value */
11450
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11451
-
11452
- /* if arg4 == 1 do *arg3 = 0 */
11453
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11454
- /* fetch map_value_ptr from the stack of this function */
11455
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11456
- /* write into map value */
11457
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11458
- BPF_EXIT_INSN(),
11459
- },
11460
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11461
- .fixup_map1 = { 12, 22 },
11462
- .result = ACCEPT,
11463
- },
11464
- {
11465
- "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
11466
- .insns = {
11467
- /* main prog */
11468
- /* pass fp-16, fp-8 into a function */
11469
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11470
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11471
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11472
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11473
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11474
- BPF_MOV64_IMM(BPF_REG_0, 0),
11475
- BPF_EXIT_INSN(),
11476
-
11477
- /* subprog 1 */
11478
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11479
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11480
- /* 1st lookup from map */
11481
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
11482
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11483
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11484
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11485
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11486
- BPF_FUNC_map_lookup_elem),
11487
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11488
- BPF_MOV64_IMM(BPF_REG_8, 0),
11489
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11490
- /* write map_value_ptr into stack frame of main prog at fp-8 */
11491
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11492
- BPF_MOV64_IMM(BPF_REG_8, 1),
11493
-
11494
- /* 2nd lookup from map */
11495
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11496
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11497
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11498
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11499
- BPF_FUNC_map_lookup_elem),
11500
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11501
- BPF_MOV64_IMM(BPF_REG_9, 0), // 26
11502
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11503
- /* write map_value_ptr into stack frame of main prog at fp-16 */
11504
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11505
- BPF_MOV64_IMM(BPF_REG_9, 1),
11506
-
11507
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11508
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
11509
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11510
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11511
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11512
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
11513
- BPF_JMP_IMM(BPF_JA, 0, 0, -30),
11514
-
11515
- /* subprog 2 */
11516
- /* if arg2 == 1 do *arg1 = 0 */
11517
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11518
- /* fetch map_value_ptr from the stack of this function */
11519
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11520
- /* write into map value */
11521
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11522
-
11523
- /* if arg4 == 1 do *arg3 = 0 */
11524
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11525
- /* fetch map_value_ptr from the stack of this function */
11526
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11527
- /* write into map value */
11528
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11529
- BPF_JMP_IMM(BPF_JA, 0, 0, -8),
11530
- },
11531
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11532
- .fixup_map1 = { 12, 22 },
11533
- .result = REJECT,
11534
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11535
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11536
- },
11537
- {
11538
- "calls: two calls that receive map_value_ptr_or_null via arg. test1",
11539
- .insns = {
11540
- /* main prog */
11541
- /* pass fp-16, fp-8 into a function */
11542
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11543
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11544
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11545
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11546
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11547
- BPF_MOV64_IMM(BPF_REG_0, 0),
11548
- BPF_EXIT_INSN(),
11549
-
11550
- /* subprog 1 */
11551
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11552
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11553
- /* 1st lookup from map */
11554
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11555
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11556
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11557
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11558
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11559
- BPF_FUNC_map_lookup_elem),
11560
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11561
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11562
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11563
- BPF_MOV64_IMM(BPF_REG_8, 0),
11564
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11565
- BPF_MOV64_IMM(BPF_REG_8, 1),
11566
-
11567
- /* 2nd lookup from map */
11568
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11569
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11570
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11571
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11572
- BPF_FUNC_map_lookup_elem),
11573
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11574
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11575
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11576
- BPF_MOV64_IMM(BPF_REG_9, 0),
11577
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11578
- BPF_MOV64_IMM(BPF_REG_9, 1),
11579
-
11580
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11581
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11582
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11583
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11584
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11585
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11586
- BPF_EXIT_INSN(),
11587
-
11588
- /* subprog 2 */
11589
- /* if arg2 == 1 do *arg1 = 0 */
11590
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11591
- /* fetch map_value_ptr from the stack of this function */
11592
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11593
- /* write into map value */
11594
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11595
-
11596
- /* if arg4 == 1 do *arg3 = 0 */
11597
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11598
- /* fetch map_value_ptr from the stack of this function */
11599
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11600
- /* write into map value */
11601
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11602
- BPF_EXIT_INSN(),
11603
- },
11604
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11605
- .fixup_map1 = { 12, 22 },
11606
- .result = ACCEPT,
11607
- },
11608
- {
11609
- "calls: two calls that receive map_value_ptr_or_null via arg. test2",
11610
- .insns = {
11611
- /* main prog */
11612
- /* pass fp-16, fp-8 into a function */
11613
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11614
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11615
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11616
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11617
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11618
- BPF_MOV64_IMM(BPF_REG_0, 0),
11619
- BPF_EXIT_INSN(),
11620
-
11621
- /* subprog 1 */
11622
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11623
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11624
- /* 1st lookup from map */
11625
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11626
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11627
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11628
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11629
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11630
- BPF_FUNC_map_lookup_elem),
11631
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11632
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11633
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11634
- BPF_MOV64_IMM(BPF_REG_8, 0),
11635
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11636
- BPF_MOV64_IMM(BPF_REG_8, 1),
11637
-
11638
- /* 2nd lookup from map */
11639
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11640
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11641
- BPF_LD_MAP_FD(BPF_REG_1, 0),
11642
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11643
- BPF_FUNC_map_lookup_elem),
11644
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11645
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11646
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11647
- BPF_MOV64_IMM(BPF_REG_9, 0),
11648
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11649
- BPF_MOV64_IMM(BPF_REG_9, 1),
11650
-
11651
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11652
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11653
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11654
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11655
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11656
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11657
- BPF_EXIT_INSN(),
11658
-
11659
- /* subprog 2 */
11660
- /* if arg2 == 1 do *arg1 = 0 */
11661
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11662
- /* fetch map_value_ptr from the stack of this function */
11663
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11664
- /* write into map value */
11665
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11666
-
11667
- /* if arg4 == 0 do *arg3 = 0 */
11668
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
11669
- /* fetch map_value_ptr from the stack of this function */
11670
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11671
- /* write into map value */
11672
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11673
- BPF_EXIT_INSN(),
11674
- },
11675
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11676
- .fixup_map1 = { 12, 22 },
11677
- .result = REJECT,
11678
- .errstr = "R0 invalid mem access 'inv'",
11679
- },
11680
- {
11681
- "calls: pkt_ptr spill into caller stack",
11682
- .insns = {
11683
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11684
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11685
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11686
- BPF_EXIT_INSN(),
11687
-
11688
- /* subprog 1 */
11689
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11690
- offsetof(struct __sk_buff, data)),
11691
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11692
- offsetof(struct __sk_buff, data_end)),
11693
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11694
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11695
- /* spill unchecked pkt_ptr into stack of caller */
11696
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11697
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11698
- /* now the pkt range is verified, read pkt_ptr from stack */
11699
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11700
- /* write 4 bytes into packet */
11701
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11702
- BPF_EXIT_INSN(),
11703
- },
11704
- .result = ACCEPT,
11705
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11706
- .retval = POINTER_VALUE,
11707
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11708
- },
11709
- {
11710
- "calls: pkt_ptr spill into caller stack 2",
11711
- .insns = {
11712
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11713
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11714
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11715
- /* Marking is still kept, but not in all cases safe. */
11716
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11717
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11718
- BPF_EXIT_INSN(),
11719
-
11720
- /* subprog 1 */
11721
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11722
- offsetof(struct __sk_buff, data)),
11723
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11724
- offsetof(struct __sk_buff, data_end)),
11725
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11726
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11727
- /* spill unchecked pkt_ptr into stack of caller */
11728
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11729
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11730
- /* now the pkt range is verified, read pkt_ptr from stack */
11731
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11732
- /* write 4 bytes into packet */
11733
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11734
- BPF_EXIT_INSN(),
11735
- },
11736
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11737
- .errstr = "invalid access to packet",
11738
- .result = REJECT,
11739
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11740
- },
11741
- {
11742
- "calls: pkt_ptr spill into caller stack 3",
11743
- .insns = {
11744
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11745
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11746
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11747
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11748
- /* Marking is still kept and safe here. */
11749
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11750
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11751
- BPF_EXIT_INSN(),
11752
-
11753
- /* subprog 1 */
11754
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11755
- offsetof(struct __sk_buff, data)),
11756
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11757
- offsetof(struct __sk_buff, data_end)),
11758
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11759
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11760
- /* spill unchecked pkt_ptr into stack of caller */
11761
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11762
- BPF_MOV64_IMM(BPF_REG_5, 0),
11763
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11764
- BPF_MOV64_IMM(BPF_REG_5, 1),
11765
- /* now the pkt range is verified, read pkt_ptr from stack */
11766
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11767
- /* write 4 bytes into packet */
11768
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11769
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11770
- BPF_EXIT_INSN(),
11771
- },
11772
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11773
- .result = ACCEPT,
11774
- .retval = 1,
11775
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11776
- },
11777
- {
11778
- "calls: pkt_ptr spill into caller stack 4",
11779
- .insns = {
11780
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11781
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11782
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11783
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11784
- /* Check marking propagated. */
11785
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11786
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11787
- BPF_EXIT_INSN(),
11788
-
11789
- /* subprog 1 */
11790
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11791
- offsetof(struct __sk_buff, data)),
11792
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11793
- offsetof(struct __sk_buff, data_end)),
11794
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11795
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11796
- /* spill unchecked pkt_ptr into stack of caller */
11797
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11798
- BPF_MOV64_IMM(BPF_REG_5, 0),
11799
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11800
- BPF_MOV64_IMM(BPF_REG_5, 1),
11801
- /* don't read back pkt_ptr from stack here */
11802
- /* write 4 bytes into packet */
11803
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11804
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11805
- BPF_EXIT_INSN(),
11806
- },
11807
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11808
- .result = ACCEPT,
11809
- .retval = 1,
11810
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11811
- },
11812
- {
11813
- "calls: pkt_ptr spill into caller stack 5",
11814
- .insns = {
11815
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11816
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11817
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
11818
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11819
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11820
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11821
- BPF_EXIT_INSN(),
11822
-
11823
- /* subprog 1 */
11824
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11825
- offsetof(struct __sk_buff, data)),
11826
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11827
- offsetof(struct __sk_buff, data_end)),
11828
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11829
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11830
- BPF_MOV64_IMM(BPF_REG_5, 0),
11831
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11832
- /* spill checked pkt_ptr into stack of caller */
11833
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11834
- BPF_MOV64_IMM(BPF_REG_5, 1),
11835
- /* don't read back pkt_ptr from stack here */
11836
- /* write 4 bytes into packet */
11837
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11838
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11839
- BPF_EXIT_INSN(),
11840
- },
11841
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11842
- .errstr = "same insn cannot be used with different",
11843
- .result = REJECT,
11844
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11845
- },
11846
- {
11847
- "calls: pkt_ptr spill into caller stack 6",
11848
- .insns = {
11849
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11850
- offsetof(struct __sk_buff, data_end)),
11851
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11852
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11853
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11854
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11855
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11856
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11857
- BPF_EXIT_INSN(),
11858
-
11859
- /* subprog 1 */
11860
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11861
- offsetof(struct __sk_buff, data)),
11862
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11863
- offsetof(struct __sk_buff, data_end)),
11864
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11865
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11866
- BPF_MOV64_IMM(BPF_REG_5, 0),
11867
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11868
- /* spill checked pkt_ptr into stack of caller */
11869
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11870
- BPF_MOV64_IMM(BPF_REG_5, 1),
11871
- /* don't read back pkt_ptr from stack here */
11872
- /* write 4 bytes into packet */
11873
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11874
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11875
- BPF_EXIT_INSN(),
11876
- },
11877
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11878
- .errstr = "R4 invalid mem access",
11879
- .result = REJECT,
11880
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11881
- },
11882
- {
11883
- "calls: pkt_ptr spill into caller stack 7",
11884
- .insns = {
11885
- BPF_MOV64_IMM(BPF_REG_2, 0),
11886
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11887
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11888
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11889
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11890
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11891
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11892
- BPF_EXIT_INSN(),
11893
-
11894
- /* subprog 1 */
11895
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11896
- offsetof(struct __sk_buff, data)),
11897
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11898
- offsetof(struct __sk_buff, data_end)),
11899
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11900
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11901
- BPF_MOV64_IMM(BPF_REG_5, 0),
11902
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11903
- /* spill checked pkt_ptr into stack of caller */
11904
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11905
- BPF_MOV64_IMM(BPF_REG_5, 1),
11906
- /* don't read back pkt_ptr from stack here */
11907
- /* write 4 bytes into packet */
11908
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11909
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11910
- BPF_EXIT_INSN(),
11911
- },
11912
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11913
- .errstr = "R4 invalid mem access",
11914
- .result = REJECT,
11915
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11916
- },
11917
- {
11918
- "calls: pkt_ptr spill into caller stack 8",
11919
- .insns = {
11920
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11921
- offsetof(struct __sk_buff, data)),
11922
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11923
- offsetof(struct __sk_buff, data_end)),
11924
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11925
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11926
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11927
- BPF_EXIT_INSN(),
11928
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11929
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11930
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11931
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11932
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11933
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11934
- BPF_EXIT_INSN(),
11935
-
11936
- /* subprog 1 */
11937
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11938
- offsetof(struct __sk_buff, data)),
11939
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11940
- offsetof(struct __sk_buff, data_end)),
11941
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11942
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11943
- BPF_MOV64_IMM(BPF_REG_5, 0),
11944
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11945
- /* spill checked pkt_ptr into stack of caller */
11946
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11947
- BPF_MOV64_IMM(BPF_REG_5, 1),
11948
- /* don't read back pkt_ptr from stack here */
11949
- /* write 4 bytes into packet */
11950
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11951
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11952
- BPF_EXIT_INSN(),
11953
- },
11954
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11955
- .result = ACCEPT,
11956
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11957
- },
11958
- {
11959
- "calls: pkt_ptr spill into caller stack 9",
11960
- .insns = {
11961
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11962
- offsetof(struct __sk_buff, data)),
11963
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11964
- offsetof(struct __sk_buff, data_end)),
11965
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11966
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11967
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11968
- BPF_EXIT_INSN(),
11969
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11970
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11971
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11972
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11973
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11974
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11975
- BPF_EXIT_INSN(),
11976
-
11977
- /* subprog 1 */
11978
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11979
- offsetof(struct __sk_buff, data)),
11980
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11981
- offsetof(struct __sk_buff, data_end)),
11982
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11983
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11984
- BPF_MOV64_IMM(BPF_REG_5, 0),
11985
- /* spill unchecked pkt_ptr into stack of caller */
11986
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11987
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11988
- BPF_MOV64_IMM(BPF_REG_5, 1),
11989
- /* don't read back pkt_ptr from stack here */
11990
- /* write 4 bytes into packet */
11991
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11992
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11993
- BPF_EXIT_INSN(),
11994
- },
11995
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11996
- .errstr = "invalid access to packet",
11997
- .result = REJECT,
11998
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11999
- },
12000
- {
12001
- "calls: caller stack init to zero or map_value_or_null",
12002
- .insns = {
12003
- BPF_MOV64_IMM(BPF_REG_0, 0),
12004
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12005
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12006
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12007
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12008
- /* fetch map_value_or_null or const_zero from stack */
12009
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12010
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12011
- /* store into map_value */
12012
- BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12013
- BPF_EXIT_INSN(),
12014
-
12015
- /* subprog 1 */
12016
- /* if (ctx == 0) return; */
12017
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12018
- /* else bpf_map_lookup() and *(fp - 8) = r0 */
12019
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12020
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12021
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12022
- BPF_LD_MAP_FD(BPF_REG_1, 0),
12023
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12024
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12025
- BPF_FUNC_map_lookup_elem),
12026
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12027
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12028
- BPF_EXIT_INSN(),
12029
- },
12030
- .fixup_map1 = { 13 },
12031
- .result = ACCEPT,
12032
- .prog_type = BPF_PROG_TYPE_XDP,
12033
- },
12034
- {
12035
- "calls: stack init to zero and pruning",
12036
- .insns = {
12037
- /* first make allocated_stack 16 byte */
12038
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12039
- /* now fork the execution such that the false branch
12040
-			 * of JGT insn will be verified second and it skips zero
12041
- * init of fp-8 stack slot. If stack liveness marking
12042
- * is missing live_read marks from call map_lookup
12043
- * processing then pruning will incorrectly assume
12044
- * that fp-8 stack slot was unused in the fall-through
12045
- * branch and will accept the program incorrectly
12046
- */
12047
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12048
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12049
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12050
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12051
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12052
- BPF_LD_MAP_FD(BPF_REG_1, 0),
12053
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12054
- BPF_FUNC_map_lookup_elem),
12055
- BPF_EXIT_INSN(),
12056
- },
12057
- .fixup_map2 = { 6 },
12058
- .errstr = "invalid indirect read from stack off -8+0 size 8",
12059
- .result = REJECT,
12060
- .prog_type = BPF_PROG_TYPE_XDP,
12061
- },
12062
- {
12063
- "calls: two calls returning different map pointers for lookup (hash, array)",
12064
- .insns = {
12065
- /* main prog */
12066
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12067
- BPF_CALL_REL(11),
12068
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12069
- BPF_CALL_REL(12),
12070
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12071
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12072
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12073
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12074
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12075
- BPF_FUNC_map_lookup_elem),
12076
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12077
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12078
- offsetof(struct test_val, foo)),
12079
- BPF_MOV64_IMM(BPF_REG_0, 1),
12080
- BPF_EXIT_INSN(),
12081
- /* subprog 1 */
12082
- BPF_LD_MAP_FD(BPF_REG_0, 0),
12083
- BPF_EXIT_INSN(),
12084
- /* subprog 2 */
12085
- BPF_LD_MAP_FD(BPF_REG_0, 0),
12086
- BPF_EXIT_INSN(),
12087
- },
12088
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12089
- .fixup_map2 = { 13 },
12090
- .fixup_map4 = { 16 },
12091
- .result = ACCEPT,
12092
- .retval = 1,
12093
- },
12094
- {
12095
- "calls: two calls returning different map pointers for lookup (hash, map in map)",
12096
- .insns = {
12097
- /* main prog */
12098
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12099
- BPF_CALL_REL(11),
12100
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12101
- BPF_CALL_REL(12),
12102
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12103
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12104
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12105
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12106
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12107
- BPF_FUNC_map_lookup_elem),
12108
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12109
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12110
- offsetof(struct test_val, foo)),
12111
- BPF_MOV64_IMM(BPF_REG_0, 1),
12112
- BPF_EXIT_INSN(),
12113
- /* subprog 1 */
12114
- BPF_LD_MAP_FD(BPF_REG_0, 0),
12115
- BPF_EXIT_INSN(),
12116
- /* subprog 2 */
12117
- BPF_LD_MAP_FD(BPF_REG_0, 0),
12118
- BPF_EXIT_INSN(),
12119
- },
12120
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12121
- .fixup_map_in_map = { 16 },
12122
- .fixup_map4 = { 13 },
12123
- .result = REJECT,
12124
- .errstr = "R0 invalid mem access 'map_ptr'",
12125
- },
12126
- {
12127
- "cond: two branches returning different map pointers for lookup (tail, tail)",
12128
- .insns = {
12129
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12130
- offsetof(struct __sk_buff, mark)),
12131
- BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12132
- BPF_LD_MAP_FD(BPF_REG_2, 0),
12133
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12134
- BPF_LD_MAP_FD(BPF_REG_2, 0),
12135
- BPF_MOV64_IMM(BPF_REG_3, 7),
12136
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12137
- BPF_FUNC_tail_call),
12138
- BPF_MOV64_IMM(BPF_REG_0, 1),
12139
- BPF_EXIT_INSN(),
12140
- },
12141
- .fixup_prog1 = { 5 },
12142
- .fixup_prog2 = { 2 },
12143
- .result_unpriv = REJECT,
12144
- .errstr_unpriv = "tail_call abusing map_ptr",
12145
- .result = ACCEPT,
12146
- .retval = 42,
12147
- },
12148
- {
12149
- "cond: two branches returning same map pointers for lookup (tail, tail)",
12150
- .insns = {
12151
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12152
- offsetof(struct __sk_buff, mark)),
12153
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12154
- BPF_LD_MAP_FD(BPF_REG_2, 0),
12155
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12156
- BPF_LD_MAP_FD(BPF_REG_2, 0),
12157
- BPF_MOV64_IMM(BPF_REG_3, 7),
12158
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12159
- BPF_FUNC_tail_call),
12160
- BPF_MOV64_IMM(BPF_REG_0, 1),
12161
- BPF_EXIT_INSN(),
12162
- },
12163
- .fixup_prog2 = { 2, 5 },
12164
- .result_unpriv = ACCEPT,
12165
- .result = ACCEPT,
12166
- .retval = 42,
12167
- },
12168
- {
12169
- "search pruning: all branches should be verified (nop operation)",
12170
- .insns = {
12171
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12172
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12173
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12174
- BPF_LD_MAP_FD(BPF_REG_1, 0),
12175
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12176
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12177
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12178
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12179
- BPF_MOV64_IMM(BPF_REG_4, 0),
12180
- BPF_JMP_A(1),
12181
- BPF_MOV64_IMM(BPF_REG_4, 1),
12182
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12183
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12184
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12185
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12186
- BPF_MOV64_IMM(BPF_REG_6, 0),
12187
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12188
- BPF_EXIT_INSN(),
12189
- },
12190
- .fixup_map1 = { 3 },
12191
- .errstr = "R6 invalid mem access 'inv'",
12192
- .result = REJECT,
12193
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12194
- },
12195
- {
12196
- "search pruning: all branches should be verified (invalid stack access)",
12197
- .insns = {
12198
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12199
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12200
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12201
- BPF_LD_MAP_FD(BPF_REG_1, 0),
12202
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12203
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12204
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12205
- BPF_MOV64_IMM(BPF_REG_4, 0),
12206
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12207
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12208
- BPF_JMP_A(1),
12209
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12210
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12211
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12212
- BPF_EXIT_INSN(),
12213
- },
12214
- .fixup_map1 = { 3 },
12215
- .errstr = "invalid read from stack off -16+0 size 8",
12216
- .result = REJECT,
12217
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12218
- },
12219
- {
12220
- "jit: lsh, rsh, arsh by 1",
12221
- .insns = {
12222
- BPF_MOV64_IMM(BPF_REG_0, 1),
12223
- BPF_MOV64_IMM(BPF_REG_1, 0xff),
12224
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12225
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12226
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12227
- BPF_EXIT_INSN(),
12228
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12229
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12230
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12231
- BPF_EXIT_INSN(),
12232
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12233
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12234
- BPF_EXIT_INSN(),
12235
- BPF_MOV64_IMM(BPF_REG_0, 2),
12236
- BPF_EXIT_INSN(),
12237
- },
12238
- .result = ACCEPT,
12239
- .retval = 2,
12240
- },
12241
- {
12242
- "jit: mov32 for ldimm64, 1",
12243
- .insns = {
12244
- BPF_MOV64_IMM(BPF_REG_0, 2),
12245
- BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12246
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12247
- BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12248
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12249
- BPF_MOV64_IMM(BPF_REG_0, 1),
12250
- BPF_EXIT_INSN(),
12251
- },
12252
- .result = ACCEPT,
12253
- .retval = 2,
12254
- },
12255
- {
12256
- "jit: mov32 for ldimm64, 2",
12257
- .insns = {
12258
- BPF_MOV64_IMM(BPF_REG_0, 1),
12259
- BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12260
- BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12261
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12262
- BPF_MOV64_IMM(BPF_REG_0, 2),
12263
- BPF_EXIT_INSN(),
12264
- },
12265
- .result = ACCEPT,
12266
- .retval = 2,
12267
- },
12268
- {
12269
- "jit: various mul tests",
12270
- .insns = {
12271
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12272
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12273
- BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12274
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12275
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12276
- BPF_MOV64_IMM(BPF_REG_0, 1),
12277
- BPF_EXIT_INSN(),
12278
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12279
- BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12280
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12281
- BPF_MOV64_IMM(BPF_REG_0, 1),
12282
- BPF_EXIT_INSN(),
12283
- BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12284
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12285
- BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12286
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12287
- BPF_MOV64_IMM(BPF_REG_0, 1),
12288
- BPF_EXIT_INSN(),
12289
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12290
- BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12291
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12292
- BPF_MOV64_IMM(BPF_REG_0, 1),
12293
- BPF_EXIT_INSN(),
12294
- BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12295
- BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12296
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12297
- BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12298
- BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12299
- BPF_MOV64_IMM(BPF_REG_0, 1),
12300
- BPF_EXIT_INSN(),
12301
- BPF_MOV64_IMM(BPF_REG_0, 2),
12302
- BPF_EXIT_INSN(),
12303
- },
12304
- .result = ACCEPT,
12305
- .retval = 2,
12306
- },
12307
- {
12308
- "xadd/w check unaligned stack",
12309
- .insns = {
12310
- BPF_MOV64_IMM(BPF_REG_0, 1),
12311
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12312
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12313
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12314
- BPF_EXIT_INSN(),
12315
- },
12316
- .result = REJECT,
12317
- .errstr = "misaligned stack access off",
12318
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12319
- },
12320
- {
12321
- "xadd/w check unaligned map",
12322
- .insns = {
12323
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12324
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12325
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12326
- BPF_LD_MAP_FD(BPF_REG_1, 0),
12327
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12328
- BPF_FUNC_map_lookup_elem),
12329
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12330
- BPF_EXIT_INSN(),
12331
- BPF_MOV64_IMM(BPF_REG_1, 1),
12332
- BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12333
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12334
- BPF_EXIT_INSN(),
12335
- },
12336
- .fixup_map1 = { 3 },
12337
- .result = REJECT,
12338
- .errstr = "misaligned value access off",
12339
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12340
- },
12341
- {
12342
- "xadd/w check unaligned pkt",
12343
- .insns = {
12344
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12345
- offsetof(struct xdp_md, data)),
12346
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12347
- offsetof(struct xdp_md, data_end)),
12348
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12349
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12350
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12351
- BPF_MOV64_IMM(BPF_REG_0, 99),
12352
- BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12353
- BPF_MOV64_IMM(BPF_REG_0, 1),
12354
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12355
- BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
12356
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
12357
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
12358
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
12359
- BPF_EXIT_INSN(),
12360
- },
12361
- .result = REJECT,
12362
- .errstr = "BPF_XADD stores into R2 packet",
12363
- .prog_type = BPF_PROG_TYPE_XDP,
12364
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12365
- },
12366
- {
12367
- "xadd/w check whether src/dst got mangled, 1",
12368
- .insns = {
12369
- BPF_MOV64_IMM(BPF_REG_0, 1),
12370
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12371
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12372
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12373
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12374
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12375
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12376
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12377
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12378
- BPF_EXIT_INSN(),
12379
- BPF_MOV64_IMM(BPF_REG_0, 42),
12380
- BPF_EXIT_INSN(),
12381
- },
12382
- .result = ACCEPT,
12383
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12384
- .retval = 3,
12385
- },
12386
- {
12387
- "xadd/w check whether src/dst got mangled, 2",
12388
- .insns = {
12389
- BPF_MOV64_IMM(BPF_REG_0, 1),
12390
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12391
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12392
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12393
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12394
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12395
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12396
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12397
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12398
- BPF_EXIT_INSN(),
12399
- BPF_MOV64_IMM(BPF_REG_0, 42),
12400
- BPF_EXIT_INSN(),
12401
- },
12402
- .result = ACCEPT,
12403
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12404
- .retval = 3,
12405
- },
12406
- {
12407
- "bpf_get_stack return R0 within range",
12408
- .insns = {
12409
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12410
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12411
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12412
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12413
- BPF_LD_MAP_FD(BPF_REG_1, 0),
12414
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12415
- BPF_FUNC_map_lookup_elem),
12416
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
12417
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12418
- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
12419
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12420
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12421
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
12422
- BPF_MOV64_IMM(BPF_REG_4, 256),
12423
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
12424
- BPF_MOV64_IMM(BPF_REG_1, 0),
12425
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12426
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
12427
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
12428
- BPF_JMP_REG(BPF_JSLT, BPF_REG_8, BPF_REG_1, 16),
12429
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
12430
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12431
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
12432
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
12433
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
12434
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
12435
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
12436
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
12437
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12438
- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
12439
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
12440
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
12441
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12442
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
12443
- BPF_MOV64_IMM(BPF_REG_4, 0),
12444
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
12445
- BPF_EXIT_INSN(),
12446
- },
12447
- .fixup_map2 = { 4 },
12448
- .result = ACCEPT,
12449
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12450
- },
12451
- {
12452
- "ld_abs: invalid op 1",
12453
- .insns = {
12454
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12455
- BPF_LD_ABS(BPF_DW, 0),
12456
- BPF_EXIT_INSN(),
12457
- },
12458
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12459
- .result = REJECT,
12460
- .errstr = "unknown opcode",
12461
- },
12462
- {
12463
- "ld_abs: invalid op 2",
12464
- .insns = {
12465
- BPF_MOV32_IMM(BPF_REG_0, 256),
12466
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12467
- BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
12468
- BPF_EXIT_INSN(),
12469
- },
12470
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12471
- .result = REJECT,
12472
- .errstr = "unknown opcode",
12473
- },
12474
- {
12475
- "ld_abs: nmap reduced",
12476
- .insns = {
12477
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12478
- BPF_LD_ABS(BPF_H, 12),
12479
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
12480
- BPF_LD_ABS(BPF_H, 12),
12481
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
12482
- BPF_MOV32_IMM(BPF_REG_0, 18),
12483
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
12484
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
12485
- BPF_LD_IND(BPF_W, BPF_REG_7, 14),
12486
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
12487
- BPF_MOV32_IMM(BPF_REG_0, 280971478),
12488
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12489
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12490
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
12491
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12492
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
12493
- BPF_LD_ABS(BPF_H, 12),
12494
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
12495
- BPF_MOV32_IMM(BPF_REG_0, 22),
12496
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12497
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12498
- BPF_LD_IND(BPF_H, BPF_REG_7, 14),
12499
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
12500
- BPF_MOV32_IMM(BPF_REG_0, 17366),
12501
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
12502
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
12503
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
12504
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12505
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12506
- BPF_MOV32_IMM(BPF_REG_0, 256),
12507
- BPF_EXIT_INSN(),
12508
- BPF_MOV32_IMM(BPF_REG_0, 0),
12509
- BPF_EXIT_INSN(),
12510
- },
12511
- .data = {
12512
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
12513
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12514
- 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
12515
- },
12516
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12517
- .result = ACCEPT,
12518
- .retval = 256,
12519
- },
12520
- {
12521
- "ld_abs: div + abs, test 1",
12522
- .insns = {
12523
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12524
- BPF_LD_ABS(BPF_B, 3),
12525
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12526
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12527
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12528
- BPF_LD_ABS(BPF_B, 4),
12529
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12530
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12531
- BPF_EXIT_INSN(),
12532
- },
12533
- .data = {
12534
- 10, 20, 30, 40, 50,
12535
- },
12536
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12537
- .result = ACCEPT,
12538
- .retval = 10,
12539
- },
12540
- {
12541
- "ld_abs: div + abs, test 2",
12542
- .insns = {
12543
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12544
- BPF_LD_ABS(BPF_B, 3),
12545
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12546
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12547
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12548
- BPF_LD_ABS(BPF_B, 128),
12549
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12550
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12551
- BPF_EXIT_INSN(),
12552
- },
12553
- .data = {
12554
- 10, 20, 30, 40, 50,
12555
- },
12556
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12557
- .result = ACCEPT,
12558
- .retval = 0,
12559
- },
12560
- {
12561
- "ld_abs: div + abs, test 3",
12562
- .insns = {
12563
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12564
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12565
- BPF_LD_ABS(BPF_B, 3),
12566
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12567
- BPF_EXIT_INSN(),
12568
- },
12569
- .data = {
12570
- 10, 20, 30, 40, 50,
12571
- },
12572
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12573
- .result = ACCEPT,
12574
- .retval = 0,
12575
- },
12576
- {
12577
- "ld_abs: div + abs, test 4",
12578
- .insns = {
12579
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12580
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12581
- BPF_LD_ABS(BPF_B, 256),
12582
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12583
- BPF_EXIT_INSN(),
12584
- },
12585
- .data = {
12586
- 10, 20, 30, 40, 50,
12587
- },
12588
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12589
- .result = ACCEPT,
12590
- .retval = 0,
12591
- },
12592
- {
12593
- "ld_abs: vlan + abs, test 1",
12594
- .insns = { },
12595
- .data = {
12596
- 0x34,
12597
- },
12598
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
12599
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12600
- .result = ACCEPT,
12601
- .retval = 0xbef,
12602
- },
12603
- {
12604
- "ld_abs: vlan + abs, test 2",
12605
- .insns = {
12606
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12607
- BPF_LD_ABS(BPF_B, 0),
12608
- BPF_LD_ABS(BPF_H, 0),
12609
- BPF_LD_ABS(BPF_W, 0),
12610
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12611
- BPF_MOV64_IMM(BPF_REG_6, 0),
12612
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12613
- BPF_MOV64_IMM(BPF_REG_2, 1),
12614
- BPF_MOV64_IMM(BPF_REG_3, 2),
12615
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12616
- BPF_FUNC_skb_vlan_push),
12617
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12618
- BPF_LD_ABS(BPF_B, 0),
12619
- BPF_LD_ABS(BPF_H, 0),
12620
- BPF_LD_ABS(BPF_W, 0),
12621
- BPF_MOV64_IMM(BPF_REG_0, 42),
12622
- BPF_EXIT_INSN(),
12623
- },
12624
- .data = {
12625
- 0x34,
12626
- },
12627
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12628
- .result = ACCEPT,
12629
- .retval = 42,
12630
- },
12631
- {
12632
- "ld_abs: jump around ld_abs",
12633
- .insns = { },
12634
- .data = {
12635
- 10, 11,
12636
- },
12637
- .fill_helper = bpf_fill_jump_around_ld_abs,
12638
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12639
- .result = ACCEPT,
12640
- .retval = 10,
12641
- },
12642
- {
12643
- "ld_dw: xor semi-random 64 bit imms, test 1",
12644
- .insns = { },
12645
- .data = { },
12646
- .fill_helper = bpf_fill_rand_ld_dw,
12647
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12648
- .result = ACCEPT,
12649
- .retval = 4090,
12650
- },
12651
- {
12652
- "ld_dw: xor semi-random 64 bit imms, test 2",
12653
- .insns = { },
12654
- .data = { },
12655
- .fill_helper = bpf_fill_rand_ld_dw,
12656
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12657
- .result = ACCEPT,
12658
- .retval = 2047,
12659
- },
12660
- {
12661
- "ld_dw: xor semi-random 64 bit imms, test 3",
12662
- .insns = { },
12663
- .data = { },
12664
- .fill_helper = bpf_fill_rand_ld_dw,
12665
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12666
- .result = ACCEPT,
12667
- .retval = 511,
12668
- },
12669
- {
12670
- "ld_dw: xor semi-random 64 bit imms, test 4",
12671
- .insns = { },
12672
- .data = { },
12673
- .fill_helper = bpf_fill_rand_ld_dw,
12674
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12675
- .result = ACCEPT,
12676
- .retval = 5,
12677
- },
12678
- {
12679
- "pass unmodified ctx pointer to helper",
12680
- .insns = {
12681
- BPF_MOV64_IMM(BPF_REG_2, 0),
12682
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12683
- BPF_FUNC_csum_update),
12684
- BPF_MOV64_IMM(BPF_REG_0, 0),
12685
- BPF_EXIT_INSN(),
12686
- },
12687
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12688
- .result = ACCEPT,
12689
- },
12690
- {
12691
- "pass modified ctx pointer to helper, 1",
12692
- .insns = {
12693
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
12694
- BPF_MOV64_IMM(BPF_REG_2, 0),
12695
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12696
- BPF_FUNC_csum_update),
12697
- BPF_MOV64_IMM(BPF_REG_0, 0),
12698
- BPF_EXIT_INSN(),
12699
- },
12700
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12701
- .result = REJECT,
12702
- .errstr = "dereference of modified ctx ptr",
12703
- },
12704
- {
12705
- "pass modified ctx pointer to helper, 2",
12706
- .insns = {
12707
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
12708
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12709
- BPF_FUNC_get_socket_cookie),
12710
- BPF_MOV64_IMM(BPF_REG_0, 0),
12711
- BPF_EXIT_INSN(),
12712
- },
12713
- .result_unpriv = REJECT,
12714
- .result = REJECT,
12715
- .errstr_unpriv = "dereference of modified ctx ptr",
12716
- .errstr = "dereference of modified ctx ptr",
12717
- },
12718
- {
12719
- "pass modified ctx pointer to helper, 3",
12720
- .insns = {
12721
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
12722
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
12723
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
12724
- BPF_MOV64_IMM(BPF_REG_2, 0),
12725
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12726
- BPF_FUNC_csum_update),
12727
- BPF_MOV64_IMM(BPF_REG_0, 0),
12728
- BPF_EXIT_INSN(),
12729
- },
12730
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12731
- .result = REJECT,
12732
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
12733
- },
12734
- {
12735
- "mov64 src == dst",
12736
- .insns = {
12737
- BPF_MOV64_IMM(BPF_REG_2, 0),
12738
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
12739
- // Check bounds are OK
12740
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
12741
- BPF_MOV64_IMM(BPF_REG_0, 0),
12742
- BPF_EXIT_INSN(),
12743
- },
12744
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12745
- .result = ACCEPT,
12746
- },
12747
- {
12748
- "mov64 src != dst",
12749
- .insns = {
12750
- BPF_MOV64_IMM(BPF_REG_3, 0),
12751
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
12752
- // Check bounds are OK
12753
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
12754
- BPF_MOV64_IMM(BPF_REG_0, 0),
12755
- BPF_EXIT_INSN(),
12756
- },
12757
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12758
- .result = ACCEPT,
12759
- },
12760
- {
12761
- "calls: ctx read at start of subprog",
12762
- .insns = {
12763
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12764
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
12765
- BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
12766
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12767
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12768
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12769
- BPF_EXIT_INSN(),
12770
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
12771
- BPF_MOV64_IMM(BPF_REG_0, 0),
12772
- BPF_EXIT_INSN(),
12773
- },
12774
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
12775
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
12776
- .result_unpriv = REJECT,
12777
- .result = ACCEPT,
12778
- },
354
+static struct bpf_test tests[] = {
355
+#define FILL_ARRAY
356
+#include <verifier/tests.h>
357
+#undef FILL_ARRAY
12779358 };
12780359
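For context: verifier/tests.h is generated by the selftests Makefile as a flat list of #include directives, one per verifier/*.c file, wrapped in a FILL_ARRAY guard, so the construct above expands every per-topic test file directly into tests[]. A minimal sketch of what the generated header looks like (file names illustrative):

#ifdef FILL_ARRAY
#include "and.c"
#include "array_access.c"
#include "basic.c"
#endif

Each included file contains nothing but comma-terminated struct bpf_test initializers, so they splice straight into the array initializer; the test bodies removed above now live in those per-topic files.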
12781360 static int probe_filter_length(const struct bpf_insn *fp)
....@@ -12788,23 +367,54 @@
12788367 return len + 1;
12789368 }
12790369
12791
-static int create_map(uint32_t type, uint32_t size_key,
12792
- uint32_t size_value, uint32_t max_elem)
370
+static bool skip_unsupported_map(enum bpf_map_type map_type)
371
+{
372
+ if (!bpf_probe_map_type(map_type, 0)) {
373
+ printf("SKIP (unsupported map type %d)\n", map_type);
374
+ skips++;
375
+ return true;
376
+ }
377
+ return false;
378
+}
379
+
380
+static int __create_map(uint32_t type, uint32_t size_key,
381
+ uint32_t size_value, uint32_t max_elem,
382
+ uint32_t extra_flags)
12793383 {
12794384 int fd;
12795385
12796386 fd = bpf_create_map(type, size_key, size_value, max_elem,
12797
- type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
12798
- if (fd < 0)
387
+ (type == BPF_MAP_TYPE_HASH ?
388
+ BPF_F_NO_PREALLOC : 0) | extra_flags);
389
+ if (fd < 0) {
390
+ if (skip_unsupported_map(type))
391
+ return -1;
12799392 printf("Failed to create hash map '%s'!\n", strerror(errno));
393
+ }
12800394
12801395 return fd;
12802396 }
12803397
12804
-static int create_prog_dummy1(enum bpf_map_type prog_type)
398
+static int create_map(uint32_t type, uint32_t size_key,
399
+ uint32_t size_value, uint32_t max_elem)
400
+{
401
+ return __create_map(type, size_key, size_value, max_elem, 0);
402
+}
403
+
404
+static void update_map(int fd, int index)
405
+{
406
+ struct test_val value = {
407
+ .index = (6 + 1) * sizeof(int),
408
+ .foo[6] = 0xabcdef12,
409
+ };
410
+
411
+ assert(!bpf_map_update_elem(fd, &index, &value, 0));
412
+}
413
+
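skip_unsupported_map() leans on libbpf's feature probing; the same probe can be used stand-alone before a test depends on a newer map type. A minimal sketch, assuming a libbpf that provides bpf_probe_map_type() and UAPI headers that know BPF_MAP_TYPE_RINGBUF:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* second argument is the ifindex used for offload probing; 0 = none */
	if (!bpf_probe_map_type(BPF_MAP_TYPE_RINGBUF, 0)) {
		printf("BPF_MAP_TYPE_RINGBUF not supported, skipping\n");
		return 0;
	}
	printf("ring buffer maps supported\n");
	return 0;
}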
414
+static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
12805415 {
12806416 struct bpf_insn prog[] = {
12807
- BPF_MOV64_IMM(BPF_REG_0, 42),
417
+ BPF_MOV64_IMM(BPF_REG_0, ret),
12808418 BPF_EXIT_INSN(),
12809419 };
12810420
....@@ -12812,14 +422,15 @@
12812422 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
12813423 }
12814424
12815
-static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
425
+static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
426
+ int idx, int ret)
12816427 {
12817428 struct bpf_insn prog[] = {
12818429 BPF_MOV64_IMM(BPF_REG_3, idx),
12819430 BPF_LD_MAP_FD(BPF_REG_2, mfd),
12820431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12821432 BPF_FUNC_tail_call),
12822
- BPF_MOV64_IMM(BPF_REG_0, 41),
433
+ BPF_MOV64_IMM(BPF_REG_0, ret),
12823434 BPF_EXIT_INSN(),
12824435 };
12825436
....@@ -12827,36 +438,38 @@
12827438 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
12828439 }
12829440
12830
-static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
12831
- int p1key)
441
+static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
442
+ int p1key, int p2key, int p3key)
12832443 {
12833
- int p2key = 1;
12834
- int mfd, p1fd, p2fd;
444
+ int mfd, p1fd, p2fd, p3fd;
12835445
12836446 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
12837447 sizeof(int), max_elem, 0);
12838448 if (mfd < 0) {
449
+ if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
450
+ return -1;
12839451 printf("Failed to create prog array '%s'!\n", strerror(errno));
12840452 return -1;
12841453 }
12842454
12843
- p1fd = create_prog_dummy1(prog_type);
12844
- p2fd = create_prog_dummy2(prog_type, mfd, p2key);
12845
- if (p1fd < 0 || p2fd < 0)
12846
- goto out;
455
+ p1fd = create_prog_dummy_simple(prog_type, 42);
456
+ p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
457
+ p3fd = create_prog_dummy_simple(prog_type, 24);
458
+ if (p1fd < 0 || p2fd < 0 || p3fd < 0)
459
+ goto err;
12847460 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
12848
- goto out;
461
+ goto err;
12849462 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
12850
- goto out;
463
+ goto err;
464
+ if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
465
+err:
466
+ close(mfd);
467
+ mfd = -1;
468
+ }
469
+ close(p3fd);
12851470 close(p2fd);
12852471 close(p1fd);
12853
-
12854472 return mfd;
12855
-out:
12856
- close(p2fd);
12857
- close(p1fd);
12858
- close(mfd);
12859
- return -1;
12860473 }
12861474
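The resulting program array holds the simple dummy (return 42) at p1key, the self-tail-calling dummy (return 41 on fallthrough) at p2key, and a second simple dummy (return 24) at p3key. Illustrative only, not quoted from this file: a test entry that tail-calls into slot 0 and expects the 42 return, with .fixup_prog1 patching the array fd into instruction 1 (the BPF_LD_MAP_FD):

{
	"illustrative: tail call into slot 0",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 42,
},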
12862475 static int create_map_in_map(void)
....@@ -12866,93 +479,218 @@
12866479 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
12867480 sizeof(int), 1, 0);
12868481 if (inner_map_fd < 0) {
482
+ if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
483
+ return -1;
12869484 printf("Failed to create array '%s'!\n", strerror(errno));
12870485 return inner_map_fd;
12871486 }
12872487
12873488 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
12874489 sizeof(int), inner_map_fd, 1, 0);
12875
- if (outer_map_fd < 0)
490
+ if (outer_map_fd < 0) {
491
+ if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
492
+ return -1;
12876493 printf("Failed to create array of maps '%s'!\n",
12877494 strerror(errno));
495
+ }
12878496
12879497 close(inner_map_fd);
12880498
12881499 return outer_map_fd;
12882500 }
12883501
12884
-static int create_cgroup_storage(void)
502
+static int create_cgroup_storage(bool percpu)
12885503 {
504
+ enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
505
+ BPF_MAP_TYPE_CGROUP_STORAGE;
12886506 int fd;
12887507
12888
- fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
12889
- sizeof(struct bpf_cgroup_storage_key),
508
+ fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
12890509 TEST_DATA_LEN, 0, 0);
12891
- if (fd < 0)
12892
- printf("Failed to create array '%s'!\n", strerror(errno));
510
+ if (fd < 0) {
511
+ if (skip_unsupported_map(type))
512
+ return -1;
513
+ printf("Failed to create cgroup storage '%s'!\n",
514
+ strerror(errno));
515
+ }
12893516
517
+ return fd;
518
+}
519
+
520
+/* struct bpf_spin_lock {
521
+ * int val;
522
+ * };
523
+ * struct val {
524
+ * int cnt;
525
+ * struct bpf_spin_lock l;
526
+ * };
527
+ */
528
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
529
+static __u32 btf_raw_types[] = {
530
+ /* int */
531
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
532
+ /* struct bpf_spin_lock */ /* [2] */
533
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
534
+ BTF_MEMBER_ENC(15, 1, 0), /* int val; */
535
+ /* struct val */ /* [3] */
536
+ BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
537
+ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
538
+ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
539
+};
540
+
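The name_off values hard-coded above index into btf_str_sec: 1 is "bpf_spin_lock", 15 is "val" (used both as the name of struct [3] and of the int member of [2]), 19 is "cnt", and 23 is "l", whose member is placed at bit offset 32 (byte 4). A stand-alone sanity sketch of those string offsets:

#include <assert.h>
#include <string.h>

static const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";

int main(void)
{
	assert(!strcmp(strs + 1, "bpf_spin_lock"));	/* name_off 1  */
	assert(!strcmp(strs + 15, "val"));		/* name_off 15 */
	assert(!strcmp(strs + 19, "cnt"));		/* name_off 19 */
	assert(!strcmp(strs + 23, "l"));		/* name_off 23 */
	return 0;
}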
541
+static int load_btf(void)
542
+{
543
+ struct btf_header hdr = {
544
+ .magic = BTF_MAGIC,
545
+ .version = BTF_VERSION,
546
+ .hdr_len = sizeof(struct btf_header),
547
+ .type_len = sizeof(btf_raw_types),
548
+ .str_off = sizeof(btf_raw_types),
549
+ .str_len = sizeof(btf_str_sec),
550
+ };
551
+ void *ptr, *raw_btf;
552
+ int btf_fd;
553
+
554
+ ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
555
+ sizeof(btf_str_sec));
556
+
557
+ memcpy(ptr, &hdr, sizeof(hdr));
558
+ ptr += sizeof(hdr);
559
+ memcpy(ptr, btf_raw_types, hdr.type_len);
560
+ ptr += hdr.type_len;
561
+ memcpy(ptr, btf_str_sec, hdr.str_len);
562
+ ptr += hdr.str_len;
563
+
564
+ btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
565
+ free(raw_btf);
566
+ if (btf_fd < 0)
567
+ return -1;
568
+ return btf_fd;
569
+}
570
+
571
+static int create_map_spin_lock(void)
572
+{
573
+ struct bpf_create_map_attr attr = {
574
+ .name = "test_map",
575
+ .map_type = BPF_MAP_TYPE_ARRAY,
576
+ .key_size = 4,
577
+ .value_size = 8,
578
+ .max_entries = 1,
579
+ .btf_key_type_id = 1,
580
+ .btf_value_type_id = 3,
581
+ };
582
+ int fd, btf_fd;
583
+
584
+ btf_fd = load_btf();
585
+ if (btf_fd < 0)
586
+ return -1;
587
+ attr.btf_fd = btf_fd;
588
+ fd = bpf_create_map_xattr(&attr);
589
+ if (fd < 0)
590
+ printf("Failed to create map with spin_lock\n");
591
+ return fd;
592
+}
593
+
594
+static int create_sk_storage_map(void)
595
+{
596
+ struct bpf_create_map_attr attr = {
597
+ .name = "test_map",
598
+ .map_type = BPF_MAP_TYPE_SK_STORAGE,
599
+ .key_size = 4,
600
+ .value_size = 8,
601
+ .max_entries = 0,
602
+ .map_flags = BPF_F_NO_PREALLOC,
603
+ .btf_key_type_id = 1,
604
+ .btf_value_type_id = 3,
605
+ };
606
+ int fd, btf_fd;
607
+
608
+ btf_fd = load_btf();
609
+ if (btf_fd < 0)
610
+ return -1;
611
+ attr.btf_fd = btf_fd;
612
+ fd = bpf_create_map_xattr(&attr);
613
+ close(attr.btf_fd);
614
+ if (fd < 0)
615
+ printf("Failed to create sk_storage_map\n");
12894616 return fd;
12895617 }
12896618
12897619 static char bpf_vlog[UINT_MAX >> 8];
12898620
12899
-static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
621
+static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
12900622 struct bpf_insn *prog, int *map_fds)
12901623 {
12902
- int *fixup_map1 = test->fixup_map1;
12903
- int *fixup_map2 = test->fixup_map2;
12904
- int *fixup_map3 = test->fixup_map3;
12905
- int *fixup_map4 = test->fixup_map4;
624
+ int *fixup_map_hash_8b = test->fixup_map_hash_8b;
625
+ int *fixup_map_hash_48b = test->fixup_map_hash_48b;
626
+ int *fixup_map_hash_16b = test->fixup_map_hash_16b;
627
+ int *fixup_map_array_48b = test->fixup_map_array_48b;
628
+ int *fixup_map_sockmap = test->fixup_map_sockmap;
629
+ int *fixup_map_sockhash = test->fixup_map_sockhash;
630
+ int *fixup_map_xskmap = test->fixup_map_xskmap;
631
+ int *fixup_map_stacktrace = test->fixup_map_stacktrace;
12906632 int *fixup_prog1 = test->fixup_prog1;
12907633 int *fixup_prog2 = test->fixup_prog2;
12908634 int *fixup_map_in_map = test->fixup_map_in_map;
12909635 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
636
+ int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
637
+ int *fixup_map_spin_lock = test->fixup_map_spin_lock;
638
+ int *fixup_map_array_ro = test->fixup_map_array_ro;
639
+ int *fixup_map_array_wo = test->fixup_map_array_wo;
640
+ int *fixup_map_array_small = test->fixup_map_array_small;
641
+ int *fixup_sk_storage_map = test->fixup_sk_storage_map;
642
+ int *fixup_map_event_output = test->fixup_map_event_output;
643
+ int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
644
+ int *fixup_map_ringbuf = test->fixup_map_ringbuf;
12910645
12911
- if (test->fill_helper)
646
+ if (test->fill_helper) {
647
+ test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
12912648 test->fill_helper(test);
649
+ }
12913650
12914651 /* Allocating HTs with 1 elem is fine here, since we only test
12915652 * the verifier and do not do a runtime lookup, so the only thing
12916653 * that really matters is value size in this case.
12917654 */
12918
- if (*fixup_map1) {
655
+ if (*fixup_map_hash_8b) {
12919656 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12920657 sizeof(long long), 1);
12921658 do {
12922
- prog[*fixup_map1].imm = map_fds[0];
12923
- fixup_map1++;
12924
- } while (*fixup_map1);
659
+ prog[*fixup_map_hash_8b].imm = map_fds[0];
660
+ fixup_map_hash_8b++;
661
+ } while (*fixup_map_hash_8b);
12925662 }
12926663
12927
- if (*fixup_map2) {
664
+ if (*fixup_map_hash_48b) {
12928665 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12929666 sizeof(struct test_val), 1);
12930667 do {
12931
- prog[*fixup_map2].imm = map_fds[1];
12932
- fixup_map2++;
12933
- } while (*fixup_map2);
668
+ prog[*fixup_map_hash_48b].imm = map_fds[1];
669
+ fixup_map_hash_48b++;
670
+ } while (*fixup_map_hash_48b);
12934671 }
12935672
12936
- if (*fixup_map3) {
673
+ if (*fixup_map_hash_16b) {
12937674 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12938675 sizeof(struct other_val), 1);
12939676 do {
12940
- prog[*fixup_map3].imm = map_fds[2];
12941
- fixup_map3++;
12942
- } while (*fixup_map3);
677
+ prog[*fixup_map_hash_16b].imm = map_fds[2];
678
+ fixup_map_hash_16b++;
679
+ } while (*fixup_map_hash_16b);
12943680 }
12944681
12945
- if (*fixup_map4) {
682
+ if (*fixup_map_array_48b) {
12946683 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
12947684 sizeof(struct test_val), 1);
685
+ update_map(map_fds[3], 0);
12948686 do {
12949
- prog[*fixup_map4].imm = map_fds[3];
12950
- fixup_map4++;
12951
- } while (*fixup_map4);
687
+ prog[*fixup_map_array_48b].imm = map_fds[3];
688
+ fixup_map_array_48b++;
689
+ } while (*fixup_map_array_48b);
12952690 }
12953691
12954692 if (*fixup_prog1) {
12955
- map_fds[4] = create_prog_array(prog_type, 4, 0);
693
+ map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
12956694 do {
12957695 prog[*fixup_prog1].imm = map_fds[4];
12958696 fixup_prog1++;
....@@ -12960,7 +698,7 @@
12960698 }
12961699
12962700 if (*fixup_prog2) {
12963
- map_fds[5] = create_prog_array(prog_type, 8, 7);
701
+ map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
12964702 do {
12965703 prog[*fixup_prog2].imm = map_fds[5];
12966704 fixup_prog2++;
....@@ -12976,18 +714,133 @@
12976714 }
12977715
12978716 if (*fixup_cgroup_storage) {
12979
- map_fds[7] = create_cgroup_storage();
717
+ map_fds[7] = create_cgroup_storage(false);
12980718 do {
12981719 prog[*fixup_cgroup_storage].imm = map_fds[7];
12982720 fixup_cgroup_storage++;
12983721 } while (*fixup_cgroup_storage);
12984722 }
723
+
724
+ if (*fixup_percpu_cgroup_storage) {
725
+ map_fds[8] = create_cgroup_storage(true);
726
+ do {
727
+ prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
728
+ fixup_percpu_cgroup_storage++;
729
+ } while (*fixup_percpu_cgroup_storage);
730
+ }
731
+ if (*fixup_map_sockmap) {
732
+ map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
733
+ sizeof(int), 1);
734
+ do {
735
+ prog[*fixup_map_sockmap].imm = map_fds[9];
736
+ fixup_map_sockmap++;
737
+ } while (*fixup_map_sockmap);
738
+ }
739
+ if (*fixup_map_sockhash) {
740
+ map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
741
+ sizeof(int), 1);
742
+ do {
743
+ prog[*fixup_map_sockhash].imm = map_fds[10];
744
+ fixup_map_sockhash++;
745
+ } while (*fixup_map_sockhash);
746
+ }
747
+ if (*fixup_map_xskmap) {
748
+ map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
749
+ sizeof(int), 1);
750
+ do {
751
+ prog[*fixup_map_xskmap].imm = map_fds[11];
752
+ fixup_map_xskmap++;
753
+ } while (*fixup_map_xskmap);
754
+ }
755
+ if (*fixup_map_stacktrace) {
756
+ map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
757
+ sizeof(u64), 1);
758
+ do {
759
+ prog[*fixup_map_stacktrace].imm = map_fds[12];
760
+ fixup_map_stacktrace++;
761
+ } while (*fixup_map_stacktrace);
762
+ }
763
+ if (*fixup_map_spin_lock) {
764
+ map_fds[13] = create_map_spin_lock();
765
+ do {
766
+ prog[*fixup_map_spin_lock].imm = map_fds[13];
767
+ fixup_map_spin_lock++;
768
+ } while (*fixup_map_spin_lock);
769
+ }
770
+ if (*fixup_map_array_ro) {
771
+ map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
772
+ sizeof(struct test_val), 1,
773
+ BPF_F_RDONLY_PROG);
774
+ update_map(map_fds[14], 0);
775
+ do {
776
+ prog[*fixup_map_array_ro].imm = map_fds[14];
777
+ fixup_map_array_ro++;
778
+ } while (*fixup_map_array_ro);
779
+ }
780
+ if (*fixup_map_array_wo) {
781
+ map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
782
+ sizeof(struct test_val), 1,
783
+ BPF_F_WRONLY_PROG);
784
+ update_map(map_fds[15], 0);
785
+ do {
786
+ prog[*fixup_map_array_wo].imm = map_fds[15];
787
+ fixup_map_array_wo++;
788
+ } while (*fixup_map_array_wo);
789
+ }
790
+ if (*fixup_map_array_small) {
791
+ map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
792
+ 1, 1, 0);
793
+ update_map(map_fds[16], 0);
794
+ do {
795
+ prog[*fixup_map_array_small].imm = map_fds[16];
796
+ fixup_map_array_small++;
797
+ } while (*fixup_map_array_small);
798
+ }
799
+ if (*fixup_sk_storage_map) {
800
+ map_fds[17] = create_sk_storage_map();
801
+ do {
802
+ prog[*fixup_sk_storage_map].imm = map_fds[17];
803
+ fixup_sk_storage_map++;
804
+ } while (*fixup_sk_storage_map);
805
+ }
806
+ if (*fixup_map_event_output) {
807
+ map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
808
+ sizeof(int), sizeof(int), 1, 0);
809
+ do {
810
+ prog[*fixup_map_event_output].imm = map_fds[18];
811
+ fixup_map_event_output++;
812
+ } while (*fixup_map_event_output);
813
+ }
814
+ if (*fixup_map_reuseport_array) {
815
+ map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
816
+ sizeof(u32), sizeof(u64), 1, 0);
817
+ do {
818
+ prog[*fixup_map_reuseport_array].imm = map_fds[19];
819
+ fixup_map_reuseport_array++;
820
+ } while (*fixup_map_reuseport_array);
821
+ }
822
+ if (*fixup_map_ringbuf) {
823
+ map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
824
+ 0, 4096);
825
+ do {
826
+ prog[*fixup_map_ringbuf].imm = map_fds[20];
827
+ fixup_map_ringbuf++;
828
+ } while (*fixup_map_ringbuf);
829
+ }
12985830 }
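All of the fixup_* arrays above are zero-terminated lists of instruction indices; each listed instruction gets its imm field overwritten with the fd of the freshly created map (which is also why instruction 0 can never be patched). Illustrative only: an entry whose BPF_LD_MAP_FD placeholder at instruction 3 is rewritten with the 8-byte-value hash map fd (map_fds[0]):

{
	"illustrative: hash map lookup",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
},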
831
+
832
+struct libcap {
833
+ struct __user_cap_header_struct hdr;
834
+ struct __user_cap_data_struct data[2];
835
+};
12986836
12987837 static int set_admin(bool admin)
12988838 {
12989839 cap_t caps;
12990
- const cap_value_t cap_val = CAP_SYS_ADMIN;
840
+ /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
841
+ const cap_value_t cap_net_admin = CAP_NET_ADMIN;
842
+ const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
843
+ struct libcap *cap;
12991844 int ret = -1;
12992845
12993846 caps = cap_get_proc();
....@@ -12995,10 +848,25 @@
12995848 perror("cap_get_proc");
12996849 return -1;
12997850 }
12998
- if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
12999
- admin ? CAP_SET : CAP_CLEAR)) {
13000
- perror("cap_set_flag");
851
+ cap = (struct libcap *)caps;
852
+ if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
853
+ perror("cap_set_flag clear admin");
13001854 goto out;
855
+ }
856
+ if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
857
+ admin ? CAP_SET : CAP_CLEAR)) {
858
+ perror("cap_set_flag set_or_clear net");
859
+ goto out;
860
+ }
861
+ /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON,
862
+ * so update effective bits manually
863
+ */
864
+ if (admin) {
865
+ cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32);
866
+ cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32);
867
+ } else {
868
+ cap->data[1].effective &= ~(1 << (38 - 32));
869
+ cap->data[1].effective &= ~(1 << (39 - 32));
13002870 }
13003871 if (cap_set_proc(caps)) {
13004872 perror("cap_set_proc");
....@@ -13011,16 +879,74 @@
13011879 return ret;
13012880 }
13013881
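CAP_PERFMON is capability 38 and CAP_BPF is 39; both sit past bit 31, so they land in the second __user_cap_data_struct, which is what the manual bit twiddling above encodes. A quick stand-alone check of those bit positions:

#include <assert.h>

int main(void)
{
	/* capability n maps to data[n / 32], bit (n % 32) */
	assert(38 / 32 == 1 && 38 % 32 == 6);	/* CAP_PERFMON -> data[1], bit 6 */
	assert(39 / 32 == 1 && 39 % 32 == 7);	/* CAP_BPF     -> data[1], bit 7 */
	return 0;
}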
882
+static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
883
+ void *data, size_t size_data)
884
+{
885
+ __u8 tmp[TEST_DATA_LEN << 2];
886
+ __u32 size_tmp = sizeof(tmp);
887
+ uint32_t retval;
888
+ int err;
889
+
890
+ if (unpriv)
891
+ set_admin(true);
892
+ err = bpf_prog_test_run(fd_prog, 1, data, size_data,
893
+ tmp, &size_tmp, &retval, NULL);
894
+ if (unpriv)
895
+ set_admin(false);
896
+ if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
897
+ printf("Unexpected bpf_prog_test_run error ");
898
+ return err;
899
+ }
900
+ if (!err && retval != expected_val &&
901
+ expected_val != POINTER_VALUE) {
902
+ printf("FAIL retval %d != %d ", retval, expected_val);
903
+ return 1;
904
+ }
905
+
906
+ return 0;
907
+}
908
+
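The raw 524 is used because ENOTSUPP is a kernel-internal errno with no uapi header definition. Other bpf selftests commonly sidestep the magic number with a local fallback define; a sketch of that pattern:

#include <errno.h>

#ifndef ENOTSUPP
#define ENOTSUPP 524	/* kernel-internal, not exported to userspace */
#endif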
909
+static bool cmp_str_seq(const char *log, const char *exp)
910
+{
911
+ char needle[80];
912
+ const char *p, *q;
913
+ int len;
914
+
915
+ do {
916
+ p = strchr(exp, '\t');
917
+ if (!p)
918
+ p = exp + strlen(exp);
919
+
920
+ len = p - exp;
921
+ if (len >= sizeof(needle) || !len) {
922
+ printf("FAIL\nTestcase bug\n");
923
+ return false;
924
+ }
925
+ strncpy(needle, exp, len);
926
+ needle[len] = 0;
927
+ q = strstr(log, needle);
928
+ if (!q) {
929
+ printf("FAIL\nUnexpected verifier log in successful load!\n"
930
+ "EXP: %s\nRES:\n", needle);
931
+ return false;
932
+ }
933
+ log = q + len;
934
+ exp = p + 1;
935
+ } while (*p);
936
+ return true;
937
+}
938
+
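cmp_str_seq() treats .errstr as tab-separated fragments that must all appear in the verifier log, in order, when the expected result is VERBOSE_ACCEPT. Illustrative only (the expected fragments are chosen to match typical verifier output for this program, not quoted from a real test):

{
	"illustrative: verbose accept",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 7),
	BPF_EXIT_INSN(),
	},
	.errstr = "r0 = 7\tprocessed 2 insns",
	.result = VERBOSE_ACCEPT,
	.retval = 7,
},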
13014939 static void do_test_single(struct bpf_test *test, bool unpriv,
13015940 int *passes, int *errors)
13016941 {
13017942 int fd_prog, expected_ret, alignment_prevented_execution;
13018943 int prog_len, prog_type = test->prog_type;
13019944 struct bpf_insn *prog = test->insns;
945
+ struct bpf_load_program_attr attr;
946
+ int run_errs, run_successes;
13020947 int map_fds[MAX_NR_MAPS];
13021948 const char *expected_err;
13022
- uint32_t expected_val;
13023
- uint32_t retval;
949
+ int fixup_skips;
13024950 __u32 pflags;
13025951 int i, err;
13026952
....@@ -13029,27 +955,72 @@
13029955
13030956 if (!prog_type)
13031957 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
958
+ fixup_skips = skips;
13032959 do_test_fixup(test, prog_type, prog, map_fds);
13033
- prog_len = probe_filter_length(prog);
960
+ if (test->fill_insns) {
961
+ prog = test->fill_insns;
962
+ prog_len = test->prog_len;
963
+ } else {
964
+ prog_len = probe_filter_length(prog);
965
+ }
966
+ /* If there were some map skips during fixup due to missing bpf
967
+ * features, skip this test.
968
+ */
969
+ if (fixup_skips != skips)
970
+ return;
13034971
13035
- pflags = 0;
972
+ pflags = BPF_F_TEST_RND_HI32;
13036973 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
13037974 pflags |= BPF_F_STRICT_ALIGNMENT;
13038975 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
13039976 pflags |= BPF_F_ANY_ALIGNMENT;
13040
- fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
13041
- "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
977
+ if (test->flags & ~3)
978
+ pflags |= test->flags;
13042979
13043980 expected_ret = unpriv && test->result_unpriv != UNDEF ?
13044981 test->result_unpriv : test->result;
13045982 expected_err = unpriv && test->errstr_unpriv ?
13046983 test->errstr_unpriv : test->errstr;
13047
- expected_val = unpriv && test->retval_unpriv ?
13048
- test->retval_unpriv : test->retval;
984
+ memset(&attr, 0, sizeof(attr));
985
+ attr.prog_type = prog_type;
986
+ attr.expected_attach_type = test->expected_attach_type;
987
+ attr.insns = prog;
988
+ attr.insns_cnt = prog_len;
989
+ attr.license = "GPL";
990
+ if (verbose)
991
+ attr.log_level = 1;
992
+ else if (expected_ret == VERBOSE_ACCEPT)
993
+ attr.log_level = 2;
994
+ else
995
+ attr.log_level = 4;
996
+ attr.prog_flags = pflags;
997
+
998
+ if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
999
+ attr.attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
1000
+ attr.expected_attach_type);
1001
+ if (attr.attach_btf_id < 0) {
1002
+ printf("FAIL\nFailed to find BTF ID for '%s'!\n",
1003
+ test->kfunc);
1004
+ (*errors)++;
1005
+ return;
1006
+ }
1007
+ }
1008
+
1009
+ fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
1010
+
1011
+ /* BPF_PROG_TYPE_TRACING requires more setup and
1012
+ * bpf_probe_prog_type won't give a correct answer
1013
+ */
1014
+ if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
1015
+ !bpf_probe_prog_type(prog_type, 0)) {
1016
+ printf("SKIP (unsupported program type %d)\n", prog_type);
1017
+ skips++;
1018
+ goto close_fds;
1019
+ }
130491020
130501021 alignment_prevented_execution = 0;
130511022
13052
- if (expected_ret == ACCEPT) {
1023
+ if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
130531024 if (fd_prog < 0) {
130541025 printf("FAIL\nFailed to load prog '%s'!\n",
130551026 strerror(errno));
....@@ -13057,51 +1028,82 @@
130571028 }
130581029 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
130591030 if (fd_prog >= 0 &&
13060
- (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) {
1031
+ (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
130611032 alignment_prevented_execution = 1;
13062
- goto test_ok;
13063
- }
130641033 #endif
1034
+ if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
1035
+ goto fail_log;
1036
+ }
130651037 } else {
130661038 if (fd_prog >= 0) {
130671039 printf("FAIL\nUnexpected success to load!\n");
130681040 goto fail_log;
130691041 }
13070
- if (!strstr(bpf_vlog, expected_err)) {
1042
+ if (!expected_err || !strstr(bpf_vlog, expected_err)) {
130711043 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
130721044 expected_err, bpf_vlog);
130731045 goto fail_log;
130741046 }
130751047 }
130761048
13077
- if (fd_prog >= 0) {
13078
- __u8 tmp[TEST_DATA_LEN << 2];
13079
- __u32 size_tmp = sizeof(tmp);
1049
+ if (!unpriv && test->insn_processed) {
1050
+ uint32_t insn_processed;
1051
+ char *proc;
130801052
13081
- if (unpriv)
13082
- set_admin(true);
13083
- err = bpf_prog_test_run(fd_prog, 1, test->data,
13084
- sizeof(test->data), tmp, &size_tmp,
13085
- &retval, NULL);
13086
- if (unpriv)
13087
- set_admin(false);
13088
- if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
13089
- printf("Unexpected bpf_prog_test_run error\n");
13090
- goto fail_log;
13091
- }
13092
- if (!err && retval != expected_val &&
13093
- expected_val != POINTER_VALUE) {
13094
- printf("FAIL retval %d != %d\n", retval, expected_val);
1053
+ proc = strstr(bpf_vlog, "processed ");
1054
+ insn_processed = atoi(proc + 10);
1055
+ if (test->insn_processed != insn_processed) {
1056
+ printf("FAIL\nUnexpected insn_processed %u vs %u\n",
1057
+ insn_processed, test->insn_processed);
130951058 goto fail_log;
130961059 }
130971060 }
13098
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13099
-test_ok:
13100
-#endif
13101
- (*passes)++;
13102
- printf("OK%s\n", alignment_prevented_execution ?
13103
- " (NOTE: not executed due to unknown alignment)" : "");
1061
+
1062
+ if (verbose)
1063
+ printf(", verifier log:\n%s", bpf_vlog);
1064
+
1065
+ run_errs = 0;
1066
+ run_successes = 0;
1067
+ if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
1068
+ uint32_t expected_val;
1069
+ int i;
1070
+
1071
+ if (!test->runs)
1072
+ test->runs = 1;
1073
+
1074
+ for (i = 0; i < test->runs; i++) {
1075
+ if (unpriv && test->retvals[i].retval_unpriv)
1076
+ expected_val = test->retvals[i].retval_unpriv;
1077
+ else
1078
+ expected_val = test->retvals[i].retval;
1079
+
1080
+ err = do_prog_test_run(fd_prog, unpriv, expected_val,
1081
+ test->retvals[i].data,
1082
+ sizeof(test->retvals[i].data));
1083
+ if (err) {
1084
+ printf("(run %d/%d) ", i + 1, test->runs);
1085
+ run_errs++;
1086
+ } else {
1087
+ run_successes++;
1088
+ }
1089
+ }
1090
+ }
1091
+
1092
+ if (!run_errs) {
1093
+ (*passes)++;
1094
+ if (run_successes > 1)
1095
+ printf("%d cases ", run_successes);
1096
+ printf("OK");
1097
+ if (alignment_prevented_execution)
1098
+ printf(" (NOTE: not executed due to unknown alignment)");
1099
+ printf("\n");
1100
+ } else {
1101
+ printf("\n");
1102
+ goto fail_log;
1103
+ }
131041104 close_fds:
1105
+ if (test->fill_insns)
1106
+ free(test->fill_insns);
131051107 close(fd_prog);
131061108 for (i = 0; i < MAX_NR_MAPS; i++)
131071109 close(map_fds[i]);
....@@ -13115,9 +1117,11 @@
131151117
131161118 static bool is_admin(void)
131171119 {
1120
+ cap_flag_value_t net_priv = CAP_CLEAR;
1121
+ bool perfmon_priv = false;
1122
+ bool bpf_priv = false;
1123
+ struct libcap *cap;
131181124 cap_t caps;
13119
- cap_flag_value_t sysadmin = CAP_CLEAR;
13120
- const cap_value_t cap_val = CAP_SYS_ADMIN;
131211125
131221126 #ifdef CAP_IS_SUPPORTED
131231127 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
....@@ -13130,11 +1134,14 @@
131301134 perror("cap_get_proc");
131311135 return false;
131321136 }
13133
- if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
13134
- perror("cap_get_flag");
1137
+ cap = (struct libcap *)caps;
1138
+ bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32));
1139
+ perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32));
1140
+ if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
1141
+ perror("cap_get_flag NET");
131351142 if (cap_free(caps))
131361143 perror("cap_free");
13137
- return (sysadmin == CAP_SET);
1144
+ return bpf_priv && perfmon_priv && net_priv == CAP_SET;
131381145 }
131391146
131401147 static void get_unpriv_disabled()
....@@ -13155,19 +1162,6 @@
131551162
131561163 static bool test_as_unpriv(struct bpf_test *test)
131571164 {
13158
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13159
- /* Some architectures have strict alignment requirements. In
13160
- * that case, the BPF verifier detects if a program has
13161
- * unaligned accesses and rejects them. A user can pass
13162
- * BPF_F_ANY_ALIGNMENT to a program to override this
13163
- * check. That, however, will only work when a privileged user
13164
- * loads a program. An unprivileged user loading a program
13165
- * with this flag will be rejected prior entering the
13166
- * verifier.
13167
- */
13168
- if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
13169
- return false;
13170
-#endif
131711165 return !test->prog_type ||
131721166 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
131731167 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
....@@ -13175,7 +1169,7 @@
131751169
131761170 static int do_test(bool unpriv, unsigned int from, unsigned int to)
131771171 {
13178
- int i, passes = 0, errors = 0, skips = 0;
1172
+ int i, passes = 0, errors = 0;
131791173
131801174 for (i = from; i < to; i++) {
131811175 struct bpf_test *test = &tests[i];
....@@ -13213,17 +1207,24 @@
132131207 {
132141208 unsigned int from = 0, to = ARRAY_SIZE(tests);
132151209 bool unpriv = !is_admin();
1210
+ int arg = 1;
1211
+
1212
+ if (argc > 1 && strcmp(argv[1], "-v") == 0) {
1213
+ arg++;
1214
+ verbose = true;
1215
+ argc--;
1216
+ }
132161217
132171218 if (argc == 3) {
13218
- unsigned int l = atoi(argv[argc - 2]);
13219
- unsigned int u = atoi(argv[argc - 1]);
1219
+ unsigned int l = atoi(argv[arg]);
1220
+ unsigned int u = atoi(argv[arg + 1]);
132201221
132211222 if (l < to && u < to) {
132221223 from = l;
132231224 to = u + 1;
132241225 }
132251226 } else if (argc == 2) {
13226
- unsigned int t = atoi(argv[argc - 1]);
1227
+ unsigned int t = atoi(argv[arg]);
132271228
132281229 if (t < to) {
132291230 from = t;