+/* SPDX-License-Identifier: GPL-2.0-only */
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
  */
 #ifndef _LINUX_BPF_VERIFIER_H
 #define _LINUX_BPF_VERIFIER_H 1
..
 #include <linux/bpf.h> /* for enum bpf_reg_type */
 #include <linux/filter.h> /* for MAX_BPF_STACK */
 #include <linux/tnum.h>
+#include <linux/android_kabi.h>
 
 /* Maximum variable offset umax_value permitted when resolving memory accesses.
  * In practice this is far bigger than any realistic pointer offset; this limit
..
  */
 enum bpf_reg_liveness {
 	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
-	REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
-	REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
+	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
+	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
+	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
+	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
+	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
 };
 
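The read marks are now bit flags rather than an ordered enum, so a 32-bit sub-register read and a full-width read can be distinguished while REG_LIVE_READ still matches either. A small hedged illustration of the flag arithmetic only (the helper names below are invented for the example, not part of the kernel):

/* Hypothetical illustration: a 32-bit (w-register) read marks READ32,
 * a full-width read marks READ64, and REG_LIVE_READ matches either.
 */
static bool reg_was_read(enum bpf_reg_liveness live)
{
	return live & REG_LIVE_READ;
}

static bool reg_needs_full_64bit_value(enum bpf_reg_liveness live)
{
	return live & REG_LIVE_READ64;
}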
 struct bpf_reg_state {
..
 	enum bpf_reg_type type;
 	union {
 		/* valid when type == PTR_TO_PACKET */
-		u16 range;
+		int range;
 
 		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
 		 *   PTR_TO_MAP_VALUE_OR_NULL
 		 */
 		struct bpf_map *map_ptr;
+
+		u32 btf_id; /* for PTR_TO_BTF_ID */
+
+		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
 
 		/* Max size from any of the above. */
 		unsigned long raw;
..
 	 * offset, so they can share range knowledge.
 	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
 	 * came from, when one is tested for != NULL.
+	 * For PTR_TO_MEM_OR_NULL this is used to identify the memory allocation
+	 * for the purpose of tracking that it's freed.
+	 * For PTR_TO_SOCKET this is used to share which pointers retain the
+	 * same reference to the socket, to determine proper reference freeing.
 	 */
 	u32 id;
+	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
+	 * from a pointer-cast helper, bpf_sk_fullsock() and
+	 * bpf_tcp_sock().
+	 *
+	 * Consider the following where "sk" is a reference counted
+	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
+	 *
+	 * 1: sk = bpf_sk_lookup_tcp();
+	 * 2: if (!sk) { return 0; }
+	 * 3: fullsock = bpf_sk_fullsock(sk);
+	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+	 * 5: tp = bpf_tcp_sock(fullsock);
+	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+	 * 7: bpf_sk_release(sk);
+	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
+	 *
+	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
+	 * the "tp" ptr should also be invalidated. To do that, the regs
+	 * holding "fullsock" and "sk" need to remember the original
+	 * refcounted ptr id (i.e. sk_reg->id) in ref_obj_id such that the
+	 * verifier can reset all regs whose ref_obj_id matches the
+	 * sk_reg->id.
+	 *
+	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+	 * sk_reg->id is kept for NULL-marking purposes only.
+	 * After NULL-marking is done, sk_reg->id can be reset to 0.
+	 *
+	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+	 *
+	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+	 * which is the same as sk_reg->ref_obj_id.
+	 *
+	 * From the verifier's perspective, if sk, fullsock and tp
+	 * are not NULL, they are the same ptr with different
+	 * reg->type. In particular, bpf_sk_release(tp) is also
+	 * allowed and has the same effect as bpf_sk_release(sk).
+	 */
+	u32 ref_obj_id;
 	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
 	 * the actual value.
 	 * For pointer types, this represents the variable part of the offset
..
 	s64 smax_value; /* maximum possible (s64)value */
 	u64 umin_value; /* minimum possible (u64)value */
 	u64 umax_value; /* maximum possible (u64)value */
+	s32 s32_min_value; /* minimum possible (s32)value */
+	s32 s32_max_value; /* maximum possible (s32)value */
+	u32 u32_min_value; /* minimum possible (u32)value */
+	u32 u32_max_value; /* maximum possible (u32)value */
 	/* parentage chain for liveness checking */
 	struct bpf_reg_state *parent;
 	/* Inside the callee two registers can be both PTR_TO_STACK like
..
 	 * pointing to bpf_func_state.
 	 */
 	u32 frameno;
+	/* Tracks subreg definition. The stored value is the insn_idx of the
+	 * writing insn. This is safe because subreg_def is used before any insn
+	 * patching which only happens after main verification finished.
+	 */
+	s32 subreg_def;
 	enum bpf_reg_liveness live;
+	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
+	bool precise;
 };
 
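The numbered walk-through in the ref_obj_id comment corresponds roughly to the BPF C sketch below. It is illustrative only: the section name, program type and return values are assumptions (any program type where bpf_sk_lookup_tcp(), bpf_sk_fullsock() and bpf_tcp_sock() are available would do, e.g. a TC classifier), and the program is deliberately one the verifier rejects, since "tp" is used after the shared reference was released.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int ref_obj_id_example(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};	/* empty tuple, lookup result unused */
	struct bpf_sock *sk, *fullsock;
	struct bpf_tcp_sock *tp;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return 0;
	fullsock = bpf_sk_fullsock(sk);
	if (!fullsock) {
		bpf_sk_release(sk);
		return 0;
	}
	tp = bpf_tcp_sock(fullsock);
	if (!tp) {
		bpf_sk_release(sk);
		return 0;
	}
	bpf_sk_release(sk);
	/* sk, fullsock and tp share one ref_obj_id; the release above
	 * invalidated all three, so this load is rejected by the verifier.
	 */
	return tp->snd_cwnd;
}

char _license[] SEC("license") = "GPL";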
 enum bpf_stack_slot_type {
..
 	u8 slot_type[BPF_REG_SIZE];
 };
 
+struct bpf_reference_state {
+	/* Track each reference created with a unique id, even if the same
+	 * instruction creates the reference multiple times (eg, via CALL).
+	 */
+	int id;
+	/* Instruction where the allocation of this reference occurred. This
+	 * is used purely to inform the user of a reference leak.
+	 */
+	int insn_idx;
+};
+
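As a rough sketch of how these records are used: when a helper that returns a reference is verified, an entry is appended with a fresh id (which the register keeps in ref_obj_id), and the acquiring instruction index is remembered for leak reporting. The function below is loosely modeled on the verifier's internal bookkeeping in kernel/bpf/verifier.c but is not the kernel's exact code (name, signature and error handling are simplified for illustration).

/* Simplified sketch, not the kernel's exact code. */
static int acquire_reference(struct bpf_verifier_env *env,
			     struct bpf_func_state *state, int insn_idx)
{
	struct bpf_reference_state *refs;
	int id = ++env->id_gen;	/* same generator used for reg ids */

	refs = krealloc(state->refs,
			(state->acquired_refs + 1) * sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;
	state->refs = refs;
	state->refs[state->acquired_refs].id = id;
	state->refs[state->acquired_refs].insn_idx = insn_idx;
	state->acquired_refs++;
	return id;	/* stored in the acquiring register's ref_obj_id */
}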
 /* state of the program:
  * type of all registers and stack info
  */
..
 	 * 0 = main function, 1 = first callee.
 	 */
 	u32 frameno;
-	/* subprog number == index within subprog_stack_depth
+	/* subprog number == index within subprog_info
 	 * zero == main subprog
 	 */
 	u32 subprogno;
 
-	/* should be second to last. See copy_func_state() */
+	/* The following fields should be last. See copy_func_state() */
+	int acquired_refs;
+	struct bpf_reference_state *refs;
 	int allocated_stack;
 	struct bpf_stack_state *stack;
+};
+
+struct bpf_idx_pair {
+	u32 prev_idx;
+	u32 idx;
 };
 
 struct bpf_id_pair {
..
 struct bpf_verifier_state {
 	/* call stack tracking */
 	struct bpf_func_state *frame[MAX_CALL_FRAMES];
+	struct bpf_verifier_state *parent;
+	/*
+	 * 'branches' field is the number of branches left to explore:
+	 * 0 - all possible paths from this state reached bpf_exit or
+	 *     were safely pruned
+	 * 1 - at least one path is being explored.
+	 *     This state hasn't reached bpf_exit
+	 * 2 - at least two paths are being explored.
+	 *     This state is an immediate parent of two children.
+	 *     One is a fallthrough branch with branches==1 and the other
+	 *     state is pushed into the stack (to be explored later) also with
+	 *     branches==1. The parent of this state has branches==1.
+	 * The verifier state tree connected via 'parent' pointer looks like:
+	 * 1
+	 * 1
+	 * 2 -> 1 (first 'if' pushed into stack)
+	 * 1
+	 * 2 -> 1 (second 'if' pushed into stack)
+	 * 1
+	 * 1
+	 * 1 bpf_exit.
+	 *
+	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
+	 * and the verifier state tree will look like:
+	 * 1
+	 * 1
+	 * 2 -> 1 (first 'if' pushed into stack)
+	 * 1
+	 * 1 -> 1 (second 'if' pushed into stack)
+	 * 0
+	 * 0
+	 * 0 bpf_exit.
+	 * After pop_stack(), do_check() will resume at the second 'if'.
+	 *
+	 * If is_state_visited() sees a state with branches > 0 it means
+	 * there is a loop. If such a state is exactly equal to the current
+	 * state it's an infinite loop. Note states_equal() checks for state
+	 * equivalence, so two states being 'states_equal' does not mean an
+	 * infinite loop. The exact comparison is provided by the
+	 * states_maybe_looping() function. It's a stronger pre-check and
+	 * much faster than states_equal().
+	 *
+	 * This algorithm may not find all possible infinite loops, or the
+	 * loop iteration count may be too high.
+	 * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
+	 */
+	u32 branches;
+	u32 insn_idx;
 	u32 curframe;
+	u32 active_spin_lock;
 	bool speculative;
+
+	/* first and last insn idx of this verifier state */
+	u32 first_insn_idx;
+	u32 last_insn_idx;
+	/* jmp history recorded from first to last.
+	 * backtracking uses it to go from last to first.
+	 * For most states jmp_history_cnt is [0-3].
+	 * For loops it can go up to ~40.
+	 */
+	struct bpf_idx_pair *jmp_history;
+	u32 jmp_history_cnt;
 };
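To make the 'branches' bookkeeping concrete, here is a simplified sketch of the propagation step described in the comment above. It is an assumed shape, not the kernel's exact code: the real update_branch_counts() in kernel/bpf/verifier.c adds sanity warnings and interacts with state freeing.

/* Simplified sketch of branch-count propagation. When a path reaches
 * bpf_exit (or is pruned), every ancestor state loses one outstanding
 * branch; a count of zero means that state is fully explored.
 */
static void update_branch_counts_sketch(struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		if (br)
			break;		/* a sibling path is still live */
		st = st->parent;	/* this state is done, go up one level */
	}
}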
+
+#define bpf_get_spilled_reg(slot, frame)				\
+	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
+	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
+	 ? &frame->stack[slot].spilled_ptr : NULL)
+
+/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
+#define bpf_for_each_spilled_reg(iter, frame, reg)			\
+	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
+	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
+	     iter++, reg = bpf_get_spilled_reg(iter, frame))
+
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr)	\
+	({								\
+		struct bpf_verifier_state *___vstate = __vst;		\
+		int ___i, ___j;						\
+		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {	\
+			struct bpf_reg_state *___regs;			\
+			__state = ___vstate->frame[___i];		\
+			___regs = __state->regs;			\
+			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {	\
+				__reg = &___regs[___j];			\
+				(void)(__expr);				\
+			}						\
+			bpf_for_each_spilled_reg(___j, __state, __reg) {\
+				if (!__reg)				\
+					continue;			\
+				(void)(__expr);				\
+			}						\
+		}							\
+	})
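A hypothetical usage sketch of the macro above, tying it back to ref_obj_id: a caller can visit every register and spilled register in every frame of the current state and invalidate the ones that share a just-released reference id. The surrounding function is invented for the example, and __mark_reg_unknown() is a verifier-internal helper from kernel/bpf/verifier.c that is not declared in this header.

/* Hypothetical usage sketch: invalidate every register (including spilled
 * ones) whose ref_obj_id matches a released reference.
 */
static void invalidate_released_regs(struct bpf_verifier_env *env,
				     u32 ref_obj_id)
{
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
		if (reg->ref_obj_id == ref_obj_id)
			__mark_reg_unknown(env, reg); /* verifier-internal helper */
	}));
}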
 
 /* linked list of verifier states used to prune search */
 struct bpf_verifier_state_list {
 	struct bpf_verifier_state state;
 	struct bpf_verifier_state_list *next;
+	int miss_cnt, hit_cnt;
 };
 
 /* Possible states for alu_state member. */
..
 struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
-		unsigned long map_state; /* pointer/poison value for maps */
+		unsigned long map_ptr_state; /* pointer/poison value for maps */
 		s32 call_imm; /* saved imm field of call insn */
 		u32 alu_limit; /* limit for add/sub register with pointer */
+		struct {
+			u32 map_index; /* index into used_maps[] */
+			u32 map_off; /* offset from value base address */
+		};
+		struct {
+			enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
+			union {
+				u32 btf_id; /* btf_id for struct typed var */
+				u32 mem_size; /* mem_size for non-struct typed var */
+			};
+		} btf_var;
 	};
+	u64 map_key_state; /* constant (32 bit) key tracking for maps */
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-	bool seen; /* this insn was processed by the verifier */
+	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
 	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
+	bool zext_dst; /* this insn zero extends dst reg */
 	u8 alu_state; /* used in combination with alu_limit */
+
+	/* below fields are initialized once */
+	unsigned int orig_idx; /* original instruction index */
+	bool prune_point;
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
..
 	return log->len_used >= log->len_total - 1;
 }
 
+#define BPF_LOG_LEVEL1	1
+#define BPF_LOG_LEVEL2	2
+#define BPF_LOG_STATS	4
+#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
+#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
+#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
+
 static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
 {
-	return log->level && log->ubuf && !bpf_verifier_log_full(log);
+	return log &&
+		((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
+		 log->level == BPF_LOG_KERNEL);
+}
+
+static inline bool
+bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
+{
+	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
+	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
 }
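These bits are what a loader passes as log_level in the BPF_PROG_LOAD attributes, and bpf_verifier_log_attr_valid() is the check those attributes must satisfy. A rough user-space sketch (the program type is an arbitrary example and the buffer size is chosen only to satisfy the >= 128 and <= UINT_MAX >> 2 bounds above):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static char verifier_log[1 << 20];

/* Load a program asking for verbose per-insn output plus verification stats. */
int load_with_verbose_log(const struct bpf_insn *insns, unsigned int insn_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;	/* arbitrary example type */
	attr.insns     = (__u64)(unsigned long)insns;
	attr.insn_cnt  = insn_cnt;
	attr.license   = (__u64)(unsigned long)"GPL";
	attr.log_buf   = (__u64)(unsigned long)verifier_log;
	attr.log_size  = sizeof(verifier_log);		/* >= 128, <= UINT_MAX >> 2 */
	attr.log_level = 1 | 2 | 4; /* BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2 | BPF_LOG_STATS */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}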
 
 #define BPF_MAX_SUBPROGS 256
 
 struct bpf_subprog_info {
+	/* 'start' has to be the first field otherwise find_subprog() won't work */
 	u32 start; /* insn idx of function entry point */
+	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
 	u16 stack_depth; /* max. stack depth used by this function */
+	bool has_tail_call;
+	bool tail_call_reachable;
+	bool has_ld_abs;
+
+	ANDROID_KABI_RESERVE(1);
 };
 
 /* single container for all structs
..
 	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
 	int stack_size; /* number of states to be processed */
 	bool strict_alignment; /* perform strict pointer alignment checks */
+	bool test_state_freq; /* test verifier with different pruning frequency */
 	struct bpf_verifier_state *cur_state; /* current verifier state */
 	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+	struct bpf_verifier_state_list *free_list;
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
 	u32 used_map_cnt; /* number of used maps */
 	u32 id_gen; /* used to generate unique reg IDs */
 	bool explore_alu_limits;
 	bool allow_ptr_leaks;
+	bool allow_uninit_stack;
+	bool allow_ptr_to_map_access;
+	bool bpf_capable;
+	bool bypass_spec_v1;
+	bool bypass_spec_v4;
 	bool seen_direct_write;
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+	const struct bpf_line_info *prev_linfo;
 	struct bpf_verifier_log log;
 	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
 	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
+	struct {
+		int *insn_state;
+		int *insn_stack;
+		int cur_stack;
+	} cfg;
+	u32 pass_cnt; /* number of times do_check() was called */
 	u32 subprog_cnt;
+	/* number of instructions analyzed by the verifier */
+	u32 prev_insn_processed, insn_processed;
+	/* number of jmps, calls, exits analyzed so far */
+	u32 prev_jmps_processed, jmps_processed;
+	/* total verification time */
+	u64 verification_time;
+	/* maximum number of verifier states kept in 'branching' instructions */
+	u32 max_states_per_insn;
+	/* total number of allocated verifier states */
+	u32 total_states;
+	/* some states are freed during program analysis.
+	 * this is the peak number of states. this number dominates kernel
+	 * memory consumption during verification
+	 */
+	u32 peak_states;
+	/* longest register parentage chain walked for liveness marking */
+	u32 longest_mark_read_walk;
+
+	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_RESERVE(2);
 };
 
 __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
 				      const char *fmt, va_list args);
 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
 					   const char *fmt, ...);
+__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+			    const char *fmt, ...);
 
-static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
 {
 	struct bpf_verifier_state *cur = env->cur_state;
 
-	return cur->frame[cur->curframe]->regs;
+	return cur->frame[cur->curframe];
 }
 
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
+static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+{
+	return cur_func(env)->regs;
+}
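cur_func() returns the innermost call frame of the state being verified, and cur_regs() its register file. A hypothetical illustration of the two accessors together (the wrapper function is invented for the example):

/* Hypothetical illustration: inspect R0 of the innermost frame of the
 * verifier state currently being checked.
 */
static bool retval_is_scalar(struct bpf_verifier_env *env)
{
	struct bpf_reg_state *regs = cur_regs(env);

	return regs[BPF_REG_0].type == SCALAR_VALUE;
}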
+
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
 int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
 				 int insn_idx, int prev_insn_idx);
+int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+			      struct bpf_insn *insn);
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+
+int check_ctx_reg(struct bpf_verifier_env *env,
+		  const struct bpf_reg_state *reg, int regno);
+
+/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
+static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
+					     u32 btf_id)
+{
+	return tgt_prog ? (((u64)tgt_prog->aux->id) << 32 | btf_id) : btf_id;
+}
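The key simply packs the target program id (if any) into the upper 32 bits and the BTF type id into the lower 32 bits. A worked example with illustrative numbers:

/* Illustrative numbers only: a target prog with id 5 and btf_id 42 yield
 * ((u64)5 << 32) | 42 == 0x000000050000002a; with no target prog the key
 * is just the btf_id itself, 0x000000000000002a.
 */
u64 key_with_prog = ((u64)5 << 32) | 42;	/* 0x000000050000002a */
u64 key_no_prog   = 42;				/* 0x000000000000002a */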
+
+int bpf_check_attach_target(struct bpf_verifier_log *log,
+			    const struct bpf_prog *prog,
+			    const struct bpf_prog *tgt_prog,
+			    u32 btf_id,
+			    struct bpf_attach_target_info *tgt_info);
 
 #endif /* _LINUX_BPF_VERIFIER_H */