.. | ..
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */
1 | 2 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 | | - *
3 | | - * This program is free software; you can redistribute it and/or
4 | | - * modify it under the terms of version 2 of the GNU General Public
5 | | - * License as published by the Free Software Foundation.
6 | 3 | */
7 | 4 | #ifndef _LINUX_BPF_H
8 | 5 | #define _LINUX_BPF_H 1
.. | ..
15 | 12 | #include <linux/err.h>
16 | 13 | #include <linux/rbtree_latch.h>
17 | 14 | #include <linux/numa.h>
| 15 | +#include <linux/mm_types.h>
18 | 16 | #include <linux/wait.h>
| 17 | +#include <linux/u64_stats_sync.h>
| 18 | +#include <linux/refcount.h>
| 19 | +#include <linux/mutex.h>
| 20 | +#include <linux/module.h>
| 21 | +#include <linux/kallsyms.h>
| 22 | +#include <linux/capability.h>
| 23 | +#include <linux/percpu-refcount.h>
| 24 | +#include <linux/android_kabi.h>
19 | 25 |
20 | 26 | struct bpf_verifier_env;
| 27 | +struct bpf_verifier_log;
21 | 28 | struct perf_event;
22 | 29 | struct bpf_prog;
| 30 | +struct bpf_prog_aux;
23 | 31 | struct bpf_map;
24 | 32 | struct sock;
25 | 33 | struct seq_file;
| 34 | +struct btf;
26 | 35 | struct btf_type;
| 36 | +struct exception_table_entry;
| 37 | +struct seq_operations;
| 38 | +struct bpf_iter_aux_info;
| 39 | +struct bpf_local_storage;
| 40 | +struct bpf_local_storage_map;
| 41 | +
| 42 | +extern struct idr btf_idr;
| 43 | +extern spinlock_t btf_idr_lock;
| 44 | +
| 45 | +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
| 46 | + struct bpf_iter_aux_info *aux);
| 47 | +typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
| 48 | +struct bpf_iter_seq_info {
| 49 | + const struct seq_operations *seq_ops;
| 50 | + bpf_iter_init_seq_priv_t init_seq_private;
| 51 | + bpf_iter_fini_seq_priv_t fini_seq_private;
| 52 | + u32 seq_priv_size;
| 53 | +};
27 | 54 |
28 | 55 | /* map is generic key/value storage optionally accesible by eBPF programs */
29 | 56 | struct bpf_map_ops {
.. | ..
35 | 62 | int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
36 | 63 | void (*map_release_uref)(struct bpf_map *map);
37 | 64 | void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
| 65 | + int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
| 66 | + union bpf_attr __user *uattr);
| 67 | + int (*map_lookup_and_delete_batch)(struct bpf_map *map,
| 68 | + const union bpf_attr *attr,
| 69 | + union bpf_attr __user *uattr);
| 70 | + int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
| 71 | + union bpf_attr __user *uattr);
| 72 | + int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
| 73 | + union bpf_attr __user *uattr);
38 | 74 |
39 | 75 | /* funcs callable from userspace and from eBPF programs */
40 | 76 | void *(*map_lookup_elem)(struct bpf_map *map, void *key);
41 | 77 | int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
42 | 78 | int (*map_delete_elem)(struct bpf_map *map, void *key);
| 79 | + int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
| 80 | + int (*map_pop_elem)(struct bpf_map *map, void *value);
| 81 | + int (*map_peek_elem)(struct bpf_map *map, void *value);
43 | 82 |
44 | 83 | /* funcs called by prog_array and perf_event_array map */
45 | 84 | void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
46 | 85 | int fd);
47 | 86 | void (*map_fd_put_ptr)(void *ptr);
48 | | - u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
| 87 | + int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
49 | 88 | u32 (*map_fd_sys_lookup_elem)(void *ptr);
50 | 89 | void (*map_seq_show_elem)(struct bpf_map *map, void *key,
51 | 90 | struct seq_file *m);
52 | 91 | int (*map_check_btf)(const struct bpf_map *map,
| 92 | + const struct btf *btf,
53 | 93 | const struct btf_type *key_type,
54 | 94 | const struct btf_type *value_type);
| 95 | +
| 96 | + /* Prog poke tracking helpers. */
| 97 | + int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
| 98 | + void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
| 99 | + void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
| 100 | + struct bpf_prog *new);
| 101 | +
| 102 | + /* Direct value access helpers. */
| 103 | + int (*map_direct_value_addr)(const struct bpf_map *map,
| 104 | + u64 *imm, u32 off);
| 105 | + int (*map_direct_value_meta)(const struct bpf_map *map,
| 106 | + u64 imm, u32 *off);
| 107 | + int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
| 108 | + __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
| 109 | + struct poll_table_struct *pts);
| 110 | +
| 111 | + /* Functions called by bpf_local_storage maps */
| 112 | + int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
| 113 | + void *owner, u32 size);
| 114 | + void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
| 115 | + void *owner, u32 size);
| 116 | + struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
| 117 | +
| 118 | + /* map_meta_equal must be implemented for maps that can be
| 119 | + * used as an inner map. It is a runtime check to ensure
| 120 | + * an inner map can be inserted to an outer map.
| 121 | + *
| 122 | + * Some properties of the inner map has been used during the
| 123 | + * verification time. When inserting an inner map at the runtime,
| 124 | + * map_meta_equal has to ensure the inserting map has the same
| 125 | + * properties that the verifier has used earlier.
| 126 | + */
| 127 | + bool (*map_meta_equal)(const struct bpf_map *meta0,
| 128 | + const struct bpf_map *meta1);
| 129 | +
| 130 | + /* BTF name and id of struct allocated by map_alloc */
| 131 | + const char * const map_btf_name;
| 132 | + int *map_btf_id;
| 133 | +
| 134 | + /* bpf_iter info used to open a seq_file */
| 135 | + const struct bpf_iter_seq_info *iter_seq_info;
| 136 | +
| 137 | + ANDROID_KABI_RESERVE(1);
| 138 | + ANDROID_KABI_RESERVE(2);
| 139 | +};
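
For orientation: a map type opts into the new hooks by filling them in its ops table. A minimal sketch, assuming a hypothetical "example" map type; the generic_map_*() batch helpers and bpf_map_meta_equal() used here are real functions declared further down in this header, while the example_map_*() callbacks are placeholders:

```c
/* Hedged sketch: wiring the new bpf_map_ops hooks for a hypothetical
 * "example" map type. generic_map_*() and bpf_map_meta_equal() are
 * declared later in this header; example_map_*() are placeholders.
 */
const struct bpf_map_ops example_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_lookup_elem	= example_map_lookup_elem,
	.map_update_elem	= example_map_update_elem,
	.map_delete_elem	= example_map_delete_elem,
	.map_lookup_batch	= generic_map_lookup_batch,
	.map_update_batch	= generic_map_update_batch,
	.map_delete_batch	= generic_map_delete_batch,
};
```

A map type that leaves map_meta_equal unset simply cannot be inserted as the inner map of a map-in-map.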
| 140 | +
| 141 | +struct bpf_map_memory {
| 142 | + u32 pages;
| 143 | + struct user_struct *user;
55 | 144 | };
56 | 145 |
57 | 146 | struct bpf_map {
.. | ..
68 | 157 | u32 value_size;
69 | 158 | u32 max_entries;
70 | 159 | u32 map_flags;
71 | | - u32 pages;
| 160 | + int spin_lock_off; /* >=0 valid offset, <0 error */
72 | 161 | u32 id;
73 | 162 | int numa_node;
74 | 163 | u32 btf_key_type_id;
75 | 164 | u32 btf_value_type_id;
76 | 165 | struct btf *btf;
77 | | - bool unpriv_array;
78 | | - /* 55 bytes hole */
| 166 | + struct bpf_map_memory memory;
| 167 | + char name[BPF_OBJ_NAME_LEN];
| 168 | + u32 btf_vmlinux_value_type_id;
| 169 | + bool bypass_spec_v1;
| 170 | + bool frozen; /* write-once; write-protected by freeze_mutex */
| 171 | + /* 22 bytes hole */
79 | 172 |
80 | 173 | /* The 3rd and 4th cacheline with misc members to avoid false sharing
81 | 174 | * particularly with refcounting.
82 | 175 | */
83 | | - struct user_struct *user ____cacheline_aligned;
84 | | - atomic_t refcnt;
85 | | - atomic_t usercnt;
| 176 | + atomic64_t refcnt ____cacheline_aligned;
| 177 | + atomic64_t usercnt;
86 | 178 | struct work_struct work;
87 | | - char name[BPF_OBJ_NAME_LEN];
| 179 | + struct mutex freeze_mutex;
| 180 | +#ifdef __GENKSYMS__
| 181 | + /* Preserve the CRC change that commit 33fe044f6a9e ("bpf: Fix toctou on
| 182 | + * read-only map's constant scalar tracking") caused.
| 183 | + */
| 184 | + u64 writecnt;
| 185 | +#else
| 186 | + atomic64_t writecnt;
| 187 | +#endif
88 | 188 | };
| 189 | +
| 190 | +static inline bool map_value_has_spin_lock(const struct bpf_map *map)
| 191 | +{
| 192 | + return map->spin_lock_off >= 0;
| 193 | +}
| 194 | +
| 195 | +static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
| 196 | +{
| 197 | + if (likely(!map_value_has_spin_lock(map)))
| 198 | + return;
| 199 | + *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
| 200 | + (struct bpf_spin_lock){};
| 201 | +}
| 202 | +
| 203 | +/* copy everything but bpf_spin_lock */
| 204 | +static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
| 205 | +{
| 206 | + if (unlikely(map_value_has_spin_lock(map))) {
| 207 | + u32 off = map->spin_lock_off;
| 208 | +
| 209 | + memcpy(dst, src, off);
| 210 | + memcpy(dst + off + sizeof(struct bpf_spin_lock),
| 211 | + src + off + sizeof(struct bpf_spin_lock),
| 212 | + map->value_size - off - sizeof(struct bpf_spin_lock));
| 213 | + } else {
| 214 | + memcpy(dst, src, map->value_size);
| 215 | + }
| 216 | +}
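
The three helpers above are designed to be used together on the element-update path, so that the kernel-owned bpf_spin_lock word inside a map value is never clobbered by user-supplied data. A minimal sketch of that pattern:

```c
/* Hedged sketch: an element-update path using the helpers above.
 * "val" is the in-kernel value slot, "user_value" the buffer copied
 * in from userspace; both names are hypothetical.
 */
static void example_update_value(struct bpf_map *map, void *val,
				 void *user_value)
{
	/* zero the bpf_spin_lock area (no-op if the map has none)... */
	check_and_init_map_lock(map, val);
	/* ...then copy everything around it, skipping the lock word */
	copy_map_value(map, val, user_value);
}
```

When userspace passes BPF_F_LOCK, copy_map_value_locked() (declared next) is the variant that takes the lock around the copy.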
| 217 | +void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
| 218 | + bool lock_src);
| 219 | +int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
89 | 220 |
90 | 221 | struct bpf_offload_dev;
91 | 222 | struct bpf_offloaded_map;
.. | ..
98 | 229 | int (*map_update_elem)(struct bpf_offloaded_map *map,
99 | 230 | void *key, void *value, u64 flags);
100 | 231 | int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
| 232 | +
| 233 | + ANDROID_KABI_RESERVE(1);
101 | 234 | };
102 | 235 |
103 | 236 | struct bpf_offloaded_map {
.. | ..
120 | 253 |
121 | 254 | static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
122 | 255 | {
123 | | - return map->btf && map->ops->map_seq_show_elem;
| 256 | + return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
| 257 | + map->ops->map_seq_show_elem;
124 | 258 | }
125 | 259 |
126 | 260 | int map_check_no_btf(const struct bpf_map *map,
| 261 | + const struct btf *btf,
127 | 262 | const struct btf_type *key_type,
128 | 263 | const struct btf_type *value_type);
| 264 | +
| 265 | +bool bpf_map_meta_equal(const struct bpf_map *meta0,
| 266 | + const struct bpf_map *meta1);
129 | 267 |
130 | 268 | extern const struct bpf_map_ops bpf_map_offload_ops;
131 | 269 |
.. | ..
139 | 277 | ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
140 | 278 | ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
141 | 279 | ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
| 280 | + ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
| 281 | + ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */
142 | 282 |
143 | 283 | /* the following constraints used to prototype bpf_memcmp() and other
144 | 284 | * functions that access data on eBPF program stack
.. | ..
154 | 294 | ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
155 | 295 |
156 | 296 | ARG_PTR_TO_CTX, /* pointer to context */
| 297 | + ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */
157 | 298 | ARG_ANYTHING, /* any (initialized) argument is ok */
| 299 | + ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
| 300 | + ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
| 301 | + ARG_PTR_TO_INT, /* pointer to int */
| 302 | + ARG_PTR_TO_LONG, /* pointer to long */
| 303 | + ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
| 304 | + ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */
| 305 | + ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
| 306 | + ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
| 307 | + ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
| 308 | + ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
| 309 | + ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
| 310 | + ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
| 311 | + __BPF_ARG_TYPE_MAX,
158 | 312 | };
159 | 313 |
160 | 314 | /* type of values returned from helper functions */
.. | ..
163 | 317 | RET_VOID, /* function doesn't return anything */
164 | 318 | RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
165 | 319 | RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
| 320 | + RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
| 321 | + RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
| 322 | + RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
| 323 | + RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
| 324 | + RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
| 325 | + RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
| 326 | + RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
166 | 327 | };
167 | 328 |
168 | 329 | /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
.. | ..
174 | 335 | bool gpl_only;
175 | 336 | bool pkt_access;
176 | 337 | enum bpf_return_type ret_type;
177 | | - enum bpf_arg_type arg1_type;
178 | | - enum bpf_arg_type arg2_type;
179 | | - enum bpf_arg_type arg3_type;
180 | | - enum bpf_arg_type arg4_type;
181 | | - enum bpf_arg_type arg5_type;
| 338 | + union {
| 339 | + struct {
| 340 | + enum bpf_arg_type arg1_type;
| 341 | + enum bpf_arg_type arg2_type;
| 342 | + enum bpf_arg_type arg3_type;
| 343 | + enum bpf_arg_type arg4_type;
| 344 | + enum bpf_arg_type arg5_type;
| 345 | + };
| 346 | + enum bpf_arg_type arg_type[5];
| 347 | + };
| 348 | + union {
| 349 | + struct {
| 350 | + u32 *arg1_btf_id;
| 351 | + u32 *arg2_btf_id;
| 352 | + u32 *arg3_btf_id;
| 353 | + u32 *arg4_btf_id;
| 354 | + u32 *arg5_btf_id;
| 355 | + };
| 356 | + u32 *arg_btf_id[5];
| 357 | + };
| 358 | + int *ret_btf_id; /* return value btf_id */
| 359 | + bool (*allowed)(const struct bpf_prog *prog);
182 | 360 | };
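
The anonymous unions make each argument reachable both by name (arg1_type) and by index (arg_type[i]), which lets the verifier walk argument lists generically. A hedged sketch of a helper prototype using the named-field view, modeled on the kernel's bpf_map_lookup_elem_proto:

```c
/* Hedged sketch of a helper prototype; modeled on the kernel's
 * bpf_map_lookup_elem_proto, but the variable name is illustrative.
 */
const struct bpf_func_proto example_map_lookup_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	/* the verifier can still read these as arg_type[0], arg_type[1] */
};
```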
183 | 361 |
184 | 362 | /* bpf_context is intentionally undefined structure. Pointer to bpf_context is
.. | ..
213 | 391 | PTR_TO_PACKET_META, /* skb->data - meta_len */
214 | 392 | PTR_TO_PACKET, /* reg points to skb->data */
215 | 393 | PTR_TO_PACKET_END, /* skb->data + headlen */
| 394 | + PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
| 395 | + PTR_TO_SOCKET, /* reg points to struct bpf_sock */
| 396 | + PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
| 397 | + PTR_TO_SOCK_COMMON, /* reg points to sock_common */
| 398 | + PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
| 399 | + PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
| 400 | + PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
| 401 | + PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
| 402 | + PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
| 403 | + /* PTR_TO_BTF_ID points to a kernel struct that does not need
| 404 | + * to be null checked by the BPF program. This does not imply the
| 405 | + * pointer is _not_ null and in practice this can easily be a null
| 406 | + * pointer when reading pointer chains. The assumption is program
| 407 | + * context will handle null pointer dereference typically via fault
| 408 | + * handling. The verifier must keep this in mind and can make no
| 409 | + * assumptions about null or non-null when doing branch analysis.
| 410 | + * Further, when passed into helpers the helpers can not, without
| 411 | + * additional context, assume the value is non-null.
| 412 | + */
| 413 | + PTR_TO_BTF_ID,
| 414 | + /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
| 415 | + * been checked for null. Used primarily to inform the verifier
| 416 | + * an explicit null check is required for this struct.
| 417 | + */
| 418 | + PTR_TO_BTF_ID_OR_NULL,
| 419 | + PTR_TO_MEM, /* reg points to valid memory region */
| 420 | + PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */
| 421 | + PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */
| 422 | + PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
| 423 | + PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
| 424 | + PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
| 425 | + PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */
216 | 426 | };
217 | 427 |
218 | 428 | /* The information passed from prog-specific *_is_valid_access
.. | ..
220 | 430 | */
221 | 431 | struct bpf_insn_access_aux {
222 | 432 | enum bpf_reg_type reg_type;
223 | | - int ctx_field_size;
| 433 | + union {
| 434 | + int ctx_field_size;
| 435 | + u32 btf_id;
| 436 | + };
| 437 | + struct bpf_verifier_log *log; /* for verbose logs */
224 | 438 | };
225 | 439 |
226 | 440 | static inline void
.. | ..
254 | 468 | const struct bpf_insn *src,
255 | 469 | struct bpf_insn *dst,
256 | 470 | struct bpf_prog *prog, u32 *target_size);
| 471 | + int (*btf_struct_access)(struct bpf_verifier_log *log,
| 472 | + const struct btf_type *t, int off, int size,
| 473 | + enum bpf_access_type atype,
| 474 | + u32 *next_btf_id);
| 475 | + ANDROID_KABI_RESERVE(1);
257 | 476 | };
258 | 477 |
259 | 478 | struct bpf_prog_offload_ops {
| 479 | + /* verifier basic callbacks */
260 | 480 | int (*insn_hook)(struct bpf_verifier_env *env,
261 | 481 | int insn_idx, int prev_insn_idx);
| 482 | + int (*finalize)(struct bpf_verifier_env *env);
| 483 | + /* verifier optimization callbacks (called after .finalize) */
| 484 | + int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
| 485 | + struct bpf_insn *insn);
| 486 | + int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
| 487 | + /* program management callbacks */
| 488 | + int (*prepare)(struct bpf_prog *prog);
| 489 | + int (*translate)(struct bpf_prog *prog);
| 490 | + void (*destroy)(struct bpf_prog *prog);
| 491 | + ANDROID_KABI_RESERVE(1);
262 | 492 | };
263 | 493 |
264 | 494 | struct bpf_prog_offload {
265 | 495 | struct bpf_prog *prog;
266 | 496 | struct net_device *netdev;
| 497 | + struct bpf_offload_dev *offdev;
267 | 498 | void *dev_priv;
268 | 499 | struct list_head offloads;
269 | 500 | bool dev_state;
270 | | - const struct bpf_prog_offload_ops *dev_ops;
| 501 | + bool opt_failed;
271 | 502 | void *jited_image;
272 | 503 | u32 jited_len;
273 | 504 | };
274 | 505 |
| 506 | +enum bpf_cgroup_storage_type {
| 507 | + BPF_CGROUP_STORAGE_SHARED,
| 508 | + BPF_CGROUP_STORAGE_PERCPU,
| 509 | + __BPF_CGROUP_STORAGE_MAX
| 510 | +};
| 511 | +
| 512 | +#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
| 513 | +
| 514 | +/* The longest tracepoint has 12 args.
| 515 | + * See include/trace/bpf_probe.h
| 516 | + */
| 517 | +#define MAX_BPF_FUNC_ARGS 12
| 518 | +
| 519 | +struct bpf_prog_stats {
| 520 | + u64 cnt;
| 521 | + u64 nsecs;
| 522 | + struct u64_stats_sync syncp;
| 523 | +} __aligned(2 * sizeof(u64));
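
These stats live per-CPU (see the __percpu *stats member added to bpf_prog_aux below) and are written under u64_stats_sync so 32-bit readers see consistent counter pairs. A hedged sketch of the writer side, modeled on __bpf_prog_exit() in kernel/bpf/trampoline.c:

```c
/* Hedged sketch of the stats update path (modeled on __bpf_prog_exit());
 * "start" is the sched_clock() timestamp returned by __bpf_prog_enter().
 */
static void example_update_prog_stats(struct bpf_prog *prog, u64 start)
{
	struct bpf_prog_stats *stats;

	stats = this_cpu_ptr(prog->aux->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->cnt++;
	stats->nsecs += sched_clock() - start;
	u64_stats_update_end(&stats->syncp);
}
```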
| 524 | +
| 525 | +struct btf_func_model {
| 526 | + u8 ret_size;
| 527 | + u8 nr_args;
| 528 | + u8 arg_size[MAX_BPF_FUNC_ARGS];
| 529 | +};
| 530 | +
| 531 | +/* Restore arguments before returning from trampoline to let original function
| 532 | + * continue executing. This flag is used for fentry progs when there are no
| 533 | + * fexit progs.
| 534 | + */
| 535 | +#define BPF_TRAMP_F_RESTORE_REGS BIT(0)
| 536 | +/* Call original function after fentry progs, but before fexit progs.
| 537 | + * Makes sense for fentry/fexit, normal calls and indirect calls.
| 538 | + */
| 539 | +#define BPF_TRAMP_F_CALL_ORIG BIT(1)
| 540 | +/* Skip current frame and return to parent. Makes sense for fentry/fexit
| 541 | + * programs only. Should not be used with normal calls and indirect calls.
| 542 | + */
| 543 | +#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
| 544 | +/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
| 545 | +#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
| 546 | +
| 547 | +/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
| 548 | + * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
| 549 | + */
| 550 | +#define BPF_MAX_TRAMP_PROGS 40
| 551 | +
| 552 | +struct bpf_tramp_progs {
| 553 | + struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
| 554 | + int nr_progs;
| 555 | +};
| 556 | +
| 557 | +/* Different use cases for BPF trampoline:
| 558 | + * 1. replace nop at the function entry (kprobe equivalent)
| 559 | + * flags = BPF_TRAMP_F_RESTORE_REGS
| 560 | + * fentry = a set of programs to run before returning from trampoline
| 561 | + *
| 562 | + * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
| 563 | + * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
| 564 | + * orig_call = fentry_ip + MCOUNT_INSN_SIZE
| 565 | + * fentry = a set of program to run before calling original function
| 566 | + * fexit = a set of program to run after original function
| 567 | + *
| 568 | + * 3. replace direct call instruction anywhere in the function body
| 569 | + * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
| 570 | + * With flags = 0
| 571 | + * fentry = a set of programs to run before returning from trampoline
| 572 | + * With flags = BPF_TRAMP_F_CALL_ORIG
| 573 | + * orig_call = original callback addr or direct function addr
| 574 | + * fentry = a set of program to run before calling original function
| 575 | + * fexit = a set of program to run after original function
| 576 | + */
| 577 | +struct bpf_tramp_image;
| 578 | +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
| 579 | + const struct btf_func_model *m, u32 flags,
| 580 | + struct bpf_tramp_progs *tprogs,
| 581 | + void *orig_call);
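
Mapping the use cases above onto flags, roughly as the trampoline update path does it. A hedged sketch, modeled on bpf_trampoline_update() in kernel/bpf/trampoline.c (error handling trimmed; tprogs is assumed to be an array indexed by bpf_tramp_prog_type, as built by the kernel's update path):

```c
/* Hedged sketch of flag selection per the use cases documented above. */
static int example_trampoline_update(struct bpf_trampoline *tr,
				     struct bpf_tramp_image *im,
				     struct bpf_tramp_progs *tprogs)
{
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;	/* use case 1: fentry only */

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		/* use cases 2/3: the original function must really be called */
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	return arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					   &tr->func.model, flags, tprogs,
					   tr->func.addr);
}
```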
| 582 | +/* these two functions are called from generated trampoline */
| 583 | +u64 notrace __bpf_prog_enter(void);
| 584 | +void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
| 585 | +void notrace __bpf_prog_enter_sleepable(void);
| 586 | +void notrace __bpf_prog_exit_sleepable(void);
| 587 | +void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
| 588 | +void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
| 589 | +
| 590 | +struct bpf_ksym {
| 591 | + unsigned long start;
| 592 | + unsigned long end;
| 593 | + char name[KSYM_NAME_LEN];
| 594 | + struct list_head lnode;
| 595 | + struct latch_tree_node tnode;
| 596 | + bool prog;
| 597 | +};
| 598 | +
| 599 | +enum bpf_tramp_prog_type {
| 600 | + BPF_TRAMP_FENTRY,
| 601 | + BPF_TRAMP_FEXIT,
| 602 | + BPF_TRAMP_MODIFY_RETURN,
| 603 | + BPF_TRAMP_MAX,
| 604 | + BPF_TRAMP_REPLACE, /* more than MAX */
| 605 | +};
| 606 | +
| 607 | +struct bpf_tramp_image {
| 608 | + void *image;
| 609 | + struct bpf_ksym ksym;
| 610 | + struct percpu_ref pcref;
| 611 | + void *ip_after_call;
| 612 | + void *ip_epilogue;
| 613 | + union {
| 614 | + struct rcu_head rcu;
| 615 | + struct work_struct work;
| 616 | + };
| 617 | +};
| 618 | +
| 619 | +struct bpf_trampoline {
| 620 | + /* hlist for trampoline_table */
| 621 | + struct hlist_node hlist;
| 622 | + /* serializes access to fields of this trampoline */
| 623 | + struct mutex mutex;
| 624 | + refcount_t refcnt;
| 625 | + u64 key;
| 626 | + struct {
| 627 | + struct btf_func_model model;
| 628 | + void *addr;
| 629 | + bool ftrace_managed;
| 630 | + } func;
| 631 | + /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
| 632 | + * program by replacing one of its functions. func.addr is the address
| 633 | + * of the function it replaced.
| 634 | + */
| 635 | + struct bpf_prog *extension_prog;
| 636 | + /* list of BPF programs using this trampoline */
| 637 | + struct hlist_head progs_hlist[BPF_TRAMP_MAX];
| 638 | + /* Number of attached programs. A counter per kind. */
| 639 | + int progs_cnt[BPF_TRAMP_MAX];
| 640 | + /* Executable image of trampoline */
| 641 | + struct bpf_tramp_image *cur_image;
| 642 | + u64 selector;
| 643 | +};
| 644 | +
| 645 | +struct bpf_attach_target_info {
| 646 | + struct btf_func_model fmodel;
| 647 | + long tgt_addr;
| 648 | + const char *tgt_name;
| 649 | + const struct btf_type *tgt_type;
| 650 | +};
| 651 | +
| 652 | +#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
| 653 | +
| 654 | +struct bpf_dispatcher_prog {
| 655 | + struct bpf_prog *prog;
| 656 | + refcount_t users;
| 657 | +};
| 658 | +
| 659 | +struct bpf_dispatcher {
| 660 | + /* dispatcher mutex */
| 661 | + struct mutex mutex;
| 662 | + void *func;
| 663 | + struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
| 664 | + int num_progs;
| 665 | + void *image;
| 666 | + u32 image_off;
| 667 | + struct bpf_ksym ksym;
| 668 | +};
| 669 | +
| 670 | +static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
| 671 | + const void *ctx,
| 672 | + const struct bpf_insn *insnsi,
| 673 | + unsigned int (*bpf_func)(const void *,
| 674 | + const struct bpf_insn *))
| 675 | +{
| 676 | + return bpf_func(ctx, insnsi);
| 677 | +}
| 678 | +#ifdef CONFIG_BPF_JIT
| 679 | +int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
| 680 | +int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
| 681 | +struct bpf_trampoline *bpf_trampoline_get(u64 key,
| 682 | + struct bpf_attach_target_info *tgt_info);
| 683 | +void bpf_trampoline_put(struct bpf_trampoline *tr);
| 684 | +int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
| 685 | +#define BPF_DISPATCHER_INIT(_name) { \
| 686 | + .mutex = __MUTEX_INITIALIZER(_name.mutex), \
| 687 | + .func = &_name##_func, \
| 688 | + .progs = {}, \
| 689 | + .num_progs = 0, \
| 690 | + .image = NULL, \
| 691 | + .image_off = 0, \
| 692 | + .ksym = { \
| 693 | + .name = #_name, \
| 694 | + .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
| 695 | + }, \
| 696 | +}
| 697 | +
| 698 | +#define DEFINE_BPF_DISPATCHER(name) \
| 699 | + noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
| 700 | + const void *ctx, \
| 701 | + const struct bpf_insn *insnsi, \
| 702 | + unsigned int (*bpf_func)(const void *, \
| 703 | + const struct bpf_insn *)) \
| 704 | + { \
| 705 | + return bpf_func(ctx, insnsi); \
| 706 | + } \
| 707 | + EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
| 708 | + struct bpf_dispatcher bpf_dispatcher_##name = \
| 709 | + BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
| 710 | +#define DECLARE_BPF_DISPATCHER(name) \
| 711 | + unsigned int bpf_dispatcher_##name##_func( \
| 712 | + const void *ctx, \
| 713 | + const struct bpf_insn *insnsi, \
| 714 | + unsigned int (*bpf_func)(const void *, \
| 715 | + const struct bpf_insn *)); \
| 716 | + extern struct bpf_dispatcher bpf_dispatcher_##name;
| 717 | +#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
| 718 | +#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
| 719 | +void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
| 720 | + struct bpf_prog *to);
| 721 | +/* Called only from JIT-enabled code, so there's no need for stubs. */
| 722 | +void *bpf_jit_alloc_exec_page(void);
| 723 | +void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
| 724 | +void bpf_image_ksym_del(struct bpf_ksym *ksym);
| 725 | +void bpf_ksym_add(struct bpf_ksym *ksym);
| 726 | +void bpf_ksym_del(struct bpf_ksym *ksym);
| 727 | +int bpf_jit_charge_modmem(u32 pages);
| 728 | +void bpf_jit_uncharge_modmem(u32 pages);
| 729 | +#else
| 730 | +static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
| 731 | + struct bpf_trampoline *tr)
| 732 | +{
| 733 | + return -ENOTSUPP;
| 734 | +}
| 735 | +static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
| 736 | + struct bpf_trampoline *tr)
| 737 | +{
| 738 | + return -ENOTSUPP;
| 739 | +}
| 740 | +static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
| 741 | + struct bpf_attach_target_info *tgt_info)
| 742 | +{
| 743 | + return ERR_PTR(-EOPNOTSUPP);
| 744 | +}
| 745 | +static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
| 746 | +#define DEFINE_BPF_DISPATCHER(name)
| 747 | +#define DECLARE_BPF_DISPATCHER(name)
| 748 | +#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
| 749 | +#define BPF_DISPATCHER_PTR(name) NULL
| 750 | +static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
| 751 | + struct bpf_prog *from,
| 752 | + struct bpf_prog *to) {}
| 753 | +static inline bool is_bpf_image_address(unsigned long address)
| 754 | +{
| 755 | + return false;
| 756 | +}
| 757 | +#endif
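
A hedged sketch of how the dispatcher macros above are meant to be used, modeled on the kernel's XDP dispatcher; "example" is a hypothetical dispatcher name. Note that with CONFIG_BPF_JIT disabled the same code still compiles, degrading to bpf_dispatcher_nop_func()'s plain indirect call:

```c
/* Hedged sketch of dispatcher usage (modeled on the XDP dispatcher). */
DEFINE_BPF_DISPATCHER(example)

static unsigned int example_run(const void *ctx, struct bpf_prog *prog)
{
	/* dispatch through the JITed trampoline when one is installed */
	return BPF_DISPATCHER_FUNC(example)(ctx, prog->insnsi, prog->bpf_func);
}

static void example_change_prog(struct bpf_prog *prev, struct bpf_prog *prog)
{
	/* re-JITs the dispatch trampoline for the new program set */
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), prev, prog);
}
```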
| 758 | +
| 759 | +struct bpf_func_info_aux {
| 760 | + u16 linkage;
| 761 | + bool unreliable;
| 762 | +};
| 763 | +
| 764 | +enum bpf_jit_poke_reason {
| 765 | + BPF_POKE_REASON_TAIL_CALL,
| 766 | +};
| 767 | +
| 768 | +/* Descriptor of pokes pointing /into/ the JITed image. */
| 769 | +struct bpf_jit_poke_descriptor {
| 770 | + void *tailcall_target;
| 771 | + void *tailcall_bypass;
| 772 | + void *bypass_addr;
| 773 | + union {
| 774 | + struct {
| 775 | + struct bpf_map *map;
| 776 | + u32 key;
| 777 | + } tail_call;
| 778 | + };
| 779 | + bool tailcall_target_stable;
| 780 | + u8 adj_off;
| 781 | + u16 reason;
| 782 | + u32 insn_idx;
| 783 | +};
| 784 | +
| 785 | +/* reg_type info for ctx arguments */
| 786 | +struct bpf_ctx_arg_aux {
| 787 | + u32 offset;
| 788 | + enum bpf_reg_type reg_type;
| 789 | + u32 btf_id;
| 790 | +};
| 791 | +
275 | 792 | struct bpf_prog_aux {
276 | | - atomic_t refcnt;
| 793 | + atomic64_t refcnt;
277 | 794 | u32 used_map_cnt;
278 | 795 | u32 max_ctx_offset;
| 796 | + u32 max_pkt_offset;
| 797 | + u32 max_tp_access;
279 | 798 | u32 stack_depth;
280 | 799 | u32 id;
281 | | - u32 func_cnt;
| 800 | + u32 func_cnt; /* used by non-func prog as the number of func progs */
| 801 | + u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
| 802 | + u32 attach_btf_id; /* in-kernel BTF type id to attach to */
| 803 | + u32 ctx_arg_info_size;
| 804 | + u32 max_rdonly_access;
| 805 | + u32 max_rdwr_access;
| 806 | + const struct bpf_ctx_arg_aux *ctx_arg_info;
| 807 | + struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
| 808 | + struct bpf_prog *dst_prog;
| 809 | + struct bpf_trampoline *dst_trampoline;
| 810 | + enum bpf_prog_type saved_dst_prog_type;
| 811 | + enum bpf_attach_type saved_dst_attach_type;
| 812 | + bool verifier_zext; /* Zero extensions has been inserted by verifier. */
282 | 813 | bool offload_requested;
| 814 | + bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
| 815 | + bool func_proto_unreliable;
| 816 | + bool sleepable;
| 817 | + bool tail_call_reachable;
| 818 | + struct hlist_node tramp_hlist;
| 819 | + /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
| 820 | + const struct btf_type *attach_func_proto;
| 821 | + /* function name for valid attach_btf_id */
| 822 | + const char *attach_func_name;
283 | 823 | struct bpf_prog **func;
284 | 824 | void *jit_data; /* JIT specific data. arch dependent */
285 | | - struct latch_tree_node ksym_tnode;
286 | | - struct list_head ksym_lnode;
| 825 | + struct bpf_jit_poke_descriptor *poke_tab;
| 826 | + u32 size_poke_tab;
| 827 | + struct bpf_ksym ksym;
287 | 828 | const struct bpf_prog_ops *ops;
288 | 829 | struct bpf_map **used_maps;
| 830 | + struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
289 | 831 | struct bpf_prog *prog;
290 | 832 | struct user_struct *user;
291 | 833 | u64 load_time; /* ns since boottime */
292 | | - struct bpf_map *cgroup_storage;
| 834 | + struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
293 | 835 | char name[BPF_OBJ_NAME_LEN];
294 | 836 | #ifdef CONFIG_SECURITY
295 | 837 | void *security;
296 | 838 | #endif
297 | 839 | struct bpf_prog_offload *offload;
| 840 | + struct btf *btf;
| 841 | + struct bpf_func_info *func_info;
| 842 | + struct bpf_func_info_aux *func_info_aux;
| 843 | + /* bpf_line_info loaded from userspace. linfo->insn_off
| 844 | + * has the xlated insn offset.
| 845 | + * Both the main and sub prog share the same linfo.
| 846 | + * The subprog can access its first linfo by
| 847 | + * using the linfo_idx.
| 848 | + */
| 849 | + struct bpf_line_info *linfo;
| 850 | + /* jited_linfo is the jited addr of the linfo. It has a
| 851 | + * one to one mapping to linfo:
| 852 | + * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
| 853 | + * Both the main and sub prog share the same jited_linfo.
| 854 | + * The subprog can access its first jited_linfo by
| 855 | + * using the linfo_idx.
| 856 | + */
| 857 | + void **jited_linfo;
| 858 | + u32 func_info_cnt;
| 859 | + u32 nr_linfo;
| 860 | + /* subprog can use linfo_idx to access its first linfo and
| 861 | + * jited_linfo.
| 862 | + * main prog always has linfo_idx == 0
| 863 | + */
| 864 | + u32 linfo_idx;
| 865 | + u32 num_exentries;
| 866 | + struct exception_table_entry *extable;
| 867 | + struct bpf_prog_stats __percpu *stats;
298 | 868 | union {
299 | 869 | struct work_struct work;
300 | 870 | struct rcu_head rcu;
301 | 871 | };
| 872 | + ANDROID_KABI_RESERVE(1);
302 | 873 | };
| 874 | +
| 875 | +struct bpf_array_aux {
| 876 | + /* 'Ownership' of prog array is claimed by the first program that
| 877 | + * is going to use this map or by the first program which FD is
| 878 | + * stored in the map to make sure that all callers and callees have
| 879 | + * the same prog type and JITed flag.
| 880 | + */
| 881 | + struct {
| 882 | + spinlock_t lock;
| 883 | + enum bpf_prog_type type;
| 884 | + bool jited;
| 885 | + } owner;
| 886 | + /* Programs with direct jumps into programs part of this array. */
| 887 | + struct list_head poke_progs;
| 888 | + struct bpf_map *map;
| 889 | + struct mutex poke_mutex;
| 890 | + struct work_struct work;
| 891 | +};
| 892 | +
| 893 | +struct bpf_link {
| 894 | + atomic64_t refcnt;
| 895 | + u32 id;
| 896 | + enum bpf_link_type type;
| 897 | + const struct bpf_link_ops *ops;
| 898 | + struct bpf_prog *prog;
| 899 | + struct work_struct work;
| 900 | +};
| 901 | +
| 902 | +struct bpf_link_ops {
| 903 | + void (*release)(struct bpf_link *link);
| 904 | + void (*dealloc)(struct bpf_link *link);
| 905 | + int (*detach)(struct bpf_link *link);
| 906 | + int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
| 907 | + struct bpf_prog *old_prog);
| 908 | + void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
| 909 | + int (*fill_link_info)(const struct bpf_link *link,
| 910 | + struct bpf_link_info *info);
| 911 | +};
| 912 | +
| 913 | +struct bpf_link_primer {
| 914 | + struct bpf_link *link;
| 915 | + struct file *file;
| 916 | + int fd;
| 917 | + u32 id;
| 918 | +};
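
The primer split exists so that an attach operation can still be rolled back before the fd (and thus the link) becomes visible to userspace. A hedged sketch of the intended creation flow, using the bpf_link_init()/bpf_link_prime()/bpf_link_settle() helpers declared near the end of this header; all "example" names are hypothetical:

```c
/* Hedged sketch of bpf_link creation (modeled on the raw tracepoint link). */
struct example_link {
	struct bpf_link link;
};

static void example_link_release(struct bpf_link *link)
{
	/* detach from the underlying object here */
}

static void example_link_dealloc(struct bpf_link *link)
{
	kfree(container_of(link, struct example_link, link));
}

static const struct bpf_link_ops example_link_ops = {
	.release	= example_link_release,
	.dealloc	= example_link_dealloc,
};

static int example_link_attach(struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct example_link *link;
	int err;

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link)
		return -ENOMEM;
	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC,
		      &example_link_ops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		/* not primed yet: no fd/id allocated, plain kfree() is fine */
		kfree(link);
		return err;
	}
	/* ...perform the actual attach here; on failure call
	 * bpf_link_cleanup(&link_primer) instead of kfree()...
	 */
	return bpf_link_settle(&link_primer);	/* installs and returns the fd */
}
```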
| 919 | +
| 920 | +struct bpf_struct_ops_value;
| 921 | +struct btf_type;
| 922 | +struct btf_member;
| 923 | +
| 924 | +#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
| 925 | +struct bpf_struct_ops {
| 926 | + const struct bpf_verifier_ops *verifier_ops;
| 927 | + int (*init)(struct btf *btf);
| 928 | + int (*check_member)(const struct btf_type *t,
| 929 | + const struct btf_member *member);
| 930 | + int (*init_member)(const struct btf_type *t,
| 931 | + const struct btf_member *member,
| 932 | + void *kdata, const void *udata);
| 933 | + int (*reg)(void *kdata);
| 934 | + void (*unreg)(void *kdata);
| 935 | + const struct btf_type *type;
| 936 | + const struct btf_type *value_type;
| 937 | + const char *name;
| 938 | + struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
| 939 | + u32 type_id;
| 940 | + u32 value_id;
| 941 | +};
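
A hedged sketch of the subsystem side, loosely modeled on the kernel's bpf_tcp_congestion_ops (the struct_ops user of this generation); every "example_*" identifier is hypothetical:

```c
/* Hedged sketch: describing a kernel "struct example_ops" to struct_ops. */
static int example_ops_init(struct btf *btf)
{
	/* resolve any BTF ids the verifier callbacks will need */
	return 0;
}

static int example_ops_reg(void *kdata)
{
	/* kdata points at the map-backed struct example_ops instance */
	return example_register(kdata);
}

static void example_ops_unreg(void *kdata)
{
	example_unregister(kdata);
}

struct bpf_struct_ops bpf_example_ops = {
	.verifier_ops	= &example_verifier_ops,
	.init		= example_ops_init,
	.reg		= example_ops_reg,
	.unreg		= example_ops_unreg,
	.name		= "example_ops",
};
```

The type/value_type/type_id/value_id members are not filled in by the subsystem; bpf_struct_ops_init() (declared just below) resolves them from BTF by name.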
| 942 | +
| 943 | +#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
| 944 | +#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
| 945 | +const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
| 946 | +void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
| 947 | +bool bpf_struct_ops_get(const void *kdata);
| 948 | +void bpf_struct_ops_put(const void *kdata);
| 949 | +int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
| 950 | + void *value);
| 951 | +static inline bool bpf_try_module_get(const void *data, struct module *owner)
| 952 | +{
| 953 | + if (owner == BPF_MODULE_OWNER)
| 954 | + return bpf_struct_ops_get(data);
| 955 | + else
| 956 | + return try_module_get(owner);
| 957 | +}
| 958 | +static inline void bpf_module_put(const void *data, struct module *owner)
| 959 | +{
| 960 | + if (owner == BPF_MODULE_OWNER)
| 961 | + bpf_struct_ops_put(data);
| 962 | + else
| 963 | + module_put(owner);
| 964 | +}
| 965 | +#else
| 966 | +static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
| 967 | +{
| 968 | + return NULL;
| 969 | +}
| 970 | +static inline void bpf_struct_ops_init(struct btf *btf,
| 971 | + struct bpf_verifier_log *log)
| 972 | +{
| 973 | +}
| 974 | +static inline bool bpf_try_module_get(const void *data, struct module *owner)
| 975 | +{
| 976 | + return try_module_get(owner);
| 977 | +}
| 978 | +static inline void bpf_module_put(const void *data, struct module *owner)
| 979 | +{
| 980 | + module_put(owner);
| 981 | +}
| 982 | +static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
| 983 | + void *key,
| 984 | + void *value)
| 985 | +{
| 986 | + return -EINVAL;
| 987 | +}
| 988 | +#endif
303 | 989 |
304 | 990 | struct bpf_array {
305 | 991 | struct bpf_map map;
306 | 992 | u32 elem_size;
307 | 993 | u32 index_mask;
308 | | - /* 'ownership' of prog_array is claimed by the first program that
309 | | - * is going to use this map or by the first program which FD is stored
310 | | - * in the map to make sure that all callers and callees have the same
311 | | - * prog_type and JITed flag
312 | | - */
313 | | - enum bpf_prog_type owner_prog_type;
314 | | - bool owner_jited;
| 994 | + struct bpf_array_aux *aux;
315 | 995 | union {
316 | 996 | char value[0] __aligned(8);
317 | 997 | void *ptrs[0] __aligned(8);
.. | ..
319 | 999 | };
320 | 1000 | };
321 | 1001 |
| 1002 | +#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
322 | 1003 | #define MAX_TAIL_CALL_CNT 32
| 1004 | +
| 1005 | +#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
| 1006 | + BPF_F_RDONLY_PROG | \
| 1007 | + BPF_F_WRONLY | \
| 1008 | + BPF_F_WRONLY_PROG)
| 1009 | +
| 1010 | +#define BPF_MAP_CAN_READ BIT(0)
| 1011 | +#define BPF_MAP_CAN_WRITE BIT(1)
| 1012 | +
| 1013 | +static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
| 1014 | +{
| 1015 | + u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
| 1016 | +
| 1017 | + /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
| 1018 | + * not possible.
| 1019 | + */
| 1020 | + if (access_flags & BPF_F_RDONLY_PROG)
| 1021 | + return BPF_MAP_CAN_READ;
| 1022 | + else if (access_flags & BPF_F_WRONLY_PROG)
| 1023 | + return BPF_MAP_CAN_WRITE;
| 1024 | + else
| 1025 | + return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
| 1026 | +}
| 1027 | +
| 1028 | +static inline bool bpf_map_flags_access_ok(u32 access_flags)
| 1029 | +{
| 1030 | + return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
| 1031 | + (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
| 1032 | +}
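
Taken together: bpf_map_flags_access_ok() rejects the contradictory flag combination at map-create time, and bpf_map_flags_to_cap() is what access-checking code consults when a program reads or writes the map. A hedged sketch of both call sites:

```c
/* Hedged sketch of the two call sites for the helpers above;
 * example_* functions are hypothetical.
 */

/* map creation: BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is contradictory */
static int example_check_map_flags(union bpf_attr *attr)
{
	return bpf_map_flags_access_ok(attr->map_flags) ? 0 : -EINVAL;
}

/* verifier-side access check */
static int example_check_map_access(struct bpf_map *map,
				    enum bpf_access_type type)
{
	u32 cap = bpf_map_flags_to_cap(map);

	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE))
		return -EACCES;
	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ))
		return -EACCES;
	return 0;
}
```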
323 | 1033 |
324 | 1034 | struct bpf_event_entry {
325 | 1035 | struct perf_event *event;
.. | ..
330 | 1040 |
331 | 1041 | bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
332 | 1042 | int bpf_prog_calc_tag(struct bpf_prog *fp);
| 1043 | +const char *kernel_type_name(u32 btf_type_id);
333 | 1044 |
334 | 1045 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
335 | 1046 |
336 | 1047 | typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
337 | 1048 | unsigned long off, unsigned long len);
| 1049 | +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
| 1050 | + const struct bpf_insn *src,
| 1051 | + struct bpf_insn *dst,
| 1052 | + struct bpf_prog *prog,
| 1053 | + u32 *target_size);
338 | 1054 |
339 | 1055 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
340 | 1056 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
341 | | -
342 | | -int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
343 | | - union bpf_attr __user *uattr);
344 | | -int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
345 | | - union bpf_attr __user *uattr);
346 | 1057 |
347 | 1058 | /* an array of programs to be executed under rcu_lock.
348 | 1059 | *
.. | ..
358 | 1069 | */
359 | 1070 | struct bpf_prog_array_item {
360 | 1071 | struct bpf_prog *prog;
361 | | - struct bpf_cgroup_storage *cgroup_storage;
| 1072 | + struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
362 | 1073 | };
363 | 1074 |
364 | 1075 | struct bpf_prog_array {
365 | 1076 | struct rcu_head rcu;
366 | | - struct bpf_prog_array_item items[0];
| 1077 | + struct bpf_prog_array_item items[];
367 | 1078 | };
368 | 1079 |
369 | 1080 | struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
370 | | -void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
371 | | -int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
372 | | -int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
| 1081 | +void bpf_prog_array_free(struct bpf_prog_array *progs);
| 1082 | +int bpf_prog_array_length(struct bpf_prog_array *progs);
| 1083 | +bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
| 1084 | +int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
373 | 1085 | __u32 __user *prog_ids, u32 cnt);
374 | 1086 |
375 | | -void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
| 1087 | +void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
376 | 1088 | struct bpf_prog *old_prog);
377 | | -int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
| 1089 | +int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
| 1090 | +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
| 1091 | + struct bpf_prog *prog);
| 1092 | +int bpf_prog_array_copy_info(struct bpf_prog_array *array,
378 | 1093 | u32 *prog_ids, u32 request_cnt,
379 | 1094 | u32 *prog_cnt);
380 | | -int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
| 1095 | +int bpf_prog_array_copy(struct bpf_prog_array *old_array,
381 | 1096 | struct bpf_prog *exclude_prog,
382 | 1097 | struct bpf_prog *include_prog,
383 | 1098 | struct bpf_prog_array **new_array);
.. | ..
388 | 1103 | struct bpf_prog *_prog; \
389 | 1104 | struct bpf_prog_array *_array; \
390 | 1105 | u32 _ret = 1; \
391 | | - preempt_disable(); \
| 1106 | + migrate_disable(); \
392 | 1107 | rcu_read_lock(); \
393 | 1108 | _array = rcu_dereference(array); \
394 | 1109 | if (unlikely(check_non_null && !_array))\
395 | 1110 | goto _out; \
396 | 1111 | _item = &_array->items[0]; \
397 | 1112 | while ((_prog = READ_ONCE(_item->prog))) { \
398 | | - if (set_cg_storage) \
399 | | - bpf_cgroup_storage_set(_item->cgroup_storage); \
400 | | - _ret &= func(_prog, ctx); \
| 1113 | + if (!set_cg_storage) { \
| 1114 | + _ret &= func(_prog, ctx); \
| 1115 | + } else { \
| 1116 | + if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
| 1117 | + break; \
| 1118 | + _ret &= func(_prog, ctx); \
| 1119 | + bpf_cgroup_storage_unset(); \
| 1120 | + } \
401 | 1121 | _item++; \
402 | 1122 | } \
403 | 1123 | _out: \
404 | 1124 | rcu_read_unlock(); \
405 | | - preempt_enable(); \
| 1125 | + migrate_enable(); \
406 | 1126 | _ret; \
407 | 1127 | })
| 1128 | +
| 1129 | +/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
| 1130 | + * so BPF programs can request cwr for TCP packets.
| 1131 | + *
| 1132 | + * Current cgroup skb programs can only return 0 or 1 (0 to drop the
| 1133 | + * packet. This macro changes the behavior so the low order bit
| 1134 | + * indicates whether the packet should be dropped (0) or not (1)
| 1135 | + * and the next bit is a congestion notification bit. This could be
| 1136 | + * used by TCP to call tcp_enter_cwr()
| 1137 | + *
| 1138 | + * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
| 1139 | + * 0: drop packet
| 1140 | + * 1: keep packet
| 1141 | + * 2: drop packet and cn
| 1142 | + * 3: keep packet and cn
| 1143 | + *
| 1144 | + * This macro then converts it to one of the NET_XMIT or an error
| 1145 | + * code that is then interpreted as drop packet (and no cn):
| 1146 | + * 0: NET_XMIT_SUCCESS skb should be transmitted
| 1147 | + * 1: NET_XMIT_DROP skb should be dropped and cn
| 1148 | + * 2: NET_XMIT_CN skb should be transmitted and cn
| 1149 | + * 3: -EPERM skb should be dropped
| 1150 | + */
| 1151 | +#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
| 1152 | + ({ \
| 1153 | + struct bpf_prog_array_item *_item; \
| 1154 | + struct bpf_prog *_prog; \
| 1155 | + struct bpf_prog_array *_array; \
| 1156 | + u32 ret; \
| 1157 | + u32 _ret = 1; \
| 1158 | + u32 _cn = 0; \
| 1159 | + migrate_disable(); \
| 1160 | + rcu_read_lock(); \
| 1161 | + _array = rcu_dereference(array); \
| 1162 | + _item = &_array->items[0]; \
| 1163 | + while ((_prog = READ_ONCE(_item->prog))) { \
| 1164 | + if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
| 1165 | + break; \
| 1166 | + ret = func(_prog, ctx); \
| 1167 | + bpf_cgroup_storage_unset(); \
| 1168 | + _ret &= (ret & 1); \
| 1169 | + _cn |= (ret & 2); \
| 1170 | + _item++; \
| 1171 | + } \
| 1172 | + rcu_read_unlock(); \
| 1173 | + migrate_enable(); \
| 1174 | + if (_ret) \
| 1175 | + _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
| 1176 | + else \
| 1177 | + _ret = (_cn ? NET_XMIT_DROP : -EPERM); \
| 1178 | + _ret; \
| 1179 | + })
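
A worked example of the mapping: a program returning 3 ("keep packet and cn") contributes _ret = 1 and _cn = 2, so the macro evaluates to NET_XMIT_CN; one program returning 0 anywhere in the array forces _ret = 0 and hence NET_XMIT_DROP or -EPERM. The call site looks roughly like this (hedged sketch, modeled on __cgroup_bpf_run_filter_skb()):

```c
/* Hedged sketch of the egress call site; the macro's result is already
 * a NET_XMIT_* code or -EPERM per the table above.
 */
ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(cgrp->bpf.effective[type],
					    skb, __bpf_prog_run_save_cb);
```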
408 | 1180 |
409 | 1181 | #define BPF_PROG_RUN_ARRAY(array, ctx, func) \
410 | 1182 | __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
.. | ..
414 | 1186 |
415 | 1187 | #ifdef CONFIG_BPF_SYSCALL
416 | 1188 | DECLARE_PER_CPU(int, bpf_prog_active);
| 1189 | +extern struct mutex bpf_stats_enabled_mutex;
| 1190 | +
| 1191 | +/*
| 1192 | + * Block execution of BPF programs attached to instrumentation (perf,
| 1193 | + * kprobes, tracepoints) to prevent deadlocks on map operations as any of
| 1194 | + * these events can happen inside a region which holds a map bucket lock
| 1195 | + * and can deadlock on it.
| 1196 | + *
| 1197 | + * Use the preemption safe inc/dec variants on RT because migrate disable
| 1198 | + * is preemptible on RT and preemption in the middle of the RMW operation
| 1199 | + * might lead to inconsistent state. Use the raw variants for non RT
| 1200 | + * kernels as migrate_disable() maps to preempt_disable() so the slightly
| 1201 | + * more expensive save operation can be avoided.
| 1202 | + */
| 1203 | +static inline void bpf_disable_instrumentation(void)
| 1204 | +{
| 1205 | + migrate_disable();
| 1206 | + if (IS_ENABLED(CONFIG_PREEMPT_RT))
| 1207 | + this_cpu_inc(bpf_prog_active);
| 1208 | + else
| 1209 | + __this_cpu_inc(bpf_prog_active);
| 1210 | +}
| 1211 | +
| 1212 | +static inline void bpf_enable_instrumentation(void)
| 1213 | +{
| 1214 | + if (IS_ENABLED(CONFIG_PREEMPT_RT))
| 1215 | + this_cpu_dec(bpf_prog_active);
| 1216 | + else
| 1217 | + __this_cpu_dec(bpf_prog_active);
| 1218 | + migrate_enable();
| 1219 | +}
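
The guard is meant to bracket the section that takes map bucket locks, so a perf/kprobe program firing on the same CPU in between sees bpf_prog_active elevated and skips execution instead of re-entering the map. A hedged sketch, modeled on the map syscall handlers in kernel/bpf/syscall.c:

```c
/* Hedged sketch: guarding a map operation in the syscall path. */
bpf_disable_instrumentation();
rcu_read_lock();
err = map->ops->map_delete_elem(map, key);
rcu_read_unlock();
bpf_enable_instrumentation();
```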
---|
417 | 1220 | |
---|
418 | 1221 | extern const struct file_operations bpf_map_fops; |
---|
419 | 1222 | extern const struct file_operations bpf_prog_fops; |
---|
| 1223 | +extern const struct file_operations bpf_iter_fops; |
---|
420 | 1224 | |
---|
421 | | -#define BPF_PROG_TYPE(_id, _name) \ |
---|
| 1225 | +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
---|
422 | 1226 | extern const struct bpf_prog_ops _name ## _prog_ops; \ |
---|
423 | 1227 | extern const struct bpf_verifier_ops _name ## _verifier_ops; |
---|
424 | 1228 | #define BPF_MAP_TYPE(_id, _ops) \ |
---|
425 | 1229 | extern const struct bpf_map_ops _ops; |
---|
| 1230 | +#define BPF_LINK_TYPE(_id, _name) |
---|
426 | 1231 | #include <linux/bpf_types.h> |
---|
427 | 1232 | #undef BPF_PROG_TYPE |
---|
428 | 1233 | #undef BPF_MAP_TYPE |
---|
| 1234 | +#undef BPF_LINK_TYPE |
---|
429 | 1235 | |
---|
430 | 1236 | extern const struct bpf_prog_ops bpf_offload_prog_ops; |
---|
431 | 1237 | extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; |
---|
.. | .. |
---|
434 | 1240 | struct bpf_prog *bpf_prog_get(u32 ufd); |
---|
435 | 1241 | struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, |
---|
436 | 1242 | bool attach_drv); |
---|
437 | | -struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); |
---|
| 1243 | +void bpf_prog_add(struct bpf_prog *prog, int i); |
---|
438 | 1244 | void bpf_prog_sub(struct bpf_prog *prog, int i); |
---|
439 | | -struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); |
---|
| 1245 | +void bpf_prog_inc(struct bpf_prog *prog); |
---|
440 | 1246 | struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); |
---|
441 | 1247 | void bpf_prog_put(struct bpf_prog *prog); |
---|
442 | 1248 | int __bpf_prog_charge(struct user_struct *user, u32 pages); |
---|
.. | .. |
---|
445 | 1251 | void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); |
---|
446 | 1252 | void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); |
---|
447 | 1253 | |
---|
| 1254 | +struct bpf_map *bpf_map_get(u32 ufd); |
---|
448 | 1255 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); |
---|
449 | 1256 | struct bpf_map *__bpf_map_get(struct fd f); |
---|
450 | | -struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); |
---|
| 1257 | +void bpf_map_inc(struct bpf_map *map); |
---|
| 1258 | +void bpf_map_inc_with_uref(struct bpf_map *map); |
---|
| 1259 | +struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); |
---|
451 | 1260 | void bpf_map_put_with_uref(struct bpf_map *map); |
---|
452 | 1261 | void bpf_map_put(struct bpf_map *map); |
---|
453 | | -int bpf_map_precharge_memlock(u32 pages); |
---|
454 | 1262 | int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); |
---|
455 | 1263 | void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); |
---|
456 | | -void *bpf_map_area_alloc(size_t size, int numa_node); |
---|
| 1264 | +int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size); |
---|
| 1265 | +void bpf_map_charge_finish(struct bpf_map_memory *mem); |
---|
| 1266 | +void bpf_map_charge_move(struct bpf_map_memory *dst, |
---|
| 1267 | + struct bpf_map_memory *src); |
---|
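/*
 * Editor's sketch: the bpf_map_charge_{init,finish,move}() trio replaces
 * bpf_map_precharge_memlock(). A map allocator charges the memlock
 * budget up front, moves the charge into the map on success, and
 * uncharges on failure, roughly (illustrative flow, error paths
 * trimmed):
 *
 *	struct bpf_map_memory mem;
 *
 *	err = bpf_map_charge_init(&mem, cost);
 *	if (err)
 *		return ERR_PTR(err);
 *	map = bpf_map_area_alloc(size, numa_node);
 *	if (!map) {
 *		bpf_map_charge_finish(&mem);	// undo the charge
 *		return ERR_PTR(-ENOMEM);
 *	}
 *	bpf_map_charge_move(&map->memory, &mem);
 */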
| 1268 | +void *bpf_map_area_alloc(u64 size, int numa_node); |
---|
| 1269 | +void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); |
---|
457 | 1270 | void bpf_map_area_free(void *base); |
---|
| 1271 | +bool bpf_map_write_active(const struct bpf_map *map); |
---|
458 | 1272 | void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); |
---|
| 1273 | +int generic_map_lookup_batch(struct bpf_map *map, |
---|
| 1274 | + const union bpf_attr *attr, |
---|
| 1275 | + union bpf_attr __user *uattr); |
---|
| 1276 | +int generic_map_update_batch(struct bpf_map *map, |
---|
| 1277 | + const union bpf_attr *attr, |
---|
| 1278 | + union bpf_attr __user *uattr); |
---|
| 1279 | +int generic_map_delete_batch(struct bpf_map *map, |
---|
| 1280 | + const union bpf_attr *attr, |
---|
| 1281 | + union bpf_attr __user *uattr); |
---|
| 1282 | +struct bpf_map *bpf_map_get_curr_or_next(u32 *id); |
---|
| 1283 | +struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); |
---|
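/*
 * Editor's sketch: map types with no special batching needs can route
 * the BPF_MAP_*_BATCH commands to the generic helpers above, e.g.
 * (illustrative ops table, mirroring what arraymap.c does):
 *
 *	const struct bpf_map_ops sketch_map_ops = {
 *		...
 *		.map_lookup_batch = generic_map_lookup_batch,
 *		.map_update_batch = generic_map_update_batch,
 *		.map_delete_batch = generic_map_delete_batch,
 *	};
 */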
459 | 1284 | |
---|
460 | 1285 | extern int sysctl_unprivileged_bpf_disabled; |
---|
| 1286 | + |
---|
| 1287 | +static inline bool bpf_allow_ptr_leaks(void) |
---|
| 1288 | +{ |
---|
| 1289 | + return perfmon_capable(); |
---|
| 1290 | +} |
---|
| 1291 | + |
---|
| 1292 | +static inline bool bpf_allow_uninit_stack(void) |
---|
| 1293 | +{ |
---|
| 1294 | + return perfmon_capable(); |
---|
| 1295 | +} |
---|
| 1296 | + |
---|
| 1297 | +static inline bool bpf_allow_ptr_to_map_access(void) |
---|
| 1298 | +{ |
---|
| 1299 | + return perfmon_capable(); |
---|
| 1300 | +} |
---|
| 1301 | + |
---|
| 1302 | +static inline bool bpf_bypass_spec_v1(void) |
---|
| 1303 | +{ |
---|
| 1304 | + return perfmon_capable(); |
---|
| 1305 | +} |
---|
| 1306 | + |
---|
| 1307 | +static inline bool bpf_bypass_spec_v4(void) |
---|
| 1308 | +{ |
---|
| 1309 | + return perfmon_capable(); |
---|
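/*
 * Editor's note: all five predicates above currently reduce to
 * perfmon_capable() (CAP_PERFMON, or CAP_SYS_ADMIN as its superset),
 * but they are kept as separate hooks so each policy bit can evolve
 * independently; the verifier snapshots them at load time, roughly:
 *
 *	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
 *	env->bypass_spec_v1 = bpf_bypass_spec_v1();
 */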
| 1310 | +} |
---|
461 | 1311 | |
---|
462 | 1312 | int bpf_map_new_fd(struct bpf_map *map, int flags); |
---|
463 | 1313 | int bpf_prog_new_fd(struct bpf_prog *prog); |
---|
464 | 1314 | |
---|
| 1315 | +void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, |
---|
| 1316 | + const struct bpf_link_ops *ops, struct bpf_prog *prog); |
---|
| 1317 | +int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); |
---|
| 1318 | +int bpf_link_settle(struct bpf_link_primer *primer); |
---|
| 1319 | +void bpf_link_cleanup(struct bpf_link_primer *primer); |
---|
| 1320 | +void bpf_link_inc(struct bpf_link *link); |
---|
| 1321 | +void bpf_link_put(struct bpf_link *link); |
---|
| 1322 | +int bpf_link_new_fd(struct bpf_link *link); |
---|
| 1323 | +struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); |
---|
| 1324 | +struct bpf_link *bpf_link_get_from_fd(u32 ufd); |
---|
| 1325 | + |
---|
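/*
 * Editor's sketch of the link creation protocol implied by the API
 * above: bpf_link_prime() reserves an fd and file without exposing
 * them, bpf_link_settle() publishes them once attachment succeeded,
 * and bpf_link_cleanup() unwinds a primed-but-unattached link. The
 * attach step below is a hypothetical stand-in.
 */
static int sketch_do_attach(struct bpf_link *link, struct bpf_prog *prog);

static int sketch_link_create(struct bpf_link *link, struct bpf_prog *prog)
{
	struct bpf_link_primer primer;
	int err;

	err = bpf_link_prime(link, &primer);
	if (err) {
		kfree(link);			/* never primed: plain free */
		return err;
	}
	err = sketch_do_attach(link, prog);	/* hypothetical attach step */
	if (err) {
		bpf_link_cleanup(&primer);	/* drops reserved fd/file/id */
		return err;
	}
	return bpf_link_settle(&primer);	/* returns the ready-to-use fd */
}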
465 | 1326 | int bpf_obj_pin_user(u32 ufd, const char __user *pathname); |
---|
466 | 1327 | int bpf_obj_get_user(const char __user *pathname, int flags); |
---|
| 1328 | + |
---|
| 1329 | +#define BPF_ITER_FUNC_PREFIX "bpf_iter_" |
---|
| 1330 | +#define DEFINE_BPF_ITER_FUNC(target, args...) \ |
---|
| 1331 | + extern int bpf_iter_ ## target(args); \ |
---|
| 1332 | + int __init bpf_iter_ ## target(args) { return 0; } |
---|
| 1333 | + |
---|
| 1334 | +struct bpf_iter_aux_info { |
---|
| 1335 | + struct bpf_map *map; |
---|
| 1336 | +}; |
---|
| 1337 | + |
---|
| 1338 | +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, |
---|
| 1339 | + union bpf_iter_link_info *linfo, |
---|
| 1340 | + struct bpf_iter_aux_info *aux); |
---|
| 1341 | +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); |
---|
| 1342 | +typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *aux,
---|
| 1343 | + struct seq_file *seq); |
---|
| 1344 | +typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, |
---|
| 1345 | + struct bpf_link_info *info); |
---|
| 1346 | + |
---|
| 1347 | +#define BPF_ITER_CTX_ARG_MAX 2 |
---|
| 1348 | +struct bpf_iter_reg { |
---|
| 1349 | + const char *target; |
---|
| 1350 | + bpf_iter_attach_target_t attach_target; |
---|
| 1351 | + bpf_iter_detach_target_t detach_target; |
---|
| 1352 | + bpf_iter_show_fdinfo_t show_fdinfo; |
---|
| 1353 | + bpf_iter_fill_link_info_t fill_link_info; |
---|
| 1354 | + u32 ctx_arg_info_size; |
---|
| 1355 | + struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; |
---|
| 1356 | + const struct bpf_iter_seq_info *seq_info; |
---|
| 1357 | +}; |
---|
| 1358 | + |
---|
| 1359 | +struct bpf_iter_meta { |
---|
| 1360 | + __bpf_md_ptr(struct seq_file *, seq); |
---|
| 1361 | + u64 session_id; |
---|
| 1362 | + u64 seq_num; |
---|
| 1363 | +}; |
---|
| 1364 | + |
---|
| 1365 | +struct bpf_iter__bpf_map_elem { |
---|
| 1366 | + __bpf_md_ptr(struct bpf_iter_meta *, meta); |
---|
| 1367 | + __bpf_md_ptr(struct bpf_map *, map); |
---|
| 1368 | + __bpf_md_ptr(void *, key); |
---|
| 1369 | + __bpf_md_ptr(void *, value); |
---|
| 1370 | +}; |
---|
| 1371 | + |
---|
| 1372 | +int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); |
---|
| 1373 | +void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); |
---|
| 1374 | +bool bpf_iter_prog_supported(struct bpf_prog *prog); |
---|
| 1375 | +int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); |
---|
| 1376 | +int bpf_iter_new_fd(struct bpf_link *link); |
---|
| 1377 | +bool bpf_link_is_iter(struct bpf_link *link); |
---|
| 1378 | +struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); |
---|
| 1379 | +int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); |
---|
| 1380 | +void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, |
---|
| 1381 | + struct seq_file *seq); |
---|
| 1382 | +int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, |
---|
| 1383 | + struct bpf_link_info *info); |
---|
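/*
 * Editor's sketch: registering an iterator target with the API above,
 * loosely modeled on the task iterator (every "sketch" name here is
 * illustrative). DEFINE_BPF_ITER_FUNC() emits the dummy bpf_iter_sketch()
 * whose BTF signature describes the program's context arguments.
 */
struct bpf_iter__sketch {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};

DEFINE_BPF_ITER_FUNC(sketch, struct bpf_iter_meta *meta, struct task_struct *task)

static const struct bpf_iter_seq_info sketch_seq_info; /* hypothetical seq_ops wiring */

static struct bpf_iter_reg sketch_iter_reg = {
	.target			= "sketch",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__sketch, task),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &sketch_seq_info,
};

static int __init sketch_iter_init(void)
{
	return bpf_iter_reg_target(&sketch_iter_reg);
}
late_initcall(sketch_iter_init);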
467 | 1384 | |
---|
468 | 1385 | int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); |
---|
469 | 1386 | int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); |
---|
.. | .. |
---|
502 | 1419 | } |
---|
503 | 1420 | |
---|
504 | 1421 | /* verify correctness of eBPF program */ |
---|
505 | | -int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); |
---|
| 1422 | +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, |
---|
| 1423 | + union bpf_attr __user *uattr); |
---|
| 1424 | + |
---|
| 1425 | +#ifndef CONFIG_BPF_JIT_ALWAYS_ON |
---|
506 | 1426 | void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); |
---|
| 1427 | +#endif |
---|
| 1428 | + |
---|
| 1429 | +struct btf *bpf_get_btf_vmlinux(void); |
---|
507 | 1430 | |
---|
508 | 1431 | /* Map specifics */ |
---|
509 | 1432 | struct xdp_buff; |
---|
510 | 1433 | struct sk_buff; |
---|
511 | 1434 | |
---|
512 | 1435 | struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); |
---|
513 | | -void __dev_map_insert_ctx(struct bpf_map *map, u32 index); |
---|
514 | | -void __dev_map_flush(struct bpf_map *map); |
---|
| 1436 | +struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); |
---|
| 1437 | +void __dev_flush(void); |
---|
| 1438 | +int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, |
---|
| 1439 | + struct net_device *dev_rx); |
---|
515 | 1440 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, |
---|
516 | 1441 | struct net_device *dev_rx); |
---|
517 | 1442 | int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, |
---|
518 | 1443 | struct bpf_prog *xdp_prog); |
---|
| 1444 | +bool dev_map_can_have_prog(struct bpf_map *map); |
---|
519 | 1445 | |
---|
520 | 1446 | struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); |
---|
521 | | -void __cpu_map_insert_ctx(struct bpf_map *map, u32 index); |
---|
522 | | -void __cpu_map_flush(struct bpf_map *map); |
---|
| 1447 | +void __cpu_map_flush(void); |
---|
523 | 1448 | int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, |
---|
524 | 1449 | struct net_device *dev_rx); |
---|
| 1450 | +bool cpu_map_prog_allowed(struct bpf_map *map); |
---|
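/*
 * Editor's note: __dev_flush() and __cpu_map_flush() lost their map
 * argument because flushing now walks a per-CPU list of entries queued
 * during the current NAPI cycle, so the XDP redirect completion path
 * can flush everything with one call, independent of which maps the
 * packets were redirected through.
 */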
525 | 1451 | |
---|
526 | 1452 | /* Return map's numa specified by userspace */ |
---|
527 | 1453 | static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) |
---|
.. | .. |
---|
532 | 1458 | |
---|
533 | 1459 | struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); |
---|
534 | 1460 | int array_map_alloc_check(union bpf_attr *attr); |
---|
| 1461 | + |
---|
| 1462 | +int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, |
---|
| 1463 | + union bpf_attr __user *uattr); |
---|
| 1464 | +int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, |
---|
| 1465 | + union bpf_attr __user *uattr); |
---|
| 1466 | +int bpf_prog_test_run_tracing(struct bpf_prog *prog, |
---|
| 1467 | + const union bpf_attr *kattr, |
---|
| 1468 | + union bpf_attr __user *uattr); |
---|
| 1469 | +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, |
---|
| 1470 | + const union bpf_attr *kattr, |
---|
| 1471 | + union bpf_attr __user *uattr); |
---|
| 1472 | +int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, |
---|
| 1473 | + const union bpf_attr *kattr, |
---|
| 1474 | + union bpf_attr __user *uattr); |
---|
| 1475 | +int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, |
---|
| 1476 | + const union bpf_attr *kattr, |
---|
| 1477 | + union bpf_attr __user *uattr); |
---|
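/*
 * Editor's note: the bpf_prog_test_run_*() hooks above back the
 * BPF_PROG_TEST_RUN command; each program type wires one of them into
 * its bpf_prog_ops, e.g. (illustrative):
 *
 *	const struct bpf_prog_ops xdp_prog_ops = {
 *		.test_run = bpf_prog_test_run_xdp,
 *	};
 */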
| 1478 | +bool btf_ctx_access(int off, int size, enum bpf_access_type type, |
---|
| 1479 | + const struct bpf_prog *prog, |
---|
| 1480 | + struct bpf_insn_access_aux *info); |
---|
| 1481 | +int btf_struct_access(struct bpf_verifier_log *log, |
---|
| 1482 | + const struct btf_type *t, int off, int size, |
---|
| 1483 | + enum bpf_access_type atype, |
---|
| 1484 | + u32 *next_btf_id); |
---|
| 1485 | +bool btf_struct_ids_match(struct bpf_verifier_log *log, |
---|
| 1486 | + int off, u32 id, u32 need_type_id); |
---|
| 1487 | + |
---|
| 1488 | +int btf_distill_func_proto(struct bpf_verifier_log *log, |
---|
| 1489 | + struct btf *btf, |
---|
| 1490 | + const struct btf_type *func_proto, |
---|
| 1491 | + const char *func_name, |
---|
| 1492 | + struct btf_func_model *m); |
---|
| 1493 | + |
---|
| 1494 | +struct bpf_reg_state; |
---|
| 1495 | +int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, |
---|
| 1496 | + struct bpf_reg_state *regs); |
---|
| 1497 | +int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, |
---|
| 1498 | + struct bpf_reg_state *reg); |
---|
| 1499 | +int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, |
---|
| 1500 | + struct btf *btf, const struct btf_type *t); |
---|
| 1501 | + |
---|
| 1502 | +struct bpf_prog *bpf_prog_by_id(u32 id); |
---|
| 1503 | +struct bpf_link *bpf_link_by_id(u32 id); |
---|
| 1504 | + |
---|
| 1505 | +const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); |
---|
| 1506 | + |
---|
| 1507 | +static inline bool unprivileged_ebpf_enabled(void) |
---|
| 1508 | +{ |
---|
| 1509 | + return !sysctl_unprivileged_bpf_disabled; |
---|
| 1510 | +} |
---|
535 | 1511 | |
---|
536 | 1512 | #else /* !CONFIG_BPF_SYSCALL */ |
---|
537 | 1513 | static inline struct bpf_prog *bpf_prog_get(u32 ufd) |
---|
.. | .. |
---|
546 | 1522 | return ERR_PTR(-EOPNOTSUPP); |
---|
547 | 1523 | } |
---|
548 | 1524 | |
---|
549 | | -static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, |
---|
550 | | - int i) |
---|
| 1525 | +static inline void bpf_prog_add(struct bpf_prog *prog, int i) |
---|
551 | 1526 | { |
---|
552 | | - return ERR_PTR(-EOPNOTSUPP); |
---|
553 | 1527 | } |
---|
554 | 1528 | |
---|
555 | 1529 | static inline void bpf_prog_sub(struct bpf_prog *prog, int i) |
---|
.. | .. |
---|
560 | 1534 | { |
---|
561 | 1535 | } |
---|
562 | 1536 | |
---|
563 | | -static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) |
---|
| 1537 | +static inline void bpf_prog_inc(struct bpf_prog *prog) |
---|
564 | 1538 | { |
---|
565 | | - return ERR_PTR(-EOPNOTSUPP); |
---|
566 | 1539 | } |
---|
567 | 1540 | |
---|
568 | 1541 | static inline struct bpf_prog *__must_check |
---|
.. | .. |
---|
580 | 1553 | { |
---|
581 | 1554 | } |
---|
582 | 1555 | |
---|
| 1556 | +static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, |
---|
| 1557 | + const struct bpf_link_ops *ops, |
---|
| 1558 | + struct bpf_prog *prog) |
---|
| 1559 | +{ |
---|
| 1560 | +} |
---|
| 1561 | + |
---|
| 1562 | +static inline int bpf_link_prime(struct bpf_link *link, |
---|
| 1563 | + struct bpf_link_primer *primer) |
---|
| 1564 | +{ |
---|
| 1565 | + return -EOPNOTSUPP; |
---|
| 1566 | +} |
---|
| 1567 | + |
---|
| 1568 | +static inline int bpf_link_settle(struct bpf_link_primer *primer) |
---|
| 1569 | +{ |
---|
| 1570 | + return -EOPNOTSUPP; |
---|
| 1571 | +} |
---|
| 1572 | + |
---|
| 1573 | +static inline void bpf_link_cleanup(struct bpf_link_primer *primer) |
---|
| 1574 | +{ |
---|
| 1575 | +} |
---|
| 1576 | + |
---|
| 1577 | +static inline void bpf_link_inc(struct bpf_link *link) |
---|
| 1578 | +{ |
---|
| 1579 | +} |
---|
| 1580 | + |
---|
| 1581 | +static inline void bpf_link_put(struct bpf_link *link) |
---|
| 1582 | +{ |
---|
| 1583 | +} |
---|
| 1584 | + |
---|
583 | 1585 | static inline int bpf_obj_get_user(const char __user *pathname, int flags) |
---|
584 | 1586 | { |
---|
585 | 1587 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
591 | 1593 | return NULL; |
---|
592 | 1594 | } |
---|
593 | 1595 | |
---|
594 | | -static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index) |
---|
| 1596 | +static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, |
---|
| 1597 | + u32 key) |
---|
595 | 1598 | { |
---|
| 1599 | + return NULL; |
---|
| 1600 | +} |
---|
| 1601 | +static inline bool dev_map_can_have_prog(struct bpf_map *map) |
---|
| 1602 | +{ |
---|
| 1603 | + return false; |
---|
596 | 1604 | } |
---|
597 | 1605 | |
---|
598 | | -static inline void __dev_map_flush(struct bpf_map *map) |
---|
| 1606 | +static inline void __dev_flush(void) |
---|
599 | 1607 | { |
---|
600 | 1608 | } |
---|
601 | 1609 | |
---|
602 | 1610 | struct xdp_buff; |
---|
603 | 1611 | struct bpf_dtab_netdev; |
---|
| 1612 | + |
---|
| 1613 | +static inline |
---|
| 1614 | +int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, |
---|
| 1615 | + struct net_device *dev_rx) |
---|
| 1616 | +{ |
---|
| 1617 | + return 0; |
---|
| 1618 | +} |
---|
604 | 1619 | |
---|
605 | 1620 | static inline |
---|
606 | 1621 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, |
---|
.. | .. |
---|
624 | 1639 | return NULL; |
---|
625 | 1640 | } |
---|
626 | 1641 | |
---|
627 | | -static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index) |
---|
628 | | -{ |
---|
629 | | -} |
---|
630 | | - |
---|
631 | | -static inline void __cpu_map_flush(struct bpf_map *map) |
---|
| 1642 | +static inline void __cpu_map_flush(void) |
---|
632 | 1643 | { |
---|
633 | 1644 | } |
---|
634 | 1645 | |
---|
.. | .. |
---|
639 | 1650 | return 0; |
---|
640 | 1651 | } |
---|
641 | 1652 | |
---|
| 1653 | +static inline bool cpu_map_prog_allowed(struct bpf_map *map) |
---|
| 1654 | +{ |
---|
| 1655 | + return false; |
---|
| 1656 | +} |
---|
| 1657 | + |
---|
642 | 1658 | static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, |
---|
643 | 1659 | enum bpf_prog_type type) |
---|
644 | 1660 | { |
---|
645 | 1661 | return ERR_PTR(-EOPNOTSUPP); |
---|
646 | 1662 | } |
---|
| 1663 | + |
---|
| 1664 | +static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, |
---|
| 1665 | + const union bpf_attr *kattr, |
---|
| 1666 | + union bpf_attr __user *uattr) |
---|
| 1667 | +{ |
---|
| 1668 | + return -ENOTSUPP; |
---|
| 1669 | +} |
---|
| 1670 | + |
---|
| 1671 | +static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, |
---|
| 1672 | + const union bpf_attr *kattr, |
---|
| 1673 | + union bpf_attr __user *uattr) |
---|
| 1674 | +{ |
---|
| 1675 | + return -ENOTSUPP; |
---|
| 1676 | +} |
---|
| 1677 | + |
---|
| 1678 | +static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, |
---|
| 1679 | + const union bpf_attr *kattr, |
---|
| 1680 | + union bpf_attr __user *uattr) |
---|
| 1681 | +{ |
---|
| 1682 | + return -ENOTSUPP; |
---|
| 1683 | +} |
---|
| 1684 | + |
---|
| 1685 | +static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, |
---|
| 1686 | + const union bpf_attr *kattr, |
---|
| 1687 | + union bpf_attr __user *uattr) |
---|
| 1688 | +{ |
---|
| 1689 | + return -ENOTSUPP; |
---|
| 1690 | +} |
---|
| 1691 | + |
---|
| 1692 | +static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, |
---|
| 1693 | + const union bpf_attr *kattr, |
---|
| 1694 | + union bpf_attr __user *uattr) |
---|
| 1695 | +{ |
---|
| 1696 | + return -ENOTSUPP; |
---|
| 1697 | +} |
---|
| 1698 | + |
---|
| 1699 | +static inline void bpf_map_put(struct bpf_map *map) |
---|
| 1700 | +{ |
---|
| 1701 | +} |
---|
| 1702 | + |
---|
| 1703 | +static inline struct bpf_prog *bpf_prog_by_id(u32 id) |
---|
| 1704 | +{ |
---|
| 1705 | + return ERR_PTR(-ENOTSUPP); |
---|
| 1706 | +} |
---|
| 1707 | + |
---|
| 1708 | +static inline const struct bpf_func_proto * |
---|
| 1709 | +bpf_base_func_proto(enum bpf_func_id func_id) |
---|
| 1710 | +{ |
---|
| 1711 | + return NULL; |
---|
| 1712 | +} |
---|
| 1713 | + |
---|
| 1714 | +static inline bool unprivileged_ebpf_enabled(void) |
---|
| 1715 | +{ |
---|
| 1716 | + return false; |
---|
| 1717 | +} |
---|
| 1718 | + |
---|
647 | 1719 | #endif /* CONFIG_BPF_SYSCALL */ |
---|
648 | 1720 | |
---|
649 | 1721 | static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, |
---|
.. | .. |
---|
651 | 1723 | { |
---|
652 | 1724 | return bpf_prog_get_type_dev(ufd, type, false); |
---|
653 | 1725 | } |
---|
| 1726 | + |
---|
| 1727 | +void __bpf_free_used_maps(struct bpf_prog_aux *aux, |
---|
| 1728 | + struct bpf_map **used_maps, u32 len); |
---|
654 | 1729 | |
---|
655 | 1730 | bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); |
---|
656 | 1731 | |
---|
.. | .. |
---|
670 | 1745 | |
---|
671 | 1746 | bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); |
---|
672 | 1747 | |
---|
673 | | -struct bpf_offload_dev *bpf_offload_dev_create(void); |
---|
| 1748 | +struct bpf_offload_dev * |
---|
| 1749 | +bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); |
---|
674 | 1750 | void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); |
---|
| 1751 | +void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); |
---|
675 | 1752 | int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, |
---|
676 | 1753 | struct net_device *netdev); |
---|
677 | 1754 | void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, |
---|
678 | 1755 | struct net_device *netdev); |
---|
679 | 1756 | bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); |
---|
| 1757 | + |
---|
| 1758 | +void unpriv_ebpf_notify(int new_state); |
---|
680 | 1759 | |
---|
681 | 1760 | #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) |
---|
682 | 1761 | int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); |
---|
.. | .. |
---|
720 | 1799 | } |
---|
721 | 1800 | #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ |
---|
722 | 1801 | |
---|
723 | | -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) |
---|
724 | | -struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); |
---|
725 | | -struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); |
---|
726 | | -int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); |
---|
727 | | -int sockmap_get_from_fd(const union bpf_attr *attr, int type, |
---|
728 | | - struct bpf_prog *prog); |
---|
| 1802 | +#if defined(CONFIG_BPF_STREAM_PARSER) |
---|
| 1803 | +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, |
---|
| 1804 | + struct bpf_prog *old, u32 which); |
---|
| 1805 | +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); |
---|
| 1806 | +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); |
---|
| 1807 | +int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); |
---|
| 1808 | +void sock_map_unhash(struct sock *sk); |
---|
| 1809 | +void sock_map_close(struct sock *sk, long timeout); |
---|
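/*
 * Editor's note: sock_map_prog_update() supersedes sock_map_prog();
 * the extra "old" argument lets detach (and replace-style attaches)
 * name the program it expects to drop, and "which" selects the
 * parser/verdict slot. sock_map_unhash()/sock_map_close() are the
 * proto callbacks that tear a socket out of its map.
 */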
729 | 1810 | #else |
---|
730 | | -static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) |
---|
731 | | -{ |
---|
732 | | - return NULL; |
---|
733 | | -} |
---|
734 | | - |
---|
735 | | -static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map, |
---|
736 | | - void *key) |
---|
737 | | -{ |
---|
738 | | - return NULL; |
---|
739 | | -} |
---|
740 | | - |
---|
741 | | -static inline int sock_map_prog(struct bpf_map *map, |
---|
742 | | - struct bpf_prog *prog, |
---|
743 | | - u32 type) |
---|
| 1811 | +static inline int sock_map_prog_update(struct bpf_map *map, |
---|
| 1812 | + struct bpf_prog *prog, |
---|
| 1813 | + struct bpf_prog *old, u32 which) |
---|
744 | 1814 | { |
---|
745 | 1815 | return -EOPNOTSUPP; |
---|
746 | 1816 | } |
---|
747 | 1817 | |
---|
748 | | -static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, |
---|
749 | | - struct bpf_prog *prog) |
---|
| 1818 | +static inline int sock_map_get_from_fd(const union bpf_attr *attr, |
---|
| 1819 | + struct bpf_prog *prog) |
---|
750 | 1820 | { |
---|
751 | 1821 | return -EINVAL; |
---|
752 | 1822 | } |
---|
753 | | -#endif |
---|
754 | 1823 | |
---|
755 | | -#if defined(CONFIG_XDP_SOCKETS) |
---|
756 | | -struct xdp_sock; |
---|
757 | | -struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key); |
---|
758 | | -int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, |
---|
759 | | - struct xdp_sock *xs); |
---|
760 | | -void __xsk_map_flush(struct bpf_map *map); |
---|
761 | | -#else |
---|
762 | | -struct xdp_sock; |
---|
763 | | -static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, |
---|
764 | | - u32 key) |
---|
765 | | -{ |
---|
766 | | - return NULL; |
---|
767 | | -} |
---|
768 | | - |
---|
769 | | -static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, |
---|
770 | | - struct xdp_sock *xs) |
---|
| 1824 | +static inline int sock_map_prog_detach(const union bpf_attr *attr, |
---|
| 1825 | + enum bpf_prog_type ptype) |
---|
771 | 1826 | { |
---|
772 | 1827 | return -EOPNOTSUPP; |
---|
773 | 1828 | } |
---|
774 | 1829 | |
---|
775 | | -static inline void __xsk_map_flush(struct bpf_map *map) |
---|
| 1830 | +static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, |
---|
| 1831 | + u64 flags) |
---|
776 | 1832 | { |
---|
| 1833 | + return -EOPNOTSUPP; |
---|
777 | 1834 | } |
---|
778 | | -#endif |
---|
| 1835 | +#endif /* CONFIG_BPF_STREAM_PARSER */ |
---|
779 | 1836 | |
---|
780 | 1837 | #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) |
---|
781 | 1838 | void bpf_sk_reuseport_detach(struct sock *sk); |
---|
.. | .. |
---|
808 | 1865 | extern const struct bpf_func_proto bpf_map_lookup_elem_proto; |
---|
809 | 1866 | extern const struct bpf_func_proto bpf_map_update_elem_proto; |
---|
810 | 1867 | extern const struct bpf_func_proto bpf_map_delete_elem_proto; |
---|
| 1868 | +extern const struct bpf_func_proto bpf_map_push_elem_proto; |
---|
| 1869 | +extern const struct bpf_func_proto bpf_map_pop_elem_proto; |
---|
| 1870 | +extern const struct bpf_func_proto bpf_map_peek_elem_proto; |
---|
811 | 1871 | |
---|
812 | 1872 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; |
---|
813 | 1873 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; |
---|
.. | .. |
---|
820 | 1880 | extern const struct bpf_func_proto bpf_get_current_comm_proto; |
---|
821 | 1881 | extern const struct bpf_func_proto bpf_get_stackid_proto; |
---|
822 | 1882 | extern const struct bpf_func_proto bpf_get_stack_proto; |
---|
| 1883 | +extern const struct bpf_func_proto bpf_get_task_stack_proto; |
---|
| 1884 | +extern const struct bpf_func_proto bpf_get_stackid_proto_pe; |
---|
| 1885 | +extern const struct bpf_func_proto bpf_get_stack_proto_pe; |
---|
823 | 1886 | extern const struct bpf_func_proto bpf_sock_map_update_proto; |
---|
824 | 1887 | extern const struct bpf_func_proto bpf_sock_hash_update_proto; |
---|
825 | 1888 | extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; |
---|
826 | | - |
---|
| 1889 | +extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; |
---|
| 1890 | +extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; |
---|
| 1891 | +extern const struct bpf_func_proto bpf_msg_redirect_map_proto; |
---|
| 1892 | +extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; |
---|
| 1893 | +extern const struct bpf_func_proto bpf_sk_redirect_map_proto; |
---|
| 1894 | +extern const struct bpf_func_proto bpf_spin_lock_proto; |
---|
| 1895 | +extern const struct bpf_func_proto bpf_spin_unlock_proto; |
---|
827 | 1896 | extern const struct bpf_func_proto bpf_get_local_storage_proto; |
---|
| 1897 | +extern const struct bpf_func_proto bpf_strtol_proto; |
---|
| 1898 | +extern const struct bpf_func_proto bpf_strtoul_proto; |
---|
| 1899 | +extern const struct bpf_func_proto bpf_tcp_sock_proto; |
---|
| 1900 | +extern const struct bpf_func_proto bpf_jiffies64_proto; |
---|
| 1901 | +extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; |
---|
| 1902 | +extern const struct bpf_func_proto bpf_event_output_data_proto; |
---|
| 1903 | +extern const struct bpf_func_proto bpf_ringbuf_output_proto; |
---|
| 1904 | +extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; |
---|
| 1905 | +extern const struct bpf_func_proto bpf_ringbuf_submit_proto; |
---|
| 1906 | +extern const struct bpf_func_proto bpf_ringbuf_discard_proto; |
---|
| 1907 | +extern const struct bpf_func_proto bpf_ringbuf_query_proto; |
---|
| 1908 | +extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; |
---|
| 1909 | +extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; |
---|
| 1910 | +extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; |
---|
| 1911 | +extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; |
---|
| 1912 | +extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; |
---|
| 1913 | +extern const struct bpf_func_proto bpf_copy_from_user_proto; |
---|
| 1914 | +extern const struct bpf_func_proto bpf_snprintf_btf_proto; |
---|
| 1915 | +extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; |
---|
| 1916 | +extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; |
---|
| 1917 | + |
---|
| 1918 | +const struct bpf_func_proto *bpf_tracing_func_proto( |
---|
| 1919 | + enum bpf_func_id func_id, const struct bpf_prog *prog); |
---|
| 1920 | + |
---|
| 1921 | +const struct bpf_func_proto *tracing_prog_func_proto( |
---|
| 1922 | + enum bpf_func_id func_id, const struct bpf_prog *prog); |
---|
828 | 1923 | |
---|
829 | 1924 | /* Shared helpers among cBPF and eBPF. */ |
---|
830 | 1925 | void bpf_user_rnd_init_once(void); |
---|
831 | 1926 | u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
---|
| 1927 | +u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
---|
| 1928 | + |
---|
| 1929 | +#if defined(CONFIG_NET) |
---|
| 1930 | +bool bpf_sock_common_is_valid_access(int off, int size, |
---|
| 1931 | + enum bpf_access_type type, |
---|
| 1932 | + struct bpf_insn_access_aux *info); |
---|
| 1933 | +bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
---|
| 1934 | + struct bpf_insn_access_aux *info); |
---|
| 1935 | +u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, |
---|
| 1936 | + const struct bpf_insn *si, |
---|
| 1937 | + struct bpf_insn *insn_buf, |
---|
| 1938 | + struct bpf_prog *prog, |
---|
| 1939 | + u32 *target_size); |
---|
| 1940 | +#else |
---|
| 1941 | +static inline bool bpf_sock_common_is_valid_access(int off, int size, |
---|
| 1942 | + enum bpf_access_type type, |
---|
| 1943 | + struct bpf_insn_access_aux *info) |
---|
| 1944 | +{ |
---|
| 1945 | + return false; |
---|
| 1946 | +} |
---|
| 1947 | +static inline bool bpf_sock_is_valid_access(int off, int size, |
---|
| 1948 | + enum bpf_access_type type, |
---|
| 1949 | + struct bpf_insn_access_aux *info) |
---|
| 1950 | +{ |
---|
| 1951 | + return false; |
---|
| 1952 | +} |
---|
| 1953 | +static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, |
---|
| 1954 | + const struct bpf_insn *si, |
---|
| 1955 | + struct bpf_insn *insn_buf, |
---|
| 1956 | + struct bpf_prog *prog, |
---|
| 1957 | + u32 *target_size) |
---|
| 1958 | +{ |
---|
| 1959 | + return 0; |
---|
| 1960 | +} |
---|
| 1961 | +#endif |
---|
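/*
 * Editor's note on the hook pairs above and below: is_valid_access()
 * vets each context load/store at verification time, and
 * convert_ctx_access() then rewrites the abstract ctx offset into real
 * field accesses, emitting instructions into insn_buf and reporting
 * the true field width through *target_size.
 */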
| 1962 | + |
---|
| 1963 | +#ifdef CONFIG_INET |
---|
| 1964 | +struct sk_reuseport_kern { |
---|
| 1965 | + struct sk_buff *skb; |
---|
| 1966 | + struct sock *sk; |
---|
| 1967 | + struct sock *selected_sk; |
---|
| 1968 | + void *data_end; |
---|
| 1969 | + u32 hash; |
---|
| 1970 | + u32 reuseport_id; |
---|
| 1971 | + bool bind_inany; |
---|
| 1972 | +}; |
---|
| 1973 | +bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
---|
| 1974 | + struct bpf_insn_access_aux *info); |
---|
| 1975 | + |
---|
| 1976 | +u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, |
---|
| 1977 | + const struct bpf_insn *si, |
---|
| 1978 | + struct bpf_insn *insn_buf, |
---|
| 1979 | + struct bpf_prog *prog, |
---|
| 1980 | + u32 *target_size); |
---|
| 1981 | + |
---|
| 1982 | +bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
---|
| 1983 | + struct bpf_insn_access_aux *info); |
---|
| 1984 | + |
---|
| 1985 | +u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, |
---|
| 1986 | + const struct bpf_insn *si, |
---|
| 1987 | + struct bpf_insn *insn_buf, |
---|
| 1988 | + struct bpf_prog *prog, |
---|
| 1989 | + u32 *target_size); |
---|
| 1990 | +#else |
---|
| 1991 | +static inline bool bpf_tcp_sock_is_valid_access(int off, int size, |
---|
| 1992 | + enum bpf_access_type type, |
---|
| 1993 | + struct bpf_insn_access_aux *info) |
---|
| 1994 | +{ |
---|
| 1995 | + return false; |
---|
| 1996 | +} |
---|
| 1997 | + |
---|
| 1998 | +static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, |
---|
| 1999 | + const struct bpf_insn *si, |
---|
| 2000 | + struct bpf_insn *insn_buf, |
---|
| 2001 | + struct bpf_prog *prog, |
---|
| 2002 | + u32 *target_size) |
---|
| 2003 | +{ |
---|
| 2004 | + return 0; |
---|
| 2005 | +} |
---|
| 2006 | +static inline bool bpf_xdp_sock_is_valid_access(int off, int size, |
---|
| 2007 | + enum bpf_access_type type, |
---|
| 2008 | + struct bpf_insn_access_aux *info) |
---|
| 2009 | +{ |
---|
| 2010 | + return false; |
---|
| 2011 | +} |
---|
| 2012 | + |
---|
| 2013 | +static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, |
---|
| 2014 | + const struct bpf_insn *si, |
---|
| 2015 | + struct bpf_insn *insn_buf, |
---|
| 2016 | + struct bpf_prog *prog, |
---|
| 2017 | + u32 *target_size) |
---|
| 2018 | +{ |
---|
| 2019 | + return 0; |
---|
| 2020 | +} |
---|
| 2021 | +#endif /* CONFIG_INET */ |
---|
| 2022 | + |
---|
| 2023 | +enum bpf_text_poke_type { |
---|
| 2024 | + BPF_MOD_CALL, |
---|
| 2025 | + BPF_MOD_JUMP, |
---|
| 2026 | +}; |
---|
| 2027 | + |
---|
| 2028 | +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, |
---|
| 2029 | + void *addr1, void *addr2); |
---|
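/*
 * Editor's note: bpf_arch_text_poke() live-patches a call or jump site,
 * e.g. pointing a function's fentry pad at a BPF trampoline. A NULL
 * "old" address means the site is expected to hold a nop, and a NULL
 * "new" address restores a nop; the arch backend verifies the current
 * instruction matches the expected old one before patching.
 */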
| 2030 | + |
---|
| 2031 | +struct btf_id_set; |
---|
| 2032 | +bool btf_id_set_contains(const struct btf_id_set *set, u32 id); |
---|
832 | 2033 | |
---|
833 | 2034 | #endif /* _LINUX_BPF_H */ |
---|