 | 1 | +// SPDX-License-Identifier: GPL-2.0-or-later  | 
|---|
| 1 | 2 |  /* | 
|---|
| 2 | 3 |   * Linux Socket Filter - Kernel level socket filtering | 
|---|
| 3 | 4 |   * | 
|---|
| .. | .. | 
|---|
| 12 | 13 |   *	Alexei Starovoitov <ast@plumgrid.com> | 
|---|
| 13 | 14 |   *	Daniel Borkmann <dborkman@redhat.com> | 
|---|
| 14 | 15 |   * | 
|---|
| 15 |  | - * This program is free software; you can redistribute it and/or  | 
|---|
| 16 |  | - * modify it under the terms of the GNU General Public License  | 
|---|
| 17 |  | - * as published by the Free Software Foundation; either version  | 
|---|
| 18 |  | - * 2 of the License, or (at your option) any later version.  | 
|---|
| 19 |  | - *  | 
|---|
| 20 | 16 |   * Andi Kleen - Fix a few bad bugs and races. | 
|---|
| 21 | 17 |   * Kris Katterjohn - Added many additional checks in bpf_check_classic() | 
|---|
| 22 | 18 |   */ | 
|---|
| 23 | 19 |   | 
|---|
 | 20 | +#include <uapi/linux/btf.h>  | 
|---|
| 24 | 21 |  #include <linux/filter.h> | 
|---|
| 25 | 22 |  #include <linux/skbuff.h> | 
|---|
| 26 | 23 |  #include <linux/vmalloc.h> | 
|---|
| 27 | 24 |  #include <linux/random.h> | 
|---|
| 28 | 25 |  #include <linux/moduleloader.h> | 
|---|
| 29 | 26 |  #include <linux/bpf.h> | 
|---|
| 30 |  | -#include <linux/frame.h>  | 
|---|
 | 27 | +#include <linux/btf.h>  | 
|---|
 | 28 | +#include <linux/objtool.h>  | 
|---|
| 31 | 29 |  #include <linux/rbtree_latch.h> | 
|---|
| 32 | 30 |  #include <linux/kallsyms.h> | 
|---|
| 33 | 31 |  #include <linux/rcupdate.h> | 
|---|
| 34 | 32 |  #include <linux/perf_event.h> | 
|---|
 | 33 | +#include <linux/extable.h>  | 
|---|
 | 34 | +#include <linux/log2.h>  | 
|---|
| 35 | 35 |   | 
|---|
| 36 | 36 |  #include <asm/barrier.h> | 
|---|
| 37 | 37 |  #include <asm/unaligned.h> | 
|---|
 | 38 | +  | 
|---|
 | 39 | +#include <trace/hooks/memory.h>  | 
|---|
| 38 | 40 |   | 
|---|
| 39 | 41 |  /* Registers */ | 
|---|
| 40 | 42 |  #define BPF_R0	regs[BPF_REG_0] | 
|---|
| .. | .. | 
|---|
| 66 | 68 |  { | 
|---|
| 67 | 69 |  	u8 *ptr = NULL; | 
|---|
| 68 | 70 |   | 
|---|
| 69 |  | -	if (k >= SKF_NET_OFF)  | 
|---|
 | 71 | +	if (k >= SKF_NET_OFF) {  | 
|---|
| 70 | 72 |  		ptr = skb_network_header(skb) + k - SKF_NET_OFF; | 
|---|
| 71 |  | -	else if (k >= SKF_LL_OFF)  | 
|---|
 | 73 | +	} else if (k >= SKF_LL_OFF) {  | 
|---|
 | 74 | +		if (unlikely(!skb_mac_header_was_set(skb)))  | 
|---|
 | 75 | +			return NULL;  | 
|---|
| 72 | 76 |  		ptr = skb_mac_header(skb) + k - SKF_LL_OFF; | 
|---|
| 73 |  | -  | 
|---|
 | 77 | +	}  | 
|---|
| 74 | 78 |  	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) | 
|---|
| 75 | 79 |  		return ptr; | 
|---|
| 76 | 80 |   | 
|---|
| 77 | 81 |  	return NULL; | 
|---|
| 78 | 82 |  } | 
|---|
| 79 | 83 |   | 
|---|
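For context: classic BPF encodes "ancillary" loads as large negative offsets, where `k >= SKF_NET_OFF` reads relative to the network header and `k >= SKF_LL_OFF` relative to the link-layer header; the new `skb_mac_header_was_set()` guard covers packets that simply have no MAC header set. A minimal userspace sketch of a filter using these magic offsets (the raw socket `fd` and function name are illustrative):

```c
#include <linux/filter.h>	/* struct sock_filter/sock_fprog, SKF_NET_OFF */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <sys/socket.h>		/* setsockopt(), SO_ATTACH_FILTER */

/* Classic BPF: accept only TCP, reading the IPv4 protocol byte
 * (offset 9) relative to the network header via SKF_NET_OFF. */
static struct sock_filter code[] = {
	BPF_STMT(BPF_LD  | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept packet */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* drop packet   */
};

static int attach_filter(int fd)
{
	struct sock_fprog prog = {
		.len    = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
```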
| 80 |  | -struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)  | 
|---|
 | 84 | +struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)  | 
|---|
| 81 | 85 |  { | 
|---|
| 82 | 86 |  	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; | 
|---|
| 83 | 87 |  	struct bpf_prog_aux *aux; | 
|---|
| 84 | 88 |  	struct bpf_prog *fp; | 
|---|
| 85 | 89 |   | 
|---|
| 86 | 90 |  	size = round_up(size, PAGE_SIZE); | 
|---|
| 87 |  | -	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);  | 
|---|
 | 91 | +	fp = __vmalloc(size, gfp_flags);  | 
|---|
| 88 | 92 |  	if (fp == NULL) | 
|---|
| 89 | 93 |  		return NULL; | 
|---|
| 90 | 94 |   | 
|---|
| .. | .. | 
|---|
| 99 | 103 |  	fp->aux->prog = fp; | 
|---|
| 100 | 104 |  	fp->jit_requested = ebpf_jit_enabled(); | 
|---|
| 101 | 105 |   | 
|---|
| 102 |  | -	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);  | 
|---|
 | 106 | +	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);  | 
|---|
 | 107 | +	mutex_init(&fp->aux->used_maps_mutex);  | 
|---|
 | 108 | +	mutex_init(&fp->aux->dst_mutex);  | 
|---|
| 103 | 109 |   | 
|---|
| 104 | 110 |  	return fp; | 
|---|
| 105 | 111 |  } | 
|---|
 | 112 | +  | 
|---|
 | 113 | +struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)  | 
|---|
 | 114 | +{  | 
|---|
 | 115 | +	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;  | 
|---|
 | 116 | +	struct bpf_prog *prog;  | 
|---|
 | 117 | +	int cpu;  | 
|---|
 | 118 | +  | 
|---|
 | 119 | +	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);  | 
|---|
 | 120 | +	if (!prog)  | 
|---|
 | 121 | +		return NULL;  | 
|---|
 | 122 | +  | 
|---|
 | 123 | +	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);  | 
|---|
 | 124 | +	if (!prog->aux->stats) {  | 
|---|
 | 125 | +		kfree(prog->aux);  | 
|---|
 | 126 | +		vfree(prog);  | 
|---|
 | 127 | +		return NULL;  | 
|---|
 | 128 | +	}  | 
|---|
 | 129 | +  | 
|---|
 | 130 | +	for_each_possible_cpu(cpu) {  | 
|---|
 | 131 | +		struct bpf_prog_stats *pstats;  | 
|---|
 | 132 | +  | 
|---|
 | 133 | +		pstats = per_cpu_ptr(prog->aux->stats, cpu);  | 
|---|
 | 134 | +		u64_stats_init(&pstats->syncp);  | 
|---|
 | 135 | +	}  | 
|---|
 | 136 | +	return prog;  | 
|---|
 | 137 | +}  | 
|---|
| 106 | 138 |  EXPORT_SYMBOL_GPL(bpf_prog_alloc); | 
|---|
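The split above layers optional runtime statistics on top of `bpf_prog_alloc_no_stats()`: one `bpf_prog_stats` per possible CPU, each with a u64_stats sequence counter so 32-bit readers can fetch consistent 64-bit values. A sketch of the update/read pattern such per-CPU stats imply (the struct and function names here are illustrative, not the kernel's):

```c
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Illustrative mirror of struct bpf_prog_stats. */
struct my_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
};

/* Writer side: one sequence-protected update per event.
 * Caller must be pinned to a CPU (e.g. preemption disabled). */
static void my_stats_add(struct my_stats __percpu *stats, u64 dt)
{
	struct my_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->cnt++;
	s->nsecs += dt;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a consistent snapshot is seen. */
static u64 my_stats_read_cnt(struct my_stats __percpu *stats, int cpu)
{
	struct my_stats *s = per_cpu_ptr(stats, cpu);
	unsigned int start;
	u64 cnt;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		cnt = s->cnt;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return cnt;
}
```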
 | 139 | +  | 
|---|
 | 140 | +int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)  | 
|---|
 | 141 | +{  | 
|---|
 | 142 | +	if (!prog->aux->nr_linfo || !prog->jit_requested)  | 
|---|
 | 143 | +		return 0;  | 
|---|
 | 144 | +  | 
|---|
 | 145 | +	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,  | 
|---|
 | 146 | +					 sizeof(*prog->aux->jited_linfo),  | 
|---|
 | 147 | +					 GFP_KERNEL | __GFP_NOWARN);  | 
|---|
 | 148 | +	if (!prog->aux->jited_linfo)  | 
|---|
 | 149 | +		return -ENOMEM;  | 
|---|
 | 150 | +  | 
|---|
 | 151 | +	return 0;  | 
|---|
 | 152 | +}  | 
|---|
 | 153 | +  | 
|---|
 | 154 | +void bpf_prog_free_jited_linfo(struct bpf_prog *prog)  | 
|---|
 | 155 | +{  | 
|---|
 | 156 | +	kfree(prog->aux->jited_linfo);  | 
|---|
 | 157 | +	prog->aux->jited_linfo = NULL;  | 
|---|
 | 158 | +}  | 
|---|
 | 159 | +  | 
|---|
 | 160 | +void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)  | 
|---|
 | 161 | +{  | 
|---|
 | 162 | +	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])  | 
|---|
 | 163 | +		bpf_prog_free_jited_linfo(prog);  | 
|---|
 | 164 | +}  | 
|---|
 | 165 | +  | 
|---|
 | 166 | +/* The JIT engine is responsible for providing an array  | 
|---|
 | 167 | + * for the insn_off to jited_off mapping (insn_to_jit_off).  | 
|---|
 | 168 | + *  | 
|---|
 | 169 | + * The idx to this array is the insn_off.  Hence, the insn_off  | 
|---|
 | 170 | + * here is relative to the prog itself instead of the main prog.  | 
|---|
 | 171 | + * This array has one entry for each xlated bpf insn.  | 
|---|
 | 172 | + *  | 
|---|
 | 173 | + * jited_off is the byte off to the last byte of the jited insn.  | 
|---|
 | 174 | + *  | 
|---|
 | 175 | + * Hence, with  | 
|---|
 | 176 | + * insn_start:  | 
|---|
 | 177 | + *      The first bpf insn off of the prog.  The insn off  | 
|---|
 | 178 | + *      here is relative to the main prog.  | 
|---|
 | 179 | + *      e.g. if prog is a subprog, insn_start > 0  | 
|---|
 | 180 | + * linfo_idx:  | 
|---|
 | 181 | + *      The prog's idx to prog->aux->linfo and jited_linfo  | 
|---|
 | 182 | + *  | 
|---|
 | 183 | + * jited_linfo[linfo_idx] = prog->bpf_func  | 
|---|
 | 184 | + *  | 
|---|
 | 185 | + * For i > linfo_idx,  | 
|---|
 | 186 | + *  | 
|---|
 | 187 | + * jited_linfo[i] = prog->bpf_func +  | 
|---|
 | 188 | + *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]  | 
|---|
 | 189 | + */  | 
|---|
 | 190 | +void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,  | 
|---|
 | 191 | +			       const u32 *insn_to_jit_off)  | 
|---|
 | 192 | +{  | 
|---|
 | 193 | +	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;  | 
|---|
 | 194 | +	const struct bpf_line_info *linfo;  | 
|---|
 | 195 | +	void **jited_linfo;  | 
|---|
 | 196 | +  | 
|---|
 | 197 | +	if (!prog->aux->jited_linfo)  | 
|---|
 | 198 | +		/* Userspace did not provide linfo */  | 
|---|
 | 199 | +		return;  | 
|---|
 | 200 | +  | 
|---|
 | 201 | +	linfo_idx = prog->aux->linfo_idx;  | 
|---|
 | 202 | +	linfo = &prog->aux->linfo[linfo_idx];  | 
|---|
 | 203 | +	insn_start = linfo[0].insn_off;  | 
|---|
 | 204 | +	insn_end = insn_start + prog->len;  | 
|---|
 | 205 | +  | 
|---|
 | 206 | +	jited_linfo = &prog->aux->jited_linfo[linfo_idx];  | 
|---|
 | 207 | +	jited_linfo[0] = prog->bpf_func;  | 
|---|
 | 208 | +  | 
|---|
 | 209 | +	nr_linfo = prog->aux->nr_linfo - linfo_idx;  | 
|---|
 | 210 | +  | 
|---|
 | 211 | +	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)  | 
|---|
 | 212 | +		/* The verifier ensures that linfo[i].insn_off is  | 
|---|
 | 213 | +		 * strictly increasing  | 
|---|
 | 214 | +		 */  | 
|---|
 | 215 | +		jited_linfo[i] = prog->bpf_func +  | 
|---|
 | 216 | +			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];  | 
|---|
 | 217 | +}  | 
|---|
 | 218 | +  | 
|---|
 | 219 | +void bpf_prog_free_linfo(struct bpf_prog *prog)  | 
|---|
 | 220 | +{  | 
|---|
 | 221 | +	bpf_prog_free_jited_linfo(prog);  | 
|---|
 | 222 | +	kvfree(prog->aux->linfo);  | 
|---|
 | 223 | +}  | 
|---|
| 107 | 224 |   | 
|---|
| 108 | 225 |  struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, | 
|---|
| 109 | 226 |  				  gfp_t gfp_extra_flags) | 
|---|
| .. | .. | 
|---|
| 112 | 229 |  	struct bpf_prog *fp; | 
|---|
| 113 | 230 |  	u32 pages, delta; | 
|---|
| 114 | 231 |  	int ret; | 
|---|
| 115 |  | -  | 
|---|
| 116 |  | -	BUG_ON(fp_old == NULL);  | 
|---|
| 117 | 232 |   | 
|---|
| 118 | 233 |  	size = round_up(size, PAGE_SIZE); | 
|---|
| 119 | 234 |  	pages = size / PAGE_SIZE; | 
|---|
| .. | .. | 
|---|
| 125 | 240 |  	if (ret) | 
|---|
| 126 | 241 |  		return NULL; | 
|---|
| 127 | 242 |   | 
|---|
| 128 |  | -	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);  | 
|---|
 | 243 | +	fp = __vmalloc(size, gfp_flags);  | 
|---|
| 129 | 244 |  	if (fp == NULL) { | 
|---|
| 130 | 245 |  		__bpf_prog_uncharge(fp_old->aux->user, delta); | 
|---|
| 131 | 246 |  	} else { | 
|---|
| .. | .. | 
|---|
| 145 | 260 |   | 
|---|
| 146 | 261 |  void __bpf_prog_free(struct bpf_prog *fp) | 
|---|
| 147 | 262 |  { | 
|---|
| 148 |  | -	kfree(fp->aux);  | 
|---|
 | 263 | +	if (fp->aux) {  | 
|---|
 | 264 | +		mutex_destroy(&fp->aux->used_maps_mutex);  | 
|---|
 | 265 | +		mutex_destroy(&fp->aux->dst_mutex);  | 
|---|
 | 266 | +		free_percpu(fp->aux->stats);  | 
|---|
 | 267 | +		kfree(fp->aux->poke_tab);  | 
|---|
 | 268 | +		kfree(fp->aux);  | 
|---|
 | 269 | +	}  | 
|---|
| 149 | 270 |  	vfree(fp); | 
|---|
| 150 | 271 |  } | 
|---|
| 151 | 272 |   | 
|---|
| 152 | 273 |  int bpf_prog_calc_tag(struct bpf_prog *fp) | 
|---|
| 153 | 274 |  { | 
|---|
| 154 |  | -	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);  | 
|---|
 | 275 | +	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);  | 
|---|
| 155 | 276 |  	u32 raw_size = bpf_prog_tag_scratch_size(fp); | 
|---|
| 156 |  | -	u32 digest[SHA_DIGEST_WORDS];  | 
|---|
| 157 |  | -	u32 ws[SHA_WORKSPACE_WORDS];  | 
|---|
 | 277 | +	u32 digest[SHA1_DIGEST_WORDS];  | 
|---|
 | 278 | +	u32 ws[SHA1_WORKSPACE_WORDS];  | 
|---|
| 158 | 279 |  	u32 i, bsize, psize, blocks; | 
|---|
| 159 | 280 |  	struct bpf_insn *dst; | 
|---|
| 160 | 281 |  	bool was_ld_map; | 
|---|
| .. | .. | 
|---|
| 166 | 287 |  	if (!raw) | 
|---|
| 167 | 288 |  		return -ENOMEM; | 
|---|
| 168 | 289 |   | 
|---|
| 169 |  | -	sha_init(digest);  | 
|---|
 | 290 | +	sha1_init(digest);  | 
|---|
| 170 | 291 |  	memset(ws, 0, sizeof(ws)); | 
|---|
| 171 | 292 |   | 
|---|
| 172 | 293 |  	/* We need to take out the map fd for the digest calculation | 
|---|
| .. | .. | 
|---|
| 177 | 298 |  		dst[i] = fp->insnsi[i]; | 
|---|
| 178 | 299 |  		if (!was_ld_map && | 
|---|
| 179 | 300 |  		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && | 
|---|
| 180 |  | -		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {  | 
|---|
 | 301 | +		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||  | 
|---|
 | 302 | +		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {  | 
|---|
| 181 | 303 |  			was_ld_map = true; | 
|---|
| 182 | 304 |  			dst[i].imm = 0; | 
|---|
| 183 | 305 |  		} else if (was_ld_map && | 
|---|
| .. | .. | 
|---|
| 196 | 318 |  	memset(&raw[psize], 0, raw_size - psize); | 
|---|
| 197 | 319 |  	raw[psize++] = 0x80; | 
|---|
| 198 | 320 |   | 
|---|
| 199 |  | -	bsize  = round_up(psize, SHA_MESSAGE_BYTES);  | 
|---|
| 200 |  | -	blocks = bsize / SHA_MESSAGE_BYTES;  | 
|---|
 | 321 | +	bsize  = round_up(psize, SHA1_BLOCK_SIZE);  | 
|---|
 | 322 | +	blocks = bsize / SHA1_BLOCK_SIZE;  | 
|---|
| 201 | 323 |  	todo   = raw; | 
|---|
| 202 | 324 |  	if (bsize - psize >= sizeof(__be64)) { | 
|---|
| 203 | 325 |  		bits = (__be64 *)(todo + bsize - sizeof(__be64)); | 
|---|
| .. | .. | 
|---|
| 208 | 330 |  	*bits = cpu_to_be64((psize - 1) << 3); | 
|---|
| 209 | 331 |   | 
|---|
| 210 | 332 |  	while (blocks--) { | 
|---|
| 211 |  | -		sha_transform(digest, todo, ws);  | 
|---|
| 212 |  | -		todo += SHA_MESSAGE_BYTES;  | 
|---|
 | 333 | +		sha1_transform(digest, todo, ws);  | 
|---|
 | 334 | +		todo += SHA1_BLOCK_SIZE;  | 
|---|
| 213 | 335 |  	} | 
|---|
| 214 | 336 |   | 
|---|
| 215 | 337 |  	result = (__force __be32 *)digest; | 
|---|
| 216 |  | -	for (i = 0; i < SHA_DIGEST_WORDS; i++)  | 
|---|
 | 338 | +	for (i = 0; i < SHA1_DIGEST_WORDS; i++)  | 
|---|
| 217 | 339 |  		result[i] = cpu_to_be32(digest[i]); | 
|---|
| 218 | 340 |  	memcpy(fp->tag, result, sizeof(fp->tag)); | 
|---|
| 219 | 341 |   | 
|---|
| .. | .. | 
|---|
| 221 | 343 |  	return 0; | 
|---|
| 222 | 344 |  } | 
|---|
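`bpf_prog_calc_tag()` open-codes SHA-1 padding over the instruction image (with map references zeroed so the tag is stable across loads): append `0x80`, round up to a 64-byte block, and store the message length in bits big-endian in the final 8 bytes. A worked instance of that arithmetic for a 2-instruction image, assuming the `SHA1_*` constants used above come from `<crypto/sha1.h>`:

```c
#include <crypto/sha1.h>	/* SHA1_BLOCK_SIZE (64) */
#include <linux/kernel.h>	/* round_up() */
#include <linux/types.h>
#include <asm/byteorder.h>	/* cpu_to_be64() */

/* raw must provide at least SHA1_BLOCK_SIZE zeroed bytes. */
static void sha1_pad_example(u8 *raw)
{
	u32 psize = 16;		/* 2 insns * sizeof(struct bpf_insn) */
	u32 bsize;

	raw[psize++] = 0x80;			 /* terminator; psize = 17 */
	bsize = round_up(psize, SHA1_BLOCK_SIZE);/* 64                     */
	/* blocks = bsize / SHA1_BLOCK_SIZE = 1 -> one sha1_transform() */
	/* bsize - psize = 47 >= sizeof(__be64): length fits in-block.  */
	*(__be64 *)(raw + bsize - sizeof(__be64)) =
		cpu_to_be64((psize - 1) << 3);	/* 16 bytes = 128 bits */
}
```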
| 223 | 345 |   | 
|---|
| 224 |  | -static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,  | 
|---|
| 225 |  | -				u32 curr, const bool probe_pass)  | 
|---|
 | 346 | +static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,  | 
|---|
 | 347 | +				s32 end_new, s32 curr, const bool probe_pass)  | 
|---|
| 226 | 348 |  { | 
|---|
| 227 | 349 |  	const s64 imm_min = S32_MIN, imm_max = S32_MAX; | 
|---|
 | 350 | +	s32 delta = end_new - end_old;  | 
|---|
| 228 | 351 |  	s64 imm = insn->imm; | 
|---|
| 229 | 352 |   | 
|---|
| 230 |  | -	if (curr < pos && curr + imm + 1 > pos)  | 
|---|
 | 353 | +	if (curr < pos && curr + imm + 1 >= end_old)  | 
|---|
| 231 | 354 |  		imm += delta; | 
|---|
| 232 |  | -	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)  | 
|---|
 | 355 | +	else if (curr >= end_new && curr + imm + 1 < end_new)  | 
|---|
| 233 | 356 |  		imm -= delta; | 
|---|
| 234 | 357 |  	if (imm < imm_min || imm > imm_max) | 
|---|
| 235 | 358 |  		return -ERANGE; | 
|---|
| .. | .. | 
|---|
| 238 | 361 |  	return 0; | 
|---|
| 239 | 362 |  } | 
|---|
| 240 | 363 |   | 
|---|
| 241 |  | -static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,  | 
|---|
| 242 |  | -				u32 curr, const bool probe_pass)  | 
|---|
 | 364 | +static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,  | 
|---|
 | 365 | +				s32 end_new, s32 curr, const bool probe_pass)  | 
|---|
| 243 | 366 |  { | 
|---|
| 244 | 367 |  	const s32 off_min = S16_MIN, off_max = S16_MAX; | 
|---|
 | 368 | +	s32 delta = end_new - end_old;  | 
|---|
| 245 | 369 |  	s32 off = insn->off; | 
|---|
| 246 | 370 |   | 
|---|
| 247 |  | -	if (curr < pos && curr + off + 1 > pos)  | 
|---|
 | 371 | +	if (curr < pos && curr + off + 1 >= end_old)  | 
|---|
| 248 | 372 |  		off += delta; | 
|---|
| 249 |  | -	else if (curr > pos + delta && curr + off + 1 <= pos + delta)  | 
|---|
 | 373 | +	else if (curr >= end_new && curr + off + 1 < end_new)  | 
|---|
| 250 | 374 |  		off -= delta; | 
|---|
| 251 | 375 |  	if (off < off_min || off > off_max) | 
|---|
| 252 | 376 |  		return -ERANGE; | 
|---|
| .. | .. | 
|---|
| 255 | 379 |  	return 0; | 
|---|
| 256 | 380 |  } | 
|---|
| 257 | 381 |   | 
|---|
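Replacing the single `delta` with an `[end_old, end_new)` window lets the same helpers serve insertion (`end_new > end_old`) and shrinking (`end_new < end_old`). A toy model of the offset math for the insertion case, with a worked value checked in `main()`:

```c
#include <assert.h>

/* Toy model of bpf_adj_delta_to_off() above: patching one insn at
 * pos = 3 with len = 4 gives end_old = 4, end_new = 7, delta = +3. */
static int adjust_off(int curr, int off, int pos, int end_old, int end_new)
{
	int delta = end_new - end_old;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;	/* forward jump crosses the patched region */
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;	/* backward jump over the patched region   */
	return off;
}

int main(void)
{
	/* insn 1 jumped to insn 6 (off = 4); insn 6 moved to insn 9. */
	assert(adjust_off(1, 4, 3, 4, 7) == 7);
	return 0;
}
```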
| 258 |  | -static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,  | 
|---|
| 259 |  | -			    const bool probe_pass)  | 
|---|
 | 382 | +static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,  | 
|---|
 | 383 | +			    s32 end_new, const bool probe_pass)  | 
|---|
| 260 | 384 |  { | 
|---|
| 261 |  | -	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);  | 
|---|
 | 385 | +	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);  | 
|---|
| 262 | 386 |  	struct bpf_insn *insn = prog->insnsi; | 
|---|
| 263 | 387 |  	int ret = 0; | 
|---|
| 264 | 388 |   | 
|---|
| .. | .. | 
|---|
| 270 | 394 |  		 * do any other adjustments. Therefore skip the patchlet. | 
|---|
| 271 | 395 |  		 */ | 
|---|
| 272 | 396 |  		if (probe_pass && i == pos) { | 
|---|
| 273 |  | -			i += delta + 1;  | 
|---|
| 274 |  | -			insn++;  | 
|---|
 | 397 | +			i = end_new;  | 
|---|
 | 398 | +			insn = prog->insnsi + end_old;  | 
|---|
| 275 | 399 |  		} | 
|---|
| 276 | 400 |  		code = insn->code; | 
|---|
| 277 |  | -		if (BPF_CLASS(code) != BPF_JMP ||  | 
|---|
 | 401 | +		if ((BPF_CLASS(code) != BPF_JMP &&  | 
|---|
 | 402 | +		     BPF_CLASS(code) != BPF_JMP32) ||  | 
|---|
| 278 | 403 |  		    BPF_OP(code) == BPF_EXIT) | 
|---|
| 279 | 404 |  			continue; | 
|---|
| 280 | 405 |  		/* Adjust offset of jmps if we cross patch boundaries. */ | 
|---|
| 281 | 406 |  		if (BPF_OP(code) == BPF_CALL) { | 
|---|
| 282 | 407 |  			if (insn->src_reg != BPF_PSEUDO_CALL) | 
|---|
| 283 | 408 |  				continue; | 
|---|
| 284 |  | -			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,  | 
|---|
| 285 |  | -						   probe_pass);  | 
|---|
 | 409 | +			ret = bpf_adj_delta_to_imm(insn, pos, end_old,  | 
|---|
 | 410 | +						   end_new, i, probe_pass);  | 
|---|
| 286 | 411 |  		} else { | 
|---|
| 287 |  | -			ret = bpf_adj_delta_to_off(insn, pos, delta, i,  | 
|---|
| 288 |  | -						   probe_pass);  | 
|---|
 | 412 | +			ret = bpf_adj_delta_to_off(insn, pos, end_old,  | 
|---|
 | 413 | +						   end_new, i, probe_pass);  | 
|---|
| 289 | 414 |  		} | 
|---|
| 290 | 415 |  		if (ret) | 
|---|
| 291 | 416 |  			break; | 
|---|
| .. | .. | 
|---|
| 294 | 419 |  	return ret; | 
|---|
| 295 | 420 |  } | 
|---|
| 296 | 421 |   | 
|---|
 | 422 | +static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)  | 
|---|
 | 423 | +{  | 
|---|
 | 424 | +	struct bpf_line_info *linfo;  | 
|---|
 | 425 | +	u32 i, nr_linfo;  | 
|---|
 | 426 | +  | 
|---|
 | 427 | +	nr_linfo = prog->aux->nr_linfo;  | 
|---|
 | 428 | +	if (!nr_linfo || !delta)  | 
|---|
 | 429 | +		return;  | 
|---|
 | 430 | +  | 
|---|
 | 431 | +	linfo = prog->aux->linfo;  | 
|---|
 | 432 | +  | 
|---|
 | 433 | +	for (i = 0; i < nr_linfo; i++)  | 
|---|
 | 434 | +		if (off < linfo[i].insn_off)  | 
|---|
 | 435 | +			break;  | 
|---|
 | 436 | +  | 
|---|
 | 437 | +	/* Push all off < linfo[i].insn_off by delta */  | 
|---|
 | 438 | +	for (; i < nr_linfo; i++)  | 
|---|
 | 439 | +		linfo[i].insn_off += delta;  | 
|---|
 | 440 | +}  | 
|---|
 | 441 | +  | 
|---|
| 297 | 442 |  struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | 
|---|
| 298 | 443 |  				       const struct bpf_insn *patch, u32 len) | 
|---|
| 299 | 444 |  { | 
|---|
| 300 | 445 |  	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; | 
|---|
| 301 | 446 |  	const u32 cnt_max = S16_MAX; | 
|---|
| 302 | 447 |  	struct bpf_prog *prog_adj; | 
|---|
 | 448 | +	int err;  | 
|---|
| 303 | 449 |   | 
|---|
| 304 | 450 |  	/* Since our patchlet doesn't expand the image, we're done. */ | 
|---|
| 305 | 451 |  	if (insn_delta == 0) { | 
|---|
| .. | .. | 
|---|
| 315 | 461 |  	 * we afterwards may not fail anymore. | 
|---|
| 316 | 462 |  	 */ | 
|---|
| 317 | 463 |  	if (insn_adj_cnt > cnt_max && | 
|---|
| 318 |  | -	    bpf_adj_branches(prog, off, insn_delta, true))  | 
|---|
| 319 |  | -		return NULL;  | 
|---|
 | 464 | +	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))  | 
|---|
 | 465 | +		return ERR_PTR(err);  | 
|---|
| 320 | 466 |   | 
|---|
| 321 | 467 |  	/* Several new instructions need to be inserted. Make room | 
|---|
| 322 | 468 |  	 * for them. Likely, there's no need for a new allocation as | 
|---|
| .. | .. | 
|---|
| 325 | 471 |  	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), | 
|---|
| 326 | 472 |  				    GFP_USER); | 
|---|
| 327 | 473 |  	if (!prog_adj) | 
|---|
| 328 |  | -		return NULL;  | 
|---|
 | 474 | +		return ERR_PTR(-ENOMEM);  | 
|---|
| 329 | 475 |   | 
|---|
| 330 | 476 |  	prog_adj->len = insn_adj_cnt; | 
|---|
| 331 | 477 |   | 
|---|
| .. | .. | 
|---|
| 347 | 493 |  	 * the ship has sailed to reverse to the original state. An | 
|---|
| 348 | 494 |  	 * overflow cannot happen at this point. | 
|---|
| 349 | 495 |  	 */ | 
|---|
| 350 |  | -	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));  | 
|---|
 | 496 | +	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));  | 
|---|
 | 497 | +  | 
|---|
 | 498 | +	bpf_adj_linfo(prog_adj, off, insn_delta);  | 
|---|
| 351 | 499 |   | 
|---|
| 352 | 500 |  	return prog_adj; | 
|---|
| 353 | 501 |  } | 
|---|
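`bpf_patch_insn_single()` now reports why it failed (`-ENOMEM` from the realloc vs. `-ERANGE` from the branch-overflow probe pass) via `ERR_PTR()` rather than a bare NULL, so callers must test with `IS_ERR()`/`PTR_ERR()`. A sketch of the caller-side pattern (`patch_one()` is a hypothetical wrapper, not a kernel function):

```c
#include <linux/err.h>
#include <linux/filter.h>	/* struct bpf_prog, struct bpf_insn */

static int patch_one(struct bpf_prog **prog, u32 off,
		     const struct bpf_insn *patch, u32 len)
{
	struct bpf_prog *tmp;

	tmp = bpf_patch_insn_single(*prog, off, patch, len);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);	/* -ENOMEM or -ERANGE */

	*prog = tmp;	/* the realloc may have moved the program */
	return 0;
}
```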
| 354 | 502 |   | 
|---|
| 355 |  | -void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)  | 
|---|
 | 503 | +int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)  | 
|---|
 | 504 | +{  | 
|---|
 | 505 | +	/* Branch offsets can't overflow when the program is shrinking,  | 
|---|
 | 506 | +	 * so there is no need to call bpf_adj_branches(..., true) here.  | 
|---|
 | 507 | +	 */  | 
|---|
 | 508 | +	memmove(prog->insnsi + off, prog->insnsi + off + cnt,  | 
|---|
 | 509 | +		sizeof(struct bpf_insn) * (prog->len - off - cnt));  | 
|---|
 | 510 | +	prog->len -= cnt;  | 
|---|
 | 511 | +  | 
|---|
 | 512 | +	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));  | 
|---|
 | 513 | +}  | 
|---|
 | 514 | +  | 
|---|
 | 515 | +static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)  | 
|---|
| 356 | 516 |  { | 
|---|
| 357 | 517 |  	int i; | 
|---|
| 358 | 518 |   | 
|---|
| .. | .. | 
|---|
| 368 | 528 |   | 
|---|
| 369 | 529 |  #ifdef CONFIG_BPF_JIT | 
|---|
| 370 | 530 |  /* All BPF JIT sysctl knobs here. */ | 
|---|
| 371 |  | -int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);  | 
|---|
 | 531 | +int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);  | 
|---|
 | 532 | +int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);  | 
|---|
| 372 | 533 |  int bpf_jit_harden   __read_mostly; | 
|---|
| 373 |  | -int bpf_jit_kallsyms __read_mostly;  | 
|---|
| 374 | 534 |  long bpf_jit_limit   __read_mostly; | 
|---|
| 375 | 535 |  long bpf_jit_limit_max __read_mostly; | 
|---|
| 376 | 536 |   | 
|---|
| 377 |  | -static __always_inline void  | 
|---|
| 378 |  | -bpf_get_prog_addr_region(const struct bpf_prog *prog,  | 
|---|
| 379 |  | -			 unsigned long *symbol_start,  | 
|---|
| 380 |  | -			 unsigned long *symbol_end)  | 
|---|
 | 537 | +static void  | 
|---|
 | 538 | +bpf_prog_ksym_set_addr(struct bpf_prog *prog)  | 
|---|
| 381 | 539 |  { | 
|---|
| 382 | 540 |  	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog); | 
|---|
| 383 | 541 |  	unsigned long addr = (unsigned long)hdr; | 
|---|
| 384 | 542 |   | 
|---|
| 385 | 543 |  	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); | 
|---|
| 386 | 544 |   | 
|---|
| 387 |  | -	*symbol_start = addr;  | 
|---|
| 388 |  | -	*symbol_end   = addr + hdr->pages * PAGE_SIZE;  | 
|---|
 | 545 | +	prog->aux->ksym.start = (unsigned long) prog->bpf_func;  | 
|---|
 | 546 | +	prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;  | 
|---|
| 389 | 547 |  } | 
|---|
| 390 | 548 |   | 
|---|
| 391 |  | -static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)  | 
|---|
 | 549 | +static void  | 
|---|
 | 550 | +bpf_prog_ksym_set_name(struct bpf_prog *prog)  | 
|---|
| 392 | 551 |  { | 
|---|
 | 552 | +	char *sym = prog->aux->ksym.name;  | 
|---|
| 393 | 553 |  	const char *end = sym + KSYM_NAME_LEN; | 
|---|
 | 554 | +	const struct btf_type *type;  | 
|---|
 | 555 | +	const char *func_name;  | 
|---|
| 394 | 556 |   | 
|---|
| 395 | 557 |  	BUILD_BUG_ON(sizeof("bpf_prog_") + | 
|---|
| 396 | 558 |  		     sizeof(prog->tag) * 2 + | 
|---|
| .. | .. | 
|---|
| 405 | 567 |   | 
|---|
| 406 | 568 |  	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); | 
|---|
| 407 | 569 |  	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag)); | 
|---|
 | 570 | +  | 
|---|
 | 571 | +	/* prog->aux->name will be ignored if full btf name is available */  | 
|---|
 | 572 | +	if (prog->aux->func_info_cnt) {  | 
|---|
 | 573 | +		type = btf_type_by_id(prog->aux->btf,  | 
|---|
 | 574 | +				      prog->aux->func_info[prog->aux->func_idx].type_id);  | 
|---|
 | 575 | +		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);  | 
|---|
 | 576 | +		snprintf(sym, (size_t)(end - sym), "_%s", func_name);  | 
|---|
 | 577 | +		return;  | 
|---|
 | 578 | +	}  | 
|---|
 | 579 | +  | 
|---|
| 408 | 580 |  	if (prog->aux->name[0]) | 
|---|
| 409 | 581 |  		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); | 
|---|
| 410 | 582 |  	else | 
|---|
| 411 | 583 |  		*sym = 0; | 
|---|
| 412 | 584 |  } | 
|---|
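With BTF available, the kallsyms entry is now named after the function's BTF name instead of `prog->aux->name`. A sketch of the resulting `/proc/kallsyms` format, with made-up tag bytes and function name:

```c
#include <linux/kallsyms.h>	/* KSYM_NAME_LEN */
#include <linux/kernel.h>	/* snprintf(), bin2hex() */

/* Produces "bpf_prog_<16-hex-char-tag>_<btf-or-prog-name>". */
static void ksym_name_example(char sym[KSYM_NAME_LEN])
{
	const u8 tag[8] = { 0x5a, 0x8e, 0x02, 0xa4,
			    0xc3, 0x7f, 0xd5, 0xd3 };	/* made up */
	const char *end = sym + KSYM_NAME_LEN;
	char *p = sym;

	p += snprintf(p, KSYM_NAME_LEN, "bpf_prog_");
	p  = bin2hex(p, tag, sizeof(tag));	/* writes no NUL */
	snprintf(p, end - p, "_%s", "my_func");
	/* sym == "bpf_prog_5a8e02a4c37fd5d3_my_func" */
}
```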
| 413 | 585 |   | 
|---|
| 414 |  | -static __always_inline unsigned long  | 
|---|
| 415 |  | -bpf_get_prog_addr_start(struct latch_tree_node *n)  | 
|---|
 | 586 | +static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)  | 
|---|
| 416 | 587 |  { | 
|---|
| 417 |  | -	unsigned long symbol_start, symbol_end;  | 
|---|
| 418 |  | -	const struct bpf_prog_aux *aux;  | 
|---|
| 419 |  | -  | 
|---|
| 420 |  | -	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);  | 
|---|
| 421 |  | -	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);  | 
|---|
| 422 |  | -  | 
|---|
| 423 |  | -	return symbol_start;  | 
|---|
 | 588 | +	return container_of(n, struct bpf_ksym, tnode)->start;  | 
|---|
| 424 | 589 |  } | 
|---|
| 425 | 590 |   | 
|---|
| 426 | 591 |  static __always_inline bool bpf_tree_less(struct latch_tree_node *a, | 
|---|
| 427 | 592 |  					  struct latch_tree_node *b) | 
|---|
| 428 | 593 |  { | 
|---|
| 429 |  | -	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);  | 
|---|
 | 594 | +	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);  | 
|---|
| 430 | 595 |  } | 
|---|
| 431 | 596 |   | 
|---|
| 432 | 597 |  static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) | 
|---|
| 433 | 598 |  { | 
|---|
| 434 | 599 |  	unsigned long val = (unsigned long)key; | 
|---|
| 435 |  | -	unsigned long symbol_start, symbol_end;  | 
|---|
| 436 |  | -	const struct bpf_prog_aux *aux;  | 
|---|
 | 600 | +	const struct bpf_ksym *ksym;  | 
|---|
| 437 | 601 |   | 
|---|
| 438 |  | -	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);  | 
|---|
| 439 |  | -	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);  | 
|---|
 | 602 | +	ksym = container_of(n, struct bpf_ksym, tnode);  | 
|---|
| 440 | 603 |   | 
|---|
| 441 |  | -	if (val < symbol_start)  | 
|---|
 | 604 | +	if (val < ksym->start)  | 
|---|
| 442 | 605 |  		return -1; | 
|---|
| 443 |  | -	if (val >= symbol_end)  | 
|---|
 | 606 | +	if (val >= ksym->end)  | 
|---|
| 444 | 607 |  		return  1; | 
|---|
| 445 | 608 |   | 
|---|
| 446 | 609 |  	return 0; | 
|---|
| .. | .. | 
|---|
| 455 | 618 |  static LIST_HEAD(bpf_kallsyms); | 
|---|
| 456 | 619 |  static struct latch_tree_root bpf_tree __cacheline_aligned; | 
|---|
| 457 | 620 |   | 
|---|
| 458 |  | -static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)  | 
|---|
 | 621 | +void bpf_ksym_add(struct bpf_ksym *ksym)  | 
|---|
| 459 | 622 |  { | 
|---|
| 460 |  | -	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));  | 
|---|
| 461 |  | -	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);  | 
|---|
| 462 |  | -	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);  | 
|---|
 | 623 | +	spin_lock_bh(&bpf_lock);  | 
|---|
 | 624 | +	WARN_ON_ONCE(!list_empty(&ksym->lnode));  | 
|---|
 | 625 | +	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);  | 
|---|
 | 626 | +	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);  | 
|---|
 | 627 | +	spin_unlock_bh(&bpf_lock);  | 
|---|
| 463 | 628 |  } | 
|---|
| 464 | 629 |   | 
|---|
| 465 |  | -static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)  | 
|---|
 | 630 | +static void __bpf_ksym_del(struct bpf_ksym *ksym)  | 
|---|
| 466 | 631 |  { | 
|---|
| 467 |  | -	if (list_empty(&aux->ksym_lnode))  | 
|---|
 | 632 | +	if (list_empty(&ksym->lnode))  | 
|---|
| 468 | 633 |  		return; | 
|---|
| 469 | 634 |   | 
|---|
| 470 |  | -	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);  | 
|---|
| 471 |  | -	list_del_rcu(&aux->ksym_lnode);  | 
|---|
 | 635 | +	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);  | 
|---|
 | 636 | +	list_del_rcu(&ksym->lnode);  | 
|---|
 | 637 | +}  | 
|---|
 | 638 | +  | 
|---|
 | 639 | +void bpf_ksym_del(struct bpf_ksym *ksym)  | 
|---|
 | 640 | +{  | 
|---|
 | 641 | +	spin_lock_bh(&bpf_lock);  | 
|---|
 | 642 | +	__bpf_ksym_del(ksym);  | 
|---|
 | 643 | +	spin_unlock_bh(&bpf_lock);  | 
|---|
| 472 | 644 |  } | 
|---|
| 473 | 645 |   | 
|---|
| 474 | 646 |  static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) | 
|---|
| .. | .. | 
|---|
| 478 | 650 |   | 
|---|
| 479 | 651 |  static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) | 
|---|
| 480 | 652 |  { | 
|---|
| 481 |  | -	return list_empty(&fp->aux->ksym_lnode) ||  | 
|---|
| 482 |  | -	       fp->aux->ksym_lnode.prev == LIST_POISON2;  | 
|---|
 | 653 | +	return list_empty(&fp->aux->ksym.lnode) ||  | 
|---|
 | 654 | +	       fp->aux->ksym.lnode.prev == LIST_POISON2;  | 
|---|
| 483 | 655 |  } | 
|---|
| 484 | 656 |   | 
|---|
| 485 | 657 |  void bpf_prog_kallsyms_add(struct bpf_prog *fp) | 
|---|
| 486 | 658 |  { | 
|---|
| 487 | 659 |  	if (!bpf_prog_kallsyms_candidate(fp) || | 
|---|
| 488 |  | -	    !capable(CAP_SYS_ADMIN))  | 
|---|
 | 660 | +	    !bpf_capable())  | 
|---|
| 489 | 661 |  		return; | 
|---|
| 490 | 662 |   | 
|---|
| 491 |  | -	spin_lock_bh(&bpf_lock);  | 
|---|
| 492 |  | -	bpf_prog_ksym_node_add(fp->aux);  | 
|---|
| 493 |  | -	spin_unlock_bh(&bpf_lock);  | 
|---|
 | 663 | +	bpf_prog_ksym_set_addr(fp);  | 
|---|
 | 664 | +	bpf_prog_ksym_set_name(fp);  | 
|---|
 | 665 | +	fp->aux->ksym.prog = true;  | 
|---|
 | 666 | +  | 
|---|
 | 667 | +	bpf_ksym_add(&fp->aux->ksym);  | 
|---|
| 494 | 668 |  } | 
|---|
| 495 | 669 |   | 
|---|
| 496 | 670 |  void bpf_prog_kallsyms_del(struct bpf_prog *fp) | 
|---|
| .. | .. | 
|---|
| 498 | 672 |  	if (!bpf_prog_kallsyms_candidate(fp)) | 
|---|
| 499 | 673 |  		return; | 
|---|
| 500 | 674 |   | 
|---|
| 501 |  | -	spin_lock_bh(&bpf_lock);  | 
|---|
| 502 |  | -	bpf_prog_ksym_node_del(fp->aux);  | 
|---|
| 503 |  | -	spin_unlock_bh(&bpf_lock);  | 
|---|
 | 675 | +	bpf_ksym_del(&fp->aux->ksym);  | 
|---|
| 504 | 676 |  } | 
|---|
| 505 | 677 |   | 
|---|
| 506 |  | -static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)  | 
|---|
 | 678 | +static struct bpf_ksym *bpf_ksym_find(unsigned long addr)  | 
|---|
| 507 | 679 |  { | 
|---|
| 508 | 680 |  	struct latch_tree_node *n; | 
|---|
| 509 | 681 |   | 
|---|
| 510 |  | -	if (!bpf_jit_kallsyms_enabled())  | 
|---|
| 511 |  | -		return NULL;  | 
|---|
| 512 |  | -  | 
|---|
| 513 | 682 |  	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); | 
|---|
| 514 |  | -	return n ?  | 
|---|
| 515 |  | -	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :  | 
|---|
| 516 |  | -	       NULL;  | 
|---|
 | 683 | +	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;  | 
|---|
| 517 | 684 |  } | 
|---|
| 518 | 685 |   | 
|---|
| 519 | 686 |  const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, | 
|---|
| 520 | 687 |  				 unsigned long *off, char *sym) | 
|---|
| 521 | 688 |  { | 
|---|
| 522 |  | -	unsigned long symbol_start, symbol_end;  | 
|---|
| 523 |  | -	struct bpf_prog *prog;  | 
|---|
 | 689 | +	struct bpf_ksym *ksym;  | 
|---|
| 524 | 690 |  	char *ret = NULL; | 
|---|
| 525 | 691 |   | 
|---|
| 526 | 692 |  	rcu_read_lock(); | 
|---|
| 527 |  | -	prog = bpf_prog_kallsyms_find(addr);  | 
|---|
| 528 |  | -	if (prog) {  | 
|---|
| 529 |  | -		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);  | 
|---|
| 530 |  | -		bpf_get_prog_name(prog, sym);  | 
|---|
 | 693 | +	ksym = bpf_ksym_find(addr);  | 
|---|
 | 694 | +	if (ksym) {  | 
|---|
 | 695 | +		unsigned long symbol_start = ksym->start;  | 
|---|
 | 696 | +		unsigned long symbol_end = ksym->end;  | 
|---|
 | 697 | +  | 
|---|
 | 698 | +		strncpy(sym, ksym->name, KSYM_NAME_LEN);  | 
|---|
| 531 | 699 |   | 
|---|
| 532 | 700 |  		ret = sym; | 
|---|
| 533 | 701 |  		if (size) | 
|---|
| .. | .. | 
|---|
| 545 | 713 |  	bool ret; | 
|---|
| 546 | 714 |   | 
|---|
| 547 | 715 |  	rcu_read_lock(); | 
|---|
| 548 |  | -	ret = bpf_prog_kallsyms_find(addr) != NULL;  | 
|---|
 | 716 | +	ret = bpf_ksym_find(addr) != NULL;  | 
|---|
| 549 | 717 |  	rcu_read_unlock(); | 
|---|
| 550 | 718 |   | 
|---|
| 551 | 719 |  	return ret; | 
|---|
| 552 | 720 |  } | 
|---|
| 553 | 721 |   | 
|---|
 | 722 | +static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)  | 
|---|
 | 723 | +{  | 
|---|
 | 724 | +	struct bpf_ksym *ksym = bpf_ksym_find(addr);  | 
|---|
 | 725 | +  | 
|---|
 | 726 | +	return ksym && ksym->prog ?  | 
|---|
 | 727 | +	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :  | 
|---|
 | 728 | +	       NULL;  | 
|---|
 | 729 | +}  | 
|---|
 | 730 | +  | 
|---|
 | 731 | +const struct exception_table_entry *search_bpf_extables(unsigned long addr)  | 
|---|
 | 732 | +{  | 
|---|
 | 733 | +	const struct exception_table_entry *e = NULL;  | 
|---|
 | 734 | +	struct bpf_prog *prog;  | 
|---|
 | 735 | +  | 
|---|
 | 736 | +	rcu_read_lock();  | 
|---|
 | 737 | +	prog = bpf_prog_ksym_find(addr);  | 
|---|
 | 738 | +	if (!prog)  | 
|---|
 | 739 | +		goto out;  | 
|---|
 | 740 | +	if (!prog->aux->num_exentries)  | 
|---|
 | 741 | +		goto out;  | 
|---|
 | 742 | +  | 
|---|
 | 743 | +	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);  | 
|---|
 | 744 | +out:  | 
|---|
 | 745 | +	rcu_read_unlock();  | 
|---|
 | 746 | +	return e;  | 
|---|
 | 747 | +}  | 
|---|
 | 748 | +  | 
|---|
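`search_bpf_extables()` mirrors the kernel's `fixup_exception()` path for JITed code: `BPF_PROBE_MEM` loads get exception-table entries (which the JIT must emit sorted by address, since `search_extable()` binary-searches) so a fault inside a JITed program can be recovered instead of oopsing. A simplified sketch of the fault-side query (`bpf_fault_has_fixup()` is hypothetical):

```c
#include <linux/extable.h>
#include <linux/types.h>

/* Ask whether a faulting instruction pointer has a BPF fixup. */
static bool bpf_fault_has_fixup(unsigned long ip)
{
	const struct exception_table_entry *e;

	e = search_bpf_extables(ip);	/* NULL unless ip is JITed BPF */

	/* The arch fault handler would now apply e's fixup, e.g. zero
	 * the destination register and skip the faulting load. */
	return e != NULL;
}
```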
| 554 | 749 |  int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | 
|---|
| 555 | 750 |  		    char *sym) | 
|---|
| 556 | 751 |  { | 
|---|
| 557 |  | -	unsigned long symbol_start, symbol_end;  | 
|---|
| 558 |  | -	struct bpf_prog_aux *aux;  | 
|---|
 | 752 | +	struct bpf_ksym *ksym;  | 
|---|
| 559 | 753 |  	unsigned int it = 0; | 
|---|
| 560 | 754 |  	int ret = -ERANGE; | 
|---|
| 561 | 755 |   | 
|---|
| .. | .. | 
|---|
| 563 | 757 |  		return ret; | 
|---|
| 564 | 758 |   | 
|---|
| 565 | 759 |  	rcu_read_lock(); | 
|---|
| 566 |  | -	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {  | 
|---|
 | 760 | +	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {  | 
|---|
| 567 | 761 |  		if (it++ != symnum) | 
|---|
| 568 | 762 |  			continue; | 
|---|
| 569 | 763 |   | 
|---|
| 570 |  | -		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);  | 
|---|
| 571 |  | -		bpf_get_prog_name(aux->prog, sym);  | 
|---|
 | 764 | +		strncpy(sym, ksym->name, KSYM_NAME_LEN);  | 
|---|
| 572 | 765 |   | 
|---|
| 573 |  | -		*value = symbol_start;  | 
|---|
 | 766 | +		*value = ksym->start;  | 
|---|
| 574 | 767 |  		*type  = BPF_SYM_ELF_TYPE; | 
|---|
| 575 | 768 |   | 
|---|
| 576 | 769 |  		ret = 0; | 
|---|
| .. | .. | 
|---|
| 579 | 772 |  	rcu_read_unlock(); | 
|---|
| 580 | 773 |   | 
|---|
| 581 | 774 |  	return ret; | 
|---|
 | 775 | +}  | 
|---|
 | 776 | +  | 
|---|
 | 777 | +int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,  | 
|---|
 | 778 | +				struct bpf_jit_poke_descriptor *poke)  | 
|---|
 | 779 | +{  | 
|---|
 | 780 | +	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;  | 
|---|
 | 781 | +	static const u32 poke_tab_max = 1024;  | 
|---|
 | 782 | +	u32 slot = prog->aux->size_poke_tab;  | 
|---|
 | 783 | +	u32 size = slot + 1;  | 
|---|
 | 784 | +  | 
|---|
 | 785 | +	if (size > poke_tab_max)  | 
|---|
 | 786 | +		return -ENOSPC;  | 
|---|
 | 787 | +	if (poke->tailcall_target || poke->tailcall_target_stable ||  | 
|---|
 | 788 | +	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)  | 
|---|
 | 789 | +		return -EINVAL;  | 
|---|
 | 790 | +  | 
|---|
 | 791 | +	switch (poke->reason) {  | 
|---|
 | 792 | +	case BPF_POKE_REASON_TAIL_CALL:  | 
|---|
 | 793 | +		if (!poke->tail_call.map)  | 
|---|
 | 794 | +			return -EINVAL;  | 
|---|
 | 795 | +		break;  | 
|---|
 | 796 | +	default:  | 
|---|
 | 797 | +		return -EINVAL;  | 
|---|
 | 798 | +	}  | 
|---|
 | 799 | +  | 
|---|
 | 800 | +	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);  | 
|---|
 | 801 | +	if (!tab)  | 
|---|
 | 802 | +		return -ENOMEM;  | 
|---|
 | 803 | +  | 
|---|
 | 804 | +	memcpy(&tab[slot], poke, sizeof(*poke));  | 
|---|
 | 805 | +	prog->aux->size_poke_tab = size;  | 
|---|
 | 806 | +	prog->aux->poke_tab = tab;  | 
|---|
 | 807 | +  | 
|---|
 | 808 | +	return slot;  | 
|---|
| 582 | 809 |  } | 
|---|
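The poke table grows one descriptor at a time with `krealloc()`; on failure the original allocation is left intact, so `prog->aux->poke_tab` stays valid and the caller keeps the returned slot index. The same append pattern in isolation (`array_push()` is illustrative, not a kernel API):

```c
#include <linux/slab.h>
#include <linux/types.h>

/* Append one element to a kmalloc'd array; returns the new element's
 * index, or -ENOMEM with *arr left untouched. */
static int array_push(int **arr, u32 *nr, int val)
{
	int *tab;

	tab = krealloc(*arr, (*nr + 1) * sizeof(*tab), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;	/* *arr is still valid and unchanged */

	tab[*nr] = val;
	*arr = tab;
	return (*nr)++;
}
```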
| 583 | 810 |   | 
|---|
| 584 | 811 |  static atomic_long_t bpf_jit_current; | 
|---|
| .. | .. | 
|---|
| 606 | 833 |  } | 
|---|
| 607 | 834 |  pure_initcall(bpf_jit_charge_init); | 
|---|
| 608 | 835 |   | 
|---|
| 609 |  | -static int bpf_jit_charge_modmem(u32 pages)  | 
|---|
 | 836 | +int bpf_jit_charge_modmem(u32 pages)  | 
|---|
| 610 | 837 |  { | 
|---|
| 611 | 838 |  	if (atomic_long_add_return(pages, &bpf_jit_current) > | 
|---|
| 612 | 839 |  	    (bpf_jit_limit >> PAGE_SHIFT)) { | 
|---|
| 613 |  | -		if (!capable(CAP_SYS_ADMIN)) {  | 
|---|
 | 840 | +		if (!bpf_capable()) {  | 
|---|
| 614 | 841 |  			atomic_long_sub(pages, &bpf_jit_current); | 
|---|
| 615 | 842 |  			return -EPERM; | 
|---|
| 616 | 843 |  		} | 
|---|
| .. | .. | 
|---|
| 619 | 846 |  	return 0; | 
|---|
| 620 | 847 |  } | 
|---|
| 621 | 848 |   | 
|---|
| 622 |  | -static void bpf_jit_uncharge_modmem(u32 pages)  | 
|---|
 | 849 | +void bpf_jit_uncharge_modmem(u32 pages)  | 
|---|
| 623 | 850 |  { | 
|---|
| 624 | 851 |  	atomic_long_sub(pages, &bpf_jit_current); | 
|---|
| 625 | 852 |  } | 
|---|
| 626 | 853 |   | 
|---|
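Dropping `static` from the charge/uncharge pair lets other JIT-image allocations account against the same `bpf_jit_limit`. The scheme is optimistic: add first, test the limit, and back out only for unprivileged callers. The bare pattern (names illustrative):

```c
#include <linux/atomic.h>
#include <linux/types.h>

static atomic_long_t used;	/* illustrative counter, in pages */

static int charge(u32 pages, long limit_pages, bool privileged)
{
	if (atomic_long_add_return(pages, &used) > limit_pages &&
	    !privileged) {
		atomic_long_sub(pages, &used);	/* undo optimistic add */
		return -EPERM;
	}
	return 0;	/* privileged callers may exceed the limit */
}

static void uncharge(u32 pages)
{
	atomic_long_sub(pages, &used);
}
```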
| 627 |  | -#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)  | 
|---|
| 628 |  | -bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog)  | 
|---|
 | 854 | +void *__weak bpf_jit_alloc_exec(unsigned long size)  | 
|---|
| 629 | 855 |  { | 
|---|
| 630 |  | -	return true;  | 
|---|
 | 856 | +	return module_alloc(size);  | 
|---|
| 631 | 857 |  } | 
|---|
| 632 |  | -EXPORT_SYMBOL_GPL(arch_bpf_jit_check_func);  | 
|---|
| 633 |  | -#endif  | 
|---|
 | 858 | +  | 
|---|
 | 859 | +void __weak bpf_jit_free_exec(void *addr)  | 
|---|
 | 860 | +{  | 
|---|
 | 861 | +	module_memfree(addr);  | 
|---|
 | 862 | +}  | 
|---|
| 634 | 863 |   | 
|---|
| 635 | 864 |  struct bpf_binary_header * | 
|---|
| 636 | 865 |  bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, | 
|---|
| .. | .. | 
|---|
| 639 | 868 |  { | 
|---|
| 640 | 869 |  	struct bpf_binary_header *hdr; | 
|---|
| 641 | 870 |  	u32 size, hole, start, pages; | 
|---|
 | 871 | +  | 
|---|
 | 872 | +	WARN_ON_ONCE(!is_power_of_2(alignment) ||  | 
|---|
 | 873 | +		     alignment > BPF_IMAGE_ALIGNMENT);  | 
|---|
| 642 | 874 |   | 
|---|
| 643 | 875 |  	/* Most of BPF filters are really small, but if some of them | 
|---|
| 644 | 876 |  	 * fill a page, allow at least 128 extra bytes to insert a | 
|---|
| .. | .. | 
|---|
| 649 | 881 |   | 
|---|
| 650 | 882 |  	if (bpf_jit_charge_modmem(pages)) | 
|---|
| 651 | 883 |  		return NULL; | 
|---|
| 652 |  | -	hdr = module_alloc(size);  | 
|---|
 | 884 | +	hdr = bpf_jit_alloc_exec(size);  | 
|---|
| 653 | 885 |  	if (!hdr) { | 
|---|
| 654 | 886 |  		bpf_jit_uncharge_modmem(pages); | 
|---|
| 655 | 887 |  		return NULL; | 
|---|
| .. | .. | 
|---|
| 658 | 890 |  	/* Fill space with illegal/arch-dep instructions. */ | 
|---|
| 659 | 891 |  	bpf_fill_ill_insns(hdr, size); | 
|---|
| 660 | 892 |   | 
|---|
| 661 |  | -	bpf_jit_set_header_magic(hdr);  | 
|---|
| 662 | 893 |  	hdr->pages = pages; | 
|---|
| 663 | 894 |  	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), | 
|---|
| 664 | 895 |  		     PAGE_SIZE - sizeof(*hdr)); | 
|---|
| .. | .. | 
|---|
| 674 | 905 |  { | 
|---|
| 675 | 906 |  	u32 pages = hdr->pages; | 
|---|
| 676 | 907 |   | 
|---|
| 677 |  | -	module_memfree(hdr);  | 
|---|
 | 908 | +	trace_android_vh_set_memory_rw((unsigned long)hdr, pages);  | 
|---|
 | 909 | +	trace_android_vh_set_memory_nx((unsigned long)hdr, pages);  | 
|---|
 | 910 | +	bpf_jit_free_exec(hdr);  | 
|---|
| 678 | 911 |  	bpf_jit_uncharge_modmem(pages); | 
|---|
| 679 | 912 |  } | 
|---|
| 680 | 913 |   | 
|---|
| .. | .. | 
|---|
| 687 | 920 |  	if (fp->jited) { | 
|---|
| 688 | 921 |  		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); | 
|---|
| 689 | 922 |   | 
|---|
| 690 |  | -		bpf_jit_binary_unlock_ro(hdr);  | 
|---|
| 691 | 923 |  		bpf_jit_binary_free(hdr); | 
|---|
| 692 | 924 |   | 
|---|
| 693 | 925 |  		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); | 
|---|
| .. | .. | 
|---|
| 696 | 928 |  	bpf_prog_unlock_free(fp); | 
|---|
| 697 | 929 |  } | 
|---|
| 698 | 930 |   | 
|---|
 | 931 | +int bpf_jit_get_func_addr(const struct bpf_prog *prog,  | 
|---|
 | 932 | +			  const struct bpf_insn *insn, bool extra_pass,  | 
|---|
 | 933 | +			  u64 *func_addr, bool *func_addr_fixed)  | 
|---|
 | 934 | +{  | 
|---|
 | 935 | +	s16 off = insn->off;  | 
|---|
 | 936 | +	s32 imm = insn->imm;  | 
|---|
 | 937 | +	u8 *addr;  | 
|---|
 | 938 | +  | 
|---|
 | 939 | +	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;  | 
|---|
 | 940 | +	if (!*func_addr_fixed) {  | 
|---|
 | 941 | +		/* Place-holder address till the last pass has collected  | 
|---|
 | 942 | +		 * all addresses for JITed subprograms in which case we  | 
|---|
 | 943 | +		 * can pick them up from prog->aux.  | 
|---|
 | 944 | +		 */  | 
|---|
 | 945 | +		if (!extra_pass)  | 
|---|
 | 946 | +			addr = NULL;  | 
|---|
 | 947 | +		else if (prog->aux->func &&  | 
|---|
 | 948 | +			 off >= 0 && off < prog->aux->func_cnt)  | 
|---|
 | 949 | +			addr = (u8 *)prog->aux->func[off]->bpf_func;  | 
|---|
 | 950 | +		else  | 
|---|
 | 951 | +			return -EINVAL;  | 
|---|
 | 952 | +	} else {  | 
|---|
 | 953 | +		/* Address of a BPF helper call. Since part of the core  | 
|---|
 | 954 | +		 * kernel, it's always at a fixed location. __bpf_call_base  | 
|---|
 | 955 | +		 * and the helper with imm relative to it are both in core  | 
|---|
 | 956 | +		 * kernel.  | 
|---|
 | 957 | +		 */  | 
|---|
 | 958 | +		addr = (u8 *)__bpf_call_base + imm;  | 
|---|
 | 959 | +	}  | 
|---|
 | 960 | +  | 
|---|
 | 961 | +	*func_addr = (unsigned long)addr;  | 
|---|
 | 962 | +	return 0;  | 
|---|
 | 963 | +}  | 
|---|
 | 964 | +  | 
|---|
| 699 | 965 |  static int bpf_jit_blind_insn(const struct bpf_insn *from, | 
|---|
| 700 | 966 |  			      const struct bpf_insn *aux, | 
|---|
| 701 |  | -			      struct bpf_insn *to_buff)  | 
|---|
 | 967 | +			      struct bpf_insn *to_buff,  | 
|---|
 | 968 | +			      bool emit_zext)  | 
|---|
| 702 | 969 |  { | 
|---|
| 703 | 970 |  	struct bpf_insn *to = to_buff; | 
|---|
| 704 | 971 |  	u32 imm_rnd = get_random_int(); | 
|---|
| .. | .. | 
|---|
| 717 | 984 |  	 * below. | 
|---|
| 718 | 985 |  	 * | 
|---|
| 719 | 986 |  	 * Constant blinding is only used by JITs, not in the interpreter. | 
|---|
 | 987 | +	 * The interpreter sometimes uses AX as a local temporary  | 
|---|
 | 988 | +	 * register, e.g. in DIV or MOD instructions.  | 
|---|
 | 989 | +	 *  | 
|---|
| 720 | 990 |  	 * In restricted circumstances, the verifier can also use the AX | 
|---|
| 721 | 991 |  	 * register for rewrites as long as they do not interfere with | 
|---|
| 722 | 992 |  	 * the above cases! | 
|---|
| .. | .. | 
|---|
| 780 | 1050 |  		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); | 
|---|
| 781 | 1051 |  		break; | 
|---|
| 782 | 1052 |   | 
|---|
 | 1053 | +	case BPF_JMP32 | BPF_JEQ  | BPF_K:  | 
|---|
 | 1054 | +	case BPF_JMP32 | BPF_JNE  | BPF_K:  | 
|---|
 | 1055 | +	case BPF_JMP32 | BPF_JGT  | BPF_K:  | 
|---|
 | 1056 | +	case BPF_JMP32 | BPF_JLT  | BPF_K:  | 
|---|
 | 1057 | +	case BPF_JMP32 | BPF_JGE  | BPF_K:  | 
|---|
 | 1058 | +	case BPF_JMP32 | BPF_JLE  | BPF_K:  | 
|---|
 | 1059 | +	case BPF_JMP32 | BPF_JSGT | BPF_K:  | 
|---|
 | 1060 | +	case BPF_JMP32 | BPF_JSLT | BPF_K:  | 
|---|
 | 1061 | +	case BPF_JMP32 | BPF_JSGE | BPF_K:  | 
|---|
 | 1062 | +	case BPF_JMP32 | BPF_JSLE | BPF_K:  | 
|---|
 | 1063 | +	case BPF_JMP32 | BPF_JSET | BPF_K:  | 
|---|
 | 1064 | +		/* Account for the extra offset in case of a backjump. */  | 
|---|
 | 1065 | +		off = from->off;  | 
|---|
 | 1066 | +		if (off < 0)  | 
|---|
 | 1067 | +			off -= 2;  | 
|---|
 | 1068 | +		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);  | 
|---|
 | 1069 | +		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);  | 
|---|
 | 1070 | +		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,  | 
|---|
 | 1071 | +				      off);  | 
|---|
 | 1072 | +		break;  | 
|---|
 | 1073 | +  | 
|---|
| 783 | 1074 |  	case BPF_LD | BPF_IMM | BPF_DW: | 
|---|
| 784 | 1075 |  		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); | 
|---|
| 785 | 1076 |  		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); | 
|---|
| .. | .. | 
|---|
| 789 | 1080 |  	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ | 
|---|
| 790 | 1081 |  		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); | 
|---|
| 791 | 1082 |  		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); | 
|---|
 | 1083 | +		if (emit_zext)  | 
|---|
 | 1084 | +			*to++ = BPF_ZEXT_REG(BPF_REG_AX);  | 
|---|
| 792 | 1085 |  		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX); | 
|---|
| 793 | 1086 |  		break; | 
|---|
| 794 | 1087 |   | 
|---|
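Constant blinding keeps attacker-chosen immediates out of the JIT image: the emitted code materializes `imm_rnd ^ imm` and XORs `imm_rnd` back at runtime in the hidden AX register, relying on the identity `(r ^ v) ^ r == v`; the new JMP32 cases above extend this to 32-bit compare immediates. A standalone check of the identity:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t imm = 0xdeadbeef;	/* attacker-chosen constant  */
	uint32_t rnd = 0x1b3c5d7e;	/* per-rewrite random value  */

	/* What lands in the image is rnd ^ imm, never imm itself;
	 * the emitted XOR undoes the blinding at runtime: */
	uint32_t ax = rnd ^ imm;	/* BPF_MOV AX, imm_rnd ^ imm */
	ax ^= rnd;			/* BPF_XOR AX, imm_rnd       */

	assert(ax == imm);
	return 0;
}
```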
| .. | .. | 
|---|
| 811 | 1104 |  	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; | 
|---|
| 812 | 1105 |  	struct bpf_prog *fp; | 
|---|
| 813 | 1106 |   | 
|---|
| 814 |  | -	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);  | 
|---|
 | 1107 | +	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);  | 
|---|
| 815 | 1108 |  	if (fp != NULL) { | 
|---|
| 816 | 1109 |  		/* aux->prog still points to the fp_other one, so | 
|---|
| 817 | 1110 |  		 * when promoting the clone to the real program, | 
|---|
| .. | .. | 
|---|
| 872 | 1165 |  		    insn[1].code == 0) | 
|---|
| 873 | 1166 |  			memcpy(aux, insn, sizeof(aux)); | 
|---|
| 874 | 1167 |   | 
|---|
| 875 |  | -		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);  | 
|---|
 | 1168 | +		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,  | 
|---|
 | 1169 | +						clone->aux->verifier_zext);  | 
|---|
| 876 | 1170 |  		if (!rewritten) | 
|---|
| 877 | 1171 |  			continue; | 
|---|
| 878 | 1172 |   | 
|---|
| 879 | 1173 |  		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); | 
|---|
| 880 |  | -		if (!tmp) {  | 
|---|
 | 1174 | +		if (IS_ERR(tmp)) {  | 
|---|
| 881 | 1175 |  			/* Patching may have repointed aux->prog during | 
|---|
| 882 | 1176 |  			 * realloc from the original one, so we need to | 
|---|
| 883 | 1177 |  			 * fix it up here on error. | 
|---|
| 884 | 1178 |  			 */ | 
|---|
| 885 | 1179 |  			bpf_jit_prog_release_other(prog, clone); | 
|---|
| 886 |  | -			return ERR_PTR(-ENOMEM);  | 
|---|
 | 1180 | +			return tmp;  | 
|---|
| 887 | 1181 |  		} | 
|---|
| 888 | 1182 |   | 
|---|
| 889 | 1183 |  		clone = tmp; | 
|---|
| .. | .. | 
|---|
| 916 | 1210 |  #define BPF_INSN_MAP(INSN_2, INSN_3)		\ | 
|---|
| 917 | 1211 |  	/* 32 bit ALU operations. */		\ | 
|---|
| 918 | 1212 |  	/*   Register based. */			\ | 
|---|
| 919 |  | -	INSN_3(ALU, ADD, X),			\  | 
|---|
| 920 |  | -	INSN_3(ALU, SUB, X),			\  | 
|---|
| 921 |  | -	INSN_3(ALU, AND, X),			\  | 
|---|
| 922 |  | -	INSN_3(ALU, OR,  X),			\  | 
|---|
| 923 |  | -	INSN_3(ALU, LSH, X),			\  | 
|---|
| 924 |  | -	INSN_3(ALU, RSH, X),			\  | 
|---|
| 925 |  | -	INSN_3(ALU, XOR, X),			\  | 
|---|
| 926 |  | -	INSN_3(ALU, MUL, X),			\  | 
|---|
| 927 |  | -	INSN_3(ALU, MOV, X),			\  | 
|---|
| 928 |  | -	INSN_3(ALU, DIV, X),			\  | 
|---|
| 929 |  | -	INSN_3(ALU, MOD, X),			\  | 
|---|
 | 1213 | +	INSN_3(ALU, ADD,  X),			\  | 
|---|
 | 1214 | +	INSN_3(ALU, SUB,  X),			\  | 
|---|
 | 1215 | +	INSN_3(ALU, AND,  X),			\  | 
|---|
 | 1216 | +	INSN_3(ALU, OR,   X),			\  | 
|---|
 | 1217 | +	INSN_3(ALU, LSH,  X),			\  | 
|---|
 | 1218 | +	INSN_3(ALU, RSH,  X),			\  | 
|---|
 | 1219 | +	INSN_3(ALU, XOR,  X),			\  | 
|---|
 | 1220 | +	INSN_3(ALU, MUL,  X),			\  | 
|---|
 | 1221 | +	INSN_3(ALU, MOV,  X),			\  | 
|---|
 | 1222 | +	INSN_3(ALU, ARSH, X),			\  | 
|---|
 | 1223 | +	INSN_3(ALU, DIV,  X),			\  | 
|---|
 | 1224 | +	INSN_3(ALU, MOD,  X),			\  | 
|---|
| 930 | 1225 |  	INSN_2(ALU, NEG),			\ | 
|---|
| 931 | 1226 |  	INSN_3(ALU, END, TO_BE),		\ | 
|---|
| 932 | 1227 |  	INSN_3(ALU, END, TO_LE),		\ | 
|---|
| 933 | 1228 |  	/*   Immediate based. */		\ | 
|---|
| 934 |  | -	INSN_3(ALU, ADD, K),			\  | 
|---|
| 935 |  | -	INSN_3(ALU, SUB, K),			\  | 
|---|
| 936 |  | -	INSN_3(ALU, AND, K),			\  | 
|---|
| 937 |  | -	INSN_3(ALU, OR,  K),			\  | 
|---|
| 938 |  | -	INSN_3(ALU, LSH, K),			\  | 
|---|
| 939 |  | -	INSN_3(ALU, RSH, K),			\  | 
|---|
| 940 |  | -	INSN_3(ALU, XOR, K),			\  | 
|---|
| 941 |  | -	INSN_3(ALU, MUL, K),			\  | 
|---|
| 942 |  | -	INSN_3(ALU, MOV, K),			\  | 
|---|
| 943 |  | -	INSN_3(ALU, DIV, K),			\  | 
|---|
| 944 |  | -	INSN_3(ALU, MOD, K),			\  | 
|---|
 | 1229 | +	INSN_3(ALU, ADD,  K),			\  | 
|---|
 | 1230 | +	INSN_3(ALU, SUB,  K),			\  | 
|---|
 | 1231 | +	INSN_3(ALU, AND,  K),			\  | 
|---|
 | 1232 | +	INSN_3(ALU, OR,   K),			\  | 
|---|
 | 1233 | +	INSN_3(ALU, LSH,  K),			\  | 
|---|
 | 1234 | +	INSN_3(ALU, RSH,  K),			\  | 
|---|
 | 1235 | +	INSN_3(ALU, XOR,  K),			\  | 
|---|
 | 1236 | +	INSN_3(ALU, MUL,  K),			\  | 
|---|
 | 1237 | +	INSN_3(ALU, MOV,  K),			\  | 
|---|
 | 1238 | +	INSN_3(ALU, ARSH, K),			\  | 
|---|
 | 1239 | +	INSN_3(ALU, DIV,  K),			\  | 
|---|
 | 1240 | +	INSN_3(ALU, MOD,  K),			\  | 
|---|
| 945 | 1241 |  	/* 64 bit ALU operations. */		\ | 
|---|
| 946 | 1242 |  	/*   Register based. */			\ | 
|---|
| 947 | 1243 |  	INSN_3(ALU64, ADD,  X),			\ | 
|---|
| .. | .. | 
|---|
| 974 | 1270 |  	INSN_2(JMP, CALL),			\ | 
|---|
| 975 | 1271 |  	/* Exit instruction. */			\ | 
|---|
| 976 | 1272 |  	INSN_2(JMP, EXIT),			\ | 
|---|
 | 1273 | +	/* 32-bit Jump instructions. */		\  | 
|---|
 | 1274 | +	/*   Register based. */			\  | 
|---|
 | 1275 | +	INSN_3(JMP32, JEQ,  X),			\  | 
|---|
 | 1276 | +	INSN_3(JMP32, JNE,  X),			\  | 
|---|
 | 1277 | +	INSN_3(JMP32, JGT,  X),			\  | 
|---|
 | 1278 | +	INSN_3(JMP32, JLT,  X),			\  | 
|---|
 | 1279 | +	INSN_3(JMP32, JGE,  X),			\  | 
|---|
 | 1280 | +	INSN_3(JMP32, JLE,  X),			\  | 
|---|
 | 1281 | +	INSN_3(JMP32, JSGT, X),			\  | 
|---|
 | 1282 | +	INSN_3(JMP32, JSLT, X),			\  | 
|---|
 | 1283 | +	INSN_3(JMP32, JSGE, X),			\  | 
|---|
 | 1284 | +	INSN_3(JMP32, JSLE, X),			\  | 
|---|
 | 1285 | +	INSN_3(JMP32, JSET, X),			\  | 
|---|
 | 1286 | +	/*   Immediate based. */		\  | 
|---|
 | 1287 | +	INSN_3(JMP32, JEQ,  K),			\  | 
|---|
 | 1288 | +	INSN_3(JMP32, JNE,  K),			\  | 
|---|
 | 1289 | +	INSN_3(JMP32, JGT,  K),			\  | 
|---|
 | 1290 | +	INSN_3(JMP32, JLT,  K),			\  | 
|---|
 | 1291 | +	INSN_3(JMP32, JGE,  K),			\  | 
|---|
 | 1292 | +	INSN_3(JMP32, JLE,  K),			\  | 
|---|
 | 1293 | +	INSN_3(JMP32, JSGT, K),			\  | 
|---|
 | 1294 | +	INSN_3(JMP32, JSLT, K),			\  | 
|---|
 | 1295 | +	INSN_3(JMP32, JSGE, K),			\  | 
|---|
 | 1296 | +	INSN_3(JMP32, JSLE, K),			\  | 
|---|
 | 1297 | +	INSN_3(JMP32, JSET, K),			\  | 
|---|
| 977 | 1298 |  	/* Jump instructions. */		\ | 
|---|
| 978 | 1299 |  	/*   Register based. */			\ | 
|---|
| 979 | 1300 |  	INSN_3(JMP, JEQ,  X),			\ | 
|---|
| .. | .. | 
|---|
| 1044 | 1365 |  } | 
|---|
| 1045 | 1366 |   | 
|---|
| 1046 | 1367 |  #ifndef CONFIG_BPF_JIT_ALWAYS_ON | 
|---|
 | 1368 | +u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)  | 
|---|
 | 1369 | +{  | 
|---|
 | 1370 | +	memset(dst, 0, size);  | 
|---|
 | 1371 | +	return -EFAULT;  | 
|---|
 | 1372 | +}  | 
|---|
 | 1373 | +  | 
|---|
| 1047 | 1374 |  /** | 
|---|
| 1048 | 1375 |   *	__bpf_prog_run - run eBPF program on a given context | 
|---|
| 1049 |  | - *	@ctx: is the data we are operating on  | 
|---|
 | 1376 | + *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers  | 
|---|
| 1050 | 1377 |   *	@insn: is the array of eBPF instructions | 
|---|
 | 1378 | + *	@stack: is the eBPF storage stack  | 
|---|
| 1051 | 1379 |   * | 
|---|
| 1052 | 1380 |   * Decode and execute eBPF instructions. | 
|---|
| 1053 | 1381 |   */ | 
|---|
| .. | .. | 
|---|
| 1055 | 1383 |  { | 
|---|
| 1056 | 1384 |  #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y | 
|---|
| 1057 | 1385 |  #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z | 
|---|
| 1058 |  | -	static const void *jumptable[256] = {  | 
|---|
 | 1386 | +	static const void * const jumptable[256] __annotate_jump_table = {  | 
|---|
| 1059 | 1387 |  		[0 ... 255] = &&default_label, | 
|---|
| 1060 | 1388 |  		/* Now overwrite non-defaults ... */ | 
|---|
| 1061 | 1389 |  		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), | 
|---|
| .. | .. | 
|---|
| 1063 | 1391 |  		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, | 
|---|
| 1064 | 1392 |  		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, | 
|---|
| 1065 | 1393 |  		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC, | 
|---|
 | 1394 | +		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,  | 
|---|
 | 1395 | +		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,  | 
|---|
 | 1396 | +		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,  | 
|---|
 | 1397 | +		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,  | 
|---|
| 1066 | 1398 |  	}; | 
|---|
| 1067 | 1399 |  #undef BPF_INSN_3_LBL | 
|---|
| 1068 | 1400 |  #undef BPF_INSN_2_LBL | 
|---|
| 1069 | 1401 |  	u32 tail_call_cnt = 0; | 
|---|
| 1070 |  | -	u64 tmp;  | 
|---|
| 1071 | 1402 |   | 
|---|
| 1072 | 1403 |  #define CONT	 ({ insn++; goto select_insn; }) | 
|---|
| 1073 | 1404 |  #define CONT_JMP ({ insn++; goto select_insn; }) | 
|---|
| .. | .. | 
|---|
| 1075 | 1406 |  select_insn: | 
|---|
| 1076 | 1407 |  	goto *jumptable[insn->code]; | 
|---|
| 1077 | 1408 |   | 
|---|
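The jump table dispatch relies on GCC's labels-as-values extension, and `__annotate_jump_table` (hence the new `<linux/objtool.h>` include earlier) tells objtool about the indirect `goto` targets. A minimal interpreter skeleton using the same computed-goto technique:

```c
#include <stdio.h>

/* Minimal computed-goto dispatch loop (GCC/Clang extension). */
static int run(const unsigned char *ops, int n)
{
	static const void *const jt[3] = {
		[0] = &&op_nop, [1] = &&op_inc, [2] = &&op_halt,
	};
	int acc = 0, pc = 0;

dispatch:
	if (pc >= n)
		return acc;
	goto *jt[ops[pc++]];	/* indirect branch to the handler */
op_nop:
	goto dispatch;
op_inc:
	acc++;
	goto dispatch;
op_halt:
	return acc;
}

int main(void)
{
	const unsigned char prog[] = { 1, 0, 1, 2 };

	printf("%d\n", run(prog, 4));	/* prints 2 */
	return 0;
}
```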
| 1078 |  | -	/* ALU */  | 
|---|
| 1079 |  | -#define ALU(OPCODE, OP)			\  | 
|---|
| 1080 |  | -	ALU64_##OPCODE##_X:		\  | 
|---|
| 1081 |  | -		DST = DST OP SRC;	\  | 
|---|
| 1082 |  | -		CONT;			\  | 
|---|
| 1083 |  | -	ALU_##OPCODE##_X:		\  | 
|---|
| 1084 |  | -		DST = (u32) DST OP (u32) SRC;	\  | 
|---|
| 1085 |  | -		CONT;			\  | 
|---|
| 1086 |  | -	ALU64_##OPCODE##_K:		\  | 
|---|
| 1087 |  | -		DST = DST OP IMM;		\  | 
|---|
| 1088 |  | -		CONT;			\  | 
|---|
| 1089 |  | -	ALU_##OPCODE##_K:		\  | 
|---|
| 1090 |  | -		DST = (u32) DST OP (u32) IMM;	\  | 
|---|
 | 1409 | +	/* Explicitly mask the register-based shift amounts with 63 or 31  | 
|---|
 | 1410 | +	 * to avoid undefined behavior. Normally this won't affect the  | 
|---|
 | 1411 | +	 * generated code: on native 64-bit archs such as x86-64 or  | 
|---|
 | 1412 | +	 * arm64, for example, the compiler optimizes the AND away for  | 
|---|
 | 1413 | +	 * the interpreter. For JITs, each of the JIT backends compiles  | 
|---|
 | 1414 | +	 * the BPF shift operations to machine instructions which produce  | 
|---|
 | 1415 | +	 * implementation-defined results in such a case; the resulting  | 
|---|
 | 1416 | +	 * contents of the register may be arbitrary, but program behaviour  | 
|---|
 | 1417 | +	 * as a whole remains defined. In other words, in case of JIT backends,  | 
|---|
 | 1418 | +	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.  | 
|---|
 | 1419 | +	 */  | 
|---|
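Shifting a value by at least its width is undefined behavior in C, so the interpreter masks register-based shift amounts itself; verified programs never rely on such shifts, so the mask only pins down an otherwise undefined result, and JITs must not add it because the target ISA already defines one. A small illustration of the two variants:

```c
#include <stdint.h>

/* UB: if s >= 64 the C standard leaves the result undefined. */
static uint64_t lsh_ub(uint64_t x, uint64_t s)
{
	return x << s;
}

/* Defined for any s: masking with 63 matches what the interpreter
 * does above (and what x86-64's SHL does in hardware anyway). */
static uint64_t lsh_ok(uint64_t x, uint64_t s)
{
	return x << (s & 63);
}
```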
 | 1420 | +	/* ALU (shifts) */  | 
|---|
 | 1421 | +#define SHT(OPCODE, OP)					\  | 
|---|
 | 1422 | +	ALU64_##OPCODE##_X:				\  | 
|---|
 | 1423 | +		DST = DST OP (SRC & 63);		\  | 
|---|
 | 1424 | +		CONT;					\  | 
|---|
 | 1425 | +	ALU_##OPCODE##_X:				\  | 
|---|
 | 1426 | +		DST = (u32) DST OP ((u32) SRC & 31);	\  | 
|---|
 | 1427 | +		CONT;					\  | 
|---|
 | 1428 | +	ALU64_##OPCODE##_K:				\  | 
|---|
 | 1429 | +		DST = DST OP IMM;			\  | 
|---|
 | 1430 | +		CONT;					\  | 
|---|
 | 1431 | +	ALU_##OPCODE##_K:				\  | 
|---|
 | 1432 | +		DST = (u32) DST OP (u32) IMM;		\  | 
|---|
| 1091 | 1433 |  		CONT; | 
|---|
| 1092 |  | -  | 
|---|
 | 1434 | +	/* ALU (rest) */  | 
|---|
 | 1435 | +#define ALU(OPCODE, OP)					\  | 
|---|
 | 1436 | +	ALU64_##OPCODE##_X:				\  | 
|---|
 | 1437 | +		DST = DST OP SRC;			\  | 
|---|
 | 1438 | +		CONT;					\  | 
|---|
 | 1439 | +	ALU_##OPCODE##_X:				\  | 
|---|
 | 1440 | +		DST = (u32) DST OP (u32) SRC;		\  | 
|---|
 | 1441 | +		CONT;					\  | 
|---|
 | 1442 | +	ALU64_##OPCODE##_K:				\  | 
|---|
 | 1443 | +		DST = DST OP IMM;			\  | 
|---|
 | 1444 | +		CONT;					\  | 
|---|
 | 1445 | +	ALU_##OPCODE##_K:				\  | 
|---|
 | 1446 | +		DST = (u32) DST OP (u32) IMM;		\  | 
|---|
 | 1447 | +		CONT;  | 
|---|
| 1093 | 1448 |  	ALU(ADD,  +) | 
|---|
| 1094 | 1449 |  	ALU(SUB,  -) | 
|---|
| 1095 | 1450 |  	ALU(AND,  &) | 
|---|
| 1096 | 1451 |  	ALU(OR,   |) | 
|---|
| 1097 |  | -	ALU(LSH, <<)  | 
|---|
| 1098 |  | -	ALU(RSH, >>)  | 
|---|
| 1099 | 1452 |  	ALU(XOR,  ^) | 
|---|
| 1100 | 1453 |  	ALU(MUL,  *) | 
|---|
 | 1454 | +	SHT(LSH, <<)  | 
|---|
 | 1455 | +	SHT(RSH, >>)  | 
|---|
 | 1456 | +#undef SHT  | 
|---|
| 1101 | 1457 |  #undef ALU | 
|---|
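As the comment above the SHT() macro notes, the masking exists only to keep the C expression defined. A small userspace sketch of the difference, with a hypothetical helper name:

```c
#include <stdint.h>
#include <stdio.h>

/* Masked 64-bit left shift, mirroring ALU64_LSH_X above: defined in C
 * for any src, and matching the count-masking most ISAs do in hardware. */
static uint64_t bpf_lsh64(uint64_t dst, uint64_t src)
{
	return dst << (src & 63);
}

int main(void)
{
	/* "1 << 70" would be undefined behavior in plain C; the masked
	 * form yields 1 << 6, the wrap-around x86-64/arm64 implement. */
	printf("%llu\n", (unsigned long long)bpf_lsh64(1, 70));	/* 64 */
	return 0;
}
```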
| 1102 | 1458 |  	ALU_NEG: | 
|---|
| 1103 | 1459 |  		DST = (u32) -DST; | 
|---|
| .. | .. | 
|---|
| 1121 | 1477 |  		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; | 
|---|
| 1122 | 1478 |  		insn++; | 
|---|
| 1123 | 1479 |  		CONT; | 
|---|
 | 1480 | +	ALU_ARSH_X:  | 
|---|
 | 1481 | +		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));  | 
|---|
 | 1482 | +		CONT;  | 
|---|
 | 1483 | +	ALU_ARSH_K:  | 
|---|
 | 1484 | +		DST = (u64) (u32) (((s32) DST) >> IMM);  | 
|---|
 | 1485 | +		CONT;  | 
|---|
| 1124 | 1486 |  	ALU64_ARSH_X: | 
|---|
| 1125 |  | -		(*(s64 *) &DST) >>= SRC;  | 
|---|
 | 1487 | +		(*(s64 *) &DST) >>= (SRC & 63);  | 
|---|
| 1126 | 1488 |  		CONT; | 
|---|
| 1127 | 1489 |  	ALU64_ARSH_K: | 
|---|
| 1128 | 1490 |  		(*(s64 *) &DST) >>= IMM; | 
|---|
| 1129 | 1491 |  		CONT; | 
|---|
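The ARSH handlers above cast through a signed type because `>>` on an unsigned operand is a logical shift in C, while BPF_ARSH must sign-extend. A userspace sketch of the difference:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst = UINT64_MAX;	/* bit pattern of -1 */

	/* Logical shift: zeroes fill in from the left. */
	printf("%#llx\n", (unsigned long long)(dst >> 1));	/* 0x7fff... */
	/* Arithmetic shift via the signed reinterpretation used by
	 * ALU64_ARSH_X: the sign bit is replicated. */
	printf("%lld\n", (long long)(*(int64_t *)&dst >> 1));	/* -1 */
	return 0;
}
```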
| 1130 | 1492 |  	ALU64_MOD_X: | 
|---|
| 1131 |  | -		div64_u64_rem(DST, SRC, &tmp);  | 
|---|
| 1132 |  | -		DST = tmp;  | 
|---|
 | 1493 | +		div64_u64_rem(DST, SRC, &AX);  | 
|---|
 | 1494 | +		DST = AX;  | 
|---|
| 1133 | 1495 |  		CONT; | 
|---|
| 1134 | 1496 |  	ALU_MOD_X: | 
|---|
| 1135 |  | -		tmp = (u32) DST;  | 
|---|
| 1136 |  | -		DST = do_div(tmp, (u32) SRC);  | 
|---|
 | 1497 | +		AX = (u32) DST;  | 
|---|
 | 1498 | +		DST = do_div(AX, (u32) SRC);  | 
|---|
| 1137 | 1499 |  		CONT; | 
|---|
| 1138 | 1500 |  	ALU64_MOD_K: | 
|---|
| 1139 |  | -		div64_u64_rem(DST, IMM, &tmp);  | 
|---|
| 1140 |  | -		DST = tmp;  | 
|---|
 | 1501 | +		div64_u64_rem(DST, IMM, &AX);  | 
|---|
 | 1502 | +		DST = AX;  | 
|---|
| 1141 | 1503 |  		CONT; | 
|---|
| 1142 | 1504 |  	ALU_MOD_K: | 
|---|
| 1143 |  | -		tmp = (u32) DST;  | 
|---|
| 1144 |  | -		DST = do_div(tmp, (u32) IMM);  | 
|---|
 | 1505 | +		AX = (u32) DST;  | 
|---|
 | 1506 | +		DST = do_div(AX, (u32) IMM);  | 
|---|
| 1145 | 1507 |  		CONT; | 
|---|
| 1146 | 1508 |  	ALU64_DIV_X: | 
|---|
| 1147 | 1509 |  		DST = div64_u64(DST, SRC); | 
|---|
| 1148 | 1510 |  		CONT; | 
|---|
| 1149 | 1511 |  	ALU_DIV_X: | 
|---|
| 1150 |  | -		tmp = (u32) DST;  | 
|---|
| 1151 |  | -		do_div(tmp, (u32) SRC);  | 
|---|
| 1152 |  | -		DST = (u32) tmp;  | 
|---|
 | 1512 | +		AX = (u32) DST;  | 
|---|
 | 1513 | +		do_div(AX, (u32) SRC);  | 
|---|
 | 1514 | +		DST = (u32) AX;  | 
|---|
| 1153 | 1515 |  		CONT; | 
|---|
| 1154 | 1516 |  	ALU64_DIV_K: | 
|---|
| 1155 | 1517 |  		DST = div64_u64(DST, IMM); | 
|---|
| 1156 | 1518 |  		CONT; | 
|---|
| 1157 | 1519 |  	ALU_DIV_K: | 
|---|
| 1158 |  | -		tmp = (u32) DST;  | 
|---|
| 1159 |  | -		do_div(tmp, (u32) IMM);  | 
|---|
| 1160 |  | -		DST = (u32) tmp;  | 
|---|
 | 1520 | +		AX = (u32) DST;  | 
|---|
 | 1521 | +		do_div(AX, (u32) IMM);  | 
|---|
 | 1522 | +		DST = (u32) AX;  | 
|---|
| 1161 | 1523 |  		CONT; | 
|---|
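The DIV/MOD handlers lean on the kernel's `do_div()` convention: the macro divides its first argument in place and evaluates to the remainder, which is why the MOD cases assign its result to DST while the DIV cases keep the quotient left in AX (the interpreter's auxiliary scratch register, which replaced the local `tmp` here). A userspace stand-in to illustrate; the real `do_div()` is an arch-specific macro, not this function:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for do_div(): the quotient lands in *n and the
 * remainder is returned, mirroring how ALU_DIV_* / ALU_MOD_* use it. */
static uint32_t demo_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t ax = 17;	/* AX = (u32) DST */
	uint32_t rem = demo_do_div(&ax, 5);

	printf("rem=%u quot=%llu\n", rem, (unsigned long long)ax);
	/* prints "rem=2 quot=3" */
	return 0;
}
```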
| 1162 | 1524 |  	ALU_END_TO_BE: | 
|---|
| 1163 | 1525 |  		switch (IMM) { | 
|---|
| .. | .. | 
|---|
| 1222 | 1584 |   | 
|---|
| 1223 | 1585 |  		/* ARG1 at this point is guaranteed to point to CTX from | 
|---|
| 1224 | 1586 |  		 * the verifier side due to the fact that the tail call is | 
|---|
| 1225 |  | -		 * handeled like a helper, that is, bpf_tail_call_proto,  | 
|---|
 | 1587 | +		 * handled like a helper, that is, bpf_tail_call_proto,  | 
|---|
| 1226 | 1588 |  		 * where arg1_type is ARG_PTR_TO_CTX. | 
|---|
| 1227 | 1589 |  		 */ | 
|---|
| 1228 | 1590 |  		insn = prog->insnsi; | 
|---|
| .. | .. | 
|---|
| 1230 | 1592 |  out: | 
|---|
| 1231 | 1593 |  		CONT; | 
|---|
| 1232 | 1594 |  	} | 
|---|
| 1233 |  | -	/* JMP */  | 
|---|
| 1234 | 1595 |  	JMP_JA: | 
|---|
| 1235 | 1596 |  		insn += insn->off; | 
|---|
| 1236 | 1597 |  		CONT; | 
|---|
| 1237 |  | -	JMP_JEQ_X:  | 
|---|
| 1238 |  | -		if (DST == SRC) {  | 
|---|
| 1239 |  | -			insn += insn->off;  | 
|---|
| 1240 |  | -			CONT_JMP;  | 
|---|
| 1241 |  | -		}  | 
|---|
| 1242 |  | -		CONT;  | 
|---|
| 1243 |  | -	JMP_JEQ_K:  | 
|---|
| 1244 |  | -		if (DST == IMM) {  | 
|---|
| 1245 |  | -			insn += insn->off;  | 
|---|
| 1246 |  | -			CONT_JMP;  | 
|---|
| 1247 |  | -		}  | 
|---|
| 1248 |  | -		CONT;  | 
|---|
| 1249 |  | -	JMP_JNE_X:  | 
|---|
| 1250 |  | -		if (DST != SRC) {  | 
|---|
| 1251 |  | -			insn += insn->off;  | 
|---|
| 1252 |  | -			CONT_JMP;  | 
|---|
| 1253 |  | -		}  | 
|---|
| 1254 |  | -		CONT;  | 
|---|
| 1255 |  | -	JMP_JNE_K:  | 
|---|
| 1256 |  | -		if (DST != IMM) {  | 
|---|
| 1257 |  | -			insn += insn->off;  | 
|---|
| 1258 |  | -			CONT_JMP;  | 
|---|
| 1259 |  | -		}  | 
|---|
| 1260 |  | -		CONT;  | 
|---|
| 1261 |  | -	JMP_JGT_X:  | 
|---|
| 1262 |  | -		if (DST > SRC) {  | 
|---|
| 1263 |  | -			insn += insn->off;  | 
|---|
| 1264 |  | -			CONT_JMP;  | 
|---|
| 1265 |  | -		}  | 
|---|
| 1266 |  | -		CONT;  | 
|---|
| 1267 |  | -	JMP_JGT_K:  | 
|---|
| 1268 |  | -		if (DST > IMM) {  | 
|---|
| 1269 |  | -			insn += insn->off;  | 
|---|
| 1270 |  | -			CONT_JMP;  | 
|---|
| 1271 |  | -		}  | 
|---|
| 1272 |  | -		CONT;  | 
|---|
| 1273 |  | -	JMP_JLT_X:  | 
|---|
| 1274 |  | -		if (DST < SRC) {  | 
|---|
| 1275 |  | -			insn += insn->off;  | 
|---|
| 1276 |  | -			CONT_JMP;  | 
|---|
| 1277 |  | -		}  | 
|---|
| 1278 |  | -		CONT;  | 
|---|
| 1279 |  | -	JMP_JLT_K:  | 
|---|
| 1280 |  | -		if (DST < IMM) {  | 
|---|
| 1281 |  | -			insn += insn->off;  | 
|---|
| 1282 |  | -			CONT_JMP;  | 
|---|
| 1283 |  | -		}  | 
|---|
| 1284 |  | -		CONT;  | 
|---|
| 1285 |  | -	JMP_JGE_X:  | 
|---|
| 1286 |  | -		if (DST >= SRC) {  | 
|---|
| 1287 |  | -			insn += insn->off;  | 
|---|
| 1288 |  | -			CONT_JMP;  | 
|---|
| 1289 |  | -		}  | 
|---|
| 1290 |  | -		CONT;  | 
|---|
| 1291 |  | -	JMP_JGE_K:  | 
|---|
| 1292 |  | -		if (DST >= IMM) {  | 
|---|
| 1293 |  | -			insn += insn->off;  | 
|---|
| 1294 |  | -			CONT_JMP;  | 
|---|
| 1295 |  | -		}  | 
|---|
| 1296 |  | -		CONT;  | 
|---|
| 1297 |  | -	JMP_JLE_X:  | 
|---|
| 1298 |  | -		if (DST <= SRC) {  | 
|---|
| 1299 |  | -			insn += insn->off;  | 
|---|
| 1300 |  | -			CONT_JMP;  | 
|---|
| 1301 |  | -		}  | 
|---|
| 1302 |  | -		CONT;  | 
|---|
| 1303 |  | -	JMP_JLE_K:  | 
|---|
| 1304 |  | -		if (DST <= IMM) {  | 
|---|
| 1305 |  | -			insn += insn->off;  | 
|---|
| 1306 |  | -			CONT_JMP;  | 
|---|
| 1307 |  | -		}  | 
|---|
| 1308 |  | -		CONT;  | 
|---|
| 1309 |  | -	JMP_JSGT_X:  | 
|---|
| 1310 |  | -		if (((s64) DST) > ((s64) SRC)) {  | 
|---|
| 1311 |  | -			insn += insn->off;  | 
|---|
| 1312 |  | -			CONT_JMP;  | 
|---|
| 1313 |  | -		}  | 
|---|
| 1314 |  | -		CONT;  | 
|---|
| 1315 |  | -	JMP_JSGT_K:  | 
|---|
| 1316 |  | -		if (((s64) DST) > ((s64) IMM)) {  | 
|---|
| 1317 |  | -			insn += insn->off;  | 
|---|
| 1318 |  | -			CONT_JMP;  | 
|---|
| 1319 |  | -		}  | 
|---|
| 1320 |  | -		CONT;  | 
|---|
| 1321 |  | -	JMP_JSLT_X:  | 
|---|
| 1322 |  | -		if (((s64) DST) < ((s64) SRC)) {  | 
|---|
| 1323 |  | -			insn += insn->off;  | 
|---|
| 1324 |  | -			CONT_JMP;  | 
|---|
| 1325 |  | -		}  | 
|---|
| 1326 |  | -		CONT;  | 
|---|
| 1327 |  | -	JMP_JSLT_K:  | 
|---|
| 1328 |  | -		if (((s64) DST) < ((s64) IMM)) {  | 
|---|
| 1329 |  | -			insn += insn->off;  | 
|---|
| 1330 |  | -			CONT_JMP;  | 
|---|
| 1331 |  | -		}  | 
|---|
| 1332 |  | -		CONT;  | 
|---|
| 1333 |  | -	JMP_JSGE_X:  | 
|---|
| 1334 |  | -		if (((s64) DST) >= ((s64) SRC)) {  | 
|---|
| 1335 |  | -			insn += insn->off;  | 
|---|
| 1336 |  | -			CONT_JMP;  | 
|---|
| 1337 |  | -		}  | 
|---|
| 1338 |  | -		CONT;  | 
|---|
| 1339 |  | -	JMP_JSGE_K:  | 
|---|
| 1340 |  | -		if (((s64) DST) >= ((s64) IMM)) {  | 
|---|
| 1341 |  | -			insn += insn->off;  | 
|---|
| 1342 |  | -			CONT_JMP;  | 
|---|
| 1343 |  | -		}  | 
|---|
| 1344 |  | -		CONT;  | 
|---|
| 1345 |  | -	JMP_JSLE_X:  | 
|---|
| 1346 |  | -		if (((s64) DST) <= ((s64) SRC)) {  | 
|---|
| 1347 |  | -			insn += insn->off;  | 
|---|
| 1348 |  | -			CONT_JMP;  | 
|---|
| 1349 |  | -		}  | 
|---|
| 1350 |  | -		CONT;  | 
|---|
| 1351 |  | -	JMP_JSLE_K:  | 
|---|
| 1352 |  | -		if (((s64) DST) <= ((s64) IMM)) {  | 
|---|
| 1353 |  | -			insn += insn->off;  | 
|---|
| 1354 |  | -			CONT_JMP;  | 
|---|
| 1355 |  | -		}  | 
|---|
| 1356 |  | -		CONT;  | 
|---|
| 1357 |  | -	JMP_JSET_X:  | 
|---|
| 1358 |  | -		if (DST & SRC) {  | 
|---|
| 1359 |  | -			insn += insn->off;  | 
|---|
| 1360 |  | -			CONT_JMP;  | 
|---|
| 1361 |  | -		}  | 
|---|
| 1362 |  | -		CONT;  | 
|---|
| 1363 |  | -	JMP_JSET_K:  | 
|---|
| 1364 |  | -		if (DST & IMM) {  | 
|---|
| 1365 |  | -			insn += insn->off;  | 
|---|
| 1366 |  | -			CONT_JMP;  | 
|---|
| 1367 |  | -		}  | 
|---|
| 1368 |  | -		CONT;  | 
|---|
| 1369 | 1598 |  	JMP_EXIT: | 
|---|
| 1370 | 1599 |  		return BPF_R0; | 
|---|
| 1371 |  | -  | 
|---|
 | 1600 | +	/* JMP */  | 
|---|
 | 1601 | +#define COND_JMP(SIGN, OPCODE, CMP_OP)				\  | 
|---|
 | 1602 | +	JMP_##OPCODE##_X:					\  | 
|---|
 | 1603 | +		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\  | 
|---|
 | 1604 | +			insn += insn->off;			\  | 
|---|
 | 1605 | +			CONT_JMP;				\  | 
|---|
 | 1606 | +		}						\  | 
|---|
 | 1607 | +		CONT;						\  | 
|---|
 | 1608 | +	JMP32_##OPCODE##_X:					\  | 
|---|
 | 1609 | +		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\  | 
|---|
 | 1610 | +			insn += insn->off;			\  | 
|---|
 | 1611 | +			CONT_JMP;				\  | 
|---|
 | 1612 | +		}						\  | 
|---|
 | 1613 | +		CONT;						\  | 
|---|
 | 1614 | +	JMP_##OPCODE##_K:					\  | 
|---|
 | 1615 | +		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\  | 
|---|
 | 1616 | +			insn += insn->off;			\  | 
|---|
 | 1617 | +			CONT_JMP;				\  | 
|---|
 | 1618 | +		}						\  | 
|---|
 | 1619 | +		CONT;						\  | 
|---|
 | 1620 | +	JMP32_##OPCODE##_K:					\  | 
|---|
 | 1621 | +		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\  | 
|---|
 | 1622 | +			insn += insn->off;			\  | 
|---|
 | 1623 | +			CONT_JMP;				\  | 
|---|
 | 1624 | +		}						\  | 
|---|
 | 1625 | +		CONT;  | 
|---|
 | 1626 | +	COND_JMP(u, JEQ, ==)  | 
|---|
 | 1627 | +	COND_JMP(u, JNE, !=)  | 
|---|
 | 1628 | +	COND_JMP(u, JGT, >)  | 
|---|
 | 1629 | +	COND_JMP(u, JLT, <)  | 
|---|
 | 1630 | +	COND_JMP(u, JGE, >=)  | 
|---|
 | 1631 | +	COND_JMP(u, JLE, <=)  | 
|---|
 | 1632 | +	COND_JMP(u, JSET, &)  | 
|---|
 | 1633 | +	COND_JMP(s, JSGT, >)  | 
|---|
 | 1634 | +	COND_JMP(s, JSLT, <)  | 
|---|
 | 1635 | +	COND_JMP(s, JSGE, >=)  | 
|---|
 | 1636 | +	COND_JMP(s, JSLE, <=)  | 
|---|
 | 1637 | +#undef COND_JMP  | 
|---|
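COND_JMP() stamps out four handlers per opcode (64-bit/32-bit, each in register and immediate form) by token-pasting the sign prefix into both the label name and the operand casts. A runnable miniature of the same trick, with invented names:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;
typedef uint32_t u32;
typedef int32_t s32;

/* SIGN##64 pastes to u64 or s64, so one macro body yields signed and
 * unsigned comparison variants at both widths. */
#define COND(SIGN, NAME, CMP_OP)				\
	static int take_##NAME##_64(u64 dst, u64 src)		\
	{							\
		return (SIGN##64) dst CMP_OP (SIGN##64) src;	\
	}							\
	static int take_##NAME##_32(u64 dst, u64 src)		\
	{							\
		return (SIGN##32) dst CMP_OP (SIGN##32) src;	\
	}

COND(u, jgt, >)		/* unsigned greater-than, like JMP_JGT_* */
COND(s, jsgt, >)	/* signed greater-than, like JMP_JSGT_* */
#undef COND

int main(void)
{
	u64 a = UINT64_MAX, b = 1;	/* a is -1 when viewed signed */

	printf("jgt=%d jsgt=%d\n", take_jgt_64(a, b), take_jsgt_64(a, b));
	printf("jgt32=%d jsgt32=%d\n", take_jgt_32(a, b), take_jsgt_32(a, b));
	/* jgt=1 jsgt=0: unsigned -1 is the maximum value, signed -1 < 1 */
	return 0;
}
```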
| 1372 | 1638 |  	/* ST, STX and LDX */ | 
|---|
| 1373 | 1639 |  	ST_NOSPEC: | 
|---|
| 1374 | 1640 |  		/* Speculation barrier for mitigating Speculative Store Bypass. | 
|---|
| .. | .. | 
|---|
| 1393 | 1659 |  		CONT;							\ | 
|---|
| 1394 | 1660 |  	LDX_MEM_##SIZEOP:						\ | 
|---|
| 1395 | 1661 |  		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\ | 
|---|
 | 1662 | +		CONT;							\  | 
|---|
 | 1663 | +	LDX_PROBE_MEM_##SIZEOP:						\  | 
|---|
 | 1664 | +		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\  | 
|---|
 | 1665 | +				      (const void *)(long) (SRC + insn->off));	\  | 
|---|
 | 1666 | +		DST = *((SIZE *)&DST);					\  | 
|---|
| 1396 | 1667 |  		CONT; | 
|---|
| 1397 | 1668 |   | 
|---|
| 1398 | 1669 |  	LDST(B,   u8) | 
|---|
| .. | .. | 
|---|
| 1400 | 1671 |  	LDST(W,  u32) | 
|---|
| 1401 | 1672 |  	LDST(DW, u64) | 
|---|
| 1402 | 1673 |  #undef LDST | 
|---|
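The LDX_PROBE_MEM handlers copy sizeof(SIZE) bytes into the 64-bit DST slot with bpf_probe_read_kernel(), which survives faulting addresses, then re-read DST at the access width so the upper bytes end up zeroed. A userspace sketch of the width handling, with memcpy() standing in for the fault-safe copy and assuming a little-endian host:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t dst = 0xdeadbeefdeadbeefULL;	/* stale register contents */
	const uint8_t mem[2] = { 0x34, 0x12 };	/* 0x1234, little-endian */

	/* Like BPF_LDX | BPF_PROBE_MEM | BPF_H: copy two bytes into the
	 * 64-bit slot, then truncate/zero-extend by re-reading at the
	 * load width, as DST = *((SIZE *)&DST) does above. */
	memcpy(&dst, mem, sizeof(uint16_t));
	dst = *(uint16_t *)&dst;
	printf("%#llx\n", (unsigned long long)dst);	/* 0x1234 */
	return 0;
}
```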
 | 1674 | +  | 
|---|
| 1403 | 1675 |  	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */ | 
|---|
| 1404 | 1676 |  		atomic_add((u32) SRC, (atomic_t *)(unsigned long) | 
|---|
| 1405 | 1677 |  			   (DST + insn->off)); | 
|---|
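STX_XADD maps to the kernel's atomic_add()/atomic64_add(); the portable C11 equivalent is an atomic fetch-add on the target word:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	_Atomic uint32_t word = 40;	/* *(u32 *)(dst_reg + off16) */

	/* One indivisible read-modify-write, like "lock xadd" on x86. */
	atomic_fetch_add(&word, 2);
	printf("%u\n", (unsigned)atomic_load(&word));	/* 42 */
	return 0;
}
```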
| .. | .. | 
|---|
| 1420 | 1692 |  		BUG_ON(1); | 
|---|
| 1421 | 1693 |  		return 0; | 
|---|
| 1422 | 1694 |  } | 
|---|
| 1423 |  | -STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */  | 
|---|
| 1424 | 1695 |   | 
|---|
| 1425 | 1696 |  #define PROG_NAME(stack_size) __bpf_prog_run##stack_size | 
|---|
| 1426 | 1697 |  #define DEFINE_BPF_PROG_RUN(stack_size) \ | 
|---|
| .. | .. | 
|---|
| 1508 | 1779 |  bool bpf_prog_array_compatible(struct bpf_array *array, | 
|---|
| 1509 | 1780 |  			       const struct bpf_prog *fp) | 
|---|
| 1510 | 1781 |  { | 
|---|
 | 1782 | +	bool ret;  | 
|---|
 | 1783 | +  | 
|---|
| 1511 | 1784 |  	if (fp->kprobe_override) | 
|---|
| 1512 | 1785 |  		return false; | 
|---|
| 1513 | 1786 |   | 
|---|
| 1514 |  | -	if (!array->owner_prog_type) {  | 
|---|
 | 1787 | +	spin_lock(&array->aux->owner.lock);  | 
|---|
 | 1788 | +  | 
|---|
 | 1789 | +	if (!array->aux->owner.type) {  | 
|---|
| 1515 | 1790 |  		/* There's no owner yet where we could check for | 
|---|
| 1516 | 1791 |  		 * compatibility. | 
|---|
| 1517 | 1792 |  		 */ | 
|---|
| 1518 |  | -		array->owner_prog_type = fp->type;  | 
|---|
| 1519 |  | -		array->owner_jited = fp->jited;  | 
|---|
| 1520 |  | -  | 
|---|
| 1521 |  | -		return true;  | 
|---|
 | 1793 | +		array->aux->owner.type  = fp->type;  | 
|---|
 | 1794 | +		array->aux->owner.jited = fp->jited;  | 
|---|
 | 1795 | +		ret = true;  | 
|---|
 | 1796 | +	} else {  | 
|---|
 | 1797 | +		ret = array->aux->owner.type  == fp->type &&  | 
|---|
 | 1798 | +		      array->aux->owner.jited == fp->jited;  | 
|---|
| 1522 | 1799 |  	} | 
|---|
| 1523 |  | -  | 
|---|
| 1524 |  | -	return array->owner_prog_type == fp->type &&  | 
|---|
| 1525 |  | -	       array->owner_jited == fp->jited;  | 
|---|
 | 1800 | +	spin_unlock(&array->aux->owner.lock);  | 
|---|
 | 1801 | +	return ret;  | 
|---|
| 1526 | 1802 |  } | 
|---|
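The owner check is now a claim-or-compare done under `array->aux->owner.lock`: the first program seen claims the map by recording its type and jited flag, and every later program must match; without the lock, two first-users could race and both "claim" ownership. A simplified userspace sketch of the pattern, with stand-in types and names rather than the kernel's (build with -pthread):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct owner {
	pthread_mutex_t lock;	/* stands in for owner.lock */
	int type;		/* 0 = not claimed yet */
	bool jited;
};

static bool owner_compatible(struct owner *o, int type, bool jited)
{
	bool ret;

	pthread_mutex_lock(&o->lock);
	if (!o->type) {			/* first user claims the map */
		o->type  = type;
		o->jited = jited;
		ret = true;
	} else {			/* later users must match the owner */
		ret = o->type == type && o->jited == jited;
	}
	pthread_mutex_unlock(&o->lock);
	return ret;
}

int main(void)
{
	struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };
	bool a = owner_compatible(&o, 1, true);	/* claims ownership */
	bool b = owner_compatible(&o, 1, true);	/* matches */
	bool c = owner_compatible(&o, 2, true);	/* type mismatch */

	printf("%d %d %d\n", a, b, c);	/* 1 1 0 */
	return 0;
}
```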
| 1527 | 1803 |   | 
|---|
| 1528 | 1804 |  static int bpf_check_tail_call(const struct bpf_prog *fp) | 
|---|
| 1529 | 1805 |  { | 
|---|
| 1530 | 1806 |  	struct bpf_prog_aux *aux = fp->aux; | 
|---|
| 1531 |  | -	int i;  | 
|---|
 | 1807 | +	int i, ret = 0;  | 
|---|
| 1532 | 1808 |   | 
|---|
 | 1809 | +	mutex_lock(&aux->used_maps_mutex);  | 
|---|
| 1533 | 1810 |  	for (i = 0; i < aux->used_map_cnt; i++) { | 
|---|
| 1534 | 1811 |  		struct bpf_map *map = aux->used_maps[i]; | 
|---|
| 1535 | 1812 |  		struct bpf_array *array; | 
|---|
| .. | .. | 
|---|
| 1538 | 1815 |  			continue; | 
|---|
| 1539 | 1816 |   | 
|---|
| 1540 | 1817 |  		array = container_of(map, struct bpf_array, map); | 
|---|
| 1541 |  | -		if (!bpf_prog_array_compatible(array, fp))  | 
|---|
| 1542 |  | -			return -EINVAL;  | 
|---|
 | 1818 | +		if (!bpf_prog_array_compatible(array, fp)) {  | 
|---|
 | 1819 | +			ret = -EINVAL;  | 
|---|
 | 1820 | +			goto out;  | 
|---|
 | 1821 | +		}  | 
|---|
| 1543 | 1822 |  	} | 
|---|
| 1544 | 1823 |   | 
|---|
| 1545 |  | -	return 0;  | 
|---|
 | 1824 | +out:  | 
|---|
 | 1825 | +	mutex_unlock(&aux->used_maps_mutex);  | 
|---|
 | 1826 | +	return ret;  | 
|---|
| 1546 | 1827 |  } | 
|---|
| 1547 | 1828 |   | 
|---|
| 1548 | 1829 |  static void bpf_prog_select_func(struct bpf_prog *fp) | 
|---|
| .. | .. | 
|---|
| 1581 | 1862 |  	 * be JITed, but falls back to the interpreter. | 
|---|
| 1582 | 1863 |  	 */ | 
|---|
| 1583 | 1864 |  	if (!bpf_prog_is_dev_bound(fp->aux)) { | 
|---|
 | 1865 | +		*err = bpf_prog_alloc_jited_linfo(fp);  | 
|---|
 | 1866 | +		if (*err)  | 
|---|
 | 1867 | +			return fp;  | 
|---|
 | 1868 | +  | 
|---|
| 1584 | 1869 |  		fp = bpf_int_jit_compile(fp); | 
|---|
| 1585 |  | -#ifdef CONFIG_BPF_JIT_ALWAYS_ON  | 
|---|
| 1586 | 1870 |  		if (!fp->jited) { | 
|---|
 | 1871 | +			bpf_prog_free_jited_linfo(fp);  | 
|---|
 | 1872 | +#ifdef CONFIG_BPF_JIT_ALWAYS_ON  | 
|---|
| 1587 | 1873 |  			*err = -ENOTSUPP; | 
|---|
| 1588 | 1874 |  			return fp; | 
|---|
| 1589 |  | -		}  | 
|---|
| 1590 | 1875 |  #endif | 
|---|
 | 1876 | +		} else {  | 
|---|
 | 1877 | +			bpf_prog_free_unused_jited_linfo(fp);  | 
|---|
 | 1878 | +		}  | 
|---|
| 1591 | 1879 |  	} else { | 
|---|
| 1592 | 1880 |  		*err = bpf_prog_offload_compile(fp); | 
|---|
| 1593 | 1881 |  		if (*err) | 
|---|
| .. | .. | 
|---|
| 1646 | 1934 |  	return &empty_prog_array.hdr; | 
|---|
| 1647 | 1935 |  } | 
|---|
| 1648 | 1936 |   | 
|---|
| 1649 |  | -void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)  | 
|---|
 | 1937 | +void bpf_prog_array_free(struct bpf_prog_array *progs)  | 
|---|
| 1650 | 1938 |  { | 
|---|
| 1651 |  | -	if (!progs ||  | 
|---|
| 1652 |  | -	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)  | 
|---|
 | 1939 | +	if (!progs || progs == &empty_prog_array.hdr)  | 
|---|
| 1653 | 1940 |  		return; | 
|---|
| 1654 | 1941 |  	kfree_rcu(progs, rcu); | 
|---|
| 1655 | 1942 |  } | 
|---|
| 1656 | 1943 |   | 
|---|
| 1657 |  | -int bpf_prog_array_length(struct bpf_prog_array __rcu *array)  | 
|---|
 | 1944 | +int bpf_prog_array_length(struct bpf_prog_array *array)  | 
|---|
| 1658 | 1945 |  { | 
|---|
| 1659 | 1946 |  	struct bpf_prog_array_item *item; | 
|---|
| 1660 | 1947 |  	u32 cnt = 0; | 
|---|
| 1661 | 1948 |   | 
|---|
| 1662 |  | -	rcu_read_lock();  | 
|---|
| 1663 |  | -	item = rcu_dereference(array)->items;  | 
|---|
| 1664 |  | -	for (; item->prog; item++)  | 
|---|
 | 1949 | +	for (item = array->items; item->prog; item++)  | 
|---|
| 1665 | 1950 |  		if (item->prog != &dummy_bpf_prog.prog) | 
|---|
| 1666 | 1951 |  			cnt++; | 
|---|
| 1667 |  | -	rcu_read_unlock();  | 
|---|
| 1668 | 1952 |  	return cnt; | 
|---|
| 1669 | 1953 |  } | 
|---|
| 1670 | 1954 |   | 
|---|
 | 1955 | +bool bpf_prog_array_is_empty(struct bpf_prog_array *array)  | 
|---|
 | 1956 | +{  | 
|---|
 | 1957 | +	struct bpf_prog_array_item *item;  | 
|---|
| 1671 | 1958 |   | 
|---|
| 1672 |  | -static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,  | 
|---|
 | 1959 | +	for (item = array->items; item->prog; item++)  | 
|---|
 | 1960 | +		if (item->prog != &dummy_bpf_prog.prog)  | 
|---|
 | 1961 | +			return false;  | 
|---|
 | 1962 | +	return true;  | 
|---|
 | 1963 | +}  | 
|---|
 | 1964 | +  | 
|---|
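Both helpers above walk the items until the NULL-prog terminator and ignore entries pointing at dummy_bpf_prog: deleted slots are overwritten with the dummy rather than compacted, so concurrent readers never race against a moving terminator. A simplified sketch of the two conventions:

```c
#include <stddef.h>
#include <stdio.h>

struct prog { int id; };

static struct prog dummy_prog;		/* stands in for dummy_bpf_prog */

static int array_length(struct prog **items)
{
	int cnt = 0;

	for (; *items; items++)		/* NULL prog terminates the array */
		if (*items != &dummy_prog)
			cnt++;		/* dummies are deleted slots */
	return cnt;
}

int main(void)
{
	struct prog a = { 1 }, b = { 2 };
	struct prog *items[] = { &a, &dummy_prog, &b, NULL };

	printf("%d\n", array_length(items));	/* prints 2 */
	return 0;
}
```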
 | 1965 | +static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,  | 
|---|
| 1673 | 1966 |  				     u32 *prog_ids, | 
|---|
| 1674 | 1967 |  				     u32 request_cnt) | 
|---|
| 1675 | 1968 |  { | 
|---|
| 1676 | 1969 |  	struct bpf_prog_array_item *item; | 
|---|
| 1677 | 1970 |  	int i = 0; | 
|---|
| 1678 | 1971 |   | 
|---|
| 1679 |  | -	item = rcu_dereference_check(array, 1)->items;  | 
|---|
| 1680 |  | -	for (; item->prog; item++) {  | 
|---|
 | 1972 | +	for (item = array->items; item->prog; item++) {  | 
|---|
| 1681 | 1973 |  		if (item->prog == &dummy_bpf_prog.prog) | 
|---|
| 1682 | 1974 |  			continue; | 
|---|
| 1683 | 1975 |  		prog_ids[i] = item->prog->aux->id; | 
|---|
| .. | .. | 
|---|
| 1690 | 1982 |  	return !!(item->prog); | 
|---|
| 1691 | 1983 |  } | 
|---|
| 1692 | 1984 |   | 
|---|
| 1693 |  | -int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,  | 
|---|
 | 1985 | +int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,  | 
|---|
| 1694 | 1986 |  				__u32 __user *prog_ids, u32 cnt) | 
|---|
| 1695 | 1987 |  { | 
|---|
| 1696 | 1988 |  	unsigned long err = 0; | 
|---|
| .. | .. | 
|---|
| 1701 | 1993 |  	 * cnt = bpf_prog_array_length(); | 
|---|
| 1702 | 1994 |  	 * if (cnt > 0) | 
|---|
| 1703 | 1995 |  	 *     bpf_prog_array_copy_to_user(..., cnt); | 
|---|
| 1704 |  | -	 * so below kcalloc doesn't need extra cnt > 0 check, but  | 
|---|
| 1705 |  | -	 * bpf_prog_array_length() releases rcu lock and  | 
|---|
| 1706 |  | -	 * prog array could have been swapped with empty or larger array,  | 
|---|
| 1707 |  | -	 * so always copy 'cnt' prog_ids to the user.  | 
|---|
| 1708 |  | -	 * In a rare race the user will see zero prog_ids  | 
|---|
 | 1996 | +	 * so below kcalloc doesn't need extra cnt > 0 check.  | 
|---|
| 1709 | 1997 |  	 */ | 
|---|
| 1710 | 1998 |  	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); | 
|---|
| 1711 | 1999 |  	if (!ids) | 
|---|
| 1712 | 2000 |  		return -ENOMEM; | 
|---|
| 1713 |  | -	rcu_read_lock();  | 
|---|
| 1714 | 2001 |  	nospc = bpf_prog_array_copy_core(array, ids, cnt); | 
|---|
| 1715 |  | -	rcu_read_unlock();  | 
|---|
| 1716 | 2002 |  	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); | 
|---|
| 1717 | 2003 |  	kfree(ids); | 
|---|
| 1718 | 2004 |  	if (err) | 
|---|
| .. | .. | 
|---|
| 1722 | 2008 |  	return 0; | 
|---|
| 1723 | 2009 |  } | 
|---|
| 1724 | 2010 |   | 
|---|
| 1725 |  | -void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,  | 
|---|
 | 2011 | +void bpf_prog_array_delete_safe(struct bpf_prog_array *array,  | 
|---|
| 1726 | 2012 |  				struct bpf_prog *old_prog) | 
|---|
| 1727 | 2013 |  { | 
|---|
| 1728 |  | -	struct bpf_prog_array_item *item = array->items;  | 
|---|
 | 2014 | +	struct bpf_prog_array_item *item;  | 
|---|
| 1729 | 2015 |   | 
|---|
| 1730 |  | -	for (; item->prog; item++)  | 
|---|
 | 2016 | +	for (item = array->items; item->prog; item++)  | 
|---|
| 1731 | 2017 |  		if (item->prog == old_prog) { | 
|---|
| 1732 | 2018 |  			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); | 
|---|
| 1733 | 2019 |  			break; | 
|---|
| 1734 | 2020 |  		} | 
|---|
| 1735 | 2021 |  } | 
|---|
| 1736 | 2022 |   | 
|---|
| 1737 |  | -int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,  | 
|---|
 | 2023 | +/**  | 
|---|
 | 2024 | + * bpf_prog_array_delete_safe_at() - Replaces the program at the given  | 
|---|
 | 2025 | + *                                   index into the program array with  | 
|---|
 | 2026 | + *                                   a dummy no-op program.  | 
|---|
 | 2027 | + * @array: a bpf_prog_array  | 
|---|
 | 2028 | + * @index: the index of the program to replace  | 
|---|
 | 2029 | + *  | 
|---|
 | 2030 | + * Skips over dummy programs (by not counting them) when calculating  | 
|---|
 | 2031 | + * the position of the program to replace.  | 
|---|
 | 2032 | + *  | 
|---|
 | 2033 | + * Return:  | 
|---|
 | 2034 | + * * 0		- Success  | 
|---|
 | 2035 | + * * -EINVAL	- Invalid index value. Must be a non-negative integer.  | 
|---|
 | 2036 | + * * -ENOENT	- Index out of range  | 
|---|
 | 2037 | + */  | 
|---|
 | 2038 | +int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)  | 
|---|
 | 2039 | +{  | 
|---|
 | 2040 | +	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);  | 
|---|
 | 2041 | +}  | 
|---|
 | 2042 | +  | 
|---|
 | 2043 | +/**  | 
|---|
 | 2044 | + * bpf_prog_array_update_at() - Updates the program at the given index  | 
|---|
 | 2045 | + *                              into the program array.  | 
|---|
 | 2046 | + * @array: a bpf_prog_array  | 
|---|
 | 2047 | + * @index: the index of the program to update  | 
|---|
 | 2048 | + * @prog: the program to insert into the array  | 
|---|
 | 2049 | + *  | 
|---|
 | 2050 | + * Skips over dummy programs (by not counting them) when calculating  | 
|---|
 | 2051 | + * the position of the program to update.  | 
|---|
 | 2052 | + *  | 
|---|
 | 2053 | + * Return:  | 
|---|
 | 2054 | + * * 0		- Success  | 
|---|
 | 2055 | + * * -EINVAL	- Invalid index value. Must be a non-negative integer.  | 
|---|
 | 2056 | + * * -ENOENT	- Index out of range  | 
|---|
 | 2057 | + */  | 
|---|
 | 2058 | +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,  | 
|---|
 | 2059 | +			     struct bpf_prog *prog)  | 
|---|
 | 2060 | +{  | 
|---|
 | 2061 | +	struct bpf_prog_array_item *item;  | 
|---|
 | 2062 | +  | 
|---|
 | 2063 | +	if (unlikely(index < 0))  | 
|---|
 | 2064 | +		return -EINVAL;  | 
|---|
 | 2065 | +  | 
|---|
 | 2066 | +	for (item = array->items; item->prog; item++) {  | 
|---|
 | 2067 | +		if (item->prog == &dummy_bpf_prog.prog)  | 
|---|
 | 2068 | +			continue;  | 
|---|
 | 2069 | +		if (!index) {  | 
|---|
 | 2070 | +			WRITE_ONCE(item->prog, prog);  | 
|---|
 | 2071 | +			return 0;  | 
|---|
 | 2072 | +		}  | 
|---|
 | 2073 | +		index--;  | 
|---|
 | 2074 | +	}  | 
|---|
 | 2075 | +	return -ENOENT;  | 
|---|
 | 2076 | +}  | 
|---|
 | 2077 | +  | 
|---|
 | 2078 | +int bpf_prog_array_copy(struct bpf_prog_array *old_array,  | 
|---|
| 1738 | 2079 |  			struct bpf_prog *exclude_prog, | 
|---|
| 1739 | 2080 |  			struct bpf_prog *include_prog, | 
|---|
| 1740 | 2081 |  			struct bpf_prog_array **new_array) | 
|---|
| .. | .. | 
|---|
| 1798 | 2139 |  	return 0; | 
|---|
| 1799 | 2140 |  } | 
|---|
| 1800 | 2141 |   | 
|---|
| 1801 |  | -int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,  | 
|---|
 | 2142 | +int bpf_prog_array_copy_info(struct bpf_prog_array *array,  | 
|---|
| 1802 | 2143 |  			     u32 *prog_ids, u32 request_cnt, | 
|---|
| 1803 | 2144 |  			     u32 *prog_cnt) | 
|---|
| 1804 | 2145 |  { | 
|---|
| .. | .. | 
|---|
| 1818 | 2159 |  								     : 0; | 
|---|
| 1819 | 2160 |  } | 
|---|
| 1820 | 2161 |   | 
|---|
 | 2162 | +void __bpf_free_used_maps(struct bpf_prog_aux *aux,  | 
|---|
 | 2163 | +			  struct bpf_map **used_maps, u32 len)  | 
|---|
 | 2164 | +{  | 
|---|
 | 2165 | +	struct bpf_map *map;  | 
|---|
 | 2166 | +	u32 i;  | 
|---|
 | 2167 | +  | 
|---|
 | 2168 | +	for (i = 0; i < len; i++) {  | 
|---|
 | 2169 | +		map = used_maps[i];  | 
|---|
 | 2170 | +		if (map->ops->map_poke_untrack)  | 
|---|
 | 2171 | +			map->ops->map_poke_untrack(map, aux);  | 
|---|
 | 2172 | +		bpf_map_put(map);  | 
|---|
 | 2173 | +	}  | 
|---|
 | 2174 | +}  | 
|---|
 | 2175 | +  | 
|---|
 | 2176 | +static void bpf_free_used_maps(struct bpf_prog_aux *aux)  | 
|---|
 | 2177 | +{  | 
|---|
 | 2178 | +	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);  | 
|---|
 | 2179 | +	kfree(aux->used_maps);  | 
|---|
 | 2180 | +}  | 
|---|
 | 2181 | +  | 
|---|
| 1821 | 2182 |  static void bpf_prog_free_deferred(struct work_struct *work) | 
|---|
| 1822 | 2183 |  { | 
|---|
| 1823 | 2184 |  	struct bpf_prog_aux *aux; | 
|---|
| 1824 | 2185 |  	int i; | 
|---|
| 1825 | 2186 |   | 
|---|
| 1826 | 2187 |  	aux = container_of(work, struct bpf_prog_aux, work); | 
|---|
 | 2188 | +	bpf_free_used_maps(aux);  | 
|---|
| 1827 | 2189 |  	if (bpf_prog_is_dev_bound(aux)) | 
|---|
| 1828 | 2190 |  		bpf_prog_offload_destroy(aux->prog); | 
|---|
| 1829 | 2191 |  #ifdef CONFIG_PERF_EVENTS | 
|---|
| 1830 | 2192 |  	if (aux->prog->has_callchain_buf) | 
|---|
| 1831 | 2193 |  		put_callchain_buffers(); | 
|---|
| 1832 | 2194 |  #endif | 
|---|
 | 2195 | +	if (aux->dst_trampoline)  | 
|---|
 | 2196 | +		bpf_trampoline_put(aux->dst_trampoline);  | 
|---|
| 1833 | 2197 |  	for (i = 0; i < aux->func_cnt; i++) | 
|---|
| 1834 | 2198 |  		bpf_jit_free(aux->func[i]); | 
|---|
| 1835 | 2199 |  	if (aux->func_cnt) { | 
|---|
| .. | .. | 
|---|
| 1845 | 2209 |  { | 
|---|
| 1846 | 2210 |  	struct bpf_prog_aux *aux = fp->aux; | 
|---|
| 1847 | 2211 |   | 
|---|
 | 2212 | +	if (aux->dst_prog)  | 
|---|
 | 2213 | +		bpf_prog_put(aux->dst_prog);  | 
|---|
| 1848 | 2214 |  	INIT_WORK(&aux->work, bpf_prog_free_deferred); | 
|---|
| 1849 | 2215 |  	schedule_work(&aux->work); | 
|---|
| 1850 | 2216 |  } | 
|---|
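bpf_prog_free() defers the real teardown to a workqueue because the final reference may be dropped from contexts where the teardown's sleeping operations (vfree(), taking mutexes) are not allowed. A rough userspace analogue of the hand-off, using a thread in place of INIT_WORK()/schedule_work() (build with -pthread):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct prog { int id; };

/* Runs later, in a context where sleeping and freeing are fine. */
static void *prog_free_deferred(void *arg)
{
	struct prog *p = arg;

	printf("freeing prog %d\n", p->id);
	free(p);
	return NULL;
}

/* Like bpf_prog_free(): queue the teardown and return immediately. */
static pthread_t prog_free(struct prog *p)
{
	pthread_t worker;

	pthread_create(&worker, NULL, prog_free_deferred, p);
	return worker;
}

int main(void)
{
	struct prog *p = malloc(sizeof(*p));

	p->id = 42;
	/* The join is only so the demo exits after the worker ran;
	 * the kernel caller just returns. */
	pthread_join(prog_free(p), NULL);
	return 0;
}
```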
| .. | .. | 
|---|
| 1876 | 2242 |  	return res; | 
|---|
| 1877 | 2243 |  } | 
|---|
| 1878 | 2244 |   | 
|---|
 | 2245 | +BPF_CALL_0(bpf_get_raw_cpu_id)  | 
|---|
 | 2246 | +{  | 
|---|
 | 2247 | +	return raw_smp_processor_id();  | 
|---|
 | 2248 | +}  | 
|---|
 | 2249 | +  | 
|---|
| 1879 | 2250 |  /* Weak definitions of helper functions in case we don't have bpf syscall. */ | 
|---|
| 1880 | 2251 |  const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; | 
|---|
| 1881 | 2252 |  const struct bpf_func_proto bpf_map_update_elem_proto __weak; | 
|---|
| 1882 | 2253 |  const struct bpf_func_proto bpf_map_delete_elem_proto __weak; | 
|---|
 | 2254 | +const struct bpf_func_proto bpf_map_push_elem_proto __weak;  | 
|---|
 | 2255 | +const struct bpf_func_proto bpf_map_pop_elem_proto __weak;  | 
|---|
 | 2256 | +const struct bpf_func_proto bpf_map_peek_elem_proto __weak;  | 
|---|
 | 2257 | +const struct bpf_func_proto bpf_spin_lock_proto __weak;  | 
|---|
 | 2258 | +const struct bpf_func_proto bpf_spin_unlock_proto __weak;  | 
|---|
 | 2259 | +const struct bpf_func_proto bpf_jiffies64_proto __weak;  | 
|---|
| 1883 | 2260 |   | 
|---|
| 1884 | 2261 |  const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; | 
|---|
| 1885 | 2262 |  const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; | 
|---|
| .. | .. | 
|---|
| 1890 | 2267 |  const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; | 
|---|
| 1891 | 2268 |  const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; | 
|---|
| 1892 | 2269 |  const struct bpf_func_proto bpf_get_current_comm_proto __weak; | 
|---|
| 1893 |  | -const struct bpf_func_proto bpf_sock_map_update_proto __weak;  | 
|---|
| 1894 |  | -const struct bpf_func_proto bpf_sock_hash_update_proto __weak;  | 
|---|
| 1895 | 2270 |  const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; | 
|---|
 | 2271 | +const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;  | 
|---|
| 1896 | 2272 |  const struct bpf_func_proto bpf_get_local_storage_proto __weak; | 
|---|
 | 2273 | +const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;  | 
|---|
 | 2274 | +const struct bpf_func_proto bpf_snprintf_btf_proto __weak;  | 
|---|
 | 2275 | +const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;  | 
|---|
| 1897 | 2276 |   | 
|---|
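The `__weak` markers make these definitions link-time fallbacks: if the syscall or tracing code provides a strong definition, the linker silently picks that one; otherwise the weak stub keeps the core buildable. A minimal illustration of the mechanism with a hypothetical function:

```c
#include <stdio.h>

/* Weak fallback: replaced at link time if any other object file in
 * the link provides a non-weak bpf_feature_supported(). */
__attribute__((weak)) int bpf_feature_supported(void)
{
	return 0;
}

int main(void)
{
	/* Prints 0 here; linking in a strong definition changes the
	 * answer without touching this file. */
	printf("%d\n", bpf_feature_supported());
	return 0;
}
```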
| 1898 | 2277 |  const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) | 
|---|
| 1899 | 2278 |  { | 
|---|
| .. | .. | 
|---|
| 1939 | 2318 |  	return false; | 
|---|
| 1940 | 2319 |  } | 
|---|
| 1941 | 2320 |   | 
|---|
 | 2321 | +/* Return TRUE if the JIT backend wants verifier to enable sub-register usage  | 
|---|
 | 2322 | + * analysis code and wants explicit zero extension inserted by verifier.  | 
|---|
 | 2323 | + * Otherwise, return FALSE.  | 
|---|
 | 2324 | + */  | 
|---|
 | 2325 | +bool __weak bpf_jit_needs_zext(void)  | 
|---|
 | 2326 | +{  | 
|---|
 | 2327 | +	return false;  | 
|---|
 | 2328 | +}  | 
|---|
 | 2329 | +  | 
|---|
| 1942 | 2330 |  /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call | 
|---|
| 1943 | 2331 |   * skb_copy_bits(), so provide a weak definition of it for NET-less config. | 
|---|
| 1944 | 2332 |   */ | 
|---|
| .. | .. | 
|---|
| 1948 | 2336 |  	return -EFAULT; | 
|---|
| 1949 | 2337 |  } | 
|---|
| 1950 | 2338 |   | 
|---|
 | 2339 | +int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,  | 
|---|
 | 2340 | +			      void *addr1, void *addr2)  | 
|---|
 | 2341 | +{  | 
|---|
 | 2342 | +	return -ENOTSUPP;  | 
|---|
 | 2343 | +}  | 
|---|
 | 2344 | +  | 
|---|
 | 2345 | +DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);  | 
|---|
 | 2346 | +EXPORT_SYMBOL(bpf_stats_enabled_key);  | 
|---|
 | 2347 | +  | 
|---|
| 1951 | 2348 |  /* All definitions of tracepoints related to BPF. */ | 
|---|
 | 2349 | +#undef TRACE_INCLUDE_PATH  | 
|---|
| 1952 | 2350 |  #define CREATE_TRACE_POINTS | 
|---|
| 1953 | 2351 |  #include <linux/bpf_trace.h> | 
|---|
| 1954 | 2352 |   | 
|---|
| 1955 | 2353 |  EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); | 
|---|
 | 2354 | +EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);  | 
|---|