@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below.  You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *      Redistribution and use in source and binary forms, with or
- *      without modification, are permitted provided that the following
- *      conditions are met:
- *
- *      1. Redistributions of source code must retain the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer.
- *
- *      2. Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials
- *         provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
 
 /*
  * nfp_net_offload.c
@@ -52,6 +22,7 @@
 #include <net/tc_act/tc_mirred.h>
 
 #include "main.h"
+#include "../ccm.h"
 #include "../nfp_app.h"
 #include "../nfp_net_ctrl.h"
 #include "../nfp_net.h"
@@ -62,9 +33,6 @@
 {
 	struct nfp_bpf_neutral_map *record;
 	int err;
-
-	/* Map record paths are entered via ndo, update side is protected. */
-	ASSERT_RTNL();
 
 	/* Reuse path - other offloaded program is already tracking this map. */
 	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
@@ -78,9 +46,7 @@
 	/* Grab a single ref to the map for our record. The prog destroy ndo
 	 * happens after free_used_maps().
 	 */
-	map = bpf_map_inc(map, false);
-	if (IS_ERR(map))
-		return PTR_ERR(map);
+	bpf_map_inc(map);
 
 	record = kmalloc(sizeof(*record), GFP_KERNEL);
 	if (!record) {
@@ -114,8 +80,6 @@
 	bool freed = false;
 	int i;
 
-	ASSERT_RTNL();
-
 	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
 		if (--nfp_prog->map_records[i]->count) {
 			nfp_prog->map_records[i] = NULL;
@@ -147,34 +111,40 @@
 nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 		    struct bpf_prog *prog)
 {
-	int i, cnt, err;
+	int i, cnt, err = 0;
+
+	mutex_lock(&prog->aux->used_maps_mutex);
 
 	/* Quickly count the maps we will have to remember */
 	cnt = 0;
 	for (i = 0; i < prog->aux->used_map_cnt; i++)
 		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
 			cnt++;
 	if (!cnt)
-		return 0;
+		goto out;
 
 	nfp_prog->map_records = kmalloc_array(cnt,
 					      sizeof(nfp_prog->map_records[0]),
 					      GFP_KERNEL);
-	if (!nfp_prog->map_records)
-		return -ENOMEM;
+	if (!nfp_prog->map_records) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	for (i = 0; i < prog->aux->used_map_cnt; i++)
 		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
			err = nfp_map_ptr_record(bpf, nfp_prog,
 						 prog->aux->used_maps[i]);
 			if (err) {
 				nfp_map_ptrs_forget(bpf, nfp_prog);
-				return err;
+				goto out;
 			}
 		}
 	WARN_ON(cnt != nfp_prog->map_records_cnt);
 
-	return 0;
+out:
+	mutex_unlock(&prog->aux->used_maps_mutex);
+	return err;
 }
 
 static int
@@ -198,8 +168,9 @@
 
 		list_add_tail(&meta->l, &nfp_prog->insns);
 	}
+	nfp_prog->n_insns = cnt;
 
-	nfp_bpf_jit_prepare(nfp_prog, cnt);
+	nfp_bpf_jit_prepare(nfp_prog);
 
 	return 0;
 }
@@ -208,6 +179,8 @@
 {
 	struct nfp_insn_meta *meta, *tmp;
 
+	kfree(nfp_prog->subprog);
+
 	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
 		list_del(&meta->l);
 		kfree(meta);
@@ -215,11 +188,8 @@
 	kfree(nfp_prog);
 }
 
-static int
-nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-		      struct netdev_bpf *bpf)
+static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
 {
-	struct bpf_prog *prog = bpf->verifier.prog;
 	struct nfp_prog *nfp_prog;
 	int ret;
 
@@ -230,14 +200,13 @@
 
 	INIT_LIST_HEAD(&nfp_prog->insns);
 	nfp_prog->type = prog->type;
-	nfp_prog->bpf = app->priv;
+	nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);
 
 	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
 	if (ret)
 		goto err_free;
 
 	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
-	bpf->verifier.ops = &nfp_bpf_analyzer_ops;
 
 	return 0;
 
@@ -247,20 +216,16 @@
 	return ret;
 }
 
-static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_bpf_translate(struct bpf_prog *prog)
 {
+	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
-	unsigned int stack_size;
 	unsigned int max_instr;
 	int err;
 
-	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
-	if (prog->aux->stack_depth > stack_size) {
-		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
-			prog->aux->stack_depth, stack_size);
-		return -EOPNOTSUPP;
-	}
-	nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
+	/* We depend on dead code elimination succeeding */
+	if (prog->aux->offload->opt_failed)
+		return -EINVAL;
 
 	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
 	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
@@ -279,15 +244,13 @@
 	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
 }
 
-static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
+static void nfp_bpf_destroy(struct bpf_prog *prog)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 
 	kvfree(nfp_prog->prog);
 	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
 	nfp_prog_free(nfp_prog);
-
-	return 0;
 }
 
 /* Atomic engine requires values to be in big endian, we need to byte swap
@@ -417,7 +380,7 @@
 	}
 
 	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
-		       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);
+		       sizeof_field(struct nfp_bpf_map, use_map[0]);
 
 	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
 	if (!nfp_map)
@@ -426,6 +389,7 @@
 	offmap->dev_priv = nfp_map;
 	nfp_map->offmap = offmap;
 	nfp_map->bpf = bpf;
+	spin_lock_init(&nfp_map->cache_lock);
 
 	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
 	if (res < 0) {
@@ -448,6 +412,8 @@
 	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
 
 	nfp_bpf_ctrl_free_map(bpf, nfp_map);
+	dev_consume_skb_any(nfp_map->cache);
+	WARN_ON_ONCE(nfp_map->cache_blockers);
 	list_del_init(&nfp_map->l);
 	bpf->map_elems_in_use -= offmap->map.max_entries;
 	bpf->maps_in_use--;
@@ -459,12 +425,6 @@
 int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
 {
 	switch (bpf->command) {
-	case BPF_OFFLOAD_VERIFIER_PREP:
-		return nfp_bpf_verifier_prep(app, nn, bpf);
-	case BPF_OFFLOAD_TRANSLATE:
-		return nfp_bpf_translate(nn, bpf->offload.prog);
-	case BPF_OFFLOAD_DESTROY:
-		return nfp_bpf_destroy(nn, bpf->offload.prog);
 	case BPF_OFFLOAD_MAP_ALLOC:
 		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
 	case BPF_OFFLOAD_MAP_FREE:
@@ -500,12 +460,12 @@
 
 	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
 		return -EINVAL;
-	if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
+	if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
 		return -EINVAL;
 
 	rcu_read_lock();
-	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
-					nfp_bpf_maps_neutral_params);
+	record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
+				   nfp_bpf_maps_neutral_params);
 	if (!record || map_id_full > U32_MAX) {
 		rcu_read_unlock();
 		cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
@@ -521,19 +481,41 @@
 	return 0;
 }
 
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+			       unsigned int mtu)
+{
+	unsigned int fw_mtu, pkt_off;
+
+	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+	pkt_off = min(prog->aux->max_pkt_offset, mtu);
+
+	return fw_mtu < pkt_off;
+}
+
 static int
 nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
 		 struct netlink_ext_ack *extack)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
-	unsigned int max_mtu;
+	unsigned int max_stack, max_prog_len;
 	dma_addr_t dma_addr;
 	void *img;
 	int err;
 
-	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-	if (max_mtu < nn->dp.netdev->mtu) {
-		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
+	if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
+		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
+		return -EOPNOTSUPP;
+	}
+
+	max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+	if (nfp_prog->stack_size > max_stack) {
+		NL_SET_ERR_MSG_MOD(extack, "stack too large");
+		return -EOPNOTSUPP;
+	}
+
+	max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+	if (nfp_prog->prog_len > max_prog_len) {
+		NL_SET_ERR_MSG_MOD(extack, "program too long");
 		return -EOPNOTSUPP;
 	}
 
@@ -625,3 +607,13 @@
 
 	return 0;
 }
+
+const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
+	.insn_hook	= nfp_verify_insn,
+	.finalize	= nfp_bpf_finalize,
+	.replace_insn	= nfp_bpf_opt_replace_insn,
+	.remove_insns	= nfp_bpf_opt_remove_insns,
+	.prepare	= nfp_bpf_verifier_prep,
+	.translate	= nfp_bpf_translate,
+	.destroy	= nfp_bpf_destroy,
+};
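
For context on where the new callback table plugs in: the kernel's BPF offload core takes a struct bpf_prog_offload_ops plus a driver-private pointer at device registration, and bpf_offload_dev_priv() (used above in nfp_bpf_verifier_prep()) returns that pointer back to the callbacks. A minimal sketch of such a registration, assuming it lives in the driver's app-init path; the function name and the bpf_dev member are illustrative, not taken from this diff:

/* Sketch only: registering the callback table with the BPF offload core.
 * bpf_offload_dev_create() takes the ops table and a driver-private
 * pointer, which callbacks such as nfp_bpf_verifier_prep() later recover
 * via bpf_offload_dev_priv().
 */
static int nfp_bpf_register_offload_dev(struct nfp_app_bpf *bpf)
{
	struct bpf_offload_dev *bdev;

	bdev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	bpf->bpf_dev = bdev;	/* assumes a bpf_dev member on nfp_app_bpf */
	return 0;
}

This is why the diff can drop the BPF_OFFLOAD_VERIFIER_PREP/TRANSLATE/DESTROY cases from nfp_ndo_bpf(): prepare, translate, and destroy are now invoked through the registered ops table rather than through the ndo, so the callbacks derive their device state from prog->aux->offload instead of taking nfp_app/nfp_net arguments.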
---|