
hc
2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *   Redistribution and use in source and binary forms, with or
- *   without modification, are permitted provided that the following
- *   conditions are met:
- *
- *   1. Redistributions of source code must retain the above
- *      copyright notice, this list of conditions and the following
- *      disclaimer.
- *
- *   2. Redistributions in binary form must reproduce the above
- *      copyright notice, this list of conditions and the following
- *      disclaimer in the documentation and/or other materials
- *      provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
 
 /*
  * nfp_net_offload.c
@@ -52,6 +22,7 @@
 #include <net/tc_act/tc_mirred.h>
 
 #include "main.h"
+#include "../ccm.h"
 #include "../nfp_app.h"
 #include "../nfp_net_ctrl.h"
 #include "../nfp_net.h"
@@ -62,9 +33,6 @@
 {
 	struct nfp_bpf_neutral_map *record;
 	int err;
-
-	/* Map record paths are entered via ndo, update side is protected. */
-	ASSERT_RTNL();
 
 	/* Reuse path - other offloaded program is already tracking this map. */
 	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
@@ -78,9 +46,7 @@
 	/* Grab a single ref to the map for our record. The prog destroy ndo
 	 * happens after free_used_maps().
 	 */
-	map = bpf_map_inc(map, false);
-	if (IS_ERR(map))
-		return PTR_ERR(map);
+	bpf_map_inc(map);
 
 	record = kmalloc(sizeof(*record), GFP_KERNEL);
 	if (!record) {
@@ -114,8 +80,6 @@
 	bool freed = false;
 	int i;
 
-	ASSERT_RTNL();
-
 	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
 		if (--nfp_prog->map_records[i]->count) {
 			nfp_prog->map_records[i] = NULL;
@@ -147,7 +111,9 @@
 nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 		    struct bpf_prog *prog)
 {
-	int i, cnt, err;
+	int i, cnt, err = 0;
+
+	mutex_lock(&prog->aux->used_maps_mutex);
 
 	/* Quickly count the maps we will have to remember */
 	cnt = 0;
@@ -155,13 +121,15 @@
 		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
 			cnt++;
 	if (!cnt)
-		return 0;
+		goto out;
 
 	nfp_prog->map_records = kmalloc_array(cnt,
 					      sizeof(nfp_prog->map_records[0]),
 					      GFP_KERNEL);
-	if (!nfp_prog->map_records)
-		return -ENOMEM;
+	if (!nfp_prog->map_records) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	for (i = 0; i < prog->aux->used_map_cnt; i++)
 		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
@@ -169,12 +137,14 @@
 						       prog->aux->used_maps[i]);
 			if (err) {
 				nfp_map_ptrs_forget(bpf, nfp_prog);
-				return err;
+				goto out;
 			}
 		}
 	WARN_ON(cnt != nfp_prog->map_records_cnt);
 
-	return 0;
+out:
+	mutex_unlock(&prog->aux->used_maps_mutex);
+	return err;
 }
 
 static int
@@ -198,8 +168,9 @@
 
 		list_add_tail(&meta->l, &nfp_prog->insns);
 	}
+	nfp_prog->n_insns = cnt;
 
-	nfp_bpf_jit_prepare(nfp_prog, cnt);
+	nfp_bpf_jit_prepare(nfp_prog);
 
 	return 0;
 }
@@ -208,6 +179,8 @@
 {
 	struct nfp_insn_meta *meta, *tmp;
 
+	kfree(nfp_prog->subprog);
+
 	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
 		list_del(&meta->l);
 		kfree(meta);
@@ -215,11 +188,8 @@
 	kfree(nfp_prog);
 }
 
-static int
-nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-		      struct netdev_bpf *bpf)
+static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
 {
-	struct bpf_prog *prog = bpf->verifier.prog;
 	struct nfp_prog *nfp_prog;
 	int ret;
 
@@ -230,14 +200,13 @@
 
 	INIT_LIST_HEAD(&nfp_prog->insns);
 	nfp_prog->type = prog->type;
-	nfp_prog->bpf = app->priv;
+	nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);
 
 	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
 	if (ret)
 		goto err_free;
 
 	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
-	bpf->verifier.ops = &nfp_bpf_analyzer_ops;
 
 	return 0;
 
@@ -247,20 +216,16 @@
 	return ret;
 }
 
-static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_bpf_translate(struct bpf_prog *prog)
 {
+	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
-	unsigned int stack_size;
 	unsigned int max_instr;
 	int err;
 
-	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
-	if (prog->aux->stack_depth > stack_size) {
-		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
-			prog->aux->stack_depth, stack_size);
-		return -EOPNOTSUPP;
-	}
-	nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
+	/* We depend on dead code elimination succeeding */
+	if (prog->aux->offload->opt_failed)
+		return -EINVAL;
 
 	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
 	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
@@ -279,15 +244,13 @@
 	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
 }
 
-static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
+static void nfp_bpf_destroy(struct bpf_prog *prog)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 
 	kvfree(nfp_prog->prog);
 	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
 	nfp_prog_free(nfp_prog);
-
-	return 0;
 }
 
 /* Atomic engine requires values to be in big endian, we need to byte swap
@@ -417,7 +380,7 @@
 	}
 
 	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
-		       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);
+		       sizeof_field(struct nfp_bpf_map, use_map[0]);
 
 	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
 	if (!nfp_map)
@@ -426,6 +389,7 @@
 	offmap->dev_priv = nfp_map;
 	nfp_map->offmap = offmap;
 	nfp_map->bpf = bpf;
+	spin_lock_init(&nfp_map->cache_lock);
 
 	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
 	if (res < 0) {
@@ -448,6 +412,8 @@
 	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
 
 	nfp_bpf_ctrl_free_map(bpf, nfp_map);
+	dev_consume_skb_any(nfp_map->cache);
+	WARN_ON_ONCE(nfp_map->cache_blockers);
 	list_del_init(&nfp_map->l);
 	bpf->map_elems_in_use -= offmap->map.max_entries;
 	bpf->maps_in_use--;
@@ -459,12 +425,6 @@
 int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
 {
 	switch (bpf->command) {
-	case BPF_OFFLOAD_VERIFIER_PREP:
-		return nfp_bpf_verifier_prep(app, nn, bpf);
-	case BPF_OFFLOAD_TRANSLATE:
-		return nfp_bpf_translate(nn, bpf->offload.prog);
-	case BPF_OFFLOAD_DESTROY:
-		return nfp_bpf_destroy(nn, bpf->offload.prog);
 	case BPF_OFFLOAD_MAP_ALLOC:
 		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
 	case BPF_OFFLOAD_MAP_FREE:
@@ -500,12 +460,12 @@
 
 	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
 		return -EINVAL;
-	if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
+	if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
 		return -EINVAL;
 
 	rcu_read_lock();
-	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
-					nfp_bpf_maps_neutral_params);
+	record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
+				   nfp_bpf_maps_neutral_params);
 	if (!record || map_id_full > U32_MAX) {
 		rcu_read_unlock();
 		cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
@@ -521,19 +481,41 @@
 	return 0;
 }
 
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+			       unsigned int mtu)
+{
+	unsigned int fw_mtu, pkt_off;
+
+	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+	pkt_off = min(prog->aux->max_pkt_offset, mtu);
+
+	return fw_mtu < pkt_off;
+}
+
 static int
 nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
 		 struct netlink_ext_ack *extack)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
-	unsigned int max_mtu;
+	unsigned int max_stack, max_prog_len;
 	dma_addr_t dma_addr;
 	void *img;
 	int err;
 
-	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-	if (max_mtu < nn->dp.netdev->mtu) {
-		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
+	if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
+		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
+		return -EOPNOTSUPP;
+	}
+
+	max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+	if (nfp_prog->stack_size > max_stack) {
+		NL_SET_ERR_MSG_MOD(extack, "stack too large");
+		return -EOPNOTSUPP;
+	}
+
+	max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+	if (nfp_prog->prog_len > max_prog_len) {
+		NL_SET_ERR_MSG_MOD(extack, "program too long");
 		return -EOPNOTSUPP;
 	}
 
@@ -625,3 +607,13 @@
 
 	return 0;
 }
+
+const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
+	.insn_hook	= nfp_verify_insn,
+	.finalize	= nfp_bpf_finalize,
+	.replace_insn	= nfp_bpf_opt_replace_insn,
+	.remove_insns	= nfp_bpf_opt_remove_insns,
+	.prepare	= nfp_bpf_verifier_prep,
+	.translate	= nfp_bpf_translate,
+	.destroy	= nfp_bpf_destroy,
+};