2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -1,82 +1,18 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *     1. Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *     2. Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <linux/bpf.h>
 #include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/jiffies.h>
 #include <linux/skbuff.h>
-#include <linux/wait.h>
+#include <linux/timekeeping.h>
 
+#include "../ccm.h"
 #include "../nfp_app.h"
 #include "../nfp_net.h"
 #include "fw.h"
 #include "main.h"
-
-#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)
-
-static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
-{
-	u16 used_tags;
-
-	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
-
-	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
-}
-
-static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
-{
-	/* All FW communication for BPF is request-reply. To make sure we
-	 * don't reuse the message ID too early after timeout - limit the
-	 * number of requests in flight.
-	 */
-	if (nfp_bpf_all_tags_busy(bpf)) {
-		cmsg_warn(bpf, "all FW request contexts busy!\n");
-		return -EAGAIN;
-	}
-
-	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
-	return bpf->tag_alloc_next++;
-}
-
-static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
-	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
-
-	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
-	       bpf->tag_alloc_last != bpf->tag_alloc_next)
-		bpf->tag_alloc_last++;
-}
 
 static struct sk_buff *
 nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
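The tag allocator removed in the hunk above relies on unsigned 16-bit wraparound: bpf->tag_alloc_next - bpf->tag_alloc_last counts tags handed out but not yet freed, and stays correct even after the counters wrap past zero. A minimal standalone sketch of that invariant (NFP_BPF_TAG_ALLOC_SPAN and the counter names come from the removed code; the test harness itself is hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NFP_BPF_TAG_ALLOC_SPAN (UINT16_MAX / 4)

/* Wraparound-safe count of tags handed out but not yet freed.
 * Unsigned subtraction is defined modulo 2^16, so the result is
 * correct even after tag_alloc_next wraps past zero.
 */
static unsigned int used_tags(uint16_t tag_alloc_next, uint16_t tag_alloc_last)
{
	return (uint16_t)(tag_alloc_next - tag_alloc_last);
}

int main(void)
{
	/* tag_alloc_next has wrapped: tags 65500..65535 and 0..9 are in use */
	uint16_t tag_alloc_last = 65500, tag_alloc_next = 10;

	assert(used_tags(tag_alloc_next, tag_alloc_last) == 46);
	assert(used_tags(tag_alloc_next, tag_alloc_last) <= NFP_BPF_TAG_ALLOC_SPAN);
	printf("tags in flight: %u\n", used_tags(tag_alloc_next, tag_alloc_last));
	return 0;
}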
@@ -89,158 +25,32 @@
 	return skb;
 }
 
-static struct sk_buff *
-nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+static unsigned int
+nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
 {
 	unsigned int size;
 
 	size = sizeof(struct cmsg_req_map_op);
-	size += sizeof(struct cmsg_key_value_pair) * n;
+	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
 
-	return nfp_bpf_cmsg_alloc(bpf, size);
-}
-
-static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
-{
-	struct cmsg_hdr *hdr;
-
-	hdr = (struct cmsg_hdr *)skb->data;
-
-	return hdr->type;
-}
-
-static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
-{
-	struct cmsg_hdr *hdr;
-
-	hdr = (struct cmsg_hdr *)skb->data;
-
-	return be16_to_cpu(hdr->tag);
-}
-
-static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
-	unsigned int msg_tag;
-	struct sk_buff *skb;
-
-	skb_queue_walk(&bpf->cmsg_replies, skb) {
-		msg_tag = nfp_bpf_cmsg_get_tag(skb);
-		if (msg_tag == tag) {
-			nfp_bpf_free_tag(bpf, tag);
-			__skb_unlink(skb, &bpf->cmsg_replies);
-			return skb;
-		}
-	}
-
-	return NULL;
-}
-
-static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
-	struct sk_buff *skb;
-
-	nfp_ctrl_lock(bpf->app->ctrl);
-	skb = __nfp_bpf_reply(bpf, tag);
-	nfp_ctrl_unlock(bpf->app->ctrl);
-
-	return skb;
-}
-
-static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
-	struct sk_buff *skb;
-
-	nfp_ctrl_lock(bpf->app->ctrl);
-	skb = __nfp_bpf_reply(bpf, tag);
-	if (!skb)
-		nfp_bpf_free_tag(bpf, tag);
-	nfp_ctrl_unlock(bpf->app->ctrl);
-
-	return skb;
+	return size;
 }
 
 static struct sk_buff *
-nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
-			int tag)
+nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
 {
-	struct sk_buff *skb;
-	int i, err;
-
-	for (i = 0; i < 50; i++) {
-		udelay(4);
-		skb = nfp_bpf_reply(bpf, tag);
-		if (skb)
-			return skb;
-	}
-
-	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
-					       skb = nfp_bpf_reply(bpf, tag),
-					       msecs_to_jiffies(5000));
-	/* We didn't get a response - try last time and atomically drop
-	 * the tag even if no response is matched.
-	 */
-	if (!skb)
-		skb = nfp_bpf_reply_drop_tag(bpf, tag);
-	if (err < 0) {
-		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
-			  err == ERESTARTSYS ? "interrupted" : "error",
-			  type, err);
-		return ERR_PTR(err);
-	}
-	if (!skb) {
-		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
-			  type);
-		return ERR_PTR(-ETIMEDOUT);
-	}
-
-	return skb;
+	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
 }
 
-static struct sk_buff *
-nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
-			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
+static unsigned int
+nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
 {
-	struct cmsg_hdr *hdr;
-	int tag;
+	unsigned int size;
 
-	nfp_ctrl_lock(bpf->app->ctrl);
-	tag = nfp_bpf_alloc_tag(bpf);
-	if (tag < 0) {
-		nfp_ctrl_unlock(bpf->app->ctrl);
-		dev_kfree_skb_any(skb);
-		return ERR_PTR(tag);
-	}
+	size = sizeof(struct cmsg_reply_map_op);
+	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
 
-	hdr = (void *)skb->data;
-	hdr->ver = CMSG_MAP_ABI_VERSION;
-	hdr->type = type;
-	hdr->tag = cpu_to_be16(tag);
-
-	__nfp_app_ctrl_tx(bpf->app, skb);
-
-	nfp_ctrl_unlock(bpf->app->ctrl);
-
-	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
-	if (IS_ERR(skb))
-		return skb;
-
-	hdr = (struct cmsg_hdr *)skb->data;
-	if (hdr->type != __CMSG_REPLY(type)) {
-		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
-			  hdr->type, __CMSG_REPLY(type));
-		goto err_free;
-	}
-	/* 0 reply_size means caller will do the validation */
-	if (reply_size && skb->len != reply_size) {
-		cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
-			  type, skb->len, reply_size);
-		goto err_free;
-	}
-
-	return skb;
-err_free:
-	dev_kfree_skb_any(skb);
-	return ERR_PTR(-EIO);
+	return size;
 }
 
 static int
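In the hunk above, map-op message sizing switches from the fixed struct cmsg_key_value_pair to per-device key/value sizes (bpf->cmsg_key_sz, bpf->cmsg_val_sz). A hedged sketch of the same arithmetic with illustrative numbers (the struct and the header size below are stand-ins, not the real cmsg layout):

#include <stdio.h>

/* Illustrative stand-in; the real sizes come from FW capabilities. */
struct app_bpf {
	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;
};

/* Fixed request header followed by n (key, value) pairs, mirroring
 * nfp_bpf_cmsg_map_req_size() in the hunk above.
 */
static unsigned int map_req_size(const struct app_bpf *bpf,
				 unsigned int hdr_sz, unsigned int n)
{
	return hdr_sz + (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
}

int main(void)
{
	struct app_bpf bpf = { .cmsg_key_sz = 40, .cmsg_val_sz = 64 };

	/* With a hypothetical 16-byte header: 16 + (40 + 64) * 4 = 432 */
	printf("%u bytes\n", map_req_size(&bpf, 16, 4));
	return 0;
}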
@@ -288,8 +98,8 @@
 	req->map_type = cpu_to_be32(map->map_type);
 	req->map_flags = 0;
 
-	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
-				       sizeof(*reply));
+	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
+				  sizeof(*reply));
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
@@ -323,8 +133,8 @@
 	req = (void *)skb->data;
 	req->tid = cpu_to_be32(nfp_map->tid);
 
-	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
-				       sizeof(*reply));
+	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
+				  sizeof(*reply));
 	if (IS_ERR(skb)) {
 		cmsg_warn(bpf, "leaking map - I/O error\n");
 		return;
@@ -338,107 +148,308 @@
 	dev_consume_skb_any(skb);
 }
 
+static void *
+nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+		     unsigned int n)
+{
+	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+		     unsigned int n)
+{
+	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+		       unsigned int n)
+{
+	return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+		       unsigned int n)
+{
+	return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
+static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op)
+{
+	return op == NFP_CCM_TYPE_BPF_MAP_UPDATE ||
+	       op == NFP_CCM_TYPE_BPF_MAP_DELETE;
+}
+
+static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op)
+{
+	return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP ||
+	       op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
+}
+
+static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op)
+{
+	return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST ||
+	       op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
+}
+
+static unsigned int
+nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
+			  const u8 *key, u8 *out_key, u8 *out_value,
+			  u32 *cache_gen)
+{
+	struct bpf_map *map = &nfp_map->offmap->map;
+	struct nfp_app_bpf *bpf = nfp_map->bpf;
+	unsigned int i, count, n_entries;
+	struct cmsg_reply_map_op *reply;
+
+	n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? bpf->cmsg_cache_cnt : 1;
+
+	spin_lock(&nfp_map->cache_lock);
+	*cache_gen = nfp_map->cache_gen;
+	if (nfp_map->cache_blockers)
+		n_entries = 1;
+
+	if (nfp_bpf_ctrl_op_cache_invalidate(op))
+		goto exit_block;
+	if (!nfp_bpf_ctrl_op_cache_capable(op))
+		goto exit_unlock;
+
+	if (!nfp_map->cache)
+		goto exit_unlock;
+	if (nfp_map->cache_to < ktime_get_ns())
+		goto exit_invalidate;
+
+	reply = (void *)nfp_map->cache->data;
+	count = be32_to_cpu(reply->count);
+
+	for (i = 0; i < count; i++) {
+		void *cached_key;
+
+		cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i);
+		if (memcmp(cached_key, key, map->key_size))
+			continue;
+
+		if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP)
+			memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i),
+			       map->value_size);
+		if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) {
+			if (i + 1 == count)
+				break;
+
+			memcpy(out_key,
+			       nfp_bpf_ctrl_reply_key(bpf, reply, i + 1),
+			       map->key_size);
+		}
+
+		n_entries = 0;
+		goto exit_unlock;
+	}
+	goto exit_unlock;
+
+exit_block:
+	nfp_map->cache_blockers++;
+exit_invalidate:
+	dev_consume_skb_any(nfp_map->cache);
+	nfp_map->cache = NULL;
+exit_unlock:
+	spin_unlock(&nfp_map->cache_lock);
+	return n_entries;
+}
+
+static void
+nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
+			  struct sk_buff *skb, u32 cache_gen)
+{
+	bool blocker, filler;
+
+	blocker = nfp_bpf_ctrl_op_cache_invalidate(op);
+	filler = nfp_bpf_ctrl_op_cache_fill(op);
+	if (blocker || filler) {
+		u64 to = 0;
+
+		if (filler)
+			to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS;
+
+		spin_lock(&nfp_map->cache_lock);
+		if (blocker) {
+			nfp_map->cache_blockers--;
+			nfp_map->cache_gen++;
+		}
+		if (filler && !nfp_map->cache_blockers &&
+		    nfp_map->cache_gen == cache_gen) {
+			nfp_map->cache_to = to;
+			swap(nfp_map->cache, skb);
+		}
+		spin_unlock(&nfp_map->cache_lock);
+	}
+
+	dev_consume_skb_any(skb);
+}
+
 static int
-nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
-		      enum nfp_bpf_cmsg_type op,
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
 		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
 {
 	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+	unsigned int n_entries, reply_entries, count;
 	struct nfp_app_bpf *bpf = nfp_map->bpf;
 	struct bpf_map *map = &offmap->map;
 	struct cmsg_reply_map_op *reply;
 	struct cmsg_req_map_op *req;
 	struct sk_buff *skb;
+	u32 cache_gen;
 	int err;
 
 	/* FW messages have no space for more than 32 bits of flags */
 	if (flags >> 32)
 		return -EOPNOTSUPP;
 
+	/* Handle op cache */
+	n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key,
+					      out_value, &cache_gen);
+	if (!n_entries)
+		return 0;
+
 	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		err = -ENOMEM;
+		goto err_cache_put;
+	}
 
 	req = (void *)skb->data;
 	req->tid = cpu_to_be32(nfp_map->tid);
-	req->count = cpu_to_be32(1);
+	req->count = cpu_to_be32(n_entries);
 	req->flags = cpu_to_be32(flags);
 
 	/* Copy inputs */
 	if (key)
-		memcpy(&req->elem[0].key, key, map->key_size);
+		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
 	if (value)
-		memcpy(&req->elem[0].value, value, map->value_size);
+		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
+		       map->value_size);
 
-	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
-				       sizeof(*reply) + sizeof(*reply->elem));
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		goto err_cache_put;
+	}
+
+	if (skb->len < sizeof(*reply)) {
+		cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n",
+			  op, skb->len);
+		err = -EIO;
+		goto err_free;
+	}
 
 	reply = (void *)skb->data;
+	count = be32_to_cpu(reply->count);
 	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	/* FW responds with message sized to hold the good entries,
+	 * plus one extra entry if there was an error.
+	 */
+	reply_entries = count + !!err;
+	if (n_entries > 1 && count)
+		err = 0;
 	if (err)
 		goto err_free;
 
+	if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) {
+		cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n",
+			  op, skb->len, reply_entries);
+		err = -EIO;
+		goto err_free;
+	}
+
 	/* Copy outputs */
 	if (out_key)
-		memcpy(out_key, &reply->elem[0].key, map->key_size);
+		memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
+		       map->key_size);
 	if (out_value)
-		memcpy(out_value, &reply->elem[0].value, map->value_size);
+		memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
+		       map->value_size);
 
-	dev_consume_skb_any(skb);
+	nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen);
 
 	return 0;
 err_free:
 	dev_kfree_skb_any(skb);
+err_cache_put:
+	nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen);
 	return err;
 }
 
 int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
 			      void *key, void *value, u64 flags)
 {
-	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
 				     key, value, flags, NULL, NULL);
 }
 
 int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
 {
-	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
 				     key, NULL, 0, NULL, NULL);
 }
 
 int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
 			      void *key, void *value)
 {
-	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
 				     key, NULL, 0, NULL, value);
 }
 
 int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
 				void *next_key)
 {
-	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
 				     NULL, NULL, 0, next_key, NULL);
 }
 
 int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
 			       void *key, void *next_key)
 {
-	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
 				     key, NULL, 0, next_key, NULL);
+}
+
+unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf)
+{
+	return max(nfp_bpf_cmsg_map_req_size(bpf, 1),
+		   nfp_bpf_cmsg_map_reply_size(bpf, 1));
+}
+
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
+{
+	return max3(NFP_NET_DEFAULT_MTU,
+		    nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT),
+		    nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT));
+}
+
+unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf)
+{
+	unsigned int mtu, req_max, reply_max, entry_sz;
+
+	mtu = bpf->app->ctrl->dp.mtu;
+	entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz;
+	req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz;
+	reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz;
+
+	return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT);
 }
 
 void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_app_bpf *bpf = app->priv;
-	unsigned int tag;
 
 	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
 		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
-		goto err_free;
+		dev_kfree_skb_any(skb);
+		return;
 	}
 
-	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
+	if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
 		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
 			dev_consume_skb_any(skb);
 		else
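The cache get/put pair in the hunk above is an optimistic, generation-counted cache: a lookup snapshots cache_gen under cache_lock, performs the FW round trip unlocked, and a GETFIRST/GETNEXT reply is installed as the new cache only if no invalidating op (UPDATE/DELETE, a "blocker") ran in between. A simplified sketch of just that install check, with illustrative names and locking elided:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative reduction of the nfp_bpf_map cache fields. */
struct map_cache {
	unsigned int blockers;	/* invalidating ops currently in flight */
	uint32_t gen;		/* bumped when an invalidating op finishes */
	void *cache;		/* last GETFIRST/GETNEXT reply, or NULL */
};

/* Install a fill reply only if the snapshot taken before the FW round
 * trip is still current; otherwise the reply may predate an update or
 * delete and must be dropped. Mirrors the filler path of
 * nfp_bpf_ctrl_op_cache_put() above, without the spinlock.
 */
static bool cache_install(struct map_cache *c, void *reply, uint32_t snap_gen)
{
	if (c->blockers || c->gen != snap_gen)
		return false;
	c->cache = reply;
	return true;
}

int main(void)
{
	struct map_cache c = { .blockers = 0, .gen = 7, .cache = NULL };
	int reply;
	uint32_t snap = c.gen;	/* snapshot before the "round trip" */

	c.gen++;		/* a concurrent MAP_UPDATE invalidated */
	assert(!cache_install(&c, &reply, snap));
	assert(cache_install(&c, &reply, c.gen));
	assert(c.cache == &reply);
	return 0;
}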
@@ -446,39 +457,21 @@
 		return;
 	}
 
-	nfp_ctrl_lock(bpf->app->ctrl);
-
-	tag = nfp_bpf_cmsg_get_tag(skb);
-	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
-		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
-			  tag);
-		goto err_unlock;
-	}
-
-	__skb_queue_tail(&bpf->cmsg_replies, skb);
-	wake_up_interruptible_all(&bpf->cmsg_wq);
-
-	nfp_ctrl_unlock(bpf->app->ctrl);
-
-	return;
-err_unlock:
-	nfp_ctrl_unlock(bpf->app->ctrl);
-err_free:
-	dev_kfree_skb_any(skb);
+	nfp_ccm_rx(&bpf->ccm, skb);
 }
 
 void
 nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
 {
+	const struct nfp_ccm_hdr *hdr = data;
 	struct nfp_app_bpf *bpf = app->priv;
-	const struct cmsg_hdr *hdr = data;
 
 	if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
 		cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
 		return;
 	}
 
-	if (hdr->type == CMSG_TYPE_BPF_EVENT)
+	if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
 		nfp_bpf_event_output(bpf, data, len);
 	else
 		cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",