2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -1,39 +1,10 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <linux/hash.h>
 #include <linux/hashtable.h>
 #include <linux/jhash.h>
+#include <linux/math64.h>
 #include <linux/vmalloc.h>
 #include <net/pkt_cls.h>
 
@@ -48,6 +19,23 @@
 	u8 mask_id;
 };
 
+struct nfp_fl_flow_table_cmp_arg {
+	struct net_device *netdev;
+	unsigned long cookie;
+};
+
+struct nfp_fl_stats_ctx_to_flow {
+	struct rhash_head ht_node;
+	u32 stats_cxt;
+	struct nfp_fl_payload *flow;
+};
+
+static const struct rhashtable_params stats_ctx_table_params = {
+	.key_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
+	.head_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
+	.key_len	= sizeof(u32),
+};
+
 static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
 {
 	struct nfp_flower_priv *priv = app->priv;
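
With .key_len set, rhashtable hashes the raw key bytes itself, so the new
stats_ctx_table needs no custom hash or compare callbacks. A minimal sketch of
the same fixed-size-key pattern, with hypothetical demo_* names:

#include <linux/rhashtable.h>

struct demo_entry {			/* stand-in for nfp_fl_stats_ctx_to_flow */
	struct rhash_head ht_node;
	u32 key;			/* stand-in for stats_cxt */
	void *payload;
};

static const struct rhashtable_params demo_params = {
	.key_offset	= offsetof(struct demo_entry, key),
	.head_offset	= offsetof(struct demo_entry, ht_node),
	.key_len	= sizeof(u32),	/* rhashtable hashes these key bytes */
};

static void *demo_lookup(struct rhashtable *ht, u32 key)
{
	struct demo_entry *e;

	/* the lookup key is a pointer to the raw u32, not to an entry */
	e = rhashtable_lookup_fast(ht, &key, demo_params);
	return e ? e->payload : NULL;
}
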
@@ -55,14 +43,14 @@
 
 	ring = &priv->stats_ids.free_list;
 	/* Check if buffer is full. */
-	if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
-			NFP_FL_STATS_ELEM_RS -
+	if (!CIRC_SPACE(ring->head, ring->tail,
+			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
 			NFP_FL_STATS_ELEM_RS + 1))
 		return -ENOBUFS;
 
 	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
 	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
-		     (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
 
 	return 0;
 }
@@ -74,11 +62,20 @@
 	struct circ_buf *ring;
 
 	ring = &priv->stats_ids.free_list;
-	freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+	freed_stats_id = priv->stats_ring_size;
 	/* Check for unallocated entries first. */
 	if (priv->stats_ids.init_unalloc > 0) {
-		*stats_context_id = priv->stats_ids.init_unalloc - 1;
-		priv->stats_ids.init_unalloc--;
+		*stats_context_id =
+			FIELD_PREP(NFP_FL_STAT_ID_STAT,
+				   priv->stats_ids.init_unalloc - 1) |
+			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
+				   priv->active_mem_unit);
+
+		if (++priv->active_mem_unit == priv->total_mem_units) {
+			priv->stats_ids.init_unalloc--;
+			priv->active_mem_unit = 0;
+		}
+
 		return 0;
 	}
 
@@ -92,7 +89,7 @@
 	*stats_context_id = temp_stats_id;
 	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
 	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
-		    (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+		    (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
 
 	return 0;
 }
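
nfp_get_stats_entry() now stripes fresh IDs across firmware memory units: the
same per-unit index is handed out once per unit before the index is
decremented. A hedged sketch of the bit packing; the mask widths below are
illustrative placeholders, the real GENMASK definitions live in the driver's
main.h:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_STAT_ID_STAT	GENMASK(21, 0)	/* per-unit stats index */
#define DEMO_STAT_ID_MU_NUM	GENMASK(31, 22)	/* memory unit number */

static u32 demo_pack_ctx(u32 stat_idx, u32 mem_unit)
{
	/* combine index and unit into one 32-bit stats context ID */
	return FIELD_PREP(DEMO_STAT_ID_STAT, stat_idx) |
	       FIELD_PREP(DEMO_STAT_ID_MU_NUM, mem_unit);
}

static u32 demo_unpack_stat(u32 ctx_id)
{
	/* recover the per-unit index from a packed ID */
	return FIELD_GET(DEMO_STAT_ID_STAT, ctx_id);
}
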
@@ -100,58 +97,38 @@
 /* Must be called with either RTNL or rcu_read_lock */
 struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
-			   struct net_device *netdev, __be32 host_ctx)
+			   struct net_device *netdev)
 {
+	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
 	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_fl_payload *flower_entry;
 
-	hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
-				   tc_flower_cookie)
-		if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
-		    (!netdev || flower_entry->ingress_dev == netdev) &&
-		    (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
-		     flower_entry->meta.host_ctx_id == host_ctx))
-			return flower_entry;
+	flower_cmp_arg.netdev = netdev;
+	flower_cmp_arg.cookie = tc_flower_cookie;
 
-	return NULL;
-}
-
-static void
-nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
-{
-	struct nfp_fl_payload *nfp_flow;
-	unsigned long flower_cookie;
-
-	flower_cookie = be64_to_cpu(stats->stats_cookie);
-
-	rcu_read_lock();
-	nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
-					      stats->stats_con_id);
-	if (!nfp_flow)
-		goto exit_rcu_unlock;
-
-	spin_lock(&nfp_flow->lock);
-	nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
-	nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
-	nfp_flow->stats.used = jiffies;
-	spin_unlock(&nfp_flow->lock);
-
-exit_rcu_unlock:
-	rcu_read_unlock();
+	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
+				      nfp_flower_table_params);
 }
 
 void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
 {
 	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
-	struct nfp_fl_stats_frame *stats_frame;
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_fl_stats_frame *stats;
 	unsigned char *msg;
+	u32 ctx_id;
 	int i;
 
 	msg = nfp_flower_cmsg_get_data(skb);
 
-	stats_frame = (struct nfp_fl_stats_frame *)msg;
-	for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
-		nfp_flower_update_stats(app, stats_frame + i);
+	spin_lock(&priv->stats_lock);
+	for (i = 0; i < msg_len / sizeof(*stats); i++) {
+		stats = (struct nfp_fl_stats_frame *)msg + i;
+		ctx_id = be32_to_cpu(stats->stats_con_id);
+		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
+		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
+		priv->stats[ctx_id].used = jiffies;
+	}
+	spin_unlock(&priv->stats_lock);
 }
 
 static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
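
The RX path above now accumulates into a flat per-context array under
priv->stats_lock instead of looking up a flow per frame. Any reader must take
the same lock; a hypothetical consumer sketch, assuming the read happens in
process context (hence the _bh variant) and u64 counter fields:

static void demo_read_ctx_stats(struct nfp_flower_priv *priv, u32 ctx_id,
				u64 *bytes, u64 *pkts)
{
	/* same lock the cmsg RX path holds while accumulating */
	spin_lock_bh(&priv->stats_lock);
	*bytes = priv->stats[ctx_id].bytes;
	*pkts = priv->stats[ctx_id].pkts;
	/* snapshot-and-reset so the next read reports only new traffic */
	priv->stats[ctx_id].bytes = 0;
	priv->stats[ctx_id].pkts = 0;
	spin_unlock_bh(&priv->stats_lock);
}
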
@@ -299,9 +276,6 @@
 	if (!mask_entry)
 		return false;
 
-	if (meta_flags)
-		*meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
-
 	*mask_id = mask_entry->mask_id;
 	mask_entry->ref_cnt--;
 	if (!mask_entry->ref_cnt) {
@@ -316,28 +290,61 @@
 }
 
 int nfp_compile_flow_metadata(struct nfp_app *app,
-			      struct tc_cls_flower_offload *flow,
+			      struct flow_cls_offload *flow,
 			      struct nfp_fl_payload *nfp_flow,
-			      struct net_device *netdev)
+			      struct net_device *netdev,
+			      struct netlink_ext_ack *extack)
 {
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *check_entry;
 	u8 new_mask_id;
 	u32 stats_cxt;
+	int err;
 
-	if (nfp_get_stats_entry(app, &stats_cxt))
-		return -ENOENT;
+	err = nfp_get_stats_entry(app, &stats_cxt);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
+		return err;
+	}
 
 	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
 	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+	nfp_flow->ingress_dev = netdev;
 
+	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
+	if (!ctx_entry) {
+		err = -ENOMEM;
+		goto err_release_stats;
+	}
+
+	ctx_entry->stats_cxt = stats_cxt;
+	ctx_entry->flow = nfp_flow;
+
+	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
+				   stats_ctx_table_params)) {
+		err = -ENOMEM;
+		goto err_free_ctx_entry;
+	}
+
+	/* Do not allocate a mask-id for pre_tun_rules. These flows are used to
+	 * configure the pre_tun table and are never actually sent to the
+	 * firmware as an add-flow message. This causes the mask-id allocation
+	 * on the firmware to get out of sync if allocated here.
	 */
 	new_mask_id = 0;
-	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
+	if (!nfp_flow->pre_tun_rule.dev &&
+	    !nfp_check_mask_add(app, nfp_flow->mask_data,
 			    nfp_flow->meta.mask_len,
 			    &nfp_flow->meta.flags, &new_mask_id)) {
-		if (nfp_release_stats_entry(app, stats_cxt))
-			return -EINVAL;
-		return -ENOENT;
+		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
+		if (nfp_release_stats_entry(app, stats_cxt)) {
+			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
+			err = -EINVAL;
+			goto err_remove_rhash;
+		}
+		err = -ENOENT;
+		goto err_remove_rhash;
 	}
 
 	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
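
Each failure path now attaches a message to the passed-in netlink_ext_ack, so
tc can report why an offload was rejected instead of surfacing only a bare
errno. The pattern in isolation, as a hedged sketch with a hypothetical
callback:

#include <linux/netlink.h>

static int demo_offload_step(bool ok, struct netlink_ext_ack *extack)
{
	if (!ok) {
		/* the string rides back in the netlink ack;
		 * NL_SET_ERR_MSG_MOD prefixes KBUILD_MODNAME
		 */
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: demo step failed");
		return -EINVAL;
	}
	return 0;
}
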
@@ -345,56 +352,170 @@
 
 	/* Update flow payload with mask ids. */
 	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
-	nfp_flow->stats.pkts = 0;
-	nfp_flow->stats.bytes = 0;
-	nfp_flow->stats.used = jiffies;
+	priv->stats[stats_cxt].pkts = 0;
+	priv->stats[stats_cxt].bytes = 0;
+	priv->stats[stats_cxt].used = jiffies;
 
-	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
-						 NFP_FL_STATS_CTX_DONT_CARE);
+	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (check_entry) {
-		if (nfp_release_stats_entry(app, stats_cxt))
-			return -EINVAL;
+		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
+		if (nfp_release_stats_entry(app, stats_cxt)) {
+			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
+			err = -EINVAL;
+			goto err_remove_mask;
+		}
 
-		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
+		if (!nfp_flow->pre_tun_rule.dev &&
+		    !nfp_check_mask_remove(app, nfp_flow->mask_data,
 					   nfp_flow->meta.mask_len,
-					   NULL, &new_mask_id))
-			return -EINVAL;
+					   NULL, &new_mask_id)) {
+			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
+			err = -EINVAL;
+			goto err_remove_mask;
+		}
 
-		return -EEXIST;
+		err = -EEXIST;
+		goto err_remove_mask;
 	}
 
 	return 0;
+
+err_remove_mask:
+	if (!nfp_flow->pre_tun_rule.dev)
+		nfp_check_mask_remove(app, nfp_flow->mask_data,
+				      nfp_flow->meta.mask_len,
+				      NULL, &new_mask_id);
+err_remove_rhash:
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+					    &ctx_entry->ht_node,
+					    stats_ctx_table_params));
+err_free_ctx_entry:
+	kfree(ctx_entry);
+err_release_stats:
+	nfp_release_stats_entry(app, stats_cxt);
+
+	return err;
+}
+
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+				struct nfp_fl_payload *nfp_flow)
+{
+	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+	priv->flower_version++;
 }
 
 int nfp_modify_flow_metadata(struct nfp_app *app,
 			     struct nfp_fl_payload *nfp_flow)
 {
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
 	struct nfp_flower_priv *priv = app->priv;
 	u8 new_mask_id = 0;
 	u32 temp_ctx_id;
 
-	nfp_check_mask_remove(app, nfp_flow->mask_data,
-			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
-			      &new_mask_id);
+	__nfp_modify_flow_metadata(priv, nfp_flow);
 
-	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
-	priv->flower_version++;
+	if (!nfp_flow->pre_tun_rule.dev)
+		nfp_check_mask_remove(app, nfp_flow->mask_data,
+				      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
+				      &new_mask_id);
 
 	/* Update flow payload with mask ids. */
 	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
 
-	/* Release the stats ctx id. */
+	/* Release the stats ctx id and ctx to flow table entry. */
 	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
+					   stats_ctx_table_params);
+	if (!ctx_entry)
+		return -ENOENT;
+
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+					    &ctx_entry->ht_node,
+					    stats_ctx_table_params));
+	kfree(ctx_entry);
 
 	return nfp_release_stats_entry(app, temp_ctx_id);
 }
 
-int nfp_flower_metadata_init(struct nfp_app *app)
+struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
 {
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
 	struct nfp_flower_priv *priv = app->priv;
 
+	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
+					   stats_ctx_table_params);
+	if (!ctx_entry)
+		return NULL;
+
+	return ctx_entry->flow;
+}
+
+static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
+			    const void *obj)
+{
+	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
+	const struct nfp_fl_payload *flow_entry = obj;
+
+	if (flow_entry->ingress_dev == cmp_arg->netdev)
+		return flow_entry->tc_flower_cookie != cmp_arg->cookie;
+
+	return 1;
+}
+
+static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct nfp_fl_payload *flower_entry = data;
+
+	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
+		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
+		      seed);
+}
+
+static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;
+
+	return jhash2((u32 *)&cmp_arg->cookie,
+		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
+}
+
+const struct rhashtable_params nfp_flower_table_params = {
+	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
+	.hashfn			= nfp_fl_key_hashfn,
+	.obj_cmpfn		= nfp_fl_obj_cmpfn,
+	.obj_hashfn		= nfp_fl_obj_hashfn,
+	.automatic_shrinking	= true,
+};
+
+const struct rhashtable_params merge_table_params = {
+	.key_offset	= offsetof(struct nfp_merge_info, parent_ctx),
+	.head_offset	= offsetof(struct nfp_merge_info, ht_node),
+	.key_len	= sizeof(u64),
+};
+
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+			     unsigned int host_num_mems)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	int err, stats_size;
+
 	hash_init(priv->mask_table);
-	hash_init(priv->flow_table);
+
+	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
+	if (err)
+		return err;
+
+	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
+	if (err)
+		goto err_free_flow_table;
+
+	err = rhashtable_init(&priv->merge_table, &merge_table_params);
+	if (err)
+		goto err_free_stats_ctx_table;
+
 	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
 	/* Init ring buffer and unallocated mask_ids. */
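
nfp_flower_table_params pairs a custom key hash with an object hash: both hash
only the cookie, so a {netdev, cookie} lookup key and a stored flow land in
the same bucket, and nfp_fl_obj_cmpfn then filters on the ingress netdev.
jhash2() requires the hashed field to be a whole number of u32 words, which
the cookie satisfies. A sketch of a lookup against this table, assuming the
caller already holds RTNL or rcu_read_lock as required for rhashtable reads:

static struct nfp_fl_payload *
demo_lookup_flow(struct nfp_flower_priv *priv, struct net_device *netdev,
		 unsigned long cookie)
{
	struct nfp_fl_flow_table_cmp_arg cmp = {
		.netdev = netdev,	/* checked by obj_cmpfn, not hashed */
		.cookie = cookie,	/* hashed by both hash functions */
	};

	return rhashtable_lookup_fast(&priv->flow_table, &cmp,
				      nfp_flower_table_params);
}
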
@@ -402,7 +523,7 @@
 		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
 			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
 	if (!priv->mask_ids.mask_id_free_list.buf)
-		return -ENOMEM;
+		goto err_free_merge_table;
 
 	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
 
@@ -416,18 +537,35 @@
 	/* Init ring buffer and unallocated stats_ids. */
 	priv->stats_ids.free_list.buf =
 		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
-				   NFP_FL_STATS_ENTRY_RS));
+				   priv->stats_ring_size));
 	if (!priv->stats_ids.free_list.buf)
 		goto err_free_last_used;
 
-	priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);
+
+	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
+		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
+	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
+				     GFP_KERNEL);
+	if (!priv->stats)
+		goto err_free_ring_buf;
+
+	spin_lock_init(&priv->stats_lock);
 
 	return 0;
 
+err_free_ring_buf:
+	vfree(priv->stats_ids.free_list.buf);
 err_free_last_used:
 	kfree(priv->mask_ids.last_used);
 err_free_mask_id:
 	kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_merge_table:
+	rhashtable_destroy(&priv->merge_table);
+err_free_stats_ctx_table:
+	rhashtable_destroy(&priv->stats_ctx_table);
+err_free_flow_table:
+	rhashtable_destroy(&priv->flow_table);
 	return -ENOMEM;
 }
 
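
The stats array is sized from the largest ID the allocator can pack rather
than from a plain product of contexts and units, so every packed context ID
indexes in bounds. A worked example using the illustrative masks from the
packing sketch earlier (real widths live in the driver's main.h):

/* DEMO_STAT_ID_STAT = GENMASK(21, 0), DEMO_STAT_ID_MU_NUM = GENMASK(31, 22) */
u32 stats_size = FIELD_PREP(DEMO_STAT_ID_STAT, 0x10000) |	/* ctx count   */
		 FIELD_PREP(DEMO_STAT_ID_MU_NUM, 4 - 1);	/* 4 mem units */
/* = 0x10000 | (3 << 22) = 0x00c10000 entries */
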
@@ -438,6 +576,13 @@
 	if (!priv)
 		return;
 
+	rhashtable_free_and_destroy(&priv->flow_table,
+				    nfp_check_rhashtable_empty, NULL);
+	rhashtable_free_and_destroy(&priv->stats_ctx_table,
+				    nfp_check_rhashtable_empty, NULL);
+	rhashtable_free_and_destroy(&priv->merge_table,
+				    nfp_check_rhashtable_empty, NULL);
+	kvfree(priv->stats);
 	kfree(priv->mask_ids.mask_id_free_list.buf);
 	kfree(priv->mask_ids.last_used);
 	vfree(priv->stats_ids.free_list.buf);
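
rhashtable_free_and_destroy() walks any elements still present and hands each
one to the callback before freeing the table. All flows should already have
been removed by this point, so the driver's nfp_check_rhashtable_empty helper
(defined elsewhere in the driver) only needs to flag leftovers; its shape is
roughly:

void nfp_check_rhashtable_empty(void *ptr, void *arg)
{
	WARN_ON_ONCE(1);	/* a surviving entry indicates a leaked flow */
}
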