forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
 #include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
@@ -58,7 +59,8 @@
 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
 __VA_ARGS__)\
 
-#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
+ .def_miss_action = def_miss_act,\
 .children = (struct init_tree_node[]) {__VA_ARGS__},\
 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
 }
@@ -76,6 +78,23 @@
 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
 
+#define FS_CHAINING_CAPS_EGRESS \
+ FS_REQUIRED_CAPS( \
+ FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_transmit.modify_root), \
+ FS_CAP(flow_table_properties_nic_transmit \
+ .identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
+
+#define FS_CHAINING_CAPS_RDMA_TX \
+ FS_REQUIRED_CAPS( \
+ FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma \
+ .identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma \
+ .flow_table_modify))
+
 #define LEFTOVERS_NUM_LEVELS 1
 #define LEFTOVERS_NUM_PRIOS 1
 
@@ -86,8 +105,8 @@
 #define ETHTOOL_PRIO_NUM_LEVELS 1
 #define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Vlan, mac, ttc, inner ttc, aRFS */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 5
+/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 6
 #define KERNEL_NIC_NUM_PRIOS 1
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -99,13 +118,17 @@
 #define ANCHOR_NUM_PRIOS 1
 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
 
-#define OFFLOADS_MAX_FT 1
-#define OFFLOADS_NUM_PRIOS 1
-#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+#define OFFLOADS_MAX_FT 2
+#define OFFLOADS_NUM_PRIOS 2
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
 
 #define LAG_PRIO_NUM_LEVELS 1
 #define LAG_NUM_PRIOS 1
 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+
+#define KERNEL_TX_IPSEC_NUM_PRIOS 1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 1
+#define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
 
 struct node_caps {
 size_t arr_sz;
@@ -121,33 +144,96 @@
121144 int num_leaf_prios;
122145 int prio;
123146 int num_levels;
147
+ enum mlx5_flow_table_miss_action def_miss_action;
124148 } root_fs = {
125149 .type = FS_TYPE_NAMESPACE,
126150 .ar_size = 7,
151
+ .children = (struct init_tree_node[]){
152
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
153
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
154
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
155
+ BY_PASS_PRIO_NUM_LEVELS))),
156
+ ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
157
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
158
+ ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
159
+ LAG_PRIO_NUM_LEVELS))),
160
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
161
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
162
+ ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
163
+ OFFLOADS_MAX_FT))),
164
+ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
165
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
166
+ ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
167
+ ETHTOOL_PRIO_NUM_LEVELS))),
168
+ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
169
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
170
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
171
+ KERNEL_NIC_TC_NUM_LEVELS),
172
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
173
+ KERNEL_NIC_PRIO_NUM_LEVELS))),
174
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
175
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
176
+ ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
177
+ LEFTOVERS_NUM_LEVELS))),
178
+ ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
179
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
180
+ ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
181
+ ANCHOR_NUM_LEVELS))),
182
+ }
183
+};
184
+
185
+static struct init_tree_node egress_root_fs = {
186
+ .type = FS_TYPE_NAMESPACE,
187
+#ifdef CONFIG_MLX5_IPSEC
188
+ .ar_size = 2,
189
+#else
190
+ .ar_size = 1,
191
+#endif
127192 .children = (struct init_tree_node[]) {
128
- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
129
- FS_CHAINING_CAPS,
130
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
193
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
194
+ FS_CHAINING_CAPS_EGRESS,
195
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
196
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
131197 BY_PASS_PRIO_NUM_LEVELS))),
132
- ADD_PRIO(0, LAG_MIN_LEVEL, 0,
198
+#ifdef CONFIG_MLX5_IPSEC
199
+ ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
200
+ FS_CHAINING_CAPS_EGRESS,
201
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
202
+ ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
203
+ KERNEL_TX_IPSEC_NUM_LEVELS))),
204
+#endif
205
+ }
206
+};
207
+
208
+#define RDMA_RX_BYPASS_PRIO 0
209
+#define RDMA_RX_KERNEL_PRIO 1
210
+static struct init_tree_node rdma_rx_root_fs = {
211
+ .type = FS_TYPE_NAMESPACE,
212
+ .ar_size = 2,
213
+ .children = (struct init_tree_node[]) {
214
+ [RDMA_RX_BYPASS_PRIO] =
215
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
133216 FS_CHAINING_CAPS,
134
- ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
135
- LAG_PRIO_NUM_LEVELS))),
136
- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
137
- ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
138
- ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
217
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
218
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
219
+ BY_PASS_PRIO_NUM_LEVELS))),
220
+ [RDMA_RX_KERNEL_PRIO] =
221
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
139222 FS_CHAINING_CAPS,
140
- ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
141
- ETHTOOL_PRIO_NUM_LEVELS))),
142
- ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
143
- ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
144
- ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
145
- KERNEL_NIC_PRIO_NUM_LEVELS))),
146
- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
147
- FS_CHAINING_CAPS,
148
- ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
149
- ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
150
- ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
223
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
224
+ ADD_MULTIPLE_PRIO(1, 1))),
225
+ }
226
+};
227
+
228
+static struct init_tree_node rdma_tx_root_fs = {
229
+ .type = FS_TYPE_NAMESPACE,
230
+ .ar_size = 1,
231
+ .children = (struct init_tree_node[]) {
232
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
233
+ FS_CHAINING_CAPS_RDMA_TX,
234
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
235
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
236
+ BY_PASS_PRIO_NUM_LEVELS))),
151237 }
152238 };
153239
@@ -158,7 +244,7 @@
158244 };
159245
160246 static const struct rhashtable_params rhash_fte = {
161
- .key_len = FIELD_SIZEOF(struct fs_fte, val),
247
+ .key_len = sizeof_field(struct fs_fte, val),
162248 .key_offset = offsetof(struct fs_fte, val),
163249 .head_offset = offsetof(struct fs_fte, hash),
164250 .automatic_shrinking = true,
@@ -166,7 +252,7 @@
166252 };
167253
168254 static const struct rhashtable_params rhash_fg = {
169
- .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
255
+ .key_len = sizeof_field(struct mlx5_flow_group, mask),
170256 .key_offset = offsetof(struct mlx5_flow_group, mask),
171257 .head_offset = offsetof(struct mlx5_flow_group, hash),
172258 .automatic_shrinking = true,
@@ -182,7 +268,7 @@
182268 static void del_sw_fte(struct fs_node *node);
183269 static void del_sw_prio(struct fs_node *node);
184270 static void del_sw_ns(struct fs_node *node);
185
-/* Delete rule (destination) is special case that
271
+/* Delete rule (destination) is special case that
186272 * requires to lock the FTE for all the deletion process.
187273 */
188274 static void del_sw_hw_rule(struct fs_node *node);
@@ -242,10 +328,11 @@
242328 }
243329 }
244330
245
-static void down_write_ref_node(struct fs_node *node)
331
+static void down_write_ref_node(struct fs_node *node, bool locked)
246332 {
247333 if (node) {
248
- down_write(&node->lock);
334
+ if (!locked)
335
+ down_write(&node->lock);
249336 refcount_inc(&node->refcount);
250337 }
251338 }
@@ -256,13 +343,14 @@
256343 up_read(&node->lock);
257344 }
258345
259
-static void up_write_ref_node(struct fs_node *node)
346
+static void up_write_ref_node(struct fs_node *node, bool locked)
260347 {
261348 refcount_dec(&node->refcount);
262
- up_write(&node->lock);
349
+ if (!locked)
350
+ up_write(&node->lock);
263351 }
264352
265
-static void tree_put_node(struct fs_node *node)
353
+static void tree_put_node(struct fs_node *node, bool locked)
266354 {
267355 struct fs_node *parent_node = node->parent;
268356
@@ -270,30 +358,25 @@
270358 if (node->del_hw_func)
271359 node->del_hw_func(node);
272360 if (parent_node) {
273
- /* Only root namespace doesn't have parent and we just
274
- * need to free its node.
275
- */
276
- down_write_ref_node(parent_node);
361
+ down_write_ref_node(parent_node, locked);
277362 list_del_init(&node->list);
278
- if (node->del_sw_func)
279
- node->del_sw_func(node);
280
- up_write_ref_node(parent_node);
281
- } else {
282
- kfree(node);
283363 }
364
+ node->del_sw_func(node);
365
+ if (parent_node)
366
+ up_write_ref_node(parent_node, locked);
284367 node = NULL;
285368 }
286369 if (!node && parent_node)
287
- tree_put_node(parent_node);
370
+ tree_put_node(parent_node, locked);
288371 }
289372
290
-static int tree_remove_node(struct fs_node *node)
373
+static int tree_remove_node(struct fs_node *node, bool locked)
291374 {
292375 if (refcount_read(&node->refcount) > 1) {
293376 refcount_dec(&node->refcount);
294377 return -EEXIST;
295378 }
296
- tree_put_node(node);
379
+ tree_put_node(node, locked);
297380 return 0;
298381 }
299382
@@ -308,6 +391,12 @@
308391 }
309392
310393 return NULL;
394
+}
395
+
396
+static bool is_fwd_next_action(u32 action)
397
+{
398
+ return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
399
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
311400 }
312401
313402 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
@@ -377,9 +466,10 @@
377466 fs_get_obj(ft, node);
378467 dev = get_dev(&ft->node);
379468 root = find_root(&ft->node);
469
+ trace_mlx5_fs_del_ft(ft);
380470
381471 if (node->active) {
382
- err = root->cmds->destroy_flow_table(dev, ft);
472
+ err = root->cmds->destroy_flow_table(root, ft);
383473 if (err)
384474 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
385475 }
@@ -393,29 +483,43 @@
393483 fs_get_obj(ft, node);
394484
395485 rhltable_destroy(&ft->fgs_hash);
396
- fs_get_obj(prio, ft->node.parent);
397
- prio->num_ft--;
486
+ if (ft->node.parent) {
487
+ fs_get_obj(prio, ft->node.parent);
488
+ prio->num_ft--;
489
+ }
398490 kfree(ft);
491
+}
492
+
493
+static void modify_fte(struct fs_fte *fte)
494
+{
495
+ struct mlx5_flow_root_namespace *root;
496
+ struct mlx5_flow_table *ft;
497
+ struct mlx5_flow_group *fg;
498
+ struct mlx5_core_dev *dev;
499
+ int err;
500
+
501
+ fs_get_obj(fg, fte->node.parent);
502
+ fs_get_obj(ft, fg->node.parent);
503
+ dev = get_dev(&fte->node);
504
+
505
+ root = find_root(&ft->node);
506
+ err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
507
+ if (err)
508
+ mlx5_core_warn(dev,
509
+ "%s can't del rule fg id=%d fte_index=%d\n",
510
+ __func__, fg->id, fte->index);
511
+ fte->modify_mask = 0;
399512 }
400513
401514 static void del_sw_hw_rule(struct fs_node *node)
402515 {
403
- struct mlx5_flow_root_namespace *root;
404516 struct mlx5_flow_rule *rule;
405
- struct mlx5_flow_table *ft;
406
- struct mlx5_flow_group *fg;
407517 struct fs_fte *fte;
408
- int modify_mask;
409
- struct mlx5_core_dev *dev = get_dev(node);
410
- int err;
411
- bool update_fte = false;
412518
413519 fs_get_obj(rule, node);
414520 fs_get_obj(fte, rule->node.parent);
415
- fs_get_obj(fg, fte->node.parent);
416
- fs_get_obj(ft, fg->node.parent);
417521 trace_mlx5_fs_del_rule(rule);
418
- if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
522
+ if (is_fwd_next_action(rule->sw_action)) {
419523 mutex_lock(&rule->dest_attr.ft->lock);
420524 list_del(&rule->next_ft);
421525 mutex_unlock(&rule->dest_attr.ft->lock);
@@ -423,27 +527,26 @@
423527
424528 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
425529 --fte->dests_size) {
426
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
427
- BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
530
+ fte->modify_mask |=
531
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
532
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
428533 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
429
- update_fte = true;
534
+ goto out;
535
+ }
536
+
537
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
538
+ --fte->dests_size) {
539
+ fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
540
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
430541 goto out;
431542 }
432543
433544 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
434545 --fte->dests_size) {
435
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
436
- update_fte = true;
546
+ fte->modify_mask |=
547
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
437548 }
438549 out:
439
- root = find_root(&ft->node);
440
- if (update_fte && fte->dests_size) {
441
- err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
442
- if (err)
443
- mlx5_core_warn(dev,
444
- "%s can't del rule fg id=%d fte_index=%d\n",
445
- __func__, fg->id, fte->index);
446
- }
447550 kfree(rule);
448551 }
449552
@@ -464,7 +567,7 @@
464567 dev = get_dev(&ft->node);
465568 root = find_root(&ft->node);
466569 if (node->active) {
467
- err = root->cmds->delete_fte(dev, ft, fte);
570
+ err = root->cmds->delete_fte(root, ft, fte);
468571 if (err)
469572 mlx5_core_warn(dev,
470573 "flow steering can't delete fte in index %d of flow group id %d\n",
@@ -504,7 +607,7 @@
504607 trace_mlx5_fs_del_fg(fg);
505608
506609 root = find_root(&ft->node);
507
- if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
610
+ if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
508611 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
509612 fg->id, ft->id);
510613 }
@@ -521,7 +624,9 @@
521624
522625 rhashtable_destroy(&fg->ftes_hash);
523626 ida_destroy(&fg->fte_allocator);
524
- if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
627
+ if (ft->autogroup.active &&
628
+ fg->max_ftes == ft->autogroup.group_size &&
629
+ fg->start_index < ft->autogroup.max_fte)
525630 ft->autogroup.num_groups--;
526631 err = rhltable_remove(&ft->fgs_hash,
527632 &fg->hash,
@@ -556,7 +661,7 @@
556661 }
557662
558663 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
559
- u32 *match_value,
664
+ const struct mlx5_flow_spec *spec,
560665 struct mlx5_flow_act *flow_act)
561666 {
562667 struct mlx5_flow_steering *steering = get_steering(&ft->node);
....@@ -566,9 +671,10 @@
566671 if (!fte)
567672 return ERR_PTR(-ENOMEM);
568673
569
- memcpy(fte->val, match_value, sizeof(fte->val));
674
+ memcpy(fte->val, &spec->match_value, sizeof(fte->val));
570675 fte->node.type = FS_TYPE_FLOW_ENTRY;
571676 fte->action = *flow_act;
677
+ fte->flow_context = spec->flow_context;
572678
573679 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
574680
@@ -584,7 +690,7 @@
584690
585691 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
586692 u8 match_criteria_enable,
587
- void *match_criteria,
693
+ const void *match_criteria,
588694 int start_index,
589695 int end_index)
590696 {
@@ -599,7 +705,8 @@
599705 if (ret) {
600706 kmem_cache_free(steering->fgs_cache, fg);
601707 return ERR_PTR(ret);
602
-}
708
+ }
709
+
603710 ida_init(&fg->fte_allocator);
604711 fg->mask.match_criteria_enable = match_criteria_enable;
605712 memcpy(&fg->mask.match_criteria, match_criteria,
@@ -613,7 +720,7 @@
613720
614721 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
615722 u8 match_criteria_enable,
616
- void *match_criteria,
723
+ const void *match_criteria,
617724 int start_index,
618725 int end_index,
619726 struct list_head *prev)
@@ -711,20 +818,42 @@
711818 return ft;
712819 }
713820
714
-/* If reverse if false then return the first flow table in next priority of
715
- * prio in the tree, else return the last flow table in the previous priority
716
- * of prio in the tree.
717
- */
718
-static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
821
+static struct fs_node *find_prio_chains_parent(struct fs_node *parent,
822
+ struct fs_node **child)
719823 {
824
+ struct fs_node *node = NULL;
825
+
826
+ while (parent && parent->type != FS_TYPE_PRIO_CHAINS) {
827
+ node = parent;
828
+ parent = parent->parent;
829
+ }
830
+
831
+ if (child)
832
+ *child = node;
833
+
834
+ return parent;
835
+}
836
+
837
+/* If reverse is false then return the first flow table next to the passed node
838
+ * in the tree, else return the last flow table before the node in the tree.
839
+ * If skip is true, skip the flow tables in the same prio_chains prio.
840
+ */
841
+static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
842
+ bool skip)
843
+{
844
+ struct fs_node *prio_chains_parent = NULL;
720845 struct mlx5_flow_table *ft = NULL;
721846 struct fs_node *curr_node;
722847 struct fs_node *parent;
723848
724
- parent = prio->node.parent;
725
- curr_node = &prio->node;
849
+ if (skip)
850
+ prio_chains_parent = find_prio_chains_parent(node, NULL);
851
+ parent = node->parent;
852
+ curr_node = node;
726853 while (!ft && parent) {
727
- ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
854
+ if (parent != prio_chains_parent)
855
+ ft = find_closest_ft_recursive(parent, &curr_node->list,
856
+ reverse);
728857 curr_node = parent;
729858 parent = curr_node->parent;
730859 }
@@ -732,15 +861,27 @@
732861 }
733862
734863 /* Assuming all the tree is locked by mutex chain lock */
735
-static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
864
+static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
736865 {
737
- return find_closest_ft(prio, false);
866
+ return find_closest_ft(node, false, true);
738867 }
739868
740869 /* Assuming all the tree is locked by mutex chain lock */
741
-static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
870
+static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
742871 {
743
- return find_closest_ft(prio, true);
872
+ return find_closest_ft(node, true, true);
873
+}
874
+
875
+static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
876
+ struct mlx5_flow_act *flow_act)
877
+{
878
+ struct fs_prio *prio;
879
+ bool next_ns;
880
+
881
+ next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
882
+ fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
883
+
884
+ return find_next_chained_ft(&prio->node);
744885 }
745886
746887 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
@@ -749,22 +890,34 @@
749890 {
750891 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
751892 struct mlx5_flow_table *iter;
752
- int i = 0;
753893 int err;
754894
755895 fs_for_each_ft(iter, prio) {
756
- i++;
757
- err = root->cmds->modify_flow_table(dev, iter, ft);
896
+ err = root->cmds->modify_flow_table(root, iter, ft);
758897 if (err) {
759
- mlx5_core_warn(dev, "Failed to modify flow table %d\n",
760
- iter->id);
898
+ mlx5_core_err(dev,
899
+ "Failed to modify flow table id %d, type %d, err %d\n",
900
+ iter->id, iter->type, err);
761901 /* The driver is out of sync with the FW */
762
- if (i > 1)
763
- WARN_ON(true);
764902 return err;
765903 }
766904 }
767905 return 0;
906
+}
907
+
908
+static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
909
+ struct fs_node *parent,
910
+ struct fs_node **child,
911
+ bool reverse)
912
+{
913
+ struct mlx5_flow_table *ft;
914
+
915
+ ft = find_closest_ft(node, reverse, false);
916
+
917
+ if (ft && parent == find_prio_chains_parent(&ft->node, child))
918
+ return ft;
919
+
920
+ return NULL;
768921 }
769922
770923 /* Connect flow tables from previous priority of prio to ft */
@@ -772,16 +925,35 @@
772925 struct mlx5_flow_table *ft,
773926 struct fs_prio *prio)
774927 {
928
+ struct fs_node *prio_parent, *parent = NULL, *child, *node;
775929 struct mlx5_flow_table *prev_ft;
930
+ int err = 0;
776931
777
- prev_ft = find_prev_chained_ft(prio);
778
- if (prev_ft) {
932
+ prio_parent = find_prio_chains_parent(&prio->node, &child);
933
+
934
+ /* return directly if not under the first sub ns of prio_chains prio */
935
+ if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
936
+ return 0;
937
+
938
+ prev_ft = find_prev_chained_ft(&prio->node);
939
+ while (prev_ft) {
779940 struct fs_prio *prev_prio;
780941
781942 fs_get_obj(prev_prio, prev_ft->node.parent);
782
- return connect_fts_in_prio(dev, prev_prio, ft);
943
+ err = connect_fts_in_prio(dev, prev_prio, ft);
944
+ if (err)
945
+ break;
946
+
947
+ if (!parent) {
948
+ parent = find_prio_chains_parent(&prev_prio->node, &child);
949
+ if (!parent)
950
+ break;
951
+ }
952
+
953
+ node = child;
954
+ prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
783955 }
784
- return 0;
956
+ return err;
785957 }
786958
787959 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
@@ -790,7 +962,7 @@
790962 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
791963 struct mlx5_ft_underlay_qp *uqp;
792964 int min_level = INT_MAX;
793
- int err;
965
+ int err = 0;
794966 u32 qpn;
795967
796968 if (root->root_ft)
@@ -802,11 +974,11 @@
802974 if (list_empty(&root->underlay_qpns)) {
803975 /* Don't set any QPN (zero) in case QPN list is empty */
804976 qpn = 0;
805
- err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
977
+ err = root->cmds->update_root_ft(root, ft, qpn, false);
806978 } else {
807979 list_for_each_entry(uqp, &root->underlay_qpns, list) {
808980 qpn = uqp->qpn;
809
- err = root->cmds->update_root_ft(root->dev, ft,
981
+ err = root->cmds->update_root_ft(root, ft,
810982 qpn, false);
811983 if (err)
812984 break;
@@ -836,15 +1008,15 @@
8361008 fs_get_obj(fte, rule->node.parent);
8371009 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
8381010 return -EINVAL;
839
- down_write_ref_node(&fte->node);
1011
+ down_write_ref_node(&fte->node, false);
8401012 fs_get_obj(fg, fte->node.parent);
8411013 fs_get_obj(ft, fg->node.parent);
8421014
8431015 memcpy(&rule->dest_attr, dest, sizeof(*dest));
8441016 root = find_root(&ft->node);
845
- err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
1017
+ err = root->cmds->update_fte(root, ft, fg,
8461018 modify_mask, fte);
847
- up_write_ref_node(&fte->node);
1019
+ up_write_ref_node(&fte->node, false);
8481020
8491021 return err;
8501022 }
@@ -893,6 +1065,10 @@
8931065 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
8941066 mutex_unlock(&old_next_ft->lock);
8951067 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1068
+ if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1069
+ iter->ft->ns == new_next_ft->ns)
1070
+ continue;
1071
+
8961072 err = _mlx5_modify_rule_destination(iter, &dest);
8971073 if (err)
8981074 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
@@ -916,7 +1092,7 @@
9161092 if (err)
9171093 return err;
9181094
919
- next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
1095
+ next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
9201096 err = connect_fwd_rules(dev, ft, next_ft);
9211097 if (err)
9221098 return err;
@@ -948,7 +1124,8 @@
9481124 u16 vport)
9491125 {
9501126 struct mlx5_flow_root_namespace *root = find_root(&ns->node);
951
- struct mlx5_flow_table *next_ft = NULL;
1127
+ bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1128
+ struct mlx5_flow_table *next_ft;
9521129 struct fs_prio *fs_prio = NULL;
9531130 struct mlx5_flow_table *ft;
9541131 int log_table_sz;
@@ -965,14 +1142,21 @@
9651142 err = -EINVAL;
9661143 goto unlock_root;
9671144 }
968
- if (ft_attr->level >= fs_prio->num_levels) {
969
- err = -ENOSPC;
970
- goto unlock_root;
1145
+ if (!unmanaged) {
1146
+ /* The level is related to the
1147
+ * priority level range.
1148
+ */
1149
+ if (ft_attr->level >= fs_prio->num_levels) {
1150
+ err = -ENOSPC;
1151
+ goto unlock_root;
1152
+ }
1153
+
1154
+ ft_attr->level += fs_prio->start_level;
9711155 }
1156
+
9721157 /* The level is related to the
9731158 * priority level range.
9741159 */
975
- ft_attr->level += fs_prio->start_level;
9761160 ft = alloc_flow_table(ft_attr->level,
9771161 vport,
9781162 ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
@@ -985,26 +1169,35 @@
9851169
9861170 tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
9871171 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
988
- next_ft = find_next_chained_ft(fs_prio);
989
- err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
990
- ft->type, ft->level, log_table_sz,
991
- next_ft, &ft->id, ft->flags);
1172
+ next_ft = unmanaged ? ft_attr->next_ft :
1173
+ find_next_chained_ft(&fs_prio->node);
1174
+ ft->def_miss_action = ns->def_miss_action;
1175
+ ft->ns = ns;
1176
+ err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
9921177 if (err)
9931178 goto free_ft;
9941179
995
- err = connect_flow_table(root->dev, ft, fs_prio);
996
- if (err)
997
- goto destroy_ft;
1180
+ if (!unmanaged) {
1181
+ err = connect_flow_table(root->dev, ft, fs_prio);
1182
+ if (err)
1183
+ goto destroy_ft;
1184
+ }
1185
+
9981186 ft->node.active = true;
999
- down_write_ref_node(&fs_prio->node);
1000
- tree_add_node(&ft->node, &fs_prio->node);
1001
- list_add_flow_table(ft, fs_prio);
1187
+ down_write_ref_node(&fs_prio->node, false);
1188
+ if (!unmanaged) {
1189
+ tree_add_node(&ft->node, &fs_prio->node);
1190
+ list_add_flow_table(ft, fs_prio);
1191
+ } else {
1192
+ ft->node.root = fs_prio->node.root;
1193
+ }
10021194 fs_prio->num_ft++;
1003
- up_write_ref_node(&fs_prio->node);
1195
+ up_write_ref_node(&fs_prio->node, false);
10041196 mutex_unlock(&root->chain_lock);
1197
+ trace_mlx5_fs_add_ft(ft);
10051198 return ft;
10061199 destroy_ft:
1007
- root->cmds->destroy_flow_table(root->dev, ft);
1200
+ root->cmds->destroy_flow_table(root, ft);
10081201 free_ft:
10091202 rhltable_destroy(&ft->fgs_hash);
10101203 kfree(ft);
@@ -1046,31 +1239,27 @@
10461239
10471240 struct mlx5_flow_table*
10481241 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1049
- int prio,
1050
- int num_flow_table_entries,
1051
- int max_num_groups,
1052
- u32 level,
1053
- u32 flags)
1242
+ struct mlx5_flow_table_attr *ft_attr)
10541243 {
1055
- struct mlx5_flow_table_attr ft_attr = {};
1244
+ int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1245
+ int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
1246
+ int max_num_groups = ft_attr->autogroup.max_num_groups;
10561247 struct mlx5_flow_table *ft;
10571248
1058
- if (max_num_groups > num_flow_table_entries)
1249
+ if (max_num_groups > autogroups_max_fte)
1250
+ return ERR_PTR(-EINVAL);
1251
+ if (num_reserved_entries > ft_attr->max_fte)
10591252 return ERR_PTR(-EINVAL);
10601253
1061
- ft_attr.max_fte = num_flow_table_entries;
1062
- ft_attr.prio = prio;
1063
- ft_attr.level = level;
1064
- ft_attr.flags = flags;
1065
-
1066
- ft = mlx5_create_flow_table(ns, &ft_attr);
1254
+ ft = mlx5_create_flow_table(ns, ft_attr);
10671255 if (IS_ERR(ft))
10681256 return ft;
10691257
10701258 ft->autogroup.active = true;
10711259 ft->autogroup.required_groups = max_num_groups;
1260
+ ft->autogroup.max_fte = autogroups_max_fte;
10721261 /* We save place for flow groups in addition to max types */
1073
- ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
1262
+ ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
10741263
10751264 return ft;
10761265 }
@@ -1089,24 +1278,23 @@
10891278 start_flow_index);
10901279 int end_index = MLX5_GET(create_flow_group_in, fg_in,
10911280 end_flow_index);
1092
- struct mlx5_core_dev *dev = get_dev(&ft->node);
10931281 struct mlx5_flow_group *fg;
10941282 int err;
10951283
1096
- if (ft->autogroup.active)
1284
+ if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
10971285 return ERR_PTR(-EPERM);
10981286
1099
- down_write_ref_node(&ft->node);
1287
+ down_write_ref_node(&ft->node, false);
11001288 fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
11011289 start_index, end_index,
11021290 ft->node.children.prev);
1103
- up_write_ref_node(&ft->node);
1291
+ up_write_ref_node(&ft->node, false);
11041292 if (IS_ERR(fg))
11051293 return fg;
11061294
1107
- err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
1295
+ err = root->cmds->create_flow_group(root, ft, fg_in, fg);
11081296 if (err) {
1109
- tree_put_node(&fg->node);
1297
+ tree_put_node(&fg->node, false);
11101298 return ERR_PTR(err);
11111299 }
11121300 trace_mlx5_fs_add_fg(fg);
@@ -1244,17 +1432,15 @@
12441432 fs_get_obj(ft, fg->node.parent);
12451433 root = find_root(&fg->node);
12461434 if (!(fte->status & FS_FTE_STATUS_EXISTING))
1247
- err = root->cmds->create_fte(get_dev(&ft->node),
1248
- ft, fg, fte);
1435
+ err = root->cmds->create_fte(root, ft, fg, fte);
12491436 else
1250
- err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
1251
- modify_mask, fte);
1437
+ err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
12521438 if (err)
12531439 goto free_handle;
12541440
12551441 fte->node.active = true;
12561442 fte->status |= FS_FTE_STATUS_EXISTING;
1257
- atomic_inc(&fte->node.version);
1443
+ atomic_inc(&fg->node.version);
12581444
12591445 out:
12601446 return handle;
@@ -1265,12 +1451,13 @@
12651451 }
12661452
12671453 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
1268
- struct mlx5_flow_spec *spec)
1454
+ const struct mlx5_flow_spec *spec)
12691455 {
12701456 struct list_head *prev = &ft->node.children;
1271
- struct mlx5_flow_group *fg;
1457
+ u32 max_fte = ft->autogroup.max_fte;
12721458 unsigned int candidate_index = 0;
12731459 unsigned int group_size = 0;
1460
+ struct mlx5_flow_group *fg;
12741461
12751462 if (!ft->autogroup.active)
12761463 return ERR_PTR(-ENOENT);
@@ -1278,7 +1465,7 @@
12781465 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
12791466 group_size = ft->autogroup.group_size;
12801467
1281
- /* ft->max_fte == ft->autogroup.max_types */
1468
+ /* max_fte == ft->autogroup.max_types */
12821469 if (group_size == 0)
12831470 group_size = 1;
12841471
@@ -1291,7 +1478,7 @@
12911478 prev = &fg->node.list;
12921479 }
12931480
1294
- if (candidate_index + group_size > ft->max_fte)
1481
+ if (candidate_index + group_size > max_fte)
12951482 return ERR_PTR(-ENOSPC);
12961483
12971484 fg = alloc_insert_flow_group(ft,
@@ -1314,7 +1501,6 @@
13141501 struct mlx5_flow_group *fg)
13151502 {
13161503 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1317
- struct mlx5_core_dev *dev = get_dev(&ft->node);
13181504 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
13191505 void *match_criteria_addr;
13201506 u8 src_esw_owner_mask_on;
@@ -1344,7 +1530,7 @@
13441530 memcpy(match_criteria_addr, fg->mask.match_criteria,
13451531 sizeof(fg->mask.match_criteria));
13461532
1347
- err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
1533
+ err = root->cmds->create_flow_group(root, ft, in, fg);
13481534 if (!err) {
13491535 fg->node.active = true;
13501536 trace_mlx5_fs_add_fg(fg);
@@ -1359,7 +1545,13 @@
13591545 {
13601546 if (d1->type == d2->type) {
13611547 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1362
- d1->vport.num == d2->vport.num) ||
1548
+ d1->vport.num == d2->vport.num &&
1549
+ d1->vport.flags == d2->vport.flags &&
1550
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1551
+ (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1552
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1553
+ (d1->vport.pkt_reformat->id ==
1554
+ d2->vport.pkt_reformat->id) : true)) ||
13631555 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
13641556 d1->ft == d2->ft) ||
13651557 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -1384,9 +1576,22 @@
13841576 return NULL;
13851577 }
13861578
1387
-static bool check_conflicting_actions(u32 action1, u32 action2)
1579
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
1580
+ const struct mlx5_fs_vlan *vlan1)
13881581 {
1389
- u32 xored_actions = action1 ^ action2;
1582
+ return vlan0->ethtype != vlan1->ethtype ||
1583
+ vlan0->vid != vlan1->vid ||
1584
+ vlan0->prio != vlan1->prio;
1585
+}
1586
+
1587
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1588
+ const struct mlx5_flow_act *act2)
1589
+{
1590
+ u32 action1 = act1->action;
1591
+ u32 action2 = act2->action;
1592
+ u32 xored_actions;
1593
+
1594
+ xored_actions = action1 ^ action2;
13901595
13911596 /* if one rule only wants to count, it's ok */
13921597 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
@@ -1394,7 +1599,7 @@
13941599 return false;
13951600
13961601 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1397
- MLX5_FLOW_CONTEXT_ACTION_ENCAP |
1602
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
13981603 MLX5_FLOW_CONTEXT_ACTION_DECAP |
13991604 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
14001605 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
@@ -1403,23 +1608,41 @@
14031608 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
14041609 return true;
14051610
1611
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1612
+ act1->pkt_reformat != act2->pkt_reformat)
1613
+ return true;
1614
+
1615
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1616
+ act1->modify_hdr != act2->modify_hdr)
1617
+ return true;
1618
+
1619
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1620
+ check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1621
+ return true;
1622
+
1623
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1624
+ check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
1625
+ return true;
1626
+
14061627 return false;
14071628 }
14081629
1409
-static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
1630
+static int check_conflicting_ftes(struct fs_fte *fte,
1631
+ const struct mlx5_flow_context *flow_context,
1632
+ const struct mlx5_flow_act *flow_act)
14101633 {
1411
- if (check_conflicting_actions(flow_act->action, fte->action.action)) {
1634
+ if (check_conflicting_actions(flow_act, &fte->action)) {
14121635 mlx5_core_warn(get_dev(&fte->node),
14131636 "Found two FTEs with conflicting actions\n");
14141637 return -EEXIST;
14151638 }
14161639
1417
- if (flow_act->has_flow_tag &&
1418
- fte->action.flow_tag != flow_act->flow_tag) {
1640
+ if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1641
+ fte->flow_context.flow_tag != flow_context->flow_tag) {
14191642 mlx5_core_warn(get_dev(&fte->node),
14201643 "FTE flow tag %u already exists with different flow tag %u\n",
1421
- fte->action.flow_tag,
1422
- flow_act->flow_tag);
1644
+ fte->flow_context.flow_tag,
1645
+ flow_context->flow_tag);
14231646 return -EEXIST;
14241647 }
14251648
@@ -1427,7 +1650,7 @@
14271650 }
14281651
14291652 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1430
- u32 *match_value,
1653
+ const struct mlx5_flow_spec *spec,
14311654 struct mlx5_flow_act *flow_act,
14321655 struct mlx5_flow_destination *dest,
14331656 int dest_num,
@@ -1438,7 +1661,7 @@
14381661 int i;
14391662 int ret;
14401663
1441
- ret = check_conflicting_ftes(fte, flow_act);
1664
+ ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
14421665 if (ret)
14431666 return ERR_PTR(ret);
14441667
@@ -1461,46 +1684,39 @@
14611684 return handle;
14621685 }
14631686
1464
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
1687
+static bool counter_is_valid(u32 action)
14651688 {
1466
- struct mlx5_flow_rule *dst;
1467
- struct fs_fte *fte;
1468
-
1469
- fs_get_obj(fte, handle->rule[0]->node.parent);
1470
-
1471
- fs_for_each_dst(dst, fte) {
1472
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
1473
- return dst->dest_attr.counter;
1474
- }
1475
-
1476
- return NULL;
1477
-}
1478
-
1479
-static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
1480
-{
1481
- if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
1482
- return !counter;
1483
-
1484
- if (!counter)
1485
- return false;
1486
-
14871689 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1690
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW |
14881691 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
14891692 }
14901693
14911694 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1492
- u32 action,
1695
+ struct mlx5_flow_act *flow_act,
14931696 struct mlx5_flow_table *ft)
14941697 {
1698
+ bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1699
+ u32 action = flow_act->action;
1700
+
14951701 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1496
- return counter_is_valid(dest->counter, action);
1702
+ return counter_is_valid(action);
14971703
14981704 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
14991705 return true;
15001706
1707
+ if (ignore_level) {
1708
+ if (ft->type != FS_FT_FDB &&
1709
+ ft->type != FS_FT_NIC_RX)
1710
+ return false;
1711
+
1712
+ if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1713
+ ft->type != dest->ft->type)
1714
+ return false;
1715
+ }
1716
+
15011717 if (!dest || ((dest->type ==
15021718 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1503
- (dest->ft->level <= ft->level)))
1719
+ (dest->ft->level <= ft->level && !ignore_level)))
15041720 return false;
15051721 return true;
15061722 }
@@ -1510,30 +1726,22 @@
15101726 struct mlx5_flow_group *g;
15111727 };
15121728
1513
-struct match_list_head {
1514
- struct list_head list;
1515
- struct match_list first;
1516
-};
1517
-
1518
-static void free_match_list(struct match_list_head *head)
1729
+static void free_match_list(struct match_list *head, bool ft_locked)
15191730 {
1520
- if (!list_empty(&head->list)) {
1521
- struct match_list *iter, *match_tmp;
1731
+ struct match_list *iter, *match_tmp;
15221732
1523
- list_del(&head->first.list);
1524
- tree_put_node(&head->first.g->node);
1525
- list_for_each_entry_safe(iter, match_tmp, &head->list,
1526
- list) {
1527
- tree_put_node(&iter->g->node);
1528
- list_del(&iter->list);
1529
- kfree(iter);
1530
- }
1733
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
1734
+ list) {
1735
+ tree_put_node(&iter->g->node, ft_locked);
1736
+ list_del(&iter->list);
1737
+ kfree(iter);
15311738 }
15321739 }
15331740
1534
-static int build_match_list(struct match_list_head *match_head,
1741
+static int build_match_list(struct match_list *match_head,
15351742 struct mlx5_flow_table *ft,
1536
- struct mlx5_flow_spec *spec)
1743
+ const struct mlx5_flow_spec *spec,
1744
+ bool ft_locked)
15371745 {
15381746 struct rhlist_head *tmp, *list;
15391747 struct mlx5_flow_group *g;
@@ -1547,24 +1755,14 @@
15471755 rhl_for_each_entry_rcu(g, tmp, list, hash) {
15481756 struct match_list *curr_match;
15491757
1550
- if (likely(list_empty(&match_head->list))) {
1551
- if (!tree_get_node(&g->node))
1552
- continue;
1553
- match_head->first.g = g;
1554
- list_add_tail(&match_head->first.list,
1555
- &match_head->list);
1758
+ if (unlikely(!tree_get_node(&g->node)))
15561759 continue;
1557
- }
15581760
15591761 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
15601762 if (!curr_match) {
15611763 rcu_read_unlock();
1562
- free_match_list(match_head);
1764
+ free_match_list(match_head, ft_locked);
15631765 return -ENOMEM;
1564
- }
1565
- if (!tree_get_node(&g->node)) {
1566
- kfree(curr_match);
1567
- continue;
15681766 }
15691767 curr_match->g = g;
15701768 list_add_tail(&curr_match->list, &match_head->list);
@@ -1585,7 +1783,7 @@
15851783
15861784 static struct fs_fte *
15871785 lookup_fte_locked(struct mlx5_flow_group *g,
1588
- u32 *match_value,
1786
+ const u32 *match_value,
15891787 bool take_write)
15901788 {
15911789 struct fs_fte *fte_tmp;
@@ -1601,7 +1799,7 @@
16011799 goto out;
16021800 }
16031801 if (!fte_tmp->node.active) {
1604
- tree_put_node(&fte_tmp->node);
1802
+ tree_put_node(&fte_tmp->node, false);
16051803 fte_tmp = NULL;
16061804 goto out;
16071805 }
@@ -1609,7 +1807,7 @@
16091807 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
16101808 out:
16111809 if (take_write)
1612
- up_write_ref_node(&g->node);
1810
+ up_write_ref_node(&g->node, false);
16131811 else
16141812 up_read_ref_node(&g->node);
16151813 return fte_tmp;
@@ -1618,7 +1816,7 @@
16181816 static struct mlx5_flow_handle *
16191817 try_add_to_existing_fg(struct mlx5_flow_table *ft,
16201818 struct list_head *match_head,
1621
- struct mlx5_flow_spec *spec,
1819
+ const struct mlx5_flow_spec *spec,
16221820 struct mlx5_flow_act *flow_act,
16231821 struct mlx5_flow_destination *dest,
16241822 int dest_num,
@@ -1630,16 +1828,20 @@
16301828 struct match_list *iter;
16311829 bool take_write = false;
16321830 struct fs_fte *fte;
1633
- u64 version;
1831
+ u64 version = 0;
16341832 int err;
16351833
1636
- fte = alloc_fte(ft, spec->match_value, flow_act);
1834
+ fte = alloc_fte(ft, spec, flow_act);
16371835 if (IS_ERR(fte))
16381836 return ERR_PTR(-ENOMEM);
16391837
16401838 search_again_locked:
1839
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
1840
+ goto skip_search;
16411841 version = matched_fgs_get_version(match_head);
1642
- /* Try to find a fg that already contains a matching fte */
1842
+ /* Try to find an fte with identical match value and attempt update its
1843
+ * action.
1844
+ */
16431845 list_for_each_entry(iter, match_head, list) {
16441846 struct fs_fte *fte_tmp;
16451847
@@ -1647,13 +1849,18 @@
16471849 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
16481850 if (!fte_tmp)
16491851 continue;
1650
- rule = add_rule_fg(g, spec->match_value,
1651
- flow_act, dest, dest_num, fte_tmp);
1652
- up_write_ref_node(&fte_tmp->node);
1653
- tree_put_node(&fte_tmp->node);
1852
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1853
+ /* No error check needed here, because insert_fte() is not called */
1854
+ up_write_ref_node(&fte_tmp->node, false);
1855
+ tree_put_node(&fte_tmp->node, false);
16541856 kmem_cache_free(steering->ftes_cache, fte);
16551857 return rule;
16561858 }
1859
+
1860
+skip_search:
1861
+ /* No group with matching fte found, or we skipped the search.
1862
+ * Try to add a new fte to any matching fg.
1863
+ */
16571864
16581865 /* Check the ft version, for case that new flow group
16591866 * was added while the fgs weren't locked
@@ -1663,10 +1870,12 @@
16631870 goto out;
16641871 }
16651872
1666
- /* Check the fgs version, for case the new FTE with the
1667
- * same values was added while the fgs weren't locked
1873
+ /* Check the fgs version. If version have changed it could be that an
1874
+ * FTE with the same match value was added while the fgs weren't
1875
+ * locked.
16681876 */
1669
- if (version != matched_fgs_get_version(match_head)) {
1877
+ if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1878
+ version != matched_fgs_get_version(match_head)) {
16701879 take_write = true;
16711880 goto search_again_locked;
16721881 }
....@@ -1674,14 +1883,16 @@
16741883 list_for_each_entry(iter, match_head, list) {
16751884 g = iter->g;
16761885
1677
- if (!g->node.active)
1678
- continue;
1679
-
16801886 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1887
+
1888
+ if (!g->node.active) {
1889
+ up_write_ref_node(&g->node, false);
1890
+ continue;
1891
+ }
16811892
16821893 err = insert_fte(g, fte);
16831894 if (err) {
1684
- up_write_ref_node(&g->node);
1895
+ up_write_ref_node(&g->node, false);
16851896 if (err == -ENOSPC)
16861897 continue;
16871898 kmem_cache_free(steering->ftes_cache, fte);
....@@ -1689,11 +1900,11 @@
16891900 }
16901901
16911902 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1692
- up_write_ref_node(&g->node);
1693
- rule = add_rule_fg(g, spec->match_value,
1694
- flow_act, dest, dest_num, fte);
1695
- up_write_ref_node(&fte->node);
1696
- tree_put_node(&fte->node);
1903
+ up_write_ref_node(&g->node, false);
1904
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1905
+ up_write_ref_node(&fte->node, false);
1906
+ if (IS_ERR(rule))
1907
+ tree_put_node(&fte->node, false);
16971908 return rule;
16981909 }
16991910 rule = ERR_PTR(-ENOENT);
....@@ -1704,16 +1915,16 @@
17041915
17051916 static struct mlx5_flow_handle *
17061917 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1707
- struct mlx5_flow_spec *spec,
1918
+ const struct mlx5_flow_spec *spec,
17081919 struct mlx5_flow_act *flow_act,
17091920 struct mlx5_flow_destination *dest,
17101921 int dest_num)
17111922
17121923 {
17131924 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1714
- struct mlx5_flow_group *g;
17151925 struct mlx5_flow_handle *rule;
1716
- struct match_list_head match_head;
1926
+ struct match_list match_head;
1927
+ struct mlx5_flow_group *g;
17171928 bool take_write = false;
17181929 struct fs_fte *fte;
17191930 int version;
....@@ -1724,7 +1935,7 @@
17241935 return ERR_PTR(-EINVAL);
17251936
17261937 for (i = 0; i < dest_num; i++) {
1727
- if (!dest_is_valid(&dest[i], flow_act->action, ft))
1938
+ if (!dest_is_valid(&dest[i], flow_act, ft))
17281939 return ERR_PTR(-EINVAL);
17291940 }
17301941 nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
....@@ -1732,10 +1943,10 @@
17321943 version = atomic_read(&ft->node.version);
17331944
17341945 /* Collect all fgs which has a matching match_criteria */
1735
- err = build_match_list(&match_head, ft, spec);
1946
+ err = build_match_list(&match_head, ft, spec, take_write);
17361947 if (err) {
17371948 if (take_write)
1738
- up_write_ref_node(&ft->node);
1949
+ up_write_ref_node(&ft->node, false);
17391950 else
17401951 up_read_ref_node(&ft->node);
17411952 return ERR_PTR(err);
....@@ -1746,11 +1957,11 @@
17461957
17471958 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
17481959 dest_num, version);
1749
- free_match_list(&match_head);
1960
+ free_match_list(&match_head, take_write);
17501961 if (!IS_ERR(rule) ||
17511962 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
17521963 if (take_write)
1753
- up_write_ref_node(&ft->node);
1964
+ up_write_ref_node(&ft->node, false);
17541965 return rule;
17551966 }
17561967
....@@ -1766,41 +1977,42 @@
17661977 g = alloc_auto_flow_group(ft, spec);
17671978 if (IS_ERR(g)) {
17681979 rule = ERR_CAST(g);
1769
- up_write_ref_node(&ft->node);
1980
+ up_write_ref_node(&ft->node, false);
17701981 return rule;
17711982 }
17721983
1984
+ fte = alloc_fte(ft, spec, flow_act);
1985
+ if (IS_ERR(fte)) {
1986
+ up_write_ref_node(&ft->node, false);
1987
+ err = PTR_ERR(fte);
1988
+ goto err_alloc_fte;
1989
+ }
1990
+
17731991 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1774
- up_write_ref_node(&ft->node);
1992
+ up_write_ref_node(&ft->node, false);
17751993
17761994 err = create_auto_flow_group(ft, g);
17771995 if (err)
17781996 goto err_release_fg;
17791997
1780
- fte = alloc_fte(ft, spec->match_value, flow_act);
1781
- if (IS_ERR(fte)) {
1782
- err = PTR_ERR(fte);
1783
- goto err_release_fg;
1784
- }
1785
-
17861998 err = insert_fte(g, fte);
1787
- if (err) {
1788
- kmem_cache_free(steering->ftes_cache, fte);
1999
+ if (err)
17892000 goto err_release_fg;
1790
- }
17912001
17922002 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1793
- up_write_ref_node(&g->node);
1794
- rule = add_rule_fg(g, spec->match_value, flow_act, dest,
1795
- dest_num, fte);
1796
- up_write_ref_node(&fte->node);
1797
- tree_put_node(&fte->node);
1798
- tree_put_node(&g->node);
2003
+ up_write_ref_node(&g->node, false);
2004
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
2005
+ up_write_ref_node(&fte->node, false);
2006
+ if (IS_ERR(rule))
2007
+ tree_put_node(&fte->node, false);
2008
+ tree_put_node(&g->node, false);
17992009 return rule;
18002010
18012011 err_release_fg:
1802
- up_write_ref_node(&g->node);
1803
- tree_put_node(&g->node);
2012
+ up_write_ref_node(&g->node, false);
2013
+ kmem_cache_free(steering->ftes_cache, fte);
2014
+err_alloc_fte:
2015
+ tree_put_node(&g->node, false);
18042016 return ERR_PTR(err);
18052017 }
18062018
....@@ -1812,61 +2024,104 @@
18122024
18132025 struct mlx5_flow_handle *
18142026 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1815
- struct mlx5_flow_spec *spec,
2027
+ const struct mlx5_flow_spec *spec,
18162028 struct mlx5_flow_act *flow_act,
18172029 struct mlx5_flow_destination *dest,
18182030 int num_dest)
18192031 {
18202032 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1821
- struct mlx5_flow_destination gen_dest = {};
2033
+ static const struct mlx5_flow_spec zero_spec = {};
2034
+ struct mlx5_flow_destination *gen_dest = NULL;
18222035 struct mlx5_flow_table *next_ft = NULL;
18232036 struct mlx5_flow_handle *handle = NULL;
18242037 u32 sw_action = flow_act->action;
1825
- struct fs_prio *prio;
2038
+ int i;
18262039
1827
- fs_get_obj(prio, ft->node.parent);
1828
- if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1829
- if (!fwd_next_prio_supported(ft))
1830
- return ERR_PTR(-EOPNOTSUPP);
1831
- if (num_dest)
1832
- return ERR_PTR(-EINVAL);
1833
- mutex_lock(&root->chain_lock);
1834
- next_ft = find_next_chained_ft(prio);
1835
- if (next_ft) {
1836
- gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1837
- gen_dest.ft = next_ft;
1838
- dest = &gen_dest;
1839
- num_dest = 1;
1840
- flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1841
- } else {
1842
- mutex_unlock(&root->chain_lock);
1843
- return ERR_PTR(-EOPNOTSUPP);
1844
- }
2040
+ if (!spec)
2041
+ spec = &zero_spec;
2042
+
2043
+ if (!is_fwd_next_action(sw_action))
2044
+ return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2045
+
2046
+ if (!fwd_next_prio_supported(ft))
2047
+ return ERR_PTR(-EOPNOTSUPP);
2048
+
2049
+ mutex_lock(&root->chain_lock);
2050
+ next_ft = find_next_fwd_ft(ft, flow_act);
2051
+ if (!next_ft) {
2052
+ handle = ERR_PTR(-EOPNOTSUPP);
2053
+ goto unlock;
18452054 }
18462055
2056
+ gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2057
+ GFP_KERNEL);
2058
+ if (!gen_dest) {
2059
+ handle = ERR_PTR(-ENOMEM);
2060
+ goto unlock;
2061
+ }
2062
+ for (i = 0; i < num_dest; i++)
2063
+ gen_dest[i] = dest[i];
2064
+ gen_dest[i].type =
2065
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2066
+ gen_dest[i].ft = next_ft;
2067
+ dest = gen_dest;
2068
+ num_dest++;
2069
+ flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2070
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2071
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
18472072 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2073
+ if (IS_ERR(handle))
2074
+ goto unlock;
18482075
1849
- if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1850
- if (!IS_ERR_OR_NULL(handle) &&
1851
- (list_empty(&handle->rule[0]->next_ft))) {
1852
- mutex_lock(&next_ft->lock);
1853
- list_add(&handle->rule[0]->next_ft,
1854
- &next_ft->fwd_rules);
1855
- mutex_unlock(&next_ft->lock);
1856
- handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1857
- }
1858
- mutex_unlock(&root->chain_lock);
2076
+ if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2077
+ mutex_lock(&next_ft->lock);
2078
+ list_add(&handle->rule[num_dest - 1]->next_ft,
2079
+ &next_ft->fwd_rules);
2080
+ mutex_unlock(&next_ft->lock);
2081
+ handle->rule[num_dest - 1]->sw_action = sw_action;
2082
+ handle->rule[num_dest - 1]->ft = ft;
18592083 }
2084
+unlock:
2085
+ mutex_unlock(&root->chain_lock);
2086
+ kfree(gen_dest);
18602087 return handle;
18612088 }
18622089 EXPORT_SYMBOL(mlx5_add_flow_rules);
18632090
18642091 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
18652092 {
2093
+ struct fs_fte *fte;
18662094 int i;
18672095
2096
+ /* In order to consolidate the HW changes we lock the FTE for other
2097
+ * changes, and increase its refcount, in order not to perform the
2098
+ * "del" functions of the FTE. Will handle them here.
2099
+ * The removal of the rules is done under locked FTE.
2100
+ * After removing all the handle's rules, if there are remaining
2101
+ * rules, it means we just need to modify the FTE in FW, and
2102
+ * unlock/decrease the refcount we increased before.
2103
+ * Otherwise, it means the FTE should be deleted. First delete the
2104
+ * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
2105
+ * the FTE, which will handle the last decrease of the refcount, as
2106
+ * well as required handling of its parent.
2107
+ */
2108
+ fs_get_obj(fte, handle->rule[0]->node.parent);
2109
+ down_write_ref_node(&fte->node, false);
18682110 for (i = handle->num_rules - 1; i >= 0; i--)
1869
- tree_remove_node(&handle->rule[i]->node);
2111
+ tree_remove_node(&handle->rule[i]->node, true);
2112
+ if (list_empty(&fte->node.children)) {
2113
+ del_hw_fte(&fte->node);
2114
+ /* Avoid double call to del_hw_fte */
2115
+ fte->node.del_hw_func = NULL;
2116
+ up_write_ref_node(&fte->node, false);
2117
+ tree_put_node(&fte->node, false);
2118
+ } else if (fte->dests_size) {
2119
+ if (fte->modify_mask)
2120
+ modify_fte(fte);
2121
+ up_write_ref_node(&fte->node, false);
2122
+ } else {
2123
+ up_write_ref_node(&fte->node, false);
2124
+ }
18702125 kfree(handle);
18712126 }
18722127 EXPORT_SYMBOL(mlx5_del_flow_rules);
....@@ -1874,13 +2129,20 @@
18742129 /* Assuming prio->node.children(flow tables) is sorted by level */
18752130 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
18762131 {
2132
+ struct fs_node *prio_parent, *child;
18772133 struct fs_prio *prio;
18782134
18792135 fs_get_obj(prio, ft->node.parent);
18802136
18812137 if (!list_is_last(&ft->node.list, &prio->node.children))
18822138 return list_next_entry(ft, node.list);
1883
- return find_next_chained_ft(prio);
2139
+
2140
+ prio_parent = find_prio_chains_parent(&prio->node, &child);
2141
+
2142
+ if (prio_parent && list_is_first(&child->list, &prio_parent->children))
2143
+ return find_closest_ft(&prio->node, false, false);
2144
+
2145
+ return find_next_chained_ft(&prio->node);
18842146 }
18852147
18862148 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
....@@ -1903,12 +2165,12 @@
19032165 if (list_empty(&root->underlay_qpns)) {
19042166 /* Don't set any QPN (zero) in case QPN list is empty */
19052167 qpn = 0;
1906
- err = root->cmds->update_root_ft(root->dev, new_root_ft,
2168
+ err = root->cmds->update_root_ft(root, new_root_ft,
19072169 qpn, false);
19082170 } else {
19092171 list_for_each_entry(uqp, &root->underlay_qpns, list) {
19102172 qpn = uqp->qpn;
1911
- err = root->cmds->update_root_ft(root->dev,
2173
+ err = root->cmds->update_root_ft(root,
19122174 new_root_ft, qpn,
19132175 false);
19142176 if (err)
....@@ -1964,12 +2226,13 @@
19642226 int err = 0;
19652227
19662228 mutex_lock(&root->chain_lock);
1967
- err = disconnect_flow_table(ft);
2229
+ if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2230
+ err = disconnect_flow_table(ft);
19682231 if (err) {
19692232 mutex_unlock(&root->chain_lock);
19702233 return err;
19712234 }
1972
- if (tree_remove_node(&ft->node))
2235
+ if (tree_remove_node(&ft->node, false))
19732236 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
19742237 ft->id);
19752238 mutex_unlock(&root->chain_lock);
....@@ -1980,17 +2243,29 @@
19802243
19812244 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
19822245 {
1983
- if (tree_remove_node(&fg->node))
2246
+ if (tree_remove_node(&fg->node, false))
19842247 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
19852248 fg->id);
19862249 }
2250
+
2251
+struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2252
+ int n)
2253
+{
2254
+ struct mlx5_flow_steering *steering = dev->priv.steering;
2255
+
2256
+ if (!steering || !steering->fdb_sub_ns)
2257
+ return NULL;
2258
+
2259
+ return steering->fdb_sub_ns[n];
2260
+}
2261
+EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
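A hedged usage sketch for the new export; only mlx5_get_fdb_sub_ns() and mlx5_create_flow_table() are real entry points here, while the chain index, prio and table sizing are illustrative.

	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;

	ns = mlx5_get_fdb_sub_ns(dev, 0);	/* namespace backing tc chain 0 */
	if (!ns)
		return -EOPNOTSUPP;

	ft_attr.prio = 1;			/* tc prio inside that chain */
	ft_attr.max_fte = 2;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);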
19872262
19882263 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
19892264 enum mlx5_flow_namespace_type type)
19902265 {
19912266 struct mlx5_flow_steering *steering = dev->priv.steering;
19922267 struct mlx5_flow_root_namespace *root_ns;
1993
- int prio;
2268
+ int prio = 0;
19942269 struct fs_prio *fs_prio;
19952270 struct mlx5_flow_namespace *ns;
19962271
....@@ -1998,40 +2273,39 @@
19982273 return NULL;
19992274
20002275 switch (type) {
2001
- case MLX5_FLOW_NAMESPACE_BYPASS:
2002
- case MLX5_FLOW_NAMESPACE_LAG:
2003
- case MLX5_FLOW_NAMESPACE_OFFLOADS:
2004
- case MLX5_FLOW_NAMESPACE_ETHTOOL:
2005
- case MLX5_FLOW_NAMESPACE_KERNEL:
2006
- case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2007
- case MLX5_FLOW_NAMESPACE_ANCHOR:
2008
- prio = type;
2009
- break;
20102276 case MLX5_FLOW_NAMESPACE_FDB:
20112277 if (steering->fdb_root_ns)
20122278 return &steering->fdb_root_ns->ns;
2013
- else
2014
- return NULL;
2279
+ return NULL;
20152280 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
20162281 if (steering->sniffer_rx_root_ns)
20172282 return &steering->sniffer_rx_root_ns->ns;
2018
- else
2019
- return NULL;
2283
+ return NULL;
20202284 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
20212285 if (steering->sniffer_tx_root_ns)
20222286 return &steering->sniffer_tx_root_ns->ns;
2023
- else
2024
- return NULL;
2025
- case MLX5_FLOW_NAMESPACE_EGRESS:
2026
- if (steering->egress_root_ns)
2027
- return &steering->egress_root_ns->ns;
2028
- else
2029
- return NULL;
2030
- default:
20312287 return NULL;
2288
+ default:
2289
+ break;
20322290 }
20332291
2034
- root_ns = steering->root_ns;
2292
+ if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
2293
+ type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
2294
+ root_ns = steering->egress_root_ns;
2295
+ prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2296
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
2297
+ root_ns = steering->rdma_rx_root_ns;
2298
+ prio = RDMA_RX_BYPASS_PRIO;
2299
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
2300
+ root_ns = steering->rdma_rx_root_ns;
2301
+ prio = RDMA_RX_KERNEL_PRIO;
2302
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
2303
+ root_ns = steering->rdma_tx_root_ns;
2304
+ } else { /* Must be NIC RX */
2305
+ root_ns = steering->root_ns;
2306
+ prio = type;
2307
+ }
2308
+
20352309 if (!root_ns)
20362310 return NULL;
20372311
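With the selection reworked as above, a hedged caller sketch for the new RDMA RX case; whether the root namespace exists depends on the ft_support and table_miss_action_domain caps checked in init_rdma_rx_root_ns() further down.

	struct mlx5_flow_namespace *ns;

	/* resolves to rdma_rx_root_ns at RDMA_RX_BYPASS_PRIO, or NULL when
	 * the RDMA RX root namespace was not created on this device
	 */
	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
	if (!ns)
		return -EOPNOTSUPP;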
....@@ -2053,7 +2327,7 @@
20532327 {
20542328 struct mlx5_flow_steering *steering = dev->priv.steering;
20552329
2056
- if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
2330
+ if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
20572331 return NULL;
20582332
20592333 switch (type) {
....@@ -2074,8 +2348,10 @@
20742348 }
20752349 }
20762350
2077
-static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2078
- unsigned int prio, int num_levels)
2351
+static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2352
+ unsigned int prio,
2353
+ int num_levels,
2354
+ enum fs_node_type type)
20792355 {
20802356 struct fs_prio *fs_prio;
20812357
....@@ -2083,7 +2359,7 @@
20832359 if (!fs_prio)
20842360 return ERR_PTR(-ENOMEM);
20852361
2086
- fs_prio->node.type = FS_TYPE_PRIO;
2362
+ fs_prio->node.type = type;
20872363 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
20882364 tree_add_node(&fs_prio->node, &ns->node);
20892365 fs_prio->num_levels = num_levels;
....@@ -2091,6 +2367,19 @@
20912367 list_add_tail(&fs_prio->node.list, &ns->node.children);
20922368
20932369 return fs_prio;
2370
+}
2371
+
2372
+static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2373
+ unsigned int prio,
2374
+ int num_levels)
2375
+{
2376
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2377
+}
2378
+
2379
+static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2380
+ unsigned int prio, int num_levels)
2381
+{
2382
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
20942383 }
20952384
20962385 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
....@@ -2101,7 +2390,8 @@
21012390 return ns;
21022391 }
21032392
2104
-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
2393
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2394
+ int def_miss_act)
21052395 {
21062396 struct mlx5_flow_namespace *ns;
21072397
....@@ -2110,6 +2400,7 @@
21102400 return ERR_PTR(-ENOMEM);
21112401
21122402 fs_init_namespace(ns);
2403
+ ns->def_miss_action = def_miss_act;
21132404 tree_init_node(&ns->node, NULL, del_sw_ns);
21142405 tree_add_node(&ns->node, &prio->node);
21152406 list_add_tail(&ns->node.list, &prio->node.children);
....@@ -2176,7 +2467,7 @@
21762467 base = &fs_prio->node;
21772468 } else if (init_node->type == FS_TYPE_NAMESPACE) {
21782469 fs_get_obj(fs_prio, fs_parent_node);
2179
- fs_ns = fs_create_namespace(fs_prio);
2470
+ fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
21802471 if (IS_ERR(fs_ns))
21812472 return PTR_ERR(fs_ns);
21822473 base = &fs_ns->node;
....@@ -2217,6 +2508,17 @@
22172508 return 0;
22182509 }
22192510
2511
+static void del_sw_root_ns(struct fs_node *node)
2512
+{
2513
+ struct mlx5_flow_root_namespace *root_ns;
2514
+ struct mlx5_flow_namespace *ns;
2515
+
2516
+ fs_get_obj(ns, node);
2517
+ root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2518
+ mutex_destroy(&root_ns->chain_lock);
2519
+ kfree(node);
2520
+}
2521
+
22202522 static struct mlx5_flow_root_namespace
22212523 *create_root_ns(struct mlx5_flow_steering *steering,
22222524 enum fs_flow_table_type table_type)
....@@ -2225,7 +2527,7 @@
22252527 struct mlx5_flow_root_namespace *root_ns;
22262528 struct mlx5_flow_namespace *ns;
22272529
2228
- if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2530
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
22292531 (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
22302532 cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
22312533
....@@ -2243,7 +2545,7 @@
22432545 ns = &root_ns->ns;
22442546 fs_init_namespace(ns);
22452547 mutex_init(&root_ns->chain_lock);
2246
- tree_init_node(&ns->node, NULL, NULL);
2548
+ tree_init_node(&ns->node, NULL, del_sw_root_ns);
22472549 tree_add_node(&ns->node, NULL);
22482550
22492551 return root_ns;
....@@ -2269,9 +2571,17 @@
22692571 int acc_level_ns = acc_level;
22702572
22712573 prio->start_level = acc_level;
2272
- fs_for_each_ns(ns, prio)
2574
+ fs_for_each_ns(ns, prio) {
22732575 /* This updates start_level and num_levels of ns's priority descendants */
22742576 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2577
+
2578
+ /* If this is a prio with chains, we can jump from one chain
2579
+ * (namespace) to another, so we accumulate the levels
2580
+ */
2581
+ if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2582
+ acc_level = acc_level_ns;
2583
+ }
2584
+
22752585 if (!prio->num_levels)
22762586 prio->num_levels = acc_level_ns - prio->start_level;
22772587 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
....@@ -2348,8 +2658,8 @@
23482658 tree_get_node(node);
23492659 list_for_each_entry_safe(iter, temp, &node->children, list)
23502660 clean_tree(iter);
2351
- tree_put_node(node);
2352
- tree_remove_node(node);
2661
+ tree_put_node(node, false);
2662
+ tree_remove_node(node, false);
23532663 }
23542664 }
23552665
....@@ -2369,7 +2679,7 @@
23692679 if (!steering->esw_egress_root_ns)
23702680 return;
23712681
2372
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2682
+ for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
23732683 cleanup_root_ns(steering->esw_egress_root_ns[i]);
23742684
23752685 kfree(steering->esw_egress_root_ns);
....@@ -2384,7 +2694,7 @@
23842694 if (!steering->esw_ingress_root_ns)
23852695 return;
23862696
2387
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2697
+ for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
23882698 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
23892699
23902700 kfree(steering->esw_ingress_root_ns);
....@@ -2399,8 +2709,13 @@
23992709 cleanup_egress_acls_root_ns(dev);
24002710 cleanup_ingress_acls_root_ns(dev);
24012711 cleanup_root_ns(steering->fdb_root_ns);
2712
+ steering->fdb_root_ns = NULL;
2713
+ kfree(steering->fdb_sub_ns);
2714
+ steering->fdb_sub_ns = NULL;
24022715 cleanup_root_ns(steering->sniffer_rx_root_ns);
24032716 cleanup_root_ns(steering->sniffer_tx_root_ns);
2717
+ cleanup_root_ns(steering->rdma_rx_root_ns);
2718
+ cleanup_root_ns(steering->rdma_tx_root_ns);
24042719 cleanup_root_ns(steering->egress_root_ns);
24052720 mlx5_cleanup_fc_stats(dev);
24062721 kmem_cache_destroy(steering->ftes_cache);
....@@ -2418,11 +2733,7 @@
24182733
24192734 /* Create single prio */
24202735 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2421
- if (IS_ERR(prio)) {
2422
- cleanup_root_ns(steering->sniffer_tx_root_ns);
2423
- return PTR_ERR(prio);
2424
- }
2425
- return 0;
2736
+ return PTR_ERR_OR_ZERO(prio);
24262737 }
24272738
24282739 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
....@@ -2435,36 +2746,184 @@
24352746
24362747 /* Create single prio */
24372748 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2438
- if (IS_ERR(prio)) {
2439
- cleanup_root_ns(steering->sniffer_rx_root_ns);
2440
- return PTR_ERR(prio);
2749
+ return PTR_ERR_OR_ZERO(prio);
2750
+}
2751
+
2752
+static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2753
+{
2754
+ int err;
2755
+
2756
+ steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2757
+ if (!steering->rdma_rx_root_ns)
2758
+ return -ENOMEM;
2759
+
2760
+ err = init_root_tree(steering, &rdma_rx_root_fs,
2761
+ &steering->rdma_rx_root_ns->ns.node);
2762
+ if (err)
2763
+ goto out_err;
2764
+
2765
+ set_prio_attrs(steering->rdma_rx_root_ns);
2766
+
2767
+ return 0;
2768
+
2769
+out_err:
2770
+ cleanup_root_ns(steering->rdma_rx_root_ns);
2771
+ steering->rdma_rx_root_ns = NULL;
2772
+ return err;
2773
+}
2774
+
2775
+static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2776
+{
2777
+ int err;
2778
+
2779
+ steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2780
+ if (!steering->rdma_tx_root_ns)
2781
+ return -ENOMEM;
2782
+
2783
+ err = init_root_tree(steering, &rdma_tx_root_fs,
2784
+ &steering->rdma_tx_root_ns->ns.node);
2785
+ if (err)
2786
+ goto out_err;
2787
+
2788
+ set_prio_attrs(steering->rdma_tx_root_ns);
2789
+
2790
+ return 0;
2791
+
2792
+out_err:
2793
+ cleanup_root_ns(steering->rdma_tx_root_ns);
2794
+ steering->rdma_tx_root_ns = NULL;
2795
+ return err;
2796
+}
2797
+
2798
+/* FT and tc chains are stored in the same array, so we can reuse the
2799
+ * mlx5_get_fdb_sub_ns() and the TC API for FT chains.
2800
+ * When creating a new ns for each chain, store it in the first available slot.
2801
+ * Assume tc chains are created and stored first and only then the FT chain.
2802
+ */
2803
+static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2804
+ struct mlx5_flow_namespace *ns)
2805
+{
2806
+ int chain = 0;
2807
+
2808
+ while (steering->fdb_sub_ns[chain])
2809
+ ++chain;
2810
+
2811
+ steering->fdb_sub_ns[chain] = ns;
2812
+}
2813
+
2814
+static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2815
+ struct fs_prio *maj_prio)
2816
+{
2817
+ struct mlx5_flow_namespace *ns;
2818
+ struct fs_prio *min_prio;
2819
+ int prio;
2820
+
2821
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2822
+ if (IS_ERR(ns))
2823
+ return PTR_ERR(ns);
2824
+
2825
+ for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2826
+ min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2827
+ if (IS_ERR(min_prio))
2828
+ return PTR_ERR(min_prio);
24412829 }
2830
+
2831
+ store_fdb_sub_ns_prio_chain(steering, ns);
2832
+
2833
+ return 0;
2834
+}
2835
+
2836
+static int create_fdb_chains(struct mlx5_flow_steering *steering,
2837
+ int fs_prio,
2838
+ int chains)
2839
+{
2840
+ struct fs_prio *maj_prio;
2841
+ int levels;
2842
+ int chain;
2843
+ int err;
2844
+
2845
+ levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2846
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2847
+ fs_prio,
2848
+ levels);
2849
+ if (IS_ERR(maj_prio))
2850
+ return PTR_ERR(maj_prio);
2851
+
2852
+ for (chain = 0; chain < chains; chain++) {
2853
+ err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2854
+ if (err)
2855
+ return err;
2856
+ }
2857
+
2858
+ return 0;
2859
+}
2860
+
2861
+static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2862
+{
2863
+ int err;
2864
+
2865
+ steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2866
+ sizeof(*steering->fdb_sub_ns),
2867
+ GFP_KERNEL);
2868
+ if (!steering->fdb_sub_ns)
2869
+ return -ENOMEM;
2870
+
2871
+ err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2872
+ if (err)
2873
+ return err;
2874
+
2875
+ err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2876
+ if (err)
2877
+ return err;
2878
+
24422879 return 0;
24432880 }
24442881
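For reference, the fdb_sub_ns[] layout the creation order above produces; this only restates the code, with FDB_NUM_CHAINS assumed to cover both groups (FDB_TC_MAX_CHAIN + 2 slots).

	/*
	 * fdb_sub_ns[0] .. fdb_sub_ns[FDB_TC_MAX_CHAIN]   tc chains (FDB_TC_OFFLOAD)
	 * fdb_sub_ns[FDB_TC_MAX_CHAIN + 1]                FT chain  (FDB_FT_OFFLOAD)
	 *
	 * mlx5_get_fdb_sub_ns(dev, chain) then indexes straight into this array.
	 */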
24452882 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
24462883 {
2447
- struct fs_prio *prio;
2884
+ struct fs_prio *maj_prio;
2885
+ int err;
24482886
24492887 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
24502888 if (!steering->fdb_root_ns)
24512889 return -ENOMEM;
24522890
2453
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
2454
- if (IS_ERR(prio))
2891
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
2892
+ 1);
2893
+ if (IS_ERR(maj_prio)) {
2894
+ err = PTR_ERR(maj_prio);
2895
+ goto out_err;
2896
+ }
2897
+ err = create_fdb_fast_path(steering);
2898
+ if (err)
24552899 goto out_err;
24562900
2457
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
2458
- if (IS_ERR(prio))
2901
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2902
+ if (IS_ERR(maj_prio)) {
2903
+ err = PTR_ERR(maj_prio);
24592904 goto out_err;
2905
+ }
2906
+
2907
+ /* We put this priority last, knowing that nothing will get here
2908
+ * unless explicitly forwarded to. This is possible because the
2909
+ * slow path tables have catch-all rules and nothing gets past
2910
+ * those tables.
2911
+ */
2912
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2913
+ if (IS_ERR(maj_prio)) {
2914
+ err = PTR_ERR(maj_prio);
2915
+ goto out_err;
2916
+ }
24602917
24612918 set_prio_attrs(steering->fdb_root_ns);
24622919 return 0;
24632920
24642921 out_err:
24652922 cleanup_root_ns(steering->fdb_root_ns);
2923
+ kfree(steering->fdb_sub_ns);
2924
+ steering->fdb_sub_ns = NULL;
24662925 steering->fdb_root_ns = NULL;
2467
- return PTR_ERR(prio);
2926
+ return err;
24682927 }
24692928
24702929 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
....@@ -2496,16 +2955,18 @@
24962955 static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
24972956 {
24982957 struct mlx5_flow_steering *steering = dev->priv.steering;
2958
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
24992959 int err;
25002960 int i;
25012961
2502
- steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
2503
- sizeof(*steering->esw_egress_root_ns),
2504
- GFP_KERNEL);
2962
+ steering->esw_egress_root_ns =
2963
+ kcalloc(total_vports,
2964
+ sizeof(*steering->esw_egress_root_ns),
2965
+ GFP_KERNEL);
25052966 if (!steering->esw_egress_root_ns)
25062967 return -ENOMEM;
25072968
2508
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
2969
+ for (i = 0; i < total_vports; i++) {
25092970 err = init_egress_acl_root_ns(steering, i);
25102971 if (err)
25112972 goto cleanup_root_ns;
....@@ -2524,16 +2985,18 @@
25242985 static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
25252986 {
25262987 struct mlx5_flow_steering *steering = dev->priv.steering;
2988
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
25272989 int err;
25282990 int i;
25292991
2530
- steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
2531
- sizeof(*steering->esw_ingress_root_ns),
2532
- GFP_KERNEL);
2992
+ steering->esw_ingress_root_ns =
2993
+ kcalloc(total_vports,
2994
+ sizeof(*steering->esw_ingress_root_ns),
2995
+ GFP_KERNEL);
25332996 if (!steering->esw_ingress_root_ns)
25342997 return -ENOMEM;
25352998
2536
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
2999
+ for (i = 0; i < total_vports; i++) {
25373000 err = init_ingress_acl_root_ns(steering, i);
25383001 if (err)
25393002 goto cleanup_root_ns;
....@@ -2551,16 +3014,23 @@
25513014
25523015 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
25533016 {
2554
- struct fs_prio *prio;
3017
+ int err;
25553018
25563019 steering->egress_root_ns = create_root_ns(steering,
25573020 FS_FT_NIC_TX);
25583021 if (!steering->egress_root_ns)
25593022 return -ENOMEM;
25603023
2561
- /* create 1 prio*/
2562
- prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1);
2563
- return PTR_ERR_OR_ZERO(prio);
3024
+ err = init_root_tree(steering, &egress_root_fs,
3025
+ &steering->egress_root_ns->ns.node);
3026
+ if (err)
3027
+ goto cleanup;
3028
+ set_prio_attrs(steering->egress_root_ns);
3029
+ return 0;
3030
+cleanup:
3031
+ cleanup_root_ns(steering->egress_root_ns);
3032
+ steering->egress_root_ns = NULL;
3033
+ return err;
25643034 }
25653035
25663036 int mlx5_init_fs(struct mlx5_core_dev *dev)
....@@ -2628,7 +3098,21 @@
26283098 goto err;
26293099 }
26303100
2631
- if (MLX5_IPSEC_DEV(dev)) {
3101
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3102
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3103
+ err = init_rdma_rx_root_ns(steering);
3104
+ if (err)
3105
+ goto err;
3106
+ }
3107
+
3108
+ if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3109
+ err = init_rdma_tx_root_ns(steering);
3110
+ if (err)
3111
+ goto err;
3112
+ }
3113
+
3114
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
3115
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
26323116 err = init_egress_root_ns(steering);
26333117 if (err)
26343118 goto err;
....@@ -2657,7 +3141,7 @@
26573141 goto update_ft_fail;
26583142 }
26593143
2660
- err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
3144
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
26613145 false);
26623146 if (err) {
26633147 mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
....@@ -2701,7 +3185,7 @@
27013185 goto out;
27023186 }
27033187
2704
- err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
3188
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
27053189 true);
27063190 if (err)
27073191 mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
....@@ -2718,3 +3202,160 @@
27183202 return err;
27193203 }
27203204 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3205
+
3206
+static struct mlx5_flow_root_namespace
3207
+*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3208
+{
3209
+ struct mlx5_flow_namespace *ns;
3210
+
3211
+ if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3212
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3213
+ ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3214
+ else
3215
+ ns = mlx5_get_flow_namespace(dev, ns_type);
3216
+ if (!ns)
3217
+ return NULL;
3218
+
3219
+ return find_root(&ns->node);
3220
+}
3221
+
3222
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3223
+ u8 ns_type, u8 num_actions,
3224
+ void *modify_actions)
3225
+{
3226
+ struct mlx5_flow_root_namespace *root;
3227
+ struct mlx5_modify_hdr *modify_hdr;
3228
+ int err;
3229
+
3230
+ root = get_root_namespace(dev, ns_type);
3231
+ if (!root)
3232
+ return ERR_PTR(-EOPNOTSUPP);
3233
+
3234
+ modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3235
+ if (!modify_hdr)
3236
+ return ERR_PTR(-ENOMEM);
3237
+
3238
+ modify_hdr->ns_type = ns_type;
3239
+ err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3240
+ modify_actions, modify_hdr);
3241
+ if (err) {
3242
+ kfree(modify_hdr);
3243
+ return ERR_PTR(err);
3244
+ }
3245
+
3246
+ return modify_hdr;
3247
+}
3248
+EXPORT_SYMBOL(mlx5_modify_header_alloc);
3249
+
3250
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3251
+ struct mlx5_modify_hdr *modify_hdr)
3252
+{
3253
+ struct mlx5_flow_root_namespace *root;
3254
+
3255
+ root = get_root_namespace(dev, modify_hdr->ns_type);
3256
+ if (WARN_ON(!root))
3257
+ return;
3258
+ root->cmds->modify_header_dealloc(root, modify_hdr);
3259
+ kfree(modify_hdr);
3260
+}
3261
+EXPORT_SYMBOL(mlx5_modify_header_dealloc);
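A hedged caller-side sketch pairing the two helpers above; attaching the result through flow_act.modify_hdr mirrors how the driver consumes it, but the rewrite field and value picked here are illustrative only.

	u8 action[MLX5_ST_SZ_BYTES(set_action_in)] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
	MLX5_SET(set_action_in, action, length, 8);
	MLX5_SET(set_action_in, action, data, 64);	/* illustrative TTL value */

	modify_hdr = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr))
		return PTR_ERR(modify_hdr);

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = modify_hdr;
	/* ... add and later delete rules using flow_act, then: */
	mlx5_modify_header_dealloc(dev, modify_hdr);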
3262
+
3263
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3264
+ int reformat_type,
3265
+ size_t size,
3266
+ void *reformat_data,
3267
+ enum mlx5_flow_namespace_type ns_type)
3268
+{
3269
+ struct mlx5_pkt_reformat *pkt_reformat;
3270
+ struct mlx5_flow_root_namespace *root;
3271
+ int err;
3272
+
3273
+ root = get_root_namespace(dev, ns_type);
3274
+ if (!root)
3275
+ return ERR_PTR(-EOPNOTSUPP);
3276
+
3277
+ pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3278
+ if (!pkt_reformat)
3279
+ return ERR_PTR(-ENOMEM);
3280
+
3281
+ pkt_reformat->ns_type = ns_type;
3282
+ pkt_reformat->reformat_type = reformat_type;
3283
+ err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
3284
+ reformat_data, ns_type,
3285
+ pkt_reformat);
3286
+ if (err) {
3287
+ kfree(pkt_reformat);
3288
+ return ERR_PTR(err);
3289
+ }
3290
+
3291
+ return pkt_reformat;
3292
+}
3293
+EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3294
+
3295
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3296
+ struct mlx5_pkt_reformat *pkt_reformat)
3297
+{
3298
+ struct mlx5_flow_root_namespace *root;
3299
+
3300
+ root = get_root_namespace(dev, pkt_reformat->ns_type);
3301
+ if (WARN_ON(!root))
3302
+ return;
3303
+ root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3304
+ kfree(pkt_reformat);
3305
+}
3306
+EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
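The same pattern for packet reformat contexts, as a hedged sketch; encap_header/encap_size stand in for a caller-built header and the reformat type is just one of the defined options.

	struct mlx5_flow_act flow_act = {};
	struct mlx5_pkt_reformat *pkt_reformat;

	pkt_reformat = mlx5_packet_reformat_alloc(dev,
						  MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
						  encap_size, encap_header,
						  MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	flow_act.pkt_reformat = pkt_reformat;
	/* ... add and later delete rules using flow_act, then: */
	mlx5_packet_reformat_dealloc(dev, pkt_reformat);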
3307
+
3308
+int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3309
+ struct mlx5_flow_root_namespace *peer_ns)
3310
+{
3311
+ if (peer_ns && ns->mode != peer_ns->mode) {
3312
+ mlx5_core_err(ns->dev,
3313
+ "Can't peer namespace of different steering mode\n");
3314
+ return -EINVAL;
3315
+ }
3316
+
3317
+ return ns->cmds->set_peer(ns, peer_ns);
3318
+}
3319
+
3320
+/* This function should be called only at the init stage of the namespace.
3321
+ * It is not safe to call this function while steering operations
3322
+ * are being executed in the namespace.
3323
+ */
3324
+int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3325
+ enum mlx5_flow_steering_mode mode)
3326
+{
3327
+ struct mlx5_flow_root_namespace *root;
3328
+ const struct mlx5_flow_cmds *cmds;
3329
+ int err;
3330
+
3331
+ root = find_root(&ns->node);
3332
+ if (&root->ns != ns)
3333
+ /* Can't set cmds to a non-root namespace */
3334
+ return -EINVAL;
3335
+
3336
+ if (root->table_type != FS_FT_FDB)
3337
+ return -EOPNOTSUPP;
3338
+
3339
+ if (root->mode == mode)
3340
+ return 0;
3341
+
3342
+ if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3343
+ cmds = mlx5_fs_cmd_get_dr_cmds();
3344
+ else
3345
+ cmds = mlx5_fs_cmd_get_fw_cmds();
3346
+ if (!cmds)
3347
+ return -EOPNOTSUPP;
3348
+
3349
+ err = cmds->create_ns(root);
3350
+ if (err) {
3351
+ mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3352
+ err);
3353
+ return err;
3354
+ }
3355
+
3356
+ root->cmds->destroy_ns(root);
3357
+ root->cmds = cmds;
3358
+ root->mode = mode;
3359
+
3360
+ return 0;
3361
+}
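A hedged sketch of driving the mode switch; in-tree this is typically done from the eswitch offloads setup when the devlink flow_steering_mode parameter selects smfs, and as the comment above notes it is only safe before steering operations run in the namespace.

	struct mlx5_flow_namespace *fdb_ns;
	int err;

	fdb_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!fdb_ns)
		return -EOPNOTSUPP;

	err = mlx5_flow_namespace_set_mode(fdb_ns, MLX5_FLOW_STEERING_MODE_SMFS);
	if (err)
		return err;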