forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
 #include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
@@ -58,7 +59,8 @@
         ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
                  __VA_ARGS__)\
 
-#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
+        .def_miss_action = def_miss_act,\
         .children = (struct init_tree_node[]) {__VA_ARGS__},\
         .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
 }
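Annotation (not part of the diff): the extra ADD_NS() parameter threads a per-namespace default miss action through the static init tree, so every namespace node now states what happens when a packet misses all of its tables. A minimal sketch of how the macro is consumed; the identifiers come from this diff, while "example_ns" and the 1x1 prio sizing are purely illustrative:

        /* Sketch only: a namespace whose miss behaviour is the default
         * action, holding a single prio with a single level.
         */
        static struct init_tree_node example_ns =
                ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
                       ADD_MULTIPLE_PRIO(1, 1));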
@@ -76,6 +78,23 @@
                          FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
                          FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
 
+#define FS_CHAINING_CAPS_EGRESS \
+        FS_REQUIRED_CAPS( \
+                FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
+                FS_CAP(flow_table_properties_nic_transmit.modify_root), \
+                FS_CAP(flow_table_properties_nic_transmit \
+                                .identified_miss_table_mode), \
+                FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
+
+#define FS_CHAINING_CAPS_RDMA_TX \
+        FS_REQUIRED_CAPS( \
+                FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
+                FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
+                FS_CAP(flow_table_properties_nic_transmit_rdma \
+                                .identified_miss_table_mode), \
+                FS_CAP(flow_table_properties_nic_transmit_rdma \
+                                .flow_table_modify))
+
 #define LEFTOVERS_NUM_LEVELS 1
 #define LEFTOVERS_NUM_PRIOS 1
 
@@ -86,8 +105,8 @@
 #define ETHTOOL_PRIO_NUM_LEVELS 1
 #define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Vlan, mac, ttc, inner ttc, aRFS */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 5
+/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 6
 #define KERNEL_NIC_NUM_PRIOS 1
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -99,13 +118,17 @@
 #define ANCHOR_NUM_PRIOS 1
 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
 
-#define OFFLOADS_MAX_FT 1
-#define OFFLOADS_NUM_PRIOS 1
-#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+#define OFFLOADS_MAX_FT 2
+#define OFFLOADS_NUM_PRIOS 2
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
 
 #define LAG_PRIO_NUM_LEVELS 1
 #define LAG_NUM_PRIOS 1
 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+
+#define KERNEL_TX_IPSEC_NUM_PRIOS  1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 1
+#define KERNEL_TX_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)
 
 struct node_caps {
         size_t  arr_sz;
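Annotation (not part of the diff): the *_MIN_LEVEL defines chain into each other, so bumping one count shifts everything stacked on top of it. A worked expansion with the new values from this and the previous hunk (BY_PASS_MIN_LEVEL itself is defined elsewhere in fs_core.c):

        /* ANCHOR_MIN_LEVEL    = BY_PASS_MIN_LEVEL + 1
         * OFFLOADS_MIN_LEVEL  = ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS   (+2 instead of +1)
         * LAG_MIN_LEVEL       = OFFLOADS_MIN_LEVEL + 1
         * KERNEL_MIN_LEVEL    = KERNEL_NIC_PRIO_NUM_LEVELS + 1          (= 6 + 1 = 7)
         * KERNEL_TX_MIN_LEVEL = KERNEL_TX_IPSEC_NUM_LEVELS              (= 1, egress/IPsec tree only)
         */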
@@ -121,33 +144,96 @@
         int num_leaf_prios;
         int prio;
         int num_levels;
+        enum mlx5_flow_table_miss_action def_miss_action;
 } root_fs = {
         .type = FS_TYPE_NAMESPACE,
         .ar_size = 7,
+        .children = (struct init_tree_node[]){
+                ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+                                                  BY_PASS_PRIO_NUM_LEVELS))),
+                ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+                                                  LAG_PRIO_NUM_LEVELS))),
+                ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
+                                                  OFFLOADS_MAX_FT))),
+                ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+                                                  ETHTOOL_PRIO_NUM_LEVELS))),
+                ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
+                                                  KERNEL_NIC_TC_NUM_LEVELS),
+                                ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+                                                  KERNEL_NIC_PRIO_NUM_LEVELS))),
+                ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
+                                                  LEFTOVERS_NUM_LEVELS))),
+                ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
+                                                  ANCHOR_NUM_LEVELS))),
+        }
+};
+
+static struct init_tree_node egress_root_fs = {
+        .type = FS_TYPE_NAMESPACE,
+#ifdef CONFIG_MLX5_IPSEC
+        .ar_size = 2,
+#else
+        .ar_size = 1,
+#endif
         .children = (struct init_tree_node[]) {
-                ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
-                         FS_CHAINING_CAPS,
-                         ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+                ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+                         FS_CHAINING_CAPS_EGRESS,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
                                                   BY_PASS_PRIO_NUM_LEVELS))),
-                ADD_PRIO(0, LAG_MIN_LEVEL, 0,
+#ifdef CONFIG_MLX5_IPSEC
+                ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
+                         FS_CHAINING_CAPS_EGRESS,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
+                                                  KERNEL_TX_IPSEC_NUM_LEVELS))),
+#endif
+        }
+};
+
+#define RDMA_RX_BYPASS_PRIO 0
+#define RDMA_RX_KERNEL_PRIO 1
+static struct init_tree_node rdma_rx_root_fs = {
+        .type = FS_TYPE_NAMESPACE,
+        .ar_size = 2,
+        .children = (struct init_tree_node[]) {
+                [RDMA_RX_BYPASS_PRIO] =
+                ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
                         FS_CHAINING_CAPS,
-                         ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
-                                                  LAG_PRIO_NUM_LEVELS))),
-                ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
-                         ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
-                ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
+                                                  BY_PASS_PRIO_NUM_LEVELS))),
+                [RDMA_RX_KERNEL_PRIO] =
+                ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
                         FS_CHAINING_CAPS,
-                         ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
-                                                  ETHTOOL_PRIO_NUM_LEVELS))),
-                ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
-                         ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
-                                ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
-                                                  KERNEL_NIC_PRIO_NUM_LEVELS))),
-                ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
-                         FS_CHAINING_CAPS,
-                         ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
-                ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
-                         ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+                                ADD_MULTIPLE_PRIO(1, 1))),
+        }
+};
+
+static struct init_tree_node rdma_tx_root_fs = {
+        .type = FS_TYPE_NAMESPACE,
+        .ar_size = 1,
+        .children = (struct init_tree_node[]) {
+                ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+                         FS_CHAINING_CAPS_RDMA_TX,
+                         ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+                                ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+                                                  BY_PASS_PRIO_NUM_LEVELS))),
         }
 };
 
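Annotation (not part of the diff): the new rdma_rx_root_fs / rdma_tx_root_fs trees are what the MLX5_FLOW_NAMESPACE_RDMA_RX / RDMA_RX_KERNEL / RDMA_TX cases of mlx5_get_flow_namespace() (further down in this diff) hand out. A minimal caller-side sketch, assuming a valid dev, the two-argument mlx5_create_flow_table(ns, &ft_attr) entry point of this kernel, and an illustrative table size:

        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *ft;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
        if (!ns)
                return -EOPNOTSUPP;

        ft_attr.max_fte = 1;    /* illustrative */
        ft = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(ft))
                return PTR_ERR(ft);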
@@ -158,7 +244,7 @@
 };
 
 static const struct rhashtable_params rhash_fte = {
-        .key_len = FIELD_SIZEOF(struct fs_fte, val),
+        .key_len = sizeof_field(struct fs_fte, val),
         .key_offset = offsetof(struct fs_fte, val),
         .head_offset = offsetof(struct fs_fte, hash),
         .automatic_shrinking = true,
@@ -166,7 +252,7 @@
 };
 
 static const struct rhashtable_params rhash_fg = {
-        .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
+        .key_len = sizeof_field(struct mlx5_flow_group, mask),
         .key_offset = offsetof(struct mlx5_flow_group, mask),
         .head_offset = offsetof(struct mlx5_flow_group, hash),
         .automatic_shrinking = true,
@@ -182,7 +268,7 @@
 static void del_sw_fte(struct fs_node *node);
 static void del_sw_prio(struct fs_node *node);
 static void del_sw_ns(struct fs_node *node);
-/* Delete rule (destination) is special case that 
+/* Delete rule (destination) is special case that
  * requires to lock the FTE for all the deletion process.
  */
 static void del_sw_hw_rule(struct fs_node *node);
@@ -242,10 +328,11 @@
         }
 }
 
-static void down_write_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node, bool locked)
 {
         if (node) {
-                down_write(&node->lock);
+                if (!locked)
+                        down_write(&node->lock);
                 refcount_inc(&node->refcount);
         }
 }
@@ -256,13 +343,14 @@
         up_read(&node->lock);
 }
 
-static void up_write_ref_node(struct fs_node *node)
+static void up_write_ref_node(struct fs_node *node, bool locked)
 {
         refcount_dec(&node->refcount);
-        up_write(&node->lock);
+        if (!locked)
+                up_write(&node->lock);
 }
 
-static void tree_put_node(struct fs_node *node)
+static void tree_put_node(struct fs_node *node, bool locked)
 {
         struct fs_node *parent_node = node->parent;
 
....@@ -270,30 +358,25 @@
270358 if (node->del_hw_func)
271359 node->del_hw_func(node);
272360 if (parent_node) {
273
- /* Only root namespace doesn't have parent and we just
274
- * need to free its node.
275
- */
276
- down_write_ref_node(parent_node);
361
+ down_write_ref_node(parent_node, locked);
277362 list_del_init(&node->list);
278
- if (node->del_sw_func)
279
- node->del_sw_func(node);
280
- up_write_ref_node(parent_node);
281
- } else {
282
- kfree(node);
283363 }
364
+ node->del_sw_func(node);
365
+ if (parent_node)
366
+ up_write_ref_node(parent_node, locked);
284367 node = NULL;
285368 }
286369 if (!node && parent_node)
287
- tree_put_node(parent_node);
370
+ tree_put_node(parent_node, locked);
288371 }
289372
290
-static int tree_remove_node(struct fs_node *node)
373
+static int tree_remove_node(struct fs_node *node, bool locked)
291374 {
292375 if (refcount_read(&node->refcount) > 1) {
293376 refcount_dec(&node->refcount);
294377 return -EEXIST;
295378 }
296
- tree_put_node(node);
379
+ tree_put_node(node, locked);
297380 return 0;
298381 }
299382
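Annotation (not part of the diff): the new bool locked parameter lets a caller that already holds the node's write lock keep using these helpers for refcounting without re-taking the lock; mlx5_del_flow_rules() below relies on this when it removes rules under a locked FTE. A small sketch of the two call patterns, assuming a valid node and an FTE-child rule as in that function:

        /* Normal path: the helper takes and releases node->lock itself. */
        down_write_ref_node(node, false);
        /* ... mutate the subtree ... */
        up_write_ref_node(node, false);

        /* Caller already holds the parent FTE's lock: pass locked = true so
         * only the refcount is adjusted and the lock is left untouched.
         */
        tree_remove_node(&rule->node, true);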
....@@ -308,6 +391,12 @@
308391 }
309392
310393 return NULL;
394
+}
395
+
396
+static bool is_fwd_next_action(u32 action)
397
+{
398
+ return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
399
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
311400 }
312401
313402 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
....@@ -377,9 +466,10 @@
377466 fs_get_obj(ft, node);
378467 dev = get_dev(&ft->node);
379468 root = find_root(&ft->node);
469
+ trace_mlx5_fs_del_ft(ft);
380470
381471 if (node->active) {
382
- err = root->cmds->destroy_flow_table(dev, ft);
472
+ err = root->cmds->destroy_flow_table(root, ft);
383473 if (err)
384474 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
385475 }
....@@ -393,29 +483,43 @@
393483 fs_get_obj(ft, node);
394484
395485 rhltable_destroy(&ft->fgs_hash);
396
- fs_get_obj(prio, ft->node.parent);
397
- prio->num_ft--;
486
+ if (ft->node.parent) {
487
+ fs_get_obj(prio, ft->node.parent);
488
+ prio->num_ft--;
489
+ }
398490 kfree(ft);
491
+}
492
+
493
+static void modify_fte(struct fs_fte *fte)
494
+{
495
+ struct mlx5_flow_root_namespace *root;
496
+ struct mlx5_flow_table *ft;
497
+ struct mlx5_flow_group *fg;
498
+ struct mlx5_core_dev *dev;
499
+ int err;
500
+
501
+ fs_get_obj(fg, fte->node.parent);
502
+ fs_get_obj(ft, fg->node.parent);
503
+ dev = get_dev(&fte->node);
504
+
505
+ root = find_root(&ft->node);
506
+ err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
507
+ if (err)
508
+ mlx5_core_warn(dev,
509
+ "%s can't del rule fg id=%d fte_index=%d\n",
510
+ __func__, fg->id, fte->index);
511
+ fte->modify_mask = 0;
399512 }
400513
401514 static void del_sw_hw_rule(struct fs_node *node)
402515 {
403
- struct mlx5_flow_root_namespace *root;
404516 struct mlx5_flow_rule *rule;
405
- struct mlx5_flow_table *ft;
406
- struct mlx5_flow_group *fg;
407517 struct fs_fte *fte;
408
- int modify_mask;
409
- struct mlx5_core_dev *dev = get_dev(node);
410
- int err;
411
- bool update_fte = false;
412518
413519 fs_get_obj(rule, node);
414520 fs_get_obj(fte, rule->node.parent);
415
- fs_get_obj(fg, fte->node.parent);
416
- fs_get_obj(ft, fg->node.parent);
417521 trace_mlx5_fs_del_rule(rule);
418
- if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
522
+ if (is_fwd_next_action(rule->sw_action)) {
419523 mutex_lock(&rule->dest_attr.ft->lock);
420524 list_del(&rule->next_ft);
421525 mutex_unlock(&rule->dest_attr.ft->lock);
....@@ -423,27 +527,26 @@
423527
424528 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
425529 --fte->dests_size) {
426
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
427
- BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
530
+ fte->modify_mask |=
531
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
532
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
428533 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
429
- update_fte = true;
534
+ goto out;
535
+ }
536
+
537
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
538
+ --fte->dests_size) {
539
+ fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
540
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
430541 goto out;
431542 }
432543
433544 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
434545 --fte->dests_size) {
435
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
436
- update_fte = true;
546
+ fte->modify_mask |=
547
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
437548 }
438549 out:
439
- root = find_root(&ft->node);
440
- if (update_fte && fte->dests_size) {
441
- err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
442
- if (err)
443
- mlx5_core_warn(dev,
444
- "%s can't del rule fg id=%d fte_index=%d\n",
445
- __func__, fg->id, fte->index);
446
- }
447550 kfree(rule);
448551 }
449552
....@@ -464,7 +567,7 @@
464567 dev = get_dev(&ft->node);
465568 root = find_root(&ft->node);
466569 if (node->active) {
467
- err = root->cmds->delete_fte(dev, ft, fte);
570
+ err = root->cmds->delete_fte(root, ft, fte);
468571 if (err)
469572 mlx5_core_warn(dev,
470573 "flow steering can't delete fte in index %d of flow group id %d\n",
....@@ -504,7 +607,7 @@
504607 trace_mlx5_fs_del_fg(fg);
505608
506609 root = find_root(&ft->node);
507
- if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
610
+ if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
508611 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
509612 fg->id, ft->id);
510613 }
....@@ -521,7 +624,9 @@
521624
522625 rhashtable_destroy(&fg->ftes_hash);
523626 ida_destroy(&fg->fte_allocator);
524
- if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
627
+ if (ft->autogroup.active &&
628
+ fg->max_ftes == ft->autogroup.group_size &&
629
+ fg->start_index < ft->autogroup.max_fte)
525630 ft->autogroup.num_groups--;
526631 err = rhltable_remove(&ft->fgs_hash,
527632 &fg->hash,
....@@ -556,7 +661,7 @@
556661 }
557662
558663 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
559
- u32 *match_value,
664
+ const struct mlx5_flow_spec *spec,
560665 struct mlx5_flow_act *flow_act)
561666 {
562667 struct mlx5_flow_steering *steering = get_steering(&ft->node);
....@@ -566,9 +671,10 @@
566671 if (!fte)
567672 return ERR_PTR(-ENOMEM);
568673
569
- memcpy(fte->val, match_value, sizeof(fte->val));
674
+ memcpy(fte->val, &spec->match_value, sizeof(fte->val));
570675 fte->node.type = FS_TYPE_FLOW_ENTRY;
571676 fte->action = *flow_act;
677
+ fte->flow_context = spec->flow_context;
572678
573679 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
574680
....@@ -584,7 +690,7 @@
584690
585691 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
586692 u8 match_criteria_enable,
587
- void *match_criteria,
693
+ const void *match_criteria,
588694 int start_index,
589695 int end_index)
590696 {
....@@ -599,7 +705,8 @@
599705 if (ret) {
600706 kmem_cache_free(steering->fgs_cache, fg);
601707 return ERR_PTR(ret);
602
-}
708
+ }
709
+
603710 ida_init(&fg->fte_allocator);
604711 fg->mask.match_criteria_enable = match_criteria_enable;
605712 memcpy(&fg->mask.match_criteria, match_criteria,
....@@ -613,7 +720,7 @@
613720
614721 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
615722 u8 match_criteria_enable,
616
- void *match_criteria,
723
+ const void *match_criteria,
617724 int start_index,
618725 int end_index,
619726 struct list_head *prev)
....@@ -695,7 +802,7 @@
695802 struct fs_node *iter = list_entry(start, struct fs_node, list);
696803 struct mlx5_flow_table *ft = NULL;
697804
698
- if (!root)
805
+ if (!root || root->type == FS_TYPE_PRIO_CHAINS)
699806 return NULL;
700807
701808 list_for_each_advance_continue(iter, &root->children, reverse) {
....@@ -711,7 +818,7 @@
711818 return ft;
712819 }
713820
714
-/* If reverse if false then return the first flow table in next priority of
821
+/* If reverse is false then return the first flow table in next priority of
715822 * prio in the tree, else return the last flow table in the previous priority
716823 * of prio in the tree.
717824 */
....@@ -743,24 +850,33 @@
743850 return find_closest_ft(prio, true);
744851 }
745852
853
+static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
854
+ struct mlx5_flow_act *flow_act)
855
+{
856
+ struct fs_prio *prio;
857
+ bool next_ns;
858
+
859
+ next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
860
+ fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
861
+
862
+ return find_next_chained_ft(prio);
863
+}
864
+
746865 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
747866 struct fs_prio *prio,
748867 struct mlx5_flow_table *ft)
749868 {
750869 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
751870 struct mlx5_flow_table *iter;
752
- int i = 0;
753871 int err;
754872
755873 fs_for_each_ft(iter, prio) {
756
- i++;
757
- err = root->cmds->modify_flow_table(dev, iter, ft);
874
+ err = root->cmds->modify_flow_table(root, iter, ft);
758875 if (err) {
759
- mlx5_core_warn(dev, "Failed to modify flow table %d\n",
760
- iter->id);
876
+ mlx5_core_err(dev,
877
+ "Failed to modify flow table id %d, type %d, err %d\n",
878
+ iter->id, iter->type, err);
761879 /* The driver is out of sync with the FW */
762
- if (i > 1)
763
- WARN_ON(true);
764880 return err;
765881 }
766882 }
....@@ -790,7 +906,7 @@
790906 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
791907 struct mlx5_ft_underlay_qp *uqp;
792908 int min_level = INT_MAX;
793
- int err;
909
+ int err = 0;
794910 u32 qpn;
795911
796912 if (root->root_ft)
....@@ -802,11 +918,11 @@
802918 if (list_empty(&root->underlay_qpns)) {
803919 /* Don't set any QPN (zero) in case QPN list is empty */
804920 qpn = 0;
805
- err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
921
+ err = root->cmds->update_root_ft(root, ft, qpn, false);
806922 } else {
807923 list_for_each_entry(uqp, &root->underlay_qpns, list) {
808924 qpn = uqp->qpn;
809
- err = root->cmds->update_root_ft(root->dev, ft,
925
+ err = root->cmds->update_root_ft(root, ft,
810926 qpn, false);
811927 if (err)
812928 break;
....@@ -836,15 +952,15 @@
836952 fs_get_obj(fte, rule->node.parent);
837953 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
838954 return -EINVAL;
839
- down_write_ref_node(&fte->node);
955
+ down_write_ref_node(&fte->node, false);
840956 fs_get_obj(fg, fte->node.parent);
841957 fs_get_obj(ft, fg->node.parent);
842958
843959 memcpy(&rule->dest_attr, dest, sizeof(*dest));
844960 root = find_root(&ft->node);
845
- err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
961
+ err = root->cmds->update_fte(root, ft, fg,
846962 modify_mask, fte);
847
- up_write_ref_node(&fte->node);
963
+ up_write_ref_node(&fte->node, false);
848964
849965 return err;
850966 }
....@@ -893,6 +1009,10 @@
8931009 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
8941010 mutex_unlock(&old_next_ft->lock);
8951011 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1012
+ if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1013
+ iter->ft->ns == new_next_ft->ns)
1014
+ continue;
1015
+
8961016 err = _mlx5_modify_rule_destination(iter, &dest);
8971017 if (err)
8981018 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
....@@ -948,7 +1068,8 @@
9481068 u16 vport)
9491069 {
9501070 struct mlx5_flow_root_namespace *root = find_root(&ns->node);
951
- struct mlx5_flow_table *next_ft = NULL;
1071
+ bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1072
+ struct mlx5_flow_table *next_ft;
9521073 struct fs_prio *fs_prio = NULL;
9531074 struct mlx5_flow_table *ft;
9541075 int log_table_sz;
....@@ -965,14 +1086,21 @@
9651086 err = -EINVAL;
9661087 goto unlock_root;
9671088 }
968
- if (ft_attr->level >= fs_prio->num_levels) {
969
- err = -ENOSPC;
970
- goto unlock_root;
1089
+ if (!unmanaged) {
1090
+ /* The level is related to the
1091
+ * priority level range.
1092
+ */
1093
+ if (ft_attr->level >= fs_prio->num_levels) {
1094
+ err = -ENOSPC;
1095
+ goto unlock_root;
1096
+ }
1097
+
1098
+ ft_attr->level += fs_prio->start_level;
9711099 }
1100
+
9721101 /* The level is related to the
9731102 * priority level range.
9741103 */
975
- ft_attr->level += fs_prio->start_level;
9761104 ft = alloc_flow_table(ft_attr->level,
9771105 vport,
9781106 ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
....@@ -985,26 +1113,35 @@
9851113
9861114 tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
9871115 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
988
- next_ft = find_next_chained_ft(fs_prio);
989
- err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
990
- ft->type, ft->level, log_table_sz,
991
- next_ft, &ft->id, ft->flags);
1116
+ next_ft = unmanaged ? ft_attr->next_ft :
1117
+ find_next_chained_ft(fs_prio);
1118
+ ft->def_miss_action = ns->def_miss_action;
1119
+ ft->ns = ns;
1120
+ err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
9921121 if (err)
9931122 goto free_ft;
9941123
995
- err = connect_flow_table(root->dev, ft, fs_prio);
996
- if (err)
997
- goto destroy_ft;
1124
+ if (!unmanaged) {
1125
+ err = connect_flow_table(root->dev, ft, fs_prio);
1126
+ if (err)
1127
+ goto destroy_ft;
1128
+ }
1129
+
9981130 ft->node.active = true;
999
- down_write_ref_node(&fs_prio->node);
1000
- tree_add_node(&ft->node, &fs_prio->node);
1001
- list_add_flow_table(ft, fs_prio);
1131
+ down_write_ref_node(&fs_prio->node, false);
1132
+ if (!unmanaged) {
1133
+ tree_add_node(&ft->node, &fs_prio->node);
1134
+ list_add_flow_table(ft, fs_prio);
1135
+ } else {
1136
+ ft->node.root = fs_prio->node.root;
1137
+ }
10021138 fs_prio->num_ft++;
1003
- up_write_ref_node(&fs_prio->node);
1139
+ up_write_ref_node(&fs_prio->node, false);
10041140 mutex_unlock(&root->chain_lock);
1141
+ trace_mlx5_fs_add_ft(ft);
10051142 return ft;
10061143 destroy_ft:
1007
- root->cmds->destroy_flow_table(root->dev, ft);
1144
+ root->cmds->destroy_flow_table(root, ft);
10081145 free_ft:
10091146 rhltable_destroy(&ft->fgs_hash);
10101147 kfree(ft);
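Annotation (not part of the diff): the MLX5_FLOW_TABLE_UNMANAGED flag handled above creates a table that is not linked into the namespace's level/prio chaining; the caller supplies its own miss target via ft_attr->next_ft instead of find_next_chained_ft(). A hedged caller-side sketch (flag and fields appear in this hunk; ns, miss_ft and the sizes are illustrative assumptions):

        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_table *ft;

        ft_attr.flags   = MLX5_FLOW_TABLE_UNMANAGED;
        ft_attr.prio    = 0;            /* still selects the fs_prio inside ns */
        ft_attr.max_fte = 1024;         /* illustrative */
        ft_attr.next_ft = miss_ft;      /* caller-chosen miss target */

        ft = mlx5_create_flow_table(ns, &ft_attr);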
....@@ -1046,31 +1183,27 @@
10461183
10471184 struct mlx5_flow_table*
10481185 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1049
- int prio,
1050
- int num_flow_table_entries,
1051
- int max_num_groups,
1052
- u32 level,
1053
- u32 flags)
1186
+ struct mlx5_flow_table_attr *ft_attr)
10541187 {
1055
- struct mlx5_flow_table_attr ft_attr = {};
1188
+ int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1189
+ int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
1190
+ int max_num_groups = ft_attr->autogroup.max_num_groups;
10561191 struct mlx5_flow_table *ft;
10571192
1058
- if (max_num_groups > num_flow_table_entries)
1193
+ if (max_num_groups > autogroups_max_fte)
1194
+ return ERR_PTR(-EINVAL);
1195
+ if (num_reserved_entries > ft_attr->max_fte)
10591196 return ERR_PTR(-EINVAL);
10601197
1061
- ft_attr.max_fte = num_flow_table_entries;
1062
- ft_attr.prio = prio;
1063
- ft_attr.level = level;
1064
- ft_attr.flags = flags;
1065
-
1066
- ft = mlx5_create_flow_table(ns, &ft_attr);
1198
+ ft = mlx5_create_flow_table(ns, ft_attr);
10671199 if (IS_ERR(ft))
10681200 return ft;
10691201
10701202 ft->autogroup.active = true;
10711203 ft->autogroup.required_groups = max_num_groups;
1204
+ ft->autogroup.max_fte = autogroups_max_fte;
10721205 /* We save place for flow groups in addition to max types */
1073
- ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
1206
+ ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
10741207
10751208 return ft;
10761209 }
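Annotation (not part of the diff): mlx5_create_auto_grouped_flow_table() now takes the same mlx5_flow_table_attr as plain tables, with the autogroup parameters folded into ft_attr->autogroup and an optional tail of reserved entries kept out of the autogroup range. A minimal sketch of the new calling convention (field names from this hunk; ns and the sizes are illustrative):

        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_table *ft;

        ft_attr.prio = 0;                               /* illustrative */
        ft_attr.max_fte = 128;
        ft_attr.autogroup.max_num_groups = 4;
        ft_attr.autogroup.num_reserved_entries = 2;     /* excluded from autogrouping */

        ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
        if (IS_ERR(ft))
                return PTR_ERR(ft);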
....@@ -1089,24 +1222,23 @@
10891222 start_flow_index);
10901223 int end_index = MLX5_GET(create_flow_group_in, fg_in,
10911224 end_flow_index);
1092
- struct mlx5_core_dev *dev = get_dev(&ft->node);
10931225 struct mlx5_flow_group *fg;
10941226 int err;
10951227
1096
- if (ft->autogroup.active)
1228
+ if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
10971229 return ERR_PTR(-EPERM);
10981230
1099
- down_write_ref_node(&ft->node);
1231
+ down_write_ref_node(&ft->node, false);
11001232 fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
11011233 start_index, end_index,
11021234 ft->node.children.prev);
1103
- up_write_ref_node(&ft->node);
1235
+ up_write_ref_node(&ft->node, false);
11041236 if (IS_ERR(fg))
11051237 return fg;
11061238
1107
- err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
1239
+ err = root->cmds->create_flow_group(root, ft, fg_in, fg);
11081240 if (err) {
1109
- tree_put_node(&fg->node);
1241
+ tree_put_node(&fg->node, false);
11101242 return ERR_PTR(err);
11111243 }
11121244 trace_mlx5_fs_add_fg(fg);
....@@ -1244,17 +1376,15 @@
12441376 fs_get_obj(ft, fg->node.parent);
12451377 root = find_root(&fg->node);
12461378 if (!(fte->status & FS_FTE_STATUS_EXISTING))
1247
- err = root->cmds->create_fte(get_dev(&ft->node),
1248
- ft, fg, fte);
1379
+ err = root->cmds->create_fte(root, ft, fg, fte);
12491380 else
1250
- err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
1251
- modify_mask, fte);
1381
+ err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
12521382 if (err)
12531383 goto free_handle;
12541384
12551385 fte->node.active = true;
12561386 fte->status |= FS_FTE_STATUS_EXISTING;
1257
- atomic_inc(&fte->node.version);
1387
+ atomic_inc(&fg->node.version);
12581388
12591389 out:
12601390 return handle;
....@@ -1265,12 +1395,13 @@
12651395 }
12661396
12671397 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
1268
- struct mlx5_flow_spec *spec)
1398
+ const struct mlx5_flow_spec *spec)
12691399 {
12701400 struct list_head *prev = &ft->node.children;
1271
- struct mlx5_flow_group *fg;
1401
+ u32 max_fte = ft->autogroup.max_fte;
12721402 unsigned int candidate_index = 0;
12731403 unsigned int group_size = 0;
1404
+ struct mlx5_flow_group *fg;
12741405
12751406 if (!ft->autogroup.active)
12761407 return ERR_PTR(-ENOENT);
....@@ -1278,7 +1409,7 @@
12781409 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
12791410 group_size = ft->autogroup.group_size;
12801411
1281
- /* ft->max_fte == ft->autogroup.max_types */
1412
+ /* max_fte == ft->autogroup.max_types */
12821413 if (group_size == 0)
12831414 group_size = 1;
12841415
....@@ -1291,7 +1422,7 @@
12911422 prev = &fg->node.list;
12921423 }
12931424
1294
- if (candidate_index + group_size > ft->max_fte)
1425
+ if (candidate_index + group_size > max_fte)
12951426 return ERR_PTR(-ENOSPC);
12961427
12971428 fg = alloc_insert_flow_group(ft,
....@@ -1314,7 +1445,6 @@
13141445 struct mlx5_flow_group *fg)
13151446 {
13161447 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1317
- struct mlx5_core_dev *dev = get_dev(&ft->node);
13181448 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
13191449 void *match_criteria_addr;
13201450 u8 src_esw_owner_mask_on;
....@@ -1344,7 +1474,7 @@
13441474 memcpy(match_criteria_addr, fg->mask.match_criteria,
13451475 sizeof(fg->mask.match_criteria));
13461476
1347
- err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
1477
+ err = root->cmds->create_flow_group(root, ft, in, fg);
13481478 if (!err) {
13491479 fg->node.active = true;
13501480 trace_mlx5_fs_add_fg(fg);
....@@ -1359,7 +1489,13 @@
13591489 {
13601490 if (d1->type == d2->type) {
13611491 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1362
- d1->vport.num == d2->vport.num) ||
1492
+ d1->vport.num == d2->vport.num &&
1493
+ d1->vport.flags == d2->vport.flags &&
1494
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1495
+ (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1496
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1497
+ (d1->vport.pkt_reformat->id ==
1498
+ d2->vport.pkt_reformat->id) : true)) ||
13631499 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
13641500 d1->ft == d2->ft) ||
13651501 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
....@@ -1384,9 +1520,22 @@
13841520 return NULL;
13851521 }
13861522
1387
-static bool check_conflicting_actions(u32 action1, u32 action2)
1523
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
1524
+ const struct mlx5_fs_vlan *vlan1)
13881525 {
1389
- u32 xored_actions = action1 ^ action2;
1526
+ return vlan0->ethtype != vlan1->ethtype ||
1527
+ vlan0->vid != vlan1->vid ||
1528
+ vlan0->prio != vlan1->prio;
1529
+}
1530
+
1531
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1532
+ const struct mlx5_flow_act *act2)
1533
+{
1534
+ u32 action1 = act1->action;
1535
+ u32 action2 = act2->action;
1536
+ u32 xored_actions;
1537
+
1538
+ xored_actions = action1 ^ action2;
13901539
13911540 /* if one rule only wants to count, it's ok */
13921541 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
....@@ -1394,7 +1543,7 @@
13941543 return false;
13951544
13961545 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1397
- MLX5_FLOW_CONTEXT_ACTION_ENCAP |
1546
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
13981547 MLX5_FLOW_CONTEXT_ACTION_DECAP |
13991548 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
14001549 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
....@@ -1403,23 +1552,41 @@
14031552 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
14041553 return true;
14051554
1555
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1556
+ act1->pkt_reformat != act2->pkt_reformat)
1557
+ return true;
1558
+
1559
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1560
+ act1->modify_hdr != act2->modify_hdr)
1561
+ return true;
1562
+
1563
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1564
+ check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1565
+ return true;
1566
+
1567
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1568
+ check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
1569
+ return true;
1570
+
14061571 return false;
14071572 }
14081573
1409
-static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
1574
+static int check_conflicting_ftes(struct fs_fte *fte,
1575
+ const struct mlx5_flow_context *flow_context,
1576
+ const struct mlx5_flow_act *flow_act)
14101577 {
1411
- if (check_conflicting_actions(flow_act->action, fte->action.action)) {
1578
+ if (check_conflicting_actions(flow_act, &fte->action)) {
14121579 mlx5_core_warn(get_dev(&fte->node),
14131580 "Found two FTEs with conflicting actions\n");
14141581 return -EEXIST;
14151582 }
14161583
1417
- if (flow_act->has_flow_tag &&
1418
- fte->action.flow_tag != flow_act->flow_tag) {
1584
+ if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1585
+ fte->flow_context.flow_tag != flow_context->flow_tag) {
14191586 mlx5_core_warn(get_dev(&fte->node),
14201587 "FTE flow tag %u already exists with different flow tag %u\n",
1421
- fte->action.flow_tag,
1422
- flow_act->flow_tag);
1588
+ fte->flow_context.flow_tag,
1589
+ flow_context->flow_tag);
14231590 return -EEXIST;
14241591 }
14251592
....@@ -1427,7 +1594,7 @@
14271594 }
14281595
14291596 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1430
- u32 *match_value,
1597
+ const struct mlx5_flow_spec *spec,
14311598 struct mlx5_flow_act *flow_act,
14321599 struct mlx5_flow_destination *dest,
14331600 int dest_num,
....@@ -1438,7 +1605,7 @@
14381605 int i;
14391606 int ret;
14401607
1441
- ret = check_conflicting_ftes(fte, flow_act);
1608
+ ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
14421609 if (ret)
14431610 return ERR_PTR(ret);
14441611
....@@ -1461,46 +1628,39 @@
14611628 return handle;
14621629 }
14631630
1464
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
1631
+static bool counter_is_valid(u32 action)
14651632 {
1466
- struct mlx5_flow_rule *dst;
1467
- struct fs_fte *fte;
1468
-
1469
- fs_get_obj(fte, handle->rule[0]->node.parent);
1470
-
1471
- fs_for_each_dst(dst, fte) {
1472
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
1473
- return dst->dest_attr.counter;
1474
- }
1475
-
1476
- return NULL;
1477
-}
1478
-
1479
-static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
1480
-{
1481
- if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
1482
- return !counter;
1483
-
1484
- if (!counter)
1485
- return false;
1486
-
14871633 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1634
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW |
14881635 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
14891636 }
14901637
14911638 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1492
- u32 action,
1639
+ struct mlx5_flow_act *flow_act,
14931640 struct mlx5_flow_table *ft)
14941641 {
1642
+ bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1643
+ u32 action = flow_act->action;
1644
+
14951645 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1496
- return counter_is_valid(dest->counter, action);
1646
+ return counter_is_valid(action);
14971647
14981648 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
14991649 return true;
15001650
1651
+ if (ignore_level) {
1652
+ if (ft->type != FS_FT_FDB &&
1653
+ ft->type != FS_FT_NIC_RX)
1654
+ return false;
1655
+
1656
+ if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1657
+ ft->type != dest->ft->type)
1658
+ return false;
1659
+ }
1660
+
15011661 if (!dest || ((dest->type ==
15021662 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1503
- (dest->ft->level <= ft->level)))
1663
+ (dest->ft->level <= ft->level && !ignore_level)))
15041664 return false;
15051665 return true;
15061666 }
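Annotation (not part of the diff): dest_is_valid() above now accepts forwarding to a table at the same or lower level when the rule sets FLOW_ACT_IGNORE_FLOW_LEVEL, but only for FDB and NIC RX tables and only between tables of the same type. A sketch of a rule relying on it (flag and action names are from this diff; ft, spec and lower_ft are assumed to exist):

        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *handle;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_act.flags  = FLOW_ACT_IGNORE_FLOW_LEVEL;   /* allow dest->ft->level <= ft->level */
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft   = lower_ft;

        handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);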
....@@ -1510,30 +1670,22 @@
15101670 struct mlx5_flow_group *g;
15111671 };
15121672
1513
-struct match_list_head {
1514
- struct list_head list;
1515
- struct match_list first;
1516
-};
1517
-
1518
-static void free_match_list(struct match_list_head *head)
1673
+static void free_match_list(struct match_list *head, bool ft_locked)
15191674 {
1520
- if (!list_empty(&head->list)) {
1521
- struct match_list *iter, *match_tmp;
1675
+ struct match_list *iter, *match_tmp;
15221676
1523
- list_del(&head->first.list);
1524
- tree_put_node(&head->first.g->node);
1525
- list_for_each_entry_safe(iter, match_tmp, &head->list,
1526
- list) {
1527
- tree_put_node(&iter->g->node);
1528
- list_del(&iter->list);
1529
- kfree(iter);
1530
- }
1677
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
1678
+ list) {
1679
+ tree_put_node(&iter->g->node, ft_locked);
1680
+ list_del(&iter->list);
1681
+ kfree(iter);
15311682 }
15321683 }
15331684
1534
-static int build_match_list(struct match_list_head *match_head,
1685
+static int build_match_list(struct match_list *match_head,
15351686 struct mlx5_flow_table *ft,
1536
- struct mlx5_flow_spec *spec)
1687
+ const struct mlx5_flow_spec *spec,
1688
+ bool ft_locked)
15371689 {
15381690 struct rhlist_head *tmp, *list;
15391691 struct mlx5_flow_group *g;
....@@ -1547,24 +1699,14 @@
15471699 rhl_for_each_entry_rcu(g, tmp, list, hash) {
15481700 struct match_list *curr_match;
15491701
1550
- if (likely(list_empty(&match_head->list))) {
1551
- if (!tree_get_node(&g->node))
1552
- continue;
1553
- match_head->first.g = g;
1554
- list_add_tail(&match_head->first.list,
1555
- &match_head->list);
1702
+ if (unlikely(!tree_get_node(&g->node)))
15561703 continue;
1557
- }
15581704
15591705 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
15601706 if (!curr_match) {
15611707 rcu_read_unlock();
1562
- free_match_list(match_head);
1708
+ free_match_list(match_head, ft_locked);
15631709 return -ENOMEM;
1564
- }
1565
- if (!tree_get_node(&g->node)) {
1566
- kfree(curr_match);
1567
- continue;
15681710 }
15691711 curr_match->g = g;
15701712 list_add_tail(&curr_match->list, &match_head->list);
....@@ -1585,7 +1727,7 @@
15851727
15861728 static struct fs_fte *
15871729 lookup_fte_locked(struct mlx5_flow_group *g,
1588
- u32 *match_value,
1730
+ const u32 *match_value,
15891731 bool take_write)
15901732 {
15911733 struct fs_fte *fte_tmp;
....@@ -1601,7 +1743,7 @@
16011743 goto out;
16021744 }
16031745 if (!fte_tmp->node.active) {
1604
- tree_put_node(&fte_tmp->node);
1746
+ tree_put_node(&fte_tmp->node, false);
16051747 fte_tmp = NULL;
16061748 goto out;
16071749 }
....@@ -1609,7 +1751,7 @@
16091751 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
16101752 out:
16111753 if (take_write)
1612
- up_write_ref_node(&g->node);
1754
+ up_write_ref_node(&g->node, false);
16131755 else
16141756 up_read_ref_node(&g->node);
16151757 return fte_tmp;
....@@ -1618,7 +1760,7 @@
16181760 static struct mlx5_flow_handle *
16191761 try_add_to_existing_fg(struct mlx5_flow_table *ft,
16201762 struct list_head *match_head,
1621
- struct mlx5_flow_spec *spec,
1763
+ const struct mlx5_flow_spec *spec,
16221764 struct mlx5_flow_act *flow_act,
16231765 struct mlx5_flow_destination *dest,
16241766 int dest_num,
....@@ -1630,16 +1772,20 @@
16301772 struct match_list *iter;
16311773 bool take_write = false;
16321774 struct fs_fte *fte;
1633
- u64 version;
1775
+ u64 version = 0;
16341776 int err;
16351777
1636
- fte = alloc_fte(ft, spec->match_value, flow_act);
1778
+ fte = alloc_fte(ft, spec, flow_act);
16371779 if (IS_ERR(fte))
16381780 return ERR_PTR(-ENOMEM);
16391781
16401782 search_again_locked:
1783
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
1784
+ goto skip_search;
16411785 version = matched_fgs_get_version(match_head);
1642
- /* Try to find a fg that already contains a matching fte */
1786
+ /* Try to find an fte with identical match value and attempt update its
1787
+ * action.
1788
+ */
16431789 list_for_each_entry(iter, match_head, list) {
16441790 struct fs_fte *fte_tmp;
16451791
....@@ -1647,13 +1793,18 @@
16471793 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
16481794 if (!fte_tmp)
16491795 continue;
1650
- rule = add_rule_fg(g, spec->match_value,
1651
- flow_act, dest, dest_num, fte_tmp);
1652
- up_write_ref_node(&fte_tmp->node);
1653
- tree_put_node(&fte_tmp->node);
1796
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1797
+ /* No error check needed here, because insert_fte() is not called */
1798
+ up_write_ref_node(&fte_tmp->node, false);
1799
+ tree_put_node(&fte_tmp->node, false);
16541800 kmem_cache_free(steering->ftes_cache, fte);
16551801 return rule;
16561802 }
1803
+
1804
+skip_search:
1805
+ /* No group with matching fte found, or we skipped the search.
1806
+ * Try to add a new fte to any matching fg.
1807
+ */
16571808
16581809 /* Check the ft version, for case that new flow group
16591810 * was added while the fgs weren't locked
....@@ -1663,10 +1814,12 @@
16631814 goto out;
16641815 }
16651816
1666
- /* Check the fgs version, for case the new FTE with the
1667
- * same values was added while the fgs weren't locked
1817
+ /* Check the fgs version. If version have changed it could be that an
1818
+ * FTE with the same match value was added while the fgs weren't
1819
+ * locked.
16681820 */
1669
- if (version != matched_fgs_get_version(match_head)) {
1821
+ if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1822
+ version != matched_fgs_get_version(match_head)) {
16701823 take_write = true;
16711824 goto search_again_locked;
16721825 }
....@@ -1674,14 +1827,16 @@
16741827 list_for_each_entry(iter, match_head, list) {
16751828 g = iter->g;
16761829
1677
- if (!g->node.active)
1678
- continue;
1679
-
16801830 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1831
+
1832
+ if (!g->node.active) {
1833
+ up_write_ref_node(&g->node, false);
1834
+ continue;
1835
+ }
16811836
16821837 err = insert_fte(g, fte);
16831838 if (err) {
1684
- up_write_ref_node(&g->node);
1839
+ up_write_ref_node(&g->node, false);
16851840 if (err == -ENOSPC)
16861841 continue;
16871842 kmem_cache_free(steering->ftes_cache, fte);
....@@ -1689,11 +1844,11 @@
16891844 }
16901845
16911846 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1692
- up_write_ref_node(&g->node);
1693
- rule = add_rule_fg(g, spec->match_value,
1694
- flow_act, dest, dest_num, fte);
1695
- up_write_ref_node(&fte->node);
1696
- tree_put_node(&fte->node);
1847
+ up_write_ref_node(&g->node, false);
1848
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1849
+ up_write_ref_node(&fte->node, false);
1850
+ if (IS_ERR(rule))
1851
+ tree_put_node(&fte->node, false);
16971852 return rule;
16981853 }
16991854 rule = ERR_PTR(-ENOENT);
....@@ -1704,16 +1859,16 @@
17041859
17051860 static struct mlx5_flow_handle *
17061861 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1707
- struct mlx5_flow_spec *spec,
1862
+ const struct mlx5_flow_spec *spec,
17081863 struct mlx5_flow_act *flow_act,
17091864 struct mlx5_flow_destination *dest,
17101865 int dest_num)
17111866
17121867 {
17131868 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1714
- struct mlx5_flow_group *g;
17151869 struct mlx5_flow_handle *rule;
1716
- struct match_list_head match_head;
1870
+ struct match_list match_head;
1871
+ struct mlx5_flow_group *g;
17171872 bool take_write = false;
17181873 struct fs_fte *fte;
17191874 int version;
....@@ -1724,7 +1879,7 @@
17241879 return ERR_PTR(-EINVAL);
17251880
17261881 for (i = 0; i < dest_num; i++) {
1727
- if (!dest_is_valid(&dest[i], flow_act->action, ft))
1882
+ if (!dest_is_valid(&dest[i], flow_act, ft))
17281883 return ERR_PTR(-EINVAL);
17291884 }
17301885 nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
....@@ -1732,10 +1887,10 @@
17321887 version = atomic_read(&ft->node.version);
17331888
17341889 /* Collect all fgs which has a matching match_criteria */
1735
- err = build_match_list(&match_head, ft, spec);
1890
+ err = build_match_list(&match_head, ft, spec, take_write);
17361891 if (err) {
17371892 if (take_write)
1738
- up_write_ref_node(&ft->node);
1893
+ up_write_ref_node(&ft->node, false);
17391894 else
17401895 up_read_ref_node(&ft->node);
17411896 return ERR_PTR(err);
....@@ -1746,11 +1901,11 @@
17461901
17471902 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
17481903 dest_num, version);
1749
- free_match_list(&match_head);
1904
+ free_match_list(&match_head, take_write);
17501905 if (!IS_ERR(rule) ||
17511906 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
17521907 if (take_write)
1753
- up_write_ref_node(&ft->node);
1908
+ up_write_ref_node(&ft->node, false);
17541909 return rule;
17551910 }
17561911
....@@ -1766,41 +1921,42 @@
17661921 g = alloc_auto_flow_group(ft, spec);
17671922 if (IS_ERR(g)) {
17681923 rule = ERR_CAST(g);
1769
- up_write_ref_node(&ft->node);
1924
+ up_write_ref_node(&ft->node, false);
17701925 return rule;
17711926 }
17721927
1928
+ fte = alloc_fte(ft, spec, flow_act);
1929
+ if (IS_ERR(fte)) {
1930
+ up_write_ref_node(&ft->node, false);
1931
+ err = PTR_ERR(fte);
1932
+ goto err_alloc_fte;
1933
+ }
1934
+
17731935 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1774
- up_write_ref_node(&ft->node);
1936
+ up_write_ref_node(&ft->node, false);
17751937
17761938 err = create_auto_flow_group(ft, g);
17771939 if (err)
17781940 goto err_release_fg;
17791941
1780
- fte = alloc_fte(ft, spec->match_value, flow_act);
1781
- if (IS_ERR(fte)) {
1782
- err = PTR_ERR(fte);
1783
- goto err_release_fg;
1784
- }
1785
-
17861942 err = insert_fte(g, fte);
1787
- if (err) {
1788
- kmem_cache_free(steering->ftes_cache, fte);
1943
+ if (err)
17891944 goto err_release_fg;
1790
- }
17911945
17921946 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1793
- up_write_ref_node(&g->node);
1794
- rule = add_rule_fg(g, spec->match_value, flow_act, dest,
1795
- dest_num, fte);
1796
- up_write_ref_node(&fte->node);
1797
- tree_put_node(&fte->node);
1798
- tree_put_node(&g->node);
1947
+ up_write_ref_node(&g->node, false);
1948
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1949
+ up_write_ref_node(&fte->node, false);
1950
+ if (IS_ERR(rule))
1951
+ tree_put_node(&fte->node, false);
1952
+ tree_put_node(&g->node, false);
17991953 return rule;
18001954
18011955 err_release_fg:
1802
- up_write_ref_node(&g->node);
1803
- tree_put_node(&g->node);
1956
+ up_write_ref_node(&g->node, false);
1957
+ kmem_cache_free(steering->ftes_cache, fte);
1958
+err_alloc_fte:
1959
+ tree_put_node(&g->node, false);
18041960 return ERR_PTR(err);
18051961 }
18061962
....@@ -1812,61 +1968,104 @@
18121968
18131969 struct mlx5_flow_handle *
18141970 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1815
- struct mlx5_flow_spec *spec,
1971
+ const struct mlx5_flow_spec *spec,
18161972 struct mlx5_flow_act *flow_act,
18171973 struct mlx5_flow_destination *dest,
18181974 int num_dest)
18191975 {
18201976 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1821
- struct mlx5_flow_destination gen_dest = {};
1977
+ static const struct mlx5_flow_spec zero_spec = {};
1978
+ struct mlx5_flow_destination *gen_dest = NULL;
18221979 struct mlx5_flow_table *next_ft = NULL;
18231980 struct mlx5_flow_handle *handle = NULL;
18241981 u32 sw_action = flow_act->action;
1825
- struct fs_prio *prio;
1982
+ int i;
18261983
1827
- fs_get_obj(prio, ft->node.parent);
1828
- if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1829
- if (!fwd_next_prio_supported(ft))
1830
- return ERR_PTR(-EOPNOTSUPP);
1831
- if (num_dest)
1832
- return ERR_PTR(-EINVAL);
1833
- mutex_lock(&root->chain_lock);
1834
- next_ft = find_next_chained_ft(prio);
1835
- if (next_ft) {
1836
- gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1837
- gen_dest.ft = next_ft;
1838
- dest = &gen_dest;
1839
- num_dest = 1;
1840
- flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1841
- } else {
1842
- mutex_unlock(&root->chain_lock);
1843
- return ERR_PTR(-EOPNOTSUPP);
1844
- }
1984
+ if (!spec)
1985
+ spec = &zero_spec;
1986
+
1987
+ if (!is_fwd_next_action(sw_action))
1988
+ return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1989
+
1990
+ if (!fwd_next_prio_supported(ft))
1991
+ return ERR_PTR(-EOPNOTSUPP);
1992
+
1993
+ mutex_lock(&root->chain_lock);
1994
+ next_ft = find_next_fwd_ft(ft, flow_act);
1995
+ if (!next_ft) {
1996
+ handle = ERR_PTR(-EOPNOTSUPP);
1997
+ goto unlock;
18451998 }
18461999
2000
+ gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2001
+ GFP_KERNEL);
2002
+ if (!gen_dest) {
2003
+ handle = ERR_PTR(-ENOMEM);
2004
+ goto unlock;
2005
+ }
2006
+ for (i = 0; i < num_dest; i++)
2007
+ gen_dest[i] = dest[i];
2008
+ gen_dest[i].type =
2009
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2010
+ gen_dest[i].ft = next_ft;
2011
+ dest = gen_dest;
2012
+ num_dest++;
2013
+ flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2014
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2015
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
18472016 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2017
+ if (IS_ERR(handle))
2018
+ goto unlock;
18482019
1849
- if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1850
- if (!IS_ERR_OR_NULL(handle) &&
1851
- (list_empty(&handle->rule[0]->next_ft))) {
1852
- mutex_lock(&next_ft->lock);
1853
- list_add(&handle->rule[0]->next_ft,
1854
- &next_ft->fwd_rules);
1855
- mutex_unlock(&next_ft->lock);
1856
- handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1857
- }
1858
- mutex_unlock(&root->chain_lock);
2020
+ if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2021
+ mutex_lock(&next_ft->lock);
2022
+ list_add(&handle->rule[num_dest - 1]->next_ft,
2023
+ &next_ft->fwd_rules);
2024
+ mutex_unlock(&next_ft->lock);
2025
+ handle->rule[num_dest - 1]->sw_action = sw_action;
2026
+ handle->rule[num_dest - 1]->ft = ft;
18592027 }
2028
+unlock:
2029
+ mutex_unlock(&root->chain_lock);
2030
+ kfree(gen_dest);
18602031 return handle;
18612032 }
18622033 EXPORT_SYMBOL(mlx5_add_flow_rules);
18632034
18642035 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
18652036 {
2037
+ struct fs_fte *fte;
18662038 int i;
18672039
2040
+ /* In order to consolidate the HW changes we lock the FTE for other
2041
+ * changes, and increase its refcount, in order not to perform the
2042
+ * "del" functions of the FTE. Will handle them here.
2043
+ * The removal of the rules is done under locked FTE.
2044
+ * After removing all the handle's rules, if there are remaining
2045
+ * rules, it means we just need to modify the FTE in FW, and
2046
+ * unlock/decrease the refcount we increased before.
2047
+ * Otherwise, it means the FTE should be deleted. First delete the
2048
+ * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
2049
+ * the FTE, which will handle the last decrease of the refcount, as
2050
+ * well as required handling of its parent.
2051
+ */
2052
+ fs_get_obj(fte, handle->rule[0]->node.parent);
2053
+ down_write_ref_node(&fte->node, false);
18682054 for (i = handle->num_rules - 1; i >= 0; i--)
1869
- tree_remove_node(&handle->rule[i]->node);
2055
+ tree_remove_node(&handle->rule[i]->node, true);
2056
+ if (list_empty(&fte->node.children)) {
2057
+ del_hw_fte(&fte->node);
2058
+ /* Avoid double call to del_hw_fte */
2059
+ fte->node.del_hw_func = NULL;
2060
+ up_write_ref_node(&fte->node, false);
2061
+ tree_put_node(&fte->node, false);
2062
+ } else if (fte->dests_size) {
2063
+ if (fte->modify_mask)
2064
+ modify_fte(fte);
2065
+ up_write_ref_node(&fte->node, false);
2066
+ } else {
2067
+ up_write_ref_node(&fte->node, false);
2068
+ }
18702069 kfree(handle);
18712070 }
18722071 EXPORT_SYMBOL(mlx5_del_flow_rules);
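Annotation (not part of the diff): mlx5_add_flow_rules() now implements FWD_NEXT_PRIO/FWD_NEXT_NS by allocating gen_dest and appending one extra destination pointing at the next chained table, so the caller may combine it with explicit destinations; mlx5_del_flow_rules() batches all rule removals under one locked FTE and issues either a single delete_fte or a single modify via fte->modify_mask. A sketch of the forward-to-next-namespace usage (action flag from this hunk; ft and spec are assumed):

        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *handle;

        /* Matched packets are forwarded to the first table of the next
         * namespace; the driver appends the FWD_DEST entry internally.
         */
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
        handle = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
        if (!IS_ERR(handle))
                mlx5_del_flow_rules(handle);    /* single FW update for the whole handle */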
....@@ -1903,12 +2102,12 @@
19032102 if (list_empty(&root->underlay_qpns)) {
19042103 /* Don't set any QPN (zero) in case QPN list is empty */
19052104 qpn = 0;
1906
- err = root->cmds->update_root_ft(root->dev, new_root_ft,
2105
+ err = root->cmds->update_root_ft(root, new_root_ft,
19072106 qpn, false);
19082107 } else {
19092108 list_for_each_entry(uqp, &root->underlay_qpns, list) {
19102109 qpn = uqp->qpn;
1911
- err = root->cmds->update_root_ft(root->dev,
2110
+ err = root->cmds->update_root_ft(root,
19122111 new_root_ft, qpn,
19132112 false);
19142113 if (err)
....@@ -1964,12 +2163,13 @@
19642163 int err = 0;
19652164
19662165 mutex_lock(&root->chain_lock);
1967
- err = disconnect_flow_table(ft);
2166
+ if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2167
+ err = disconnect_flow_table(ft);
19682168 if (err) {
19692169 mutex_unlock(&root->chain_lock);
19702170 return err;
19712171 }
1972
- if (tree_remove_node(&ft->node))
2172
+ if (tree_remove_node(&ft->node, false))
19732173 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
19742174 ft->id);
19752175 mutex_unlock(&root->chain_lock);
....@@ -1980,17 +2180,29 @@
19802180
19812181 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
19822182 {
1983
- if (tree_remove_node(&fg->node))
2183
+ if (tree_remove_node(&fg->node, false))
19842184 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
19852185 fg->id);
19862186 }
2187
+
2188
+struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2189
+ int n)
2190
+{
2191
+ struct mlx5_flow_steering *steering = dev->priv.steering;
2192
+
2193
+ if (!steering || !steering->fdb_sub_ns)
2194
+ return NULL;
2195
+
2196
+ return steering->fdb_sub_ns[n];
2197
+}
2198
+EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
19872199
19882200 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
19892201 enum mlx5_flow_namespace_type type)
19902202 {
19912203 struct mlx5_flow_steering *steering = dev->priv.steering;
19922204 struct mlx5_flow_root_namespace *root_ns;
1993
- int prio;
2205
+ int prio = 0;
19942206 struct fs_prio *fs_prio;
19952207 struct mlx5_flow_namespace *ns;
19962208
....@@ -1998,40 +2210,39 @@
19982210 return NULL;
19992211
20002212 switch (type) {
2001
- case MLX5_FLOW_NAMESPACE_BYPASS:
2002
- case MLX5_FLOW_NAMESPACE_LAG:
2003
- case MLX5_FLOW_NAMESPACE_OFFLOADS:
2004
- case MLX5_FLOW_NAMESPACE_ETHTOOL:
2005
- case MLX5_FLOW_NAMESPACE_KERNEL:
2006
- case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2007
- case MLX5_FLOW_NAMESPACE_ANCHOR:
2008
- prio = type;
2009
- break;
20102213 case MLX5_FLOW_NAMESPACE_FDB:
20112214 if (steering->fdb_root_ns)
20122215 return &steering->fdb_root_ns->ns;
2013
- else
2014
- return NULL;
2216
+ return NULL;
20152217 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
20162218 if (steering->sniffer_rx_root_ns)
20172219 return &steering->sniffer_rx_root_ns->ns;
2018
- else
2019
- return NULL;
2220
+ return NULL;
20202221 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
20212222 if (steering->sniffer_tx_root_ns)
20222223 return &steering->sniffer_tx_root_ns->ns;
2023
- else
2024
- return NULL;
2025
- case MLX5_FLOW_NAMESPACE_EGRESS:
2026
- if (steering->egress_root_ns)
2027
- return &steering->egress_root_ns->ns;
2028
- else
2029
- return NULL;
2030
- default:
20312224 return NULL;
2225
+ default:
2226
+ break;
20322227 }
20332228
2034
- root_ns = steering->root_ns;
2229
+ if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
2230
+ type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
2231
+ root_ns = steering->egress_root_ns;
2232
+ prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2233
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
2234
+ root_ns = steering->rdma_rx_root_ns;
2235
+ prio = RDMA_RX_BYPASS_PRIO;
2236
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
2237
+ root_ns = steering->rdma_rx_root_ns;
2238
+ prio = RDMA_RX_KERNEL_PRIO;
2239
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
2240
+ root_ns = steering->rdma_tx_root_ns;
2241
+ } else { /* Must be NIC RX */
2242
+ root_ns = steering->root_ns;
2243
+ prio = type;
2244
+ }
2245
+
20352246 if (!root_ns)
20362247 return NULL;
20372248
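Annotation (not part of the diff): the lookup above maps the egress namespace types onto prios of the same egress root (prio = type - MLX5_FLOW_NAMESPACE_EGRESS), so MLX5_FLOW_NAMESPACE_EGRESS_KERNEL resolves to the KERNEL_TX_IPSEC prio added to egress_root_fs earlier in this diff. A sketch, assuming CONFIG_MLX5_IPSEC is set, dev is valid, and the enum keeps EGRESS_KERNEL immediately after EGRESS as the prio arithmetic implies:

        struct mlx5_flow_namespace *egress_kernel_ns;

        /* Resolves to prio 1 of steering->egress_root_ns, i.e. the IPsec TX
         * prio, when the egress IPsec tree was built.
         */
        egress_kernel_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
        if (!egress_kernel_ns)
                return -EOPNOTSUPP;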
....@@ -2053,7 +2264,7 @@
20532264 {
20542265 struct mlx5_flow_steering *steering = dev->priv.steering;
20552266
2056
- if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
2267
+ if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
20572268 return NULL;
20582269
20592270 switch (type) {
....@@ -2074,8 +2285,10 @@
20742285 }
20752286 }
20762287
2077
-static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2078
- unsigned int prio, int num_levels)
2288
+static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2289
+ unsigned int prio,
2290
+ int num_levels,
2291
+ enum fs_node_type type)
20792292 {
20802293 struct fs_prio *fs_prio;
20812294
....@@ -2083,7 +2296,7 @@
20832296 if (!fs_prio)
20842297 return ERR_PTR(-ENOMEM);
20852298
2086
- fs_prio->node.type = FS_TYPE_PRIO;
2299
+ fs_prio->node.type = type;
20872300 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
20882301 tree_add_node(&fs_prio->node, &ns->node);
20892302 fs_prio->num_levels = num_levels;
....@@ -2091,6 +2304,19 @@
20912304 list_add_tail(&fs_prio->node.list, &ns->node.children);
20922305
20932306 return fs_prio;
2307
+}
2308
+
2309
+static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2310
+ unsigned int prio,
2311
+ int num_levels)
2312
+{
2313
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2314
+}
2315
+
2316
+static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2317
+ unsigned int prio, int num_levels)
2318
+{
2319
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
20942320 }
20952321
20962322 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
....@@ -2101,7 +2327,8 @@
21012327 return ns;
21022328 }
21032329
2104
-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
2330
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2331
+ int def_miss_act)
21052332 {
21062333 struct mlx5_flow_namespace *ns;
21072334
....@@ -2110,6 +2337,7 @@
21102337 return ERR_PTR(-ENOMEM);
21112338
21122339 fs_init_namespace(ns);
2340
+ ns->def_miss_action = def_miss_act;
21132341 tree_init_node(&ns->node, NULL, del_sw_ns);
21142342 tree_add_node(&ns->node, &prio->node);
21152343 list_add_tail(&ns->node.list, &prio->node.children);
....@@ -2176,7 +2404,7 @@
21762404 base = &fs_prio->node;
21772405 } else if (init_node->type == FS_TYPE_NAMESPACE) {
21782406 fs_get_obj(fs_prio, fs_parent_node);
2179
- fs_ns = fs_create_namespace(fs_prio);
2407
+ fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
21802408 if (IS_ERR(fs_ns))
21812409 return PTR_ERR(fs_ns);
21822410 base = &fs_ns->node;
....@@ -2217,6 +2445,17 @@
22172445 return 0;
22182446 }
22192447
2448
+static void del_sw_root_ns(struct fs_node *node)
2449
+{
2450
+ struct mlx5_flow_root_namespace *root_ns;
2451
+ struct mlx5_flow_namespace *ns;
2452
+
2453
+ fs_get_obj(ns, node);
2454
+ root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2455
+ mutex_destroy(&root_ns->chain_lock);
2456
+ kfree(node);
2457
+}
2458
+
22202459 static struct mlx5_flow_root_namespace
22212460 *create_root_ns(struct mlx5_flow_steering *steering,
22222461 enum fs_flow_table_type table_type)
....@@ -2225,7 +2464,7 @@
22252464 struct mlx5_flow_root_namespace *root_ns;
22262465 struct mlx5_flow_namespace *ns;
22272466
2228
- if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2467
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
22292468 (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
22302469 cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
22312470
....@@ -2243,7 +2482,7 @@
22432482 ns = &root_ns->ns;
22442483 fs_init_namespace(ns);
22452484 mutex_init(&root_ns->chain_lock);
2246
- tree_init_node(&ns->node, NULL, NULL);
2485
+ tree_init_node(&ns->node, NULL, del_sw_root_ns);
22472486 tree_add_node(&ns->node, NULL);
22482487
22492488 return root_ns;
....@@ -2269,9 +2508,17 @@
22692508 int acc_level_ns = acc_level;
22702509
22712510 prio->start_level = acc_level;
2272
- fs_for_each_ns(ns, prio)
2511
+ fs_for_each_ns(ns, prio) {
22732512 /* This updates start_level and num_levels of ns's priority descendants */
22742513 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2514
+
2515
+		/* If this is a prio with chains, we can jump from one chain
2516
+		 * (namespace) to another, so we accumulate the levels
2517
+		 */
2518
+ if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2519
+ acc_level = acc_level_ns;
2520
+ }
2521
+
22752522 if (!prio->num_levels)
22762523 prio->num_levels = acc_level_ns - prio->start_level;
22772524 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
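For an FS_TYPE_PRIO_CHAINS priority the member namespaces are traversed as a chain rather than in parallel, so the accumulated level is carried from one namespace to the next and the priority must reserve the full sum of levels. A worked sketch of that budget, using hypothetical values (the real FDB constants are defined elsewhere in the driver):

    /* Illustration only: hypothetical chain dimensions. */
    enum { CHAINS = 3, PRIOS_PER_CHAIN = 4, LEVELS_PER_PRIO = 2 };

    /* A chained major prio must reserve every level of every chain, since a
     * rule in chain 0 may jump to the last prio of chain 2; this mirrors the
     * "levels" computation in create_fdb_chains() later in this patch.
     */
    static int chained_prio_levels(void)
    {
    	return CHAINS * PRIOS_PER_CHAIN * LEVELS_PER_PRIO; /* 3 * 4 * 2 = 24 */
    }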
....@@ -2348,8 +2595,8 @@
23482595 tree_get_node(node);
23492596 list_for_each_entry_safe(iter, temp, &node->children, list)
23502597 clean_tree(iter);
2351
- tree_put_node(node);
2352
- tree_remove_node(node);
2598
+ tree_put_node(node, false);
2599
+ tree_remove_node(node, false);
23532600 }
23542601 }
23552602
....@@ -2369,7 +2616,7 @@
23692616 if (!steering->esw_egress_root_ns)
23702617 return;
23712618
2372
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2619
+ for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
23732620 cleanup_root_ns(steering->esw_egress_root_ns[i]);
23742621
23752622 kfree(steering->esw_egress_root_ns);
....@@ -2384,7 +2631,7 @@
23842631 if (!steering->esw_ingress_root_ns)
23852632 return;
23862633
2387
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2634
+ for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
23882635 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
23892636
23902637 kfree(steering->esw_ingress_root_ns);
....@@ -2399,8 +2646,13 @@
23992646 cleanup_egress_acls_root_ns(dev);
24002647 cleanup_ingress_acls_root_ns(dev);
24012648 cleanup_root_ns(steering->fdb_root_ns);
2649
+ steering->fdb_root_ns = NULL;
2650
+ kfree(steering->fdb_sub_ns);
2651
+ steering->fdb_sub_ns = NULL;
24022652 cleanup_root_ns(steering->sniffer_rx_root_ns);
24032653 cleanup_root_ns(steering->sniffer_tx_root_ns);
2654
+ cleanup_root_ns(steering->rdma_rx_root_ns);
2655
+ cleanup_root_ns(steering->rdma_tx_root_ns);
24042656 cleanup_root_ns(steering->egress_root_ns);
24052657 mlx5_cleanup_fc_stats(dev);
24062658 kmem_cache_destroy(steering->ftes_cache);
....@@ -2418,11 +2670,7 @@
24182670
24192671 /* Create single prio */
24202672 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2421
- if (IS_ERR(prio)) {
2422
- cleanup_root_ns(steering->sniffer_tx_root_ns);
2423
- return PTR_ERR(prio);
2424
- }
2425
- return 0;
2673
+ return PTR_ERR_OR_ZERO(prio);
24262674 }
24272675
24282676 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
....@@ -2435,36 +2683,184 @@
24352683
24362684 /* Create single prio */
24372685 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2438
- if (IS_ERR(prio)) {
2439
- cleanup_root_ns(steering->sniffer_rx_root_ns);
2440
- return PTR_ERR(prio);
2686
+ return PTR_ERR_OR_ZERO(prio);
2687
+}
2688
+
2689
+static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2690
+{
2691
+ int err;
2692
+
2693
+ steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2694
+ if (!steering->rdma_rx_root_ns)
2695
+ return -ENOMEM;
2696
+
2697
+ err = init_root_tree(steering, &rdma_rx_root_fs,
2698
+ &steering->rdma_rx_root_ns->ns.node);
2699
+ if (err)
2700
+ goto out_err;
2701
+
2702
+ set_prio_attrs(steering->rdma_rx_root_ns);
2703
+
2704
+ return 0;
2705
+
2706
+out_err:
2707
+ cleanup_root_ns(steering->rdma_rx_root_ns);
2708
+ steering->rdma_rx_root_ns = NULL;
2709
+ return err;
2710
+}
2711
+
2712
+static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2713
+{
2714
+ int err;
2715
+
2716
+ steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2717
+ if (!steering->rdma_tx_root_ns)
2718
+ return -ENOMEM;
2719
+
2720
+ err = init_root_tree(steering, &rdma_tx_root_fs,
2721
+ &steering->rdma_tx_root_ns->ns.node);
2722
+ if (err)
2723
+ goto out_err;
2724
+
2725
+ set_prio_attrs(steering->rdma_tx_root_ns);
2726
+
2727
+ return 0;
2728
+
2729
+out_err:
2730
+ cleanup_root_ns(steering->rdma_tx_root_ns);
2731
+ steering->rdma_tx_root_ns = NULL;
2732
+ return err;
2733
+}
2734
+
2735
+/* FT and tc chains are stored in the same array so we can re-use the
2736
+ * mlx5_get_fdb_sub_ns() and tc API for FT chains.
2737
+ * When creating a new ns for each chain, store it in the first available slot.
2738
+ * Assume tc chains are created and stored first and only then the FT chain.
2739
+ */
2740
+static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2741
+ struct mlx5_flow_namespace *ns)
2742
+{
2743
+ int chain = 0;
2744
+
2745
+ while (steering->fdb_sub_ns[chain])
2746
+ ++chain;
2747
+
2748
+ steering->fdb_sub_ns[chain] = ns;
2749
+}
2750
+
2751
+static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2752
+ struct fs_prio *maj_prio)
2753
+{
2754
+ struct mlx5_flow_namespace *ns;
2755
+ struct fs_prio *min_prio;
2756
+ int prio;
2757
+
2758
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2759
+ if (IS_ERR(ns))
2760
+ return PTR_ERR(ns);
2761
+
2762
+ for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2763
+ min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2764
+ if (IS_ERR(min_prio))
2765
+ return PTR_ERR(min_prio);
24412766 }
2767
+
2768
+ store_fdb_sub_ns_prio_chain(steering, ns);
2769
+
2770
+ return 0;
2771
+}
2772
+
2773
+static int create_fdb_chains(struct mlx5_flow_steering *steering,
2774
+ int fs_prio,
2775
+ int chains)
2776
+{
2777
+ struct fs_prio *maj_prio;
2778
+ int levels;
2779
+ int chain;
2780
+ int err;
2781
+
2782
+ levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2783
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2784
+ fs_prio,
2785
+ levels);
2786
+ if (IS_ERR(maj_prio))
2787
+ return PTR_ERR(maj_prio);
2788
+
2789
+ for (chain = 0; chain < chains; chain++) {
2790
+ err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2791
+ if (err)
2792
+ return err;
2793
+ }
2794
+
2795
+ return 0;
2796
+}
2797
+
2798
+static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2799
+{
2800
+ int err;
2801
+
2802
+ steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2803
+ sizeof(*steering->fdb_sub_ns),
2804
+ GFP_KERNEL);
2805
+ if (!steering->fdb_sub_ns)
2806
+ return -ENOMEM;
2807
+
2808
+ err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2809
+ if (err)
2810
+ return err;
2811
+
2812
+ err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2813
+ if (err)
2814
+ return err;
2815
+
24422816 return 0;
24432817 }
24442818
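Per the comment above store_fdb_sub_ns_prio_chain(), the tc chains created by the first create_fdb_chains() call occupy the leading slots of fdb_sub_ns and the single FT chain takes the next free slot. A small accessor sketch, assuming only the fields visible in this patch (the helper name get_chain_ns is hypothetical):

    /* Hypothetical lookup of a per-chain namespace by chain index;
     * FDB_NUM_CHAINS is the array size used in create_fdb_fast_path().
     */
    static struct mlx5_flow_namespace *
    get_chain_ns(struct mlx5_flow_steering *steering, unsigned int chain)
    {
    	if (chain >= FDB_NUM_CHAINS)
    		return NULL;
    	return steering->fdb_sub_ns[chain];
    }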
24452819 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
24462820 {
2447
- struct fs_prio *prio;
2821
+ struct fs_prio *maj_prio;
2822
+ int err;
24482823
24492824 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
24502825 if (!steering->fdb_root_ns)
24512826 return -ENOMEM;
24522827
2453
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
2454
- if (IS_ERR(prio))
2828
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
2829
+ 1);
2830
+ if (IS_ERR(maj_prio)) {
2831
+ err = PTR_ERR(maj_prio);
2832
+ goto out_err;
2833
+ }
2834
+ err = create_fdb_fast_path(steering);
2835
+ if (err)
24552836 goto out_err;
24562837
2457
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
2458
- if (IS_ERR(prio))
2838
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2839
+ if (IS_ERR(maj_prio)) {
2840
+ err = PTR_ERR(maj_prio);
24592841 goto out_err;
2842
+ }
2843
+
2844
+	/* We put this priority last, knowing that nothing will get here
2845
+	 * unless explicitly forwarded to. This is possible because the
2846
+	 * slow path tables have catch-all rules and nothing gets past
2847
+	 * those tables.
2848
+	 */
2849
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2850
+ if (IS_ERR(maj_prio)) {
2851
+ err = PTR_ERR(maj_prio);
2852
+ goto out_err;
2853
+ }
24602854
24612855 set_prio_attrs(steering->fdb_root_ns);
24622856 return 0;
24632857
24642858 out_err:
24652859 cleanup_root_ns(steering->fdb_root_ns);
2860
+ kfree(steering->fdb_sub_ns);
2861
+ steering->fdb_sub_ns = NULL;
24662862 steering->fdb_root_ns = NULL;
2467
- return PTR_ERR(prio);
2863
+ return err;
24682864 }
24692865
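Taken together, init_fdb_root_ns() lays the FDB priorities out in lookup order; the comment sketch below summarizes that order as implied by the creation sequence above (the per-priority descriptions are inferred from the surrounding code, not taken from the driver):

    /* FDB priority layout created above, consulted in this order:
     *
     *   FDB_BYPASS_PATH  - single prio created before the fast path
     *   FDB_TC_OFFLOAD   - chained tc fast-path namespaces
     *   FDB_FT_OFFLOAD   - single FT-offload chain
     *   FDB_SLOW_PATH    - slow path tables with catch-all rules
     *   FDB_PER_VPORT    - reached only via explicit forwards
     */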
24702866 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
....@@ -2496,16 +2892,18 @@
24962892 static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
24972893 {
24982894 struct mlx5_flow_steering *steering = dev->priv.steering;
2895
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
24992896 int err;
25002897 int i;
25012898
2502
- steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
2503
- sizeof(*steering->esw_egress_root_ns),
2504
- GFP_KERNEL);
2899
+ steering->esw_egress_root_ns =
2900
+ kcalloc(total_vports,
2901
+ sizeof(*steering->esw_egress_root_ns),
2902
+ GFP_KERNEL);
25052903 if (!steering->esw_egress_root_ns)
25062904 return -ENOMEM;
25072905
2508
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
2906
+ for (i = 0; i < total_vports; i++) {
25092907 err = init_egress_acl_root_ns(steering, i);
25102908 if (err)
25112909 goto cleanup_root_ns;
....@@ -2524,16 +2922,18 @@
25242922 static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
25252923 {
25262924 struct mlx5_flow_steering *steering = dev->priv.steering;
2925
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
25272926 int err;
25282927 int i;
25292928
2530
- steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
2531
- sizeof(*steering->esw_ingress_root_ns),
2532
- GFP_KERNEL);
2929
+ steering->esw_ingress_root_ns =
2930
+ kcalloc(total_vports,
2931
+ sizeof(*steering->esw_ingress_root_ns),
2932
+ GFP_KERNEL);
25332933 if (!steering->esw_ingress_root_ns)
25342934 return -ENOMEM;
25352935
2536
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
2936
+ for (i = 0; i < total_vports; i++) {
25372937 err = init_ingress_acl_root_ns(steering, i);
25382938 if (err)
25392939 goto cleanup_root_ns;
....@@ -2551,16 +2951,23 @@
25512951
25522952 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
25532953 {
2554
- struct fs_prio *prio;
2954
+ int err;
25552955
25562956 steering->egress_root_ns = create_root_ns(steering,
25572957 FS_FT_NIC_TX);
25582958 if (!steering->egress_root_ns)
25592959 return -ENOMEM;
25602960
2561
- /* create 1 prio*/
2562
- prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1);
2563
- return PTR_ERR_OR_ZERO(prio);
2961
+ err = init_root_tree(steering, &egress_root_fs,
2962
+ &steering->egress_root_ns->ns.node);
2963
+ if (err)
2964
+ goto cleanup;
2965
+ set_prio_attrs(steering->egress_root_ns);
2966
+ return 0;
2967
+cleanup:
2968
+ cleanup_root_ns(steering->egress_root_ns);
2969
+ steering->egress_root_ns = NULL;
2970
+ return err;
25642971 }
25652972
25662973 int mlx5_init_fs(struct mlx5_core_dev *dev)
....@@ -2628,7 +3035,21 @@
26283035 goto err;
26293036 }
26303037
2631
- if (MLX5_IPSEC_DEV(dev)) {
3038
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3039
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3040
+ err = init_rdma_rx_root_ns(steering);
3041
+ if (err)
3042
+ goto err;
3043
+ }
3044
+
3045
+ if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3046
+ err = init_rdma_tx_root_ns(steering);
3047
+ if (err)
3048
+ goto err;
3049
+ }
3050
+
3051
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
3052
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
26323053 err = init_egress_root_ns(steering);
26333054 if (err)
26343055 goto err;
....@@ -2657,7 +3078,7 @@
26573078 goto update_ft_fail;
26583079 }
26593080
2660
- err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
3081
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
26613082 false);
26623083 if (err) {
26633084 mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
....@@ -2701,7 +3122,7 @@
27013122 goto out;
27023123 }
27033124
2704
- err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
3125
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
27053126 true);
27063127 if (err)
27073128 mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
....@@ -2718,3 +3139,160 @@
27183139 return err;
27193140 }
27203141 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3142
+
3143
+static struct mlx5_flow_root_namespace
3144
+*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3145
+{
3146
+ struct mlx5_flow_namespace *ns;
3147
+
3148
+ if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3149
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3150
+ ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3151
+ else
3152
+ ns = mlx5_get_flow_namespace(dev, ns_type);
3153
+ if (!ns)
3154
+ return NULL;
3155
+
3156
+ return find_root(&ns->node);
3157
+}
3158
+
3159
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3160
+ u8 ns_type, u8 num_actions,
3161
+ void *modify_actions)
3162
+{
3163
+ struct mlx5_flow_root_namespace *root;
3164
+ struct mlx5_modify_hdr *modify_hdr;
3165
+ int err;
3166
+
3167
+ root = get_root_namespace(dev, ns_type);
3168
+ if (!root)
3169
+ return ERR_PTR(-EOPNOTSUPP);
3170
+
3171
+ modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3172
+ if (!modify_hdr)
3173
+ return ERR_PTR(-ENOMEM);
3174
+
3175
+ modify_hdr->ns_type = ns_type;
3176
+ err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3177
+ modify_actions, modify_hdr);
3178
+ if (err) {
3179
+ kfree(modify_hdr);
3180
+ return ERR_PTR(err);
3181
+ }
3182
+
3183
+ return modify_hdr;
3184
+}
3185
+EXPORT_SYMBOL(mlx5_modify_header_alloc);
3186
+
3187
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3188
+ struct mlx5_modify_hdr *modify_hdr)
3189
+{
3190
+ struct mlx5_flow_root_namespace *root;
3191
+
3192
+ root = get_root_namespace(dev, modify_hdr->ns_type);
3193
+ if (WARN_ON(!root))
3194
+ return;
3195
+ root->cmds->modify_header_dealloc(root, modify_hdr);
3196
+ kfree(modify_hdr);
3197
+}
3198
+EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3199
+
3200
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3201
+ int reformat_type,
3202
+ size_t size,
3203
+ void *reformat_data,
3204
+ enum mlx5_flow_namespace_type ns_type)
3205
+{
3206
+ struct mlx5_pkt_reformat *pkt_reformat;
3207
+ struct mlx5_flow_root_namespace *root;
3208
+ int err;
3209
+
3210
+ root = get_root_namespace(dev, ns_type);
3211
+ if (!root)
3212
+ return ERR_PTR(-EOPNOTSUPP);
3213
+
3214
+ pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3215
+ if (!pkt_reformat)
3216
+ return ERR_PTR(-ENOMEM);
3217
+
3218
+ pkt_reformat->ns_type = ns_type;
3219
+ pkt_reformat->reformat_type = reformat_type;
3220
+ err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
3221
+ reformat_data, ns_type,
3222
+ pkt_reformat);
3223
+ if (err) {
3224
+ kfree(pkt_reformat);
3225
+ return ERR_PTR(err);
3226
+ }
3227
+
3228
+ return pkt_reformat;
3229
+}
3230
+EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3231
+
3232
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3233
+ struct mlx5_pkt_reformat *pkt_reformat)
3234
+{
3235
+ struct mlx5_flow_root_namespace *root;
3236
+
3237
+ root = get_root_namespace(dev, pkt_reformat->ns_type);
3238
+ if (WARN_ON(!root))
3239
+ return;
3240
+ root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3241
+ kfree(pkt_reformat);
3242
+}
3243
+EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
3244
+
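The two alloc/dealloc pairs exported above follow the same pattern: allocate the object through the root namespace's command interface, attach it to a flow action, and free it after the rule is removed. A hedged usage sketch for packet reformat (example_add_reformat and its surrounding error handling are illustrative only; modify headers are attached the same way via flow_act->modify_hdr):

    /* Illustrative caller, not part of this patch: allocate an egress
     * reformat context and hang it on a flow action before rule creation.
     */
    static int example_add_reformat(struct mlx5_core_dev *dev,
    				struct mlx5_flow_act *flow_act,
    				int reformat_type,
    				void *data, size_t data_sz)
    {
    	struct mlx5_pkt_reformat *pr;

    	pr = mlx5_packet_reformat_alloc(dev, reformat_type, data_sz, data,
    					MLX5_FLOW_NAMESPACE_EGRESS);
    	if (IS_ERR(pr))
    		return PTR_ERR(pr);

    	flow_act->pkt_reformat = pr;
    	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
    	/* caller must mlx5_packet_reformat_dealloc(dev, pr) after the
    	 * rule has been deleted
    	 */
    	return 0;
    }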
3245
+int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3246
+ struct mlx5_flow_root_namespace *peer_ns)
3247
+{
3248
+ if (peer_ns && ns->mode != peer_ns->mode) {
3249
+ mlx5_core_err(ns->dev,
3250
+ "Can't peer namespace of different steering mode\n");
3251
+ return -EINVAL;
3252
+ }
3253
+
3254
+ return ns->cmds->set_peer(ns, peer_ns);
3255
+}
3256
+
3257
+/* This function should be called only at the init stage of the namespace.
3258
+ * It is not safe to call this function while steering operations
3259
+ * are being executed in the namespace.
3260
+ */
3261
+int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3262
+ enum mlx5_flow_steering_mode mode)
3263
+{
3264
+ struct mlx5_flow_root_namespace *root;
3265
+ const struct mlx5_flow_cmds *cmds;
3266
+ int err;
3267
+
3268
+ root = find_root(&ns->node);
3269
+ if (&root->ns != ns)
3270
+ /* Can't set cmds to non root namespace */
3271
+ return -EINVAL;
3272
+
3273
+ if (root->table_type != FS_FT_FDB)
3274
+ return -EOPNOTSUPP;
3275
+
3276
+ if (root->mode == mode)
3277
+ return 0;
3278
+
3279
+ if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3280
+ cmds = mlx5_fs_cmd_get_dr_cmds();
3281
+ else
3282
+ cmds = mlx5_fs_cmd_get_fw_cmds();
3283
+ if (!cmds)
3284
+ return -EOPNOTSUPP;
3285
+
3286
+ err = cmds->create_ns(root);
3287
+ if (err) {
3288
+ mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3289
+ err);
3290
+ return err;
3291
+ }
3292
+
3293
+ root->cmds->destroy_ns(root);
3294
+ root->cmds = cmds;
3295
+ root->mode = mode;
3296
+
3297
+ return 0;
3298
+}
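
As the comment above notes, the steering mode can only be flipped before any steering objects exist in the namespace. A minimal call-site sketch, assuming the FDB namespace is the one being switched (the function name example_enable_smfs is hypothetical):

    /* Hypothetical call site: switch the FDB root namespace to
     * software-managed steering right after mlx5_init_fs(), before
     * any flow table has been created in it.
     */
    static int example_enable_smfs(struct mlx5_core_dev *dev)
    {
    	struct mlx5_flow_namespace *ns;

    	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
    	if (!ns)
    		return -EOPNOTSUPP;

    	return mlx5_flow_namespace_set_mode(ns, MLX5_FLOW_STEERING_MODE_SMFS);
    }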