forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -35,11 +35,15 @@
 #include <linux/mlx5/mlx5_ifc.h>
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
+#include "esw/acl/lgcy.h"
 #include "mlx5_core.h"
+#include "lib/eq.h"
 #include "eswitch.h"
 #include "fs_core.h"
-
-#define UPLINK_VPORT 0xFFFF
+#include "devlink.h"
+#include "ecpf.h"
+#include "en/mod_hdr.h"
 
 enum {
 	MLX5_ACTION_NONE = 0,
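Note: the driver-local UPLINK_VPORT (0xFFFF) removed above is replaced throughout this diff by the shared MLX5_VPORT_UPLINK constant. For reference, the vport number enum in upstream include/linux/mlx5/vport.h looks roughly like the sketch below (values recalled from upstream headers and possibly version-dependent); all well-known vport numbers fit in 16 bits, which is what motivates the u32 to u16 vport narrowing seen in later hunks:

	enum {
		MLX5_VPORT_PF		= 0x0,
		MLX5_VPORT_FIRST_VF	= 0x1,
		MLX5_VPORT_ECPF		= 0xfffe,	/* embedded CPU PF on smartNICs */
		MLX5_VPORT_UPLINK	= 0xffff,	/* same value the old UPLINK_VPORT used */
	};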
@@ -51,29 +55,62 @@
 struct vport_addr {
 	struct l2addr_node     node;
 	u8                     action;
-	u32                    vport;
+	u16                    vport;
 	struct mlx5_flow_handle *flow_rule;
 	bool mpfs; /* UC MAC was added to MPFs */
 	/* A flag indicating that mac was added due to mc promiscuous vport */
 	bool mc_promisc;
 };
 
-enum {
-	UC_ADDR_CHANGE = BIT(0),
-	MC_ADDR_CHANGE = BIT(1),
-	PROMISC_CHANGE = BIT(3),
-};
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
 
-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
-			    MC_ADDR_CHANGE | \
-			    PROMISC_CHANGE)
+static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
+{
+	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return -EOPNOTSUPP;
+
+	if (!MLX5_ESWITCH_MANAGER(dev))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	int err;
+
+	err = mlx5_eswitch_check(dev);
+	if (err)
+		return ERR_PTR(err);
+
+	return dev->priv.eswitch;
+}
+
+struct mlx5_vport *__must_check
+mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	u16 idx;
+
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
+		return ERR_PTR(-EPERM);
+
+	idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+
+	if (idx > esw->total_vports - 1) {
+		esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
+			  vport_num, idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return &esw->vports[idx];
+}
 
 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 					u32 events_mask)
 {
-	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
-	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
 	void *nic_vport_ctx;
 
 	MLX5_SET(modify_nic_vport_context_in, in,
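Note: the new mlx5_eswitch_get_vport() accessor is __must_check and returns ERR_PTR() rather than NULL on failure, so callers are expected to test it with IS_ERR() before dereferencing. A minimal hypothetical caller, purely to illustrate the contract (example_set_vport_trust is not a function in this diff):

	static int example_set_vport_trust(struct mlx5_eswitch *esw, u16 vport_num,
					   bool trusted)
	{
		struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

		if (IS_ERR(vport))	/* -EPERM or -EINVAL encoded in the pointer */
			return PTR_ERR(vport);

		vport->info.trusted = trusted;	/* illustrative; real callers hold state_lock */
		return 0;
	}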
@@ -86,36 +123,34 @@
 
 	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
 
-	if (events_mask & UC_ADDR_CHANGE)
+	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
 		MLX5_SET(nic_vport_context, nic_vport_ctx,
 			 event_on_uc_address_change, 1);
-	if (events_mask & MC_ADDR_CHANGE)
+	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
 		MLX5_SET(nic_vport_context, nic_vport_ctx,
 			 event_on_mc_address_change, 1);
-	if (events_mask & PROMISC_CHANGE)
+	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
 		MLX5_SET(nic_vport_context, nic_vport_ctx,
 			 event_on_promisc_change, 1);
 
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
 }
 
 /* E-Switch vport context HW commands */
-static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
-					void *in, int inlen)
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+					  bool other_vport, void *in)
 {
-	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
-
 	MLX5_SET(modify_esw_vport_context_in, in, opcode,
 		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
+	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
 }
 
-static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
+static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
 				  u16 vlan, u8 qos, u8 set_flags)
 {
-	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
 
 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
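Note: the conversions from mlx5_cmd_exec() to mlx5_cmd_exec_in() drop the hand-rolled output buffers. In upstream mlx5 (include/linux/mlx5/driver.h, v5.7 and later), mlx5_cmd_exec_in is approximately the macro below, which derives both buffer sizes from the IFC structure layouts and allocates a throwaway output buffer on the stack (reproduced from memory, so treat it as a sketch):

	#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                      \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,     \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out))

	#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                              \
	({                                                                      \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                    \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                    \
	})

This is why the converted callers keep only the in[] array: the out[] declaration and the sizeof() bookkeeping move into the macro.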
@@ -144,12 +179,12 @@
 	MLX5_SET(modify_esw_vport_context_in, in,
 		 field_select.vport_cvlan_insert, 1);
 
-	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
+	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
 }
 
 /* E-Switch FDB */
 static struct mlx5_flow_handle *
-__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
 			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
 {
 	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
@@ -185,7 +220,7 @@
 				misc_parameters);
 	mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 			       misc_parameters);
-	MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+	MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
 	MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
 }
 
@@ -212,7 +247,7 @@
 }
 
 static struct mlx5_flow_handle *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
 {
 	u8 mac_c[ETH_ALEN];
 
@@ -221,7 +256,7 @@
 }
 
 static struct mlx5_flow_handle *
-esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
 {
 	u8 mac_c[ETH_ALEN];
 	u8 mac_v[ETH_ALEN];
@@ -234,7 +269,7 @@
 }
 
 static struct mlx5_flow_handle *
-esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
 {
 	u8 mac_c[ETH_ALEN];
 	u8 mac_v[ETH_ALEN];
@@ -242,6 +277,40 @@
 	eth_zero_addr(mac_c);
 	eth_zero_addr(mac_v);
 	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
+}
+
+enum {
+	LEGACY_VEPA_PRIO = 0,
+	LEGACY_FDB_PRIO,
+};
+
+static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *fdb;
+	int err;
+
+	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* num FTE 2, num FG 2 */
+	ft_attr.prio = LEGACY_VEPA_PRIO;
+	ft_attr.max_fte = 2;
+	ft_attr.autogroup.max_num_groups = 2;
+	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+	if (IS_ERR(fdb)) {
+		err = PTR_ERR(fdb);
+		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
+		return err;
+	}
+	esw->fdb_table.legacy.vepa_fdb = fdb;
+
+	return 0;
 }
 
 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
@@ -261,7 +330,7 @@
 	esw_debug(dev, "Create FDB log_max_size(%d)\n",
 		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
 		return -EOPNOTSUPP;
@@ -272,8 +341,8 @@
 		return -ENOMEM;
 
 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
 	ft_attr.max_fte = table_size;
+	ft_attr.prio = LEGACY_FDB_PRIO;
 	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
 	if (IS_ERR(fdb)) {
 		err = PTR_ERR(fdb);
@@ -332,39 +401,100 @@
 	esw->fdb_table.legacy.promisc_grp = g;
 
 out:
-	if (err) {
-		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
-			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
-			esw->fdb_table.legacy.allmulti_grp = NULL;
-		}
-		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
-			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
-			esw->fdb_table.legacy.addr_grp = NULL;
-		}
-		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) {
-			mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
-			esw->fdb_table.legacy.fdb = NULL;
-		}
-	}
+	if (err)
+		esw_destroy_legacy_fdb_table(esw);
 
 	kvfree(flow_group_in);
 	return err;
 }
 
+static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+	esw_debug(esw->dev, "Destroy VEPA Table\n");
+	if (!esw->fdb_table.legacy.vepa_fdb)
+		return;
+
+	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
+	esw->fdb_table.legacy.vepa_fdb = NULL;
+}
+
 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
 {
+	esw_debug(esw->dev, "Destroy FDB Table\n");
 	if (!esw->fdb_table.legacy.fdb)
 		return;
 
-	esw_debug(esw->dev, "Destroy FDB Table\n");
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+	if (esw->fdb_table.legacy.promisc_grp)
+		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+	if (esw->fdb_table.legacy.allmulti_grp)
+		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+	if (esw->fdb_table.legacy.addr_grp)
+		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
 	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+
 	esw->fdb_table.legacy.fdb = NULL;
 	esw->fdb_table.legacy.addr_grp = NULL;
 	esw->fdb_table.legacy.allmulti_grp = NULL;
 	esw->fdb_table.legacy.promisc_grp = NULL;
+}
+
+static int esw_create_legacy_table(struct mlx5_eswitch *esw)
+{
+	int err;
+
+	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
+	err = esw_create_legacy_vepa_table(esw);
+	if (err)
+		return err;
+
+	err = esw_create_legacy_fdb_table(esw);
+	if (err)
+		esw_destroy_legacy_vepa_table(esw);
+
+	return err;
+}
+
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+	esw_cleanup_vepa_rules(esw);
+	esw_destroy_legacy_fdb_table(esw);
+	esw_destroy_legacy_vepa_table(esw);
+}
+
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+					MLX5_VPORT_MC_ADDR_CHANGE | \
+					MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+	struct mlx5_vport *vport;
+	int ret, i;
+
+	ret = esw_create_legacy_table(esw);
+	if (ret)
+		return ret;
+
+	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+
+	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+	if (ret)
+		esw_destroy_legacy_table(esw);
+	return ret;
+}
+
+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+	struct esw_mc_addr *mc_promisc;
+
+	mlx5_eswitch_disable_pf_vf_vports(esw);
+
+	mc_promisc = &esw->mc_promisc;
+	if (mc_promisc->uplink_rule)
+		mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+	esw_destroy_legacy_table(esw);
 }
 
 /* E-Switch vport UC/MC lists management */
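Note: esw_legacy_enable()/esw_legacy_disable() bundle what was previously open-coded in the SR-IOV enable path: create the VEPA and FDB tables, default VF link state to AUTO, then enable the PF/ECPF/VF vports with the legacy event mask. In upstream kernels around v5.10 these are invoked from the mode dispatch in the eswitch enable path, roughly as sketched below (simplified from memory, not part of this diff):

	if (mode == MLX5_ESWITCH_LEGACY)
		err = esw_legacy_enable(esw);
	else
		err = esw_offloads_enable(esw);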
@@ -374,19 +504,19 @@
 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 {
 	u8 *mac = vaddr->node.addr;
-	u32 vport = vaddr->vport;
+	u16 vport = vaddr->vport;
 	int err;
 
-	/* Skip mlx5_mpfs_add_mac for PFs,
-	 * it is already done by the PF netdev in mlx5e_execute_l2_action
+	/* Skip mlx5_mpfs_add_mac for eswitch_managers,
+	 * it is already done by its netdev in mlx5e_execute_l2_action
 	 */
-	if (!vport)
+	if (mlx5_esw_is_manager_vport(esw, vport))
 		goto fdb_add;
 
 	err = mlx5_mpfs_add_mac(esw->dev, mac);
 	if (err) {
 		esw_warn(esw->dev,
-			 "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n",
+			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
 			 mac, vport, err);
 		return err;
 	}
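Note: the old `!vport` test is replaced by mlx5_esw_is_manager_vport() because on ECPF-capable devices (smartNICs) the eswitch manager is the embedded-CPU PF, not vport 0. In upstream eswitch.h the helper is essentially a one-line comparison against the manager vport recorded at eswitch init (sketch from memory, may differ by version):

	static inline bool
	mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
	{
		return esw->manager_vport == vport_num;
	}

where manager_vport is MLX5_VPORT_ECPF when the device is an ECPF eswitch manager and MLX5_VPORT_PF otherwise.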
@@ -394,7 +524,7 @@
 
 fdb_add:
 	/* SRIOV is enabled: Forward UC MAC to vport */
-	if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
+	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
 		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
 
 	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
@@ -406,13 +536,13 @@
 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 {
 	u8 *mac = vaddr->node.addr;
-	u32 vport = vaddr->vport;
+	u16 vport = vaddr->vport;
 	int err = 0;
 
-	/* Skip mlx5_mpfs_del_mac for PFs,
-	 * it is already done by the PF netdev in mlx5e_execute_l2_action
+	/* Skip mlx5_mpfs_del_mac for eswitch managers,
+	 * it is already done by its netdev in mlx5e_execute_l2_action
 	 */
-	if (!vport || !vaddr->mpfs)
+	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
 		goto fdb_del;
 
 	err = mlx5_mpfs_del_mac(esw->dev, mac);
@@ -435,17 +565,18 @@
 				      struct esw_mc_addr *esw_mc)
 {
 	u8 *mac = vaddr->node.addr;
-	u32 vport_idx = 0;
+	struct mlx5_vport *vport;
+	u16 i, vport_num;
 
-	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
-		struct mlx5_vport *vport = &esw->vports[vport_idx];
+	mlx5_esw_for_all_vports(esw, i, vport) {
 		struct hlist_head *vport_hash = vport->mc_list;
 		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
+		vport_num = vport->vport;
 		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
-		    vaddr->vport == vport_idx)
+		    vaddr->vport == vport_num)
 			continue;
 		switch (vaddr->action) {
 		case MLX5_ACTION_ADD:
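Note: mlx5_esw_for_all_vports() replaces the manual index loop and hands back the vport pointer directly; the hardware vport number now has to be read from vport->vport (hence the vport_num local added here), since array index and vport number stop being interchangeable once PF/ECPF vports share the array. The macro is approximately the following (sketch from upstream eswitch.h, recalled from memory and possibly version-dependent):

	#define mlx5_esw_for_all_vports(esw, i, vport)		\
		for ((i) = MLX5_VPORT_PF;			\
		     (vport) = &(esw)->vports[(i)],		\
		     (i) < (esw)->total_vports; (i)++)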
@@ -457,14 +588,14 @@
 			if (!iter_vaddr) {
 				esw_warn(esw->dev,
 					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
-					 mac, vport_idx);
+					 mac, vport_num);
 				continue;
 			}
-			iter_vaddr->vport = vport_idx;
+			iter_vaddr->vport = vport_num;
 			iter_vaddr->flow_rule =
 				esw_fdb_set_vport_rule(esw,
 						       mac,
-						       vport_idx);
+						       vport_num);
 			iter_vaddr->mc_promisc = true;
 			break;
 		case MLX5_ACTION_DEL:
@@ -482,7 +613,7 @@
 	struct hlist_head *hash = esw->mc_table;
 	struct esw_mc_addr *esw_mc;
 	u8 *mac = vaddr->node.addr;
-	u32 vport = vaddr->vport;
+	u16 vport = vaddr->vport;
 
 	if (!esw->fdb_table.legacy.fdb)
 		return 0;
@@ -496,7 +627,7 @@
 		return -ENOMEM;
 
 	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
-		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
 
 	/* Add this multicast mac to all the mc promiscuous vports */
 	update_allmulti_vports(esw, vaddr, esw_mc);
@@ -522,7 +653,7 @@
 	struct hlist_head *hash = esw->mc_table;
 	struct esw_mc_addr *esw_mc;
 	u8 *mac = vaddr->node.addr;
-	u32 vport = vaddr->vport;
+	u16 vport = vaddr->vport;
 
 	if (!esw->fdb_table.legacy.fdb)
 		return 0;
@@ -561,9 +692,8 @@
 
 /* Apply vport UC/MC list to HW l2 table and FDB table */
 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
-				      u32 vport_num, int list_type)
+				      struct mlx5_vport *vport, int list_type)
 {
-	struct mlx5_vport *vport = &esw->vports[vport_num];
 	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
 	vport_addr_action vport_addr_add;
 	vport_addr_action vport_addr_del;
@@ -596,9 +726,8 @@
 
 /* Sync vport UC/MC list from vport context */
 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
-				       u32 vport_num, int list_type)
+				       struct mlx5_vport *vport, int list_type)
 {
-	struct mlx5_vport *vport = &esw->vports[vport_num];
 	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
 	u8 (*mac_list)[ETH_ALEN];
 	struct l2addr_node *node;
@@ -627,12 +756,12 @@
 	if (!vport->enabled)
 		goto out;
 
-	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
+	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
 					    mac_list, &size);
 	if (err)
 		goto out;
 	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
-		  vport_num, is_uc ? "UC" : "MC", size);
+		  vport->vport, is_uc ? "UC" : "MC", size);
 
 	for (i = 0; i < size; i++) {
 		if (is_uc && !is_valid_ether_addr(mac_list[i]))
@@ -670,10 +799,10 @@
 		if (!addr) {
 			esw_warn(esw->dev,
 				 "Failed to add MAC(%pM) to vport[%d] DB\n",
-				 mac_list[i], vport_num);
+				 mac_list[i], vport->vport);
 			continue;
 		}
-		addr->vport = vport_num;
+		addr->vport = vport->vport;
 		addr->action = MLX5_ACTION_ADD;
 	}
 out:
@@ -683,9 +812,9 @@
 /* Sync vport UC/MC list from vport context
  * Must be called after esw_update_vport_addr_list
  */
-static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
+					struct mlx5_vport *vport)
 {
-	struct mlx5_vport *vport = &esw->vports[vport_num];
 	struct l2addr_node *node;
 	struct vport_addr *addr;
 	struct hlist_head *hash;
@@ -708,32 +837,32 @@
 		if (!addr) {
 			esw_warn(esw->dev,
 				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
-				 mac, vport_num);
+				 mac, vport->vport);
 			continue;
 		}
-		addr->vport = vport_num;
+		addr->vport = vport->vport;
 		addr->action = MLX5_ACTION_ADD;
 		addr->mc_promisc = true;
 	}
 }
 
 /* Apply vport rx mode to HW FDB table */
-static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
+				    struct mlx5_vport *vport,
 				    bool promisc, bool mc_promisc)
 {
 	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
-	struct mlx5_vport *vport = &esw->vports[vport_num];
 
 	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
 		goto promisc;
 
 	if (mc_promisc) {
 		vport->allmulti_rule =
-			esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
 		if (!allmulti_addr->uplink_rule)
 			allmulti_addr->uplink_rule =
 				esw_fdb_set_vport_allmulti_rule(esw,
-								UPLINK_VPORT);
+								MLX5_VPORT_UPLINK);
 		allmulti_addr->refcnt++;
 	} else if (vport->allmulti_rule) {
 		mlx5_del_flow_rules(vport->allmulti_rule);
@@ -752,8 +881,8 @@
 		return;
 
 	if (promisc) {
-		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
-								     vport_num);
+		vport->promisc_rule =
+			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
 	} else if (vport->promisc_rule) {
 		mlx5_del_flow_rules(vport->promisc_rule);
 		vport->promisc_rule = NULL;
@@ -761,23 +890,23 @@
 }
 
 /* Sync vport rx mode from vport context */
-static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
+				     struct mlx5_vport *vport)
 {
-	struct mlx5_vport *vport = &esw->vports[vport_num];
 	int promisc_all = 0;
 	int promisc_uc = 0;
 	int promisc_mc = 0;
 	int err;
 
 	err = mlx5_query_nic_vport_promisc(esw->dev,
-					   vport_num,
+					   vport->vport,
 					   &promisc_uc,
 					   &promisc_mc,
 					   &promisc_all);
 	if (err)
 		return;
 	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
-		  vport_num, promisc_all, promisc_mc);
+		  vport->vport, promisc_all, promisc_mc);
 
 	if (!vport->info.trusted || !vport->enabled) {
 		promisc_uc = 0;
@@ -785,7 +914,7 @@
 		promisc_all = 0;
 	}
 
-	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+	esw_apply_vport_rx_mode(esw, vport, promisc_all,
 				(promisc_all || promisc_mc));
 }
 
@@ -795,32 +924,26 @@
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	u8 mac[ETH_ALEN];
 
-	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
+	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
 	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
 		  vport->vport, mac);
 
-	if (vport->enabled_events & UC_ADDR_CHANGE) {
-		esw_update_vport_addr_list(esw, vport->vport,
-					   MLX5_NVPRT_LIST_TYPE_UC);
-		esw_apply_vport_addr_list(esw, vport->vport,
-					  MLX5_NVPRT_LIST_TYPE_UC);
+	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
+		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
+		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
 	}
 
-	if (vport->enabled_events & MC_ADDR_CHANGE) {
-		esw_update_vport_addr_list(esw, vport->vport,
-					   MLX5_NVPRT_LIST_TYPE_MC);
-	}
+	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
+		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
 
-	if (vport->enabled_events & PROMISC_CHANGE) {
-		esw_update_vport_rx_mode(esw, vport->vport);
+	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
+		esw_update_vport_rx_mode(esw, vport);
 		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
-			esw_update_vport_mc_promisc(esw, vport->vport);
+			esw_update_vport_mc_promisc(esw, vport);
 	}
 
-	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
-		esw_apply_vport_addr_list(esw, vport->vport,
-					  MLX5_NVPRT_LIST_TYPE_MC);
-	}
+	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
+		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
 
 	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
 	if (vport->enabled)
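Note: the MLX5_VPORT_*_CHANGE flags used here are the relocated versions of the driver-local enum deleted in the second hunk; upstream moves them into eswitch.h as a named type so esw_enable_vport() callers can pass a typed mask. A sketch, recalled from upstream and possibly version-dependent:

	enum mlx5_eswitch_vport_event {
		MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
		MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
		MLX5_VPORT_PROMISC_CHANGE = BIT(3),
	};

The values (including the BIT(3) gap) are unchanged from the old UC_ADDR_CHANGE/MC_ADDR_CHANGE/PROMISC_CHANGE definitions.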
@@ -839,474 +962,49 @@
 	mutex_unlock(&esw->state_lock);
 }
 
-static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
-				       struct mlx5_vport *vport)
+static bool element_type_supported(struct mlx5_eswitch *esw, int type)
 {
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_flow_group *vlan_grp = NULL;
-	struct mlx5_flow_group *drop_grp = NULL;
-	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_namespace *root_ns;
-	struct mlx5_flow_table *acl;
-	void *match_criteria;
-	u32 *flow_group_in;
-	/* The egress acl table contains 2 rules:
-	 * 1)Allow traffic with vlan_tag=vst_vlan_id
-	 * 2)Drop all other traffic.
-	 */
-	int table_size = 2;
-	int err = 0;
+	const struct mlx5_core_dev *dev = esw->dev;
 
-	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
-		return -EOPNOTSUPP;
-
-	if (!IS_ERR_OR_NULL(vport->egress.acl))
-		return 0;
-
-	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
-		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
-
-	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
-						    vport->vport);
-	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
-		return -EOPNOTSUPP;
+	switch (type) {
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_TASR;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_VPORT;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
 	}
-
-	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
-	if (!flow_group_in)
-		return -ENOMEM;
-
-	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR(acl)) {
-		err = PTR_ERR(acl);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
-	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(vlan_grp)) {
-		err = PTR_ERR(vlan_grp);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
-	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(drop_grp)) {
-		err = PTR_ERR(drop_grp);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-
-	vport->egress.acl = acl;
-	vport->egress.drop_grp = drop_grp;
-	vport->egress.allowed_vlans_grp = vlan_grp;
-out:
-	kvfree(flow_group_in);
-	if (err && !IS_ERR_OR_NULL(vlan_grp))
-		mlx5_destroy_flow_group(vlan_grp);
-	if (err && !IS_ERR_OR_NULL(acl))
-		mlx5_destroy_flow_table(acl);
-	return err;
-}
-
-static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
-					   struct mlx5_vport *vport)
-{
-	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
-		mlx5_del_flow_rules(vport->egress.allowed_vlan);
-
-	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
-		mlx5_del_flow_rules(vport->egress.drop_rule);
-
-	vport->egress.allowed_vlan = NULL;
-	vport->egress.drop_rule = NULL;
-}
-
-static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
-					 struct mlx5_vport *vport)
-{
-	if (IS_ERR_OR_NULL(vport->egress.acl))
-		return;
-
-	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
-
-	esw_vport_cleanup_egress_rules(esw, vport);
-	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
-	mlx5_destroy_flow_group(vport->egress.drop_grp);
-	mlx5_destroy_flow_table(vport->egress.acl);
-	vport->egress.allowed_vlans_grp = NULL;
-	vport->egress.drop_grp = NULL;
-	vport->egress.acl = NULL;
-}
-
-static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
-					struct mlx5_vport *vport)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_namespace *root_ns;
-	struct mlx5_flow_table *acl;
-	struct mlx5_flow_group *g;
-	void *match_criteria;
-	u32 *flow_group_in;
-	/* The ingress acl table contains 4 groups
-	 * (2 active rules at the same time -
-	 *      1 allow rule from one of the first 3 groups.
-	 *      1 drop rule from the last group):
-	 * 1)Allow untagged traffic with smac=original mac.
-	 * 2)Allow untagged traffic.
-	 * 3)Allow traffic with smac=original mac.
-	 * 4)Drop all other traffic.
-	 */
-	int table_size = 4;
-	int err = 0;
-
-	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
-		return -EOPNOTSUPP;
-
-	if (!IS_ERR_OR_NULL(vport->ingress.acl))
-		return 0;
-
-	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
-		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
-	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
-						    vport->vport);
-	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
-		return -EOPNOTSUPP;
-	}
-
-	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
-	if (!flow_group_in)
-		return -ENOMEM;
-
-	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR(acl)) {
-		err = PTR_ERR(acl);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.acl = acl;
-
-	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
-	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.allow_untagged_spoofchk_grp = g;
-
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
-
-	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.allow_untagged_only_grp = g;
-
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
-
-	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.allow_spoofchk_only_grp = g;
-
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
-
-	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.drop_grp = g;
-
-out:
-	if (err) {
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
-			mlx5_destroy_flow_group(
-					vport->ingress.allow_spoofchk_only_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
-			mlx5_destroy_flow_group(
-					vport->ingress.allow_untagged_only_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
-			mlx5_destroy_flow_group(
-					vport->ingress.allow_untagged_spoofchk_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.acl))
-			mlx5_destroy_flow_table(vport->ingress.acl);
-	}
-
-	kvfree(flow_group_in);
-	return err;
-}
-
-static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
-					    struct mlx5_vport *vport)
-{
-	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
-		mlx5_del_flow_rules(vport->ingress.drop_rule);
-
-	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
-		mlx5_del_flow_rules(vport->ingress.allow_rule);
-
-	vport->ingress.drop_rule = NULL;
-	vport->ingress.allow_rule = NULL;
-}
-
-static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
-					  struct mlx5_vport *vport)
-{
-	if (IS_ERR_OR_NULL(vport->ingress.acl))
-		return;
-
-	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
-
-	esw_vport_cleanup_ingress_rules(esw, vport);
-	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
-	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
-	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
-	mlx5_destroy_flow_group(vport->ingress.drop_grp);
-	mlx5_destroy_flow_table(vport->ingress.acl);
-	vport->ingress.acl = NULL;
-	vport->ingress.drop_grp = NULL;
-	vport->ingress.allow_spoofchk_only_grp = NULL;
-	vport->ingress.allow_untagged_only_grp = NULL;
-	vport->ingress.allow_untagged_spoofchk_grp = NULL;
-}
-
-static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *vport)
-{
-	struct mlx5_fc *counter = vport->ingress.drop_counter;
-	struct mlx5_flow_destination drop_ctr_dst = {0};
-	struct mlx5_flow_destination *dst = NULL;
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_spec *spec;
-	int dest_num = 0;
-	int err = 0;
-	u8 *smac_v;
-
-	esw_vport_cleanup_ingress_rules(esw, vport);
-
-	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
-		esw_vport_disable_ingress_acl(esw, vport);
-		return 0;
-	}
-
-	err = esw_vport_enable_ingress_acl(esw, vport);
-	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "failed to enable ingress acl (%d) on vport[%d]\n",
-			       err, vport->vport);
-		return err;
-	}
-
-	esw_debug(esw->dev,
-		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
-		  vport->vport, vport->info.vlan, vport->info.qos);
-
-	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	if (vport->info.vlan || vport->info.qos)
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
-
-	if (vport->info.spoofchk) {
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
-		smac_v = MLX5_ADDR_OF(fte_match_param,
-				      spec->match_value,
-				      outer_headers.smac_47_16);
-		ether_addr_copy(smac_v, vport->info.mac);
-	}
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-	vport->ingress.allow_rule =
-		mlx5_add_flow_rules(vport->ingress.acl, spec,
-				    &flow_act, NULL, 0);
-	if (IS_ERR(vport->ingress.allow_rule)) {
-		err = PTR_ERR(vport->ingress.allow_rule);
-		esw_warn(esw->dev,
-			 "vport[%d] configure ingress allow rule, err(%d)\n",
-			 vport->vport, err);
-		vport->ingress.allow_rule = NULL;
-		goto out;
-	}
-
-	memset(spec, 0, sizeof(*spec));
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-
-	/* Attach drop flow counter */
-	if (counter) {
-		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
-		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-		drop_ctr_dst.counter = counter;
-		dst = &drop_ctr_dst;
-		dest_num++;
-	}
-	vport->ingress.drop_rule =
-		mlx5_add_flow_rules(vport->ingress.acl, spec,
-				    &flow_act, dst, dest_num);
-	if (IS_ERR(vport->ingress.drop_rule)) {
-		err = PTR_ERR(vport->ingress.drop_rule);
-		esw_warn(esw->dev,
-			 "vport[%d] configure ingress drop rule, err(%d)\n",
-			 vport->vport, err);
-		vport->ingress.drop_rule = NULL;
-		goto out;
-	}
-
-out:
-	if (err)
-		esw_vport_cleanup_ingress_rules(esw, vport);
-	kvfree(spec);
-	return err;
-}
-
-static int esw_vport_egress_config(struct mlx5_eswitch *esw,
-				   struct mlx5_vport *vport)
-{
-	struct mlx5_fc *counter = vport->egress.drop_counter;
-	struct mlx5_flow_destination drop_ctr_dst = {0};
-	struct mlx5_flow_destination *dst = NULL;
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_spec *spec;
-	int dest_num = 0;
-	int err = 0;
-
-	esw_vport_cleanup_egress_rules(esw, vport);
-
-	if (!vport->info.vlan && !vport->info.qos) {
-		esw_vport_disable_egress_acl(esw, vport);
-		return 0;
-	}
-
-	err = esw_vport_enable_egress_acl(esw, vport);
-	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "failed to enable egress acl (%d) on vport[%d]\n",
-			       err, vport->vport);
-		return err;
-	}
-
-	esw_debug(esw->dev,
-		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
-		  vport->vport, vport->info.vlan, vport->info.qos);
-
-	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* Allowed vlan rule */
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-	vport->egress.allowed_vlan =
-		mlx5_add_flow_rules(vport->egress.acl, spec,
-				    &flow_act, NULL, 0);
-	if (IS_ERR(vport->egress.allowed_vlan)) {
-		err = PTR_ERR(vport->egress.allowed_vlan);
-		esw_warn(esw->dev,
-			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
-			 vport->vport, err);
-		vport->egress.allowed_vlan = NULL;
-		goto out;
-	}
-
-	/* Drop others rule (star rule) */
-	memset(spec, 0, sizeof(*spec));
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-
-	/* Attach egress drop flow counter */
-	if (counter) {
-		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
-		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-		drop_ctr_dst.counter = counter;
-		dst = &drop_ctr_dst;
-		dest_num++;
-	}
-	vport->egress.drop_rule =
-		mlx5_add_flow_rules(vport->egress.acl, spec,
-				    &flow_act, dst, dest_num);
-	if (IS_ERR(vport->egress.drop_rule)) {
-		err = PTR_ERR(vport->egress.drop_rule);
-		esw_warn(esw->dev,
-			 "vport[%d] configure egress drop rule failed, err(%d)\n",
-			 vport->vport, err);
-		vport->egress.drop_rule = NULL;
-	}
-out:
-	kvfree(spec);
-	return err;
+	return false;
 }
 
 /* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
+static void esw_create_tsar(struct mlx5_eswitch *esw)
 {
 	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
 	struct mlx5_core_dev *dev = esw->dev;
+	__be32 *attr;
 	int err;
 
 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
-		return 0;
+		return;
+
+	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+		return;
 
 	if (esw->qos.enabled)
-		return -EEXIST;
+		return;
+
+	MLX5_SET(scheduling_context, tsar_ctx, element_type,
+		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
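Note: element_type_supported() gates scheduling-element creation on the QOS capability bits. For reference, the ELEMENT_TYPE_CAP_MASK_* bits it tests are defined in mlx5_ifc.h roughly as below (upstream really does spell the TSAR bit "TASR"; values recalled from the header and possibly version-dependent):

	enum {
		ELEMENT_TYPE_CAP_MASK_TASR		= 1 << 0,
		ELEMENT_TYPE_CAP_MASK_VPORT		= 1 << 1,
		ELEMENT_TYPE_CAP_MASK_VPORT_TC		= 1 << 2,
		ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC	= 1 << 3,
	};

esw_create_tsar() also becomes void: QoS is now best effort at eswitch enable time, and a missing TSAR simply leaves esw->qos.enabled false rather than failing the whole enable path.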
@@ -1314,11 +1012,10 @@
 						 &esw->qos.root_tsar_id);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
-		return err;
+		return;
 	}
 
 	esw->qos.enabled = true;
-	return 0;
 }
 
 static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@@ -1337,11 +1034,11 @@
 	esw->qos.enabled = false;
 }
 
-static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
+static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
+				struct mlx5_vport *vport,
 				u32 initial_max_rate, u32 initial_bw_share)
 {
 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
-	struct mlx5_vport *vport = &esw->vports[vport_num];
 	struct mlx5_core_dev *dev = esw->dev;
 	void *vport_elem;
 	int err = 0;
@@ -1357,7 +1054,7 @@
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
 	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
-	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
 	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
@@ -1370,7 +1067,7 @@
 						 &vport->qos.esw_tsar_ix);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
-			 vport_num, err);
+			 vport->vport, err);
 		return err;
 	}
 
@@ -1378,10 +1075,10 @@
 	return 0;
 }
 
-static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
+static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
+				  struct mlx5_vport *vport)
 {
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	int err = 0;
+	int err;
 
 	if (!vport->qos.enabled)
 		return;
@@ -1391,16 +1088,16 @@
 						  vport->qos.esw_tsar_ix);
 	if (err)
 		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
-			 vport_num, err);
+			 vport->vport, err);
 
 	vport->qos.enabled = false;
 }
 
-static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
+static int esw_vport_qos_config(struct mlx5_eswitch *esw,
+				struct mlx5_vport *vport,
 				u32 max_rate, u32 bw_share)
 {
 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
-	struct mlx5_vport *vport = &esw->vports[vport_num];
 	struct mlx5_core_dev *dev = esw->dev;
 	void *vport_elem;
 	u32 bitmask = 0;
@@ -1416,7 +1113,7 @@
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
 	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
-	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
 	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
@@ -1432,14 +1129,34 @@
 			 bitmask);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
-			 vport_num, err);
+			 vport->vport, err);
 		return err;
 	}
 
 	return 0;
 }
 
-static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+			       u32 rate_mbps)
+{
+	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+	struct mlx5_vport *vport;
+
+	vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+	if (!vport->qos.enabled)
+		return -EOPNOTSUPP;
+
+	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+
+	return mlx5_modify_scheduling_element_cmd(esw->dev,
+						  SCHEDULING_HIERARCHY_E_SWITCH,
+						  ctx,
+						  vport->qos.esw_tsar_ix,
+						  MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
+}
+
+static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
 {
 	((u8 *)node_guid)[7] = mac[0];
 	((u8 *)node_guid)[6] = mac[1];
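Note: mlx5_esw_modify_vport_rate() is a new entry point that adjusts only the max_average_bw field of an already-created vport scheduling element. A hypothetical caller, purely illustrative (the real upstream user is the rate-limiting path in the offloads code):

	/* cap VF 3's rate to 1000 Mbps; assumes its QoS element exists */
	err = mlx5_esw_modify_vport_rate(esw, 3, 1000);
	if (err == -EOPNOTSUPP)
		; /* vport QoS was never enabled, e.g. TSAR creation failed */

Note the function trusts mlx5_eswitch_get_vport() to succeed for the given vport_num; the !vport->qos.enabled check is what protects the firmware command from a stale esw_tsar_ix.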
....@@ -1451,244 +1168,574 @@
14511168 ((u8 *)node_guid)[0] = mac[5];
14521169 }
14531170
1454
-static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
1455
- struct mlx5_vport *vport)
1171
+static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
1172
+ struct mlx5_vport *vport)
14561173 {
1457
- int vport_num = vport->vport;
1174
+ int ret;
14581175
1459
- if (!vport_num)
1176
+ /* Only non manager vports need ACL in legacy mode */
1177
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
1178
+ return 0;
1179
+
1180
+ ret = esw_acl_ingress_lgcy_setup(esw, vport);
1181
+ if (ret)
1182
+ goto ingress_err;
1183
+
1184
+ ret = esw_acl_egress_lgcy_setup(esw, vport);
1185
+ if (ret)
1186
+ goto egress_err;
1187
+
1188
+ return 0;
1189
+
1190
+egress_err:
1191
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
1192
+ingress_err:
1193
+ return ret;
1194
+}
1195
+
1196
+static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
1197
+ struct mlx5_vport *vport)
1198
+{
1199
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
1200
+ return esw_vport_create_legacy_acl_tables(esw, vport);
1201
+ else
1202
+ return esw_vport_create_offloads_acl_tables(esw, vport);
1203
+}
1204
+
1205
+static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
1206
+ struct mlx5_vport *vport)
1207
+
1208
+{
1209
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
14601210 return;
1211
+
1212
+ esw_acl_egress_lgcy_cleanup(esw, vport);
1213
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
1214
+}
1215
+
1216
+static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
1217
+ struct mlx5_vport *vport)
1218
+{
1219
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
1220
+ esw_vport_destroy_legacy_acl_tables(esw, vport);
1221
+ else
1222
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
1223
+}
1224
+
1225
+static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
1226
+{
1227
+ u16 vport_num = vport->vport;
1228
+ int flags;
1229
+ int err;
1230
+
1231
+ err = esw_vport_setup_acl(esw, vport);
1232
+ if (err)
1233
+ return err;
1234
+
1235
+ /* Attach vport to the eswitch rate limiter */
1236
+ esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);
1237
+
1238
+ if (mlx5_esw_is_manager_vport(esw, vport_num))
1239
+ return 0;
14611240
14621241 mlx5_modify_vport_admin_state(esw->dev,
14631242 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1464
- vport_num,
1243
+ vport_num, 1,
14651244 vport->info.link_state);
1466
- mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
1467
- mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
1468
- modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
1469
- (vport->info.vlan || vport->info.qos));
14701245
1471
- /* Only legacy mode needs ACLs */
1472
- if (esw->mode == SRIOV_LEGACY) {
1473
- esw_vport_ingress_config(esw, vport);
1474
- esw_vport_egress_config(esw, vport);
1475
- }
1476
-}
1477
-
1478
-static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
1479
-{
1480
- struct mlx5_core_dev *dev = vport->dev;
1481
-
1482
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
1483
- vport->ingress.drop_counter = mlx5_fc_create(dev, false);
1484
- if (IS_ERR(vport->ingress.drop_counter)) {
1485
- esw_warn(dev,
1486
- "vport[%d] configure ingress drop rule counter failed\n",
1487
- vport->vport);
1488
- vport->ingress.drop_counter = NULL;
1489
- }
1246
+ /* Host PF has its own mac/guid. */
1247
+ if (vport_num) {
1248
+ mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
1249
+ vport->info.mac);
1250
+ mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
1251
+ vport->info.node_guid);
14901252 }
14911253
1492
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
1493
- vport->egress.drop_counter = mlx5_fc_create(dev, false);
1494
- if (IS_ERR(vport->egress.drop_counter)) {
1495
- esw_warn(dev,
1496
- "vport[%d] configure egress drop rule counter failed\n",
1497
- vport->vport);
1498
- vport->egress.drop_counter = NULL;
1499
- }
1500
- }
1254
+ flags = (vport->info.vlan || vport->info.qos) ?
1255
+ SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
1256
+ modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
1257
+ vport->info.qos, flags);
1258
+
1259
+ return 0;
15011260 }
15021261
1503
-static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
1262
+/* Don't cleanup vport->info, it's needed to restore vport configuration */
1263
+static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
15041264 {
1505
- struct mlx5_core_dev *dev = vport->dev;
1265
+ u16 vport_num = vport->vport;
15061266
1507
- if (vport->ingress.drop_counter)
1508
- mlx5_fc_destroy(dev, vport->ingress.drop_counter);
1509
- if (vport->egress.drop_counter)
1510
- mlx5_fc_destroy(dev, vport->egress.drop_counter);
1267
+ if (!mlx5_esw_is_manager_vport(esw, vport_num))
1268
+ mlx5_modify_vport_admin_state(esw->dev,
1269
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1270
+ vport_num, 1,
1271
+ MLX5_VPORT_ADMIN_STATE_DOWN);
1272
+
1273
+ esw_vport_disable_qos(esw, vport);
1274
+ esw_vport_cleanup_acl(esw, vport);
15111275 }
15121276
1513
-static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
1514
- int enable_events)
1277
+static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
1278
+ enum mlx5_eswitch_vport_event enabled_events)
15151279 {
1516
- struct mlx5_vport *vport = &esw->vports[vport_num];
1280
+ struct mlx5_vport *vport;
1281
+ int ret;
1282
+
1283
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
15171284
15181285 mutex_lock(&esw->state_lock);
15191286 WARN_ON(vport->enabled);
15201287
15211288 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
15221289
1523
- /* Create steering drop counters for ingress and egress ACLs */
1524
- if (vport_num && esw->mode == SRIOV_LEGACY)
1525
- esw_vport_create_drop_counters(vport);
1526
-
1527
- /* Restore old vport configuration */
1528
- esw_apply_vport_conf(esw, vport);
1529
-
1530
- /* Attach vport to the eswitch rate limiter */
1531
- if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
1532
- vport->qos.bw_share))
1533
- esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
1290
+ ret = esw_vport_setup(esw, vport);
1291
+ if (ret)
1292
+ goto done;
15341293
15351294 /* Sync with current vport context */
1536
- vport->enabled_events = enable_events;
1295
+ vport->enabled_events = enabled_events;
15371296 vport->enabled = true;
15381297
1539
- /* only PF is trusted by default */
1540
- if (!vport_num)
1298
+ /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
1299
+ * in smartNIC as it's a vport group manager.
1300
+ */
1301
+ if (mlx5_esw_is_manager_vport(esw, vport_num) ||
1302
+ (!vport_num && mlx5_core_is_ecpf(esw->dev)))
15411303 vport->info.trusted = true;
1304
+
1305
+ /* External controller host PF has factory programmed MAC.
1306
+ * Read it from the device.
1307
+ */
1308
+ if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
1309
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
15421310
15431311 esw_vport_change_handle_locked(vport);
15441312
15451313 esw->enabled_vports++;
15461314 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
1315
+done:
15471316 mutex_unlock(&esw->state_lock);
1317
+ return ret;
15481318 }
15491319
1550
-static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
1320
+static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
15511321 {
1552
- struct mlx5_vport *vport = &esw->vports[vport_num];
1322
+ struct mlx5_vport *vport;
15531323
1324
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
1325
+
1326
+ mutex_lock(&esw->state_lock);
15541327 if (!vport->enabled)
1555
- return;
1328
+ goto done;
15561329
15571330 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
15581331 /* Mark this vport as disabled to discard new events */
15591332 vport->enabled = false;
15601333
1561
- synchronize_irq(pci_irq_vector(esw->dev->pdev, MLX5_EQ_VEC_ASYNC));
1562
- /* Wait for current already scheduled events to complete */
1563
- flush_workqueue(esw->work_queue);
15641334 /* Disable events from this vport */
15651335 arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1566
- mutex_lock(&esw->state_lock);
15671336 /* We don't assume VFs will cleanup after themselves.
15681337 * Calling vport change handler while vport is disabled will cleanup
15691338 * the vport resources.
15701339 */
15711340 esw_vport_change_handle_locked(vport);
15721341 vport->enabled_events = 0;
1573
- esw_vport_disable_qos(esw, vport_num);
1574
- if (vport_num && esw->mode == SRIOV_LEGACY) {
1575
- mlx5_modify_vport_admin_state(esw->dev,
1576
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1577
- vport_num,
1578
- MLX5_VPORT_ADMIN_STATE_DOWN);
1579
- esw_vport_disable_egress_acl(esw, vport);
1580
- esw_vport_disable_ingress_acl(esw, vport);
1581
- esw_vport_destroy_drop_counters(vport);
1582
- }
1342
+ esw_vport_cleanup(esw, vport);
15831343 esw->enabled_vports--;
1344
+
1345
+done:
15841346 mutex_unlock(&esw->state_lock);
1347
+}
+
+static int eswitch_vport_event(struct notifier_block *nb,
+			       unsigned long type, void *data)
+{
+	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
+	struct mlx5_eqe *eqe = data;
+	struct mlx5_vport *vport;
+	u16 vport_num;
+
+	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
+	vport = mlx5_eswitch_get_vport(esw, vport_num);
+	if (!IS_ERR(vport))
+		queue_work(esw->work_queue, &vport->vport_change_handler);
+	return NOTIFY_OK;
+}
+
+/**
+ * mlx5_esw_query_functions - Returns raw output about functions state
+ * @dev: Pointer to device to query
+ *
+ * mlx5_esw_query_functions() allocates and returns functions changed
+ * raw output memory pointer from device on success. Otherwise returns ERR_PTR.
+ * Caller must free the memory using kvfree() when valid pointer is returned.
+ */
+const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
+	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+	u32 *out;
+	int err;
+
+	out = kvzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return ERR_PTR(-ENOMEM);
+
+	MLX5_SET(query_esw_functions_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
+
+	err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
+	if (!err)
+		return out;
+
+	kvfree(out);
+	return ERR_PTR(err);
+}
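
Per the kernel-doc above, a successful caller owns the returned buffer and must release it with kvfree(). A hedged usage sketch; host_num_of_vfs is the same field this patch reads in mlx5_eswitch_update_num_of_vfs() further below:

	/* Sketch: consume the raw QUERY_ESW_FUNCTIONS output, then free it. */
	static int example_read_host_num_vfs(struct mlx5_core_dev *dev, u16 *num_vfs)
	{
		const u32 *out = mlx5_esw_query_functions(dev);

		if (IS_ERR(out))
			return PTR_ERR(out);

		*num_vfs = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_num_of_vfs);
		kvfree(out);
		return 0;
	}
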
+
+static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+{
+	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+	mlx5_eq_notifier_register(esw->dev, &esw->nb);
+
+	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
+		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
+			     ESW_FUNCTIONS_CHANGED);
+		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
+	}
+}
+
+static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
+{
+	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
+		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+
+	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+
+	flush_workqueue(esw->work_queue);
+}
+
+static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
+{
+	struct mlx5_vport *vport;
+	int i;
+
+	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+		memset(&vport->qos, 0, sizeof(vport->qos));
+		memset(&vport->info, 0, sizeof(vport->info));
+		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+	}
 }
 
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
 
-
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
+int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+			    enum mlx5_eswitch_vport_event enabled_events)
 {
 	int err;
-	int i, enabled_events;
 
-	if (!ESW_ALLOWED(esw) ||
-	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
-		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
+	err = esw_enable_vport(esw, vport_num, enabled_events);
+	if (err)
+		return err;
+
+	err = esw_offloads_load_rep(esw, vport_num);
+	if (err)
+		goto err_rep;
+
+	return err;
+
+err_rep:
+	esw_disable_vport(esw, vport_num);
+	return err;
+}
+
+void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	esw_offloads_unload_rep(esw, vport_num);
+	esw_disable_vport(esw, vport_num);
+}
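
mlx5_eswitch_unload_vport() tears down in the exact reverse order of mlx5_eswitch_load_vport() (representor first, then vport), which is why the err_rep path above only needs esw_disable_vport(). A sketch of the intended pairing (the event mask is an arbitrary example):

	/* Sketch: load and unload are strict mirrors, so callers can pair them. */
	static int example_vport_roundtrip(struct mlx5_eswitch *esw, u16 vport_num)
	{
		int err;

		err = mlx5_eswitch_load_vport(esw, vport_num,
					      MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return err;

		/* ... vport and its representor are live here ... */

		mlx5_eswitch_unload_vport(esw, vport_num);
		return 0;
	}
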
+
+void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
+{
+	int i;
+
+	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
+		mlx5_eswitch_unload_vport(esw, i);
+}
+
+int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
+				enum mlx5_eswitch_vport_event enabled_events)
+{
+	int err;
+	int i;
+
+	mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
+		err = mlx5_eswitch_load_vport(esw, i, enabled_events);
+		if (err)
+			goto vf_err;
+	}
+
+	return 0;
+
+vf_err:
+	mlx5_eswitch_unload_vf_vports(esw, i - 1);
+	return err;
+}
+
+/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
+ * whichever are present on the eswitch.
+ */
+int
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+				 enum mlx5_eswitch_vport_event enabled_events)
+{
+	int ret;
+
+	/* Enable PF vport */
+	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
+	if (ret)
+		return ret;
+
+	/* Enable ECPF vport */
+	if (mlx5_ecpf_vport_exists(esw->dev)) {
+		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
+		if (ret)
+			goto ecpf_err;
+	}
+
+	/* Enable VF vports */
+	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
+					  enabled_events);
+	if (ret)
+		goto vf_err;
+	return 0;
+
+vf_err:
+	if (mlx5_ecpf_vport_exists(esw->dev))
+		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+
+ecpf_err:
+	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
+	return ret;
+}
+
+/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
+ * whichever are previously enabled on the eswitch.
+ */
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+{
+	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+
+	if (mlx5_ecpf_vport_exists(esw->dev))
+		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+
+	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
+}
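
Bring-up walks PF, then ECPF, then VFs, and the ecpf_err/vf_err labels unwind whatever already succeeded; mlx5_eswitch_disable_pf_vf_vports() simply replays that teardown in reverse. Note the `i - 1` in mlx5_eswitch_load_vf_vports(): on a failure at VF vport i, only the vports that were actually loaded get unloaded. Schematically (generic names, illustrative only):

	/* Sketch of the unwind discipline used above:
	 *
	 *	err = load(A);		teardown path:
	 *	if (err) return err;		unload(C);
	 *	err = load(B);			unload(B);
	 *	if (err) goto undo_A;		unload(A);
	 *	err = load(C);
	 *	if (err) goto undo_B;
	 *	return 0;
	 * undo_B:	unload(B);
	 * undo_A:	unload(A);
	 *	return err;
	 */
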
+
+static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
+{
+	struct devlink *devlink = priv_to_devlink(esw->dev);
+	union devlink_param_value val;
+	int err;
+
+	err = devlink_param_driverinit_value_get(devlink,
+						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+						 &val);
+	if (!err) {
+		esw->params.large_group_num = val.vu32;
+	} else {
+		esw_warn(esw->dev,
+			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
+			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
+		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+	}
+}
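
fdb_large_groups is a driverinit devlink parameter (the warning string above names it), so the value read here only takes effect when the driver (re)initializes. For illustration only, the userspace side would look roughly like this; the PCI address and value are placeholders:

	/* Illustrative only:
	 *   devlink dev param set pci/0000:06:00.0 name fdb_large_groups \
	 *           value 20 cmode driverinit
	 * followed by a devlink reload or driver re-probe for it to apply.
	 */
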
+
+static void
+mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
+{
+	const u32 *out;
+
+	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
+
+	if (num_vfs < 0)
+		return;
+
+	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+		esw->esw_funcs.num_vfs = num_vfs;
+		return;
+	}
+
+	out = mlx5_esw_query_functions(esw->dev);
+	if (IS_ERR(out))
+		return;
+
+	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
+					  host_params_context.host_num_of_vfs);
+	kvfree(out);
+}
+
+/**
+ * mlx5_eswitch_enable_locked - Enable eswitch
+ * @esw:	Pointer to eswitch
+ * @mode:	Eswitch mode to enable
+ * @num_vfs:	Enable eswitch for given number of VFs. This is optional.
+ *		Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
+ *		Caller should pass num_vfs > 0 when enabling eswitch for
+ *		vf vports. Caller should pass num_vfs = 0, when eswitch
+ *		is enabled without sriov VFs or when caller
+ *		is unaware of the sriov state of the host PF on ECPF based
+ *		eswitch. Caller should pass < 0 when num_vfs should be
+ *		completely ignored. This is typically the case when eswitch
+ *		is enabled without sriov regardless of PF/ECPF system.
+ * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
+ * offloads mode. If num_vfs >= 0 is provided, it sets up the VF related
+ * eswitch vports. It returns 0 on success or an error code on failure.
+ */
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
+{
+	int err;
+
+	lockdep_assert_held(&esw->mode_lock);
+
+	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
 		return -EOPNOTSUPP;
 	}
 
 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
-		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+		esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
 
 	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
-		esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
+		esw_warn(esw->dev, "egress ACL is not supported by FW\n");
 
-	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
+	mlx5_eswitch_get_devlink_param(esw);
+
+	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
+
+	esw_create_tsar(esw);
+
 	esw->mode = mode;
 
-	if (mode == SRIOV_LEGACY) {
-		err = esw_create_legacy_fdb_table(esw);
-	} else {
-		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_lag_update(esw->dev);
 
-		err = esw_offloads_init(esw, nvfs + 1);
+	if (mode == MLX5_ESWITCH_LEGACY) {
+		err = esw_legacy_enable(esw);
+	} else {
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+		err = esw_offloads_enable(esw);
 	}
 
 	if (err)
 		goto abort;
 
-	err = esw_create_tsar(esw);
-	if (err)
-		esw_warn(esw->dev, "Failed to create eswitch TSAR");
+	mlx5_eswitch_event_handlers_register(esw);
 
-	/* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
-	 * 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode
-	 * 2. FDB/Eswitch is programmed by user space tools
-	 */
-	enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
-	for (i = 0; i <= nvfs; i++)
-		esw_enable_vport(esw, i, enabled_events);
+	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
+		 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+		 esw->esw_funcs.num_vfs, esw->enabled_vports);
 
-	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
-		 esw->enabled_vports);
 	return 0;
 
 abort:
-	esw->mode = SRIOV_NONE;
+	esw->mode = MLX5_ESWITCH_NONE;
 
-	if (mode == SRIOV_OFFLOADS)
+	if (mode == MLX5_ESWITCH_OFFLOADS) {
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+	}
+	esw_destroy_tsar(esw);
 	return err;
 }
 
-void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
+/**
+ * mlx5_eswitch_enable - Enable eswitch
+ * @esw:	Pointer to eswitch
+ * @num_vfs:	Enable eswitch for the given number of VFs.
+ *		Caller must pass num_vfs > 0 when enabling eswitch for
+ *		vf vports.
+ * mlx5_eswitch_enable() returns 0 on success or error code on failure.
+ */
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
 {
-	struct esw_mc_addr *mc_promisc;
-	int old_mode;
-	int nvports;
-	int i;
+	int ret;
 
-	if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
+	if (!ESW_ALLOWED(esw))
+		return 0;
+
+	down_write(&esw->mode_lock);
+	if (esw->mode == MLX5_ESWITCH_NONE) {
+		ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+	} else {
+		enum mlx5_eswitch_vport_event vport_events;
+
+		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
+					MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
+		ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
+		if (!ret)
+			esw->esw_funcs.num_vfs = num_vfs;
+	}
+	up_write(&esw->mode_lock);
+	return ret;
+}
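
mlx5_eswitch_enable() is the unlocked entry point: it takes mode_lock itself, performs a first-time enable in legacy mode, and on an already-enabled switch only loads the VF vports. A hedged caller sketch (the wrapper name is invented; the real SR-IOV path lives elsewhere in the driver):

	/* Sketch: how an SR-IOV enable path might drive the eswitch. */
	static int example_sriov_enable(struct mlx5_core_dev *dev, int num_vfs)
	{
		int err;

		err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
		if (err)
			mlx5_core_warn(dev, "eswitch enable failed, err(%d)\n", err);
		return err;
	}

The disable side below mirrors it: mlx5_eswitch_disable() takes the same lock, delegates to the _locked variant, and resets esw_funcs.num_vfs to 0.
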
+
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
+{
+	int old_mode;
+
+	lockdep_assert_held_write(&esw->mode_lock);
+
+	if (esw->mode == MLX5_ESWITCH_NONE)
 		return;
 
-	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
-		 esw->enabled_vports, esw->mode);
+	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+		 esw->esw_funcs.num_vfs, esw->enabled_vports);
 
-	mc_promisc = &esw->mc_promisc;
-	nvports = esw->enabled_vports;
+	mlx5_eswitch_event_handlers_unregister(esw);
 
-	for (i = 0; i < esw->total_vports; i++)
-		esw_disable_vport(esw, i);
-
-	if (mc_promisc && mc_promisc->uplink_rule)
-		mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
-	esw_destroy_tsar(esw);
-
-	if (esw->mode == SRIOV_LEGACY)
-		esw_destroy_legacy_fdb_table(esw);
-	else if (esw->mode == SRIOV_OFFLOADS)
-		esw_offloads_cleanup(esw, nvports);
+	if (esw->mode == MLX5_ESWITCH_LEGACY)
+		esw_legacy_disable(esw);
+	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+		esw_offloads_disable(esw);
 
 	old_mode = esw->mode;
-	esw->mode = SRIOV_NONE;
+	esw->mode = MLX5_ESWITCH_NONE;
 
-	if (old_mode == SRIOV_OFFLOADS)
+	mlx5_lag_update(esw->dev);
+
+	if (old_mode == MLX5_ESWITCH_OFFLOADS) {
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+	}
+	esw_destroy_tsar(esw);
+
+	if (clear_vf)
+		mlx5_eswitch_clear_vf_vports_info(esw);
+}
+
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+{
+	if (!ESW_ALLOWED(esw))
+		return;
+
+	down_write(&esw->mode_lock);
+	mlx5_eswitch_disable_locked(esw, clear_vf);
+	esw->esw_funcs.num_vfs = 0;
+	up_write(&esw->mode_lock);
 }
 
 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 {
-	int total_vports = MLX5_TOTAL_VPORTS(dev);
 	struct mlx5_eswitch *esw;
-	int vport_num;
-	int err;
+	struct mlx5_vport *vport;
+	int total_vports;
+	int err, i;
 
 	if (!MLX5_VPORT_MANAGER(dev))
 		return 0;
+
+	total_vports = mlx5_eswitch_get_total_vports(dev);
 
 	esw_info(dev,
 		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
@@ -1701,6 +1748,8 @@
 		return -ENOMEM;
 
 	esw->dev = dev;
+	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
+	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
 
 	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
 	if (!esw->work_queue) {
@@ -1715,33 +1764,33 @@
 		goto abort;
 	}
 
+	esw->total_vports = total_vports;
+
 	err = esw_offloads_init_reps(esw);
 	if (err)
 		goto abort;
 
+	mutex_init(&esw->offloads.encap_tbl_lock);
 	hash_init(esw->offloads.encap_tbl);
-	hash_init(esw->offloads.mod_hdr_tbl);
+	mutex_init(&esw->offloads.decap_tbl_lock);
+	hash_init(esw->offloads.decap_tbl);
+	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
+	atomic64_set(&esw->offloads.num_flows, 0);
+	ida_init(&esw->offloads.vport_metadata_ida);
 	mutex_init(&esw->state_lock);
+	init_rwsem(&esw->mode_lock);
 
-	for (vport_num = 0; vport_num < total_vports; vport_num++) {
-		struct mlx5_vport *vport = &esw->vports[vport_num];
-
-		vport->vport = vport_num;
+	mlx5_esw_for_all_vports(esw, i, vport) {
+		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
 		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
 		vport->dev = dev;
 		INIT_WORK(&vport->vport_change_handler,
 			  esw_vport_change_handler);
 	}
 
-	esw->total_vports = total_vports;
 	esw->enabled_vports = 0;
-	esw->mode = SRIOV_NONE;
+	esw->mode = MLX5_ESWITCH_NONE;
 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
-	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
-	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
-		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
-	else
-		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 
 	dev->priv.eswitch = esw;
 	return 0;
@@ -1764,96 +1813,171 @@
 	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
 	esw_offloads_cleanup_reps(esw);
+	mutex_destroy(&esw->state_lock);
+	ida_destroy(&esw->offloads.vport_metadata_ida);
+	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
+	mutex_destroy(&esw->offloads.encap_tbl_lock);
+	mutex_destroy(&esw->offloads.decap_tbl_lock);
 	kfree(esw->vports);
 	kfree(esw);
 }
 
-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
-{
-	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
-	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
-	struct mlx5_vport *vport;
-
-	if (!esw) {
-		pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
-			vport_num);
-		return;
-	}
-
-	vport = &esw->vports[vport_num];
-	if (vport->enabled)
-		queue_work(esw->work_queue, &vport->vport_change_handler);
-}
-
 /* Vport Administration */
-#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
-
-int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
-			       int vport, u8 mac[ETH_ALEN])
+static int
+mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
+			      struct mlx5_vport *evport, const u8 *mac)
 {
-	struct mlx5_vport *evport;
+	u16 vport_num = evport->vport;
 	u64 node_guid;
 	int err = 0;
 
-	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
+	if (is_multicast_ether_addr(mac))
 		return -EINVAL;
-
-	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
 
 	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
 		mlx5_core_warn(esw->dev,
 			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
-			       vport);
+			       vport_num);
 
-	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
+	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
 	if (err) {
 		mlx5_core_warn(esw->dev,
 			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
-			       vport, err);
-		goto unlock;
+			       vport_num, err);
		return err;
 	}
 
 	node_guid_gen_from_mac(&node_guid, mac);
-	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
 	if (err)
 		mlx5_core_warn(esw->dev,
 			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
-			       vport, err);
+			       vport_num, err);
 
 	ether_addr_copy(evport->info.mac, mac);
 	evport->info.node_guid = node_guid;
-	if (evport->enabled && esw->mode == SRIOV_LEGACY)
-		err = esw_vport_ingress_config(esw, evport);
+	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
+		err = esw_acl_ingress_lgcy_setup(esw, evport);
 
-unlock:
+	return err;
+}
+
+int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
+			       u16 vport, const u8 *mac)
+{
+	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+	int err = 0;
+
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
+
+	mutex_lock(&esw->state_lock);
+	err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
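
Splitting out mlx5_esw_set_vport_mac_locked() lets the devlink port-function callbacks below reuse the same logic under state_lock. For the classic VF path, note that elsewhere in this file the VF index and vport number differ by one (ivi->vf = vport - 1), so an ndo-style caller would look roughly like this (hypothetical wrapper, offset per that convention):

	/* Sketch: ndo_set_vf_mac-style entry; "vf + 1" mirrors ivi->vf = vport - 1. */
	static int example_set_vf_mac(struct mlx5_eswitch *esw, int vf, const u8 *mac)
	{
		return mlx5_eswitch_set_vport_mac(esw, vf + 1, mac);
	}
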
+
+static bool
+is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+	return vport_num == MLX5_VPORT_PF ||
+	       mlx5_eswitch_is_vf_vport(esw, vport_num);
+}
+
+int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
+					   struct devlink_port *port,
+					   u8 *hw_addr, int *hw_addr_len,
+					   struct netlink_ext_ack *extack)
+{
+	struct mlx5_eswitch *esw;
+	struct mlx5_vport *vport;
+	int err = -EOPNOTSUPP;
+	u16 vport_num;
+
+	esw = mlx5_devlink_eswitch_get(devlink);
+	if (IS_ERR(esw))
+		return PTR_ERR(esw);
+
+	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+	if (!is_port_function_supported(esw, vport_num))
+		return -EOPNOTSUPP;
+
+	vport = mlx5_eswitch_get_vport(esw, vport_num);
+	if (IS_ERR(vport)) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+		return PTR_ERR(vport);
+	}
+
+	mutex_lock(&esw->state_lock);
+	if (vport->enabled) {
+		ether_addr_copy(hw_addr, vport->info.mac);
+		*hw_addr_len = ETH_ALEN;
+		err = 0;
+	}
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
+int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
+					   struct devlink_port *port,
+					   const u8 *hw_addr, int hw_addr_len,
+					   struct netlink_ext_ack *extack)
+{
+	struct mlx5_eswitch *esw;
+	struct mlx5_vport *vport;
+	int err = -EOPNOTSUPP;
+	u16 vport_num;
+
+	esw = mlx5_devlink_eswitch_get(devlink);
+	if (IS_ERR(esw)) {
+		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
+		return PTR_ERR(esw);
+	}
+
+	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+	if (!is_port_function_supported(esw, vport_num)) {
+		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
+		return -EINVAL;
+	}
+	vport = mlx5_eswitch_get_vport(esw, vport_num);
+	if (IS_ERR(vport)) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+		return PTR_ERR(vport);
+	}
+
+	mutex_lock(&esw->state_lock);
+	if (vport->enabled)
+		err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
+	else
+		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
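
These two callbacks back the devlink port-function hw_addr API for the PF and VF ports. For illustration only (device address and port index are placeholders):

	/* Illustrative only:
	 *   devlink port function set pci/0000:06:00.0/2 hw_addr 00:11:22:33:44:55
	 *   devlink port show pci/0000:06:00.0/2
	 */
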
 
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
-				 int vport, int link_state)
+				 u16 vport, int link_state)
 {
-	struct mlx5_vport *evport;
+	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+	int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+	int other_vport = 1;
 	int err = 0;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
 
+	if (vport == MLX5_VPORT_UPLINK) {
+		opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
+		other_vport = 0;
+		vport = 0;
+	}
 	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
 
-	err = mlx5_modify_vport_admin_state(esw->dev,
-					    MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
-					    vport, link_state);
+	err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
 	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "Failed to set vport %d link state, err = %d",
-			       vport, err);
+		mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
			       vport, opmod, err);
 		goto unlock;
 	}
 
@@ -1865,16 +1989,12 @@
 }
 
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
-				  int vport, struct ifla_vf_info *ivi)
+				  u16 vport, struct ifla_vf_info *ivi)
 {
-	struct mlx5_vport *evport;
+	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
 
-	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
-
-	evport = &esw->vports[vport];
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
 
 	memset(ivi, 0, sizeof(*ivi));
 	ivi->vf = vport - 1;
@@ -1894,70 +2014,72 @@
 }
 
 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
-				  int vport, u16 vlan, u8 qos, u8 set_flags)
+				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
 {
-	struct mlx5_vport *evport;
+	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
 	int err = 0;
 
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
+	if (vlan > 4095 || qos > 7)
 		return -EINVAL;
-
-	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
 
 	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
 	if (err)
-		goto unlock;
+		return err;
 
 	evport->info.vlan = vlan;
 	evport->info.qos = qos;
-	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
-		err = esw_vport_ingress_config(esw, evport);
+	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
+		err = esw_acl_ingress_lgcy_setup(esw, evport);
 		if (err)
-			goto unlock;
-		err = esw_vport_egress_config(esw, evport);
+			return err;
+		err = esw_acl_egress_lgcy_setup(esw, evport);
 	}
 
-unlock:
-	mutex_unlock(&esw->state_lock);
 	return err;
 }
 
 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
-				int vport, u16 vlan, u8 qos)
+				u16 vport, u16 vlan, u8 qos)
 {
 	u8 set_flags = 0;
+	int err;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
 
 	if (vlan || qos)
 		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
 
-	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+	mutex_lock(&esw->state_lock);
+	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+	mutex_unlock(&esw->state_lock);
+
+	return err;
 }
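
__mlx5_eswitch_set_vport_vlan() now expects state_lock to be held, so the public wrapper takes it and other in-driver callers can compose the double-underscore variant with their own locking. The usual userspace route into this path is the VF ndo, roughly (interface name and values are placeholders):

	/* Illustrative only:
	 *   ip link set <pf_netdev> vf 0 vlan 100 qos 3
	 */
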
 
 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
-				    int vport, bool spoofchk)
+				    u16 vport, bool spoofchk)
 {
-	struct mlx5_vport *evport;
+	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
 	bool pschk;
 	int err = 0;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
 
 	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
 	pschk = evport->info.spoofchk;
 	evport->info.spoofchk = spoofchk;
 	if (pschk && !is_valid_ether_addr(evport->info.mac))
 		mlx5_core_warn(esw->dev,
 			       "Spoofchk in set while MAC is invalid, vport(%d)\n",
 			       evport->vport);
-	if (evport->enabled && esw->mode == SRIOV_LEGACY)
-		err = esw_vport_ingress_config(esw, evport);
+	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
+		err = esw_acl_ingress_lgcy_setup(esw, evport);
 	if (err)
 		evport->info.spoofchk = pschk;
 	mutex_unlock(&esw->state_lock);
@@ -1965,18 +2087,130 @@
 	return err;
 }
 
-int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
-				 int vport, bool setting)
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
 {
-	struct mlx5_vport *evport;
+	if (esw->fdb_table.legacy.vepa_uplink_rule)
+		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
+
+	if (esw->fdb_table.legacy.vepa_star_rule)
+		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
+
+	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
+	esw->fdb_table.legacy.vepa_star_rule = NULL;
+}
+
+static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
+					 u8 setting)
+{
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+	void *misc;
+
+	if (!setting) {
+		esw_cleanup_vepa_rules(esw);
+		return 0;
+	}
+
+	if (esw->fdb_table.legacy.vepa_uplink_rule)
+		return 0;
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec)
+		return -ENOMEM;
+
+	/* Uplink rule forward uplink traffic to FDB */
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = esw->fdb_table.legacy.fdb;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+					&flow_act, &dest, 1);
+	if (IS_ERR(flow_rule)) {
+		err = PTR_ERR(flow_rule);
+		goto out;
+	} else {
+		esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
+	}
+
+	/* Star rule to forward all traffic to uplink vport */
+	memset(&dest, 0, sizeof(dest));
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest.vport.num = MLX5_VPORT_UPLINK;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
+					&flow_act, &dest, 1);
+	if (IS_ERR(flow_rule)) {
+		err = PTR_ERR(flow_rule);
+		goto out;
+	} else {
+		esw->fdb_table.legacy.vepa_star_rule = flow_rule;
+	}
+
+out:
+	kvfree(spec);
+	if (err)
+		esw_cleanup_vepa_rules(esw);
+	return err;
+}
+
+int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+{
+	int err = 0;
+
+	if (!esw)
+		return -EOPNOTSUPP;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
 
 	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
+	if (esw->mode != MLX5_ESWITCH_LEGACY) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	err = _mlx5_eswitch_set_vepa_locked(esw, setting);
+
+out:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
+int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+{
+	if (!esw)
+		return -EOPNOTSUPP;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+
+	if (esw->mode != MLX5_ESWITCH_LEGACY)
+		return -EOPNOTSUPP;
+
+	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+	return 0;
+}
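
The two flow rules above implement legacy-mode VEPA: the matched rule sends uplink-sourced traffic into the regular FDB, and the match-all "star" rule hairpins everything else out through the uplink so an external switch can reflect it. Toggling is typically driven over the bridge netlink interface, roughly (device name is a placeholder):

	/* Illustrative only:
	 *   bridge link set dev <pf_netdev> hwmode vepa
	 * and read back with "bridge -d link show", which would land in
	 * mlx5_eswitch_get_vepa() above.
	 */
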
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+				 u16 vport, bool setting)
+{
+	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
+
+	mutex_lock(&esw->state_lock);
 	evport->info.trusted = setting;
 	if (evport->enabled)
 		esw_vport_change_handle_locked(evport);
@@ -1992,8 +2226,7 @@
 	u32 max_guarantee = 0;
 	int i;
 
-	for (i = 0; i < esw->total_vports; i++) {
-		evport = &esw->vports[i];
+	mlx5_esw_for_all_vports(esw, i, evport) {
 		if (!evport->enabled || evport->info.min_rate < max_guarantee)
 			continue;
 		max_guarantee = evport->info.min_rate;
@@ -2015,8 +2248,7 @@
 	int err;
 	int i;
 
-	for (i = 0; i < esw->total_vports; i++) {
-		evport = &esw->vports[i];
+	mlx5_esw_for_all_vports(esw, i, evport) {
 		if (!evport->enabled)
 			continue;
 		vport_min_rate = evport->info.min_rate;
@@ -2031,7 +2263,7 @@
 		if (bw_share == evport->qos.bw_share)
 			continue;
 
-		err = esw_vport_qos_config(esw, i, vport_max_rate,
+		err = esw_vport_qos_config(esw, evport, vport_max_rate,
 					   bw_share);
 		if (!err)
 			evport->qos.bw_share = bw_share;
@@ -2042,10 +2274,10 @@
 	return 0;
 }
 
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
 				u32 max_rate, u32 min_rate)
 {
-	struct mlx5_vport *evport;
+	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
 	u32 fw_max_bw_share;
 	u32 previous_min_rate;
 	bool min_rate_supported;
@@ -2054,8 +2286,8 @@
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
 
 	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
 	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
@@ -2066,7 +2298,6 @@
 		return -EOPNOTSUPP;
 
 	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
 
 	if (min_rate == evport->info.min_rate)
 		goto set_max_rate;
@@ -2083,7 +2314,7 @@
 	if (max_rate == evport->info.max_rate)
 		goto unlock;
 
-	err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
+	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
 	if (!err)
 		evport->info.max_rate = max_rate;
 
@@ -2093,58 +2324,62 @@
 }
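
min_rate is converted into a TSAR bw_share that normalize_vports_min_rate() rebalances across every enabled vport, while max_rate programs a plain rate limiter on the one vport. The usual entry point is the VF rate ndo, roughly (rates in Mbit/s, names and values are placeholders):

	/* Illustrative only:
	 *   ip link set <pf_netdev> vf 0 min_tx_rate 100 max_tx_rate 1000
	 */
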
 
 static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
-					       int vport_idx,
+					       struct mlx5_vport *vport,
 					       struct mlx5_vport_drop_stats *stats)
 {
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
-	struct mlx5_vport *vport = &esw->vports[vport_idx];
 	u64 rx_discard_vport_down, tx_discard_vport_down;
 	u64 bytes = 0;
 	int err = 0;
 
-	if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+	if (esw->mode != MLX5_ESWITCH_LEGACY)
 		return 0;
 
-	if (vport->egress.drop_counter)
-		mlx5_fc_query(dev, vport->egress.drop_counter,
+	mutex_lock(&esw->state_lock);
+	if (!vport->enabled)
+		goto unlock;
+
+	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
+		mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
 			      &stats->rx_dropped, &bytes);
 
-	if (vport->ingress.drop_counter)
-		mlx5_fc_query(dev, vport->ingress.drop_counter,
+	if (vport->ingress.legacy.drop_counter)
+		mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
 			      &stats->tx_dropped, &bytes);
 
 	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
 	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
-		return 0;
+		goto unlock;
 
-	err = mlx5_query_vport_down_stats(dev, vport_idx,
+	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
 					  &rx_discard_vport_down,
 					  &tx_discard_vport_down);
 	if (err)
-		return err;
+		goto unlock;
 
 	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
 		stats->rx_dropped += rx_discard_vport_down;
 	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
 		stats->tx_dropped += tx_discard_vport_down;
 
-	return 0;
+unlock:
+	mutex_unlock(&esw->state_lock);
+	return err;
 }
 
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
-				 int vport,
+				 u16 vport_num,
 				 struct ifla_vf_stats *vf_stats)
 {
+	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
-	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
-	struct mlx5_vport_drop_stats stats = {0};
+	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
+	struct mlx5_vport_drop_stats stats = {};
 	int err = 0;
 	u32 *out;
 
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
+	if (IS_ERR(vport))
+		return PTR_ERR(vport);
 
 	out = kvzalloc(outlen, GFP_KERNEL);
 	if (!out)
@@ -2153,12 +2388,10 @@
 	MLX5_SET(query_vport_counter_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
-	MLX5_SET(query_vport_counter_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(query_vport_counter_in, in, other_vport, 1);
+	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
+	MLX5_SET(query_vport_counter_in, in, other_vport, 1);
 
-	memset(out, 0, outlen);
-	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
 	if (err)
 		goto free_out;
 
@@ -2214,6 +2447,37 @@
 
 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 {
-	return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
+	return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
+
+enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
+{
+	struct mlx5_eswitch *esw;
+
+	esw = dev->priv.eswitch;
+	return ESW_ALLOWED(esw) ? esw->offloads.encap :
+		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
+
+bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
+{
+	if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
+	     dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
+	    (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
+	     dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
+		return true;
+
+	return false;
+}
+
+bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
+			       struct mlx5_core_dev *dev1)
+{
+	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
+		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
+}