2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -45,6 +45,7 @@
 static const struct net_device_ops mlx5i_netdev_ops = {
         .ndo_open = mlx5i_open,
         .ndo_stop = mlx5i_close,
+        .ndo_get_stats64 = mlx5i_get_stats,
         .ndo_init = mlx5i_dev_init,
         .ndo_uninit = mlx5i_dev_cleanup,
         .ndo_change_mtu = mlx5i_change_mtu,
@@ -67,29 +68,31 @@
 
         params->lro_en = false;
         params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+        params->tunneled_offload_en = false;
+
+        /* CQE compression is not supported for IPoIB */
+        params->rx_cqe_compress_def = false;
+        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
 }
 
 /* Called directly after IPoIB netdevice was created to initialize SW structs */
-void mlx5i_init(struct mlx5_core_dev *mdev,
-                struct net_device *netdev,
-                const struct mlx5e_profile *profile,
-                void *ppriv)
+int mlx5i_init(struct mlx5_core_dev *mdev,
+               struct net_device *netdev,
+               const struct mlx5e_profile *profile,
+               void *ppriv)
 {
         struct mlx5e_priv *priv = mlx5i_epriv(netdev);
-        u16 max_mtu;
+        int err;
 
-        /* priv init */
-        priv->mdev = mdev;
-        priv->netdev = netdev;
-        priv->profile = profile;
-        priv->ppriv = ppriv;
-        mutex_init(&priv->state_lock);
+        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+        if (err)
+                return err;
 
-        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
-        netdev->mtu = max_mtu;
+        mlx5e_set_netdev_mtu_boundaries(priv);
+        netdev->mtu = netdev->max_mtu;
 
-        mlx5e_build_nic_params(mdev, &priv->channels.params,
-                               profile->max_nch(mdev), netdev->mtu);
+        mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
+                               netdev->mtu);
         mlx5i_build_nic_params(mdev, &priv->channels.params);
 
         mlx5e_timestamp_init(priv);
@@ -106,57 +109,109 @@
 
         netdev->netdev_ops = &mlx5i_netdev_ops;
         netdev->ethtool_ops = &mlx5i_ethtool_ops;
+
+        return 0;
 }
 
 /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
-static void mlx5i_cleanup(struct mlx5e_priv *priv)
+void mlx5i_cleanup(struct mlx5e_priv *priv)
 {
-        /* Do nothing .. */
+        mlx5e_netdev_cleanup(priv->netdev, priv);
+}
+
+static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
+{
+        struct mlx5e_sw_stats s = { 0 };
+        int i, j;
+
+        for (i = 0; i < priv->max_nch; i++) {
+                struct mlx5e_channel_stats *channel_stats;
+                struct mlx5e_rq_stats *rq_stats;
+
+                channel_stats = &priv->channel_stats[i];
+                rq_stats = &channel_stats->rq;
+
+                s.rx_packets += rq_stats->packets;
+                s.rx_bytes += rq_stats->bytes;
+
+                for (j = 0; j < priv->max_opened_tc; j++) {
+                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
+
+                        s.tx_packets += sq_stats->packets;
+                        s.tx_bytes += sq_stats->bytes;
+                        s.tx_queue_dropped += sq_stats->dropped;
+                }
+        }
+
+        memcpy(&priv->stats.sw, &s, sizeof(s));
+}
+
+void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+        struct mlx5e_priv *priv = mlx5i_epriv(dev);
+        struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+
+        mlx5i_grp_sw_update_stats(priv);
+
+        stats->rx_packets = sstats->rx_packets;
+        stats->rx_bytes = sstats->rx_bytes;
+        stats->tx_packets = sstats->tx_packets;
+        stats->tx_bytes = sstats->tx_bytes;
+        stats->tx_dropped = sstats->tx_queue_dropped;
 }
 
 int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
 {
         struct mlx5_core_dev *mdev = priv->mdev;
         struct mlx5i_priv *ipriv = priv->ppriv;
-        struct mlx5_core_qp *qp = &ipriv->qp;
-        struct mlx5_qp_context *context;
         int ret;
 
-        /* QP states */
-        context = kzalloc(sizeof(*context), GFP_KERNEL);
-        if (!context)
-                return -ENOMEM;
+        {
+                u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+                u32 *qpc;
 
-        context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
-        context->pri_path.port = 1;
-        context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
-        context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+                qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
 
-        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
-        if (ret) {
-                mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
-                goto err_qp_modify_to_err;
+                MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+                MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
+                         ipriv->pkey_index);
+                MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+                MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);
+
+                MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+                MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
+                ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
+                if (ret)
+                        goto err_qp_modify_to_err;
         }
-        memset(context, 0, sizeof(*context));
+        {
+                u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
 
-        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
-        if (ret) {
-                mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
-                goto err_qp_modify_to_err;
+                MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+                MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
+                ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
+                if (ret)
+                        goto err_qp_modify_to_err;
         }
+        {
+                u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
 
-        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
-        if (ret) {
-                mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
-                goto err_qp_modify_to_err;
+                MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+                MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
+                ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
+                if (ret)
+                        goto err_qp_modify_to_err;
         }
-
-        kfree(context);
         return 0;
 
 err_qp_modify_to_err:
-        mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp);
-        kfree(context);
+        {
+                u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};
+
+                MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
+                MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
+                mlx5_cmd_exec_in(mdev, qp_2err, in);
+        }
         return ret;
 }
 
@@ -164,29 +219,30 @@
 {
         struct mlx5i_priv *ipriv = priv->ppriv;
         struct mlx5_core_dev *mdev = priv->mdev;
-        struct mlx5_qp_context context;
-        int err;
+        u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
 
-        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
-                                  &ipriv->qp);
-        if (err)
-                mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
+        MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
+        MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
+        mlx5_cmd_exec_in(mdev, qp_2rst, in);
 }
 
 #define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
 
-int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
 {
-        u32 *in = NULL;
+        unsigned char *dev_addr = priv->netdev->dev_addr;
+        u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+        u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
+        struct mlx5i_priv *ipriv = priv->ppriv;
         void *addr_path;
+        int qpn = 0;
         int ret = 0;
-        int inlen;
         void *qpc;
 
-        inlen = MLX5_ST_SZ_BYTES(create_qp_in);
-        in = kvzalloc(inlen, GFP_KERNEL);
-        if (!in)
-                return -ENOMEM;
+        if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
+                qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
+                MLX5_SET(create_qp_in, in, input_qpn, qpn);
+        }
 
         qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
         MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
@@ -198,20 +254,40 @@
         MLX5_SET(ads, addr_path, vhca_port_num, 1);
         MLX5_SET(ads, addr_path, grh, 1);
 
-        ret = mlx5_core_create_qp(mdev, qp, in, inlen);
-        if (ret) {
-                mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
-                goto out;
-        }
+        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+        ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
+        if (ret)
+                return ret;
 
-out:
-        kvfree(in);
-        return ret;
+        ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);
+
+        return 0;
 }
 
-void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
 {
-        mlx5_core_destroy_qp(mdev, qp);
+        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+        MLX5_SET(destroy_qp_in, in, qpn, qpn);
+        mlx5_cmd_exec_in(mdev, destroy_qp, in);
+}
+
+int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
+{
+        return mlx5e_refresh_tirs(priv, true, true);
+}
+
+int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
+{
+        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+        void *tisc;
+
+        tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+        MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
+
+        return mlx5e_create_tis(mdev, in, tisn);
 }
 
 static int mlx5i_init_tx(struct mlx5e_priv *priv)
@@ -219,13 +295,13 @@
         struct mlx5i_priv *ipriv = priv->ppriv;
         int err;
 
-        err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+        err = mlx5i_create_underlay_qp(priv);
         if (err) {
                 mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
                 return err;
         }
 
-        err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
+        err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
         if (err) {
                 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
                 goto err_destroy_underlay_qp;
@@ -234,7 +310,7 @@
         return 0;
 
 err_destroy_underlay_qp:
-        mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+        mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
         return err;
 }
 
@@ -242,8 +318,8 @@
 {
         struct mlx5i_priv *ipriv = priv->ppriv;
 
-        mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
-        mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+        mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
+        mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
 }
 
 static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
@@ -265,17 +341,6 @@
         }
 
         mlx5e_set_ttc_basic_params(priv, &ttc_params);
-        mlx5e_set_inner_ttc_ft_params(&ttc_params);
-        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
-                ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
-
-        err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
-        if (err) {
-                netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
-                           err);
-                goto err_destroy_arfs_tables;
-        }
-
         mlx5e_set_ttc_ft_params(&ttc_params);
         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
@@ -284,13 +349,11 @@
         if (err) {
                 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                            err);
-                goto err_destroy_inner_ttc_table;
+                goto err_destroy_arfs_tables;
         }
 
         return 0;
 
-err_destroy_inner_ttc_table:
-        mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 err_destroy_arfs_tables:
         mlx5e_arfs_destroy_tables(priv);
 
@@ -300,27 +363,35 @@
 static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
 {
         mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
-        mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
         mlx5e_arfs_destroy_tables(priv);
 }
 
 static int mlx5i_init_rx(struct mlx5e_priv *priv)
 {
+        struct mlx5_core_dev *mdev = priv->mdev;
         int err;
+
+        mlx5e_create_q_counters(priv);
+
+        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+        if (err) {
+                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+                goto err_destroy_q_counters;
+        }
 
         err = mlx5e_create_indirect_rqt(priv);
         if (err)
-                return err;
+                goto err_close_drop_rq;
 
-        err = mlx5e_create_direct_rqts(priv);
+        err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
         if (err)
                 goto err_destroy_indirect_rqts;
 
-        err = mlx5e_create_indirect_tirs(priv);
+        err = mlx5e_create_indirect_tirs(priv, false);
         if (err)
                 goto err_destroy_direct_rqts;
 
-        err = mlx5e_create_direct_tirs(priv);
+        err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
         if (err)
                 goto err_destroy_indirect_tirs;
 
@@ -331,23 +402,51 @@
         return 0;
 
 err_destroy_direct_tirs:
-        mlx5e_destroy_direct_tirs(priv);
+        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 err_destroy_indirect_tirs:
         mlx5e_destroy_indirect_tirs(priv);
 err_destroy_direct_rqts:
-        mlx5e_destroy_direct_rqts(priv);
+        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 err_destroy_indirect_rqts:
         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_close_drop_rq:
+        mlx5e_close_drop_rq(&priv->drop_rq);
+err_destroy_q_counters:
+        mlx5e_destroy_q_counters(priv);
         return err;
 }
 
 static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
         mlx5i_destroy_flow_steering(priv);
-        mlx5e_destroy_direct_tirs(priv);
+        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
         mlx5e_destroy_indirect_tirs(priv);
-        mlx5e_destroy_direct_rqts(priv);
+        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+        mlx5e_close_drop_rq(&priv->drop_rq);
+        mlx5e_destroy_q_counters(priv);
+}
+
+/* The stats groups order is opposite to the update_stats() order calls */
+static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
+        &MLX5E_STATS_GRP(sw),
+        &MLX5E_STATS_GRP(qcnt),
+        &MLX5E_STATS_GRP(vnic_env),
+        &MLX5E_STATS_GRP(vport),
+        &MLX5E_STATS_GRP(802_3),
+        &MLX5E_STATS_GRP(2863),
+        &MLX5E_STATS_GRP(2819),
+        &MLX5E_STATS_GRP(phy),
+        &MLX5E_STATS_GRP(pcie),
+        &MLX5E_STATS_GRP(per_prio),
+        &MLX5E_STATS_GRP(pme),
+        &MLX5E_STATS_GRP(channels),
+        &MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
+{
+        return ARRAY_SIZE(mlx5i_stats_grps);
 }
 
 static const struct mlx5e_profile mlx5i_nic_profile = {
@@ -359,12 +458,14 @@
         .cleanup_rx = mlx5i_cleanup_rx,
         .enable = NULL, /* mlx5i_enable */
         .disable = NULL, /* mlx5i_disable */
+        .update_rx = mlx5i_update_nic_rx,
         .update_stats = NULL, /* mlx5i_update_stats */
-        .max_nch = mlx5e_get_max_num_channels,
         .update_carrier = NULL, /* no HW update in IB link */
-        .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
-        .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+        .rx_handlers = &mlx5i_rx_handlers,
         .max_tc = MLX5I_MAX_NUM_TC,
+        .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+        .stats_grps = mlx5i_stats_grps,
+        .stats_grps_num = mlx5i_stats_grps_num,
 };
 
 /* mlx5i netdev NDos */
@@ -388,11 +489,11 @@
 
         new_channels.params = *params;
         new_channels.params.sw_mtu = new_mtu;
-        err = mlx5e_open_channels(priv, &new_channels);
+
+        err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
         if (err)
                 goto out;
 
-        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
         netdev->mtu = new_channels.params.sw_mtu;
 
 out:
@@ -406,12 +507,12 @@
         struct mlx5i_priv *ipriv = priv->ppriv;
 
         /* Set dev address using underlay QP */
-        dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
-        dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
-        dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+        dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
+        dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
+        dev->dev_addr[3] = (ipriv->qpn) & 0xff;
 
         /* Add QPN to net-device mapping to HT */
-        mlx5i_pkey_add_qpn(dev ,ipriv->qp.qpn);
+        mlx5i_pkey_add_qpn(dev, ipriv->qpn);
 
         return 0;
 }
@@ -438,7 +539,7 @@
         mlx5i_uninit_underlay_qp(priv);
 
         /* Delete QPN to net-device mapping from HT */
-        mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
+        mlx5i_pkey_del_qpn(dev, ipriv->qpn);
 }
 
 static int mlx5i_open(struct net_device *netdev)
@@ -458,7 +559,7 @@
                 goto err_clear_state_opened_flag;
         }
 
-        err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+        err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
         if (err) {
                 mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
                 goto err_reset_qp;
@@ -468,14 +569,14 @@
         if (err)
                 goto err_remove_fs_underlay_qp;
 
-        mlx5e_refresh_tirs(epriv, false);
+        epriv->profile->update_rx(epriv);
         mlx5e_activate_priv_channels(epriv);
 
         mutex_unlock(&epriv->state_lock);
         return 0;
 
 err_remove_fs_underlay_qp:
-        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
 err_reset_qp:
         mlx5i_uninit_underlay_qp(epriv);
 err_clear_state_opened_flag:
@@ -501,7 +602,7 @@
         clear_bit(MLX5E_STATE_OPENED, &epriv->state);
 
         netif_carrier_off(epriv->netdev);
-        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
         mlx5e_deactivate_priv_channels(epriv);
         mlx5e_close_channels(&epriv->channels);
         mlx5i_uninit_underlay_qp(epriv);
@@ -520,11 +621,12 @@
         struct mlx5i_priv *ipriv = epriv->ppriv;
         int err;
 
-        mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
-        err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
+        mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+                      gid->raw);
+        err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
         if (err)
                 mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
-                               ipriv->qp.qpn, gid->raw);
+                               ipriv->qpn, gid->raw);
 
         if (set_qkey) {
                 mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
@@ -543,12 +645,13 @@
         struct mlx5i_priv *ipriv = epriv->ppriv;
         int err;
 
-        mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+        mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+                      gid->raw);
 
-        err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
+        err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
         if (err)
                 mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
-                              ipriv->qp.qpn, gid->raw);
+                              ipriv->qpn, gid->raw);
 
         return err;
 }
@@ -561,7 +664,9 @@
         struct mlx5_ib_ah *mah = to_mah(address);
         struct mlx5i_priv *ipriv = epriv->ppriv;
 
-        return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
+        mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+
+        return NETDEV_TX_OK;
 }
 
 static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
@@ -592,7 +697,6 @@
 
         mlx5e_detach_netdev(priv);
         profile->cleanup(priv);
-        destroy_workqueue(priv->wq);
 
         if (!ipriv->sub_interface) {
                 mlx5i_pkey_qpn_ht_cleanup(netdev);
@@ -600,58 +704,37 @@
         }
 }
 
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
-                                          struct ib_device *ibdev,
-                                          const char *name,
-                                          void (*setup)(struct net_device *))
+static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
 {
-        const struct mlx5e_profile *profile;
-        struct net_device *netdev;
+        return mdev->mlx5e_res.pdn != 0;
+}
+
+static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
+{
+        if (mlx5_is_sub_interface(mdev))
+                return mlx5i_pkey_get_profile();
+        return &mlx5i_nic_profile;
+}
+
+static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+                              struct net_device *netdev, void *param)
+{
+        struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
+        const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
         struct mlx5i_priv *ipriv;
         struct mlx5e_priv *epriv;
         struct rdma_netdev *rn;
-        bool sub_interface;
-        int nch;
         int err;
-
-        if (mlx5i_check_required_hca_cap(mdev)) {
-                mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
-                return ERR_PTR(-EOPNOTSUPP);
-        }
-
-        /* TODO: Need to find a better way to check if child device*/
-        sub_interface = (mdev->mlx5e_res.pdn != 0);
-
-        if (sub_interface)
-                profile = mlx5i_pkey_get_profile();
-        else
-                profile = &mlx5i_nic_profile;
-
-        nch = profile->max_nch(mdev);
-
-        netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
-                                  name, NET_NAME_UNKNOWN,
-                                  setup,
-                                  nch * MLX5E_MAX_NUM_TC,
-                                  nch);
-        if (!netdev) {
-                mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
-                return NULL;
-        }
 
         ipriv = netdev_priv(netdev);
         epriv = mlx5i_epriv(netdev);
 
-        epriv->wq = create_singlethread_workqueue("mlx5i");
-        if (!epriv->wq)
-                goto err_free_netdev;
-
-        ipriv->sub_interface = sub_interface;
+        ipriv->sub_interface = mlx5_is_sub_interface(mdev);
         if (!ipriv->sub_interface) {
                 err = mlx5i_pkey_qpn_ht_init(netdev);
                 if (err) {
                         mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
-                        goto destroy_wq;
+                        return err;
                 }
 
                 /* This should only be called once per mdev */
@@ -660,7 +743,7 @@
                 goto destroy_ht;
         }
 
-        profile->init(mdev, netdev, profile, ipriv);
+        prof->init(mdev, netdev, prof, ipriv);
 
         err = mlx5e_attach_netdev(epriv);
         if (err)
@@ -678,20 +761,40 @@
         netdev->priv_destructor = mlx5_rdma_netdev_free;
         netdev->needs_free_netdev = 1;
 
-        return netdev;
+        return 0;
 
 detach:
-        profile->cleanup(epriv);
+        prof->cleanup(epriv);
         if (ipriv->sub_interface)
-                return NULL;
+                return err;
         mlx5e_destroy_mdev_resources(mdev);
 destroy_ht:
         mlx5i_pkey_qpn_ht_cleanup(netdev);
-destroy_wq:
-        destroy_workqueue(epriv->wq);
-err_free_netdev:
-        free_netdev(netdev);
-
-        return NULL;
+        return err;
 }
-EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
+
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+                            struct ib_device *device,
+                            struct rdma_netdev_alloc_params *params)
+{
+        int nch;
+        int rc;
+
+        rc = mlx5i_check_required_hca_cap(mdev);
+        if (rc)
+                return rc;
+
+        nch = mlx5e_get_max_num_channels(mdev);
+
+        *params = (struct rdma_netdev_alloc_params){
+                .sizeof_priv = sizeof(struct mlx5i_priv) +
+                               sizeof(struct mlx5e_priv),
+                .txqs = nch * MLX5E_MAX_NUM_TC,
+                .rxqs = nch,
+                .param = mdev,
+                .initialize_rdma_netdev = mlx5_rdma_setup_rn,
+        };
+
+        return 0;
+}
+EXPORT_SYMBOL(mlx5_rdma_rn_get_params);