2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -45,6 +45,7 @@
 static const struct net_device_ops mlx5i_netdev_ops = {
 	.ndo_open = mlx5i_open,
 	.ndo_stop = mlx5i_close,
+	.ndo_get_stats64 = mlx5i_get_stats,
 	.ndo_init = mlx5i_dev_init,
 	.ndo_uninit = mlx5i_dev_cleanup,
 	.ndo_change_mtu = mlx5i_change_mtu,
@@ -67,29 +68,27 @@

 	params->lro_en = false;
 	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+	params->tunneled_offload_en = false;
 }

 /* Called directly after IPoIB netdevice was created to initialize SW structs */
-void mlx5i_init(struct mlx5_core_dev *mdev,
-		struct net_device *netdev,
-		const struct mlx5e_profile *profile,
-		void *ppriv)
+int mlx5i_init(struct mlx5_core_dev *mdev,
+	       struct net_device *netdev,
+	       const struct mlx5e_profile *profile,
+	       void *ppriv)
 {
 	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
-	u16 max_mtu;
+	int err;

-	/* priv init */
-	priv->mdev = mdev;
-	priv->netdev = netdev;
-	priv->profile = profile;
-	priv->ppriv = ppriv;
-	mutex_init(&priv->state_lock);
+	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+	if (err)
+		return err;

-	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
-	netdev->mtu = max_mtu;
+	mlx5e_set_netdev_mtu_boundaries(priv);
+	netdev->mtu = netdev->max_mtu;

-	mlx5e_build_nic_params(mdev, &priv->channels.params,
-			       profile->max_nch(mdev), netdev->mtu);
+	mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
+			       netdev->mtu);
 	mlx5i_build_nic_params(mdev, &priv->channels.params);

 	mlx5e_timestamp_init(priv);
@@ -106,57 +105,109 @@

 	netdev->netdev_ops = &mlx5i_netdev_ops;
 	netdev->ethtool_ops = &mlx5i_ethtool_ops;
+
+	return 0;
 }

 /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
-static void mlx5i_cleanup(struct mlx5e_priv *priv)
+void mlx5i_cleanup(struct mlx5e_priv *priv)
 {
-	/* Do nothing .. */
+	mlx5e_netdev_cleanup(priv->netdev, priv);
+}
+
+static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_sw_stats s = { 0 };
+	int i, j;
+
+	for (i = 0; i < priv->max_nch; i++) {
+		struct mlx5e_channel_stats *channel_stats;
+		struct mlx5e_rq_stats *rq_stats;
+
+		channel_stats = &priv->channel_stats[i];
+		rq_stats = &channel_stats->rq;
+
+		s.rx_packets += rq_stats->packets;
+		s.rx_bytes += rq_stats->bytes;
+
+		for (j = 0; j < priv->max_opened_tc; j++) {
+			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
+
+			s.tx_packets += sq_stats->packets;
+			s.tx_bytes += sq_stats->bytes;
+			s.tx_queue_dropped += sq_stats->dropped;
+		}
+	}
+
+	memcpy(&priv->stats.sw, &s, sizeof(s));
+}
+
+void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = mlx5i_epriv(dev);
+	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+
+	mlx5i_grp_sw_update_stats(priv);
+
+	stats->rx_packets = sstats->rx_packets;
+	stats->rx_bytes = sstats->rx_bytes;
+	stats->tx_packets = sstats->tx_packets;
+	stats->tx_bytes = sstats->tx_bytes;
+	stats->tx_dropped = sstats->tx_queue_dropped;
 }

 int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5i_priv *ipriv = priv->ppriv;
-	struct mlx5_core_qp *qp = &ipriv->qp;
-	struct mlx5_qp_context *context;
 	int ret;

-	/* QP states */
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return -ENOMEM;
+	{
+		u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+		u32 *qpc;

-	context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
-	context->pri_path.port = 1;
-	context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
-	context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+		qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

-	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
-	if (ret) {
-		mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
-		goto err_qp_modify_to_err;
+		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+		MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
+			 ipriv->pkey_index);
+		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+		MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);
+
+		MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+		MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
+		ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
+		if (ret)
+			goto err_qp_modify_to_err;
 	}
-	memset(context, 0, sizeof(*context));
+	{
+		u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};

-	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
-	if (ret) {
-		mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
-		goto err_qp_modify_to_err;
+		MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+		MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
+		ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
+		if (ret)
+			goto err_qp_modify_to_err;
 	}
+	{
+		u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};

-	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
-	if (ret) {
-		mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
-		goto err_qp_modify_to_err;
+		MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+		MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
+		ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
+		if (ret)
+			goto err_qp_modify_to_err;
 	}
-
-	kfree(context);
 	return 0;

 err_qp_modify_to_err:
-	mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp);
-	kfree(context);
+	{
+		u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};
+
+		MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
+		MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
+		mlx5_cmd_exec_in(mdev, qp_2err, in);
+	}
 	return ret;
 }

@@ -164,29 +215,30 @@
 {
 	struct mlx5i_priv *ipriv = priv->ppriv;
 	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_qp_context context;
-	int err;
+	u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

-	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
-				  &ipriv->qp);
-	if (err)
-		mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
+	MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
+	MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
+	mlx5_cmd_exec_in(mdev, qp_2rst, in);
 }

 #define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

-int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
 {
-	u32 *in = NULL;
+	unsigned char *dev_addr = priv->netdev->dev_addr;
+	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
+	struct mlx5i_priv *ipriv = priv->ppriv;
 	void *addr_path;
+	int qpn = 0;
 	int ret = 0;
-	int inlen;
 	void *qpc;

-	inlen = MLX5_ST_SZ_BYTES(create_qp_in);
-	in = kvzalloc(inlen, GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
+	if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
+		qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
+		MLX5_SET(create_qp_in, in, input_qpn, qpn);
+	}

 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
@@ -198,20 +250,40 @@
 	MLX5_SET(ads, addr_path, vhca_port_num, 1);
 	MLX5_SET(ads, addr_path, grh, 1);

-	ret = mlx5_core_create_qp(mdev, qp, in, inlen);
-	if (ret) {
-		mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
-		goto out;
-	}
+	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+	ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
+	if (ret)
+		return ret;

-out:
-	kvfree(in);
-	return ret;
+	ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);
+
+	return 0;
 }

-void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
 {
-	mlx5_core_destroy_qp(mdev, qp);
+	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+	MLX5_SET(destroy_qp_in, in, qpn, qpn);
+	mlx5_cmd_exec_in(mdev, destroy_qp, in);
+}
+
+int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
+{
+	return mlx5e_refresh_tirs(priv, true, true);
+}
+
+int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
+{
+	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+	void *tisc;
+
+	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
+
+	return mlx5e_create_tis(mdev, in, tisn);
 }

 static int mlx5i_init_tx(struct mlx5e_priv *priv)
@@ -219,13 +291,13 @@
 	struct mlx5i_priv *ipriv = priv->ppriv;
 	int err;

-	err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+	err = mlx5i_create_underlay_qp(priv);
 	if (err) {
 		mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
 		return err;
 	}

-	err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
+	err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
 	if (err) {
 		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
 		goto err_destroy_underlay_qp;
@@ -234,7 +306,7 @@
 	return 0;

 err_destroy_underlay_qp:
-	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
 	return err;
 }

@@ -242,8 +314,8 @@
 {
 	struct mlx5i_priv *ipriv = priv->ppriv;

-	mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
-	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+	mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
+	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
 }

 static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
@@ -265,17 +337,6 @@
 	}

 	mlx5e_set_ttc_basic_params(priv, &ttc_params);
-	mlx5e_set_inner_ttc_ft_params(&ttc_params);
-	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
-		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
-
-	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
-	if (err) {
-		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
-			   err);
-		goto err_destroy_arfs_tables;
-	}
-
 	mlx5e_set_ttc_ft_params(&ttc_params);
 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
 		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
@@ -284,13 +345,11 @@
 	if (err) {
 		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
 			   err);
-		goto err_destroy_inner_ttc_table;
+		goto err_destroy_arfs_tables;
 	}

 	return 0;

-err_destroy_inner_ttc_table:
-	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 err_destroy_arfs_tables:
 	mlx5e_arfs_destroy_tables(priv);

@@ -300,27 +359,35 @@
 static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
 {
 	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
-	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 	mlx5e_arfs_destroy_tables(priv);
 }

 static int mlx5i_init_rx(struct mlx5e_priv *priv)
 {
+	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
+
+	mlx5e_create_q_counters(priv);
+
+	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+	if (err) {
+		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+		goto err_destroy_q_counters;
+	}

 	err = mlx5e_create_indirect_rqt(priv);
 	if (err)
-		return err;
+		goto err_close_drop_rq;

-	err = mlx5e_create_direct_rqts(priv);
+	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
 	if (err)
 		goto err_destroy_indirect_rqts;

-	err = mlx5e_create_indirect_tirs(priv);
+	err = mlx5e_create_indirect_tirs(priv, false);
 	if (err)
 		goto err_destroy_direct_rqts;

-	err = mlx5e_create_direct_tirs(priv);
+	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
 	if (err)
 		goto err_destroy_indirect_tirs;

@@ -331,23 +398,51 @@
 	return 0;

 err_destroy_direct_tirs:
-	mlx5e_destroy_direct_tirs(priv);
+	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 err_destroy_indirect_tirs:
 	mlx5e_destroy_indirect_tirs(priv);
 err_destroy_direct_rqts:
-	mlx5e_destroy_direct_rqts(priv);
+	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 err_destroy_indirect_rqts:
 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_close_drop_rq:
+	mlx5e_close_drop_rq(&priv->drop_rq);
+err_destroy_q_counters:
+	mlx5e_destroy_q_counters(priv);
 	return err;
 }

 static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
 	mlx5i_destroy_flow_steering(priv);
-	mlx5e_destroy_direct_tirs(priv);
+	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 	mlx5e_destroy_indirect_tirs(priv);
-	mlx5e_destroy_direct_rqts(priv);
+	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+	mlx5e_close_drop_rq(&priv->drop_rq);
+	mlx5e_destroy_q_counters(priv);
+}
+
+/* The stats groups order is opposite to the update_stats() order calls */
+static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
+	&MLX5E_STATS_GRP(sw),
+	&MLX5E_STATS_GRP(qcnt),
+	&MLX5E_STATS_GRP(vnic_env),
+	&MLX5E_STATS_GRP(vport),
+	&MLX5E_STATS_GRP(802_3),
+	&MLX5E_STATS_GRP(2863),
+	&MLX5E_STATS_GRP(2819),
+	&MLX5E_STATS_GRP(phy),
+	&MLX5E_STATS_GRP(pcie),
+	&MLX5E_STATS_GRP(per_prio),
+	&MLX5E_STATS_GRP(pme),
+	&MLX5E_STATS_GRP(channels),
+	&MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
+{
+	return ARRAY_SIZE(mlx5i_stats_grps);
 }

 static const struct mlx5e_profile mlx5i_nic_profile = {
@@ -359,12 +454,14 @@
 	.cleanup_rx = mlx5i_cleanup_rx,
 	.enable = NULL, /* mlx5i_enable */
 	.disable = NULL, /* mlx5i_disable */
+	.update_rx = mlx5i_update_nic_rx,
 	.update_stats = NULL, /* mlx5i_update_stats */
-	.max_nch = mlx5e_get_max_num_channels,
 	.update_carrier = NULL, /* no HW update in IB link */
-	.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
-	.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+	.rx_handlers = &mlx5i_rx_handlers,
 	.max_tc = MLX5I_MAX_NUM_TC,
+	.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+	.stats_grps = mlx5i_stats_grps,
+	.stats_grps_num = mlx5i_stats_grps_num,
 };

 /* mlx5i netdev NDos */
@@ -388,11 +485,11 @@

 	new_channels.params = *params;
 	new_channels.params.sw_mtu = new_mtu;
-	err = mlx5e_open_channels(priv, &new_channels);
+
+	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 	if (err)
 		goto out;

-	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 	netdev->mtu = new_channels.params.sw_mtu;

 out:
@@ -406,12 +503,12 @@
 	struct mlx5i_priv *ipriv = priv->ppriv;

 	/* Set dev address using underlay QP */
-	dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
-	dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
-	dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+	dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
+	dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
+	dev->dev_addr[3] = (ipriv->qpn) & 0xff;

 	/* Add QPN to net-device mapping to HT */
-	mlx5i_pkey_add_qpn(dev ,ipriv->qp.qpn);
+	mlx5i_pkey_add_qpn(dev, ipriv->qpn);

 	return 0;
 }
@@ -438,7 +535,7 @@
 	mlx5i_uninit_underlay_qp(priv);

 	/* Delete QPN to net-device mapping from HT */
-	mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
+	mlx5i_pkey_del_qpn(dev, ipriv->qpn);
 }

 static int mlx5i_open(struct net_device *netdev)
@@ -458,7 +555,7 @@
 		goto err_clear_state_opened_flag;
 	}

-	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
 	if (err) {
 		mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
 		goto err_reset_qp;
@@ -468,14 +565,14 @@
 	if (err)
 		goto err_remove_fs_underlay_qp;

-	mlx5e_refresh_tirs(epriv, false);
+	epriv->profile->update_rx(epriv);
 	mlx5e_activate_priv_channels(epriv);

 	mutex_unlock(&epriv->state_lock);
 	return 0;

 err_remove_fs_underlay_qp:
-	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
 err_reset_qp:
 	mlx5i_uninit_underlay_qp(epriv);
 err_clear_state_opened_flag:
@@ -501,7 +598,7 @@
 	clear_bit(MLX5E_STATE_OPENED, &epriv->state);

 	netif_carrier_off(epriv->netdev);
-	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
 	mlx5e_deactivate_priv_channels(epriv);
 	mlx5e_close_channels(&epriv->channels);
 	mlx5i_uninit_underlay_qp(epriv);
@@ -520,11 +617,12 @@
 	struct mlx5i_priv *ipriv = epriv->ppriv;
 	int err;

-	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
-	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
+	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+		      gid->raw);
+	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
 	if (err)
 		mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
-			       ipriv->qp.qpn, gid->raw);
+			       ipriv->qpn, gid->raw);

 	if (set_qkey) {
 		mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
@@ -543,12 +641,13 @@
 	struct mlx5i_priv *ipriv = epriv->ppriv;
 	int err;

-	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+		      gid->raw);

-	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
+	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
 	if (err)
 		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
-		              ipriv->qp.qpn, gid->raw);
+			      ipriv->qpn, gid->raw);

 	return err;
 }
@@ -561,7 +660,9 @@
 	struct mlx5_ib_ah *mah = to_mah(address);
 	struct mlx5i_priv *ipriv = epriv->ppriv;

-	return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
+	mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+
+	return NETDEV_TX_OK;
 }

 static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
@@ -592,7 +693,6 @@

 	mlx5e_detach_netdev(priv);
 	profile->cleanup(priv);
-	destroy_workqueue(priv->wq);

 	if (!ipriv->sub_interface) {
 		mlx5i_pkey_qpn_ht_cleanup(netdev);
@@ -600,58 +700,37 @@
 	}
 }

-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
-					  struct ib_device *ibdev,
-					  const char *name,
-					  void (*setup)(struct net_device *))
+static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
 {
-	const struct mlx5e_profile *profile;
-	struct net_device *netdev;
+	return mdev->mlx5e_res.pdn != 0;
+}
+
+static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
+{
+	if (mlx5_is_sub_interface(mdev))
+		return mlx5i_pkey_get_profile();
+	return &mlx5i_nic_profile;
+}
+
+static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+			      struct net_device *netdev, void *param)
+{
+	struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
+	const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
 	struct mlx5i_priv *ipriv;
 	struct mlx5e_priv *epriv;
 	struct rdma_netdev *rn;
-	bool sub_interface;
-	int nch;
 	int err;
-
-	if (mlx5i_check_required_hca_cap(mdev)) {
-		mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
-		return ERR_PTR(-EOPNOTSUPP);
-	}
-
-	/* TODO: Need to find a better way to check if child device*/
-	sub_interface = (mdev->mlx5e_res.pdn != 0);
-
-	if (sub_interface)
-		profile = mlx5i_pkey_get_profile();
-	else
-		profile = &mlx5i_nic_profile;
-
-	nch = profile->max_nch(mdev);
-
-	netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
-				  name, NET_NAME_UNKNOWN,
-				  setup,
-				  nch * MLX5E_MAX_NUM_TC,
-				  nch);
-	if (!netdev) {
-		mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
-		return NULL;
-	}

 	ipriv = netdev_priv(netdev);
 	epriv = mlx5i_epriv(netdev);

-	epriv->wq = create_singlethread_workqueue("mlx5i");
-	if (!epriv->wq)
-		goto err_free_netdev;
-
-	ipriv->sub_interface = sub_interface;
+	ipriv->sub_interface = mlx5_is_sub_interface(mdev);
 	if (!ipriv->sub_interface) {
 		err = mlx5i_pkey_qpn_ht_init(netdev);
 		if (err) {
 			mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
-			goto destroy_wq;
+			return err;
 		}

 		/* This should only be called once per mdev */
@@ -660,7 +739,7 @@
 			goto destroy_ht;
 	}

-	profile->init(mdev, netdev, profile, ipriv);
+	prof->init(mdev, netdev, prof, ipriv);

 	err = mlx5e_attach_netdev(epriv);
 	if (err)
@@ -678,20 +757,40 @@
 	netdev->priv_destructor = mlx5_rdma_netdev_free;
 	netdev->needs_free_netdev = 1;

-	return netdev;
+	return 0;

 detach:
-	profile->cleanup(epriv);
+	prof->cleanup(epriv);
 	if (ipriv->sub_interface)
-		return NULL;
+		return err;
 	mlx5e_destroy_mdev_resources(mdev);
 destroy_ht:
 	mlx5i_pkey_qpn_ht_cleanup(netdev);
-destroy_wq:
-	destroy_workqueue(epriv->wq);
-err_free_netdev:
-	free_netdev(netdev);
-
-	return NULL;
+	return err;
 }
-EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
+
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+			    struct ib_device *device,
+			    struct rdma_netdev_alloc_params *params)
+{
+	int nch;
+	int rc;
+
+	rc = mlx5i_check_required_hca_cap(mdev);
+	if (rc)
+		return rc;
+
+	nch = mlx5e_get_max_num_channels(mdev);
+
+	*params = (struct rdma_netdev_alloc_params){
+		.sizeof_priv = sizeof(struct mlx5i_priv) +
+			       sizeof(struct mlx5e_priv),
+		.txqs = nch * MLX5E_MAX_NUM_TC,
+		.rxqs = nch,
+		.param = mdev,
+		.initialize_rdma_netdev = mlx5_rdma_setup_rn,
+	};
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_rdma_rn_get_params);
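
For context on the last hunk: the exported mlx5_rdma_netdev_alloc(), which allocated the IPoIB net_device itself, is replaced by mlx5_rdma_rn_get_params(), which only fills a struct rdma_netdev_alloc_params (priv size, queue counts, and the mlx5_rdma_setup_rn() initializer) so the RDMA core can perform the allocation and then call the initializer. The sketch below shows how a provider-side op might forward to this helper; mlx5_ib_rn_get_params, enum rdma_netdev_t, RDMA_NETDEV_IPOIB and to_mdev() are recalled from the upstream RDMA/mlx5_ib code and are assumptions, not part of this diff.

/* Sketch (not part of this patch): provider op forwarding to the helper
 * exported above. Names outside this diff are assumed from the upstream tree.
 */
static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
				 enum rdma_netdev_t type,
				 struct rdma_netdev_alloc_params *params)
{
	/* Only the IPoIB flavour of rdma_netdev is backed by this helper. */
	if (type != RDMA_NETDEV_IPOIB)
		return -EOPNOTSUPP;

	/* The RDMA core allocates the net_device from the reported
	 * sizeof_priv/txqs/rxqs and then invokes
	 * params->initialize_rdma_netdev (mlx5_rdma_setup_rn) on it,
	 * replacing the removed mlx5_rdma_netdev_alloc() flow.
	 */
	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
}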