 .. |  .. |
 45 |  45 | static const struct net_device_ops mlx5i_netdev_ops = {
 46 |  46 | 	.ndo_open = mlx5i_open,
 47 |  47 | 	.ndo_stop = mlx5i_close,
    |  48 | +	.ndo_get_stats64 = mlx5i_get_stats,
 48 |  49 | 	.ndo_init = mlx5i_dev_init,
 49 |  50 | 	.ndo_uninit = mlx5i_dev_cleanup,
 50 |  51 | 	.ndo_change_mtu = mlx5i_change_mtu,
 .. |  .. |
 67 |  68 |
 68 |  69 | 	params->lro_en = false;
 69 |  70 | 	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
    |  71 | +	params->tunneled_offload_en = false;
 70 |  72 | }
 71 |  73 |
 72 |  74 | /* Called directly after IPoIB netdevice was created to initialize SW structs */
 73 |     | -void mlx5i_init(struct mlx5_core_dev *mdev,
 74 |     | -		struct net_device *netdev,
 75 |     | -		const struct mlx5e_profile *profile,
 76 |     | -		void *ppriv)
    |  75 | +int mlx5i_init(struct mlx5_core_dev *mdev,
    |  76 | +		struct net_device *netdev,
    |  77 | +		const struct mlx5e_profile *profile,
    |  78 | +		void *ppriv)
 77 |  79 | {
 78 |  80 | 	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
 79 |     | -	u16 max_mtu;
    |  81 | +	int err;
 80 |  82 |
 81 |     | -	/* priv init */
 82 |     | -	priv->mdev = mdev;
 83 |     | -	priv->netdev = netdev;
 84 |     | -	priv->profile = profile;
 85 |     | -	priv->ppriv = ppriv;
 86 |     | -	mutex_init(&priv->state_lock);
    |  83 | +	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
    |  84 | +	if (err)
    |  85 | +		return err;
 87 |  86 |
 88 |     | -	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 89 |     | -	netdev->mtu = max_mtu;
    |  87 | +	mlx5e_set_netdev_mtu_boundaries(priv);
    |  88 | +	netdev->mtu = netdev->max_mtu;
 90 |  89 |
 91 |     | -	mlx5e_build_nic_params(mdev, &priv->channels.params,
 92 |     | -		profile->max_nch(mdev), netdev->mtu);
    |  90 | +	mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
    |  91 | +		netdev->mtu);
 93 |  92 | 	mlx5i_build_nic_params(mdev, &priv->channels.params);
 94 |  93 |
 95 |  94 | 	mlx5e_timestamp_init(priv);
 .. |  .. |
106 | 105 |
107 | 106 | 	netdev->netdev_ops = &mlx5i_netdev_ops;
108 | 107 | 	netdev->ethtool_ops = &mlx5i_ethtool_ops;
    | 108 | +
    | 109 | +	return 0;
109 | 110 | }
110 | 111 |
111 | 112 | /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
112 |     | -static void mlx5i_cleanup(struct mlx5e_priv *priv)
    | 113 | +void mlx5i_cleanup(struct mlx5e_priv *priv)
113 | 114 | {
114 |     | -	/* Do nothing .. */
    | 115 | +	mlx5e_netdev_cleanup(priv->netdev, priv);
    | 116 | +}
    | 117 | +
    | 118 | +static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
    | 119 | +{
    | 120 | +	struct mlx5e_sw_stats s = { 0 };
    | 121 | +	int i, j;
    | 122 | +
    | 123 | +	for (i = 0; i < priv->max_nch; i++) {
    | 124 | +		struct mlx5e_channel_stats *channel_stats;
    | 125 | +		struct mlx5e_rq_stats *rq_stats;
    | 126 | +
    | 127 | +		channel_stats = &priv->channel_stats[i];
    | 128 | +		rq_stats = &channel_stats->rq;
    | 129 | +
    | 130 | +		s.rx_packets += rq_stats->packets;
    | 131 | +		s.rx_bytes += rq_stats->bytes;
    | 132 | +
    | 133 | +		for (j = 0; j < priv->max_opened_tc; j++) {
    | 134 | +			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
    | 135 | +
    | 136 | +			s.tx_packets += sq_stats->packets;
    | 137 | +			s.tx_bytes += sq_stats->bytes;
    | 138 | +			s.tx_queue_dropped += sq_stats->dropped;
    | 139 | +		}
    | 140 | +	}
    | 141 | +
    | 142 | +	memcpy(&priv->stats.sw, &s, sizeof(s));
    | 143 | +}
    | 144 | +
    | 145 | +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
    | 146 | +{
    | 147 | +	struct mlx5e_priv *priv = mlx5i_epriv(dev);
    | 148 | +	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
    | 149 | +
    | 150 | +	mlx5i_grp_sw_update_stats(priv);
    | 151 | +
    | 152 | +	stats->rx_packets = sstats->rx_packets;
    | 153 | +	stats->rx_bytes = sstats->rx_bytes;
    | 154 | +	stats->tx_packets = sstats->tx_packets;
    | 155 | +	stats->tx_bytes = sstats->tx_bytes;
    | 156 | +	stats->tx_dropped = sstats->tx_queue_dropped;
115 | 157 | }
116 | 158 |
117 | 159 | int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
118 | 160 | {
119 | 161 | 	struct mlx5_core_dev *mdev = priv->mdev;
120 | 162 | 	struct mlx5i_priv *ipriv = priv->ppriv;
121 |     | -	struct mlx5_core_qp *qp = &ipriv->qp;
122 |     | -	struct mlx5_qp_context *context;
123 | 163 | 	int ret;
124 | 164 |
125 |     | -	/* QP states */
126 |     | -	context = kzalloc(sizeof(*context), GFP_KERNEL);
127 |     | -	if (!context)
128 |     | -		return -ENOMEM;
    | 165 | +	{
    | 166 | +		u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
    | 167 | +		u32 *qpc;
129 | 168 |
130 |     | -	context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
131 |     | -	context->pri_path.port = 1;
132 |     | -	context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
133 |     | -	context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
    | 169 | +		qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
134 | 170 |
135 |     | -	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
136 |     | -	if (ret) {
137 |     | -		mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
138 |     | -		goto err_qp_modify_to_err;
    | 171 | +		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
    | 172 | +		MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
    | 173 | +			 ipriv->pkey_index);
    | 174 | +		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
    | 175 | +		MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);
    | 176 | +
    | 177 | +		MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
    | 178 | +		MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
    | 179 | +		ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
    | 180 | +		if (ret)
    | 181 | +			goto err_qp_modify_to_err;
139 | 182 | 	}
140 |     | -	memset(context, 0, sizeof(*context));
    | 183 | +	{
    | 184 | +		u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
141 | 185 |
142 |     | -	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
143 |     | -	if (ret) {
144 |     | -		mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
145 |     | -		goto err_qp_modify_to_err;
    | 186 | +		MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
    | 187 | +		MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
    | 188 | +		ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
    | 189 | +		if (ret)
    | 190 | +			goto err_qp_modify_to_err;
146 | 191 | 	}
    | 192 | +	{
    | 193 | +		u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
147 | 194 |
148 |     | -	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
149 |     | -	if (ret) {
150 |     | -		mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
151 |     | -		goto err_qp_modify_to_err;
    | 195 | +		MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
    | 196 | +		MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
    | 197 | +		ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
    | 198 | +		if (ret)
    | 199 | +			goto err_qp_modify_to_err;
152 | 200 | 	}
153 |     | -
154 |     | -	kfree(context);
155 | 201 | 	return 0;
156 | 202 |
157 | 203 | err_qp_modify_to_err:
158 |     | -	mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp);
159 |     | -	kfree(context);
    | 204 | +	{
    | 205 | +		u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};
    | 206 | +
    | 207 | +		MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
    | 208 | +		MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
    | 209 | +		mlx5_cmd_exec_in(mdev, qp_2err, in);
    | 210 | +	}
160 | 211 | 	return ret;
161 | 212 | }
162 | 213 |
 .. |  .. |
164 | 215 | {
165 | 216 | 	struct mlx5i_priv *ipriv = priv->ppriv;
166 | 217 | 	struct mlx5_core_dev *mdev = priv->mdev;
167 |     | -	struct mlx5_qp_context context;
168 |     | -	int err;
    | 218 | +	u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
169 | 219 |
170 |     | -	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
171 |     | -		&ipriv->qp);
172 |     | -	if (err)
173 |     | -		mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
    | 220 | +	MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
    | 221 | +	MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
    | 222 | +	mlx5_cmd_exec_in(mdev, qp_2rst, in);
174 | 223 | }
175 | 224 |
176 | 225 | #define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
177 | 226 |
178 |     | -int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
    | 227 | +int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
179 | 228 | {
180 |     | -	u32 *in = NULL;
    | 229 | +	unsigned char *dev_addr = priv->netdev->dev_addr;
    | 230 | +	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
    | 231 | +	u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
    | 232 | +	struct mlx5i_priv *ipriv = priv->ppriv;
181 | 233 | 	void *addr_path;
    | 234 | +	int qpn = 0;
182 | 235 | 	int ret = 0;
183 |     | -	int inlen;
184 | 236 | 	void *qpc;
185 | 237 |
186 |     | -	inlen = MLX5_ST_SZ_BYTES(create_qp_in);
187 |     | -	in = kvzalloc(inlen, GFP_KERNEL);
188 |     | -	if (!in)
189 |     | -		return -ENOMEM;
    | 238 | +	if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
    | 239 | +		qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
    | 240 | +		MLX5_SET(create_qp_in, in, input_qpn, qpn);
    | 241 | +	}
190 | 242 |
191 | 243 | 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
192 | 244 | 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
 .. |  .. |
198 | 250 | 	MLX5_SET(ads, addr_path, vhca_port_num, 1);
199 | 251 | 	MLX5_SET(ads, addr_path, grh, 1);
200 | 252 |
201 |     | -	ret = mlx5_core_create_qp(mdev, qp, in, inlen);
202 |     | -	if (ret) {
203 |     | -		mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
204 |     | -		goto out;
205 |     | -	}
    | 253 | +	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
    | 254 | +	ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
    | 255 | +	if (ret)
    | 256 | +		return ret;
206 | 257 |
207 |     | -out:
208 |     | -	kvfree(in);
209 |     | -	return ret;
    | 258 | +	ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);
    | 259 | +
    | 260 | +	return 0;
210 | 261 | }
211 | 262 |
212 |     | -void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
    | 263 | +void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
213 | 264 | {
214 |     | -	mlx5_core_destroy_qp(mdev, qp);
    | 265 | +	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
    | 266 | +
    | 267 | +	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
    | 268 | +	MLX5_SET(destroy_qp_in, in, qpn, qpn);
    | 269 | +	mlx5_cmd_exec_in(mdev, destroy_qp, in);
    | 270 | +}
    | 271 | +
    | 272 | +int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
    | 273 | +{
    | 274 | +	return mlx5e_refresh_tirs(priv, true, true);
    | 275 | +}
    | 276 | +
    | 277 | +int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
    | 278 | +{
    | 279 | +	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
    | 280 | +	void *tisc;
    | 281 | +
    | 282 | +	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
    | 283 | +
    | 284 | +	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
    | 285 | +
    | 286 | +	return mlx5e_create_tis(mdev, in, tisn);
215 | 287 | }
216 | 288 |
217 | 289 | static int mlx5i_init_tx(struct mlx5e_priv *priv)
 .. |  .. |
219 | 291 | 	struct mlx5i_priv *ipriv = priv->ppriv;
220 | 292 | 	int err;
221 | 293 |
222 |     | -	err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
    | 294 | +	err = mlx5i_create_underlay_qp(priv);
223 | 295 | 	if (err) {
224 | 296 | 		mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
225 | 297 | 		return err;
226 | 298 | 	}
227 | 299 |
228 |     | -	err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
    | 300 | +	err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
229 | 301 | 	if (err) {
230 | 302 | 		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
231 | 303 | 		goto err_destroy_underlay_qp;
 .. |  .. |
234 | 306 | 	return 0;
235 | 307 |
236 | 308 | err_destroy_underlay_qp:
237 |     | -	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
    | 309 | +	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
238 | 310 | 	return err;
239 | 311 | }
240 | 312 |
 .. |  .. |
242 | 314 | {
243 | 315 | 	struct mlx5i_priv *ipriv = priv->ppriv;
244 | 316 |
245 |     | -	mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
246 |     | -	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
    | 317 | +	mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
    | 318 | +	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
247 | 319 | }
248 | 320 |
249 | 321 | static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
 .. |  .. |
265 | 337 | 	}
266 | 338 |
267 | 339 | 	mlx5e_set_ttc_basic_params(priv, &ttc_params);
268 |     | -	mlx5e_set_inner_ttc_ft_params(&ttc_params);
269 |     | -	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
270 |     | -		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
271 |     | -
272 |     | -	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
273 |     | -	if (err) {
274 |     | -		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
275 |     | -			err);
276 |     | -		goto err_destroy_arfs_tables;
277 |     | -	}
278 |     | -
279 | 340 | 	mlx5e_set_ttc_ft_params(&ttc_params);
280 | 341 | 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
281 | 342 | 		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
 .. |  .. |
284 | 345 | 	if (err) {
285 | 346 | 		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
286 | 347 | 			err);
287 |     | -		goto err_destroy_inner_ttc_table;
    | 348 | +		goto err_destroy_arfs_tables;
288 | 349 | 	}
289 | 350 |
290 | 351 | 	return 0;
291 | 352 |
292 |     | -err_destroy_inner_ttc_table:
293 |     | -	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
294 | 353 | err_destroy_arfs_tables:
295 | 354 | 	mlx5e_arfs_destroy_tables(priv);
296 | 355 |
 .. |  .. |
300 | 359 | static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
301 | 360 | {
302 | 361 | 	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
303 |     | -	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
304 | 362 | 	mlx5e_arfs_destroy_tables(priv);
305 | 363 | }
306 | 364 |
307 | 365 | static int mlx5i_init_rx(struct mlx5e_priv *priv)
308 | 366 | {
    | 367 | +	struct mlx5_core_dev *mdev = priv->mdev;
309 | 368 | 	int err;
    | 369 | +
    | 370 | +	mlx5e_create_q_counters(priv);
    | 371 | +
    | 372 | +	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
    | 373 | +	if (err) {
    | 374 | +		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
    | 375 | +		goto err_destroy_q_counters;
    | 376 | +	}
310 | 377 |
311 | 378 | 	err = mlx5e_create_indirect_rqt(priv);
312 | 379 | 	if (err)
313 |     | -		return err;
    | 380 | +		goto err_close_drop_rq;
314 | 381 |
315 |     | -	err = mlx5e_create_direct_rqts(priv);
    | 382 | +	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
316 | 383 | 	if (err)
317 | 384 | 		goto err_destroy_indirect_rqts;
318 | 385 |
319 |     | -	err = mlx5e_create_indirect_tirs(priv);
    | 386 | +	err = mlx5e_create_indirect_tirs(priv, false);
320 | 387 | 	if (err)
321 | 388 | 		goto err_destroy_direct_rqts;
322 | 389 |
323 |     | -	err = mlx5e_create_direct_tirs(priv);
    | 390 | +	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
324 | 391 | 	if (err)
325 | 392 | 		goto err_destroy_indirect_tirs;
326 | 393 |
 .. |  .. |
331 | 398 | 	return 0;
332 | 399 |
333 | 400 | err_destroy_direct_tirs:
334 |     | -	mlx5e_destroy_direct_tirs(priv);
    | 401 | +	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
335 | 402 | err_destroy_indirect_tirs:
336 | 403 | 	mlx5e_destroy_indirect_tirs(priv);
337 | 404 | err_destroy_direct_rqts:
338 |     | -	mlx5e_destroy_direct_rqts(priv);
    | 405 | +	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
339 | 406 | err_destroy_indirect_rqts:
340 | 407 | 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
    | 408 | +err_close_drop_rq:
    | 409 | +	mlx5e_close_drop_rq(&priv->drop_rq);
    | 410 | +err_destroy_q_counters:
    | 411 | +	mlx5e_destroy_q_counters(priv);
341 | 412 | 	return err;
342 | 413 | }
343 | 414 |
344 | 415 | static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
345 | 416 | {
346 | 417 | 	mlx5i_destroy_flow_steering(priv);
347 |     | -	mlx5e_destroy_direct_tirs(priv);
    | 418 | +	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
348 | 419 | 	mlx5e_destroy_indirect_tirs(priv);
349 |     | -	mlx5e_destroy_direct_rqts(priv);
    | 420 | +	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
350 | 421 | 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
    | 422 | +	mlx5e_close_drop_rq(&priv->drop_rq);
    | 423 | +	mlx5e_destroy_q_counters(priv);
    | 424 | +}
    | 425 | +
    | 426 | +/* The stats groups order is opposite to the update_stats() order calls */
    | 427 | +static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
    | 428 | +	&MLX5E_STATS_GRP(sw),
    | 429 | +	&MLX5E_STATS_GRP(qcnt),
    | 430 | +	&MLX5E_STATS_GRP(vnic_env),
    | 431 | +	&MLX5E_STATS_GRP(vport),
    | 432 | +	&MLX5E_STATS_GRP(802_3),
    | 433 | +	&MLX5E_STATS_GRP(2863),
    | 434 | +	&MLX5E_STATS_GRP(2819),
    | 435 | +	&MLX5E_STATS_GRP(phy),
    | 436 | +	&MLX5E_STATS_GRP(pcie),
    | 437 | +	&MLX5E_STATS_GRP(per_prio),
    | 438 | +	&MLX5E_STATS_GRP(pme),
    | 439 | +	&MLX5E_STATS_GRP(channels),
    | 440 | +	&MLX5E_STATS_GRP(per_port_buff_congest),
    | 441 | +};
    | 442 | +
    | 443 | +static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
    | 444 | +{
    | 445 | +	return ARRAY_SIZE(mlx5i_stats_grps);
351 | 446 | }
352 | 447 |
353 | 448 | static const struct mlx5e_profile mlx5i_nic_profile = {
 .. |  .. |
359 | 454 | 	.cleanup_rx = mlx5i_cleanup_rx,
360 | 455 | 	.enable = NULL, /* mlx5i_enable */
361 | 456 | 	.disable = NULL, /* mlx5i_disable */
    | 457 | +	.update_rx = mlx5i_update_nic_rx,
362 | 458 | 	.update_stats = NULL, /* mlx5i_update_stats */
363 |     | -	.max_nch = mlx5e_get_max_num_channels,
364 | 459 | 	.update_carrier = NULL, /* no HW update in IB link */
365 |     | -	.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
366 |     | -	.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
    | 460 | +	.rx_handlers = &mlx5i_rx_handlers,
367 | 461 | 	.max_tc = MLX5I_MAX_NUM_TC,
    | 462 | +	.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
    | 463 | +	.stats_grps = mlx5i_stats_grps,
    | 464 | +	.stats_grps_num = mlx5i_stats_grps_num,
368 | 465 | };
369 | 466 |
370 | 467 | /* mlx5i netdev NDos */
 .. |  .. |
388 | 485 |
389 | 486 | 	new_channels.params = *params;
390 | 487 | 	new_channels.params.sw_mtu = new_mtu;
391 |     | -	err = mlx5e_open_channels(priv, &new_channels);
    | 488 | +
    | 489 | +	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
392 | 490 | 	if (err)
393 | 491 | 		goto out;
394 | 492 |
395 |     | -	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
396 | 493 | 	netdev->mtu = new_channels.params.sw_mtu;
397 | 494 |
398 | 495 | out:
 .. |  .. |
406 | 503 | 	struct mlx5i_priv *ipriv = priv->ppriv;
407 | 504 |
408 | 505 | 	/* Set dev address using underlay QP */
409 |     | -	dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
410 |     | -	dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
411 |     | -	dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
    | 506 | +	dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
    | 507 | +	dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
    | 508 | +	dev->dev_addr[3] = (ipriv->qpn) & 0xff;
412 | 509 |
413 | 510 | 	/* Add QPN to net-device mapping to HT */
414 |     | -	mlx5i_pkey_add_qpn(dev ,ipriv->qp.qpn);
    | 511 | +	mlx5i_pkey_add_qpn(dev, ipriv->qpn);
415 | 512 |
416 | 513 | 	return 0;
417 | 514 | }
 .. |  .. |
438 | 535 | 	mlx5i_uninit_underlay_qp(priv);
439 | 536 |
440 | 537 | 	/* Delete QPN to net-device mapping from HT */
441 |     | -	mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
    | 538 | +	mlx5i_pkey_del_qpn(dev, ipriv->qpn);
442 | 539 | }
443 | 540 |
444 | 541 | static int mlx5i_open(struct net_device *netdev)
 .. |  .. |
458 | 555 | 		goto err_clear_state_opened_flag;
459 | 556 | 	}
460 | 557 |
461 |     | -	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
    | 558 | +	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
462 | 559 | 	if (err) {
463 | 560 | 		mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
464 | 561 | 		goto err_reset_qp;
 .. |  .. |
468 | 565 | 	if (err)
469 | 566 | 		goto err_remove_fs_underlay_qp;
470 | 567 |
471 |     | -	mlx5e_refresh_tirs(epriv, false);
    | 568 | +	epriv->profile->update_rx(epriv);
472 | 569 | 	mlx5e_activate_priv_channels(epriv);
473 | 570 |
474 | 571 | 	mutex_unlock(&epriv->state_lock);
475 | 572 | 	return 0;
476 | 573 |
477 | 574 | err_remove_fs_underlay_qp:
478 |     | -	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
    | 575 | +	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
479 | 576 | err_reset_qp:
480 | 577 | 	mlx5i_uninit_underlay_qp(epriv);
481 | 578 | err_clear_state_opened_flag:
 .. |  .. |
501 | 598 | 	clear_bit(MLX5E_STATE_OPENED, &epriv->state);
502 | 599 |
503 | 600 | 	netif_carrier_off(epriv->netdev);
504 |     | -	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
    | 601 | +	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
505 | 602 | 	mlx5e_deactivate_priv_channels(epriv);
506 | 603 | 	mlx5e_close_channels(&epriv->channels);
507 | 604 | 	mlx5i_uninit_underlay_qp(epriv);
 .. |  .. |
520 | 617 | 	struct mlx5i_priv *ipriv = epriv->ppriv;
521 | 618 | 	int err;
522 | 619 |
523 |     | -	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
524 |     | -	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
    | 620 | +	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
    | 621 | +		gid->raw);
    | 622 | +	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
525 | 623 | 	if (err)
526 | 624 | 		mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
527 |     | -			ipriv->qp.qpn, gid->raw);
    | 625 | +			ipriv->qpn, gid->raw);
528 | 626 |
529 | 627 | 	if (set_qkey) {
530 | 628 | 		mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
 .. |  .. |
543 | 641 | 	struct mlx5i_priv *ipriv = epriv->ppriv;
544 | 642 | 	int err;
545 | 643 |
546 |     | -	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
    | 644 | +	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
    | 645 | +		gid->raw);
547 | 646 |
548 |     | -	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
    | 647 | +	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
549 | 648 | 	if (err)
550 | 649 | 		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
551 |     | -			ipriv->qp.qpn, gid->raw);
    | 650 | +			ipriv->qpn, gid->raw);
552 | 651 |
553 | 652 | 	return err;
554 | 653 | }
 .. |  .. |
561 | 660 | 	struct mlx5_ib_ah *mah = to_mah(address);
562 | 661 | 	struct mlx5i_priv *ipriv = epriv->ppriv;
563 | 662 |
564 |     | -	return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
    | 663 | +	mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
    | 664 | +
    | 665 | +	return NETDEV_TX_OK;
565 | 666 | }
566 | 667 |
567 | 668 | static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
 .. |  .. |
592 | 693 |
593 | 694 | 	mlx5e_detach_netdev(priv);
594 | 695 | 	profile->cleanup(priv);
595 |     | -	destroy_workqueue(priv->wq);
596 | 696 |
597 | 697 | 	if (!ipriv->sub_interface) {
598 | 698 | 		mlx5i_pkey_qpn_ht_cleanup(netdev);
 .. |  .. |
600 | 700 | 	}
601 | 701 | }
602 | 702 |
603 |     | -struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
604 |     | -		struct ib_device *ibdev,
605 |     | -		const char *name,
606 |     | -		void (*setup)(struct net_device *))
    | 703 | +static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
607 | 704 | {
608 |     | -	const struct mlx5e_profile *profile;
609 |     | -	struct net_device *netdev;
    | 705 | +	return mdev->mlx5e_res.pdn != 0;
    | 706 | +}
    | 707 | +
    | 708 | +static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
    | 709 | +{
    | 710 | +	if (mlx5_is_sub_interface(mdev))
    | 711 | +		return mlx5i_pkey_get_profile();
    | 712 | +	return &mlx5i_nic_profile;
    | 713 | +}
    | 714 | +
    | 715 | +static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
    | 716 | +		struct net_device *netdev, void *param)
    | 717 | +{
    | 718 | +	struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
    | 719 | +	const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
610 | 720 | 	struct mlx5i_priv *ipriv;
611 | 721 | 	struct mlx5e_priv *epriv;
612 | 722 | 	struct rdma_netdev *rn;
613 |     | -	bool sub_interface;
614 |     | -	int nch;
615 | 723 | 	int err;
616 |     | -
617 |     | -	if (mlx5i_check_required_hca_cap(mdev)) {
618 |     | -		mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
619 |     | -		return ERR_PTR(-EOPNOTSUPP);
620 |     | -	}
621 |     | -
622 |     | -	/* TODO: Need to find a better way to check if child device*/
623 |     | -	sub_interface = (mdev->mlx5e_res.pdn != 0);
624 |     | -
625 |     | -	if (sub_interface)
626 |     | -		profile = mlx5i_pkey_get_profile();
627 |     | -	else
628 |     | -		profile = &mlx5i_nic_profile;
629 |     | -
630 |     | -	nch = profile->max_nch(mdev);
631 |     | -
632 |     | -	netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
633 |     | -		name, NET_NAME_UNKNOWN,
634 |     | -		setup,
635 |     | -		nch * MLX5E_MAX_NUM_TC,
636 |     | -		nch);
637 |     | -	if (!netdev) {
638 |     | -		mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
639 |     | -		return NULL;
640 |     | -	}
641 | 724 |
642 | 725 | 	ipriv = netdev_priv(netdev);
643 | 726 | 	epriv = mlx5i_epriv(netdev);
644 | 727 |
645 |     | -	epriv->wq = create_singlethread_workqueue("mlx5i");
646 |     | -	if (!epriv->wq)
647 |     | -		goto err_free_netdev;
648 |     | -
649 |     | -	ipriv->sub_interface = sub_interface;
    | 728 | +	ipriv->sub_interface = mlx5_is_sub_interface(mdev);
650 | 729 | 	if (!ipriv->sub_interface) {
651 | 730 | 		err = mlx5i_pkey_qpn_ht_init(netdev);
652 | 731 | 		if (err) {
653 | 732 | 			mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
654 |     | -			goto destroy_wq;
    | 733 | +			return err;
655 | 734 | 		}
656 | 735 |
657 | 736 | 		/* This should only be called once per mdev */
 .. |  .. |
660 | 739 | 			goto destroy_ht;
661 | 740 | 	}
662 | 741 |
663 |     | -	profile->init(mdev, netdev, profile, ipriv);
    | 742 | +	prof->init(mdev, netdev, prof, ipriv);
664 | 743 |
665 | 744 | 	err = mlx5e_attach_netdev(epriv);
666 | 745 | 	if (err)
 .. |  .. |
678 | 757 | 	netdev->priv_destructor = mlx5_rdma_netdev_free;
679 | 758 | 	netdev->needs_free_netdev = 1;
680 | 759 |
681 |     | -	return netdev;
    | 760 | +	return 0;
682 | 761 |
683 | 762 | detach:
684 |     | -	profile->cleanup(epriv);
    | 763 | +	prof->cleanup(epriv);
685 | 764 | 	if (ipriv->sub_interface)
686 |     | -		return NULL;
    | 765 | +		return err;
687 | 766 | 	mlx5e_destroy_mdev_resources(mdev);
688 | 767 | destroy_ht:
689 | 768 | 	mlx5i_pkey_qpn_ht_cleanup(netdev);
690 |     | -destroy_wq:
691 |     | -	destroy_workqueue(epriv->wq);
692 |     | -err_free_netdev:
693 |     | -	free_netdev(netdev);
694 |     | -
695 |     | -	return NULL;
    | 769 | +	return err;
696 | 770 | }
697 |     | -EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
    | 771 | +
    | 772 | +int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
    | 773 | +		struct ib_device *device,
    | 774 | +		struct rdma_netdev_alloc_params *params)
    | 775 | +{
    | 776 | +	int nch;
    | 777 | +	int rc;
    | 778 | +
    | 779 | +	rc = mlx5i_check_required_hca_cap(mdev);
    | 780 | +	if (rc)
    | 781 | +		return rc;
    | 782 | +
    | 783 | +	nch = mlx5e_get_max_num_channels(mdev);
    | 784 | +
    | 785 | +	*params = (struct rdma_netdev_alloc_params){
    | 786 | +		.sizeof_priv = sizeof(struct mlx5i_priv) +
    | 787 | +			sizeof(struct mlx5e_priv),
    | 788 | +		.txqs = nch * MLX5E_MAX_NUM_TC,
    | 789 | +		.rxqs = nch,
    | 790 | +		.param = mdev,
    | 791 | +		.initialize_rdma_netdev = mlx5_rdma_setup_rn,
    | 792 | +	};
    | 793 | +
    | 794 | +	return 0;
    | 795 | +}
    | 796 | +EXPORT_SYMBOL(mlx5_rdma_rn_get_params);