@@ ... @@
 #include <net/pkt_cls.h>
 #include <linux/mlx5/fs.h>
 #include <net/vxlan.h>
+#include <net/geneve.h>
 #include <linux/bpf.h>
+#include <linux/if_bridge.h>
 #include <net/page_pool.h>
+#include <net/xdp_sock_drv.h>
 #include "eswitch.h"
 #include "en.h"
+#include "en/txrx.h"
 #include "en_tc.h"
 #include "en_rep.h"
 #include "en_accel/ipsec.h"
-#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/en_accel.h"
 #include "en_accel/tls.h"
 #include "accel/ipsec.h"
 #include "accel/tls.h"
@@ ... @@
 #include "lib/clock.h"
 #include "en/port.h"
 #include "en/xdp.h"
-
-struct mlx5e_rq_param {
-	u32 rqc[MLX5_ST_SZ_DW(rqc)];
-	struct mlx5_wq_param wq;
-	struct mlx5e_rq_frags_info frags_info;
-};
-
-struct mlx5e_sq_param {
-	u32 sqc[MLX5_ST_SZ_DW(sqc)];
-	struct mlx5_wq_param wq;
-};
-
-struct mlx5e_cq_param {
-	u32 cqc[MLX5_ST_SZ_DW(cqc)];
-	struct mlx5_wq_param wq;
-	u16 eq_ix;
-	u8 cq_period_mode;
-};
-
-struct mlx5e_channel_param {
-	struct mlx5e_rq_param rq;
-	struct mlx5e_sq_param sq;
-	struct mlx5e_sq_param xdp_sq;
-	struct mlx5e_sq_param icosq;
-	struct mlx5e_cq_param rx_cq;
-	struct mlx5e_cq_param tx_cq;
-	struct mlx5e_cq_param icosq_cq;
-};
+#include "lib/eq.h"
+#include "en/monitor_stats.h"
+#include "en/health.h"
+#include "en/params.h"
+#include "en/xsk/pool.h"
+#include "en/xsk/setup.h"
+#include "en/xsk/rx.h"
+#include "en/xsk/tx.h"
+#include "en/hv_vhca_stats.h"
+#include "en/devlink.h"
+#include "lib/mlx5.h"
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
@@ ... @@
 	return true;
 }
 
-static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
-{
-	u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-	u16 linear_rq_headroom = params->xdp_prog ?
-		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
-	u32 frag_sz;
-
-	linear_rq_headroom += NET_IP_ALIGN;
-
-	frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
-
-	if (params->xdp_prog && frag_sz < PAGE_SIZE)
-		frag_sz = PAGE_SIZE;
-
-	return frag_sz;
-}
-
-static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
-{
-	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
-
-	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
-}
-
-static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params)
-{
-	u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
-
-	return !params->lro_en && frag_sz <= PAGE_SIZE;
-}
-
-#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
-					  MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
-static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
-					 struct mlx5e_params *params)
-{
-	u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
-	s8 signed_log_num_strides_param;
-	u8 log_num_strides;
-
-	if (!mlx5e_rx_is_linear_skb(mdev, params))
-		return false;
-
-	if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
-		return false;
-
-	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
-		return true;
-
-	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
-	signed_log_num_strides_param =
-		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
-
-	return signed_log_num_strides_param >= 0;
-}
-
-static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
-{
-	if (params->log_rq_mtu_frames <
-	    mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
-		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
-
-	return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
-}
-
-static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
-					  struct mlx5e_params *params)
-{
-	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
-		return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
-
-	return MLX5E_MPWQE_STRIDE_SZ(mdev,
-		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
-}
-
-static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
-					  struct mlx5e_params *params)
-{
-	return MLX5_MPWRQ_LOG_WQE_SZ -
-		mlx5e_mpwqe_get_log_stride_size(mdev, params);
-}
-
-static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
-				 struct mlx5e_params *params)
-{
-	u16 linear_rq_headroom = params->xdp_prog ?
-		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
-	bool is_linear_skb;
-
-	linear_rq_headroom += NET_IP_ALIGN;
-
-	is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
-		mlx5e_rx_is_linear_skb(mdev, params) :
-		mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
-
-	return is_linear_skb ? linear_rq_headroom : 0;
-}
-
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 			       struct mlx5e_params *params)
 {
-	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 	params->log_rq_mtu_frames = is_kdump_kernel() ?
 		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
 		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
@@ ... @@
 	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
 		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
 		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-		       BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
+		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
 		       BIT(params->log_rq_mtu_frames),
-		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
+		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
 		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 				struct mlx5e_params *params)
 {
-	return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
-		!MLX5_IPSEC_DEV(mdev) &&
-		!(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
+	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+		return false;
+
+	if (MLX5_IPSEC_DEV(mdev))
+		return false;
+
+	if (params->xdp_prog) {
+		/* XSK params are not considered here. If striding RQ is in use,
+		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
+		 * be called with the known XSK params.
+		 */
+		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+			return false;
+	}
+
+	return true;
 }
 
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ ... @@
 		MLX5_WQ_TYPE_CYCLIC;
 }
 
-static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+void mlx5e_update_carrier(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u8 port_state;
@@ ... @@
 	mutex_unlock(&priv->state_lock);
 }
 
-void mlx5e_update_stats(struct mlx5e_priv *priv)
+static void mlx5e_update_stats_work(struct work_struct *work)
 {
-	int i;
-
-	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
-		if (mlx5e_stats_grps[i].update_stats)
-			mlx5e_stats_grps[i].update_stats(priv);
-}
-
-static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
-{
-	int i;
-
-	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
-		if (mlx5e_stats_grps[i].update_stats_mask &
-		    MLX5E_NDO_UPDATE_STATS)
-			mlx5e_stats_grps[i].update_stats(priv);
-}
-
-void mlx5e_update_stats_work(struct work_struct *work)
-{
-	struct delayed_work *dwork = to_delayed_work(work);
-	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
 					       update_stats_work);
 
 	mutex_lock(&priv->state_lock);
@@ ... @@
 	mutex_unlock(&priv->state_lock);
 }
 
-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
-			      enum mlx5_dev_event event, unsigned long param)
+void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
 {
-	struct mlx5e_priv *priv = vpriv;
-
-	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
+	if (!priv->profile->update_stats)
 		return;
 
-	switch (event) {
-	case MLX5_DEV_EVENT_PORT_UP:
-	case MLX5_DEV_EVENT_PORT_DOWN:
+	if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
+		return;
+
+	queue_work(priv->wq, &priv->update_stats_work);
+}
+
+static int async_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
+	struct mlx5_eqe *eqe = data;
+
+	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
+		return NOTIFY_DONE;
+
+	switch (eqe->sub_type) {
+	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
 		queue_work(priv->wq, &priv->update_carrier_work);
 		break;
 	default:
-		break;
+		return NOTIFY_DONE;
 	}
+
+	return NOTIFY_OK;
 }
 
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
-	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
+	priv->events_nb.notifier_call = async_event;
+	mlx5_notifier_register(priv->mdev, &priv->events_nb);
}
 
 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
-	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
+	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
 }
 
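The hunk above replaces the old driver-wide event callback with a notifier_block embedded in mlx5e_priv and registered through mlx5_notifier_register(); the callback then recovers its owning object with container_of(). A minimal userspace sketch of that pattern follows — all names here are invented for illustration, only the structure mirrors the driver code:

```c
/* Userspace sketch of the embedded-notifier pattern: the callback
 * receives only the notifier_block pointer and recovers the enclosing
 * object via container_of(). Names are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long event, void *data);
};

struct my_priv {
	const char *name;
	struct notifier_block events_nb; /* embedded, like priv->events_nb */
};

static int my_event(struct notifier_block *nb, unsigned long event, void *data)
{
	/* Walk back from the embedded member to the owner. */
	struct my_priv *priv = container_of(nb, struct my_priv, events_nb);

	printf("%s: event %lu\n", priv->name, event);
	return 0;
}

int main(void)
{
	struct my_priv priv = { .name = "eth0" };

	priv.events_nb.notifier_call = my_event;
	/* A real notifier chain would invoke the callback for us. */
	priv.events_nb.notifier_call(&priv.events_nb, 1UL, NULL);
	return 0;
}
```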
 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
@@ ... @@
 
 	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
 				    ds_cnt);
-	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-	cseg->imm = rq->mkey_be;
+	cseg->umr_mkey = rq->mkey_be;
 
 	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
 	ucseg->xlt_octowords =
 		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
 	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-}
-
-static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
-{
-	switch (rq->wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
-	default:
-		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
-	}
-}
-
-static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
-{
-	switch (rq->wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return rq->mpwqe.wq.cur_sz;
-	default:
-		return rq->wqe.wq.cur_sz;
-	}
 }
 
 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
@@ ... @@
 
 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 				 u64 npages, u8 page_shift,
-				 struct mlx5_core_mkey *umr_mkey)
+				 struct mlx5_core_mkey *umr_mkey,
+				 dma_addr_t filler_addr)
 {
-	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	struct mlx5_mtt *mtt;
+	int inlen;
 	void *mkc;
 	u32 *in;
 	int err;
+	int i;
+
+	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
 
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
@@ ... @@
 	MLX5_SET(mkc, mkc, lw, 1);
 	MLX5_SET(mkc, mkc, lr, 1);
 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
-
+	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
 	MLX5_SET64(mkc, mkc, len, npages << page_shift);
 	MLX5_SET(mkc, mkc, translations_octword_size,
 		 MLX5_MTT_OCTW(npages));
 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
+	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+		 MLX5_MTT_OCTW(npages));
+
+	/* Initialize the mkey with all MTTs pointing to a default
+	 * page (filler_addr). When the channels are activated, UMR
+	 * WQEs will redirect the RX WQEs to the actual memory from
+	 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
+	 * to the default page.
+	 */
+	mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+	for (i = 0 ; i < npages ; i++)
+		mtt[i].ptag = cpu_to_be64(filler_addr);
 
 	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
 
@@ ... @@
 {
 	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
 
-	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
+	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
+				     rq->wqe_overflow.addr);
 }
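Per the comment in the hunk above, the UMR mkey is now created with npages MTT entries appended after the create_mkey_in header, each seeded with the DMA address of a single "drop" page; later UMR updates retarget only the entries actually in use. A standalone sketch of that trailing-array fill pattern — the fixed-size header, names, and omitted be64 conversion are stand-ins, not the mlx5 command layout:

```c
/* Userspace sketch: a command buffer with a trailing flexible array of
 * translation entries, all seeded with one default address.
 */
#include <stdint.h>
#include <stdlib.h>

struct mtt_entry {
	uint64_t ptag; /* big-endian DMA address in the real driver */
};

struct create_mkey_cmd {
	uint8_t hdr[64];        /* stand-in for the fixed command header */
	struct mtt_entry mtt[]; /* npages translation entries follow */
};

static struct create_mkey_cmd *build_cmd(uint64_t filler_addr, size_t npages)
{
	size_t inlen = sizeof(struct create_mkey_cmd) +
		       npages * sizeof(struct mtt_entry);
	struct create_mkey_cmd *in = calloc(1, inlen);
	size_t i;

	if (!in)
		return NULL;

	/* Every entry starts out pointing at the same default page. */
	for (i = 0; i < npages; i++)
		in->mtt[i].ptag = filler_addr; /* cpu_to_be64() in-kernel */

	return in;
}

int main(void)
{
	struct create_mkey_cmd *cmd = build_cmd(0x1000, 4);

	if (!cmd)
		return 1;
	free(cmd);
	return 0;
}
```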
 
-static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
 {
-	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
+	return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
 }
 
 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ ... @@
 }
 
 static int mlx5e_init_di_list(struct mlx5e_rq *rq,
-			      struct mlx5e_params *params,
 			      int wq_sz, int cpu)
 {
 	int len = wq_sz << rq->wqe.info.log_num_frags;
@@ ... @@
 	kvfree(rq->wqe.di);
 }
 
+static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
+{
+	struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
+
+	mlx5e_reporter_rq_cqe_err(rq);
+}
+
+static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
+{
+	rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
+	if (!rq->wqe_overflow.page)
+		return -ENOMEM;
+
+	rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
+					     PAGE_SIZE, rq->buff.map_dir);
+	if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
+		__free_page(rq->wqe_overflow.page);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
+{
+	dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
+		       rq->buff.map_dir);
+	__free_page(rq->wqe_overflow.page);
+}
+
 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			  struct mlx5e_params *params,
+			  struct mlx5e_xsk_param *xsk,
+			  struct xsk_buff_pool *xsk_pool,
 			  struct mlx5e_rq_param *rqp,
 			  struct mlx5e_rq *rq)
 {
@@ ... @@
 	struct mlx5_core_dev *mdev = c->mdev;
 	void *rqc = rqp->rqc;
 	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+	u32 rq_xdp_ix;
 	u32 pool_size;
 	int wq_sz;
 	int err;
@@ ... @@
 	rq->ix = c->ix;
 	rq->mdev = mdev;
 	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-	rq->stats = &c->priv->channel_stats[c->ix].rq;
+	rq->xdpsq = &c->rq_xdpsq;
+	rq->xsk_pool = xsk_pool;
 
-	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
-	if (IS_ERR(rq->xdp_prog)) {
-		err = PTR_ERR(rq->xdp_prog);
-		rq->xdp_prog = NULL;
-		goto err_rq_wq_destroy;
-	}
+	if (rq->xsk_pool)
+		rq->stats = &c->priv->channel_stats[c->ix].xskrq;
+	else
+		rq->stats = &c->priv->channel_stats[c->ix].rq;
+	INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
 
-	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+	if (params->xdp_prog)
+		bpf_prog_inc(params->xdp_prog);
+	RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
+
+	rq_xdp_ix = rq->ix;
+	if (xsk)
+		rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
+	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
 	if (err < 0)
-		goto err_rq_wq_destroy;
+		goto err_rq_xdp_prog;
 
-	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
+	rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
 	pool_size = 1 << params->log_rq_mtu_frames;
 
 	switch (rq->wq_type) {
@@ ... @@
 		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
 					&rq->wq_ctrl);
 		if (err)
+			goto err_rq_xdp;
+
+		err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
+		if (err)
 			goto err_rq_wq_destroy;
 
 		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
 
 		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 
-		pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
+		pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
+			mlx5e_mpwqe_get_log_rq_size(params, xsk);
 
-		rq->post_wqes = mlx5e_post_rx_mpwqes;
-		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+		rq->mpwqe.num_strides =
+			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
 
-		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
-		if (MLX5_IPSEC_DEV(mdev)) {
-			err = -EINVAL;
-			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
-			goto err_rq_wq_destroy;
-		}
-#endif
-		if (!rq->handle_rx_cqe) {
-			err = -EINVAL;
-			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
-			goto err_rq_wq_destroy;
-		}
-
-		rq->mpwqe.skb_from_cqe_mpwrq =
-			mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
-			mlx5e_skb_from_cqe_mpwrq_linear :
-			mlx5e_skb_from_cqe_mpwrq_nonlinear;
-		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
-		rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
+		rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
 
 		err = mlx5e_create_rq_umr_mkey(mdev, rq);
 		if (err)
-			goto err_rq_wq_destroy;
+			goto err_rq_drop_page;
 		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
 
 		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
 		if (err)
-			goto err_free;
+			goto err_rq_mkey;
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
 					 &rq->wq_ctrl);
 		if (err)
-			goto err_rq_wq_destroy;
+			goto err_rq_xdp;
 
 		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
 
 		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
 
 		rq->wqe.info = rqp->frags_info;
+		rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
+
 		rq->wqe.frags =
 			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
 					(wq_sz << rq->wqe.info.log_num_frags)),
 				      GFP_KERNEL, cpu_to_node(c->cpu));
 		if (!rq->wqe.frags) {
 			err = -ENOMEM;
-			goto err_free;
+			goto err_rq_wq_destroy;
 		}
 
-		err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
+		err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
 		if (err)
-			goto err_free;
-		rq->post_wqes = mlx5e_post_rx_wqes;
-		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+			goto err_rq_frags;
 
-#ifdef CONFIG_MLX5_EN_IPSEC
-		if (c->priv->ipsec)
-			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
-		else
-#endif
-			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
-		if (!rq->handle_rx_cqe) {
-			err = -EINVAL;
-			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
-			goto err_free;
-		}
-
-		rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
-			mlx5e_skb_from_cqe_linear :
-			mlx5e_skb_from_cqe_nonlinear;
 		rq->mkey_be = c->mkey_be;
 	}
 
-	/* Create a page_pool and register it with rxq */
-	pp_params.order = 0;
-	pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
-	pp_params.pool_size = pool_size;
-	pp_params.nid = cpu_to_node(c->cpu);
-	pp_params.dev = c->pdev;
-	pp_params.dma_dir = rq->buff.map_dir;
-
-	/* page_pool can be used even when there is no rq->xdp_prog,
-	 * given page_pool does not handle DMA mapping there is no
-	 * required state to clear. And page_pool gracefully handle
-	 * elevated refcnt.
-	 */
-	rq->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(rq->page_pool)) {
-		err = PTR_ERR(rq->page_pool);
-		rq->page_pool = NULL;
-		goto err_free;
-	}
-	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
-					 MEM_TYPE_PAGE_POOL, rq->page_pool);
+	err = mlx5e_rq_set_handlers(rq, params, xsk);
 	if (err)
-		goto err_free;
+		goto err_free_by_rq_type;
+
+	if (xsk) {
+		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+						 MEM_TYPE_XSK_BUFF_POOL, NULL);
+		xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
+	} else {
+		/* Create a page_pool and register it with rxq */
+		pp_params.order = 0;
+		pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
+		pp_params.pool_size = pool_size;
+		pp_params.nid = cpu_to_node(c->cpu);
+		pp_params.dev = c->pdev;
+		pp_params.dma_dir = rq->buff.map_dir;
+
+		/* page_pool can be used even when there is no rq->xdp_prog,
+		 * given page_pool does not handle DMA mapping there is no
+		 * required state to clear. And page_pool gracefully handle
+		 * elevated refcnt.
+		 */
+		rq->page_pool = page_pool_create(&pp_params);
+		if (IS_ERR(rq->page_pool)) {
+			err = PTR_ERR(rq->page_pool);
+			rq->page_pool = NULL;
+			goto err_free_by_rq_type;
+		}
+		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+						 MEM_TYPE_PAGE_POOL, rq->page_pool);
+	}
+	if (err)
+		goto err_free_by_rq_type;
 
 	for (i = 0; i < wq_sz; i++) {
 		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ ... @@
 				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
 			u32 byte_count =
 				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
-			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
+			u64 dma_offset = mlx5e_get_mpwqe_offset(i);
 
 			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
 			wqe->data[0].byte_count = cpu_to_be32(byte_count);
@@ ... @@
 
 	switch (params->rx_cq_moderation.cq_period_mode) {
 	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
-		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
 		break;
 	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
 	default:
-		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 	}
 
 	rq->page_cache.head = 0;
@@ ... @@
 
 	return 0;
 
-err_free:
+err_free_by_rq_type:
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		kvfree(rq->mpwqe.info);
+err_rq_mkey:
 		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+err_rq_drop_page:
+		mlx5e_free_mpwqe_rq_drop_page(rq);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
-		kvfree(rq->wqe.frags);
 		mlx5e_free_di_list(rq);
+err_rq_frags:
+		kvfree(rq->wqe.frags);
 	}
-
 err_rq_wq_destroy:
-	if (rq->xdp_prog)
-		bpf_prog_put(rq->xdp_prog);
-	xdp_rxq_info_unreg(&rq->xdp_rxq);
-	if (rq->page_pool)
-		page_pool_destroy(rq->page_pool);
 	mlx5_wq_destroy(&rq->wq_ctrl);
+err_rq_xdp:
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
+err_rq_xdp_prog:
+	if (params->xdp_prog)
+		bpf_prog_put(params->xdp_prog);
 
 	return err;
 }
 
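The reworked error path above gives each acquired resource its own label (err_rq_mkey, err_rq_drop_page, err_rq_frags, err_rq_xdp, ...) and unwinds strictly in reverse order of acquisition. A minimal userspace sketch of the goto-unwind idiom, using plain mallocs in place of the driver's resources:

```c
/* Userspace sketch of the goto-unwind idiom: one label per acquired
 * resource, jumped to from the first failure onward. A matching
 * ctx_fini() would free a, b, c in reverse on the success path.
 */
#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
	void *c;
};

static int ctx_init(struct ctx *ctx)
{
	ctx->a = malloc(64);
	if (!ctx->a)
		goto err_out;

	ctx->b = malloc(64);
	if (!ctx->b)
		goto err_free_a;

	ctx->c = malloc(64);
	if (!ctx->c)
		goto err_free_b;

	return 0;

	/* Unwind in reverse order of acquisition. */
err_free_b:
	free(ctx->b);
err_free_a:
	free(ctx->a);
err_out:
	return -1;
}

int main(void)
{
	struct ctx ctx;

	return ctx_init(&ctx) ? 1 : 0;
}
```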
 static void mlx5e_free_rq(struct mlx5e_rq *rq)
 {
+	struct mlx5e_channel *c = rq->channel;
+	struct bpf_prog *old_prog = NULL;
 	int i;
 
-	if (rq->xdp_prog)
-		bpf_prog_put(rq->xdp_prog);
-
-	xdp_rxq_info_unreg(&rq->xdp_rxq);
-	if (rq->page_pool)
-		page_pool_destroy(rq->page_pool);
+	/* drop_rq has neither channel nor xdp_prog. */
+	if (c)
+		old_prog = rcu_dereference_protected(rq->xdp_prog,
+						     lockdep_is_held(&c->priv->state_lock));
+	if (old_prog)
+		bpf_prog_put(old_prog);
 
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		kvfree(rq->mpwqe.info);
 		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
+		mlx5e_free_mpwqe_rq_drop_page(rq);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		kvfree(rq->wqe.frags);
@@ ... @@
 	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
 		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
 
-		mlx5e_page_release(rq, dma_info, false);
+		/* With AF_XDP, page_cache is not used, so this loop is not
+		 * entered, and it's safe to call mlx5e_page_release_dynamic
+		 * directly.
+		 */
+		mlx5e_page_release_dynamic(rq, dma_info, false);
 	}
+
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
+	page_pool_destroy(rq->page_pool);
 	mlx5_wq_destroy(&rq->wq_ctrl);
 }
 
@@ ... @@
 	return err;
 }
 
-static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
-				 int next_state)
+int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
 {
 	struct mlx5_core_dev *mdev = rq->mdev;
 
@@ ... @@
 	if (!in)
 		return -ENOMEM;
 
+	if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
+		mlx5e_rqwq_reset(rq);
+
 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
 
 	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
 	MLX5_SET(rqc, rqc, state, next_state);
 
-	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+	err = mlx5_core_modify_rq(mdev, rq->rqn, in);
 
 	kvfree(in);
 
@@ ... @@
 	MLX5_SET(rqc, rqc, scatter_fcs, enable);
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
 
-	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+	err = mlx5_core_modify_rq(mdev, rq->rqn, in);
 
 	kvfree(in);
 
@@ ... @@
 	MLX5_SET(rqc, rqc, vsd, vsd);
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
 
-	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+	err = mlx5_core_modify_rq(mdev, rq->rqn, in);
 
 	kvfree(in);
 
@@ ... @@
 	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
 }
 
-static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
+int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
 {
 	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
 	struct mlx5e_channel *c = rq->channel;
@@ ... @@
 	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
 		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
 
+	mlx5e_reporter_rx_timeout(rq);
 	return -ETIMEDOUT;
 }
 
-static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq;
+	u16 head;
+	int i;
+
+	if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		return;
+
+	wq = &rq->mpwqe.wq;
+	head = wq->head;
+
+	/* Outstanding UMR WQEs (in progress) start at wq->head */
+	for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+		rq->dealloc_wqe(rq, head);
+		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+	}
+
+	rq->mpwqe.actual_wq_head = wq->head;
+	rq->mpwqe.umr_in_progress = 0;
+	rq->mpwqe.umr_completed = 0;
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 {
 	__be16 wqe_ix_be;
 	u16 wqe_ix;
@@ ... @@
 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
 		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 
-		/* UMR WQE (if in progress) is always at wq->head */
-		if (rq->mpwqe.umr_in_progress)
-			rq->dealloc_wqe(rq, wq->head);
+		mlx5e_free_rx_in_progress_descs(rq);
 
 		while (!mlx5_wq_ll_is_empty(wq)) {
 			struct mlx5e_rx_wqe_ll *wqe;
@@ ... @@
 
 }
 
-static int mlx5e_open_rq(struct mlx5e_channel *c,
-			 struct mlx5e_params *params,
-			 struct mlx5e_rq_param *param,
-			 struct mlx5e_rq *rq)
+int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
+		  struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
 {
 	int err;
 
-	err = mlx5e_alloc_rq(c, params, param, rq);
+	err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
 	if (err)
 		return err;
 
@@ ... @@
 	if (err)
 		goto err_destroy_rq;
 
+	if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev))
+		__set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */
+
+	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
+		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
+
 	if (params->rx_dim_enabled)
 		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
@@ ... @@
 	 */
 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
 		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+
+	/* For CQE compression on striding RQ, use stride index provided by
+	 * HW if capability is supported.
+	 */
+	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
+	    MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
+		__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
 
 	return 0;
 
@@ ... @@
 	return err;
 }
 
-static void mlx5e_activate_rq(struct mlx5e_rq *rq)
+void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
-	struct mlx5e_icosq *sq = &rq->channel->icosq;
-	struct mlx5_wq_cyc *wq = &sq->wq;
-	struct mlx5e_tx_wqe *nopwqe;
-
-	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
 	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
-	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
-	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+	mlx5e_trigger_irq(&rq->channel->icosq);
 }
 
-static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
+void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 {
 	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
 }
 
-static void mlx5e_close_rq(struct mlx5e_rq *rq)
+void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
 	cancel_work_sync(&rq->dim.work);
+	cancel_work_sync(&rq->channel->icosq.recover_work);
+	cancel_work_sync(&rq->recover_work);
 	mlx5e_destroy_rq(rq);
 	mlx5e_free_rx_descs(rq);
 	mlx5e_free_rq(rq);
@@ ... @@
 
 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
 {
-	kvfree(sq->db.xdpi);
+	kvfree(sq->db.xdpi_fifo.xi);
+	kvfree(sq->db.wqe_info);
+}
+
+static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
+{
+	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+	int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+	xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
+				      GFP_KERNEL, numa);
+	if (!xdpi_fifo->xi)
+		return -ENOMEM;
+
+	xdpi_fifo->pc = &sq->xdpi_fifo_pc;
+	xdpi_fifo->cc = &sq->xdpi_fifo_cc;
+	xdpi_fifo->mask = dsegs_per_wq - 1;
+
+	return 0;
 }
 
 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
 	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+	int err;
 
-	sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
-				    GFP_KERNEL, numa);
-	if (!sq->db.xdpi) {
-		mlx5e_free_xdpsq_db(sq);
+	sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
+					GFP_KERNEL, numa);
+	if (!sq->db.wqe_info)
 		return -ENOMEM;
+
+	err = mlx5e_alloc_xdpsq_fifo(sq, numa);
+	if (err) {
+		mlx5e_free_xdpsq_db(sq);
+		return err;
 	}
 
 	return 0;
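mlx5e_alloc_xdpsq_fifo() above wires up free-running producer/consumer counters and a power-of-two mask, so entries are addressed as xi[pc & mask] with no explicit wraparound handling. A self-contained sketch of the scheme — types, names, and sizes are illustrative, not the driver's API:

```c
/* Ring FIFO with free-running pc/cc counters and a power-of-two mask.
 * Callers are expected to check fifo_used() against capacity before
 * pushing, mirroring the driver's convention.
 */
#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE 8 /* must be a power of two */

struct fifo {
	int xi[FIFO_SIZE];
	uint16_t pc;   /* producer counter, increments forever */
	uint16_t cc;   /* consumer counter, increments forever */
	uint16_t mask; /* FIFO_SIZE - 1 */
};

static void fifo_push(struct fifo *f, int v)
{
	f->xi[f->pc++ & f->mask] = v;
}

static int fifo_pop(struct fifo *f)
{
	return f->xi[f->cc++ & f->mask];
}

static int fifo_used(const struct fifo *f)
{
	return (uint16_t)(f->pc - f->cc); /* correct across wraparound */
}

int main(void)
{
	struct fifo f = { .mask = FIFO_SIZE - 1 };

	fifo_push(&f, 1);
	fifo_push(&f, 2);
	printf("used=%d first=%d\n", fifo_used(&f), fifo_pop(&f));
	return 0;
}
```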
@@ ... @@
 
 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 			     struct mlx5e_params *params,
+			     struct xsk_buff_pool *xsk_pool,
 			     struct mlx5e_sq_param *param,
 			     struct mlx5e_xdpsq *sq,
 			     bool is_redirect)
@@ ... @@
 	sq->channel = c;
 	sq->uar_map = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
-	sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-	sq->stats = is_redirect ?
-		&c->priv->channel_stats[c->ix].xdpsq :
-		&c->priv->channel_stats[c->ix].rq_xdpsq;
+	sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
+	sq->xsk_pool = xsk_pool;
+
+	sq->stats = sq->xsk_pool ?
+		&c->priv->channel_stats[c->ix].xsksq :
+		is_redirect ?
+			&c->priv->channel_stats[c->ix].xdpsq :
+			&c->priv->channel_stats[c->ix].rq_xdpsq;
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ ... @@
 
 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
 {
-	kvfree(sq->db.ico_wqe);
+	kvfree(sq->db.wqe_info);
 }
 
 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
 {
-	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+	size_t size;
 
-	sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
-						  sizeof(*sq->db.ico_wqe)),
-				       GFP_KERNEL, numa);
-	if (!sq->db.ico_wqe)
+	size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
+	sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
+	if (!sq->db.wqe_info)
 		return -ENOMEM;
 
 	return 0;
 }
 
+static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+					      recover_work);
+
+	mlx5e_reporter_icosq_cqe_err(sq);
+}
+
+static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+					      recover_work);
+
+	/* Not implemented yet. */
+
+	netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
+}
+
 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 			     struct mlx5e_sq_param *param,
-			     struct mlx5e_icosq *sq)
+			     struct mlx5e_icosq *sq,
+			     work_func_t recover_work_func)
 {
 	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
@@ ... @@
 	if (err)
 		goto err_sq_wq_destroy;
 
+	INIT_WORK(&sq->recover_work, recover_work_func);
+
 	return 0;
 
 err_sq_wq_destroy:
@@ ... @@
 static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
 {
 	kvfree(sq->db.wqe_info);
+	kvfree(sq->db.skb_fifo);
 	kvfree(sq->db.dma_fifo);
 }
 
@@ ... @@
 	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
 					sizeof(*sq->db.dma_fifo)),
 					GFP_KERNEL, numa);
+	sq->db.skb_fifo = kvzalloc_node(array_size(df_sz,
+					sizeof(*sq->db.skb_fifo)),
+					GFP_KERNEL, numa);
 	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
 					sizeof(*sq->db.wqe_info)),
 					GFP_KERNEL, numa);
-	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
+	if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) {
 		mlx5e_free_txqsq_db(sq);
 		return -ENOMEM;
 	}
 
 	sq->dma_fifo_mask = df_sz - 1;
+	sq->skb_fifo_mask = df_sz - 1;
 
 	return 0;
 }
 
-static void mlx5e_sq_recover(struct work_struct *work);
+static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
+{
+	int sq_size = 1 << log_sq_size;
+
+	sq->stop_room = mlx5e_tls_get_stop_room(sq);
+	sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state))
+		/* A MPWQE can take up to the maximum-sized WQE + all the normal
+		 * stop room can be taken if a new packet breaks the active
+		 * MPWQE session and allocates its WQEs right away.
+		 */
+		sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
+	if (WARN_ON(sq->stop_room >= sq_size)) {
+		netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
+			   sq->stop_room, sq_size);
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
+static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 			     int txq_ix,
 			     struct mlx5e_params *params,
@@ ... @@
 	sq->clock = &mdev->clock;
 	sq->mkey_be = c->mkey_be;
 	sq->channel = c;
+	sq->ch_ix = c->ix;
 	sq->txq_ix = txq_ix;
 	sq->uar_map = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
+	sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
-	INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
+	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
+	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
 	if (MLX5_IPSEC_DEV(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 	if (mlx5_accel_is_tls_device(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
+	if (param->is_mpw)
+		set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
+	err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
+	if (err)
+		return err;
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ ... @@
 	return err;
 }
 
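mlx5e_calc_sq_stop_room(), added above, reserves worst-case WQE space up front; the TX path then stops the queue whenever free slots fall below that reserve, so a maximal descriptor always fits. A userspace sketch of the check under the same free-running counter convention — the sizes and names are made up:

```c
/* "Stop room": reserve worst-case space and refuse to produce while
 * free slots are below the reserve plus the current request.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SQ_SIZE  64 /* power of two */
#define MAX_DESC 16 /* worst-case slots one packet may consume */

struct sq {
	uint16_t pc;        /* producer counter */
	uint16_t cc;        /* completion (consumer) counter */
	uint16_t stop_room; /* reserved slots, e.g. one max descriptor */
};

static bool sq_has_room_for(const struct sq *sq, uint16_t n)
{
	uint16_t used = sq->pc - sq->cc; /* valid across wraparound */

	return SQ_SIZE - used >= n + sq->stop_room;
}

int main(void)
{
	struct sq sq = { .stop_room = MAX_DESC };

	printf("can post 1: %d\n", sq_has_room_for(&sq, 1));
	sq.pc = SQ_SIZE - MAX_DESC; /* queue nearly full */
	printf("can post 1 near full: %d\n", sq_has_room_for(&sq, 1));
	return 0;
}
```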
-struct mlx5e_modify_sq_param {
-	int curr_state;
-	int next_state;
-	bool rl_update;
-	int rl_index;
-};
-
-static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
-			   struct mlx5e_modify_sq_param *p)
+int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
+		    struct mlx5e_modify_sq_param *p)
 {
 	void *in;
 	void *sqc;
@@ ... @@
 		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
 	}
 
-	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
+	err = mlx5_core_modify_sq(mdev, sqn, in);
 
 	kvfree(in);
 
@@ ... @@
 	return 0;
 
 err_free_txqsq:
-	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	mlx5e_free_txqsq(sq);
 
 	return err;
 }
 
-static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
-{
-	WARN_ONCE(sq->cc != sq->pc,
-		  "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
-		  sq->sqn, sq->cc, sq->pc);
-	sq->cc = 0;
-	sq->dma_fifo_cc = 0;
-	sq->pc = 0;
-}
-
-static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
 {
 	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
-	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	netdev_tx_reset_queue(sq->txq);
 	netif_tx_start_queue(sq->txq);
 }
 
-static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+void mlx5e_tx_disable_queue(struct netdev_queue *txq)
 {
 	__netif_tx_lock_bh(txq);
 	netif_tx_stop_queue(txq);
@@ ... @@
 
 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 {
-	struct mlx5e_channel *c = sq->channel;
 	struct mlx5_wq_cyc *wq = &sq->wq;
 
 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-	/* prevent netif_tx_wake_queue */
-	napi_synchronize(&c->napi);
+	synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
 
-	netif_tx_disable_queue(sq->txq);
+	mlx5e_tx_disable_queue(sq->txq);
 
 	/* last doorbell out, godspeed .. */
 	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
 		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 		struct mlx5e_tx_wqe *nop;
 
-		sq->db.wqe_info[pi].skb = NULL;
+		sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
+			.num_wqebbs = 1,
+		};
+
 		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
 	}
@@ ... @@
 	struct mlx5_rate_limit rl = {0};
 
 	cancel_work_sync(&sq->dim.work);
+	cancel_work_sync(&sq->recover_work);
 	mlx5e_destroy_sq(mdev, sq->sqn);
 	if (sq->rate_limit) {
 		rl.rate = sq->rate_limit;
@@ ... @@
 	mlx5e_free_txqsq(sq);
 }
 
-static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
+static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
 {
-	unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+	struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
+					      recover_work);
 
-	while (time_before(jiffies, exp_time)) {
-		if (sq->cc == sq->pc)
-			return 0;
-
-		msleep(20);
-	}
-
-	netdev_err(sq->channel->netdev,
-		   "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
-		   sq->sqn, sq->cc, sq->pc);
-
-	return -ETIMEDOUT;
+	mlx5e_reporter_tx_err_cqe(sq);
 }
 
1424 | 1436 | |
---|
1425 | | -static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) |
---|
1426 | | -{ |
---|
1427 | | - struct mlx5_core_dev *mdev = sq->channel->mdev; |
---|
1428 | | - struct net_device *dev = sq->channel->netdev; |
---|
1429 | | - struct mlx5e_modify_sq_param msp = {0}; |
---|
1430 | | - int err; |
---|
1431 | | - |
---|
1432 | | - msp.curr_state = curr_state; |
---|
1433 | | - msp.next_state = MLX5_SQC_STATE_RST; |
---|
1434 | | - |
---|
1435 | | - err = mlx5e_modify_sq(mdev, sq->sqn, &msp); |
---|
1436 | | - if (err) { |
---|
1437 | | - netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); |
---|
1438 | | - return err; |
---|
1439 | | - } |
---|
1440 | | - |
---|
1441 | | - memset(&msp, 0, sizeof(msp)); |
---|
1442 | | - msp.curr_state = MLX5_SQC_STATE_RST; |
---|
1443 | | - msp.next_state = MLX5_SQC_STATE_RDY; |
---|
1444 | | - |
---|
1445 | | - err = mlx5e_modify_sq(mdev, sq->sqn, &msp); |
---|
1446 | | - if (err) { |
---|
1447 | | - netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); |
---|
1448 | | - return err; |
---|
1449 | | - } |
---|
1450 | | - |
---|
1451 | | - return 0; |
---|
1452 | | -} |
---|
1453 | | - |
---|
1454 | | -static void mlx5e_sq_recover(struct work_struct *work) |
---|
1455 | | -{ |
---|
1456 | | - struct mlx5e_txqsq_recover *recover = |
---|
1457 | | - container_of(work, struct mlx5e_txqsq_recover, |
---|
1458 | | - recover_work); |
---|
1459 | | - struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq, |
---|
1460 | | - recover); |
---|
1461 | | - struct mlx5_core_dev *mdev = sq->channel->mdev; |
---|
1462 | | - struct net_device *dev = sq->channel->netdev; |
---|
1463 | | - u8 state; |
---|
1464 | | - int err; |
---|
1465 | | - |
---|
1466 | | - err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); |
---|
1467 | | - if (err) { |
---|
1468 | | - netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", |
---|
1469 | | - sq->sqn, err); |
---|
1470 | | - return; |
---|
1471 | | - } |
---|
1472 | | - |
---|
1473 | | - if (state != MLX5_RQC_STATE_ERR) { |
---|
1474 | | - netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); |
---|
1475 | | - return; |
---|
1476 | | - } |
---|
1477 | | - |
---|
1478 | | - netif_tx_disable_queue(sq->txq); |
---|
1479 | | - |
---|
1480 | | - if (mlx5e_wait_for_sq_flush(sq)) |
---|
1481 | | - return; |
---|
1482 | | - |
---|
1483 | | - /* If the interval between two consecutive recovers per SQ is too |
---|
1484 | | - * short, don't recover to avoid infinite loop of ERR_CQE -> recover. |
---|
1485 | | - * If we reached this state, there is probably a bug that needs to be |
---|
1486 | | - * fixed. let's keep the queue close and let tx timeout cleanup. |
---|
1487 | | - */ |
---|
1488 | | - if (jiffies_to_msecs(jiffies - recover->last_recover) < |
---|
1489 | | - MLX5E_SQ_RECOVER_MIN_INTERVAL) { |
---|
1490 | | - netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n", |
---|
1491 | | - sq->sqn); |
---|
1492 | | - return; |
---|
1493 | | - } |
---|
1494 | | - |
---|
1495 | | - /* At this point, no new packets will arrive from the stack as TXQ is |
---|
1496 | | - * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all |
---|
1497 | | - * pending WQEs. SQ can safely reset the SQ. |
---|
1498 | | - */ |
---|
1499 | | - if (mlx5e_sq_to_ready(sq, state)) |
---|
1500 | | - return; |
---|
1501 | | - |
---|
1502 | | - mlx5e_reset_txqsq_cc_pc(sq); |
---|
1503 | | - sq->stats->recover++; |
---|
1504 | | - recover->last_recover = jiffies; |
---|
1505 | | - mlx5e_activate_txqsq(sq); |
---|
1506 | | -} |
---|
1507 | | - |
---|
1508 | | -static int mlx5e_open_icosq(struct mlx5e_channel *c, |
---|
1509 | | - struct mlx5e_params *params, |
---|
1510 | | - struct mlx5e_sq_param *param, |
---|
1511 | | - struct mlx5e_icosq *sq) |
---|
| 1437 | +static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, |
---|
| 1438 | + struct mlx5e_sq_param *param, struct mlx5e_icosq *sq, |
---|
| 1439 | + work_func_t recover_work_func) |
---|
1512 | 1440 | { |
---|
1513 | 1441 | struct mlx5e_create_sq_param csp = {}; |
---|
1514 | 1442 | int err; |
---|
1515 | 1443 | |
---|
1516 | | - err = mlx5e_alloc_icosq(c, param, sq); |
---|
| 1444 | + err = mlx5e_alloc_icosq(c, param, sq, recover_work_func); |
---|
1517 | 1445 | if (err) |
---|
1518 | 1446 | return err; |
---|
1519 | 1447 | |
---|
1520 | 1448 | csp.cqn = sq->cq.mcq.cqn; |
---|
1521 | 1449 | csp.wq_ctrl = &sq->wq_ctrl; |
---|
1522 | 1450 | csp.min_inline_mode = params->tx_min_inline_mode; |
---|
1523 | | - set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
---|
1524 | 1451 | err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); |
---|
1525 | 1452 | if (err) |
---|
1526 | 1453 | goto err_free_icosq; |
---|
.. | .. |
---|
1528 | 1455 | return 0; |
---|
1529 | 1456 | |
---|
1530 | 1457 | err_free_icosq: |
---|
1531 | | - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
---|
1532 | 1458 | mlx5e_free_icosq(sq); |
---|
1533 | 1459 | |
---|
1534 | 1460 | return err; |
---|
| 1461 | +} |
---|
| 1462 | + |
---|
| 1463 | +void mlx5e_activate_icosq(struct mlx5e_icosq *icosq) |
---|
| 1464 | +{ |
---|
| 1465 | + set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); |
---|
| 1466 | +} |
---|
| 1467 | + |
---|
| 1468 | +void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq) |
---|
| 1469 | +{ |
---|
| 1470 | + clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); |
---|
| 1471 | + synchronize_net(); /* Sync with NAPI. */ |
---|
1535 | 1472 | } |
---|
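The pair of helpers above splits icosq lifetime management out of open/close: activation is just setting MLX5E_SQ_STATE_ENABLED, and deactivation clears the bit and waits out concurrent NAPI pollers before descriptors can be freed. A minimal userspace sketch of that quiesce pattern, using an in-flight counter as a stand-in for synchronize_net() (an assumption for the demo; the kernel relies on RCU grace periods, not a counter):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool sq_enabled;
static atomic_int  pollers_inflight;

static void *napi_poll(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		atomic_fetch_add(&pollers_inflight, 1);
		if (atomic_load(&sq_enabled))
			; /* would post/complete ICOSQ WQEs here */
		atomic_fetch_sub(&pollers_inflight, 1);
	}
	return NULL;
}

static void deactivate_icosq(void)
{
	atomic_store(&sq_enabled, false);	/* clear_bit(ENABLED) */
	while (atomic_load(&pollers_inflight))	/* "synchronize" with pollers */
		usleep(100);
	/* any poller that starts from here on sees the bit cleared */
}

int main(void)
{
	pthread_t t;

	atomic_store(&sq_enabled, true);
	pthread_create(&t, NULL, napi_poll, NULL);
	deactivate_icosq();
	pthread_join(&t, NULL);
	puts("icosq quiesced; safe to free descriptors");
	return 0;
}
```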
1536 | 1473 | |
---|
1537 | 1474 | static void mlx5e_close_icosq(struct mlx5e_icosq *sq) |
---|
1538 | 1475 | { |
---|
1539 | 1476 | struct mlx5e_channel *c = sq->channel; |
---|
1540 | 1477 | |
---|
1541 | | - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
---|
1542 | | - napi_synchronize(&c->napi); |
---|
1543 | | - |
---|
1544 | 1478 | mlx5e_destroy_sq(c->mdev, sq->sqn); |
---|
| 1479 | + mlx5e_free_icosq_descs(sq); |
---|
1545 | 1480 | mlx5e_free_icosq(sq); |
---|
1546 | 1481 | } |
---|
1547 | 1482 | |
---|
1548 | | -static int mlx5e_open_xdpsq(struct mlx5e_channel *c, |
---|
1549 | | - struct mlx5e_params *params, |
---|
1550 | | - struct mlx5e_sq_param *param, |
---|
1551 | | - struct mlx5e_xdpsq *sq, |
---|
1552 | | - bool is_redirect) |
---|
| 1483 | +int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, |
---|
| 1484 | + struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, |
---|
| 1485 | + struct mlx5e_xdpsq *sq, bool is_redirect) |
---|
1553 | 1486 | { |
---|
1554 | | - unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; |
---|
1555 | 1487 | struct mlx5e_create_sq_param csp = {}; |
---|
1556 | | - unsigned int inline_hdr_sz = 0; |
---|
1557 | 1488 | int err; |
---|
1558 | | - int i; |
---|
1559 | 1489 | |
---|
1560 | | - err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect); |
---|
| 1490 | + err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect); |
---|
1561 | 1491 | if (err) |
---|
1562 | 1492 | return err; |
---|
1563 | 1493 | |
---|
1564 | 1494 | csp.tis_lst_sz = 1; |
---|
1565 | | - csp.tisn = c->priv->tisn[0]; /* tc = 0 */ |
---|
| 1495 | + csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */ |
---|
1566 | 1496 | csp.cqn = sq->cq.mcq.cqn; |
---|
1567 | 1497 | csp.wq_ctrl = &sq->wq_ctrl; |
---|
1568 | 1498 | csp.min_inline_mode = sq->min_inline_mode; |
---|
1569 | | - if (is_redirect) |
---|
1570 | | - set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); |
---|
1571 | 1499 | set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
---|
1572 | 1500 | err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); |
---|
1573 | 1501 | if (err) |
---|
1574 | 1502 | goto err_free_xdpsq; |
---|
1575 | 1503 | |
---|
1576 | | - if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { |
---|
1577 | | - inline_hdr_sz = MLX5E_XDP_MIN_INLINE; |
---|
1578 | | - ds_cnt++; |
---|
1579 | | - } |
---|
| 1504 | + mlx5e_set_xmit_fp(sq, param->is_mpw); |
---|
1580 | 1505 | |
---|
1581 | | - /* Pre initialize fixed WQE fields */ |
---|
1582 | | - for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { |
---|
1583 | | - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); |
---|
1584 | | - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; |
---|
1585 | | - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; |
---|
1586 | | - struct mlx5_wqe_data_seg *dseg; |
---|
| 1506 | + if (!param->is_mpw) { |
---|
| 1507 | + unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; |
---|
| 1508 | + unsigned int inline_hdr_sz = 0; |
---|
| 1509 | + int i; |
---|
1587 | 1510 | |
---|
1588 | | - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); |
---|
1589 | | - eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); |
---|
| 1511 | + if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { |
---|
| 1512 | + inline_hdr_sz = MLX5E_XDP_MIN_INLINE; |
---|
| 1513 | + ds_cnt++; |
---|
| 1514 | + } |
---|
1590 | 1515 | |
---|
1591 | | - dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1); |
---|
1592 | | - dseg->lkey = sq->mkey_be; |
---|
| 1516 | + /* Pre-initialize fixed WQE fields */
---|
| 1517 | + for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { |
---|
| 1518 | + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); |
---|
| 1519 | + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; |
---|
| 1520 | + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; |
---|
| 1521 | + struct mlx5_wqe_data_seg *dseg; |
---|
| 1522 | + |
---|
| 1523 | + sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) { |
---|
| 1524 | + .num_wqebbs = 1, |
---|
| 1525 | + .num_pkts = 1, |
---|
| 1526 | + }; |
---|
| 1527 | + |
---|
| 1528 | + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); |
---|
| 1529 | + eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); |
---|
| 1530 | + |
---|
| 1531 | + dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1); |
---|
| 1532 | + dseg->lkey = sq->mkey_be; |
---|
| 1533 | + } |
---|
1593 | 1534 | } |
---|
1594 | 1535 | |
---|
1595 | 1536 | return 0; |
---|
.. | .. |
---|
1601 | 1542 | return err; |
---|
1602 | 1543 | } |
---|
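When the XDP SQ is not in MPWQE mode, the hunk above pre-fills the invariant parts of every WQE slot once at open time so the transmit path only writes per-packet fields. A self-contained sketch of that ring pre-initialization idea; the toy_wqe layout, field widths, and values are invented (the real mlx5 WQE uses big-endian control segments):

```c
#include <stdio.h>

#define RING_SZ 8

struct toy_wqe {
	unsigned       qpn_ds;	 /* invariant: SQ number + DS count */
	unsigned short inline_sz; /* invariant: inline header size */
	unsigned long long addr;  /* per-packet */
	unsigned       len;	 /* per-packet */
};

int main(void)
{
	struct toy_wqe ring[RING_SZ];
	unsigned sqn = 0x2a, ds_cnt = 3; /* ctrl + eth + data segments */

	/* setup path: fill the fields that never change per slot */
	for (int i = 0; i < RING_SZ; i++)
		ring[i] = (struct toy_wqe) {
			.qpn_ds    = (sqn << 8) | ds_cnt,
			.inline_sz = 16,
		};

	/* hot path: touch only the per-packet fields */
	ring[0].addr = 0xdeadbeefULL;
	ring[0].len  = 64;
	printf("wqe0 qpn_ds=%#x len=%u\n", ring[0].qpn_ds, ring[0].len);
	return 0;
}
```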
1603 | 1544 | |
---|
1604 | | -static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) |
---|
| 1545 | +void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) |
---|
1605 | 1546 | { |
---|
1606 | 1547 | struct mlx5e_channel *c = sq->channel; |
---|
1607 | 1548 | |
---|
1608 | 1549 | clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
---|
1609 | | - napi_synchronize(&c->napi); |
---|
| 1550 | + synchronize_net(); /* Sync with NAPI. */ |
---|
1610 | 1551 | |
---|
1611 | 1552 | mlx5e_destroy_sq(c->mdev, sq->sqn); |
---|
1612 | 1553 | mlx5e_free_xdpsq_descs(sq); |
---|
.. | .. |
---|
1618 | 1559 | struct mlx5e_cq *cq) |
---|
1619 | 1560 | { |
---|
1620 | 1561 | struct mlx5_core_cq *mcq = &cq->mcq; |
---|
1621 | | - int eqn_not_used; |
---|
1622 | | - unsigned int irqn; |
---|
1623 | 1562 | int err; |
---|
1624 | 1563 | u32 i; |
---|
1625 | | - |
---|
1626 | | - err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); |
---|
1627 | | - if (err) |
---|
1628 | | - return err; |
---|
1629 | 1564 | |
---|
1630 | 1565 | err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, |
---|
1631 | 1566 | &cq->wq_ctrl); |
---|
.. | .. |
---|
1640 | 1575 | mcq->vector = param->eq_ix; |
---|
1641 | 1576 | mcq->comp = mlx5e_completion_event; |
---|
1642 | 1577 | mcq->event = mlx5e_cq_error_event; |
---|
1643 | | - mcq->irqn = irqn; |
---|
1644 | 1578 | |
---|
1645 | 1579 | for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { |
---|
1646 | 1580 | struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); |
---|
.. | .. |
---|
1679 | 1613 | |
---|
1680 | 1614 | static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) |
---|
1681 | 1615 | { |
---|
| 1616 | + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; |
---|
1682 | 1617 | struct mlx5_core_dev *mdev = cq->mdev; |
---|
1683 | 1618 | struct mlx5_core_cq *mcq = &cq->mcq; |
---|
1684 | 1619 | |
---|
1685 | 1620 | void *in; |
---|
1686 | 1621 | void *cqc; |
---|
1687 | 1622 | int inlen; |
---|
1688 | | - unsigned int irqn_not_used; |
---|
1689 | 1623 | int eqn; |
---|
1690 | 1624 | int err; |
---|
1691 | 1625 | |
---|
1692 | | - err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); |
---|
| 1626 | + err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn); |
---|
1693 | 1627 | if (err) |
---|
1694 | 1628 | return err; |
---|
1695 | 1629 | |
---|
.. | .. |
---|
1713 | 1647 | MLX5_ADAPTER_PAGE_SHIFT); |
---|
1714 | 1648 | MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); |
---|
1715 | 1649 | |
---|
1716 | | - err = mlx5_core_create_cq(mdev, mcq, in, inlen); |
---|
| 1650 | + err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); |
---|
1717 | 1651 | |
---|
1718 | 1652 | kvfree(in); |
---|
1719 | 1653 | |
---|
.. | .. |
---|
1730 | 1664 | mlx5_core_destroy_cq(cq->mdev, &cq->mcq); |
---|
1731 | 1665 | } |
---|
1732 | 1666 | |
---|
1733 | | -static int mlx5e_open_cq(struct mlx5e_channel *c, |
---|
1734 | | - struct net_dim_cq_moder moder, |
---|
1735 | | - struct mlx5e_cq_param *param, |
---|
1736 | | - struct mlx5e_cq *cq) |
---|
| 1667 | +int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, |
---|
| 1668 | + struct mlx5e_cq_param *param, struct mlx5e_cq *cq) |
---|
1737 | 1669 | { |
---|
1738 | 1670 | struct mlx5_core_dev *mdev = c->mdev; |
---|
1739 | 1671 | int err; |
---|
.. | .. |
---|
1756 | 1688 | return err; |
---|
1757 | 1689 | } |
---|
1758 | 1690 | |
---|
1759 | | -static void mlx5e_close_cq(struct mlx5e_cq *cq) |
---|
| 1691 | +void mlx5e_close_cq(struct mlx5e_cq *cq) |
---|
1760 | 1692 | { |
---|
1761 | 1693 | mlx5e_destroy_cq(cq); |
---|
1762 | 1694 | mlx5e_free_cq(cq); |
---|
1763 | | -} |
---|
1764 | | - |
---|
1765 | | -static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) |
---|
1766 | | -{ |
---|
1767 | | - return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask); |
---|
1768 | 1695 | } |
---|
1769 | 1696 | |
---|
1770 | 1697 | static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, |
---|
.. | .. |
---|
1776 | 1703 | |
---|
1777 | 1704 | for (tc = 0; tc < c->num_tc; tc++) { |
---|
1778 | 1705 | err = mlx5e_open_cq(c, params->tx_cq_moderation, |
---|
1779 | | - &cparam->tx_cq, &c->sq[tc].cq); |
---|
| 1706 | + &cparam->txq_sq.cqp, &c->sq[tc].cq); |
---|
1780 | 1707 | if (err) |
---|
1781 | 1708 | goto err_close_tx_cqs; |
---|
1782 | 1709 | } |
---|
.. | .. |
---|
1802 | 1729 | struct mlx5e_params *params, |
---|
1803 | 1730 | struct mlx5e_channel_param *cparam) |
---|
1804 | 1731 | { |
---|
1805 | | - struct mlx5e_priv *priv = c->priv; |
---|
1806 | | - int err, tc, max_nch = priv->profile->max_nch(priv->mdev); |
---|
| 1732 | + int err, tc; |
---|
1807 | 1733 | |
---|
1808 | 1734 | for (tc = 0; tc < params->num_tc; tc++) { |
---|
1809 | | - int txq_ix = c->ix + tc * max_nch; |
---|
| 1735 | + int txq_ix = c->ix + tc * params->num_channels; |
---|
1810 | 1736 | |
---|
1811 | | - err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, |
---|
1812 | | - params, &cparam->sq, &c->sq[tc], tc); |
---|
| 1737 | + err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, |
---|
| 1738 | + params, &cparam->txq_sq, &c->sq[tc], tc); |
---|
1813 | 1739 | if (err) |
---|
1814 | 1740 | goto err_close_sqs; |
---|
1815 | 1741 | } |
---|
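The loop above maps (channel, tc) pairs onto TXQ indices with a stride of params->num_channels rather than the profile's maximum channel count, which keeps queue numbering dense for the currently open channel set. A tiny demo of the resulting layout, where each TC occupies a contiguous block of num_channels queues:

```c
#include <stdio.h>

int main(void)
{
	int num_channels = 4, num_tc = 2;

	for (int ix = 0; ix < num_channels; ix++)
		for (int tc = 0; tc < num_tc; tc++)
			printf("channel %d tc %d -> txq %d\n",
			       ix, tc, ix + tc * num_channels);
	return 0;
}
```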
.. | .. |
---|
1912 | 1838 | return err; |
---|
1913 | 1839 | } |
---|
1914 | 1840 | |
---|
1915 | | -static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, |
---|
1916 | | - struct mlx5e_params *params, |
---|
1917 | | - struct mlx5e_channel_param *cparam, |
---|
1918 | | - struct mlx5e_channel **cp) |
---|
| 1841 | +static int mlx5e_open_queues(struct mlx5e_channel *c, |
---|
| 1842 | + struct mlx5e_params *params, |
---|
| 1843 | + struct mlx5e_channel_param *cparam) |
---|
1919 | 1844 | { |
---|
1920 | | - struct net_dim_cq_moder icocq_moder = {0, 0}; |
---|
1921 | | - struct net_device *netdev = priv->netdev; |
---|
1922 | | - int cpu = mlx5e_get_cpu(priv, ix); |
---|
1923 | | - struct mlx5e_channel *c; |
---|
1924 | | - unsigned int irq; |
---|
| 1845 | + struct dim_cq_moder icocq_moder = {0, 0}; |
---|
1925 | 1846 | int err; |
---|
1926 | | - int eqn; |
---|
1927 | 1847 | |
---|
1928 | | - err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); |
---|
| 1848 | + err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq); |
---|
1929 | 1849 | if (err) |
---|
1930 | 1850 | return err; |
---|
1931 | 1851 | |
---|
1932 | | - c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); |
---|
1933 | | - if (!c) |
---|
1934 | | - return -ENOMEM; |
---|
1935 | | - |
---|
1936 | | - c->priv = priv; |
---|
1937 | | - c->mdev = priv->mdev; |
---|
1938 | | - c->tstamp = &priv->tstamp; |
---|
1939 | | - c->ix = ix; |
---|
1940 | | - c->cpu = cpu; |
---|
1941 | | - c->pdev = &priv->mdev->pdev->dev; |
---|
1942 | | - c->netdev = priv->netdev; |
---|
1943 | | - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); |
---|
1944 | | - c->num_tc = params->num_tc; |
---|
1945 | | - c->xdp = !!params->xdp_prog; |
---|
1946 | | - c->stats = &priv->channel_stats[ix].ch; |
---|
1947 | | - |
---|
1948 | | - c->irq_desc = irq_to_desc(irq); |
---|
1949 | | - |
---|
1950 | | - netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); |
---|
1951 | | - |
---|
1952 | | - err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); |
---|
| 1852 | + err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq); |
---|
1953 | 1853 | if (err) |
---|
1954 | | - goto err_napi_del; |
---|
| 1854 | + goto err_close_async_icosq_cq; |
---|
1955 | 1855 | |
---|
1956 | 1856 | err = mlx5e_open_tx_cqs(c, params, cparam); |
---|
1957 | 1857 | if (err) |
---|
1958 | 1858 | goto err_close_icosq_cq; |
---|
1959 | 1859 | |
---|
1960 | | - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq); |
---|
| 1860 | + err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq); |
---|
1961 | 1861 | if (err) |
---|
1962 | 1862 | goto err_close_tx_cqs; |
---|
1963 | 1863 | |
---|
1964 | | - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); |
---|
| 1864 | + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq); |
---|
1965 | 1865 | if (err) |
---|
1966 | 1866 | goto err_close_xdp_tx_cqs; |
---|
1967 | 1867 | |
---|
1968 | | - /* XDP SQ CQ params are same as normal TXQ sq CQ params */ |
---|
1969 | 1868 | err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation, |
---|
1970 | | - &cparam->tx_cq, &c->rq.xdpsq.cq) : 0; |
---|
| 1869 | + &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0; |
---|
1971 | 1870 | if (err) |
---|
1972 | 1871 | goto err_close_rx_cq; |
---|
1973 | 1872 | |
---|
1974 | 1873 | napi_enable(&c->napi); |
---|
1975 | 1874 | |
---|
1976 | | - err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); |
---|
| 1875 | + spin_lock_init(&c->async_icosq_lock); |
---|
| 1876 | + |
---|
| 1877 | + err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq, |
---|
| 1878 | + mlx5e_async_icosq_err_cqe_work); |
---|
1977 | 1879 | if (err) |
---|
1978 | 1880 | goto err_disable_napi; |
---|
| 1881 | + |
---|
| 1882 | + err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq, |
---|
| 1883 | + mlx5e_icosq_err_cqe_work); |
---|
| 1884 | + if (err) |
---|
| 1885 | + goto err_close_async_icosq; |
---|
1979 | 1886 | |
---|
1980 | 1887 | err = mlx5e_open_sqs(c, params, cparam); |
---|
1981 | 1888 | if (err) |
---|
1982 | 1889 | goto err_close_icosq; |
---|
1983 | 1890 | |
---|
1984 | | - err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0; |
---|
1985 | | - if (err) |
---|
1986 | | - goto err_close_sqs; |
---|
| 1891 | + if (c->xdp) { |
---|
| 1892 | + err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, |
---|
| 1893 | + &c->rq_xdpsq, false); |
---|
| 1894 | + if (err) |
---|
| 1895 | + goto err_close_sqs; |
---|
| 1896 | + } |
---|
1987 | 1897 | |
---|
1988 | | - err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq); |
---|
| 1898 | + err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq); |
---|
1989 | 1899 | if (err) |
---|
1990 | 1900 | goto err_close_xdp_sq; |
---|
1991 | 1901 | |
---|
1992 | | - err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true); |
---|
| 1902 | + err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true); |
---|
1993 | 1903 | if (err) |
---|
1994 | 1904 | goto err_close_rq; |
---|
1995 | | - |
---|
1996 | | - *cp = c; |
---|
1997 | 1905 | |
---|
1998 | 1906 | return 0; |
---|
1999 | 1907 | |
---|
.. | .. |
---|
2002 | 1910 | |
---|
2003 | 1911 | err_close_xdp_sq: |
---|
2004 | 1912 | if (c->xdp) |
---|
2005 | | - mlx5e_close_xdpsq(&c->rq.xdpsq); |
---|
| 1913 | + mlx5e_close_xdpsq(&c->rq_xdpsq); |
---|
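Before tearing the SQ down, the code above posts a final NOP, but only if the cyclic work queue has room: producer (pc) and consumer (cc) are free-running counters whose difference survives u16 wraparound. A minimal sketch of that accounting, assuming a has-room predicate of the form below (the driver's actual helper may differ in detail):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool wqc_has_room_for(uint16_t sz, uint16_t cc, uint16_t pc, uint16_t n)
{
	/* entries in flight plus the new ones must fit in the ring */
	return (uint16_t)(pc - cc) + n <= sz;
}

int main(void)
{
	uint16_t sz = 8, cc = 0xfffe, pc = 0x0004; /* counters have wrapped */

	assert((uint16_t)(pc - cc) == 6);	   /* 6 WQEs outstanding */
	printf("room for 1 NOP: %s\n",
	       wqc_has_room_for(sz, cc, pc, 1) ? "yes" : "no");
	printf("room for 3:     %s\n",
	       wqc_has_room_for(sz, cc, pc, 3) ? "yes" : "no");
	return 0;
}
```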
2006 | 1914 | |
---|
2007 | 1915 | err_close_sqs: |
---|
2008 | 1916 | mlx5e_close_sqs(c); |
---|
.. | .. |
---|
2010 | 1918 | err_close_icosq: |
---|
2011 | 1919 | mlx5e_close_icosq(&c->icosq); |
---|
2012 | 1920 | |
---|
| 1921 | +err_close_async_icosq: |
---|
| 1922 | + mlx5e_close_icosq(&c->async_icosq); |
---|
| 1923 | + |
---|
2013 | 1924 | err_disable_napi: |
---|
2014 | 1925 | napi_disable(&c->napi); |
---|
| 1926 | + |
---|
2015 | 1927 | if (c->xdp) |
---|
2016 | | - mlx5e_close_cq(&c->rq.xdpsq.cq); |
---|
| 1928 | + mlx5e_close_cq(&c->rq_xdpsq.cq); |
---|
2017 | 1929 | |
---|
2018 | 1930 | err_close_rx_cq: |
---|
2019 | 1931 | mlx5e_close_cq(&c->rq.cq); |
---|
.. | .. |
---|
2027 | 1939 | err_close_icosq_cq: |
---|
2028 | 1940 | mlx5e_close_cq(&c->icosq.cq); |
---|
2029 | 1941 | |
---|
| 1942 | +err_close_async_icosq_cq: |
---|
| 1943 | + mlx5e_close_cq(&c->async_icosq.cq); |
---|
| 1944 | + |
---|
| 1945 | + return err; |
---|
| 1946 | +} |
---|
| 1947 | + |
---|
| 1948 | +static void mlx5e_close_queues(struct mlx5e_channel *c) |
---|
| 1949 | +{ |
---|
| 1950 | + mlx5e_close_xdpsq(&c->xdpsq); |
---|
| 1951 | + mlx5e_close_rq(&c->rq); |
---|
| 1952 | + if (c->xdp) |
---|
| 1953 | + mlx5e_close_xdpsq(&c->rq_xdpsq); |
---|
| 1954 | + mlx5e_close_sqs(c); |
---|
| 1955 | + mlx5e_close_icosq(&c->icosq); |
---|
| 1956 | + mlx5e_close_icosq(&c->async_icosq); |
---|
| 1957 | + napi_disable(&c->napi); |
---|
| 1958 | + if (c->xdp) |
---|
| 1959 | + mlx5e_close_cq(&c->rq_xdpsq.cq); |
---|
| 1960 | + mlx5e_close_cq(&c->rq.cq); |
---|
| 1961 | + mlx5e_close_cq(&c->xdpsq.cq); |
---|
| 1962 | + mlx5e_close_tx_cqs(c); |
---|
| 1963 | + mlx5e_close_cq(&c->icosq.cq); |
---|
| 1964 | + mlx5e_close_cq(&c->async_icosq.cq); |
---|
| 1965 | +} |
---|
| 1966 | + |
---|
| 1967 | +static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix) |
---|
| 1968 | +{ |
---|
| 1969 | + u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id); |
---|
| 1970 | + |
---|
| 1971 | + return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev); |
---|
| 1972 | +} |
---|
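mlx5e_enumerate_lag_port() spreads channels across LAG ports round-robin and biases virtual functions by their vhca_id so that different functions don't all pile onto port 0. A stand-alone rendition with made-up values:

```c
#include <stdio.h>

static int enumerate_lag_port(int is_pf, int vhca_id, int ix, int num_ports)
{
	int bias = is_pf ? 0 : vhca_id; /* PFs start at port 0, VFs are offset */

	return (ix + bias) % num_ports;
}

int main(void)
{
	for (int ix = 0; ix < 4; ix++)
		printf("PF ch%d -> port %d, VF(vhca 3) ch%d -> port %d\n",
		       ix, enumerate_lag_port(1, 0, ix, 2),
		       ix, enumerate_lag_port(0, 3, ix, 2));
	return 0;
}
```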
| 1973 | + |
---|
| 1974 | +static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, |
---|
| 1975 | + struct mlx5e_params *params, |
---|
| 1976 | + struct mlx5e_channel_param *cparam, |
---|
| 1977 | + struct xsk_buff_pool *xsk_pool, |
---|
| 1978 | + struct mlx5e_channel **cp) |
---|
| 1979 | +{ |
---|
| 1980 | + int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix)); |
---|
| 1981 | + struct net_device *netdev = priv->netdev; |
---|
| 1982 | + struct mlx5e_xsk_param xsk; |
---|
| 1983 | + struct mlx5e_channel *c; |
---|
| 1984 | + unsigned int irq; |
---|
| 1985 | + int err; |
---|
| 1986 | + |
---|
| 1987 | + err = mlx5_vector2irqn(priv->mdev, ix, &irq); |
---|
| 1988 | + if (err) |
---|
| 1989 | + return err; |
---|
| 1990 | + |
---|
| 1991 | + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); |
---|
| 1992 | + if (!c) |
---|
| 1993 | + return -ENOMEM; |
---|
| 1994 | + |
---|
| 1995 | + c->priv = priv; |
---|
| 1996 | + c->mdev = priv->mdev; |
---|
| 1997 | + c->tstamp = &priv->tstamp; |
---|
| 1998 | + c->ix = ix; |
---|
| 1999 | + c->cpu = cpu; |
---|
| 2000 | + c->pdev = mlx5_core_dma_dev(priv->mdev); |
---|
| 2001 | + c->netdev = priv->netdev; |
---|
| 2002 | + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); |
---|
| 2003 | + c->num_tc = params->num_tc; |
---|
| 2004 | + c->xdp = !!params->xdp_prog; |
---|
| 2005 | + c->stats = &priv->channel_stats[ix].ch; |
---|
| 2006 | + c->irq_desc = irq_to_desc(irq); |
---|
| 2007 | + c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); |
---|
| 2008 | + |
---|
| 2009 | + netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); |
---|
| 2010 | + |
---|
| 2011 | + err = mlx5e_open_queues(c, params, cparam); |
---|
| 2012 | + if (unlikely(err)) |
---|
| 2013 | + goto err_napi_del; |
---|
| 2014 | + |
---|
| 2015 | + if (xsk_pool) { |
---|
| 2016 | + mlx5e_build_xsk_param(xsk_pool, &xsk); |
---|
| 2017 | + err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c); |
---|
| 2018 | + if (unlikely(err)) |
---|
| 2019 | + goto err_close_queues; |
---|
| 2020 | + } |
---|
| 2021 | + |
---|
| 2022 | + *cp = c; |
---|
| 2023 | + |
---|
| 2024 | + return 0; |
---|
| 2025 | + |
---|
| 2026 | +err_close_queues: |
---|
| 2027 | + mlx5e_close_queues(c); |
---|
| 2028 | + |
---|
2030 | 2029 | err_napi_del: |
---|
2031 | 2030 | netif_napi_del(&c->napi); |
---|
| 2031 | + |
---|
2032 | 2032 | kvfree(c); |
---|
2033 | 2033 | |
---|
2034 | 2034 | return err; |
---|
.. | .. |
---|
2040 | 2040 | |
---|
2041 | 2041 | for (tc = 0; tc < c->num_tc; tc++) |
---|
2042 | 2042 | mlx5e_activate_txqsq(&c->sq[tc]); |
---|
| 2043 | + mlx5e_activate_icosq(&c->icosq); |
---|
| 2044 | + mlx5e_activate_icosq(&c->async_icosq); |
---|
2043 | 2045 | mlx5e_activate_rq(&c->rq); |
---|
2044 | | - netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix); |
---|
| 2046 | + |
---|
| 2047 | + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) |
---|
| 2048 | + mlx5e_activate_xsk(c); |
---|
2045 | 2049 | } |
---|
2046 | 2050 | |
---|
2047 | 2051 | static void mlx5e_deactivate_channel(struct mlx5e_channel *c) |
---|
2048 | 2052 | { |
---|
2049 | 2053 | int tc; |
---|
2050 | 2054 | |
---|
| 2055 | + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) |
---|
| 2056 | + mlx5e_deactivate_xsk(c); |
---|
| 2057 | + |
---|
2051 | 2058 | mlx5e_deactivate_rq(&c->rq); |
---|
| 2059 | + mlx5e_deactivate_icosq(&c->async_icosq); |
---|
| 2060 | + mlx5e_deactivate_icosq(&c->icosq); |
---|
2052 | 2061 | for (tc = 0; tc < c->num_tc; tc++) |
---|
2053 | 2062 | mlx5e_deactivate_txqsq(&c->sq[tc]); |
---|
2054 | 2063 | } |
---|
2055 | 2064 | |
---|
2056 | 2065 | static void mlx5e_close_channel(struct mlx5e_channel *c) |
---|
2057 | 2066 | { |
---|
2058 | | - mlx5e_close_xdpsq(&c->xdpsq); |
---|
2059 | | - mlx5e_close_rq(&c->rq); |
---|
2060 | | - if (c->xdp) |
---|
2061 | | - mlx5e_close_xdpsq(&c->rq.xdpsq); |
---|
2062 | | - mlx5e_close_sqs(c); |
---|
2063 | | - mlx5e_close_icosq(&c->icosq); |
---|
2064 | | - napi_disable(&c->napi); |
---|
2065 | | - if (c->xdp) |
---|
2066 | | - mlx5e_close_cq(&c->rq.xdpsq.cq); |
---|
2067 | | - mlx5e_close_cq(&c->rq.cq); |
---|
2068 | | - mlx5e_close_cq(&c->xdpsq.cq); |
---|
2069 | | - mlx5e_close_tx_cqs(c); |
---|
2070 | | - mlx5e_close_cq(&c->icosq.cq); |
---|
| 2067 | + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) |
---|
| 2068 | + mlx5e_close_xsk(c); |
---|
| 2069 | + mlx5e_close_queues(c); |
---|
2071 | 2070 | netif_napi_del(&c->napi); |
---|
2072 | 2071 | |
---|
2073 | 2072 | kvfree(c); |
---|
.. | .. |
---|
2077 | 2076 | |
---|
2078 | 2077 | static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, |
---|
2079 | 2078 | struct mlx5e_params *params, |
---|
| 2079 | + struct mlx5e_xsk_param *xsk, |
---|
2080 | 2080 | struct mlx5e_rq_frags_info *info) |
---|
2081 | 2081 | { |
---|
2082 | 2082 | u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); |
---|
.. | .. |
---|
2089 | 2089 | byte_count += MLX5E_METADATA_ETHER_LEN; |
---|
2090 | 2090 | #endif |
---|
2091 | 2091 | |
---|
2092 | | - if (mlx5e_rx_is_linear_skb(mdev, params)) { |
---|
| 2092 | + if (mlx5e_rx_is_linear_skb(params, xsk)) { |
---|
2093 | 2093 | int frag_stride; |
---|
2094 | 2094 | |
---|
2095 | | - frag_stride = mlx5e_rx_get_linear_frag_sz(params); |
---|
| 2095 | + frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk); |
---|
2096 | 2096 | frag_stride = roundup_pow_of_two(frag_stride); |
---|
2097 | 2097 | |
---|
2098 | 2098 | info->arr[0].frag_size = byte_count; |
---|
.. | .. |
---|
2143 | 2143 | return order_base_2(sz); |
---|
2144 | 2144 | } |
---|
2145 | 2145 | |
---|
2146 | | -static void mlx5e_build_rq_param(struct mlx5e_priv *priv, |
---|
2147 | | - struct mlx5e_params *params, |
---|
2148 | | - struct mlx5e_rq_param *param) |
---|
| 2146 | +static u8 mlx5e_get_rq_log_wq_sz(void *rqc) |
---|
| 2147 | +{ |
---|
| 2148 | + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); |
---|
| 2149 | + |
---|
| 2150 | + return MLX5_GET(wq, wq, log_wq_sz); |
---|
| 2151 | +} |
---|
| 2152 | + |
---|
| 2153 | +void mlx5e_build_rq_param(struct mlx5e_priv *priv, |
---|
| 2154 | + struct mlx5e_params *params, |
---|
| 2155 | + struct mlx5e_xsk_param *xsk, |
---|
| 2156 | + struct mlx5e_rq_param *param) |
---|
2149 | 2157 | { |
---|
2150 | 2158 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
2151 | 2159 | void *rqc = param->rqc; |
---|
.. | .. |
---|
2155 | 2163 | switch (params->rq_wq_type) { |
---|
2156 | 2164 | case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
---|
2157 | 2165 | MLX5_SET(wq, wq, log_wqe_num_of_strides, |
---|
2158 | | - mlx5e_mpwqe_get_log_num_strides(mdev, params) - |
---|
| 2166 | + mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) - |
---|
2159 | 2167 | MLX5_MPWQE_LOG_NUM_STRIDES_BASE); |
---|
2160 | 2168 | MLX5_SET(wq, wq, log_wqe_stride_size, |
---|
2161 | | - mlx5e_mpwqe_get_log_stride_size(mdev, params) - |
---|
| 2169 | + mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) - |
---|
2162 | 2170 | MLX5_MPWQE_LOG_STRIDE_SZ_BASE); |
---|
2163 | | - MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params)); |
---|
| 2171 | + MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk)); |
---|
2164 | 2172 | break; |
---|
2165 | 2173 | default: /* MLX5_WQ_TYPE_CYCLIC */ |
---|
2166 | 2174 | MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); |
---|
2167 | | - mlx5e_build_rq_frags_info(mdev, params, ¶m->frags_info); |
---|
| 2175 | + mlx5e_build_rq_frags_info(mdev, params, xsk, ¶m->frags_info); |
---|
2168 | 2176 | ndsegs = param->frags_info.num_frags; |
---|
2169 | 2177 | } |
---|
2170 | 2178 | |
---|
.. | .. |
---|
2177 | 2185 | MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); |
---|
2178 | 2186 | MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); |
---|
2179 | 2187 | |
---|
2180 | | - param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); |
---|
| 2188 | + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); |
---|
| 2189 | + mlx5e_build_rx_cq_param(priv, params, xsk, ¶m->cqp); |
---|
2181 | 2190 | } |
---|
2182 | 2191 | |
---|
2183 | 2192 | static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, |
---|
.. | .. |
---|
2192 | 2201 | mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); |
---|
2193 | 2202 | MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter); |
---|
2194 | 2203 | |
---|
2195 | | - param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); |
---|
| 2204 | + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); |
---|
2196 | 2205 | } |
---|
2197 | 2206 | |
---|
2198 | | -static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, |
---|
2199 | | - struct mlx5e_sq_param *param) |
---|
| 2207 | +void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, |
---|
| 2208 | + struct mlx5e_sq_param *param) |
---|
2200 | 2209 | { |
---|
2201 | 2210 | void *sqc = param->sqc; |
---|
2202 | 2211 | void *wq = MLX5_ADDR_OF(sqc, sqc, wq); |
---|
.. | .. |
---|
2204 | 2213 | MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); |
---|
2205 | 2214 | MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); |
---|
2206 | 2215 | |
---|
2207 | | - param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); |
---|
| 2216 | + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev)); |
---|
2208 | 2217 | } |
---|
2209 | 2218 | |
---|
2210 | 2219 | static void mlx5e_build_sq_param(struct mlx5e_priv *priv, |
---|
.. | .. |
---|
2213 | 2222 | { |
---|
2214 | 2223 | void *sqc = param->sqc; |
---|
2215 | 2224 | void *wq = MLX5_ADDR_OF(sqc, sqc, wq); |
---|
| 2225 | + bool allow_swp; |
---|
2216 | 2226 | |
---|
| 2227 | + allow_swp = mlx5_geneve_tx_allowed(priv->mdev) || |
---|
| 2228 | + !!MLX5_IPSEC_DEV(priv->mdev); |
---|
2217 | 2229 | mlx5e_build_sq_param_common(priv, param); |
---|
2218 | 2230 | MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); |
---|
2219 | | - MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev)); |
---|
| 2231 | + MLX5_SET(sqc, sqc, allow_swp, allow_swp); |
---|
| 2232 | + param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); |
---|
| 2233 | + mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); |
---|
2220 | 2234 | } |
---|
2221 | 2235 | |
---|
2222 | 2236 | static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, |
---|
.. | .. |
---|
2225 | 2239 | void *cqc = param->cqc; |
---|
2226 | 2240 | |
---|
2227 | 2241 | MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index); |
---|
| 2242 | + if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128) |
---|
| 2243 | + MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); |
---|
2228 | 2244 | } |
---|
2229 | 2245 | |
---|
2230 | | -static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, |
---|
2231 | | - struct mlx5e_params *params, |
---|
2232 | | - struct mlx5e_cq_param *param) |
---|
| 2246 | +void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, |
---|
| 2247 | + struct mlx5e_params *params, |
---|
| 2248 | + struct mlx5e_xsk_param *xsk, |
---|
| 2249 | + struct mlx5e_cq_param *param) |
---|
2233 | 2250 | { |
---|
2234 | 2251 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
| 2252 | + bool hw_stridx = false; |
---|
2235 | 2253 | void *cqc = param->cqc; |
---|
2236 | 2254 | u8 log_cq_size; |
---|
2237 | 2255 | |
---|
2238 | 2256 | switch (params->rq_wq_type) { |
---|
2239 | 2257 | case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
---|
2240 | | - log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) + |
---|
2241 | | - mlx5e_mpwqe_get_log_num_strides(mdev, params); |
---|
| 2258 | + log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) + |
---|
| 2259 | + mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); |
---|
| 2260 | + hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index); |
---|
2242 | 2261 | break; |
---|
2243 | 2262 | default: /* MLX5_WQ_TYPE_CYCLIC */ |
---|
2244 | 2263 | log_cq_size = params->log_rq_mtu_frames; |
---|
.. | .. |
---|
2246 | 2265 | |
---|
2247 | 2266 | MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); |
---|
2248 | 2267 | if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { |
---|
2249 | | - MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM); |
---|
| 2268 | + MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ? |
---|
| 2269 | + MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM); |
---|
2250 | 2270 | MLX5_SET(cqc, cqc, cqe_comp_en, 1); |
---|
2251 | 2271 | } |
---|
2252 | 2272 | |
---|
.. | .. |
---|
2254 | 2274 | param->cq_period_mode = params->rx_cq_moderation.cq_period_mode; |
---|
2255 | 2275 | } |
---|
2256 | 2276 | |
---|
2257 | | -static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, |
---|
2258 | | - struct mlx5e_params *params, |
---|
2259 | | - struct mlx5e_cq_param *param) |
---|
| 2277 | +void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, |
---|
| 2278 | + struct mlx5e_params *params, |
---|
| 2279 | + struct mlx5e_cq_param *param) |
---|
2260 | 2280 | { |
---|
2261 | 2281 | void *cqc = param->cqc; |
---|
2262 | 2282 | |
---|
.. | .. |
---|
2266 | 2286 | param->cq_period_mode = params->tx_cq_moderation.cq_period_mode; |
---|
2267 | 2287 | } |
---|
2268 | 2288 | |
---|
2269 | | -static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, |
---|
2270 | | - u8 log_wq_size, |
---|
2271 | | - struct mlx5e_cq_param *param) |
---|
| 2289 | +void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, |
---|
| 2290 | + u8 log_wq_size, |
---|
| 2291 | + struct mlx5e_cq_param *param) |
---|
2272 | 2292 | { |
---|
2273 | 2293 | void *cqc = param->cqc; |
---|
2274 | 2294 | |
---|
.. | .. |
---|
2276 | 2296 | |
---|
2277 | 2297 | mlx5e_build_common_cq_param(priv, param); |
---|
2278 | 2298 | |
---|
2279 | | - param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
---|
| 2299 | + param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
---|
2280 | 2300 | } |
---|
2281 | 2301 | |
---|
2282 | | -static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, |
---|
2283 | | - u8 log_wq_size, |
---|
2284 | | - struct mlx5e_sq_param *param) |
---|
| 2302 | +void mlx5e_build_icosq_param(struct mlx5e_priv *priv, |
---|
| 2303 | + u8 log_wq_size, |
---|
| 2304 | + struct mlx5e_sq_param *param) |
---|
2285 | 2305 | { |
---|
2286 | 2306 | void *sqc = param->sqc; |
---|
2287 | 2307 | void *wq = MLX5_ADDR_OF(sqc, sqc, wq); |
---|
.. | .. |
---|
2290 | 2310 | |
---|
2291 | 2311 | MLX5_SET(wq, wq, log_wq_sz, log_wq_size); |
---|
2292 | 2312 | MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); |
---|
| 2313 | + mlx5e_build_ico_cq_param(priv, log_wq_size, ¶m->cqp); |
---|
2293 | 2314 | } |
---|
2294 | 2315 | |
---|
2295 | | -static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, |
---|
2296 | | - struct mlx5e_params *params, |
---|
2297 | | - struct mlx5e_sq_param *param) |
---|
| 2316 | +void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, |
---|
| 2317 | + struct mlx5e_params *params, |
---|
| 2318 | + struct mlx5e_sq_param *param) |
---|
2298 | 2319 | { |
---|
2299 | 2320 | void *sqc = param->sqc; |
---|
2300 | 2321 | void *wq = MLX5_ADDR_OF(sqc, sqc, wq); |
---|
2301 | 2322 | |
---|
2302 | 2323 | mlx5e_build_sq_param_common(priv, param); |
---|
2303 | 2324 | MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); |
---|
| 2325 | + param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE); |
---|
| 2326 | + mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); |
---|
| 2327 | +} |
---|
| 2328 | + |
---|
| 2329 | +static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, |
---|
| 2330 | + struct mlx5e_rq_param *rqp) |
---|
| 2331 | +{ |
---|
| 2332 | + switch (params->rq_wq_type) { |
---|
| 2333 | + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
---|
| 2334 | + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, |
---|
| 2335 | + order_base_2(MLX5E_UMR_WQEBBS) + |
---|
| 2336 | + mlx5e_get_rq_log_wq_sz(rqp->rqc)); |
---|
| 2337 | + default: /* MLX5_WQ_TYPE_CYCLIC */ |
---|
| 2338 | + return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; |
---|
| 2339 | + } |
---|
| 2340 | +} |
---|
| 2341 | + |
---|
| 2342 | +static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev) |
---|
| 2343 | +{ |
---|
| 2344 | + if (netdev->hw_features & NETIF_F_HW_TLS_RX) |
---|
| 2345 | + return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; |
---|
| 2346 | + |
---|
| 2347 | + return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; |
---|
2304 | 2348 | } |
---|
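The new sizing helper ties ICOSQ depth to the RQ: with a striding RQ, the ICOSQ must be able to hold one UMR WQE per RQ entry, so its log size is the RQ's log size plus log2 of the WQEBBs a UMR occupies, floored at the driver minimum. A rough model, with both constants assumed for illustration:

```c
#include <stdio.h>

#define MIN_LOG_SQ_SIZE 6 /* assumed MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE */
#define LOG_UMR_WQEBBS  2 /* assumed order_base_2(MLX5E_UMR_WQEBBS) */

static unsigned icosq_log_wq_sz(int striding_rq, unsigned rq_log_wq_sz)
{
	unsigned sz;

	if (!striding_rq)
		return MIN_LOG_SQ_SIZE;

	sz = LOG_UMR_WQEBBS + rq_log_wq_sz; /* one UMR WQE per RQ entry */
	return sz > MIN_LOG_SQ_SIZE ? sz : MIN_LOG_SQ_SIZE;
}

int main(void)
{
	printf("cyclic RQ        -> log sz %u\n", icosq_log_wq_sz(0, 10));
	printf("striding RQ 2^3  -> log sz %u\n", icosq_log_wq_sz(1, 3));
	printf("striding RQ 2^10 -> log sz %u\n", icosq_log_wq_sz(1, 10));
	return 0;
}
```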
2305 | 2349 | |
---|
2306 | 2350 | static void mlx5e_build_channel_param(struct mlx5e_priv *priv, |
---|
2307 | 2351 | struct mlx5e_params *params, |
---|
2308 | 2352 | struct mlx5e_channel_param *cparam) |
---|
2309 | 2353 | { |
---|
2310 | | - u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; |
---|
| 2354 | + u8 icosq_log_wq_sz, async_icosq_log_wq_sz; |
---|
2311 | 2355 | |
---|
2312 | | - mlx5e_build_rq_param(priv, params, &cparam->rq); |
---|
2313 | | - mlx5e_build_sq_param(priv, params, &cparam->sq); |
---|
| 2356 | + mlx5e_build_rq_param(priv, params, NULL, &cparam->rq); |
---|
| 2357 | + |
---|
| 2358 | + icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq); |
---|
| 2359 | + async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev); |
---|
| 2360 | + |
---|
| 2361 | + mlx5e_build_sq_param(priv, params, &cparam->txq_sq); |
---|
2314 | 2362 | mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); |
---|
2315 | 2363 | mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq); |
---|
2316 | | - mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq); |
---|
2317 | | - mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq); |
---|
2318 | | - mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq); |
---|
| 2364 | + mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq); |
---|
2319 | 2365 | } |
---|
2320 | 2366 | |
---|
2321 | 2367 | int mlx5e_open_channels(struct mlx5e_priv *priv, |
---|
.. | .. |
---|
2334 | 2380 | |
---|
2335 | 2381 | mlx5e_build_channel_param(priv, &chs->params, cparam); |
---|
2336 | 2382 | for (i = 0; i < chs->num; i++) { |
---|
2337 | | - err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]); |
---|
| 2383 | + struct xsk_buff_pool *xsk_pool = NULL; |
---|
| 2384 | + |
---|
| 2385 | + if (chs->params.xdp_prog) |
---|
| 2386 | + xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i); |
---|
| 2387 | + |
---|
| 2388 | + err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]); |
---|
2338 | 2389 | if (err) |
---|
2339 | 2390 | goto err_close_channels; |
---|
2340 | 2391 | } |
---|
2341 | 2392 | |
---|
| 2393 | + mlx5e_health_channels_update(priv); |
---|
2342 | 2394 | kvfree(cparam); |
---|
2343 | 2395 | return 0; |
---|
2344 | 2396 | |
---|
.. | .. |
---|
2361 | 2413 | mlx5e_activate_channel(chs->c[i]); |
---|
2362 | 2414 | } |
---|
2363 | 2415 | |
---|
| 2416 | +#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ |
---|
| 2417 | + |
---|
2364 | 2418 | static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) |
---|
2365 | 2419 | { |
---|
2366 | 2420 | int err = 0; |
---|
2367 | 2421 | int i; |
---|
2368 | 2422 | |
---|
2369 | | - for (i = 0; i < chs->num; i++) |
---|
2370 | | - err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, |
---|
2371 | | - err ? 0 : 20000); |
---|
| 2423 | + for (i = 0; i < chs->num; i++) { |
---|
| 2424 | + int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT; |
---|
| 2425 | + |
---|
| 2426 | + err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout); |
---|
| 2427 | + |
---|
| 2428 | + /* Don't wait on the XSK RQ, because the newer xdpsock sample |
---|
| 2429 | + * doesn't provide any Fill Ring entries at the setup stage. |
---|
| 2430 | + */ |
---|
| 2431 | + } |
---|
2372 | 2432 | |
---|
2373 | 2433 | return err ? -ETIMEDOUT : 0; |
---|
2374 | 2434 | } |
---|
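Note how the loop collapses repeated waits: channels get the full MLX5E_RQ_WQES_TIMEOUT only until the first failure, after which the timeout drops to zero so a dead device fails fast instead of stalling 20 s per channel. A toy version of that accumulation (the failing channel is hard-coded for the demo):

```c
#include <stdio.h>

#define RQ_WQES_TIMEOUT 20000 /* msecs */

/* stand-in: pretend channel 1 never fills its RQ */
static int wait_for_min_rx_wqes(int ch, int timeout_ms)
{
	(void)timeout_ms;
	return ch == 1 ? -1 : 0;
}

int main(void)
{
	int err = 0;

	for (int ch = 0; ch < 4; ch++) {
		int timeout = err ? 0 : RQ_WQES_TIMEOUT;

		err |= wait_for_min_rx_wqes(ch, timeout);
		printf("ch%d waited up to %d ms, err=%d\n", ch, timeout, err);
	}
	return err ? -1 : 0;
}
```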
.. | .. |
---|
2440 | 2500 | return err; |
---|
2441 | 2501 | } |
---|
2442 | 2502 | |
---|
2443 | | -int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) |
---|
| 2503 | +int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) |
---|
2444 | 2504 | { |
---|
2445 | | - struct mlx5e_rqt *rqt; |
---|
2446 | 2505 | int err; |
---|
2447 | 2506 | int ix; |
---|
2448 | 2507 | |
---|
2449 | | - for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { |
---|
2450 | | - rqt = &priv->direct_tir[ix].rqt; |
---|
2451 | | - err = mlx5e_create_rqt(priv, 1 /*size */, rqt); |
---|
2452 | | - if (err) |
---|
| 2508 | + for (ix = 0; ix < priv->max_nch; ix++) { |
---|
| 2509 | + err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt); |
---|
| 2510 | + if (unlikely(err)) |
---|
2453 | 2511 | goto err_destroy_rqts; |
---|
2454 | 2512 | } |
---|
2455 | 2513 | |
---|
2456 | 2514 | return 0; |
---|
2457 | 2515 | |
---|
2458 | 2516 | err_destroy_rqts: |
---|
2459 | | - mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err); |
---|
| 2517 | + mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err); |
---|
2460 | 2518 | for (ix--; ix >= 0; ix--) |
---|
2461 | | - mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt); |
---|
| 2519 | + mlx5e_destroy_rqt(priv, &tirs[ix].rqt); |
---|
2462 | 2520 | |
---|
2463 | 2521 | return err; |
---|
2464 | 2522 | } |
---|
2465 | 2523 | |
---|
2466 | | -void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv) |
---|
| 2524 | +void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) |
---|
2467 | 2525 | { |
---|
2468 | 2526 | int i; |
---|
2469 | 2527 | |
---|
2470 | | - for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) |
---|
2471 | | - mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); |
---|
| 2528 | + for (i = 0; i < priv->max_nch; i++) |
---|
| 2529 | + mlx5e_destroy_rqt(priv, &tirs[i].rqt); |
---|
2472 | 2530 | } |
---|
2473 | 2531 | |
---|
2474 | 2532 | static int mlx5e_rx_hash_fn(int hfunc) |
---|
.. | .. |
---|
2503 | 2561 | if (rrp.rss.hfunc == ETH_RSS_HASH_XOR) |
---|
2504 | 2562 | ix = mlx5e_bits_invert(i, ilog2(sz)); |
---|
2505 | 2563 | |
---|
2506 | | - ix = priv->channels.params.indirection_rqt[ix]; |
---|
| 2564 | + ix = priv->rss_params.indirection_rqt[ix]; |
---|
2507 | 2565 | rqn = rrp.rss.channels->c[ix]->rq.rqn; |
---|
2508 | 2566 | } else { |
---|
2509 | 2567 | rqn = rrp.rqn; |
---|
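For ETH_RSS_HASH_XOR the indirection-table index is bit-reversed (mlx5e_bits_invert) before the indirection_rqt lookup, which decorrelates adjacent hash buckets from adjacent channels. A small sketch of that bit reversal over the low ilog2(sz) bits:

```c
#include <stdio.h>

static unsigned bits_invert(unsigned v, int bits)
{
	unsigned r = 0;

	for (int i = 0; i < bits; i++)
		r |= ((v >> i) & 1) << (bits - 1 - i);
	return r;
}

int main(void)
{
	int log_sz = 3; /* indirection table of 8 entries */

	for (unsigned i = 0; i < 8; i++)
		printf("entry %u -> %u\n", i, bits_invert(i, log_sz));
	return 0;
}
```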
.. | .. |
---|
2561 | 2619 | mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); |
---|
2562 | 2620 | } |
---|
2563 | 2621 | |
---|
2564 | | - for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { |
---|
| 2622 | + for (ix = 0; ix < priv->max_nch; ix++) { |
---|
2565 | 2623 | struct mlx5e_redirect_rqt_param direct_rrp = { |
---|
2566 | 2624 | .is_rss = false, |
---|
2567 | 2625 | { |
---|
.. | .. |
---|
2586 | 2644 | { |
---|
2587 | 2645 | .rss = { |
---|
2588 | 2646 | .channels = chs, |
---|
2589 | | - .hfunc = chs->params.rss_hfunc, |
---|
| 2647 | + .hfunc = priv->rss_params.hfunc, |
---|
2590 | 2648 | } |
---|
2591 | 2649 | }, |
---|
2592 | 2650 | }; |
---|
.. | .. |
---|
2606 | 2664 | mlx5e_redirect_rqts(priv, drop_rrp); |
---|
2607 | 2665 | } |
---|
2608 | 2666 | |
---|
| 2667 | +static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = { |
---|
| 2668 | + [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
---|
| 2669 | + .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, |
---|
| 2670 | + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, |
---|
| 2671 | + }, |
---|
| 2672 | + [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
---|
| 2673 | + .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, |
---|
| 2674 | + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, |
---|
| 2675 | + }, |
---|
| 2676 | + [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
---|
| 2677 | + .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, |
---|
| 2678 | + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, |
---|
| 2679 | + }, |
---|
| 2680 | + [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
---|
| 2681 | + .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, |
---|
| 2682 | + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, |
---|
| 2683 | + }, |
---|
| 2684 | + [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
---|
| 2685 | + .l4_prot_type = 0, |
---|
| 2686 | + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, |
---|
| 2687 | + }, |
---|
| 2688 | + [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
---|
| 2689 | + .l4_prot_type = 0, |
---|
| 2690 | + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, |
---|
| 2691 | + }, |
---|
| 2692 | + [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
---|
| 2693 | + .l4_prot_type = 0, |
---|
| 2694 | + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, |
---|
| 2695 | + }, |
---|
| 2696 | + [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
---|
| 2697 | + .l4_prot_type = 0, |
---|
| 2698 | + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, |
---|
| 2699 | + }, |
---|
| 2700 | + [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
---|
| 2701 | + .l4_prot_type = 0, |
---|
| 2702 | + .rx_hash_fields = MLX5_HASH_IP, |
---|
| 2703 | + }, |
---|
| 2704 | + [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
---|
| 2705 | + .l4_prot_type = 0, |
---|
| 2706 | + .rx_hash_fields = MLX5_HASH_IP, |
---|
| 2707 | + }, |
---|
| 2708 | +}; |
---|
| 2709 | + |
---|
| 2710 | +struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt) |
---|
| 2711 | +{ |
---|
| 2712 | + return tirc_default_config[tt]; |
---|
| 2713 | +} |
---|
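The table above replaces the long per-traffic-type switch further down the diff, which set the l3/l4 protocol and hash-field selectors case by case; a designated-initializer array keyed by the traffic-type enum is denser and harder to leave a case out of. A stand-alone illustration of the pattern, with invented enum names and field values:

```c
#include <stdio.h>

enum toy_tt { TT_IPV4_TCP, TT_IPV4, TT_MAX };

struct toy_tirc_config {
	int l3_prot;
	int l4_prot;
	unsigned rx_hash_fields;
};

static const struct toy_tirc_config defaults[TT_MAX] = {
	[TT_IPV4_TCP] = { .l3_prot = 4, .l4_prot = 6, .rx_hash_fields = 0xf },
	[TT_IPV4]     = { .l3_prot = 4, .l4_prot = 0, .rx_hash_fields = 0x3 },
};

int main(void)
{
	const struct toy_tirc_config *c = &defaults[TT_IPV4_TCP];

	/* one indexed load instead of a switch with one case per type */
	printf("l3=%d l4=%d fields=%#x\n",
	       c->l3_prot, c->l4_prot, c->rx_hash_fields);
	return 0;
}
```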
| 2714 | + |
---|
2609 | 2715 | static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) |
---|
2610 | 2716 | { |
---|
2611 | 2717 | if (!params->lro_en) |
---|
.. | .. |
---|
2617 | 2723 | MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | |
---|
2618 | 2724 | MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); |
---|
2619 | 2725 | MLX5_SET(tirc, tirc, lro_max_ip_payload_size, |
---|
2620 | | - (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); |
---|
| 2726 | + (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); |
---|
2621 | 2727 | MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout); |
---|
2622 | 2728 | } |
---|
2623 | 2729 | |
---|
2624 | | -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params, |
---|
2625 | | - enum mlx5e_traffic_types tt, |
---|
| 2730 | +void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, |
---|
| 2731 | + const struct mlx5e_tirc_config *ttconfig, |
---|
2626 | 2732 | void *tirc, bool inner) |
---|
2627 | 2733 | { |
---|
2628 | 2734 | void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) : |
---|
2629 | 2735 | MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); |
---|
2630 | 2736 | |
---|
2631 | | -#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ |
---|
2632 | | - MLX5_HASH_FIELD_SEL_DST_IP) |
---|
2633 | | - |
---|
2634 | | -#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\ |
---|
2635 | | - MLX5_HASH_FIELD_SEL_DST_IP |\ |
---|
2636 | | - MLX5_HASH_FIELD_SEL_L4_SPORT |\ |
---|
2637 | | - MLX5_HASH_FIELD_SEL_L4_DPORT) |
---|
2638 | | - |
---|
2639 | | -#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ |
---|
2640 | | - MLX5_HASH_FIELD_SEL_DST_IP |\ |
---|
2641 | | - MLX5_HASH_FIELD_SEL_IPSEC_SPI) |
---|
2642 | | - |
---|
2643 | | - MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc)); |
---|
2644 | | - if (params->rss_hfunc == ETH_RSS_HASH_TOP) { |
---|
| 2737 | + MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc)); |
---|
| 2738 | + if (rss_params->hfunc == ETH_RSS_HASH_TOP) { |
---|
2645 | 2739 | void *rss_key = MLX5_ADDR_OF(tirc, tirc, |
---|
2646 | 2740 | rx_hash_toeplitz_key); |
---|
2647 | 2741 | size_t len = MLX5_FLD_SZ_BYTES(tirc, |
---|
2648 | 2742 | rx_hash_toeplitz_key); |
---|
2649 | 2743 | |
---|
2650 | 2744 | MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); |
---|
2651 | | - memcpy(rss_key, params->toeplitz_hash_key, len); |
---|
| 2745 | + memcpy(rss_key, rss_params->toeplitz_hash_key, len); |
---|
| 2746 | + } |
---|
| 2747 | + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
| 2748 | + ttconfig->l3_prot_type); |
---|
| 2749 | + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, |
---|
| 2750 | + ttconfig->l4_prot_type); |
---|
| 2751 | + MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
| 2752 | + ttconfig->rx_hash_fields); |
---|
| 2753 | +} |
---|
| 2754 | + |
---|
| 2755 | +static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig, |
---|
| 2756 | + enum mlx5e_traffic_types tt, |
---|
| 2757 | + u32 rx_hash_fields) |
---|
| 2758 | +{ |
---|
| 2759 | + *ttconfig = tirc_default_config[tt]; |
---|
| 2760 | + ttconfig->rx_hash_fields = rx_hash_fields; |
---|
| 2761 | +} |
---|
| 2762 | + |
---|
| 2763 | +void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in) |
---|
| 2764 | +{ |
---|
| 2765 | + void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); |
---|
| 2766 | + struct mlx5e_rss_params *rss = &priv->rss_params; |
---|
| 2767 | + struct mlx5_core_dev *mdev = priv->mdev; |
---|
| 2768 | + int ctxlen = MLX5_ST_SZ_BYTES(tirc); |
---|
| 2769 | + struct mlx5e_tirc_config ttconfig; |
---|
| 2770 | + int tt; |
---|
| 2771 | + |
---|
| 2772 | + MLX5_SET(modify_tir_in, in, bitmask.hash, 1); |
---|
| 2773 | + |
---|
| 2774 | + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { |
---|
| 2775 | + memset(tirc, 0, ctxlen); |
---|
| 2776 | + mlx5e_update_rx_hash_fields(&ttconfig, tt, |
---|
| 2777 | + rss->rx_hash_fields[tt]); |
---|
| 2778 | + mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false); |
---|
| 2779 | + mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); |
---|
2652 | 2780 | } |
---|
2653 | 2781 | |
---|
2654 | | - switch (tt) { |
---|
2655 | | - case MLX5E_TT_IPV4_TCP: |
---|
2656 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2657 | | - MLX5_L3_PROT_TYPE_IPV4); |
---|
2658 | | - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, |
---|
2659 | | - MLX5_L4_PROT_TYPE_TCP); |
---|
2660 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2661 | | - MLX5_HASH_IP_L4PORTS); |
---|
2662 | | - break; |
---|
| 2782 | + /* Verify that inner TIR resources are allocated */
---|
| 2783 | + if (!priv->inner_indir_tir[0].tirn) |
---|
| 2784 | + return; |
---|
2663 | 2785 | |
---|
2664 | | - case MLX5E_TT_IPV6_TCP: |
---|
2665 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2666 | | - MLX5_L3_PROT_TYPE_IPV6); |
---|
2667 | | - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, |
---|
2668 | | - MLX5_L4_PROT_TYPE_TCP); |
---|
2669 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2670 | | - MLX5_HASH_IP_L4PORTS); |
---|
2671 | | - break; |
---|
2672 | | - |
---|
2673 | | - case MLX5E_TT_IPV4_UDP: |
---|
2674 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2675 | | - MLX5_L3_PROT_TYPE_IPV4); |
---|
2676 | | - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, |
---|
2677 | | - MLX5_L4_PROT_TYPE_UDP); |
---|
2678 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2679 | | - MLX5_HASH_IP_L4PORTS); |
---|
2680 | | - break; |
---|
2681 | | - |
---|
2682 | | - case MLX5E_TT_IPV6_UDP: |
---|
2683 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2684 | | - MLX5_L3_PROT_TYPE_IPV6); |
---|
2685 | | - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, |
---|
2686 | | - MLX5_L4_PROT_TYPE_UDP); |
---|
2687 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2688 | | - MLX5_HASH_IP_L4PORTS); |
---|
2689 | | - break; |
---|
2690 | | - |
---|
2691 | | - case MLX5E_TT_IPV4_IPSEC_AH: |
---|
2692 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2693 | | - MLX5_L3_PROT_TYPE_IPV4); |
---|
2694 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2695 | | - MLX5_HASH_IP_IPSEC_SPI); |
---|
2696 | | - break; |
---|
2697 | | - |
---|
2698 | | - case MLX5E_TT_IPV6_IPSEC_AH: |
---|
2699 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2700 | | - MLX5_L3_PROT_TYPE_IPV6); |
---|
2701 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2702 | | - MLX5_HASH_IP_IPSEC_SPI); |
---|
2703 | | - break; |
---|
2704 | | - |
---|
2705 | | - case MLX5E_TT_IPV4_IPSEC_ESP: |
---|
2706 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2707 | | - MLX5_L3_PROT_TYPE_IPV4); |
---|
2708 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2709 | | - MLX5_HASH_IP_IPSEC_SPI); |
---|
2710 | | - break; |
---|
2711 | | - |
---|
2712 | | - case MLX5E_TT_IPV6_IPSEC_ESP: |
---|
2713 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2714 | | - MLX5_L3_PROT_TYPE_IPV6); |
---|
2715 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2716 | | - MLX5_HASH_IP_IPSEC_SPI); |
---|
2717 | | - break; |
---|
2718 | | - |
---|
2719 | | - case MLX5E_TT_IPV4: |
---|
2720 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2721 | | - MLX5_L3_PROT_TYPE_IPV4); |
---|
2722 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2723 | | - MLX5_HASH_IP); |
---|
2724 | | - break; |
---|
2725 | | - |
---|
2726 | | - case MLX5E_TT_IPV6: |
---|
2727 | | - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
---|
2728 | | - MLX5_L3_PROT_TYPE_IPV6); |
---|
2729 | | - MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
---|
2730 | | - MLX5_HASH_IP); |
---|
2731 | | - break; |
---|
2732 | | - default: |
---|
2733 | | - WARN_ONCE(true, "%s: bad traffic type!\n", __func__); |
---|
| 2786 | + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { |
---|
| 2787 | + memset(tirc, 0, ctxlen); |
---|
| 2788 | + mlx5e_update_rx_hash_fields(&ttconfig, tt, |
---|
| 2789 | + rss->rx_hash_fields[tt]); |
---|
| 2790 | + mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true); |
---|
| 2791 | + mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in); |
---|
2734 | 2792 | } |
---|
2735 | 2793 | } |
---|
2736 | 2794 | |
---|
.. | .. |
---|
2756 | 2814 | mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); |
---|
2757 | 2815 | |
---|
2758 | 2816 | for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { |
---|
2759 | | - err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, |
---|
2760 | | - inlen); |
---|
| 2817 | + err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); |
---|
| 2818 | + if (err) |
---|
| 2819 | + goto free_in; |
---|
| 2820 | + |
---|
| 2821 | + /* Verify that inner TIR resources were allocated */ |
---|
| 2822 | + if (!priv->inner_indir_tir[0].tirn) |
---|
| 2823 | + continue; |
---|
| 2824 | + |
---|
| 2825 | + err = mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in); |
---|
2761 | 2826 | if (err) |
---|
2762 | 2827 | goto free_in; |
---|
2763 | 2828 | } |
---|
2764 | 2829 | |
---|
2765 | | - for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { |
---|
2766 | | - err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, |
---|
2767 | | - in, inlen); |
---|
| 2830 | + for (ix = 0; ix < priv->max_nch; ix++) { |
---|
| 2831 | + err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in); |
---|
2768 | 2832 | if (err) |
---|
2769 | 2833 | goto free_in; |
---|
2770 | 2834 | } |
---|
.. | .. |
---|
2775 | 2839 | return err; |
---|
2776 | 2840 | } |
---|
2777 | 2841 | |
---|
2778 | | -static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, |
---|
2779 | | - enum mlx5e_traffic_types tt, |
---|
2780 | | - u32 *tirc) |
---|
2781 | | -{ |
---|
2782 | | - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); |
---|
2783 | | - |
---|
2784 | | - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); |
---|
2785 | | - |
---|
2786 | | - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); |
---|
2787 | | - MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); |
---|
2788 | | - MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1); |
---|
2789 | | - |
---|
2790 | | - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true); |
---|
2791 | | -} |
---|
| 2842 | +static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro); |
---|
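Editor's note: MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX adapts a plain (struct mlx5e_priv *) function to the mlx5e_fp_preactivate signature (priv plus an opaque context) used by the channel-switching code later in this patch. A minimal sketch of what the macro presumably expands to — assumed shape, not copied from en.h:

```c
/* Assumed expansion: generate fn_ctx() that drops the unused context. */
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
	int fn##_ctx(struct mlx5e_priv *priv, void *context) \
	{ \
		return fn(priv); \
	}
```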
2792 | 2843 | |
---|
2793 | 2844 | static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, |
---|
2794 | 2845 | struct mlx5e_params *params, u16 mtu) |
---|
.. | .. |
---|
2818 | 2869 | *mtu = MLX5E_HW2SW_MTU(params, hw_mtu); |
---|
2819 | 2870 | } |
---|
2820 | 2871 | |
---|
2821 | | -static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) |
---|
| 2872 | +int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) |
---|
2822 | 2873 | { |
---|
2823 | 2874 | struct mlx5e_params *params = &priv->channels.params; |
---|
2824 | 2875 | struct net_device *netdev = priv->netdev; |
---|
.. | .. |
---|
2839 | 2890 | return 0; |
---|
2840 | 2891 | } |
---|
2841 | 2892 | |
---|
2842 | | -static void mlx5e_netdev_set_tcs(struct net_device *netdev) |
---|
| 2893 | +MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu); |
---|
| 2894 | + |
---|
| 2895 | +void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv) |
---|
2843 | 2896 | { |
---|
2844 | | - struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
2845 | | - int nch = priv->channels.params.num_channels; |
---|
2846 | | - int ntc = priv->channels.params.num_tc; |
---|
| 2897 | + struct mlx5e_params *params = &priv->channels.params; |
---|
| 2898 | + struct net_device *netdev = priv->netdev; |
---|
| 2899 | + struct mlx5_core_dev *mdev = priv->mdev; |
---|
| 2900 | + u16 max_mtu; |
---|
| 2901 | + |
---|
| 2902 | + /* MTU range: 68 - hw-specific max */ |
---|
| 2903 | + netdev->min_mtu = ETH_MIN_MTU; |
---|
| 2904 | + |
---|
| 2905 | + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); |
---|
| 2906 | + netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu), |
---|
| 2907 | + ETH_MAX_MTU); |
---|
| 2908 | +} |
---|
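Editor's note: a standalone sketch of the boundary math in mlx5e_set_netdev_mtu_boundaries(). The 22-byte "hard MTU" (Ethernet header + VLAN tag + FCS) standing in for the HW2SW conversion and the 9978-byte port cap are assumptions for illustration only:

```c
#include <stdio.h>

#define HARD_MTU    (14 + 4 + 4)	/* assumed L2 overhead */
#define ETH_MIN_MTU 68
#define ETH_MAX_MTU 65535

int main(void)
{
	unsigned int port_max_mtu = 9978;		/* hypothetical PMTU query result */
	unsigned int max_mtu = port_max_mtu - HARD_MTU;	/* HW2SW conversion */

	if (max_mtu > ETH_MAX_MTU)
		max_mtu = ETH_MAX_MTU;
	printf("netdev MTU range: %u - %u\n", ETH_MIN_MTU, max_mtu);
	return 0;
}
```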
| 2909 | + |
---|
| 2910 | +static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc) |
---|
| 2911 | +{ |
---|
2847 | 2912 | int tc; |
---|
2848 | 2913 | |
---|
2849 | 2914 | netdev_reset_tc(netdev); |
---|
.. | .. |
---|
2860 | 2925 | netdev_set_tc_queue(netdev, tc, nch, 0); |
---|
2861 | 2926 | } |
---|
2862 | 2927 | |
---|
2863 | | -static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) |
---|
| 2928 | +static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) |
---|
2864 | 2929 | { |
---|
2865 | | - int max_nch = priv->profile->max_nch(priv->mdev); |
---|
2866 | | - int i, tc; |
---|
| 2930 | + struct net_device *netdev = priv->netdev; |
---|
| 2931 | + int num_txqs, num_rxqs, nch, ntc; |
---|
| 2932 | + int old_num_txqs, old_ntc; |
---|
| 2933 | + int err; |
---|
2867 | 2934 | |
---|
2868 | | - for (i = 0; i < max_nch; i++) |
---|
2869 | | - for (tc = 0; tc < priv->profile->max_tc; tc++) |
---|
2870 | | - priv->channel_tc2txq[i][tc] = i + tc * max_nch; |
---|
| 2935 | + old_num_txqs = netdev->real_num_tx_queues; |
---|
| 2936 | + old_ntc = netdev->num_tc ? : 1; |
---|
| 2937 | + |
---|
| 2938 | + nch = priv->channels.params.num_channels; |
---|
| 2939 | + ntc = priv->channels.params.num_tc; |
---|
| 2940 | + num_txqs = nch * ntc; |
---|
| 2941 | + num_rxqs = nch * priv->profile->rq_groups; |
---|
| 2942 | + |
---|
| 2943 | + mlx5e_netdev_set_tcs(netdev, nch, ntc); |
---|
| 2944 | + |
---|
| 2945 | + err = netif_set_real_num_tx_queues(netdev, num_txqs); |
---|
| 2946 | + if (err) { |
---|
| 2947 | + netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err); |
---|
| 2948 | + goto err_tcs; |
---|
| 2949 | + } |
---|
| 2950 | + err = netif_set_real_num_rx_queues(netdev, num_rxqs); |
---|
| 2951 | + if (err) { |
---|
| 2952 | + netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); |
---|
| 2953 | + goto err_txqs; |
---|
| 2954 | + } |
---|
| 2955 | + |
---|
| 2956 | + return 0; |
---|
| 2957 | + |
---|
| 2958 | +err_txqs: |
---|
| 2959 | + /* netif_set_real_num_rx_queues can fail only when nch increases. Only |
---|
| 2960 | + * one of nch and ntc is changed in this function. That means the call |
---|
| 2961 | + * to netif_set_real_num_tx_queues below should not fail, because it |
---|
| 2962 | + * decreases the number of TX queues. |
---|
| 2963 | + */ |
---|
| 2964 | + WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); |
---|
| 2965 | + |
---|
| 2966 | +err_tcs: |
---|
| 2967 | + mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc); |
---|
| 2968 | + return err; |
---|
2871 | 2969 | } |
---|
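Editor's note: a standalone sketch of the queue accounting in mlx5e_update_netdev_queues(): TX queues scale with channels times TCs, RX queues with channels times RQ groups (e.g. regular + XSK). The counts below are hypothetical:

```c
#include <stdio.h>

int main(void)
{
	int nch = 8, ntc = 3, rq_groups = 2;
	int num_txqs = nch * ntc;	/* one TXQ per (channel, tc) pair */
	int num_rxqs = nch * rq_groups;	/* one RXQ per (channel, group) pair */

	printf("real_num_tx_queues = %d\n", num_txqs);	/* 24 */
	printf("real_num_rx_queues = %d\n", num_rxqs);	/* 16 */
	return 0;
}
```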
2872 | 2970 | |
---|
2873 | | -static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) |
---|
| 2971 | +static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, |
---|
| 2972 | + struct mlx5e_params *params) |
---|
2874 | 2973 | { |
---|
2875 | | - struct mlx5e_channel *c; |
---|
2876 | | - struct mlx5e_txqsq *sq; |
---|
2877 | | - int i, tc; |
---|
| 2974 | + struct mlx5_core_dev *mdev = priv->mdev; |
---|
| 2975 | + int num_comp_vectors, ix, irq; |
---|
2878 | 2976 | |
---|
2879 | | - for (i = 0; i < priv->channels.num; i++) { |
---|
2880 | | - c = priv->channels.c[i]; |
---|
2881 | | - for (tc = 0; tc < c->num_tc; tc++) { |
---|
2882 | | - sq = &c->sq[tc]; |
---|
| 2977 | + num_comp_vectors = mlx5_comp_vectors_count(mdev); |
---|
| 2978 | + |
---|
| 2979 | + for (ix = 0; ix < params->num_channels; ix++) { |
---|
| 2980 | + cpumask_clear(priv->scratchpad.cpumask); |
---|
| 2981 | + |
---|
| 2982 | + for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) { |
---|
| 2983 | + int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq)); |
---|
| 2984 | + |
---|
| 2985 | + cpumask_set_cpu(cpu, priv->scratchpad.cpumask); |
---|
| 2986 | + } |
---|
| 2987 | + |
---|
| 2988 | + netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix); |
---|
| 2989 | + } |
---|
| 2990 | +} |
---|
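Editor's note: a standalone sketch of the IRQ-to-channel striding in mlx5e_set_default_xps_cpumasks(): when there are more completion vectors than channels, channel ix aggregates the CPUs of vectors ix, ix + nch, ix + 2*nch, and so on. The counts are hypothetical:

```c
#include <stdio.h>

int main(void)
{
	int num_comp_vectors = 12, num_channels = 4;
	int ix, irq;

	for (ix = 0; ix < num_channels; ix++) {
		printf("channel %d gets IRQs:", ix);
		/* Same striding as the kernel loop above. */
		for (irq = ix; irq < num_comp_vectors; irq += num_channels)
			printf(" %d", irq);
		printf("\n");
	}
	return 0;
}
```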
| 2991 | + |
---|
| 2992 | +int mlx5e_num_channels_changed(struct mlx5e_priv *priv) |
---|
| 2993 | +{ |
---|
| 2994 | + u16 count = priv->channels.params.num_channels; |
---|
| 2995 | + int err; |
---|
| 2996 | + |
---|
| 2997 | + err = mlx5e_update_netdev_queues(priv); |
---|
| 2998 | + if (err) |
---|
| 2999 | + return err; |
---|
| 3000 | + |
---|
| 3001 | + mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); |
---|
| 3002 | + |
---|
| 3003 | + if (!netif_is_rxfh_configured(priv->netdev)) |
---|
| 3004 | + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, |
---|
| 3005 | + MLX5E_INDIR_RQT_SIZE, count); |
---|
| 3006 | + |
---|
| 3007 | + return 0; |
---|
| 3008 | +} |
---|
| 3009 | + |
---|
| 3010 | +MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed); |
---|
| 3011 | + |
---|
| 3012 | +static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) |
---|
| 3013 | +{ |
---|
| 3014 | + int i, ch; |
---|
| 3015 | + |
---|
| 3016 | + ch = priv->channels.num; |
---|
| 3017 | + |
---|
| 3018 | + for (i = 0; i < ch; i++) { |
---|
| 3019 | + int tc; |
---|
| 3020 | + |
---|
| 3021 | + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { |
---|
| 3022 | + struct mlx5e_channel *c = priv->channels.c[i]; |
---|
| 3023 | + struct mlx5e_txqsq *sq = &c->sq[tc]; |
---|
| 3024 | + |
---|
2883 | 3025 | priv->txq2sq[sq->txq_ix] = sq; |
---|
| 3026 | + priv->channel_tc2realtxq[i][tc] = i + tc * ch; |
---|
2884 | 3027 | } |
---|
2885 | 3028 | } |
---|
2886 | 3029 | } |
---|
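Editor's note: a standalone sketch of the TXQ layout built by mlx5e_build_txq_maps(): the stack's TXQ index for (channel i, tc) is i + tc * ch, i.e. all tc-0 queues come first, then all tc-1 queues, and so on:

```c
#include <stdio.h>

int main(void)
{
	int ch = 4, num_tc = 2;	/* hypothetical channel/TC counts */
	int i, tc;

	for (i = 0; i < ch; i++)
		for (tc = 0; tc < num_tc; tc++)
			printf("channel %d, tc %d -> txq %d\n",
			       i, tc, i + tc * ch);
	return 0;
}
```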
2887 | 3030 | |
---|
2888 | 3031 | void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) |
---|
2889 | 3032 | { |
---|
2890 | | - int num_txqs = priv->channels.num * priv->channels.params.num_tc; |
---|
2891 | | - struct net_device *netdev = priv->netdev; |
---|
2892 | | - |
---|
2893 | | - mlx5e_netdev_set_tcs(netdev); |
---|
2894 | | - netif_set_real_num_tx_queues(netdev, num_txqs); |
---|
2895 | | - netif_set_real_num_rx_queues(netdev, priv->channels.num); |
---|
2896 | | - |
---|
2897 | | - mlx5e_build_tx2sq_maps(priv); |
---|
| 3033 | + mlx5e_build_txq_maps(priv); |
---|
2898 | 3034 | mlx5e_activate_channels(&priv->channels); |
---|
2899 | 3035 | mlx5e_xdp_tx_enable(priv); |
---|
2900 | 3036 | netif_tx_start_all_queues(priv->netdev); |
---|
2901 | 3037 | |
---|
2902 | | - if (MLX5_ESWITCH_MANAGER(priv->mdev)) |
---|
| 3038 | + if (mlx5e_is_vport_rep(priv)) |
---|
2903 | 3039 | mlx5e_add_sqs_fwd_rules(priv); |
---|
2904 | 3040 | |
---|
2905 | 3041 | mlx5e_wait_channels_min_rx_wqes(&priv->channels); |
---|
2906 | 3042 | mlx5e_redirect_rqts_to_channels(priv, &priv->channels); |
---|
| 3043 | + |
---|
| 3044 | + mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels); |
---|
2907 | 3045 | } |
---|
2908 | 3046 | |
---|
2909 | 3047 | void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) |
---|
2910 | 3048 | { |
---|
| 3049 | + mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels); |
---|
| 3050 | + |
---|
2911 | 3051 | mlx5e_redirect_rqts_to_drop(priv); |
---|
2912 | 3052 | |
---|
2913 | | - if (MLX5_ESWITCH_MANAGER(priv->mdev)) |
---|
| 3053 | + if (mlx5e_is_vport_rep(priv)) |
---|
2914 | 3054 | mlx5e_remove_sqs_fwd_rules(priv); |
---|
2915 | 3055 | |
---|
2916 | 3056 | /* FIXME: This is a W/A only for tx timeout watch dog false alarm when |
---|
.. | .. |
---|
2922 | 3062 | mlx5e_deactivate_channels(&priv->channels); |
---|
2923 | 3063 | } |
---|
2924 | 3064 | |
---|
2925 | | -void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, |
---|
2926 | | - struct mlx5e_channels *new_chs, |
---|
2927 | | - mlx5e_fp_hw_modify hw_modify) |
---|
| 3065 | +static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, |
---|
| 3066 | + struct mlx5e_channels *new_chs, |
---|
| 3067 | + mlx5e_fp_preactivate preactivate, |
---|
| 3068 | + void *context) |
---|
2928 | 3069 | { |
---|
2929 | 3070 | struct net_device *netdev = priv->netdev; |
---|
2930 | | - int new_num_txqs; |
---|
| 3071 | + struct mlx5e_channels old_chs; |
---|
2931 | 3072 | int carrier_ok; |
---|
2932 | | - new_num_txqs = new_chs->num * new_chs->params.num_tc; |
---|
| 3073 | + int err = 0; |
---|
2933 | 3074 | |
---|
2934 | 3075 | carrier_ok = netif_carrier_ok(netdev); |
---|
2935 | 3076 | netif_carrier_off(netdev); |
---|
2936 | 3077 | |
---|
2937 | | - if (new_num_txqs < netdev->real_num_tx_queues) |
---|
2938 | | - netif_set_real_num_tx_queues(netdev, new_num_txqs); |
---|
2939 | | - |
---|
2940 | 3078 | mlx5e_deactivate_priv_channels(priv); |
---|
2941 | | - mlx5e_close_channels(&priv->channels); |
---|
2942 | 3079 | |
---|
| 3080 | + old_chs = priv->channels; |
---|
2943 | 3081 | priv->channels = *new_chs; |
---|
2944 | 3082 | |
---|
2945 | | - /* New channels are ready to roll, modify HW settings if needed */ |
---|
2946 | | - if (hw_modify) |
---|
2947 | | - hw_modify(priv); |
---|
| 3083 | + /* New channels are ready to roll, call the preactivate hook if needed |
---|
| 3084 | + * to modify HW settings or update kernel parameters. |
---|
| 3085 | + */ |
---|
| 3086 | + if (preactivate) { |
---|
| 3087 | + err = preactivate(priv, context); |
---|
| 3088 | + if (err) { |
---|
| 3089 | + priv->channels = old_chs; |
---|
| 3090 | + goto out; |
---|
| 3091 | + } |
---|
| 3092 | + } |
---|
2948 | 3093 | |
---|
2949 | | - mlx5e_refresh_tirs(priv, false); |
---|
| 3094 | + mlx5e_close_channels(&old_chs); |
---|
| 3095 | + priv->profile->update_rx(priv); |
---|
| 3096 | + |
---|
| 3097 | +out: |
---|
2950 | 3098 | mlx5e_activate_priv_channels(priv); |
---|
2951 | 3099 | |
---|
2952 | 3100 | /* return carrier back if needed */ |
---|
2953 | 3101 | if (carrier_ok) |
---|
2954 | 3102 | netif_carrier_on(netdev); |
---|
| 3103 | + |
---|
| 3104 | + return err; |
---|
| 3105 | +} |
---|
| 3106 | + |
---|
| 3107 | +int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, |
---|
| 3108 | + struct mlx5e_channels *new_chs, |
---|
| 3109 | + mlx5e_fp_preactivate preactivate, |
---|
| 3110 | + void *context) |
---|
| 3111 | +{ |
---|
| 3112 | + int err; |
---|
| 3113 | + |
---|
| 3114 | + err = mlx5e_open_channels(priv, new_chs); |
---|
| 3115 | + if (err) |
---|
| 3116 | + return err; |
---|
| 3117 | + |
---|
| 3118 | + err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context); |
---|
| 3119 | + if (err) |
---|
| 3120 | + goto err_close; |
---|
| 3121 | + |
---|
| 3122 | + return 0; |
---|
| 3123 | + |
---|
| 3124 | +err_close: |
---|
| 3125 | + mlx5e_close_channels(new_chs); |
---|
| 3126 | + |
---|
| 3127 | + return err; |
---|
| 3128 | +} |
---|
| 3129 | + |
---|
| 3130 | +int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) |
---|
| 3131 | +{ |
---|
| 3132 | + struct mlx5e_channels new_channels = {}; |
---|
| 3133 | + |
---|
| 3134 | + new_channels.params = priv->channels.params; |
---|
| 3135 | + return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); |
---|
2955 | 3136 | } |
---|
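Editor's note: a kernel-context sketch (not standalone) of how callers are expected to use mlx5e_safe_switch_channels(), modelled on the mqprio handler later in this patch; example_set_num_channels is hypothetical:

```c
static int example_set_num_channels(struct mlx5e_priv *priv, u16 count)
{
	struct mlx5e_channels new_channels = {};

	new_channels.params = priv->channels.params;
	new_channels.params.num_channels = count;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		/* Device closed: stage params; channels open with them later. */
		priv->channels.params = new_channels.params;
		return mlx5e_num_channels_changed(priv);
	}

	/* Device open: open new channels first, swap, run the preactivate
	 * hook, and roll back automatically on any failure.
	 */
	return mlx5e_safe_switch_channels(priv, &new_channels,
					  mlx5e_num_channels_changed_ctx, NULL);
}
```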
2956 | 3137 | |
---|
2957 | 3138 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) |
---|
2958 | 3139 | { |
---|
2959 | 3140 | priv->tstamp.tx_type = HWTSTAMP_TX_OFF; |
---|
2960 | 3141 | priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; |
---|
| 3142 | +} |
---|
| 3143 | + |
---|
| 3144 | +static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, |
---|
| 3145 | + enum mlx5_port_status state) |
---|
| 3146 | +{ |
---|
| 3147 | + struct mlx5_eswitch *esw = mdev->priv.eswitch; |
---|
| 3148 | + int vport_admin_state; |
---|
| 3149 | + |
---|
| 3150 | + mlx5_set_port_admin_status(mdev, state); |
---|
| 3151 | + |
---|
| 3152 | + if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) |
---|
| 3153 | + return; |
---|
| 3154 | + |
---|
| 3155 | + if (state == MLX5_PORT_UP) |
---|
| 3156 | + vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO; |
---|
| 3157 | + else |
---|
| 3158 | + vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN; |
---|
| 3159 | + |
---|
| 3160 | + mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state); |
---|
2961 | 3161 | } |
---|
2962 | 3162 | |
---|
2963 | 3163 | int mlx5e_open_locked(struct net_device *netdev) |
---|
.. | .. |
---|
2971 | 3171 | if (err) |
---|
2972 | 3172 | goto err_clear_state_opened_flag; |
---|
2973 | 3173 | |
---|
2974 | | - mlx5e_refresh_tirs(priv, false); |
---|
| 3174 | + priv->profile->update_rx(priv); |
---|
2975 | 3175 | mlx5e_activate_priv_channels(priv); |
---|
2976 | 3176 | if (priv->profile->update_carrier) |
---|
2977 | 3177 | priv->profile->update_carrier(priv); |
---|
2978 | 3178 | |
---|
2979 | | - if (priv->profile->update_stats) |
---|
2980 | | - queue_delayed_work(priv->wq, &priv->update_stats_work, 0); |
---|
2981 | | - |
---|
| 3179 | + mlx5e_queue_update_stats(priv); |
---|
2982 | 3180 | return 0; |
---|
2983 | 3181 | |
---|
2984 | 3182 | err_clear_state_opened_flag: |
---|
.. | .. |
---|
2994 | 3192 | mutex_lock(&priv->state_lock); |
---|
2995 | 3193 | err = mlx5e_open_locked(netdev); |
---|
2996 | 3194 | if (!err) |
---|
2997 | | - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); |
---|
| 3195 | + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP); |
---|
2998 | 3196 | mutex_unlock(&priv->state_lock); |
---|
2999 | | - |
---|
3000 | | - if (mlx5_vxlan_allowed(priv->mdev->vxlan)) |
---|
3001 | | - udp_tunnel_get_rx_info(netdev); |
---|
3002 | 3197 | |
---|
3003 | 3198 | return err; |
---|
3004 | 3199 | } |
---|
.. | .. |
---|
3031 | 3226 | return -ENODEV; |
---|
3032 | 3227 | |
---|
3033 | 3228 | mutex_lock(&priv->state_lock); |
---|
3034 | | - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN); |
---|
| 3229 | + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN); |
---|
3035 | 3230 | err = mlx5e_close_locked(netdev); |
---|
3036 | 3231 | mutex_unlock(&priv->state_lock); |
---|
3037 | 3232 | |
---|
.. | .. |
---|
3065 | 3260 | struct mlx5e_cq *cq, |
---|
3066 | 3261 | struct mlx5e_cq_param *param) |
---|
3067 | 3262 | { |
---|
3068 | | - param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); |
---|
3069 | | - param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev); |
---|
| 3263 | + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); |
---|
| 3264 | + param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); |
---|
3070 | 3265 | |
---|
3071 | 3266 | return mlx5e_alloc_cq_common(mdev, param, cq); |
---|
3072 | 3267 | } |
---|
3073 | 3268 | |
---|
3074 | | -static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, |
---|
3075 | | - struct mlx5e_rq *drop_rq) |
---|
| 3269 | +int mlx5e_open_drop_rq(struct mlx5e_priv *priv, |
---|
| 3270 | + struct mlx5e_rq *drop_rq) |
---|
3076 | 3271 | { |
---|
3077 | 3272 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
3078 | 3273 | struct mlx5e_cq_param cq_param = {}; |
---|
.. | .. |
---|
3116 | 3311 | return err; |
---|
3117 | 3312 | } |
---|
3118 | 3313 | |
---|
3119 | | -static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) |
---|
| 3314 | +void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) |
---|
3120 | 3315 | { |
---|
3121 | 3316 | mlx5e_destroy_rq(drop_rq); |
---|
3122 | 3317 | mlx5e_free_rq(drop_rq); |
---|
.. | .. |
---|
3124 | 3319 | mlx5e_free_cq(&drop_rq->cq); |
---|
3125 | 3320 | } |
---|
3126 | 3321 | |
---|
3127 | | -int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, |
---|
3128 | | - u32 underlay_qpn, u32 *tisn) |
---|
| 3322 | +int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) |
---|
3129 | 3323 | { |
---|
3130 | | - u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0}; |
---|
3131 | 3324 | void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); |
---|
3132 | 3325 | |
---|
3133 | | - MLX5_SET(tisc, tisc, prio, tc << 1); |
---|
3134 | | - MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn); |
---|
3135 | 3326 | MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); |
---|
| 3327 | + |
---|
| 3328 | + if (MLX5_GET(tisc, tisc, tls_en)) |
---|
| 3329 | + MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn); |
---|
3136 | 3330 | |
---|
3137 | 3331 | if (mlx5_lag_is_lacp_owner(mdev)) |
---|
3138 | 3332 | MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); |
---|
3139 | 3333 | |
---|
3140 | | - return mlx5_core_create_tis(mdev, in, sizeof(in), tisn); |
---|
| 3334 | + return mlx5_core_create_tis(mdev, in, tisn); |
---|
3141 | 3335 | } |
---|
3142 | 3336 | |
---|
3143 | 3337 | void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) |
---|
.. | .. |
---|
3145 | 3339 | mlx5_core_destroy_tis(mdev, tisn); |
---|
3146 | 3340 | } |
---|
3147 | 3341 | |
---|
| 3342 | +void mlx5e_destroy_tises(struct mlx5e_priv *priv) |
---|
| 3343 | +{ |
---|
| 3344 | + int tc, i; |
---|
| 3345 | + |
---|
| 3346 | + for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) |
---|
| 3347 | + for (tc = 0; tc < priv->profile->max_tc; tc++) |
---|
| 3348 | + mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); |
---|
| 3349 | +} |
---|
| 3350 | + |
---|
| 3351 | +static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev) |
---|
| 3352 | +{ |
---|
| 3353 | + return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; |
---|
| 3354 | +} |
---|
| 3355 | + |
---|
3148 | 3356 | int mlx5e_create_tises(struct mlx5e_priv *priv) |
---|
3149 | 3357 | { |
---|
| 3358 | + int tc, i; |
---|
3150 | 3359 | int err; |
---|
3151 | | - int tc; |
---|
3152 | 3360 | |
---|
3153 | | - for (tc = 0; tc < priv->profile->max_tc; tc++) { |
---|
3154 | | - err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]); |
---|
3155 | | - if (err) |
---|
3156 | | - goto err_close_tises; |
---|
| 3361 | + for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) { |
---|
| 3362 | + for (tc = 0; tc < priv->profile->max_tc; tc++) { |
---|
| 3363 | + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; |
---|
| 3364 | + void *tisc; |
---|
| 3365 | + |
---|
| 3366 | + tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); |
---|
| 3367 | + |
---|
| 3368 | + MLX5_SET(tisc, tisc, prio, tc << 1); |
---|
| 3369 | + |
---|
| 3370 | + if (mlx5e_lag_should_assign_affinity(priv->mdev)) |
---|
| 3371 | + MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1); |
---|
| 3372 | + |
---|
| 3373 | + err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]); |
---|
| 3374 | + if (err) |
---|
| 3375 | + goto err_close_tises; |
---|
| 3376 | + } |
---|
3157 | 3377 | } |
---|
3158 | 3378 | |
---|
3159 | 3379 | return 0; |
---|
3160 | 3380 | |
---|
3161 | 3381 | err_close_tises: |
---|
3162 | | - for (tc--; tc >= 0; tc--) |
---|
3163 | | - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); |
---|
| 3382 | + for (; i >= 0; i--) { |
---|
| 3383 | + for (tc--; tc >= 0; tc--) |
---|
| 3384 | + mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); |
---|
| 3385 | + tc = priv->profile->max_tc; |
---|
| 3386 | + } |
---|
3164 | 3387 | |
---|
3165 | 3388 | return err; |
---|
3166 | 3389 | } |
---|
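Editor's note: the error unwind in mlx5e_create_tises() is subtle — on failure at (i, tc) it destroys the partially created TCs of port i, then re-arms the inner counter to max_tc so every earlier port is torn down in full. A standalone sketch with a simulated failure at port 1, tc 1 (the real function returns 0 before the unwind label on success):

```c
#include <stdio.h>

#define NUM_PORTS 2
#define MAX_TC    3

int main(void)
{
	int i, tc;

	for (i = 0; i < NUM_PORTS; i++)
		for (tc = 0; tc < MAX_TC; tc++)
			if (i == 1 && tc == 1)	/* simulated create failure */
				goto err_unwind;

err_unwind:
	for (; i >= 0; i--) {
		for (tc--; tc >= 0; tc--)
			printf("destroy tisn[%d][%d]\n", i, tc);
		tc = MAX_TC;	/* full TC range for earlier ports */
	}
	/* Prints: [1][0], then [0][2], [0][1], [0][0]. */
	return 0;
}
```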
3167 | 3390 | |
---|
3168 | | -void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) |
---|
| 3391 | +static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) |
---|
3169 | 3392 | { |
---|
3170 | | - int tc; |
---|
| 3393 | + mlx5e_destroy_tises(priv); |
---|
| 3394 | +} |
---|
3171 | 3395 | |
---|
3172 | | - for (tc = 0; tc < priv->profile->max_tc; tc++) |
---|
3173 | | - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); |
---|
| 3396 | +static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, |
---|
| 3397 | + u32 rqtn, u32 *tirc) |
---|
| 3398 | +{ |
---|
| 3399 | + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); |
---|
| 3400 | + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); |
---|
| 3401 | + MLX5_SET(tirc, tirc, indirect_table, rqtn); |
---|
| 3402 | + MLX5_SET(tirc, tirc, tunneled_offload_en, |
---|
| 3403 | + priv->channels.params.tunneled_offload_en); |
---|
| 3404 | + |
---|
| 3405 | + mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); |
---|
3174 | 3406 | } |
---|
3175 | 3407 | |
---|
3176 | 3408 | static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, |
---|
3177 | 3409 | enum mlx5e_traffic_types tt, |
---|
3178 | 3410 | u32 *tirc) |
---|
3179 | 3411 | { |
---|
3180 | | - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); |
---|
3181 | | - |
---|
3182 | | - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); |
---|
3183 | | - |
---|
3184 | | - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); |
---|
3185 | | - MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); |
---|
3186 | | - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false); |
---|
| 3412 | + mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); |
---|
| 3413 | + mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, |
---|
| 3414 | + &tirc_default_config[tt], tirc, false); |
---|
3187 | 3415 | } |
---|
3188 | 3416 | |
---|
3189 | 3417 | static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) |
---|
3190 | 3418 | { |
---|
3191 | | - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); |
---|
3192 | | - |
---|
3193 | | - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); |
---|
3194 | | - |
---|
3195 | | - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); |
---|
3196 | | - MLX5_SET(tirc, tirc, indirect_table, rqtn); |
---|
| 3419 | + mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc); |
---|
3197 | 3420 | MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); |
---|
3198 | 3421 | } |
---|
3199 | 3422 | |
---|
3200 | | -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) |
---|
| 3423 | +static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, |
---|
| 3424 | + enum mlx5e_traffic_types tt, |
---|
| 3425 | + u32 *tirc) |
---|
| 3426 | +{ |
---|
| 3427 | + mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); |
---|
| 3428 | + mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, |
---|
| 3429 | + &tirc_default_config[tt], tirc, true); |
---|
| 3430 | +} |
---|
| 3431 | + |
---|
| 3432 | +int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) |
---|
3201 | 3433 | { |
---|
3202 | 3434 | struct mlx5e_tir *tir; |
---|
3203 | 3435 | void *tirc; |
---|
.. | .. |
---|
3217 | 3449 | tir = &priv->indir_tir[tt]; |
---|
3218 | 3450 | tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); |
---|
3219 | 3451 | mlx5e_build_indir_tir_ctx(priv, tt, tirc); |
---|
3220 | | - err = mlx5e_create_tir(priv->mdev, tir, in, inlen); |
---|
| 3452 | + err = mlx5e_create_tir(priv->mdev, tir, in); |
---|
3221 | 3453 | if (err) { |
---|
3222 | 3454 | mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err); |
---|
3223 | 3455 | goto err_destroy_inner_tirs; |
---|
3224 | 3456 | } |
---|
3225 | 3457 | } |
---|
3226 | 3458 | |
---|
3227 | | - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) |
---|
| 3459 | + if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) |
---|
3228 | 3460 | goto out; |
---|
3229 | 3461 | |
---|
3230 | 3462 | for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { |
---|
.. | .. |
---|
3232 | 3464 | tir = &priv->inner_indir_tir[i]; |
---|
3233 | 3465 | tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); |
---|
3234 | 3466 | mlx5e_build_inner_indir_tir_ctx(priv, i, tirc); |
---|
3235 | | - err = mlx5e_create_tir(priv->mdev, tir, in, inlen); |
---|
| 3467 | + err = mlx5e_create_tir(priv->mdev, tir, in); |
---|
3236 | 3468 | if (err) { |
---|
3237 | 3469 | mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err); |
---|
3238 | 3470 | goto err_destroy_inner_tirs; |
---|
.. | .. |
---|
3256 | 3488 | return err; |
---|
3257 | 3489 | } |
---|
3258 | 3490 | |
---|
3259 | | -int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) |
---|
| 3491 | +int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) |
---|
3260 | 3492 | { |
---|
3261 | | - int nch = priv->profile->max_nch(priv->mdev); |
---|
3262 | 3493 | struct mlx5e_tir *tir; |
---|
3263 | 3494 | void *tirc; |
---|
3264 | 3495 | int inlen; |
---|
3265 | | - int err; |
---|
| 3496 | + int err = 0; |
---|
3266 | 3497 | u32 *in; |
---|
3267 | 3498 | int ix; |
---|
3268 | 3499 | |
---|
.. | .. |
---|
3271 | 3502 | if (!in) |
---|
3272 | 3503 | return -ENOMEM; |
---|
3273 | 3504 | |
---|
3274 | | - for (ix = 0; ix < nch; ix++) { |
---|
| 3505 | + for (ix = 0; ix < priv->max_nch; ix++) { |
---|
3275 | 3506 | memset(in, 0, inlen); |
---|
3276 | | - tir = &priv->direct_tir[ix]; |
---|
| 3507 | + tir = &tirs[ix]; |
---|
3277 | 3508 | tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); |
---|
3278 | | - mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc); |
---|
3279 | | - err = mlx5e_create_tir(priv->mdev, tir, in, inlen); |
---|
3280 | | - if (err) |
---|
| 3509 | + mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc); |
---|
| 3510 | + err = mlx5e_create_tir(priv->mdev, tir, in); |
---|
| 3511 | + if (unlikely(err)) |
---|
3281 | 3512 | goto err_destroy_ch_tirs; |
---|
3282 | 3513 | } |
---|
3283 | 3514 | |
---|
3284 | | - kvfree(in); |
---|
3285 | | - |
---|
3286 | | - return 0; |
---|
| 3515 | + goto out; |
---|
3287 | 3516 | |
---|
3288 | 3517 | err_destroy_ch_tirs: |
---|
3289 | | - mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err); |
---|
| 3518 | + mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err); |
---|
3290 | 3519 | for (ix--; ix >= 0; ix--) |
---|
3291 | | - mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]); |
---|
| 3520 | + mlx5e_destroy_tir(priv->mdev, &tirs[ix]); |
---|
3292 | 3521 | |
---|
| 3522 | +out: |
---|
3293 | 3523 | kvfree(in); |
---|
3294 | 3524 | |
---|
3295 | 3525 | return err; |
---|
.. | .. |
---|
3302 | 3532 | for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) |
---|
3303 | 3533 | mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); |
---|
3304 | 3534 | |
---|
3305 | | - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) |
---|
| 3535 | + /* Verify that inner TIR resources were allocated */ |
---|
| 3536 | + if (!priv->inner_indir_tir[0].tirn) |
---|
3306 | 3537 | return; |
---|
3307 | 3538 | |
---|
3308 | 3539 | for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) |
---|
3309 | 3540 | mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); |
---|
3310 | 3541 | } |
---|
3311 | 3542 | |
---|
3312 | | -void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv) |
---|
| 3543 | +void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) |
---|
3313 | 3544 | { |
---|
3314 | | - int nch = priv->profile->max_nch(priv->mdev); |
---|
3315 | 3545 | int i; |
---|
3316 | 3546 | |
---|
3317 | | - for (i = 0; i < nch; i++) |
---|
3318 | | - mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]); |
---|
| 3547 | + for (i = 0; i < priv->max_nch; i++) |
---|
| 3548 | + mlx5e_destroy_tir(priv->mdev, &tirs[i]); |
---|
3319 | 3549 | } |
---|
3320 | 3550 | |
---|
3321 | 3551 | static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable) |
---|
.. | .. |
---|
3346 | 3576 | return 0; |
---|
3347 | 3577 | } |
---|
3348 | 3578 | |
---|
3349 | | -static int mlx5e_setup_tc_mqprio(struct net_device *netdev, |
---|
| 3579 | +static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, |
---|
3350 | 3580 | struct tc_mqprio_qopt *mqprio) |
---|
3351 | 3581 | { |
---|
3352 | | - struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
3353 | 3582 | struct mlx5e_channels new_channels = {}; |
---|
3354 | 3583 | u8 tc = mqprio->num_tc; |
---|
3355 | 3584 | int err = 0; |
---|
.. | .. |
---|
3365 | 3594 | new_channels.params.num_tc = tc ? tc : 1; |
---|
3366 | 3595 | |
---|
3367 | 3596 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
---|
| 3597 | + struct mlx5e_params old_params; |
---|
| 3598 | + |
---|
| 3599 | + old_params = priv->channels.params; |
---|
3368 | 3600 | priv->channels.params = new_channels.params; |
---|
| 3601 | + err = mlx5e_num_channels_changed(priv); |
---|
| 3602 | + if (err) |
---|
| 3603 | + priv->channels.params = old_params; |
---|
| 3604 | + |
---|
3369 | 3605 | goto out; |
---|
3370 | 3606 | } |
---|
3371 | 3607 | |
---|
3372 | | - err = mlx5e_open_channels(priv, &new_channels); |
---|
3373 | | - if (err) |
---|
3374 | | - goto out; |
---|
| 3608 | + err = mlx5e_safe_switch_channels(priv, &new_channels, |
---|
| 3609 | + mlx5e_num_channels_changed_ctx, NULL); |
---|
3375 | 3610 | |
---|
3376 | | - priv->max_opened_tc = max_t(u8, priv->max_opened_tc, |
---|
3377 | | - new_channels.params.num_tc); |
---|
3378 | | - mlx5e_switch_priv_channels(priv, &new_channels, NULL); |
---|
3379 | 3611 | out: |
---|
| 3612 | + priv->max_opened_tc = max_t(u8, priv->max_opened_tc, |
---|
| 3613 | + priv->channels.params.num_tc); |
---|
3380 | 3614 | mutex_unlock(&priv->state_lock); |
---|
3381 | 3615 | return err; |
---|
3382 | 3616 | } |
---|
3383 | 3617 | |
---|
3384 | | -#ifdef CONFIG_MLX5_ESWITCH |
---|
3385 | | -static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, |
---|
3386 | | - struct tc_cls_flower_offload *cls_flower, |
---|
3387 | | - int flags) |
---|
3388 | | -{ |
---|
3389 | | - switch (cls_flower->command) { |
---|
3390 | | - case TC_CLSFLOWER_REPLACE: |
---|
3391 | | - return mlx5e_configure_flower(priv, cls_flower, flags); |
---|
3392 | | - case TC_CLSFLOWER_DESTROY: |
---|
3393 | | - return mlx5e_delete_flower(priv, cls_flower, flags); |
---|
3394 | | - case TC_CLSFLOWER_STATS: |
---|
3395 | | - return mlx5e_stats_flower(priv, cls_flower, flags); |
---|
3396 | | - default: |
---|
3397 | | - return -EOPNOTSUPP; |
---|
3398 | | - } |
---|
3399 | | -} |
---|
3400 | | - |
---|
3401 | | -static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
---|
3402 | | - void *cb_priv) |
---|
3403 | | -{ |
---|
3404 | | - struct mlx5e_priv *priv = cb_priv; |
---|
3405 | | - |
---|
3406 | | - if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) |
---|
3407 | | - return -EOPNOTSUPP; |
---|
3408 | | - |
---|
3409 | | - switch (type) { |
---|
3410 | | - case TC_SETUP_CLSFLOWER: |
---|
3411 | | - return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); |
---|
3412 | | - default: |
---|
3413 | | - return -EOPNOTSUPP; |
---|
3414 | | - } |
---|
3415 | | -} |
---|
3416 | | - |
---|
3417 | | -static int mlx5e_setup_tc_block(struct net_device *dev, |
---|
3418 | | - struct tc_block_offload *f) |
---|
3419 | | -{ |
---|
3420 | | - struct mlx5e_priv *priv = netdev_priv(dev); |
---|
3421 | | - |
---|
3422 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
3423 | | - return -EOPNOTSUPP; |
---|
3424 | | - |
---|
3425 | | - switch (f->command) { |
---|
3426 | | - case TC_BLOCK_BIND: |
---|
3427 | | - return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb, |
---|
3428 | | - priv, priv, f->extack); |
---|
3429 | | - case TC_BLOCK_UNBIND: |
---|
3430 | | - tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb, |
---|
3431 | | - priv); |
---|
3432 | | - return 0; |
---|
3433 | | - default: |
---|
3434 | | - return -EOPNOTSUPP; |
---|
3435 | | - } |
---|
3436 | | -} |
---|
3437 | | -#endif |
---|
| 3618 | +static LIST_HEAD(mlx5e_block_cb_list); |
---|
3438 | 3619 | |
---|
3439 | 3620 | static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, |
---|
3440 | 3621 | void *type_data) |
---|
3441 | 3622 | { |
---|
| 3623 | + struct mlx5e_priv *priv = netdev_priv(dev); |
---|
| 3624 | + |
---|
3442 | 3625 | switch (type) { |
---|
3443 | | -#ifdef CONFIG_MLX5_ESWITCH |
---|
3444 | | - case TC_SETUP_BLOCK: |
---|
3445 | | - return mlx5e_setup_tc_block(dev, type_data); |
---|
3446 | | -#endif |
---|
| 3626 | + case TC_SETUP_BLOCK: { |
---|
| 3627 | + struct flow_block_offload *f = type_data; |
---|
| 3628 | + |
---|
| 3629 | + f->unlocked_driver_cb = true; |
---|
| 3630 | + return flow_block_cb_setup_simple(type_data, |
---|
| 3631 | + &mlx5e_block_cb_list, |
---|
| 3632 | + mlx5e_setup_tc_block_cb, |
---|
| 3633 | + priv, priv, true); |
---|
| 3634 | + } |
---|
3447 | 3635 | case TC_SETUP_QDISC_MQPRIO: |
---|
3448 | | - return mlx5e_setup_tc_mqprio(dev, type_data); |
---|
| 3636 | + return mlx5e_setup_tc_mqprio(priv, type_data); |
---|
3449 | 3637 | default: |
---|
3450 | 3638 | return -EOPNOTSUPP; |
---|
3451 | 3639 | } |
---|
3452 | 3640 | } |
---|
3453 | 3641 | |
---|
3454 | | -static void |
---|
| 3642 | +void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) |
---|
| 3643 | +{ |
---|
| 3644 | + int i; |
---|
| 3645 | + |
---|
| 3646 | + for (i = 0; i < priv->max_nch; i++) { |
---|
| 3647 | + struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; |
---|
| 3648 | + struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; |
---|
| 3649 | + struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; |
---|
| 3650 | + int j; |
---|
| 3651 | + |
---|
| 3652 | + s->rx_packets += rq_stats->packets + xskrq_stats->packets; |
---|
| 3653 | + s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes; |
---|
| 3654 | + s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets; |
---|
| 3655 | + |
---|
| 3656 | + for (j = 0; j < priv->max_opened_tc; j++) { |
---|
| 3657 | + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; |
---|
| 3658 | + |
---|
| 3659 | + s->tx_packets += sq_stats->packets; |
---|
| 3660 | + s->tx_bytes += sq_stats->bytes; |
---|
| 3661 | + s->tx_dropped += sq_stats->dropped; |
---|
| 3662 | + } |
---|
| 3663 | + } |
---|
| 3664 | +} |
---|
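Editor's note: a standalone sketch of the folding in mlx5e_fold_sw_stats64(): RX totals combine the regular RQ and the XSK RQ of each channel, TX totals sum every TC ever opened. All counter values are hypothetical:

```c
#include <stdio.h>

struct rq_stats { unsigned long packets, bytes; };
struct sq_stats { unsigned long packets, bytes, dropped; };

int main(void)
{
	struct rq_stats rq = { 100, 150000 }, xskrq = { 20, 30000 };
	struct sq_stats sq[2] = { { 80, 120000, 1 }, { 40, 60000, 0 } };
	unsigned long rx_packets = rq.packets + xskrq.packets;
	unsigned long tx_packets = 0, tx_dropped = 0;
	int tc;

	for (tc = 0; tc < 2; tc++) {	/* max_opened_tc == 2 here */
		tx_packets += sq[tc].packets;
		tx_dropped += sq[tc].dropped;
	}
	printf("rx %lu tx %lu dropped %lu\n", rx_packets, tx_packets, tx_dropped);
	return 0;
}
```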
| 3665 | + |
---|
| 3666 | +void |
---|
3455 | 3667 | mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) |
---|
3456 | 3668 | { |
---|
3457 | 3669 | struct mlx5e_priv *priv = netdev_priv(dev); |
---|
3458 | | - struct mlx5e_sw_stats *sstats = &priv->stats.sw; |
---|
3459 | | - struct mlx5e_vport_stats *vstats = &priv->stats.vport; |
---|
3460 | 3670 | struct mlx5e_pport_stats *pstats = &priv->stats.pport; |
---|
3461 | 3671 | |
---|
3462 | | - /* update HW stats in background for next time */ |
---|
3463 | | - queue_delayed_work(priv->wq, &priv->update_stats_work, 0); |
---|
| 3672 | + /* In switchdev mode, monitor counters don't monitor |
---|
| 3673 | + * rx/tx stats of 802_3. The update stats mechanism |
---|
| 3674 | + * should keep the 802_3 layout counters updated |
---|
| 3675 | + */ |
---|
| 3676 | + if (!mlx5e_monitor_counter_supported(priv) || |
---|
| 3677 | + mlx5e_is_uplink_rep(priv)) { |
---|
| 3678 | + /* update HW stats in background for next time */ |
---|
| 3679 | + mlx5e_queue_update_stats(priv); |
---|
| 3680 | + } |
---|
3464 | 3681 | |
---|
3465 | 3682 | if (mlx5e_is_uplink_rep(priv)) { |
---|
| 3683 | + struct mlx5e_vport_stats *vstats = &priv->stats.vport; |
---|
| 3684 | + |
---|
3466 | 3685 | stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); |
---|
3467 | 3686 | stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); |
---|
3468 | 3687 | stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); |
---|
3469 | 3688 | stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); |
---|
| 3689 | + |
---|
| 3690 | + /* vport multicast also counts packets that are dropped due to steering |
---|
| 3691 | + * or rx out of buffer |
---|
| 3692 | + */ |
---|
| 3693 | + stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); |
---|
3470 | 3694 | } else { |
---|
3471 | | - mlx5e_grp_sw_update_stats(priv); |
---|
3472 | | - stats->rx_packets = sstats->rx_packets; |
---|
3473 | | - stats->rx_bytes = sstats->rx_bytes; |
---|
3474 | | - stats->tx_packets = sstats->tx_packets; |
---|
3475 | | - stats->tx_bytes = sstats->tx_bytes; |
---|
3476 | | - stats->tx_dropped = sstats->tx_queue_dropped; |
---|
| 3695 | + mlx5e_fold_sw_stats64(priv, stats); |
---|
3477 | 3696 | } |
---|
3478 | 3697 | |
---|
3479 | 3698 | stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; |
---|
.. | .. |
---|
3489 | 3708 | stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + |
---|
3490 | 3709 | stats->rx_frame_errors; |
---|
3491 | 3710 | stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; |
---|
3492 | | - |
---|
3493 | | - /* vport multicast also counts packets that are dropped due to steering |
---|
3494 | | - * or rx out of buffer |
---|
3495 | | - */ |
---|
3496 | | - stats->multicast = |
---|
3497 | | - VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); |
---|
3498 | 3711 | } |
---|
3499 | 3712 | |
---|
3500 | 3713 | static void mlx5e_set_rx_mode(struct net_device *dev) |
---|
.. | .. |
---|
3536 | 3749 | struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
3537 | 3750 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
3538 | 3751 | struct mlx5e_channels new_channels = {}; |
---|
3539 | | - struct mlx5e_params *old_params; |
---|
| 3752 | + struct mlx5e_params *cur_params; |
---|
3540 | 3753 | int err = 0; |
---|
3541 | 3754 | bool reset; |
---|
3542 | 3755 | |
---|
3543 | 3756 | mutex_lock(&priv->state_lock); |
---|
3544 | 3757 | |
---|
3545 | | - old_params = &priv->channels.params; |
---|
3546 | | - if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) { |
---|
| 3758 | + if (enable && priv->xsk.refcnt) { |
---|
| 3759 | + netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n", |
---|
| 3760 | + priv->xsk.refcnt); |
---|
| 3761 | + err = -EINVAL; |
---|
| 3762 | + goto out; |
---|
| 3763 | + } |
---|
| 3764 | + |
---|
| 3765 | + cur_params = &priv->channels.params; |
---|
| 3766 | + if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) { |
---|
3547 | 3767 | netdev_warn(netdev, "can't set LRO with legacy RQ\n"); |
---|
3548 | 3768 | err = -EINVAL; |
---|
3549 | 3769 | goto out; |
---|
.. | .. |
---|
3551 | 3771 | |
---|
3552 | 3772 | reset = test_bit(MLX5E_STATE_OPENED, &priv->state); |
---|
3553 | 3773 | |
---|
3554 | | - new_channels.params = *old_params; |
---|
| 3774 | + new_channels.params = *cur_params; |
---|
3555 | 3775 | new_channels.params.lro_en = enable; |
---|
3556 | 3776 | |
---|
3557 | | - if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { |
---|
3558 | | - if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) == |
---|
3559 | | - mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params)) |
---|
| 3777 | + if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { |
---|
| 3778 | + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) == |
---|
| 3779 | + mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL)) |
---|
3560 | 3780 | reset = false; |
---|
3561 | 3781 | } |
---|
3562 | 3782 | |
---|
3563 | 3783 | if (!reset) { |
---|
3564 | | - *old_params = new_channels.params; |
---|
| 3784 | + struct mlx5e_params old_params; |
---|
| 3785 | + |
---|
| 3786 | + old_params = *cur_params; |
---|
| 3787 | + *cur_params = new_channels.params; |
---|
3565 | 3788 | err = mlx5e_modify_tirs_lro(priv); |
---|
| 3789 | + if (err) |
---|
| 3790 | + *cur_params = old_params; |
---|
3566 | 3791 | goto out; |
---|
3567 | 3792 | } |
---|
3568 | 3793 | |
---|
3569 | | - err = mlx5e_open_channels(priv, &new_channels); |
---|
3570 | | - if (err) |
---|
3571 | | - goto out; |
---|
3572 | | - |
---|
3573 | | - mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro); |
---|
| 3794 | + err = mlx5e_safe_switch_channels(priv, &new_channels, |
---|
| 3795 | + mlx5e_modify_tirs_lro_ctx, NULL); |
---|
3574 | 3796 | out: |
---|
3575 | 3797 | mutex_unlock(&priv->state_lock); |
---|
3576 | 3798 | return err; |
---|
.. | .. |
---|
3588 | 3810 | return 0; |
---|
3589 | 3811 | } |
---|
3590 | 3812 | |
---|
3591 | | -#ifdef CONFIG_MLX5_ESWITCH |
---|
| 3813 | +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) |
---|
3592 | 3814 | static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) |
---|
3593 | 3815 | { |
---|
3594 | 3816 | struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
3595 | 3817 | |
---|
3596 | | - if (!enable && mlx5e_tc_num_filters(priv)) { |
---|
| 3818 | + if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) { |
---|
3597 | 3819 | netdev_err(netdev, |
---|
3598 | 3820 | "Active offloaded tc filters, can't turn hw_tc_offload off\n"); |
---|
3599 | 3821 | return -EINVAL; |
---|
.. | .. |
---|
3611 | 3833 | return mlx5_set_port_fcs(mdev, !enable); |
---|
3612 | 3834 | } |
---|
3613 | 3835 | |
---|
| 3836 | +static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable) |
---|
| 3837 | +{ |
---|
| 3838 | + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {}; |
---|
| 3839 | + bool supported, curr_state; |
---|
| 3840 | + int err; |
---|
| 3841 | + |
---|
| 3842 | + if (!MLX5_CAP_GEN(mdev, ports_check)) |
---|
| 3843 | + return 0; |
---|
| 3844 | + |
---|
| 3845 | + err = mlx5_query_ports_check(mdev, in, sizeof(in)); |
---|
| 3846 | + if (err) |
---|
| 3847 | + return err; |
---|
| 3848 | + |
---|
| 3849 | + supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap); |
---|
| 3850 | + curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc); |
---|
| 3851 | + |
---|
| 3852 | + if (!supported || enable == curr_state) |
---|
| 3853 | + return 0; |
---|
| 3854 | + |
---|
| 3855 | + MLX5_SET(pcmr_reg, in, local_port, 1); |
---|
| 3856 | + MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable); |
---|
| 3857 | + |
---|
| 3858 | + return mlx5_set_ports_check(mdev, in, sizeof(in)); |
---|
| 3859 | +} |
---|
| 3860 | + |
---|
3614 | 3861 | static int set_feature_rx_fcs(struct net_device *netdev, bool enable) |
---|
3615 | 3862 | { |
---|
3616 | 3863 | struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
| 3864 | + struct mlx5e_channels *chs = &priv->channels; |
---|
| 3865 | + struct mlx5_core_dev *mdev = priv->mdev; |
---|
3617 | 3866 | int err; |
---|
3618 | 3867 | |
---|
3619 | 3868 | mutex_lock(&priv->state_lock); |
---|
3620 | 3869 | |
---|
3621 | | - priv->channels.params.scatter_fcs_en = enable; |
---|
3622 | | - err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable); |
---|
3623 | | - if (err) |
---|
3624 | | - priv->channels.params.scatter_fcs_en = !enable; |
---|
| 3870 | + if (enable) { |
---|
| 3871 | + err = mlx5e_set_rx_port_ts(mdev, false); |
---|
| 3872 | + if (err) |
---|
| 3873 | + goto out; |
---|
3625 | 3874 | |
---|
| 3875 | + chs->params.scatter_fcs_en = true; |
---|
| 3876 | + err = mlx5e_modify_channels_scatter_fcs(chs, true); |
---|
| 3877 | + if (err) { |
---|
| 3878 | + chs->params.scatter_fcs_en = false; |
---|
| 3879 | + mlx5e_set_rx_port_ts(mdev, true); |
---|
| 3880 | + } |
---|
| 3881 | + } else { |
---|
| 3882 | + chs->params.scatter_fcs_en = false; |
---|
| 3883 | + err = mlx5e_modify_channels_scatter_fcs(chs, false); |
---|
| 3884 | + if (err) { |
---|
| 3885 | + chs->params.scatter_fcs_en = true; |
---|
| 3886 | + goto out; |
---|
| 3887 | + } |
---|
| 3888 | + err = mlx5e_set_rx_port_ts(mdev, true); |
---|
| 3889 | + if (err) { |
---|
| 3890 | + mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err); |
---|
| 3891 | + err = 0; |
---|
| 3892 | + } |
---|
| 3893 | + } |
---|
| 3894 | + |
---|
| 3895 | +out: |
---|
3626 | 3896 | mutex_unlock(&priv->state_lock); |
---|
3627 | | - |
---|
3628 | 3897 | return err; |
---|
3629 | 3898 | } |
---|
3630 | 3899 | |
---|
.. | .. |
---|
3687 | 3956 | return 0; |
---|
3688 | 3957 | } |
---|
3689 | 3958 | |
---|
3690 | | -static int mlx5e_set_features(struct net_device *netdev, |
---|
3691 | | - netdev_features_t features) |
---|
| 3959 | +int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) |
---|
3692 | 3960 | { |
---|
3693 | 3961 | netdev_features_t oper_features = features; |
---|
3694 | 3962 | int err = 0; |
---|
.. | .. |
---|
3699 | 3967 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); |
---|
3700 | 3968 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, |
---|
3701 | 3969 | set_feature_cvlan_filter); |
---|
3702 | | -#ifdef CONFIG_MLX5_ESWITCH |
---|
| 3970 | +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) |
---|
3703 | 3971 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); |
---|
3704 | 3972 | #endif |
---|
3705 | 3973 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); |
---|
.. | .. |
---|
3708 | 3976 | #ifdef CONFIG_MLX5_EN_ARFS |
---|
3709 | 3977 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); |
---|
3710 | 3978 | #endif |
---|
| 3979 | + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx); |
---|
3711 | 3980 | |
---|
3712 | 3981 | if (err) { |
---|
3713 | 3982 | netdev->features = oper_features; |
---|
.. | .. |
---|
3734 | 4003 | netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); |
---|
3735 | 4004 | } |
---|
3736 | 4005 | if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) { |
---|
3737 | | - features &= ~NETIF_F_LRO; |
---|
3738 | | - if (params->lro_en) |
---|
| 4006 | + if (features & NETIF_F_LRO) { |
---|
3739 | 4007 | netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); |
---|
| 4008 | + features &= ~NETIF_F_LRO; |
---|
| 4009 | + } |
---|
| 4010 | + } |
---|
| 4011 | + |
---|
| 4012 | + if (params->xdp_prog) { |
---|
| 4013 | + if (features & NETIF_F_LRO) { |
---|
| 4014 | + netdev_warn(netdev, "LRO is incompatible with XDP\n"); |
---|
| 4015 | + features &= ~NETIF_F_LRO; |
---|
| 4016 | + } |
---|
3740 | 4017 | } |
---|
3741 | 4018 | |
---|
3742 | 4019 | if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { |
---|
.. | .. |
---|
3750 | 4027 | return features; |
---|
3751 | 4028 | } |
---|
3752 | 4029 | |
---|
| 4030 | +static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, |
---|
| 4031 | + struct mlx5e_channels *chs, |
---|
| 4032 | + struct mlx5e_params *new_params, |
---|
| 4033 | + struct mlx5_core_dev *mdev) |
---|
| 4034 | +{ |
---|
| 4035 | + u16 ix; |
---|
| 4036 | + |
---|
| 4037 | + for (ix = 0; ix < chs->params.num_channels; ix++) { |
---|
| 4038 | + struct xsk_buff_pool *xsk_pool = |
---|
| 4039 | + mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix); |
---|
| 4040 | + struct mlx5e_xsk_param xsk; |
---|
| 4041 | + |
---|
| 4042 | + if (!xsk_pool) |
---|
| 4043 | + continue; |
---|
| 4044 | + |
---|
| 4045 | + mlx5e_build_xsk_param(xsk_pool, &xsk); |
---|
| 4046 | + |
---|
| 4047 | + if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) { |
---|
| 4048 | + u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk); |
---|
| 4049 | + int max_mtu_frame, max_mtu_page, max_mtu; |
---|
| 4050 | + |
---|
| 4051 | + /* Two criteria must be met: |
---|
| 4052 | + * 1. HW MTU + all headrooms <= XSK frame size. |
---|
| 4053 | + * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE. |
---|
| 4054 | + */ |
---|
| 4055 | + max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr); |
---|
| 4056 | + max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk); |
---|
| 4057 | + max_mtu = min(max_mtu_frame, max_mtu_page); |
---|
| 4058 | + |
---|
| 4059 | + netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n", |
---|
| 4060 | + new_params->sw_mtu, ix, max_mtu); |
---|
| 4061 | + return false; |
---|
| 4062 | + } |
---|
| 4063 | + } |
---|
| 4064 | + |
---|
| 4065 | + return true; |
---|
| 4066 | +} |
---|
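Editor's note: a standalone sketch of the two criteria in mlx5e_xsk_validate_mtu(). Criterion 1 bounds the MTU by the XSK frame (chunk) size minus headroom; criterion 2 bounds it by what fits in a page once SKB overhead is added on XDP_PASS. The 22-byte hard MTU, 256-byte headroom and 320-byte SKB overhead are assumptions for illustration:

```c
#include <stdio.h>

#define HARD_MTU     22
#define PAGE_SZ      4096
#define SKB_OVERHEAD 320

int main(void)
{
	int chunk_size = 2048, headroom = 256;	/* hypothetical XSK params */
	int max_mtu_frame = chunk_size - headroom - HARD_MTU;	/* criterion 1 */
	int max_mtu_page  = PAGE_SZ - SKB_OVERHEAD - HARD_MTU;	/* criterion 2 */
	int max_mtu = max_mtu_frame < max_mtu_page ? max_mtu_frame : max_mtu_page;

	printf("XSK max MTU = min(%d, %d) = %d\n",
	       max_mtu_frame, max_mtu_page, max_mtu);
	return 0;
}
```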
| 4067 | + |
---|
3753 | 4068 | int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, |
---|
3754 | | - change_hw_mtu_cb set_mtu_cb) |
---|
| 4069 | + mlx5e_fp_preactivate preactivate) |
---|
3755 | 4070 | { |
---|
3756 | 4071 | struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
3757 | 4072 | struct mlx5e_channels new_channels = {}; |
---|
.. | .. |
---|
3770 | 4085 | new_channels.params.sw_mtu = new_mtu; |
---|
3771 | 4086 | |
---|
3772 | 4087 | if (params->xdp_prog && |
---|
3773 | | - !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { |
---|
| 4088 | + !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { |
---|
3774 | 4089 | netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", |
---|
3775 | | - new_mtu, mlx5e_xdp_max_mtu(params)); |
---|
| 4090 | + new_mtu, mlx5e_xdp_max_mtu(params, NULL)); |
---|
| 4091 | + err = -EINVAL; |
---|
| 4092 | + goto out; |
---|
| 4093 | + } |
---|
| 4094 | + |
---|
| 4095 | + if (priv->xsk.refcnt && |
---|
| 4096 | + !mlx5e_xsk_validate_mtu(netdev, &priv->channels, |
---|
| 4097 | + &new_channels.params, priv->mdev)) { |
---|
3776 | 4098 | err = -EINVAL; |
---|
3777 | 4099 | goto out; |
---|
3778 | 4100 | } |
---|
3779 | 4101 | |
---|
3780 | 4102 | if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { |
---|
3781 | | - bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params); |
---|
3782 | | - u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); |
---|
3783 | | - u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); |
---|
| 4103 | + bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, |
---|
| 4104 | + &new_channels.params, |
---|
| 4105 | + NULL); |
---|
| 4106 | + u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL); |
---|
| 4107 | + u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL); |
---|
3784 | 4108 | |
---|
| 4109 | + /* If XSK is active, XSK RQs are linear. */ |
---|
| 4110 | + is_linear |= priv->xsk.refcnt; |
---|
| 4111 | + |
---|
| 4112 | + /* Always reset in linear mode - hw_mtu is used in data path. */ |
---|
3785 | 4113 | reset = reset && (is_linear || (ppw_old != ppw_new)); |
---|
3786 | 4114 | } |
---|
3787 | 4115 | |
---|
3788 | 4116 | if (!reset) { |
---|
| 4117 | + unsigned int old_mtu = params->sw_mtu; |
---|
| 4118 | + |
---|
3789 | 4119 | params->sw_mtu = new_mtu; |
---|
3790 | | - if (set_mtu_cb) |
---|
3791 | | - set_mtu_cb(priv); |
---|
| 4120 | + if (preactivate) { |
---|
| 4121 | + err = preactivate(priv, NULL); |
---|
| 4122 | + if (err) { |
---|
| 4123 | + params->sw_mtu = old_mtu; |
---|
| 4124 | + goto out; |
---|
| 4125 | + } |
---|
| 4126 | + } |
---|
3792 | 4127 | netdev->mtu = params->sw_mtu; |
---|
3793 | 4128 | goto out; |
---|
3794 | 4129 | } |
---|
3795 | 4130 | |
---|
3796 | | - err = mlx5e_open_channels(priv, &new_channels); |
---|
| 4131 | + err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL); |
---|
3797 | 4132 | if (err) |
---|
3798 | 4133 | goto out; |
---|
3799 | 4134 | |
---|
3800 | | - mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb); |
---|
3801 | 4135 | netdev->mtu = new_channels.params.sw_mtu; |
---|
3802 | 4136 | |
---|
3803 | 4137 | out: |
---|
.. | .. |
---|
3807 | 4141 | |
---|
3808 | 4142 | static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu) |
---|
3809 | 4143 | { |
---|
3810 | | - return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu); |
---|
| 4144 | + return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx); |
---|
3811 | 4145 | } |
---|
3812 | 4146 | |
---|
3813 | 4147 | int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) |
---|
.. | .. |
---|
3854 | 4188 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: |
---|
3855 | 4189 | case HWTSTAMP_FILTER_NTP_ALL: |
---|
3856 | 4190 | /* Disable CQE compression */ |
---|
3857 | | - netdev_warn(priv->netdev, "Disabling cqe compression"); |
---|
| 4191 | + if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) |
---|
| 4192 | + netdev_warn(priv->netdev, "Disabling RX cqe compression\n"); |
---|
3858 | 4193 | err = mlx5e_modify_rx_cqe_compression_locked(priv, false); |
---|
3859 | 4194 | if (err) { |
---|
3860 | 4195 | netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); |
---|
.. | .. |
---|
3903 | 4238 | } |
---|
3904 | 4239 | |
---|
3905 | 4240 | #ifdef CONFIG_MLX5_ESWITCH |
---|
3906 | | -static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) |
---|
| 4241 | +int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) |
---|
3907 | 4242 | { |
---|
3908 | 4243 | struct mlx5e_priv *priv = netdev_priv(dev); |
---|
3909 | 4244 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
.. | .. |
---|
3940 | 4275 | return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting); |
---|
3941 | 4276 | } |
---|
3942 | 4277 | |
---|
3943 | | -static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, |
---|
3944 | | - int max_tx_rate) |
---|
| 4278 | +int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, |
---|
| 4279 | + int max_tx_rate) |
---|
3945 | 4280 | { |
---|
3946 | 4281 | struct mlx5e_priv *priv = netdev_priv(dev); |
---|
3947 | 4282 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
.. | .. |
---|
3982 | 4317 | mlx5_ifla_link2vport(link_state)); |
---|
3983 | 4318 | } |
---|
3984 | 4319 | |
---|
3985 | | -static int mlx5e_get_vf_config(struct net_device *dev, |
---|
3986 | | - int vf, struct ifla_vf_info *ivi) |
---|
| 4320 | +int mlx5e_get_vf_config(struct net_device *dev, |
---|
| 4321 | + int vf, struct ifla_vf_info *ivi) |
---|
3987 | 4322 | { |
---|
3988 | 4323 | struct mlx5e_priv *priv = netdev_priv(dev); |
---|
3989 | 4324 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
.. | .. |
---|
3996 | 4331 | return 0; |
---|
3997 | 4332 | } |
---|
3998 | 4333 | |
---|
3999 | | -static int mlx5e_get_vf_stats(struct net_device *dev, |
---|
4000 | | - int vf, struct ifla_vf_stats *vf_stats) |
---|
| 4334 | +int mlx5e_get_vf_stats(struct net_device *dev, |
---|
| 4335 | + int vf, struct ifla_vf_stats *vf_stats) |
---|
4001 | 4336 | { |
---|
4002 | 4337 | struct mlx5e_priv *priv = netdev_priv(dev); |
---|
4003 | 4338 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
.. | .. |
---|
4007 | 4342 | } |
---|
4008 | 4343 | #endif |
---|
4009 | 4344 | |
---|
4010 | | -struct mlx5e_vxlan_work { |
---|
4011 | | - struct work_struct work; |
---|
4012 | | - struct mlx5e_priv *priv; |
---|
4013 | | - u16 port; |
---|
4014 | | -}; |
---|
4015 | | - |
---|
4016 | | -static void mlx5e_vxlan_add_work(struct work_struct *work) |
---|
| 4345 | +static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev, |
---|
| 4346 | + struct sk_buff *skb) |
---|
4017 | 4347 | { |
---|
4018 | | - struct mlx5e_vxlan_work *vxlan_work = |
---|
4019 | | - container_of(work, struct mlx5e_vxlan_work, work); |
---|
4020 | | - struct mlx5e_priv *priv = vxlan_work->priv; |
---|
4021 | | - u16 port = vxlan_work->port; |
---|
4022 | | - |
---|
4023 | | - mutex_lock(&priv->state_lock); |
---|
4024 | | - mlx5_vxlan_add_port(priv->mdev->vxlan, port); |
---|
4025 | | - mutex_unlock(&priv->state_lock); |
---|
4026 | | - |
---|
4027 | | - kfree(vxlan_work); |
---|
4028 | | -} |
---|
4029 | | - |
---|
4030 | | -static void mlx5e_vxlan_del_work(struct work_struct *work) |
---|
4031 | | -{ |
---|
4032 | | - struct mlx5e_vxlan_work *vxlan_work = |
---|
4033 | | - container_of(work, struct mlx5e_vxlan_work, work); |
---|
4034 | | - struct mlx5e_priv *priv = vxlan_work->priv; |
---|
4035 | | - u16 port = vxlan_work->port; |
---|
4036 | | - |
---|
4037 | | - mutex_lock(&priv->state_lock); |
---|
4038 | | - mlx5_vxlan_del_port(priv->mdev->vxlan, port); |
---|
4039 | | - mutex_unlock(&priv->state_lock); |
---|
4040 | | - kfree(vxlan_work); |
---|
4041 | | -} |
---|
4042 | | - |
---|
4043 | | -static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add) |
---|
4044 | | -{ |
---|
4045 | | - struct mlx5e_vxlan_work *vxlan_work; |
---|
4046 | | - |
---|
4047 | | - vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC); |
---|
4048 | | - if (!vxlan_work) |
---|
4049 | | - return; |
---|
4050 | | - |
---|
4051 | | - if (add) |
---|
4052 | | - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work); |
---|
4053 | | - else |
---|
4054 | | - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work); |
---|
4055 | | - |
---|
4056 | | - vxlan_work->priv = priv; |
---|
4057 | | - vxlan_work->port = port; |
---|
4058 | | - queue_work(priv->wq, &vxlan_work->work); |
---|
4059 | | -} |
---|
4060 | | - |
---|
4061 | | -static void mlx5e_add_vxlan_port(struct net_device *netdev, |
---|
4062 | | - struct udp_tunnel_info *ti) |
---|
4063 | | -{ |
---|
4064 | | - struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
4065 | | - |
---|
4066 | | - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) |
---|
4067 | | - return; |
---|
4068 | | - |
---|
4069 | | - if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) |
---|
4070 | | - return; |
---|
4071 | | - |
---|
4072 | | - mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1); |
---|
4073 | | -} |
---|
4074 | | - |
---|
4075 | | -static void mlx5e_del_vxlan_port(struct net_device *netdev, |
---|
4076 | | - struct udp_tunnel_info *ti) |
---|
4077 | | -{ |
---|
4078 | | - struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
4079 | | - |
---|
4080 | | - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) |
---|
4081 | | - return; |
---|
4082 | | - |
---|
4083 | | - if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) |
---|
4084 | | - return; |
---|
4085 | | - |
---|
4086 | | - mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0); |
---|
| 4348 | + switch (skb->inner_protocol) { |
---|
| 4349 | + case htons(ETH_P_IP): |
---|
| 4350 | + case htons(ETH_P_IPV6): |
---|
| 4351 | + case htons(ETH_P_TEB): |
---|
| 4352 | + return true; |
---|
| 4353 | + case htons(ETH_P_MPLS_UC): |
---|
| 4354 | + case htons(ETH_P_MPLS_MC): |
---|
| 4355 | + return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre); |
---|
| 4356 | + } |
---|
| 4357 | + return false; |
---|
4087 | 4358 | } |
---|
4088 | 4359 | |
---|
4089 | 4360 | static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, |
---|
.. | .. |
---|
4108 | 4379 | |
---|
4109 | 4380 | switch (proto) { |
---|
4110 | 4381 | case IPPROTO_GRE: |
---|
4111 | | - return features; |
---|
| 4382 | + if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb)) |
---|
| 4383 | + return features; |
---|
| 4384 | + break; |
---|
| 4385 | + case IPPROTO_IPIP: |
---|
| 4386 | + case IPPROTO_IPV6: |
---|
| 4387 | + if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP)) |
---|
| 4388 | + return features; |
---|
| 4389 | + break; |
---|
4112 | 4390 | case IPPROTO_UDP: |
---|
4113 | 4391 | udph = udp_hdr(skb); |
---|
4114 | 4392 | port = be16_to_cpu(udph->dest); |
---|
.. | .. |
---|
4116 | 4394 | /* Verify if UDP port is being offloaded by HW */ |
---|
4117 | 4395 | if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port)) |
---|
4118 | 4396 | return features; |
---|
| 4397 | + |
---|
| 4398 | +#if IS_ENABLED(CONFIG_GENEVE) |
---|
| 4399 | + /* Support Geneve offload for the default UDP port */ |
---|
| 4400 | + if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev)) |
---|
| 4401 | + return features; |
---|
| 4402 | +#endif |
---|
4119 | 4403 | } |
---|
4120 | 4404 | |
---|
4121 | 4405 | out: |
---|
.. | .. |
---|
4123 | 4407 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
---|
4124 | 4408 | } |
---|
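
The features_check path now admits GRE only for inner protocols the HCA can parse (IP, IPv6, TEB, and MPLS when the capability bit is set), IP-in-IP when the tunnel protocol is supported, and UDP only for offloaded VXLAN ports or the default Geneve port. Everything else falls through to the final mask, which strips checksum and GSO bits so the stack falls back to software. The general idiom, as a standalone sketch (demo_* names are hypothetical):

    /* Generic .ndo_features_check idiom: clear the offload bits the HW
     * cannot honour for this skb; the core then applies software
     * checksumming/GSO instead.
     */
    static netdev_features_t demo_features_check(struct sk_buff *skb,
                                                 struct net_device *dev,
                                                 netdev_features_t features)
    {
        if (skb->encapsulation && !demo_hw_can_offload(skb)) /* hypothetical */
            features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
        return features;
    }
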
4125 | 4409 | |
---|
4126 | | -static netdev_features_t mlx5e_features_check(struct sk_buff *skb, |
---|
4127 | | - struct net_device *netdev, |
---|
4128 | | - netdev_features_t features) |
---|
| 4410 | +netdev_features_t mlx5e_features_check(struct sk_buff *skb, |
---|
| 4411 | + struct net_device *netdev, |
---|
| 4412 | + netdev_features_t features) |
---|
4129 | 4413 | { |
---|
4130 | 4414 | struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
4131 | 4415 | |
---|
.. | .. |
---|
4145 | 4429 | return features; |
---|
4146 | 4430 | } |
---|
4147 | 4431 | |
---|
4148 | | -static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, |
---|
4149 | | - struct mlx5e_txqsq *sq) |
---|
4150 | | -{ |
---|
4151 | | - struct mlx5_eq *eq = sq->cq.mcq.eq; |
---|
4152 | | - u32 eqe_count; |
---|
4153 | | - |
---|
4154 | | - netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", |
---|
4155 | | - eq->eqn, eq->cons_index, eq->irqn); |
---|
4156 | | - |
---|
4157 | | - eqe_count = mlx5_eq_poll_irq_disabled(eq); |
---|
4158 | | - if (!eqe_count) |
---|
4159 | | - return false; |
---|
4160 | | - |
---|
4161 | | - netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn); |
---|
4162 | | - sq->channel->stats->eq_rearm++; |
---|
4163 | | - return true; |
---|
4164 | | -} |
---|
4165 | | - |
---|
4166 | 4432 | static void mlx5e_tx_timeout_work(struct work_struct *work) |
---|
4167 | 4433 | { |
---|
4168 | 4434 | struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, |
---|
4169 | 4435 | tx_timeout_work); |
---|
4170 | | - struct net_device *dev = priv->netdev; |
---|
4171 | | - bool reopen_channels = false; |
---|
4172 | | - int i, err; |
---|
| 4436 | + int i; |
---|
4173 | 4437 | |
---|
4174 | 4438 | rtnl_lock(); |
---|
4175 | 4439 | mutex_lock(&priv->state_lock); |
---|
.. | .. |
---|
4178 | 4442 | goto unlock; |
---|
4179 | 4443 | |
---|
4180 | 4444 | for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { |
---|
4181 | | - struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i); |
---|
| 4445 | + struct netdev_queue *dev_queue = |
---|
| 4446 | + netdev_get_tx_queue(priv->netdev, i); |
---|
4182 | 4447 | struct mlx5e_txqsq *sq = priv->txq2sq[i]; |
---|
4183 | 4448 | |
---|
4184 | 4449 | if (!netif_xmit_stopped(dev_queue)) |
---|
4185 | 4450 | continue; |
---|
4186 | 4451 | |
---|
4187 | | - netdev_err(dev, |
---|
4188 | | - "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", |
---|
4189 | | - i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, |
---|
4190 | | - jiffies_to_usecs(jiffies - dev_queue->trans_start)); |
---|
4191 | | - |
---|
4192 | | - /* If we recover a lost interrupt, most likely TX timeout will |
---|
4193 | | - * be resolved, skip reopening channels |
---|
4194 | | - */ |
---|
4195 | | - if (!mlx5e_tx_timeout_eq_recover(dev, sq)) { |
---|
4196 | | - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
---|
4197 | | - reopen_channels = true; |
---|
4198 | | - } |
---|
| 4452 | + if (mlx5e_reporter_tx_timeout(sq)) |
---|
| 4453 | + /* break if we tried to reopen the channels */ |
---|
| 4454 | + break; |
---|
4199 | 4455 | } |
---|
4200 | | - |
---|
4201 | | - if (!reopen_channels) |
---|
4202 | | - goto unlock; |
---|
4203 | | - |
---|
4204 | | - mlx5e_close_locked(dev); |
---|
4205 | | - err = mlx5e_open_locked(dev); |
---|
4206 | | - if (err) |
---|
4207 | | - netdev_err(priv->netdev, |
---|
4208 | | - "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", |
---|
4209 | | - err); |
---|
4210 | 4456 | |
---|
4211 | 4457 | unlock: |
---|
4212 | 4458 | mutex_unlock(&priv->state_lock); |
---|
4213 | 4459 | rtnl_unlock(); |
---|
4214 | 4460 | } |
---|
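
Two changes meet here: the manual EQ-poll recovery and the wholesale close/open fallback are replaced by a single call into the devlink health machinery (mlx5e_reporter_tx_timeout() evidently returns nonzero once the reporter's recovery has reopened the channels, hence the early break), and .ndo_tx_timeout gains the index of the queue that actually stalled. A sketch of the new ndo shape, body illustrative only:

    /* The txqueue argument lets a driver target recovery at the stalled
     * queue instead of scanning every TX queue. demo_* is hypothetical.
     */
    static void demo_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
        struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

        netdev_err(dev, "TX timeout on queue %u, %u usecs since last trans\n",
                   txqueue, jiffies_to_usecs(jiffies - txq->trans_start));
    }
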
4215 | 4461 | |
---|
4216 | | -static void mlx5e_tx_timeout(struct net_device *dev) |
---|
| 4462 | +static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue) |
---|
4217 | 4463 | { |
---|
4218 | 4464 | struct mlx5e_priv *priv = netdev_priv(dev); |
---|
4219 | 4465 | |
---|
.. | .. |
---|
4239 | 4485 | new_channels.params = priv->channels.params; |
---|
4240 | 4486 | new_channels.params.xdp_prog = prog; |
---|
4241 | 4487 | |
---|
4242 | | - if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { |
---|
| 4488 | + /* No XSK params: AF_XDP can't be enabled yet at the point of setting |
---|
| 4489 | + * the XDP program. |
---|
| 4490 | + */ |
---|
| 4491 | + if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { |
---|
4243 | 4492 | netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", |
---|
4244 | 4493 | new_channels.params.sw_mtu, |
---|
4245 | | - mlx5e_xdp_max_mtu(&new_channels.params)); |
---|
| 4494 | + mlx5e_xdp_max_mtu(&new_channels.params, NULL)); |
---|
4246 | 4495 | return -EINVAL; |
---|
4247 | 4496 | } |
---|
4248 | 4497 | |
---|
4249 | 4498 | return 0; |
---|
| 4499 | +} |
---|
| 4500 | + |
---|
| 4501 | +static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog) |
---|
| 4502 | +{ |
---|
| 4503 | + struct bpf_prog *old_prog; |
---|
| 4504 | + |
---|
| 4505 | + old_prog = rcu_replace_pointer(rq->xdp_prog, prog, |
---|
| 4506 | + lockdep_is_held(&rq->channel->priv->state_lock)); |
---|
| 4507 | + if (old_prog) |
---|
| 4508 | + bpf_prog_put(old_prog); |
---|
4250 | 4509 | } |
---|
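
rq->xdp_prog is treated as an RCU pointer from here on: rcu_replace_pointer() swaps it while lockdep-asserting the priv state lock on the update side, so a plain program exchange no longer needs the ring-disable/napi_synchronize() sequence removed further down. The reader side this pairs with would look roughly like:

    /* Hedged reader-side sketch pairing with rcu_replace_pointer() above;
     * field names follow the diff, the function itself is illustrative.
     */
    static u32 demo_run_xdp(struct mlx5e_rq *rq, struct xdp_buff *xdp)
    {
        struct bpf_prog *prog;
        u32 act = XDP_PASS;

        rcu_read_lock();
        prog = rcu_dereference(rq->xdp_prog);
        if (prog)
            act = bpf_prog_run_xdp(prog, xdp);
        rcu_read_unlock();
        return act;
    }
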
4251 | 4510 | |
---|
4252 | 4511 | static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) |
---|
.. | .. |
---|
4269 | 4528 | /* no need for full reset when exchanging programs */ |
---|
4270 | 4529 | reset = (!priv->channels.params.xdp_prog || !prog); |
---|
4271 | 4530 | |
---|
4272 | | - if (was_opened && reset) |
---|
4273 | | - mlx5e_close_locked(netdev); |
---|
4274 | | - if (was_opened && !reset) { |
---|
| 4531 | + if (was_opened && !reset) |
---|
4275 | 4532 | /* num_channels is invariant here, so we can take the |
---|
4276 | 4533 | * batched reference right upfront. |
---|
4277 | 4534 | */ |
---|
4278 | | - prog = bpf_prog_add(prog, priv->channels.num); |
---|
4279 | | - if (IS_ERR(prog)) { |
---|
4280 | | - err = PTR_ERR(prog); |
---|
| 4535 | + bpf_prog_add(prog, priv->channels.num); |
---|
| 4536 | + |
---|
| 4537 | + if (was_opened && reset) { |
---|
| 4538 | + struct mlx5e_channels new_channels = {}; |
---|
| 4539 | + |
---|
| 4540 | + new_channels.params = priv->channels.params; |
---|
| 4541 | + new_channels.params.xdp_prog = prog; |
---|
| 4542 | + mlx5e_set_rq_type(priv->mdev, &new_channels.params); |
---|
| 4543 | + old_prog = priv->channels.params.xdp_prog; |
---|
| 4544 | + |
---|
| 4545 | + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); |
---|
| 4546 | + if (err) |
---|
4281 | 4547 | goto unlock; |
---|
4282 | | - } |
---|
| 4548 | + } else { |
---|
| 4549 | + /* Exchange programs; we keep the extra prog reference from the |
---|
| 4550 | + * caller as long as we don't fail from this point onwards. |
---|
| 4551 | + */ |
---|
| 4552 | + old_prog = xchg(&priv->channels.params.xdp_prog, prog); |
---|
4283 | 4553 | } |
---|
4284 | 4554 | |
---|
4285 | | - /* exchange programs, extra prog reference we got from caller |
---|
4286 | | - * as long as we don't fail from this point onwards. |
---|
4287 | | - */ |
---|
4288 | | - old_prog = xchg(&priv->channels.params.xdp_prog, prog); |
---|
4289 | 4555 | if (old_prog) |
---|
4290 | 4556 | bpf_prog_put(old_prog); |
---|
4291 | 4557 | |
---|
4292 | | - if (reset) /* change RQ type according to priv->xdp_prog */ |
---|
| 4558 | + if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */ |
---|
4293 | 4559 | mlx5e_set_rq_type(priv->mdev, &priv->channels.params); |
---|
4294 | 4560 | |
---|
4295 | | - if (was_opened && reset) |
---|
4296 | | - mlx5e_open_locked(netdev); |
---|
4297 | | - |
---|
4298 | | - if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset) |
---|
| 4561 | + if (!was_opened || reset) |
---|
4299 | 4562 | goto unlock; |
---|
4300 | 4563 | |
---|
4301 | 4564 | /* exchanging programs w/o reset, we update ref counts on behalf |
---|
.. | .. |
---|
4304 | 4567 | for (i = 0; i < priv->channels.num; i++) { |
---|
4305 | 4568 | struct mlx5e_channel *c = priv->channels.c[i]; |
---|
4306 | 4569 | |
---|
4307 | | - clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); |
---|
4308 | | - napi_synchronize(&c->napi); |
---|
4309 | | - /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */ |
---|
4310 | | - |
---|
4311 | | - old_prog = xchg(&c->rq.xdp_prog, prog); |
---|
4312 | | - |
---|
4313 | | - set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); |
---|
4314 | | - /* napi_schedule in case we have missed anything */ |
---|
4315 | | - napi_schedule(&c->napi); |
---|
4316 | | - |
---|
4317 | | - if (old_prog) |
---|
4318 | | - bpf_prog_put(old_prog); |
---|
| 4570 | + mlx5e_rq_replace_xdp_prog(&c->rq, prog); |
---|
| 4571 | + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) { |
---|
| 4572 | + bpf_prog_inc(prog); |
---|
| 4573 | + mlx5e_rq_replace_xdp_prog(&c->xskrq, prog); |
---|
| 4574 | + } |
---|
4319 | 4575 | } |
---|
4320 | 4576 | |
---|
4321 | 4577 | unlock: |
---|
4322 | 4578 | mutex_unlock(&priv->state_lock); |
---|
| 4579 | + |
---|
| 4580 | + /* The XDP program change may affect netdev features; re-evaluate. */ |
---|
| 4581 | + if (!err) |
---|
| 4582 | + netdev_update_features(netdev); |
---|
| 4583 | + |
---|
4323 | 4584 | return err; |
---|
4324 | | -} |
---|
4325 | | - |
---|
4326 | | -static u32 mlx5e_xdp_query(struct net_device *dev) |
---|
4327 | | -{ |
---|
4328 | | - struct mlx5e_priv *priv = netdev_priv(dev); |
---|
4329 | | - const struct bpf_prog *xdp_prog; |
---|
4330 | | - u32 prog_id = 0; |
---|
4331 | | - |
---|
4332 | | - mutex_lock(&priv->state_lock); |
---|
4333 | | - xdp_prog = priv->channels.params.xdp_prog; |
---|
4334 | | - if (xdp_prog) |
---|
4335 | | - prog_id = xdp_prog->aux->id; |
---|
4336 | | - mutex_unlock(&priv->state_lock); |
---|
4337 | | - |
---|
4338 | | - return prog_id; |
---|
4339 | 4585 | } |
---|
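
The reference accounting also changes shape: with the program refcount now effectively unbounded, bpf_prog_add() cannot fail and returns void, which is why the IS_ERR() handling disappears. For the no-reset path, the bookkeeping works out as follows (annotated recap of the code above, not new logic):

    /* N = priv->channels.num. The caller hands in one reference.
     * bpf_prog_add(prog, N) takes N more, one per channel RQ; the
     * caller's reference is parked in params.xdp_prog by the xchg();
     * each active XSK RQ takes one extra via bpf_prog_inc(); and
     * bpf_prog_put() drops the old program's reference per consumer.
     */
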
4340 | 4586 | |
---|
4341 | 4587 | static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
---|
.. | .. |
---|
4343 | 4589 | switch (xdp->command) { |
---|
4344 | 4590 | case XDP_SETUP_PROG: |
---|
4345 | 4591 | return mlx5e_xdp_set(dev, xdp->prog); |
---|
4346 | | - case XDP_QUERY_PROG: |
---|
4347 | | - xdp->prog_id = mlx5e_xdp_query(dev); |
---|
4348 | | - return 0; |
---|
| 4592 | + case XDP_SETUP_XSK_POOL: |
---|
| 4593 | + return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool, |
---|
| 4594 | + xdp->xsk.queue_id); |
---|
4349 | 4595 | default: |
---|
4350 | 4596 | return -EINVAL; |
---|
4351 | 4597 | } |
---|
4352 | 4598 | } |
---|
| 4599 | + |
---|
| 4600 | +#ifdef CONFIG_MLX5_ESWITCH |
---|
| 4601 | +static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
---|
| 4602 | + struct net_device *dev, u32 filter_mask, |
---|
| 4603 | + int nlflags) |
---|
| 4604 | +{ |
---|
| 4605 | + struct mlx5e_priv *priv = netdev_priv(dev); |
---|
| 4606 | + struct mlx5_core_dev *mdev = priv->mdev; |
---|
| 4607 | + u8 mode, setting; |
---|
| 4608 | + int err; |
---|
| 4609 | + |
---|
| 4610 | + err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting); |
---|
| 4611 | + if (err) |
---|
| 4612 | + return err; |
---|
| 4613 | + mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB; |
---|
| 4614 | + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, |
---|
| 4615 | + mode, |
---|
| 4616 | + 0, 0, nlflags, filter_mask, NULL); |
---|
| 4617 | +} |
---|
| 4618 | + |
---|
| 4619 | +static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, |
---|
| 4620 | + u16 flags, struct netlink_ext_ack *extack) |
---|
| 4621 | +{ |
---|
| 4622 | + struct mlx5e_priv *priv = netdev_priv(dev); |
---|
| 4623 | + struct mlx5_core_dev *mdev = priv->mdev; |
---|
| 4624 | + struct nlattr *attr, *br_spec; |
---|
| 4625 | + u16 mode = BRIDGE_MODE_UNDEF; |
---|
| 4626 | + u8 setting; |
---|
| 4627 | + int rem; |
---|
| 4628 | + |
---|
| 4629 | + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); |
---|
| 4630 | + if (!br_spec) |
---|
| 4631 | + return -EINVAL; |
---|
| 4632 | + |
---|
| 4633 | + nla_for_each_nested(attr, br_spec, rem) { |
---|
| 4634 | + if (nla_type(attr) != IFLA_BRIDGE_MODE) |
---|
| 4635 | + continue; |
---|
| 4636 | + |
---|
| 4637 | + if (nla_len(attr) < sizeof(mode)) |
---|
| 4638 | + return -EINVAL; |
---|
| 4639 | + |
---|
| 4640 | + mode = nla_get_u16(attr); |
---|
| 4641 | + if (mode > BRIDGE_MODE_VEPA) |
---|
| 4642 | + return -EINVAL; |
---|
| 4643 | + |
---|
| 4644 | + break; |
---|
| 4645 | + } |
---|
| 4646 | + |
---|
| 4647 | + if (mode == BRIDGE_MODE_UNDEF) |
---|
| 4648 | + return -EINVAL; |
---|
| 4649 | + |
---|
| 4650 | + setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0; |
---|
| 4651 | + return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting); |
---|
| 4652 | +} |
---|
| 4653 | +#endif |
---|
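
The new bridge NDOs expose the e-switch flood behaviour through the standard bridge netlink API: setlink walks IFLA_AF_SPEC for an IFLA_BRIDGE_MODE attribute and maps it onto the e-switch VEPA setting, while getlink reports it back via ndo_dflt_bridge_getlink(). In VEB mode VF-to-VF traffic is switched inside the NIC; VEPA hairpins it through the external switch. From userspace this would presumably be driven by iproute2, e.g. `bridge link set dev <uplink> hwmode vepa`. The mapping itself is just:

    /* Illustrative restatement of the mode-to-setting mapping above. */
    static u8 demo_bridge_mode_to_vepa(u16 mode)
    {
        return mode == BRIDGE_MODE_VEPA ? 1 : 0;
    }
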
4353 | 4654 | |
---|
4354 | 4655 | const struct net_device_ops mlx5e_netdev_ops = { |
---|
4355 | 4656 | .ndo_open = mlx5e_open, |
---|
.. | .. |
---|
4367 | 4668 | .ndo_change_mtu = mlx5e_change_nic_mtu, |
---|
4368 | 4669 | .ndo_do_ioctl = mlx5e_ioctl, |
---|
4369 | 4670 | .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, |
---|
4370 | | - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, |
---|
4371 | | - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, |
---|
| 4671 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
---|
| 4672 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
---|
4372 | 4673 | .ndo_features_check = mlx5e_features_check, |
---|
4373 | 4674 | .ndo_tx_timeout = mlx5e_tx_timeout, |
---|
4374 | 4675 | .ndo_bpf = mlx5e_xdp, |
---|
4375 | 4676 | .ndo_xdp_xmit = mlx5e_xdp_xmit, |
---|
| 4677 | + .ndo_xsk_wakeup = mlx5e_xsk_wakeup, |
---|
4376 | 4678 | #ifdef CONFIG_MLX5_EN_ARFS |
---|
4377 | 4679 | .ndo_rx_flow_steer = mlx5e_rx_flow_steer, |
---|
4378 | 4680 | #endif |
---|
4379 | 4681 | #ifdef CONFIG_MLX5_ESWITCH |
---|
| 4682 | + .ndo_bridge_setlink = mlx5e_bridge_setlink, |
---|
| 4683 | + .ndo_bridge_getlink = mlx5e_bridge_getlink, |
---|
| 4684 | + |
---|
4380 | 4685 | /* SRIOV E-Switch NDOs */ |
---|
4381 | 4686 | .ndo_set_vf_mac = mlx5e_set_vf_mac, |
---|
4382 | 4687 | .ndo_set_vf_vlan = mlx5e_set_vf_vlan, |
---|
.. | .. |
---|
4386 | 4691 | .ndo_get_vf_config = mlx5e_get_vf_config, |
---|
4387 | 4692 | .ndo_set_vf_link_state = mlx5e_set_vf_link_state, |
---|
4388 | 4693 | .ndo_get_vf_stats = mlx5e_get_vf_stats, |
---|
4389 | | - .ndo_has_offload_stats = mlx5e_has_offload_stats, |
---|
4390 | | - .ndo_get_offload_stats = mlx5e_get_offload_stats, |
---|
4391 | 4694 | #endif |
---|
| 4695 | + .ndo_get_devlink_port = mlx5e_get_devlink_port, |
---|
4392 | 4696 | }; |
---|
4393 | 4697 | |
---|
4394 | 4698 | static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) |
---|
.. | .. |
---|
4441 | 4745 | link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; |
---|
4442 | 4746 | } |
---|
4443 | 4747 | |
---|
4444 | | -static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) |
---|
| 4748 | +static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) |
---|
4445 | 4749 | { |
---|
4446 | | - struct net_dim_cq_moder moder; |
---|
| 4750 | + struct dim_cq_moder moder; |
---|
4447 | 4751 | |
---|
4448 | 4752 | moder.cq_period_mode = cq_period_mode; |
---|
4449 | 4753 | moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; |
---|
.. | .. |
---|
4454 | 4758 | return moder; |
---|
4455 | 4759 | } |
---|
4456 | 4760 | |
---|
4457 | | -static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) |
---|
| 4761 | +static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) |
---|
4458 | 4762 | { |
---|
4459 | | - struct net_dim_cq_moder moder; |
---|
| 4763 | + struct dim_cq_moder moder; |
---|
4460 | 4764 | |
---|
4461 | 4765 | moder.cq_period_mode = cq_period_mode; |
---|
4462 | 4766 | moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; |
---|
.. | .. |
---|
4470 | 4774 | static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) |
---|
4471 | 4775 | { |
---|
4472 | 4776 | return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? |
---|
4473 | | - NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE : |
---|
4474 | | - NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
---|
| 4777 | + DIM_CQ_PERIOD_MODE_START_FROM_CQE : |
---|
| 4778 | + DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
---|
4475 | 4779 | } |
---|
4476 | 4780 | |
---|
4477 | | -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) |
---|
| 4781 | +void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) |
---|
4478 | 4782 | { |
---|
4479 | 4783 | if (params->tx_dim_enabled) { |
---|
4480 | 4784 | u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); |
---|
.. | .. |
---|
4483 | 4787 | } else { |
---|
4484 | 4788 | params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); |
---|
4485 | 4789 | } |
---|
4486 | | - |
---|
4487 | | - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, |
---|
4488 | | - params->tx_cq_moderation.cq_period_mode == |
---|
4489 | | - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); |
---|
4490 | 4790 | } |
---|
4491 | 4791 | |
---|
4492 | | -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) |
---|
| 4792 | +void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) |
---|
4493 | 4793 | { |
---|
4494 | 4794 | if (params->rx_dim_enabled) { |
---|
4495 | 4795 | u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); |
---|
.. | .. |
---|
4498 | 4798 | } else { |
---|
4499 | 4799 | params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); |
---|
4500 | 4800 | } |
---|
| 4801 | +} |
---|
4501 | 4802 | |
---|
| 4803 | +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) |
---|
| 4804 | +{ |
---|
| 4805 | + mlx5e_reset_tx_moderation(params, cq_period_mode); |
---|
| 4806 | + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, |
---|
| 4807 | + params->tx_cq_moderation.cq_period_mode == |
---|
| 4808 | + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); |
---|
| 4809 | +} |
---|
| 4810 | + |
---|
| 4811 | +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) |
---|
| 4812 | +{ |
---|
| 4813 | + mlx5e_reset_rx_moderation(params, cq_period_mode); |
---|
4502 | 4814 | MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, |
---|
4503 | 4815 | params->rx_cq_moderation.cq_period_mode == |
---|
4504 | 4816 | MLX5_CQ_PERIOD_MODE_START_FROM_CQE); |
---|
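
The moderation code migrates from the mlx5-era net_dim types to the generic DIM library (struct dim_cq_moder, DIM_CQ_PERIOD_MODE_*), and the pflag update is split out of mlx5e_set_{rx,tx}_cq_mode_params() so that mlx5e_reset_{rx,tx}_moderation() can recompute the profile without clobbering the user-visible CQE_BASED_MODER private flags. The tuple being configured is small; abbreviated from linux/dim.h:

    /* Abbreviated shape of the generic DIM moderation tuple: the event
     * trigger limits plus the CQ period mode (EQE- or CQE-based).
     */
    struct dim_cq_moder {
        u16 usec;
        u16 pkts;
        u8  cq_period_mode;
    };
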
.. | .. |
---|
4516 | 4828 | return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); |
---|
4517 | 4829 | } |
---|
4518 | 4830 | |
---|
4519 | | -void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, |
---|
4520 | | - struct mlx5e_params *params, |
---|
4521 | | - u16 max_channels, u16 mtu) |
---|
| 4831 | +void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, |
---|
| 4832 | + struct mlx5e_params *params) |
---|
4522 | 4833 | { |
---|
| 4834 | + /* Prefer Striding RQ, unless any of the following holds: |
---|
| 4835 | + * - Striding RQ configuration is not possible/supported. |
---|
| 4836 | + * - Slow PCI heuristic. |
---|
| 4837 | + * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. |
---|
| 4838 | + * |
---|
| 4839 | + * No XSK params: checking the availability of striding RQ in general. |
---|
| 4840 | + */ |
---|
| 4841 | + if (!slow_pci_heuristic(mdev) && |
---|
| 4842 | + mlx5e_striding_rq_possible(mdev, params) && |
---|
| 4843 | + (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) || |
---|
| 4844 | + !mlx5e_rx_is_linear_skb(params, NULL))) |
---|
| 4845 | + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); |
---|
| 4846 | + mlx5e_set_rq_type(mdev, params); |
---|
| 4847 | + mlx5e_init_rq_type_params(mdev, params); |
---|
| 4848 | +} |
---|
| 4849 | + |
---|
| 4850 | +void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, |
---|
| 4851 | + u16 num_channels) |
---|
| 4852 | +{ |
---|
| 4853 | + enum mlx5e_traffic_types tt; |
---|
| 4854 | + |
---|
| 4855 | + rss_params->hfunc = ETH_RSS_HASH_TOP; |
---|
| 4856 | + netdev_rss_key_fill(rss_params->toeplitz_hash_key, |
---|
| 4857 | + sizeof(rss_params->toeplitz_hash_key)); |
---|
| 4858 | + mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, |
---|
| 4859 | + MLX5E_INDIR_RQT_SIZE, num_channels); |
---|
| 4860 | + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) |
---|
| 4861 | + rss_params->rx_hash_fields[tt] = |
---|
| 4862 | + tirc_default_config[tt].rx_hash_fields; |
---|
| 4863 | +} |
---|
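
mlx5e_build_rss_params() gathers the RSS defaults that the removed lines below used to set inline, with one visible change: the default hash function becomes Toeplitz (ETH_RSS_HASH_TOP) rather than XOR, alongside a random key, per-traffic-type hash fields, and an indirection table spread over the channels. Assuming mlx5e_build_default_indir_rqt() is the usual round-robin fill, it amounts to:

    /* Round-robin indirection fill, assuming this is what
     * mlx5e_build_default_indir_rqt() does internally.
     */
    static void demo_fill_indir(u32 *indir, int table_size, int num_channels)
    {
        int i;

        for (i = 0; i < table_size; i++)
            indir[i] = i % num_channels;
    }
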
| 4864 | + |
---|
| 4865 | +void mlx5e_build_nic_params(struct mlx5e_priv *priv, |
---|
| 4866 | + struct mlx5e_xsk *xsk, |
---|
| 4867 | + struct mlx5e_rss_params *rss_params, |
---|
| 4868 | + struct mlx5e_params *params, |
---|
| 4869 | + u16 mtu) |
---|
| 4870 | +{ |
---|
| 4871 | + struct mlx5_core_dev *mdev = priv->mdev; |
---|
4523 | 4872 | u8 rx_cq_period_mode; |
---|
4524 | 4873 | |
---|
4525 | 4874 | params->sw_mtu = mtu; |
---|
4526 | 4875 | params->hard_mtu = MLX5E_ETH_HARD_MTU; |
---|
4527 | | - params->num_channels = max_channels; |
---|
| 4876 | + params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, |
---|
| 4877 | + priv->max_nch); |
---|
4528 | 4878 | params->num_tc = 1; |
---|
4529 | 4879 | |
---|
4530 | 4880 | /* SQ */ |
---|
4531 | 4881 | params->log_sq_size = is_kdump_kernel() ? |
---|
4532 | 4882 | MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : |
---|
4533 | 4883 | MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; |
---|
| 4884 | + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, |
---|
| 4885 | + MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)); |
---|
| 4886 | + |
---|
| 4887 | + /* XDP SQ */ |
---|
| 4888 | + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, |
---|
| 4889 | + MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)); |
---|
4534 | 4890 | |
---|
4535 | 4891 | /* set CQE compression */ |
---|
4536 | 4892 | params->rx_cqe_compress_def = false; |
---|
.. | .. |
---|
4542 | 4898 | MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false); |
---|
4543 | 4899 | |
---|
4544 | 4900 | /* RQ */ |
---|
4545 | | - /* Prefer Striding RQ, unless any of the following holds: |
---|
4546 | | - * - Striding RQ configuration is not possible/supported. |
---|
4547 | | - * - Slow PCI heuristic. |
---|
4548 | | - * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. |
---|
4549 | | - */ |
---|
4550 | | - if (!slow_pci_heuristic(mdev) && |
---|
4551 | | - mlx5e_striding_rq_possible(mdev, params) && |
---|
4552 | | - (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || |
---|
4553 | | - !mlx5e_rx_is_linear_skb(mdev, params))) |
---|
4554 | | - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); |
---|
4555 | | - mlx5e_set_rq_type(mdev, params); |
---|
4556 | | - mlx5e_init_rq_type_params(mdev, params); |
---|
| 4901 | + mlx5e_build_rq_params(mdev, params); |
---|
4557 | 4902 | |
---|
4558 | 4903 | /* HW LRO */ |
---|
4559 | | - |
---|
4560 | | - /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */ |
---|
4561 | | - if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) |
---|
4562 | | - if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) |
---|
| 4904 | + if (MLX5_CAP_ETH(mdev, lro_cap) && |
---|
| 4905 | + params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { |
---|
| 4906 | + /* No XSK params: checking the availability of striding RQ in general. */ |
---|
| 4907 | + if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) |
---|
4563 | 4908 | params->lro_en = !slow_pci_heuristic(mdev); |
---|
| 4909 | + } |
---|
4564 | 4910 | params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); |
---|
4565 | 4911 | |
---|
4566 | 4912 | /* CQ moderation params */ |
---|
.. | .. |
---|
4573 | 4919 | mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); |
---|
4574 | 4920 | |
---|
4575 | 4921 | /* TX inline */ |
---|
4576 | | - params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); |
---|
| 4922 | + mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); |
---|
4577 | 4923 | |
---|
4578 | 4924 | /* RSS */ |
---|
4579 | | - params->rss_hfunc = ETH_RSS_HASH_XOR; |
---|
4580 | | - netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key)); |
---|
4581 | | - mlx5e_build_default_indir_rqt(params->indirection_rqt, |
---|
4582 | | - MLX5E_INDIR_RQT_SIZE, max_channels); |
---|
4583 | | -} |
---|
| 4925 | + mlx5e_build_rss_params(rss_params, params->num_channels); |
---|
| 4926 | + params->tunneled_offload_en = |
---|
| 4927 | + mlx5e_tunnel_inner_ft_supported(mdev); |
---|
4584 | 4928 | |
---|
4585 | | -static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, |
---|
4586 | | - struct net_device *netdev, |
---|
4587 | | - const struct mlx5e_profile *profile, |
---|
4588 | | - void *ppriv) |
---|
4589 | | -{ |
---|
4590 | | - struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
4591 | | - |
---|
4592 | | - priv->mdev = mdev; |
---|
4593 | | - priv->netdev = netdev; |
---|
4594 | | - priv->profile = profile; |
---|
4595 | | - priv->ppriv = ppriv; |
---|
4596 | | - priv->msglevel = MLX5E_MSG_LEVEL; |
---|
4597 | | - priv->max_opened_tc = 1; |
---|
4598 | | - |
---|
4599 | | - mlx5e_build_nic_params(mdev, &priv->channels.params, |
---|
4600 | | - profile->max_nch(mdev), netdev->mtu); |
---|
4601 | | - |
---|
4602 | | - mutex_init(&priv->state_lock); |
---|
4603 | | - |
---|
4604 | | - INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); |
---|
4605 | | - INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); |
---|
4606 | | - INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); |
---|
4607 | | - INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); |
---|
4608 | | - |
---|
4609 | | - mlx5e_timestamp_init(priv); |
---|
| 4929 | + /* AF_XDP */ |
---|
| 4930 | + params->xsk = xsk; |
---|
4610 | 4931 | } |
---|
4611 | 4932 | |
---|
4612 | 4933 | static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) |
---|
4613 | 4934 | { |
---|
4614 | 4935 | struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
4615 | 4936 | |
---|
4616 | | - mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr); |
---|
| 4937 | + mlx5_query_mac_address(priv->mdev, netdev->dev_addr); |
---|
4617 | 4938 | if (is_zero_ether_addr(netdev->dev_addr) && |
---|
4618 | 4939 | !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) { |
---|
4619 | 4940 | eth_hw_addr_random(netdev); |
---|
.. | .. |
---|
4621 | 4942 | } |
---|
4622 | 4943 | } |
---|
4623 | 4944 | |
---|
4624 | | -#if IS_ENABLED(CONFIG_MLX5_ESWITCH) |
---|
4625 | | -static const struct switchdev_ops mlx5e_switchdev_ops = { |
---|
4626 | | - .switchdev_port_attr_get = mlx5e_attr_get, |
---|
4627 | | -}; |
---|
4628 | | -#endif |
---|
| 4945 | +static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table, |
---|
| 4946 | + unsigned int entry, struct udp_tunnel_info *ti) |
---|
| 4947 | +{ |
---|
| 4948 | + struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
| 4949 | + |
---|
| 4950 | + return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port)); |
---|
| 4951 | +} |
---|
| 4952 | + |
---|
| 4953 | +static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table, |
---|
| 4954 | + unsigned int entry, struct udp_tunnel_info *ti) |
---|
| 4955 | +{ |
---|
| 4956 | + struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
| 4957 | + |
---|
| 4958 | + return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port)); |
---|
| 4959 | +} |
---|
| 4960 | + |
---|
| 4961 | +void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv) |
---|
| 4962 | +{ |
---|
| 4963 | + if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) |
---|
| 4964 | + return; |
---|
| 4965 | + |
---|
| 4966 | + priv->nic_info.set_port = mlx5e_vxlan_set_port; |
---|
| 4967 | + priv->nic_info.unset_port = mlx5e_vxlan_unset_port; |
---|
| 4968 | + priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | |
---|
| 4969 | + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN; |
---|
| 4970 | + priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN; |
---|
| 4971 | + /* Don't count the space hard-coded to the IANA port */ |
---|
| 4972 | + priv->nic_info.tables[0].n_entries = |
---|
| 4973 | + mlx5_vxlan_max_udp_ports(priv->mdev) - 1; |
---|
| 4974 | + |
---|
| 4975 | + priv->netdev->udp_tunnel_nic_info = &priv->nic_info; |
---|
| 4976 | +} |
---|
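
This is the replacement for the hand-rolled VXLAN work queue removed earlier: the driver now only describes its port table and provides set/unset callbacks, and the udp_tunnel_nic core does the bookkeeping, serialization, and replay (see the udp_tunnel_nic_reset_ntf() call later in this diff). UDP_TUNNEL_NIC_INFO_MAY_SLEEP means the callbacks run in process context, and STATIC_IANA_VXLAN declares that port 4789 is always offloaded, which is why it is subtracted from n_entries. With hypothetical numbers:

    /* Sizing example, hypothetical capability value: if the device has
     * room for 4 UDP tunnel ports and one is hard-wired to IANA VXLAN
     * (4789), the core manages only the remaining 3 entries.
     */
    priv->nic_info.tables[0].n_entries = 4 - 1; /* assumed max_udp_ports == 4 */
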
4629 | 4977 | |
---|
4630 | 4978 | static void mlx5e_build_nic_netdev(struct net_device *netdev) |
---|
4631 | 4979 | { |
---|
.. | .. |
---|
4634 | 4982 | bool fcs_supported; |
---|
4635 | 4983 | bool fcs_enabled; |
---|
4636 | 4984 | |
---|
4637 | | - SET_NETDEV_DEV(netdev, &mdev->pdev->dev); |
---|
| 4985 | + SET_NETDEV_DEV(netdev, mdev->device); |
---|
4638 | 4986 | |
---|
4639 | 4987 | netdev->netdev_ops = &mlx5e_netdev_ops; |
---|
4640 | 4988 | |
---|
4641 | | -#ifdef CONFIG_MLX5_CORE_EN_DCB |
---|
4642 | | - if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos)) |
---|
4643 | | - netdev->dcbnl_ops = &mlx5e_dcbnl_ops; |
---|
4644 | | -#endif |
---|
| 4989 | + mlx5e_dcbnl_build_netdev(netdev); |
---|
4645 | 4990 | |
---|
4646 | 4991 | netdev->watchdog_timeo = 15 * HZ; |
---|
4647 | 4992 | |
---|
4648 | 4993 | netdev->ethtool_ops = &mlx5e_ethtool_ops; |
---|
4649 | 4994 | |
---|
4650 | 4995 | netdev->vlan_features |= NETIF_F_SG; |
---|
4651 | | - netdev->vlan_features |= NETIF_F_IP_CSUM; |
---|
4652 | | - netdev->vlan_features |= NETIF_F_IPV6_CSUM; |
---|
| 4996 | + netdev->vlan_features |= NETIF_F_HW_CSUM; |
---|
4653 | 4997 | netdev->vlan_features |= NETIF_F_GRO; |
---|
4654 | 4998 | netdev->vlan_features |= NETIF_F_TSO; |
---|
4655 | 4999 | netdev->vlan_features |= NETIF_F_TSO6; |
---|
4656 | 5000 | netdev->vlan_features |= NETIF_F_RXCSUM; |
---|
4657 | 5001 | netdev->vlan_features |= NETIF_F_RXHASH; |
---|
4658 | 5002 | |
---|
| 5003 | + netdev->mpls_features |= NETIF_F_SG; |
---|
| 5004 | + netdev->mpls_features |= NETIF_F_HW_CSUM; |
---|
| 5005 | + netdev->mpls_features |= NETIF_F_TSO; |
---|
| 5006 | + netdev->mpls_features |= NETIF_F_TSO6; |
---|
| 5007 | + |
---|
4659 | 5008 | netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; |
---|
4660 | 5009 | netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; |
---|
4661 | 5010 | |
---|
| 5011 | + /* Tunneled LRO is not supported in the driver, and the same RQs are |
---|
| 5012 | + * shared between inner and outer TIRs, so the driver can't disable LRO |
---|
| 5013 | + * for inner TIRs while having it enabled for outer TIRs. Due to this, |
---|
| 5014 | + * block LRO altogether if the firmware declares tunneled LRO support. |
---|
| 5015 | + */ |
---|
4662 | 5016 | if (!!MLX5_CAP_ETH(mdev, lro_cap) && |
---|
| 5017 | + !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) && |
---|
| 5018 | + !MLX5_CAP_ETH(mdev, tunnel_lro_gre) && |
---|
4663 | 5019 | mlx5e_check_fragmented_striding_rq_cap(mdev)) |
---|
4664 | 5020 | netdev->vlan_features |= NETIF_F_LRO; |
---|
4665 | 5021 | |
---|
.. | .. |
---|
4669 | 5025 | netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
---|
4670 | 5026 | netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; |
---|
4671 | 5027 | |
---|
4672 | | - if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { |
---|
4673 | | - netdev->hw_enc_features |= NETIF_F_IP_CSUM; |
---|
4674 | | - netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; |
---|
| 5028 | + mlx5e_vxlan_set_netdev_info(priv); |
---|
| 5029 | + |
---|
| 5030 | + if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) || |
---|
| 5031 | + mlx5e_any_tunnel_proto_supported(mdev)) { |
---|
| 5032 | + netdev->hw_enc_features |= NETIF_F_HW_CSUM; |
---|
4675 | 5033 | netdev->hw_enc_features |= NETIF_F_TSO; |
---|
4676 | 5034 | netdev->hw_enc_features |= NETIF_F_TSO6; |
---|
4677 | 5035 | netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; |
---|
4678 | 5036 | } |
---|
4679 | 5037 | |
---|
4680 | | - if (mlx5_vxlan_allowed(mdev->vxlan)) { |
---|
4681 | | - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; |
---|
4682 | | - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; |
---|
| 5038 | + if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) { |
---|
| 5039 | + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | |
---|
| 5040 | + NETIF_F_GSO_UDP_TUNNEL_CSUM; |
---|
| 5041 | + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | |
---|
| 5042 | + NETIF_F_GSO_UDP_TUNNEL_CSUM; |
---|
| 5043 | + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; |
---|
| 5044 | + netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL | |
---|
| 5045 | + NETIF_F_GSO_UDP_TUNNEL_CSUM; |
---|
4683 | 5046 | } |
---|
4684 | 5047 | |
---|
4685 | | - if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { |
---|
| 5048 | + if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { |
---|
4686 | 5049 | netdev->hw_features |= NETIF_F_GSO_GRE | |
---|
4687 | 5050 | NETIF_F_GSO_GRE_CSUM; |
---|
4688 | 5051 | netdev->hw_enc_features |= NETIF_F_GSO_GRE | |
---|
4689 | 5052 | NETIF_F_GSO_GRE_CSUM; |
---|
4690 | 5053 | netdev->gso_partial_features |= NETIF_F_GSO_GRE | |
---|
4691 | 5054 | NETIF_F_GSO_GRE_CSUM; |
---|
| 5055 | + } |
---|
| 5056 | + |
---|
| 5057 | + if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) { |
---|
| 5058 | + netdev->hw_features |= NETIF_F_GSO_IPXIP4 | |
---|
| 5059 | + NETIF_F_GSO_IPXIP6; |
---|
| 5060 | + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 | |
---|
| 5061 | + NETIF_F_GSO_IPXIP6; |
---|
| 5062 | + netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 | |
---|
| 5063 | + NETIF_F_GSO_IPXIP6; |
---|
4692 | 5064 | } |
---|
4693 | 5065 | |
---|
4694 | 5066 | netdev->hw_features |= NETIF_F_GSO_PARTIAL; |
---|
.. | .. |
---|
4723 | 5095 | FT_CAP(modify_root) && |
---|
4724 | 5096 | FT_CAP(identified_miss_table_mode) && |
---|
4725 | 5097 | FT_CAP(flow_table_modify)) { |
---|
4726 | | -#ifdef CONFIG_MLX5_ESWITCH |
---|
| 5098 | +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) |
---|
4727 | 5099 | netdev->hw_features |= NETIF_F_HW_TC; |
---|
4728 | 5100 | #endif |
---|
4729 | 5101 | #ifdef CONFIG_MLX5_EN_ARFS |
---|
.. | .. |
---|
4737 | 5109 | netdev->priv_flags |= IFF_UNICAST_FLT; |
---|
4738 | 5110 | |
---|
4739 | 5111 | mlx5e_set_netdev_dev_addr(netdev); |
---|
4740 | | - |
---|
4741 | | -#if IS_ENABLED(CONFIG_MLX5_ESWITCH) |
---|
4742 | | - if (MLX5_ESWITCH_MANAGER(mdev)) |
---|
4743 | | - netdev->switchdev_ops = &mlx5e_switchdev_ops; |
---|
4744 | | -#endif |
---|
4745 | | - |
---|
4746 | 5112 | mlx5e_ipsec_build_netdev(priv); |
---|
4747 | 5113 | mlx5e_tls_build_netdev(priv); |
---|
4748 | 5114 | } |
---|
4749 | 5115 | |
---|
4750 | | -static void mlx5e_create_q_counters(struct mlx5e_priv *priv) |
---|
| 5116 | +void mlx5e_create_q_counters(struct mlx5e_priv *priv) |
---|
4751 | 5117 | { |
---|
| 5118 | + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; |
---|
| 5119 | + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {}; |
---|
4752 | 5120 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
4753 | 5121 | int err; |
---|
4754 | 5122 | |
---|
4755 | | - err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter); |
---|
4756 | | - if (err) { |
---|
4757 | | - mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err); |
---|
4758 | | - priv->q_counter = 0; |
---|
4759 | | - } |
---|
| 5123 | + MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); |
---|
| 5124 | + err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out); |
---|
| 5125 | + if (!err) |
---|
| 5126 | + priv->q_counter = |
---|
| 5127 | + MLX5_GET(alloc_q_counter_out, out, counter_set_id); |
---|
4760 | 5128 | |
---|
4761 | | - err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter); |
---|
4762 | | - if (err) { |
---|
4763 | | - mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err); |
---|
4764 | | - priv->drop_rq_q_counter = 0; |
---|
4765 | | - } |
---|
| 5129 | + err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out); |
---|
| 5130 | + if (!err) |
---|
| 5131 | + priv->drop_rq_q_counter = |
---|
| 5132 | + MLX5_GET(alloc_q_counter_out, out, counter_set_id); |
---|
4766 | 5133 | } |
---|
4767 | 5134 | |
---|
4768 | | -static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) |
---|
| 5135 | +void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) |
---|
4769 | 5136 | { |
---|
4770 | | - if (priv->q_counter) |
---|
4771 | | - mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); |
---|
| 5137 | + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; |
---|
4772 | 5138 | |
---|
4773 | | - if (priv->drop_rq_q_counter) |
---|
4774 | | - mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter); |
---|
| 5139 | + MLX5_SET(dealloc_q_counter_in, in, opcode, |
---|
| 5140 | + MLX5_CMD_OP_DEALLOC_Q_COUNTER); |
---|
| 5141 | + if (priv->q_counter) { |
---|
| 5142 | + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, |
---|
| 5143 | + priv->q_counter); |
---|
| 5144 | + mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in); |
---|
| 5145 | + } |
---|
| 5146 | + |
---|
| 5147 | + if (priv->drop_rq_q_counter) { |
---|
| 5148 | + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, |
---|
| 5149 | + priv->drop_rq_q_counter); |
---|
| 5150 | + mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in); |
---|
| 5151 | + } |
---|
4775 | 5152 | } |
---|
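
The Q-counter helpers switch from dedicated mlx5_core_* wrappers to the generated command-interface macros: MLX5_ST_SZ_DW() sizes the buffers from the IFC layout, MLX5_SET()/MLX5_GET() pack and unpack named fields, and mlx5_cmd_exec_inout()/mlx5_cmd_exec_in() derive buffer lengths from the command name, so no hand-counted sizing remains. The pattern in general form (annotated recap of the code above):

    u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};   /* zeroed input, sized by layout */
    u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; /* matching output buffer */

    MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
    if (!mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out))
        counter_id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
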
4776 | 5153 | |
---|
4777 | | -static void mlx5e_nic_init(struct mlx5_core_dev *mdev, |
---|
4778 | | - struct net_device *netdev, |
---|
4779 | | - const struct mlx5e_profile *profile, |
---|
4780 | | - void *ppriv) |
---|
| 5154 | +static int mlx5e_nic_init(struct mlx5_core_dev *mdev, |
---|
| 5155 | + struct net_device *netdev, |
---|
| 5156 | + const struct mlx5e_profile *profile, |
---|
| 5157 | + void *ppriv) |
---|
4781 | 5158 | { |
---|
4782 | 5159 | struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
| 5160 | + struct mlx5e_rss_params *rss = &priv->rss_params; |
---|
4783 | 5161 | int err; |
---|
4784 | 5162 | |
---|
4785 | | - mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); |
---|
| 5163 | + err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); |
---|
| 5164 | + if (err) |
---|
| 5165 | + return err; |
---|
| 5166 | + |
---|
| 5167 | + mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params, |
---|
| 5168 | + netdev->mtu); |
---|
| 5169 | + |
---|
| 5170 | + mlx5e_timestamp_init(priv); |
---|
| 5171 | + |
---|
4786 | 5172 | err = mlx5e_ipsec_init(priv); |
---|
4787 | 5173 | if (err) |
---|
4788 | 5174 | mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); |
---|
.. | .. |
---|
4790 | 5176 | if (err) |
---|
4791 | 5177 | mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); |
---|
4792 | 5178 | mlx5e_build_nic_netdev(netdev); |
---|
4793 | | - mlx5e_build_tc2txq_maps(priv); |
---|
| 5179 | + err = mlx5e_devlink_port_register(priv); |
---|
| 5180 | + if (err) |
---|
| 5181 | + mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); |
---|
| 5182 | + mlx5e_health_create_reporters(priv); |
---|
| 5183 | + |
---|
| 5184 | + return 0; |
---|
4794 | 5185 | } |
---|
4795 | 5186 | |
---|
4796 | 5187 | static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) |
---|
4797 | 5188 | { |
---|
| 5189 | + mlx5e_health_destroy_reporters(priv); |
---|
| 5190 | + mlx5e_devlink_port_unregister(priv); |
---|
4798 | 5191 | mlx5e_tls_cleanup(priv); |
---|
4799 | 5192 | mlx5e_ipsec_cleanup(priv); |
---|
| 5193 | + mlx5e_netdev_cleanup(priv->netdev, priv); |
---|
4800 | 5194 | } |
---|
4801 | 5195 | |
---|
4802 | 5196 | static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) |
---|
.. | .. |
---|
4804 | 5198 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
4805 | 5199 | int err; |
---|
4806 | 5200 | |
---|
| 5201 | + mlx5e_create_q_counters(priv); |
---|
| 5202 | + |
---|
| 5203 | + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); |
---|
| 5204 | + if (err) { |
---|
| 5205 | + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); |
---|
| 5206 | + goto err_destroy_q_counters; |
---|
| 5207 | + } |
---|
| 5208 | + |
---|
4807 | 5209 | err = mlx5e_create_indirect_rqt(priv); |
---|
4808 | 5210 | if (err) |
---|
4809 | | - return err; |
---|
| 5211 | + goto err_close_drop_rq; |
---|
4810 | 5212 | |
---|
4811 | | - err = mlx5e_create_direct_rqts(priv); |
---|
| 5213 | + err = mlx5e_create_direct_rqts(priv, priv->direct_tir); |
---|
4812 | 5214 | if (err) |
---|
4813 | 5215 | goto err_destroy_indirect_rqts; |
---|
4814 | 5216 | |
---|
4815 | | - err = mlx5e_create_indirect_tirs(priv); |
---|
| 5217 | + err = mlx5e_create_indirect_tirs(priv, true); |
---|
4816 | 5218 | if (err) |
---|
4817 | 5219 | goto err_destroy_direct_rqts; |
---|
4818 | 5220 | |
---|
4819 | | - err = mlx5e_create_direct_tirs(priv); |
---|
| 5221 | + err = mlx5e_create_direct_tirs(priv, priv->direct_tir); |
---|
4820 | 5222 | if (err) |
---|
4821 | 5223 | goto err_destroy_indirect_tirs; |
---|
| 5224 | + |
---|
| 5225 | + err = mlx5e_create_direct_rqts(priv, priv->xsk_tir); |
---|
| 5226 | + if (unlikely(err)) |
---|
| 5227 | + goto err_destroy_direct_tirs; |
---|
| 5228 | + |
---|
| 5229 | + err = mlx5e_create_direct_tirs(priv, priv->xsk_tir); |
---|
| 5230 | + if (unlikely(err)) |
---|
| 5231 | + goto err_destroy_xsk_rqts; |
---|
4822 | 5232 | |
---|
4823 | 5233 | err = mlx5e_create_flow_steering(priv); |
---|
4824 | 5234 | if (err) { |
---|
4825 | 5235 | mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); |
---|
4826 | | - goto err_destroy_direct_tirs; |
---|
| 5236 | + goto err_destroy_xsk_tirs; |
---|
4827 | 5237 | } |
---|
4828 | 5238 | |
---|
4829 | 5239 | err = mlx5e_tc_nic_init(priv); |
---|
4830 | 5240 | if (err) |
---|
4831 | 5241 | goto err_destroy_flow_steering; |
---|
4832 | 5242 | |
---|
| 5243 | + err = mlx5e_accel_init_rx(priv); |
---|
| 5244 | + if (err) |
---|
| 5245 | + goto err_tc_nic_cleanup; |
---|
| 5246 | + |
---|
| 5247 | +#ifdef CONFIG_MLX5_EN_ARFS |
---|
| 5248 | + priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev); |
---|
| 5249 | +#endif |
---|
| 5250 | + |
---|
4833 | 5251 | return 0; |
---|
4834 | 5252 | |
---|
| 5253 | +err_tc_nic_cleanup: |
---|
| 5254 | + mlx5e_tc_nic_cleanup(priv); |
---|
4835 | 5255 | err_destroy_flow_steering: |
---|
4836 | 5256 | mlx5e_destroy_flow_steering(priv); |
---|
| 5257 | +err_destroy_xsk_tirs: |
---|
| 5258 | + mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); |
---|
| 5259 | +err_destroy_xsk_rqts: |
---|
| 5260 | + mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); |
---|
4837 | 5261 | err_destroy_direct_tirs: |
---|
4838 | | - mlx5e_destroy_direct_tirs(priv); |
---|
| 5262 | + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); |
---|
4839 | 5263 | err_destroy_indirect_tirs: |
---|
4840 | 5264 | mlx5e_destroy_indirect_tirs(priv); |
---|
4841 | 5265 | err_destroy_direct_rqts: |
---|
4842 | | - mlx5e_destroy_direct_rqts(priv); |
---|
| 5266 | + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); |
---|
4843 | 5267 | err_destroy_indirect_rqts: |
---|
4844 | 5268 | mlx5e_destroy_rqt(priv, &priv->indir_rqt); |
---|
| 5269 | +err_close_drop_rq: |
---|
| 5270 | + mlx5e_close_drop_rq(&priv->drop_rq); |
---|
| 5271 | +err_destroy_q_counters: |
---|
| 5272 | + mlx5e_destroy_q_counters(priv); |
---|
4845 | 5273 | return err; |
---|
4846 | 5274 | } |
---|
4847 | 5275 | |
---|
4848 | 5276 | static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) |
---|
4849 | 5277 | { |
---|
| 5278 | + mlx5e_accel_cleanup_rx(priv); |
---|
4850 | 5279 | mlx5e_tc_nic_cleanup(priv); |
---|
4851 | 5280 | mlx5e_destroy_flow_steering(priv); |
---|
4852 | | - mlx5e_destroy_direct_tirs(priv); |
---|
| 5281 | + mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); |
---|
| 5282 | + mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); |
---|
| 5283 | + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); |
---|
4853 | 5284 | mlx5e_destroy_indirect_tirs(priv); |
---|
4854 | | - mlx5e_destroy_direct_rqts(priv); |
---|
| 5285 | + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); |
---|
4855 | 5286 | mlx5e_destroy_rqt(priv, &priv->indir_rqt); |
---|
| 5287 | + mlx5e_close_drop_rq(&priv->drop_rq); |
---|
| 5288 | + mlx5e_destroy_q_counters(priv); |
---|
4856 | 5289 | } |
---|
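
init_nic_rx now owns the drop RQ and the Q counters as well (they moved here from mlx5e_attach_netdev, as the hunks below show), so the unwind ladder grows to match. The idiom: every err_* label undoes exactly the steps that completed before the failure, in reverse creation order. In miniature, with hypothetical step functions:

    static int demo_init(void)
    {
        int err;

        err = step_a();        /* hypothetical steps */
        if (err)
            return err;
        err = step_b();
        if (err)
            goto err_undo_a;
        err = step_c();
        if (err)
            goto err_undo_b;
        return 0;

    err_undo_b:
        undo_b();
    err_undo_a:
        undo_a();
        return err;
    }
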
4857 | 5290 | |
---|
4858 | 5291 | static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) |
---|
.. | .. |
---|
4865 | 5298 | return err; |
---|
4866 | 5299 | } |
---|
4867 | 5300 | |
---|
4868 | | -#ifdef CONFIG_MLX5_CORE_EN_DCB |
---|
4869 | 5301 | mlx5e_dcbnl_initialize(priv); |
---|
4870 | | -#endif |
---|
4871 | 5302 | return 0; |
---|
4872 | 5303 | } |
---|
4873 | 5304 | |
---|
.. | .. |
---|
4875 | 5306 | { |
---|
4876 | 5307 | struct net_device *netdev = priv->netdev; |
---|
4877 | 5308 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
4878 | | - u16 max_mtu; |
---|
4879 | 5309 | |
---|
4880 | 5310 | mlx5e_init_l2_addr(priv); |
---|
4881 | 5311 | |
---|
4882 | 5312 | /* Marking the link as currently not needed by the Driver */ |
---|
4883 | 5313 | if (!netif_running(netdev)) |
---|
4884 | | - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); |
---|
| 5314 | + mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN); |
---|
4885 | 5315 | |
---|
4886 | | - /* MTU range: 68 - hw-specific max */ |
---|
4887 | | - netdev->min_mtu = ETH_MIN_MTU; |
---|
4888 | | - mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); |
---|
4889 | | - netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); |
---|
| 5316 | + mlx5e_set_netdev_mtu_boundaries(priv); |
---|
4890 | 5317 | mlx5e_set_dev_port_mtu(priv); |
---|
4891 | 5318 | |
---|
4892 | 5319 | mlx5_lag_add(mdev, netdev); |
---|
4893 | 5320 | |
---|
4894 | 5321 | mlx5e_enable_async_events(priv); |
---|
| 5322 | + if (mlx5e_monitor_counter_supported(priv)) |
---|
| 5323 | + mlx5e_monitor_counter_init(priv); |
---|
4895 | 5324 | |
---|
4896 | | - if (MLX5_ESWITCH_MANAGER(priv->mdev)) |
---|
4897 | | - mlx5e_register_vport_reps(priv); |
---|
4898 | | - |
---|
| 5325 | + mlx5e_hv_vhca_stats_create(priv); |
---|
4899 | 5326 | if (netdev->reg_state != NETREG_REGISTERED) |
---|
4900 | 5327 | return; |
---|
4901 | | -#ifdef CONFIG_MLX5_CORE_EN_DCB |
---|
4902 | 5328 | mlx5e_dcbnl_init_app(priv); |
---|
4903 | | -#endif |
---|
4904 | 5329 | |
---|
4905 | 5330 | queue_work(priv->wq, &priv->set_rx_mode_work); |
---|
4906 | 5331 | |
---|
4907 | 5332 | rtnl_lock(); |
---|
4908 | 5333 | if (netif_running(netdev)) |
---|
4909 | 5334 | mlx5e_open(netdev); |
---|
| 5335 | + udp_tunnel_nic_reset_ntf(priv->netdev); |
---|
4910 | 5336 | netif_device_attach(netdev); |
---|
4911 | 5337 | rtnl_unlock(); |
---|
4912 | 5338 | } |
---|
.. | .. |
---|
4915 | 5341 | { |
---|
4916 | 5342 | struct mlx5_core_dev *mdev = priv->mdev; |
---|
4917 | 5343 | |
---|
4918 | | -#ifdef CONFIG_MLX5_CORE_EN_DCB |
---|
4919 | 5344 | if (priv->netdev->reg_state == NETREG_REGISTERED) |
---|
4920 | 5345 | mlx5e_dcbnl_delete_app(priv); |
---|
4921 | | -#endif |
---|
4922 | 5346 | |
---|
4923 | 5347 | rtnl_lock(); |
---|
4924 | 5348 | if (netif_running(priv->netdev)) |
---|
.. | .. |
---|
4928 | 5352 | |
---|
4929 | 5353 | queue_work(priv->wq, &priv->set_rx_mode_work); |
---|
4930 | 5354 | |
---|
4931 | | - if (MLX5_ESWITCH_MANAGER(priv->mdev)) |
---|
4932 | | - mlx5e_unregister_vport_reps(priv); |
---|
| 5355 | + mlx5e_hv_vhca_stats_destroy(priv); |
---|
| 5356 | + if (mlx5e_monitor_counter_supported(priv)) |
---|
| 5357 | + mlx5e_monitor_counter_cleanup(priv); |
---|
4933 | 5358 | |
---|
4934 | 5359 | mlx5e_disable_async_events(priv); |
---|
4935 | 5360 | mlx5_lag_remove(mdev); |
---|
| 5361 | + mlx5_vxlan_reset_to_default(mdev->vxlan); |
---|
| 5362 | +} |
---|
| 5363 | + |
---|
| 5364 | +int mlx5e_update_nic_rx(struct mlx5e_priv *priv) |
---|
| 5365 | +{ |
---|
| 5366 | + return mlx5e_refresh_tirs(priv, false, false); |
---|
4936 | 5367 | } |
---|
4937 | 5368 | |
---|
4938 | 5369 | static const struct mlx5e_profile mlx5e_nic_profile = { |
---|
.. | .. |
---|
4944 | 5375 | .cleanup_tx = mlx5e_cleanup_nic_tx, |
---|
4945 | 5376 | .enable = mlx5e_nic_enable, |
---|
4946 | 5377 | .disable = mlx5e_nic_disable, |
---|
4947 | | - .update_stats = mlx5e_update_ndo_stats, |
---|
4948 | | - .max_nch = mlx5e_get_max_num_channels, |
---|
| 5378 | + .update_rx = mlx5e_update_nic_rx, |
---|
| 5379 | + .update_stats = mlx5e_stats_update_ndo_stats, |
---|
4949 | 5380 | .update_carrier = mlx5e_update_carrier, |
---|
4950 | | - .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, |
---|
4951 | | - .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, |
---|
| 5381 | + .rx_handlers = &mlx5e_rx_handlers_nic, |
---|
4952 | 5382 | .max_tc = MLX5E_MAX_NUM_TC, |
---|
| 5383 | + .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), |
---|
| 5384 | + .stats_grps = mlx5e_nic_stats_grps, |
---|
| 5385 | + .stats_grps_num = mlx5e_nic_stats_grps_num, |
---|
4953 | 5386 | }; |
---|
4954 | 5387 | |
---|
4955 | 5388 | /* mlx5e generic netdev management API (move to en_common.c) */ |
---|
4956 | 5389 | |
---|
| 5390 | +/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */ |
---|
| 5391 | +int mlx5e_netdev_init(struct net_device *netdev, |
---|
| 5392 | + struct mlx5e_priv *priv, |
---|
| 5393 | + struct mlx5_core_dev *mdev, |
---|
| 5394 | + const struct mlx5e_profile *profile, |
---|
| 5395 | + void *ppriv) |
---|
| 5396 | +{ |
---|
| 5397 | + /* priv init */ |
---|
| 5398 | + priv->mdev = mdev; |
---|
| 5399 | + priv->netdev = netdev; |
---|
| 5400 | + priv->profile = profile; |
---|
| 5401 | + priv->ppriv = ppriv; |
---|
| 5402 | + priv->msglevel = MLX5E_MSG_LEVEL; |
---|
| 5403 | + priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); |
---|
| 5404 | + priv->max_opened_tc = 1; |
---|
| 5405 | + |
---|
| 5406 | + if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL)) |
---|
| 5407 | + return -ENOMEM; |
---|
| 5408 | + |
---|
| 5409 | + mutex_init(&priv->state_lock); |
---|
| 5410 | + INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); |
---|
| 5411 | + INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); |
---|
| 5412 | + INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); |
---|
| 5413 | + INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); |
---|
| 5414 | + |
---|
| 5415 | + priv->wq = create_singlethread_workqueue("mlx5e"); |
---|
| 5416 | + if (!priv->wq) |
---|
| 5417 | + goto err_free_cpumask; |
---|
| 5418 | + |
---|
| 5419 | + /* netdev init */ |
---|
| 5420 | + netif_carrier_off(netdev); |
---|
| 5421 | + |
---|
| 5422 | + return 0; |
---|
| 5423 | + |
---|
| 5424 | +err_free_cpumask: |
---|
| 5425 | + free_cpumask_var(priv->scratchpad.cpumask); |
---|
| 5426 | + |
---|
| 5427 | + return -ENOMEM; |
---|
| 5428 | +} |
---|
| 5429 | + |
---|
| 5430 | +void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv) |
---|
| 5431 | +{ |
---|
| 5432 | + destroy_workqueue(priv->wq); |
---|
| 5433 | + free_cpumask_var(priv->scratchpad.cpumask); |
---|
| 5434 | +} |
---|
| 5435 | + |
---|
4957 | 5436 | struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, |
---|
4958 | 5437 | const struct mlx5e_profile *profile, |
---|
| 5438 | + int nch, |
---|
4959 | 5439 | void *ppriv) |
---|
4960 | 5440 | { |
---|
4961 | | - int nch = profile->max_nch(mdev); |
---|
4962 | 5441 | struct net_device *netdev; |
---|
4963 | | - struct mlx5e_priv *priv; |
---|
| 5442 | + int err; |
---|
4964 | 5443 | |
---|
4965 | 5444 | netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), |
---|
4966 | 5445 | nch * profile->max_tc, |
---|
4967 | | - nch); |
---|
| 5446 | + nch * profile->rq_groups); |
---|
4968 | 5447 | if (!netdev) { |
---|
4969 | 5448 | mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); |
---|
4970 | 5449 | return NULL; |
---|
4971 | 5450 | } |
---|
4972 | 5451 | |
---|
4973 | | -#ifdef CONFIG_MLX5_EN_ARFS |
---|
4974 | | - netdev->rx_cpu_rmap = mdev->rmap; |
---|
4975 | | -#endif |
---|
4976 | | - |
---|
4977 | | - profile->init(mdev, netdev, profile, ppriv); |
---|
4978 | | - |
---|
4979 | | - netif_carrier_off(netdev); |
---|
4980 | | - |
---|
4981 | | - priv = netdev_priv(netdev); |
---|
4982 | | - |
---|
4983 | | - priv->wq = create_singlethread_workqueue("mlx5e"); |
---|
4984 | | - if (!priv->wq) |
---|
4985 | | - goto err_cleanup_nic; |
---|
| 5452 | + err = profile->init(mdev, netdev, profile, ppriv); |
---|
| 5453 | + if (err) { |
---|
| 5454 | + mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err); |
---|
| 5455 | + goto err_free_netdev; |
---|
| 5456 | + } |
---|
4986 | 5457 | |
---|
4987 | 5458 | return netdev; |
---|
4988 | 5459 | |
---|
4989 | | -err_cleanup_nic: |
---|
4990 | | - if (profile->cleanup) |
---|
4991 | | - profile->cleanup(priv); |
---|
| 5460 | +err_free_netdev: |
---|
4992 | 5461 | free_netdev(netdev); |
---|
4993 | 5462 | |
---|
4994 | 5463 | return NULL; |
---|
4995 | 5464 | } |
---|
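With rq_groups folded into the allocation, the RX queue count scales with the number of groups instead of being fixed at one per channel. For example, assuming nch = 32, max_tc = 8 and rq_groups = 2 (regular + XSK), alloc_etherdev_mqs() reserves 32 * 8 = 256 TX queues and 32 * 2 = 64 RX queues; the numbers are purely illustrative.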
4996 | 5465 | |
---|
| 5466 | +static void mlx5e_reset_channels(struct net_device *netdev) |
---|
| 5467 | +{ |
---|
| 5468 | + netdev_reset_tc(netdev); |
---|
| 5469 | +} |
---|
| 5470 | + |
---|
4997 | 5471 | int mlx5e_attach_netdev(struct mlx5e_priv *priv) |
---|
4998 | 5472 | { |
---|
4999 | | - struct mlx5_core_dev *mdev = priv->mdev; |
---|
| 5473 | + const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED; |
---|
5000 | 5474 | const struct mlx5e_profile *profile; |
---|
5001 | 5475 | int max_nch; |
---|
5002 | 5476 | int err; |
---|
.. | .. |
---|
5008 | 5482 | max_nch = mlx5e_get_max_num_channels(priv->mdev); |
---|
5009 | 5483 | if (priv->channels.params.num_channels > max_nch) { |
---|
5010 | 5484 | mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); |
---|
| 5485 | + /* Reducing the number of channels - RXFH has to be reset, and |
---|
| 5486 | + * mlx5e_num_channels_changed below will build the RQT. |
---|
| 5487 | + */ |
---|
| 5488 | + priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED; |
---|
5011 | 5489 | priv->channels.params.num_channels = max_nch; |
---|
5012 | | - mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt, |
---|
5013 | | - MLX5E_INDIR_RQT_SIZE, max_nch); |
---|
5014 | 5490 | } |
---|
| 5491 | + /* 1. Set the real number of queues in the kernel the first time. |
---|
| 5492 | + * 2. Set our default XPS cpumask. |
---|
| 5493 | + * 3. Build the RQT. |
---|
| 5494 | + * |
---|
| 5495 | + * rtnl_lock is required by netif_set_real_num_*_queues in case the |
---|
| 5496 | + * netdev has been registered by this point (if this function was called |
---|
| 5497 | + * in the reload or resume flow). |
---|
| 5498 | + */ |
---|
| 5499 | + if (take_rtnl) |
---|
| 5500 | + rtnl_lock(); |
---|
| 5501 | + err = mlx5e_num_channels_changed(priv); |
---|
| 5502 | + if (take_rtnl) |
---|
| 5503 | + rtnl_unlock(); |
---|
| 5504 | + if (err) |
---|
| 5505 | + goto out; |
---|
5015 | 5506 | |
---|
5016 | 5507 | err = profile->init_tx(priv); |
---|
5017 | 5508 | if (err) |
---|
5018 | 5509 | goto out; |
---|
5019 | 5510 | |
---|
5020 | | - mlx5e_create_q_counters(priv); |
---|
5021 | | - |
---|
5022 | | - err = mlx5e_open_drop_rq(priv, &priv->drop_rq); |
---|
5023 | | - if (err) { |
---|
5024 | | - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); |
---|
5025 | | - goto err_destroy_q_counters; |
---|
5026 | | - } |
---|
5027 | | - |
---|
5028 | 5511 | err = profile->init_rx(priv); |
---|
5029 | 5512 | if (err) |
---|
5030 | | - goto err_close_drop_rq; |
---|
| 5513 | + goto err_cleanup_tx; |
---|
5031 | 5514 | |
---|
5032 | 5515 | if (profile->enable) |
---|
5033 | 5516 | profile->enable(priv); |
---|
5034 | 5517 | |
---|
5035 | 5518 | return 0; |
---|
5036 | 5519 | |
---|
5037 | | -err_close_drop_rq: |
---|
5038 | | - mlx5e_close_drop_rq(&priv->drop_rq); |
---|
5039 | | - |
---|
5040 | | -err_destroy_q_counters: |
---|
5041 | | - mlx5e_destroy_q_counters(priv); |
---|
| 5520 | +err_cleanup_tx: |
---|
5042 | 5521 | profile->cleanup_tx(priv); |
---|
5043 | 5522 | |
---|
5044 | 5523 | out: |
---|
| 5524 | + mlx5e_reset_channels(priv->netdev); |
---|
| 5525 | + set_bit(MLX5E_STATE_DESTROYING, &priv->state); |
---|
| 5526 | + cancel_work_sync(&priv->update_stats_work); |
---|
5045 | 5527 | return err; |
---|
5046 | 5528 | } |
---|
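The take_rtnl dance above generalizes to any path that can run either before or after register_netdev(): netif_set_real_num_*_queues() asserts the RTNL only once the device is visible to the stack. A minimal sketch of the same pattern in isolation, assuming <linux/netdevice.h> and <linux/rtnetlink.h>; the function name is hypothetical:

static int example_set_txq_count(struct net_device *netdev, unsigned int txqs)
{
	const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
	int err;

	/* only grab the RTNL when the stack can already see the device */
	if (take_rtnl)
		rtnl_lock();
	err = netif_set_real_num_tx_queues(netdev, txqs);
	if (take_rtnl)
		rtnl_unlock();

	return err;
}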
5047 | 5529 | |
---|
.. | .. |
---|
5056 | 5538 | flush_workqueue(priv->wq); |
---|
5057 | 5539 | |
---|
5058 | 5540 | profile->cleanup_rx(priv); |
---|
5059 | | - mlx5e_close_drop_rq(&priv->drop_rq); |
---|
5060 | | - mlx5e_destroy_q_counters(priv); |
---|
5061 | 5541 | profile->cleanup_tx(priv); |
---|
5062 | | - cancel_delayed_work_sync(&priv->update_stats_work); |
---|
| 5542 | + mlx5e_reset_channels(priv->netdev); |
---|
| 5543 | + cancel_work_sync(&priv->update_stats_work); |
---|
5063 | 5544 | } |
---|
5064 | 5545 | |
---|
5065 | 5546 | void mlx5e_destroy_netdev(struct mlx5e_priv *priv) |
---|
.. | .. |
---|
5067 | 5548 | const struct mlx5e_profile *profile = priv->profile; |
---|
5068 | 5549 | struct net_device *netdev = priv->netdev; |
---|
5069 | 5550 | |
---|
5070 | | - destroy_workqueue(priv->wq); |
---|
5071 | 5551 | if (profile->cleanup) |
---|
5072 | 5552 | profile->cleanup(priv); |
---|
5073 | 5553 | free_netdev(netdev); |
---|
.. | .. |
---|
5103 | 5583 | struct mlx5e_priv *priv = vpriv; |
---|
5104 | 5584 | struct net_device *netdev = priv->netdev; |
---|
5105 | 5585 | |
---|
| 5586 | +#ifdef CONFIG_MLX5_ESWITCH |
---|
| 5587 | + if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) |
---|
| 5588 | + return; |
---|
| 5589 | +#endif |
---|
| 5590 | + |
---|
5106 | 5591 | if (!netif_device_present(netdev)) |
---|
5107 | 5592 | return; |
---|
5108 | 5593 | |
---|
.. | .. |
---|
5113 | 5598 | static void *mlx5e_add(struct mlx5_core_dev *mdev) |
---|
5114 | 5599 | { |
---|
5115 | 5600 | struct net_device *netdev; |
---|
5116 | | - void *rpriv = NULL; |
---|
5117 | 5601 | void *priv; |
---|
5118 | 5602 | int err; |
---|
| 5603 | + int nch; |
---|
5119 | 5604 | |
---|
5120 | 5605 | err = mlx5e_check_required_hca_cap(mdev); |
---|
5121 | 5606 | if (err) |
---|
5122 | 5607 | return NULL; |
---|
5123 | 5608 | |
---|
5124 | 5609 | #ifdef CONFIG_MLX5_ESWITCH |
---|
5125 | | - if (MLX5_ESWITCH_MANAGER(mdev)) { |
---|
5126 | | - rpriv = mlx5e_alloc_nic_rep_priv(mdev); |
---|
5127 | | - if (!rpriv) { |
---|
5128 | | - mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); |
---|
5129 | | - return NULL; |
---|
5130 | | - } |
---|
| 5610 | + if (MLX5_ESWITCH_MANAGER(mdev) && |
---|
| 5611 | + mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) { |
---|
| 5612 | + mlx5e_rep_register_vport_reps(mdev); |
---|
| 5613 | + return mdev; |
---|
5131 | 5614 | } |
---|
5132 | 5615 | #endif |
---|
5133 | 5616 | |
---|
5134 | | - netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv); |
---|
| 5617 | + nch = mlx5e_get_max_num_channels(mdev); |
---|
| 5618 | + netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL); |
---|
5135 | 5619 | if (!netdev) { |
---|
5136 | 5620 | mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); |
---|
5137 | | - goto err_free_rpriv; |
---|
| 5621 | + return NULL; |
---|
5138 | 5622 | } |
---|
5139 | 5623 | |
---|
| 5624 | + dev_net_set(netdev, mlx5_core_net(mdev)); |
---|
5140 | 5625 | priv = netdev_priv(netdev); |
---|
5141 | 5626 | |
---|
5142 | 5627 | err = mlx5e_attach(mdev, priv); |
---|
.. | .. |
---|
5151 | 5636 | goto err_detach; |
---|
5152 | 5637 | } |
---|
5153 | 5638 | |
---|
5154 | | -#ifdef CONFIG_MLX5_CORE_EN_DCB |
---|
| 5639 | + mlx5e_devlink_port_type_eth_set(priv); |
---|
| 5640 | + |
---|
5155 | 5641 | mlx5e_dcbnl_init_app(priv); |
---|
5156 | | -#endif |
---|
5157 | 5642 | return priv; |
---|
5158 | 5643 | |
---|
5159 | 5644 | err_detach: |
---|
5160 | 5645 | mlx5e_detach(mdev, priv); |
---|
5161 | 5646 | err_destroy_netdev: |
---|
5162 | 5647 | mlx5e_destroy_netdev(priv); |
---|
5163 | | -err_free_rpriv: |
---|
5164 | | - kfree(rpriv); |
---|
5165 | 5648 | return NULL; |
---|
5166 | 5649 | } |
---|
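Note the cookie convention introduced here: in eswitch offloads (switchdev) mode, mlx5e_add() hands the mdev pointer itself back to the interface layer instead of a netdev priv, and the sibling callbacks recognize that sentinel to route the uplink through the representor code. A hedged sketch of the check the remove and event paths perform; the helper name is hypothetical:

static bool example_is_rep_managed(struct mlx5_core_dev *mdev, void *vpriv)
{
	/* mlx5e_add() returned mdev itself when the eswitch owns the port */
	return MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev;
}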
5167 | 5650 | |
---|
5168 | 5651 | static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) |
---|
5169 | 5652 | { |
---|
5170 | | - struct mlx5e_priv *priv = vpriv; |
---|
5171 | | - void *ppriv = priv->ppriv; |
---|
| 5653 | + struct mlx5e_priv *priv; |
---|
5172 | 5654 | |
---|
5173 | | -#ifdef CONFIG_MLX5_CORE_EN_DCB |
---|
5174 | | - mlx5e_dcbnl_delete_app(priv); |
---|
| 5655 | +#ifdef CONFIG_MLX5_ESWITCH |
---|
| 5656 | + if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) { |
---|
| 5657 | + mlx5e_rep_unregister_vport_reps(mdev); |
---|
| 5658 | + return; |
---|
| 5659 | + } |
---|
5175 | 5660 | #endif |
---|
| 5661 | + priv = vpriv; |
---|
| 5662 | + mlx5e_dcbnl_delete_app(priv); |
---|
5176 | 5663 | unregister_netdev(priv->netdev); |
---|
5177 | 5664 | mlx5e_detach(mdev, vpriv); |
---|
5178 | 5665 | mlx5e_destroy_netdev(priv); |
---|
5179 | | - kfree(ppriv); |
---|
5180 | | -} |
---|
5181 | | - |
---|
5182 | | -static void *mlx5e_get_netdev(void *vpriv) |
---|
5183 | | -{ |
---|
5184 | | - struct mlx5e_priv *priv = vpriv; |
---|
5185 | | - |
---|
5186 | | - return priv->netdev; |
---|
5187 | 5666 | } |
---|
5188 | 5667 | |
---|
5189 | 5668 | static struct mlx5_interface mlx5e_interface = { |
---|
.. | .. |
---|
5191 | 5670 | .remove = mlx5e_remove, |
---|
5192 | 5671 | .attach = mlx5e_attach, |
---|
5193 | 5672 | .detach = mlx5e_detach, |
---|
5194 | | - .event = mlx5e_async_event, |
---|
5195 | 5673 | .protocol = MLX5_INTERFACE_PROTOCOL_ETH, |
---|
5196 | | - .get_dev = mlx5e_get_netdev, |
---|
5197 | 5674 | }; |
---|
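With the .event and .get_dev hooks dropped from struct mlx5_interface, consumers are expected to receive firmware events through the notifier infrastructure pulled in via lib/eq.h instead. A rough sketch, assuming the mlx5_notifier_register() API; the handler body and the events_nb field usage are illustrative:

static int example_event_handler(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	/* react to the async event carried in 'event'/'data' */
	return NOTIFY_OK;
}

/* typically wired up in the profile enable flow:
 *	priv->events_nb.notifier_call = example_event_handler;
 *	mlx5_notifier_register(priv->mdev, &priv->events_nb);
 */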
5198 | 5675 | |
---|
5199 | 5676 | void mlx5e_init(void) |
---|