2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -36,7 +36,6 @@
 #include <linux/etherdevice.h>
 #include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
 #include <linux/crash_dump.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/qp.h>
@@ -46,13 +45,17 @@
 #include <linux/mlx5/transobj.h>
 #include <linux/mlx5/fs.h>
 #include <linux/rhashtable.h>
+#include <net/udp_tunnel.h>
 #include <net/switchdev.h>
 #include <net/xdp.h>
-#include <linux/net_dim.h>
+#include <linux/dim.h>
+#include <linux/bits.h>
 #include "wq.h"
 #include "mlx5_core.h"
 #include "en_stats.h"
+#include "en/dcbnl.h"
 #include "en/fs.h"
+#include "lib/hv_vhca.h"
 
 extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
@@ -67,35 +70,38 @@
 #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
 #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
 
-#define MLX5E_MAX_PRIORITY 8
-#define MLX5E_MAX_DSCP 64
 #define MLX5E_MAX_NUM_TC 8
 
 #define MLX5_RX_HEADROOM NET_SKB_PAD
 #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
+#define MLX5E_RX_MAX_HEAD (256)
+
 #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
     (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
 #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
     max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
-#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
-#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
-#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
-    (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
-     MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
+#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
+    MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
 
 #define MLX5_MPWRQ_LOG_WQE_SZ 18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
 #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 
-#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
-#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
+#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
+#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
+/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
+ * WQEs. This page will absorb write overflow by the hardware, when
+ * receiving packets larger than MTU. These oversize packets are
+ * dropped by the driver at a later stage.
+ */
+#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
 #define MLX5E_MAX_RQ_NUM_MTTS \
-    ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+    (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
 #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
     (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
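
Note: the reworked macros split the old one-liner MLX5_MTT_OCTW into two steps: MTT entries are allocated in multiples of 8, and the hardware counts them in octwords (two MTTs each). MLX5E_REQUIRED_WQE_MTTS now also reserves one extra page per WQE as the overflow buffer described in the comment, and the ALIGN_DOWN(U16_MAX, 4) * 2 bound keeps the octword count of any legal MTT count within a u16. A standalone sketch of the arithmetic (demo macros, not driver code; assumes 4K pages, so MLX5_MPWRQ_PAGES_PER_WQE = BIT(18 - 12) = 64):

    #include <stdio.h>

    #define ALIGN_MTTS(mtts)        (((mtts) + 7u) & ~7u) /* ALIGN(mtts, 8) */
    #define ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2u)         /* 2 MTTs per octword */
    #define MTT_OCTW(mtts)          ALIGNED_MTTS_OCTW(ALIGN_MTTS(mtts))

    int main(void)
    {
        unsigned int pages_per_wqe = 64;
        /* +1 page absorbs HW write overflow between WQEs (comment above) */
        unsigned int wqe_mtts = ALIGN_MTTS(pages_per_wqe + 1);

        /* prints "MTTs per WQE: 72, octwords: 36" */
        printf("MTTs per WQE: %u, octwords: %u\n", wqe_mtts, MTT_OCTW(wqe_mtts));
        return 0;
    }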
@@ -118,8 +124,6 @@
 
 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
 
-#define MLX5E_RX_MAX_HEAD (256)
-
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT 32
 #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
@@ -136,9 +140,10 @@
 #define MLX5E_LOG_INDIR_RQT_SIZE 0x7
 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 #define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
 #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 #define MLX5E_TX_CQ_POLL_BUDGET 128
+#define MLX5E_TX_XSK_POLL_BUDGET 64
 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
 
 #define MLX5E_UMR_WQE_INLINE_SZ \
@@ -147,9 +152,6 @@
           MLX5_UMR_MTT_ALIGNMENT))
 #define MLX5E_UMR_WQEBBS \
     (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
-
-#define MLX5E_NUM_MAIN_GROUPS 9
 
 #define MLX5E_MSG_LEVEL NETIF_MSG_LINK
 
@@ -160,6 +162,19 @@
                 ##__VA_ARGS__); \
 } while (0)
 
+enum mlx5e_rq_group {
+    MLX5E_RQ_GROUP_REGULAR,
+    MLX5E_RQ_GROUP_XSK,
+#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
+};
+
+static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
+{
+    if (mlx5_lag_is_lacp_owner(mdev))
+        return 1;
+
+    return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
+}
 
 static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 {
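
Note: MLX5E_NUM_RQ_GROUPS(g) works by token pasting: it glues "MLX5E_RQ_GROUP_" onto its argument and adds 1, so the group count automatically tracks whichever enumerator it names. A minimal standalone sketch of the same pattern (demo names, not driver code):

    enum rq_group_demo {
        RQ_GROUP_REGULAR,
        RQ_GROUP_XSK,
    /* paste "RQ_GROUP_" onto g; result is that enumerator's value + 1 */
    #define NUM_RQ_GROUPS(g) (1 + RQ_GROUP_##g)
    };

    _Static_assert(NUM_RQ_GROUPS(REGULAR) == 1, "one group without XSK");
    _Static_assert(NUM_RQ_GROUPS(XSK) == 2, "regular + XSK groups");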
@@ -173,12 +188,12 @@
     }
 }
 
+/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
 static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 {
     return is_kdump_kernel() ?
         MLX5E_MIN_NUM_CHANNELS :
-        min_t(int, mdev->priv.eq_table.num_comp_vectors,
-              MLX5E_MAX_NUM_CHANNELS);
+        min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
 }
 
 struct mlx5e_tx_wqe {
@@ -189,7 +204,7 @@
 
 struct mlx5e_rx_wqe_ll {
     struct mlx5_wqe_srq_next_seg next;
-    struct mlx5_wqe_data_seg data[0];
+    struct mlx5_wqe_data_seg data[];
 };
 
 struct mlx5e_rx_wqe_cyc {
@@ -205,35 +220,26 @@
 
 extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
 
-static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
-    "rx_cqe_moder",
-    "tx_cqe_moder",
-    "rx_cqe_compress",
-    "rx_striding_rq",
-    "rx_no_csum_complete",
-};
-
 enum mlx5e_priv_flag {
-    MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
-    MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
-    MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
-    MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
-    MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
+    MLX5E_PFLAG_RX_CQE_BASED_MODER,
+    MLX5E_PFLAG_TX_CQE_BASED_MODER,
+    MLX5E_PFLAG_RX_CQE_COMPRESS,
+    MLX5E_PFLAG_RX_STRIDING_RQ,
+    MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+    MLX5E_PFLAG_XDP_TX_MPWQE,
+    MLX5E_PFLAG_SKB_TX_MPWQE,
+    MLX5E_NUM_PFLAGS, /* Keep last */
 };
 
 #define MLX5E_SET_PFLAG(params, pflag, enable) \
     do { \
         if (enable) \
-            (params)->pflags |= (pflag); \
+            (params)->pflags |= BIT(pflag); \
         else \
-            (params)->pflags &= ~(pflag); \
+            (params)->pflags &= ~(BIT(pflag)); \
     } while (0)
 
-#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#endif
+#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
 
 struct mlx5e_params {
     u8 log_sq_size;
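
Note: the private flags switch from mask values to plain bit indices: the enum now counts 0, 1, 2, ... and the accessor macros apply BIT() themselves (presumably why this revision adds #include <linux/bits.h>), so MLX5E_NUM_PFLAGS can size per-flag tables, the role the removed mlx5e_priv_flags[] string array used to serve. A hedged usage sketch (demo function, not driver code):

    /* Sketch only: exercising the macros above from some setup path. */
    static void pflag_demo(struct mlx5e_params *params)
    {
        /* request CQE compression: sets BIT(MLX5E_PFLAG_RX_CQE_COMPRESS) */
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);

        if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS))
            ; /* flag observed set */

        /* clear it again */
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, false);
    }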
@@ -242,14 +248,11 @@
     u16 num_channels;
     u8 num_tc;
     bool rx_cqe_compress_def;
-    struct net_dim_cq_moder rx_cq_moderation;
-    struct net_dim_cq_moder tx_cq_moderation;
+    bool tunneled_offload_en;
+    struct dim_cq_moder rx_cq_moderation;
+    struct dim_cq_moder tx_cq_moderation;
     bool lro_en;
-    u32 lro_wqe_sz;
     u8 tx_min_inline_mode;
-    u8 rss_hfunc;
-    u8 toeplitz_hash_key[40];
-    u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
     bool vlan_strip_disable;
     bool scatter_fcs_en;
     bool rx_dim_enabled;
@@ -257,50 +260,19 @@
     u32 lro_timeout;
     u32 pflags;
     struct bpf_prog *xdp_prog;
+    struct mlx5e_xsk *xsk;
     unsigned int sw_mtu;
     int hard_mtu;
 };
 
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-struct mlx5e_cee_config {
-    /* bw pct for priority group */
-    u8 pg_bw_pct[CEE_DCBX_MAX_PGS];
-    u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO];
-    bool pfc_setting[CEE_DCBX_MAX_PRIO];
-    bool pfc_enable;
-};
-
-enum {
-    MLX5_DCB_CHG_RESET,
-    MLX5_DCB_NO_CHG,
-    MLX5_DCB_CHG_NO_RESET,
-};
-
-struct mlx5e_dcbx {
-    enum mlx5_dcbx_oper_mode mode;
-    struct mlx5e_cee_config cee_cfg; /* pending configuration */
-    u8 dscp_app_cnt;
-
-    /* The only setting that cannot be read from FW */
-    u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
-    u8 cap;
-
-    /* Buffer configuration */
-    bool manual_buffer;
-    u32 cable_len;
-    u32 xoff;
-};
-
-struct mlx5e_dcbx_dp {
-    u8 dscp2prio[MLX5E_MAX_DSCP];
-    u8 trust_state;
-};
-#endif
-
 enum {
     MLX5E_RQ_STATE_ENABLED,
+    MLX5E_RQ_STATE_RECOVERING,
     MLX5E_RQ_STATE_AM,
     MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
+    MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
+    MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
+    MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */
 };
 
 struct mlx5e_cq {
@@ -313,24 +285,19 @@
     struct mlx5_core_cq mcq;
     struct mlx5e_channel *channel;
 
-    /* cqe decompression */
-    struct mlx5_cqe64 title;
-    struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
-    u8 mini_arr_idx;
-    u16 decmprs_left;
-    u16 decmprs_wqe_counter;
-
     /* control */
     struct mlx5_core_dev *mdev;
     struct mlx5_wq_ctrl wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
-struct mlx5e_tx_wqe_info {
-    struct sk_buff *skb;
-    u32 num_bytes;
-    u8 num_wqebbs;
-    u8 num_dma;
-};
+struct mlx5e_cq_decomp {
+    /* cqe decompression */
+    struct mlx5_cqe64 title;
+    struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
+    u8 mini_arr_idx;
+    u16 left;
+    u16 wqe_counter;
+} ____cacheline_aligned_in_smp;
 
 enum mlx5e_dma_map_type {
     MLX5E_DMA_MAP_SINGLE,
@@ -345,15 +312,22 @@
 
 enum {
     MLX5E_SQ_STATE_ENABLED,
+    MLX5E_SQ_STATE_MPWQE,
     MLX5E_SQ_STATE_RECOVERING,
     MLX5E_SQ_STATE_IPSEC,
     MLX5E_SQ_STATE_AM,
     MLX5E_SQ_STATE_TLS,
-    MLX5E_SQ_STATE_REDIRECT,
+    MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
+    MLX5E_SQ_STATE_PENDING_XSK_TX,
 };
 
-struct mlx5e_sq_wqe_info {
-    u8 opcode;
+struct mlx5e_tx_mpwqe {
+    /* Current MPWQE session */
+    struct mlx5e_tx_wqe *wqe;
+    u32 bytes_count;
+    u8 ds_count;
+    u8 pkt_count;
+    u8 inline_on;
 };
 
 struct mlx5e_txqsq {
@@ -361,73 +335,136 @@
 
     /* dirtied @completion */
     u16 cc;
+    u16 skb_fifo_cc;
     u32 dma_fifo_cc;
-    struct net_dim dim; /* Adaptive Moderation */
+    struct dim dim; /* Adaptive Moderation */
 
     /* dirtied @xmit */
     u16 pc ____cacheline_aligned_in_smp;
+    u16 skb_fifo_pc;
     u32 dma_fifo_pc;
+    struct mlx5e_tx_mpwqe mpwqe;
 
     struct mlx5e_cq cq;
 
     /* read only */
     struct mlx5_wq_cyc wq;
     u32 dma_fifo_mask;
+    u16 skb_fifo_mask;
     struct mlx5e_sq_stats *stats;
     struct {
         struct mlx5e_sq_dma *dma_fifo;
+        struct sk_buff **skb_fifo;
         struct mlx5e_tx_wqe_info *wqe_info;
     } db;
     void __iomem *uar_map;
     struct netdev_queue *txq;
     u32 sqn;
+    u16 stop_room;
     u8 min_inline_mode;
     struct device *pdev;
     __be32 mkey_be;
     unsigned long state;
+    unsigned int hw_mtu;
     struct hwtstamp_config *tstamp;
     struct mlx5_clock *clock;
 
     /* control path */
     struct mlx5_wq_ctrl wq_ctrl;
     struct mlx5e_channel *channel;
+    int ch_ix;
     int txq_ix;
     u32 rate_limit;
-    struct mlx5e_txqsq_recover {
-        struct work_struct recover_work;
-        u64 last_recover;
-    } recover;
+    struct work_struct recover_work;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_dma_info {
-    struct page *page;
-    dma_addr_t addr;
+    dma_addr_t addr;
+    union {
+        struct page *page;
+        struct xdp_buff *xsk;
+    };
+};
+
+/* XDP packets can be transmitted in different ways. On completion, we need to
+ * distinguish between them to clean up things in a proper way.
+ */
+enum mlx5e_xdp_xmit_mode {
+    /* An xdp_frame was transmitted due to either XDP_REDIRECT from another
+     * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
+     * returned.
+     */
+    MLX5E_XDP_XMIT_MODE_FRAME,
+
+    /* The xdp_frame was created in place as a result of XDP_TX from a
+     * regular RQ. No DMA remapping happened, and the page belongs to us.
+     */
+    MLX5E_XDP_XMIT_MODE_PAGE,
+
+    /* No xdp_frame was created at all, the transmit happened from a UMEM
+     * page. The UMEM Completion Ring producer pointer has to be increased.
+     */
+    MLX5E_XDP_XMIT_MODE_XSK,
 };
 
 struct mlx5e_xdp_info {
-    struct xdp_frame *xdpf;
-    dma_addr_t dma_addr;
-    struct mlx5e_dma_info di;
+    enum mlx5e_xdp_xmit_mode mode;
+    union {
+        struct {
+            struct xdp_frame *xdpf;
+            dma_addr_t dma_addr;
+        } frame;
+        struct {
+            struct mlx5e_rq *rq;
+            struct mlx5e_dma_info di;
+        } page;
+    };
 };
+
+struct mlx5e_xmit_data {
+    dma_addr_t dma_addr;
+    void *data;
+    u32 len;
+};
+
+struct mlx5e_xdp_info_fifo {
+    struct mlx5e_xdp_info *xi;
+    u32 *cc;
+    u32 *pc;
+    u32 mask;
+};
+
+struct mlx5e_xdpsq;
+typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
+typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
                                         struct mlx5e_xmit_data *,
                                         struct mlx5e_xdp_info *,
                                         int);
 
 struct mlx5e_xdpsq {
     /* data path */
 
     /* dirtied @completion */
+    u32 xdpi_fifo_cc;
     u16 cc;
-    bool redirect_flush;
 
     /* dirtied @xmit */
-    u16 pc ____cacheline_aligned_in_smp;
-    bool doorbell;
+    u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
+    u16 pc;
+    struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+    struct mlx5e_tx_mpwqe mpwqe;
 
     struct mlx5e_cq cq;
 
     /* read only */
+    struct xsk_buff_pool *xsk_pool;
     struct mlx5_wq_cyc wq;
     struct mlx5e_xdpsq_stats *stats;
+    mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
+    mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
     struct {
-        struct mlx5e_xdp_info *xdpi;
+        struct mlx5e_xdp_wqe_info *wqe_info;
+        struct mlx5e_xdp_info_fifo xdpi_fifo;
     } db;
     void __iomem *uar_map;
     u32 sqn;
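
Note: on TX completion the driver has to look at mlx5e_xdp_info::mode to know how to release the transmitted buffer; the three enumerators map to the three cleanup paths spelled out in the comments above. A hedged sketch of that dispatch (the real completion code lives elsewhere in the driver; the function name here is illustrative only):

    /* Sketch, not the actual completion handler. */
    static void xdpi_release_demo(struct mlx5e_xdp_info *xdpi)
    {
        switch (xdpi->mode) {
        case MLX5E_XDP_XMIT_MODE_FRAME:
            /* DMA-unmap xdpi->frame.dma_addr, then return
             * xdpi->frame.xdpf to its originator.
             */
            break;
        case MLX5E_XDP_XMIT_MODE_PAGE:
            /* the page is ours: recycle xdpi->page.di via
             * xdpi->page.rq's page pool.
             */
            break;
        case MLX5E_XDP_XMIT_MODE_XSK:
            /* no frame or page to free; advance the UMEM
             * Completion Ring producer pointer by one.
             */
            break;
        }
    }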
@@ -444,15 +481,15 @@
 
 struct mlx5e_icosq {
     /* data path */
+    u16 cc;
+    u16 pc;
 
-    /* dirtied @xmit */
-    u16 pc ____cacheline_aligned_in_smp;
-
+    struct mlx5_wqe_ctrl_seg *doorbell_cseg;
     struct mlx5e_cq cq;
 
     /* write@xmit, read@completion */
     struct {
-        struct mlx5e_sq_wqe_info *ico_wqe;
+        struct mlx5e_icosq_wqe_info *wqe_info;
     } db;
 
     /* read only */
@@ -464,13 +501,9 @@
     /* control path */
     struct mlx5_wq_ctrl wq_ctrl;
     struct mlx5e_channel *channel;
-} ____cacheline_aligned_in_smp;
 
-static inline bool
-mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
-{
-    return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
-}
+    struct work_struct recover_work;
+} ____cacheline_aligned_in_smp;
 
 struct mlx5e_wqe_frag_info {
     struct mlx5e_dma_info *di;
@@ -513,8 +546,11 @@
 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 
+int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
+
 enum mlx5e_rq_flag {
-    MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
+    MLX5E_RQ_FLAG_XDP_XMIT,
+    MLX5E_RQ_FLAG_XDP_REDIRECT,
 };
 
 struct mlx5e_rq_frag_info {
@@ -545,12 +581,16 @@
             struct mlx5e_mpw_info *info;
             mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
             u16 num_strides;
+            u16 actual_wq_head;
             u8 log_stride_sz;
-            bool umr_in_progress;
+            u8 umr_in_progress;
+            u8 umr_last_bulk;
+            u8 umr_completed;
         } mpwqe;
     };
     struct {
         u16 headroom;
+        u32 frame0_sz;
         u8 map_dir; /* dma map direction */
     } buff;
 
@@ -559,6 +599,7 @@
     struct net_device *netdev;
     struct mlx5e_rq_stats *stats;
     struct mlx5e_cq cq;
+    struct mlx5e_cq_decomp cqd;
     struct mlx5e_page_cache page_cache;
     struct hwtstamp_config *tstamp;
     struct mlx5_clock *clock;
@@ -571,13 +612,18 @@
     int ix;
     unsigned int hw_mtu;
 
-    struct net_dim dim; /* Dynamic Interrupt Moderation */
+    struct dim dim; /* Dynamic Interrupt Moderation */
 
     /* XDP */
-    struct bpf_prog *xdp_prog;
-    struct mlx5e_xdpsq xdpsq;
+    struct bpf_prog __rcu *xdp_prog;
+    struct mlx5e_xdpsq *xdpsq;
     DECLARE_BITMAP(flags, 8);
     struct page_pool *page_pool;
+
+    /* AF_XDP zero-copy */
+    struct xsk_buff_pool *xsk_pool;
+
+    struct work_struct recover_work;
 
     /* control */
     struct mlx5_wq_ctrl wq_ctrl;
@@ -586,14 +632,21 @@
     u32 rqn;
     struct mlx5_core_dev *mdev;
     struct mlx5_core_mkey umr_mkey;
+    struct mlx5e_dma_info wqe_overflow;
 
     /* XDP read-mostly */
     struct xdp_rxq_info xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
+enum mlx5e_channel_state {
+    MLX5E_CHANNEL_STATE_XSK,
+    MLX5E_CHANNEL_NUM_STATES
+};
+
 struct mlx5e_channel {
     /* data path */
     struct mlx5e_rq rq;
+    struct mlx5e_xdpsq rq_xdpsq;
     struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
     struct mlx5e_icosq icosq; /* internal control operations */
     bool xdp;
@@ -602,9 +655,19 @@
     struct net_device *netdev;
     __be32 mkey_be;
     u8 num_tc;
+    u8 lag_port;
 
     /* XDP_REDIRECT */
     struct mlx5e_xdpsq xdpsq;
+
+    /* AF_XDP zero-copy */
+    struct mlx5e_rq xskrq;
+    struct mlx5e_xdpsq xsksq;
+
+    /* Async ICOSQ */
+    struct mlx5e_icosq async_icosq;
+    /* async_icosq can be accessed from any CPU - the spinlock protects it. */
+    spinlock_t async_icosq_lock;
 
     /* data path - accessed per napi poll */
     struct irq_desc *irq_desc;
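
Note: the comment on async_icosq_lock states the locking contract: the regular icosq is driven only from the channel's NAPI context, but async_icosq may be posted to from any CPU, so every post must be serialized by the spinlock. A hedged sketch of the expected pattern (illustrative only, not driver code; the BH-disabling lock variant is an assumption):

    static void post_async_icosq_demo(struct mlx5e_channel *c)
    {
        spin_lock_bh(&c->async_icosq_lock);
        /* build a WQE on c->async_icosq and ring its doorbell here */
        spin_unlock_bh(&c->async_icosq_lock);
    }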
@@ -614,6 +677,7 @@
     struct mlx5e_priv *priv;
     struct mlx5_core_dev *mdev;
     struct hwtstamp_config *tstamp;
+    DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
     int ix;
     int cpu;
 };
@@ -628,15 +692,17 @@
     struct mlx5e_ch_stats ch;
     struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
     struct mlx5e_rq_stats rq;
+    struct mlx5e_rq_stats xskrq;
     struct mlx5e_xdpsq_stats rq_xdpsq;
     struct mlx5e_xdpsq_stats xdpsq;
+    struct mlx5e_xdpsq_stats xsksq;
 } ____cacheline_aligned_in_smp;
 
 enum {
-    MLX5E_STATE_ASYNC_EVENTS_ENABLED,
     MLX5E_STATE_OPENED,
     MLX5E_STATE_DESTROYING,
     MLX5E_STATE_XDP_TX_ENABLED,
+    MLX5E_STATE_XDP_ACTIVE,
 };
 
 struct mlx5e_rqt {
@@ -655,10 +721,54 @@
     MLX5E_NIC_PRIO
 };
 
+struct mlx5e_rss_params {
+    u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+    u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
+    u8 toeplitz_hash_key[40];
+    u8 hfunc;
+};
+
+struct mlx5e_modify_sq_param {
+    int curr_state;
+    int next_state;
+    int rl_update;
+    int rl_index;
+};
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+struct mlx5e_hv_vhca_stats_agent {
+    struct mlx5_hv_vhca_agent *agent;
+    struct delayed_work work;
+    u16 delay;
+    void *buf;
+};
+#endif
+
+struct mlx5e_xsk {
+    /* XSK buffer pools are stored separately from channels,
+     * because we don't want to lose them when channels are
+     * recreated. The kernel also stores the buffer pool, but it
+     * doesn't distinguish between zero-copy and non-zero-copy
+     * UMEMs, so we rely on our own mechanism.
+     */
+    struct xsk_buff_pool **pools;
+    u16 refcnt;
+    bool ever_used;
+};
+
+/* Temporary storage for variables that are allocated when struct mlx5e_priv is
+ * initialized, and used where we can't allocate them because those functions
+ * must not fail. Use with care and make sure the same variable is not used
+ * simultaneously by multiple users.
+ */
+struct mlx5e_scratchpad {
+    cpumask_var_t cpumask;
+};
+
 struct mlx5e_priv {
     /* priv data path fields - start */
     struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
-    int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+    int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
 #ifdef CONFIG_MLX5_CORE_EN_DCB
     struct mlx5e_dcbx_dp dcbx_dp;
 #endif
@@ -670,11 +780,13 @@
     struct mlx5e_rq drop_rq;
 
     struct mlx5e_channels channels;
-    u32 tisn[MLX5E_MAX_NUM_TC];
+    u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
     struct mlx5e_rqt indir_rqt;
     struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
     struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
     struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+    struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
+    struct mlx5e_rss_params rss_params;
     u32 tx_rates[MLX5E_MAX_NUM_SQS];
 
     struct mlx5e_flow_steering fs;
@@ -683,16 +795,22 @@
     struct work_struct update_carrier_work;
     struct work_struct set_rx_mode_work;
     struct work_struct tx_timeout_work;
-    struct delayed_work update_stats_work;
+    struct work_struct update_stats_work;
+    struct work_struct monitor_counters_work;
+    struct mlx5_nb monitor_counters_nb;
 
     struct mlx5_core_dev *mdev;
     struct net_device *netdev;
     struct mlx5e_stats stats;
     struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+    u16 max_nch;
     u8 max_opened_tc;
     struct hwtstamp_config tstamp;
     u16 q_counter;
     u16 drop_rq_q_counter;
+    struct notifier_block events_nb;
+
+    struct udp_tunnel_nic_info nic_info;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
     struct mlx5e_dcbx dcbx;
 #endif
@@ -705,10 +823,25 @@
 #ifdef CONFIG_MLX5_EN_TLS
     struct mlx5e_tls *tls;
 #endif
+    struct devlink_health_reporter *tx_reporter;
+    struct devlink_health_reporter *rx_reporter;
+    struct devlink_port dl_port;
+    struct mlx5e_xsk xsk;
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+    struct mlx5e_hv_vhca_stats_agent stats_agent;
+#endif
+    struct mlx5e_scratchpad scratchpad;
 };
 
+struct mlx5e_rx_handlers {
+    mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+    mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
+};
+
+extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
+
 struct mlx5e_profile {
-    void (*init)(struct mlx5_core_dev *mdev,
+    int (*init)(struct mlx5_core_dev *mdev,
                 struct net_device *netdev,
                 const struct mlx5e_profile *profile, void *ppriv);
     void (*cleanup)(struct mlx5e_priv *priv);
@@ -718,59 +851,24 @@
     void (*cleanup_tx)(struct mlx5e_priv *priv);
     void (*enable)(struct mlx5e_priv *priv);
     void (*disable)(struct mlx5e_priv *priv);
+    int (*update_rx)(struct mlx5e_priv *priv);
     void (*update_stats)(struct mlx5e_priv *priv);
     void (*update_carrier)(struct mlx5e_priv *priv);
-    int (*max_nch)(struct mlx5_core_dev *mdev);
-    struct {
-        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
-        mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
-    } rx_handlers;
+    unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
+    mlx5e_stats_grp_t *stats_grps;
+    const struct mlx5e_rx_handlers *rx_handlers;
     int max_tc;
+    u8 rq_groups;
 };
 
 void mlx5e_build_ptys2ethtool_map(void);
-
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       struct net_device *sb_dev,
-                       select_queue_fallback_t fallback);
-netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-                          struct mlx5e_tx_wqe *wqe, u16 pi);
-
-void mlx5e_completion_event(struct mlx5_core_cq *mcq);
-void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
-int mlx5e_napi_poll(struct napi_struct *napi, int budget);
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
-int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
                                 struct mlx5e_params *params);
 
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
-                        bool recycle);
-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-struct sk_buff *
-mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                                u16 cqe_bcnt, u32 head_offset, u32 page_idx);
-struct sk_buff *
-mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
-struct sk_buff *
-mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-struct sk_buff *
-mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-
-void mlx5e_update_stats(struct mlx5e_priv *priv);
+void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
 
 void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
 int mlx5e_self_test_num(struct mlx5e_priv *priv);
@@ -801,9 +899,32 @@
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
                        struct mlx5e_redirect_rqt_param rrp);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
-                                    enum mlx5e_traffic_types tt,
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
+                                    const struct mlx5e_tirc_config *ttconfig,
                                     void *tirc, bool inner);
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
+struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
+
+struct mlx5e_xsk_param;
+
+struct mlx5e_rq_param;
+int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+                  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
+                  struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
+int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
+void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+void mlx5e_close_rq(struct mlx5e_rq *rq);
+
+struct mlx5e_sq_param;
+int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
+                     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
+                     struct mlx5e_xdpsq *sq, bool is_redirect);
+void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
+
+struct mlx5e_cq_param;
+int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
+                  struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
+void mlx5e_close_cq(struct mlx5e_cq *cq);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -812,131 +933,101 @@
                       struct mlx5e_channels *chs);
 void mlx5e_close_channels(struct mlx5e_channels *chs);
 
-/* Function pointer to be used to modify WH settings while
+/* Function pointer to be used to modify HW or kernel settings while
  * switching channels
  */
-typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
-void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
-                                struct mlx5e_channels *new_chs,
-                                mlx5e_fp_hw_modify hw_modify);
+typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
+#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
+int fn##_ctx(struct mlx5e_priv *priv, void *context) \
+{ \
+    return fn(priv); \
+}
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
+int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
+                               struct mlx5e_channels *new_chs,
+                               mlx5e_fp_preactivate preactivate,
+                               void *context);
+int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
+int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 
 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                    int num_channels);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
-                                 u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
-                                 u8 cq_period_mode);
+
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
                                struct mlx5e_params *params);
+int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+void mlx5e_activate_rq(struct mlx5e_rq *rq);
+void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
+void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
 
-static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
+int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
+                    struct mlx5e_modify_sq_param *p);
+void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
+void mlx5e_tx_disable_queue(struct netdev_queue *txq);
+
+static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
 {
-    return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
-            MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
-}
-
-static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
-                                      struct mlx5e_tx_wqe **wqe,
-                                      u16 *pi)
-{
-    struct mlx5_wq_cyc *wq = &sq->wq;
-
-    *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-    *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
-    memset(*wqe, 0, sizeof(**wqe));
-}
-
-static inline
-struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
-{
-    u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
-    struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-    struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-
-    memset(cseg, 0, sizeof(*cseg));
-
-    cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
-    cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
-
-    (*pc)++;
-
-    return wqe;
-}
-
-static inline
-void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
-                     void __iomem *uar_map,
-                     struct mlx5_wqe_ctrl_seg *ctrl)
-{
-    ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-    /* ensure wqe is visible to device before updating doorbell record */
-    dma_wmb();
-
-    *wq->db = cpu_to_be32(pc);
-
-    /* ensure doorbell record is visible to device before ringing the
-     * doorbell
-     */
-    wmb();
-
-    mlx5_write64((__be32 *)ctrl, uar_map, NULL);
-}
-
-static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
-{
-    struct mlx5_core_cq *mcq;
-
-    mcq = &cq->mcq;
-    mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
+    return MLX5_CAP_ETH(mdev, swp) &&
+           MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
 }
 
 extern const struct ethtool_ops mlx5e_ethtool_ops;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
-int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
-void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
-void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
-void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
-#endif
 
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
-                     struct mlx5e_tir *tir, u32 *in, int inlen);
+int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
+                     u32 *in);
 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                        struct mlx5e_tir *tir);
 int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+                       bool enable_mc_lb);
+void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
 
 /* common netdev helpers */
+void mlx5e_create_q_counters(struct mlx5e_priv *priv);
+void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
+int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
+                       struct mlx5e_rq *drop_rq);
+void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
+
 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
 
-int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
 
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
 
-int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
-                     u32 underlay_qpn, u32 *tisn);
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
 
 int mlx5e_create_tises(struct mlx5e_priv *priv);
-void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
+void mlx5e_destroy_tises(struct mlx5e_priv *priv);
+int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
+void mlx5e_update_carrier(struct mlx5e_priv *priv);
 int mlx5e_close(struct net_device *netdev);
 int mlx5e_open(struct net_device *netdev);
-void mlx5e_update_stats_work(struct work_struct *work);
 
+void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
 int mlx5e_bits_invert(unsigned long a, int size);
 
-typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
+int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
+int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
-                     change_hw_mtu_cb set_mtu_cb);
+                     mlx5e_fp_preactivate preactivate);
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
 
 /* ethtool helpers */
 void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
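
Note: MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX adapts an existing int (*)(struct mlx5e_priv *) callback to the two-argument mlx5e_fp_preactivate signature by pasting a _ctx suffix onto its name; the paired declarations above (mlx5e_set_dev_port_mtu / mlx5e_set_dev_port_mtu_ctx, mlx5e_num_channels_changed / _ctx) suggest the wrappers are generated this way in the .c files. A sketch of the expansion (assumed usage, not quoted from the source):

    /* In a .c file:
     *     MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
     * expands to the _ctx variant declared above:
     */
    int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context)
    {
        return mlx5e_set_dev_port_mtu(priv);
    }

    /* ...which can then be handed, with an optional context, to
     * mlx5e_safe_switch_channels(priv, &new_chs,
     *                            mlx5e_set_dev_port_mtu_ctx, NULL);
     */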
@@ -958,22 +1049,61 @@
                                struct ethtool_coalesce *coal);
 int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
                                struct ethtool_coalesce *coal);
+int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+                                     struct ethtool_link_ksettings *link_ksettings);
+int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+                                     const struct ethtool_link_ksettings *link_ksettings);
+int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
+                   const u8 hfunc);
+int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                    u32 *rule_locs);
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
+u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
 int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
                               struct ethtool_ts_info *info);
 int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
                                struct ethtool_flash *flash);
+void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
+                                  struct ethtool_pauseparam *pauseparam);
+int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+                                 struct ethtool_pauseparam *pauseparam);
 
 /* mlx5e generic netdev management API */
+int mlx5e_netdev_init(struct net_device *netdev,
+                      struct mlx5e_priv *priv,
+                      struct mlx5_core_dev *mdev,
+                      const struct mlx5e_profile *profile,
+                      void *ppriv);
+void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
 struct net_device*
 mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
-                    void *ppriv);
+                    int nch, void *ppriv);
 int mlx5e_attach_netdev(struct mlx5e_priv *priv);
 void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
+void mlx5e_build_nic_params(struct mlx5e_priv *priv,
+                            struct mlx5e_xsk *xsk,
+                            struct mlx5e_rss_params *rss_params,
                             struct mlx5e_params *params,
-                            u16 max_channels, u16 mtu);
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+                            u16 mtu);
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
+                           struct mlx5e_params *params);
+void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
+                            u16 num_channels);
 void mlx5e_rx_dim_work(struct work_struct *work);
 void mlx5e_tx_dim_work(struct work_struct *work);
+
+netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+                                       struct net_device *netdev,
+                                       netdev_features_t features);
+int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
+#ifdef CONFIG_MLX5_ESWITCH
+int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
+int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
+int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
+#endif
 #endif /* __MLX5_EN_H__ */