2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -32,32 +32,58 @@
 #ifndef __MLX5_EN_XDP_H__
 #define __MLX5_EN_XDP_H__
 
+#include <linux/indirect_call_wrapper.h>
+
 #include "en.h"
+#include "en/txrx.h"
 
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT \
-	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
 
-int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
+	(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
+	 sizeof(struct mlx5_wqe_inline_seg))
+
+struct mlx5e_xsk_param;
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-		      void *va, u16 *rx_headroom, u32 *len);
+		      u32 *len, struct xdp_buff *xdp);
+void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
-
-bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
+void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
+void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		   u32 flags);
+
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
+							  struct mlx5e_xmit_data *xdptxd,
+							  struct mlx5e_xdp_info *xdpi,
+							  int check_result));
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
+						    struct mlx5e_xmit_data *xdptxd,
+						    struct mlx5e_xdp_info *xdpi,
+						    int check_result));
+INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
+INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
 
 static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
 {
 	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+
+	if (priv->channels.params.xdp_prog)
+		set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
 }
 
 static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
 {
+	if (priv->channels.params.xdp_prog)
+		clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
+
 	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
-	/* let other device's napi(s) see our new state */
-	synchronize_rcu();
+	/* Let other device's napi(s) and XSK wakeups see our new state. */
+	synchronize_net();
 }
 
 static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
@@ -65,15 +91,96 @@
 	return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
 }
 
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
+static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
 {
-	struct mlx5_wq_cyc *wq = &sq->wq;
-	struct mlx5e_tx_wqe *wqe;
-	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
-
-	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
-	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
+	return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
 }
 
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
+{
+	if (sq->doorbell_cseg) {
+		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
+		sq->doorbell_cseg = NULL;
+	}
+}
+
+/* Enable inline WQEs to shift some load from a congested HCA (HW) to
+ * a less congested cpu (SW).
+ */
+static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
+{
+	u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
+
+#define MLX5E_XDP_INLINE_WATERMARK_LOW	10
+#define MLX5E_XDP_INLINE_WATERMARK_HIGH	128
+
+	if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
+		return false;
+
+	if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
+		return true;
+
+	return cur;
+}
+
+static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
+{
+	if (session->inline_on)
+		return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
+		       MLX5E_TX_MPW_MAX_NUM_DS;
+	return mlx5e_tx_mpwqe_is_full(session);
+}
+
+struct mlx5e_xdp_wqe_info {
+	u8 num_wqebbs;
+	u8 num_pkts;
+};
+
+static inline void
+mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
+			 struct mlx5e_xmit_data *xdptxd,
+			 struct mlx5e_xdpsq_stats *stats)
+{
+	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+	struct mlx5_wqe_data_seg *dseg =
+		(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
+	u32 dma_len = xdptxd->len;
+
+	session->pkt_count++;
+	session->bytes_count += dma_len;
+
+	if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
+		struct mlx5_wqe_inline_seg *inline_dseg =
+			(struct mlx5_wqe_inline_seg *)dseg;
+		u16 ds_len = sizeof(*inline_dseg) + dma_len;
+		u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);
+
+		inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
+		memcpy(inline_dseg->data, xdptxd->data, dma_len);
+
+		session->ds_count += ds_cnt;
+		stats->inlnw++;
+		return;
+	}
+
+	dseg->addr = cpu_to_be64(xdptxd->dma_addr);
+	dseg->byte_count = cpu_to_be32(dma_len);
+	dseg->lkey = sq->mkey_be;
+	session->ds_count++;
+}
+
+static inline void
+mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
+		     struct mlx5e_xdp_info *xi)
+{
+	u32 i = (*fifo->pc)++ & fifo->mask;
+
+	fifo->xi[i] = *xi;
+}
+
+static inline struct mlx5e_xdp_info
+mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
+{
+	return fifo->xi[(*fifo->cc)++ & fifo->mask];
+}
 #endif
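
The four INDIRECT_CALLABLE_DECLARE() lines exist so the xmit path can stay a per-SQ function pointer (selected by mlx5e_set_xmit_fp() between the legacy and MPWQE flavours) while the hot path still avoids a retpolined indirect branch. Below is a minimal userspace sketch of that dispatch pattern; the INDIRECT_CALL_2() stand-in is simplified from include/linux/indirect_call_wrapper.h, and the xmit bodies are invented placeholders, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's INDIRECT_CALL_2(): compare the
 * pointer against the known candidates and call the match directly,
 * falling back to a true indirect call only if neither matches. */
#define INDIRECT_CALL_2(f, f2, f1, ...)				\
	((f) == (f2) ? (f2)(__VA_ARGS__) :			\
	 (f) == (f1) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))

/* Invented placeholders for the two xmit flavours. */
static bool xmit_xdp_frame(int len)
{
	printf("legacy WQE, %d bytes\n", len);
	return true;
}

static bool xmit_xdp_frame_mpwqe(int len)
{
	printf("MPWQE, %d bytes\n", len);
	return true;
}

int main(void)
{
	/* Chosen once per SQ reconfiguration, the way mlx5e_set_xmit_fp()
	 * chooses between the two real flavours. */
	bool (*xmit)(int) = xmit_xdp_frame_mpwqe;

	return INDIRECT_CALL_2(xmit, xmit_xdp_frame_mpwqe,
			       xmit_xdp_frame, 64) ? 0 : 1;
}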
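
mlx5e_xdp_get_inline_state() is a two-watermark hysteresis on the number of in-flight descriptors (xdpi_fifo_pc - xdpi_fifo_cc): inlining turns on once 128 or more are outstanding and turns off only after the backlog drains to 10 or fewer, so the state cannot flap around a single threshold. A minimal userspace model of that decision, with the counters passed in directly (an assumption for self-containment; the driver reads them from the SQ):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WATERMARK_LOW	10
#define WATERMARK_HIGH	128

/* Same decision logic as mlx5e_xdp_get_inline_state(); the u16
 * subtraction keeps "outstanding" correct even after the free-running
 * counters wrap. */
static bool get_inline_state(uint16_t pc, uint16_t cc, bool cur)
{
	uint16_t outstanding = pc - cc;

	if (cur && outstanding <= WATERMARK_LOW)
		return false;
	if (!cur && outstanding >= WATERMARK_HIGH)
		return true;
	return cur;
}

int main(void)
{
	bool on = false;

	on = get_inline_state(150, 100, on); /* 50 outstanding: stays off */
	on = get_inline_state(300, 100, on); /* 200 outstanding: turns on */
	on = get_inline_state(300, 250, on); /* 50 outstanding: stays on */
	on = get_inline_state(300, 295, on); /* 5 outstanding: turns off */
	printf("final state: %s\n", on ? "inline" : "no inline");
	return 0;
}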
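
The MLX5E_XDP_INLINE_WQE_SZ_THRSD arithmetic is worth spelling out. Assuming MLX5_SEND_WQE_DS is 16 bytes and struct mlx5_wqe_inline_seg is a single 32-bit byte_count header (both assumptions about the mlx5 wire format, not stated in this hunk), the threshold works out to 16 * 16 - 4 = 252 bytes, and a packet right at the threshold consumes DIV_ROUND_UP(4 + 252, 16) = 16 data segments, exactly MLX5E_XDP_INLINE_WQE_MAX_DS_CNT:

#include <stdio.h>

#define SEND_WQE_DS		16	/* bytes per data segment (assumed) */
#define INLINE_SEG_HDR		4	/* sizeof inline seg header (assumed) */
#define INLINE_WQE_MAX_DS_CNT	16

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int thrsd = INLINE_WQE_MAX_DS_CNT * SEND_WQE_DS - INLINE_SEG_HDR;
	int ds_cnt = DIV_ROUND_UP(INLINE_SEG_HDR + thrsd, SEND_WQE_DS);

	/* 252-byte threshold; a maximal inline packet fills all 16 DS. */
	printf("THRSD = %d bytes, ds_cnt at threshold = %d\n", thrsd, ds_cnt);
	return 0;
}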
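
mlx5e_xdpi_fifo_push()/pop() implement a power-of-two ring addressed by free-running counters: the counters only ever increment, and the mask (size - 1) is applied at access time, which is what lets the inline-state helper compute the outstanding count as a plain subtraction even across wrap-around. A self-contained model of the same ring discipline (the struct layout here is illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct entry { int frame_id; };

struct fifo {
	struct entry *xi;
	uint32_t *pc;	/* producer counter, owned by the SQ side */
	uint32_t *cc;	/* consumer counter, owned by the CQ side */
	uint32_t mask;	/* ring size - 1; size must be a power of two */
};

static void fifo_push(struct fifo *f, struct entry *e)
{
	uint32_t i = (*f->pc)++ & f->mask;

	f->xi[i] = *e;
}

static struct entry fifo_pop(struct fifo *f)
{
	return f->xi[(*f->cc)++ & f->mask];
}

int main(void)
{
	struct entry ring[8];
	uint32_t pc = 0, cc = 0;
	struct fifo f = { .xi = ring, .pc = &pc, .cc = &cc, .mask = 7 };

	for (int n = 0; n < 10; n++) {	/* wraps past index 7 safely */
		struct entry e = { .frame_id = n };

		fifo_push(&f, &e);
		printf("%d\n", fifo_pop(&f).frame_id);
	}
	return 0;
}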