@@ -31,11 +31,13 @@
  */
 
 #include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
 #include "en/xdp.h"
+#include "en/params.h"
 
-int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 {
-        int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+        int hr = mlx5e_get_linear_rq_headroom(params, xsk);
 
         /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
          * The condition checked in mlx5e_rx_is_linear_skb is:
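The headroom is no longer the compile-time constant NET_IP_ALIGN + XDP_PACKET_HEADROOM. As a point of reference: XDP_PACKET_HEADROOM is 256 bytes and NET_IP_ALIGN is 0 on x86 (2 on many other architectures), so the reserved space used to be a fixed 256 bytes or so regardless of queue type. An AF_XDP socket can configure its own headroom on the umem, so the maximum XDP MTU is now derived from mlx5e_get_linear_rq_headroom(params, xsk), which reports the headroom the (possibly XSK) linear RQ will actually use; for regular queues (xsk == NULL) the result should match the old constant.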
@@ -54,75 +56,261 @@
 }
 
 static inline bool
-mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
-                    struct xdp_buff *xdp)
+mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
+                    struct mlx5e_dma_info *di, struct xdp_buff *xdp)
 {
+        struct mlx5e_xmit_data xdptxd;
         struct mlx5e_xdp_info xdpi;
+        struct xdp_frame *xdpf;
+        dma_addr_t dma_addr;
 
-        xdpi.xdpf = convert_to_xdp_frame(xdp);
-        if (unlikely(!xdpi.xdpf))
+        xdpf = xdp_convert_buff_to_frame(xdp);
+        if (unlikely(!xdpf))
                 return false;
-        xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
-        dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
-                                   xdpi.xdpf->len, PCI_DMA_TODEVICE);
-        xdpi.di = *di;
 
-        return mlx5e_xmit_xdp_frame(sq, &xdpi);
+        xdptxd.data = xdpf->data;
+        xdptxd.len = xdpf->len;
+
+        if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
+                /* The xdp_buff was in the UMEM and was copied into a newly
+                 * allocated page. The UMEM page was returned via the ZCA, and
+                 * this new page has to be mapped at this point and has to be
+                 * unmapped and returned via xdp_return_frame on completion.
+                 */
+
+                /* Prevent double recycling of the UMEM page. Even in case this
+                 * function returns false, the xdp_buff shouldn't be recycled,
+                 * as it was already done in xdp_convert_zc_to_xdp_frame.
+                 */
+                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
+
+                xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
+
+                dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
+                                          DMA_TO_DEVICE);
+                if (dma_mapping_error(sq->pdev, dma_addr)) {
+                        xdp_return_frame(xdpf);
+                        return false;
+                }
+
+                xdptxd.dma_addr = dma_addr;
+                xdpi.frame.xdpf = xdpf;
+                xdpi.frame.dma_addr = dma_addr;
+        } else {
+                /* Driver assumes that xdp_convert_buff_to_frame returns
+                 * an xdp_frame that points to the same memory region as
+                 * the original xdp_buff. It allows to map the memory only
+                 * once and to use the DMA_BIDIRECTIONAL mode.
+                 */
+
+                xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+
+                dma_addr = di->addr + (xdpf->data - (void *)xdpf);
+                dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
+                                           DMA_TO_DEVICE);
+
+                xdptxd.dma_addr = dma_addr;
+                xdpi.page.rq = rq;
+                xdpi.page.di = *di;
+        }
+
+        return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+                               mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
 }
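Each transmitted packet now carries an mlx5e_xdp_info descriptor tagged with a mode, pushed into a FIFO at transmit time and popped at completion time (see mlx5e_free_xdpsq_desc below), which tells the completion handler whether to dma_unmap and xdp_return_frame, release an RX page, or just count an AF_XDP frame. A minimal sketch of the shape this code assumes; the authoritative definition lives in en/xdp.h:

        /* Sketch only -- see en/xdp.h for the real definition. */
        enum mlx5e_xdp_xmit_mode {
                MLX5E_XDP_XMIT_MODE_FRAME, /* mapped via dma_map_single(), unmap + xdp_return on completion */
                MLX5E_XDP_XMIT_MODE_PAGE,  /* driver-owned RX page, released back to the page pool */
                MLX5E_XDP_XMIT_MODE_XSK,   /* AF_XDP TX, only counted towards xsk_tx_completed() */
        };

        struct mlx5e_xdp_info {
                enum mlx5e_xdp_xmit_mode mode;
                union {
                        struct {
                                struct xdp_frame *xdpf;
                                dma_addr_t dma_addr;
                        } frame;
                        struct {
                                struct mlx5e_rq *rq;
                                struct mlx5e_dma_info di;
                        } page;
                };
        };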
 
 /* returns true if packet was consumed by xdp */
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-                      void *va, u16 *rx_headroom, u32 *len)
+                      u32 *len, struct xdp_buff *xdp)
 {
-        struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
-        struct xdp_buff xdp;
+        struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
         u32 act;
         int err;
 
         if (!prog)
                 return false;
 
-        xdp.data = va + *rx_headroom;
-        xdp_set_data_meta_invalid(&xdp);
-        xdp.data_end = xdp.data + *len;
-        xdp.data_hard_start = va;
-        xdp.rxq = &rq->xdp_rxq;
-
-        act = bpf_prog_run_xdp(prog, &xdp);
+        act = bpf_prog_run_xdp(prog, xdp);
         switch (act) {
         case XDP_PASS:
-                *rx_headroom = xdp.data - xdp.data_hard_start;
-                *len = xdp.data_end - xdp.data;
+                *len = xdp->data_end - xdp->data;
                 return false;
         case XDP_TX:
-                if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
+                if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
                         goto xdp_abort;
                 __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
                 return true;
         case XDP_REDIRECT:
                 /* When XDP enabled then page-refcnt==1 here */
-                err = xdp_do_redirect(rq->netdev, &xdp, prog);
+                err = xdp_do_redirect(rq->netdev, xdp, prog);
                 if (unlikely(err))
                         goto xdp_abort;
                 __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
-                rq->xdpsq.redirect_flush = true;
-                mlx5e_page_dma_unmap(rq, di);
+                __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
+                if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
+                        mlx5e_page_dma_unmap(rq, di);
                 rq->stats->xdp_redirect++;
                 return true;
         default:
                 bpf_warn_invalid_xdp_action(act);
-                /* fall through */
+                fallthrough;
         case XDP_ABORTED:
 xdp_abort:
                 trace_xdp_exception(rq->netdev, prog, act);
-                /* fall through */
+                fallthrough;
         case XDP_DROP:
                 rq->stats->xdp_drop++;
                 return true;
         }
 }
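mlx5e_xdp_handle no longer constructs the xdp_buff itself; the RX path builds it and passes it in, which lets the same handler serve both the regular RQ and the XSK RQ (whose xdp_buff comes out of the XSK buffer pool). For the regular RQ the caller-side construction is essentially the code deleted above:

        xdp.data_hard_start = va;
        xdp.data = va + rx_headroom;
        xdp_set_data_meta_invalid(&xdp);
        xdp.data_end = xdp.data + len;
        xdp.rxq = &rq->xdp_rxq;

XDP_PASS also stops reporting the headroom back through a pointer, since the caller now owns the buff and can read xdp->data directly. The switch from READ_ONCE to rcu_dereference is a documentation and lockdep improvement rather than a behavioural one: the NAPI poll loop already runs inside an RCU read-side critical section, and rcu_dereference states (and can verify) that assumption.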
 
-bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
+static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
+{
+        struct mlx5_wq_cyc *wq = &sq->wq;
+        u16 pi, contig_wqebbs;
+
+        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+        contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+        if (unlikely(contig_wqebbs < size)) {
+                struct mlx5e_xdp_wqe_info *wi, *edge_wi;
+
+                wi = &sq->db.wqe_info[pi];
+                edge_wi = wi + contig_wqebbs;
+
+                /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+                for (; wi < edge_wi; wi++) {
+                        *wi = (struct mlx5e_xdp_wqe_info) {
+                                .num_wqebbs = 1,
+                                .num_pkts = 0,
+                        };
+                        mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+                }
+                sq->stats->nops += contig_wqebbs;
+
+                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+        }
+
+        return pi;
+}
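A worked example of the edge fill, assuming a ring whose page-sized fragment holds 128 WQEBBs: if sq->pc maps to pi = 126 and the MPWQE session asks for MLX5E_TX_MPW_MAX_WQEBBS (more than the 2 contiguous WQEBBs left in the fragment), the loop posts two 1-WQEBB NOPs, each recorded in wqe_info with num_pkts = 0 so that completion steps over them without popping anything from the FIFO; sq->pc then lands on the fragment boundary and the returned pi points at fully contiguous space. Note that the nops counter is bumped by contig_wqebbs, the number of NOPs actually posted.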
+
+static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+{
+        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+        struct mlx5e_xdpsq_stats *stats = sq->stats;
+        struct mlx5e_tx_wqe *wqe;
+        u16 pi;
+
+        pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+        wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+        net_prefetchw(wqe->data);
+
+        *session = (struct mlx5e_tx_mpwqe) {
+                .wqe = wqe,
+                .bytes_count = 0,
+                .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+                .pkt_count = 0,
+                .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
+        };
+
+        stats->mpwqe++;
+}
+
+void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
+{
+        struct mlx5_wq_cyc *wq = &sq->wq;
+        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+        struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
+        u16 ds_count = session->ds_count;
+        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+        struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
+
+        cseg->opmod_idx_opcode =
+                cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
+        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
+
+        wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
+        wi->num_pkts = session->pkt_count;
+
+        sq->pc += wi->num_wqebbs;
+
+        sq->doorbell_cseg = cseg;
+
+        session->wqe = NULL; /* Close session */
+}
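The WQEBB accounting in mlx5e_xdp_mpwqe_complete works out as follows: a WQEBB is 64 bytes and a data segment (DS) is 16 bytes, so MLX5_SEND_WQEBB_NUM_DS is 64 / 16 = 4. A session closing with, say, ds_count = 14 occupies DIV_ROUND_UP(14, 4) = 4 WQEBBs, so sq->pc advances by 4, while the qpn_ds word carries the exact DS count (14) to the hardware; the rounding exists only for ring-index bookkeeping.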
+
+enum {
+        MLX5E_XDP_CHECK_OK = 1,
+        MLX5E_XDP_CHECK_START_MPWQE = 2,
+};
+
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
+{
+        if (unlikely(!sq->mpwqe.wqe)) {
+                const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
+                if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
+                                                     stop_room))) {
+                        /* SQ is full, ring doorbell */
+                        mlx5e_xmit_xdp_doorbell(sq);
+                        sq->stats->full++;
+                        return -EBUSY;
+                }
+
+                return MLX5E_XDP_CHECK_START_MPWQE;
+        }
+
+        return MLX5E_XDP_CHECK_OK;
+}
+
+INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+                           struct mlx5e_xdp_info *xdpi, int check_result)
+{
+        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+        struct mlx5e_xdpsq_stats *stats = sq->stats;
+
+        if (unlikely(xdptxd->len > sq->hw_mtu)) {
+                stats->err++;
+                return false;
+        }
+
+        if (!check_result)
+                check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
+        if (unlikely(check_result < 0))
+                return false;
+
+        if (check_result == MLX5E_XDP_CHECK_START_MPWQE) {
+                /* Start the session when nothing can fail, so it's guaranteed
+                 * that if there is an active session, it has at least one dseg,
+                 * and it's safe to complete it at any time.
+                 */
+                mlx5e_xdp_mpwqe_session_start(sq);
+        }
+
+        mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
+
+        if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
+                mlx5e_xdp_mpwqe_complete(sq);
+
+        mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+        stats->xmit++;
+        return true;
+}
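The check_result parameter exists so that a batching caller can run the room check once per burst instead of once per frame; 0 means "not checked yet, check inside", and the XDP_TX and ndo_xdp_xmit paths in this file always pass 0. A hypothetical burst-transmit sketch of how the two function pointers are meant to be used together (the real consumer is the AF_XDP TX path; names and structure here are for illustration only):

        /* Hypothetical caller, for illustration only. */
        int check = INDIRECT_CALL_2(sq->xmit_xdp_frame_check,
                                    mlx5e_xmit_xdp_frame_check_mpwqe,
                                    mlx5e_xmit_xdp_frame_check, sq);
        if (unlikely(check < 0))
                return false; /* ring full; the check already rang the doorbell */

        if (!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
                             mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check))
                return false;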
+
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+{
+        if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+                /* SQ is full, ring doorbell */
+                mlx5e_xmit_xdp_doorbell(sq);
+                sq->stats->full++;
+                return -EBUSY;
+        }
+
+        return MLX5E_XDP_CHECK_OK;
+}
+
+INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+                     struct mlx5e_xdp_info *xdpi, int check_result)
 {
         struct mlx5_wq_cyc *wq = &sq->wq;
         u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -132,34 +320,28 @@
         struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
         struct mlx5_wqe_data_seg *dseg = wqe->data;
 
-        struct xdp_frame *xdpf = xdpi->xdpf;
-        dma_addr_t dma_addr = xdpi->dma_addr;
-        unsigned int dma_len = xdpf->len;
+        dma_addr_t dma_addr = xdptxd->dma_addr;
+        u32 dma_len = xdptxd->len;
 
         struct mlx5e_xdpsq_stats *stats = sq->stats;
 
-        prefetchw(wqe);
+        net_prefetchw(wqe);
 
         if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
                 stats->err++;
                 return false;
         }
 
-        if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
-                if (sq->doorbell) {
-                        /* SQ is full, ring doorbell */
-                        mlx5e_xmit_xdp_doorbell(sq);
-                        sq->doorbell = false;
-                }
-                stats->full++;
+        if (!check_result)
+                check_result = mlx5e_xmit_xdp_frame_check(sq);
+        if (unlikely(check_result < 0))
                 return false;
-        }
 
         cseg->fm_ce_se = 0;
 
         /* copy the inline part if required */
         if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
-                memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
+                memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
                 eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
                 dma_len -= MLX5E_XDP_MIN_INLINE;
                 dma_addr += MLX5E_XDP_MIN_INLINE;
@@ -172,24 +354,52 @@
 
         cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
 
-        /* move page to reference to sq responsibility,
-         * and mark so it's not put back in page-cache.
-         */
-        sq->db.xdpi[pi] = *xdpi;
         sq->pc++;
 
-        sq->doorbell = true;
+        sq->doorbell_cseg = cseg;
 
+        mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
         stats->xmit++;
         return true;
+}
+
+static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
+                                  struct mlx5e_xdp_wqe_info *wi,
+                                  u32 *xsk_frames,
+                                  bool recycle)
+{
+        struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+        u16 i;
+
+        for (i = 0; i < wi->num_pkts; i++) {
+                struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+                switch (xdpi.mode) {
+                case MLX5E_XDP_XMIT_MODE_FRAME:
+                        /* XDP_TX from the XSK RQ and XDP_REDIRECT */
+                        dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
+                                         xdpi.frame.xdpf->len, DMA_TO_DEVICE);
+                        xdp_return_frame(xdpi.frame.xdpf);
+                        break;
+                case MLX5E_XDP_XMIT_MODE_PAGE:
+                        /* XDP_TX from the regular RQ */
+                        mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
+                        break;
+                case MLX5E_XDP_XMIT_MODE_XSK:
+                        /* AF_XDP send */
+                        (*xsk_frames)++;
+                        break;
+                default:
+                        WARN_ON_ONCE(true);
+                }
+        }
 }
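The completion side depends on a strict FIFO discipline: one mlx5e_xdpi_fifo_push per packet at transmit time, and exactly wi->num_pkts pops here, in the same order. The push/pop helpers in en/xdp.h are, in sketch form, a plain power-of-two ring indexed by free-running producer/consumer counters:

        /* Sketch of the en/xdp.h FIFO helpers this loop pairs with. */
        static inline void
        mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
                             struct mlx5e_xdp_info *xi)
        {
                fifo->xi[(*fifo->pc)++ & fifo->mask] = *xi;
        }

        static inline struct mlx5e_xdp_info
        mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
        {
                return fifo->xi[(*fifo->cc)++ & fifo->mask];
        }

There is no full/empty check in the fast path: the FIFO is expected to be sized so that the SQ room checks above bound how many packets can ever be in flight.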
 
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 {
         struct mlx5e_xdpsq *sq;
         struct mlx5_cqe64 *cqe;
-        struct mlx5e_rq *rq;
-        bool is_redirect;
+        u32 xsk_frames = 0;
         u16 sqcc;
         int i;
 
@@ -202,9 +412,6 @@
         if (!cqe)
                 return false;
 
-        is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
-        rq = container_of(sq, struct mlx5e_rq, xdpsq);
-
         /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
          * otherwise a cq overrun may occur
          */
@@ -212,7 +419,8 @@
 
         i = 0;
         do {
-                u16 wqe_counter;
+                struct mlx5e_xdp_wqe_info *wi;
+                u16 wqe_counter, ci;
                 bool last_wqe;
 
                 mlx5_cqwq_pop(&cq->wq);
@@ -220,22 +428,27 @@
                 wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
                 do {
-                        u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-                        struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
-
                         last_wqe = (sqcc == wqe_counter);
-                        sqcc++;
+                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+                        wi = &sq->db.wqe_info[ci];
 
-                        if (is_redirect) {
-                                dma_unmap_single(sq->pdev, xdpi->dma_addr,
-                                                 xdpi->xdpf->len, DMA_TO_DEVICE);
-                                xdp_return_frame(xdpi->xdpf);
-                        } else {
-                                /* Recycle RX page */
-                                mlx5e_page_release(rq, &xdpi->di, true);
-                        }
+                        sqcc += wi->num_wqebbs;
+
+                        mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
                 } while (!last_wqe);
+
+                if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+                        netdev_WARN_ONCE(sq->channel->netdev,
+                                         "Bad OP in XDPSQ CQE: 0x%x\n",
+                                         get_cqe_opcode(cqe));
+                        mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+                                             (struct mlx5_err_cqe *)cqe);
+                        mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+                }
         } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+
+        if (xsk_frames)
+                xsk_tx_completed(sq->xsk_pool, xsk_frames);
 
         sq->stats->cqes += i;
 
@@ -250,27 +463,22 @@
 
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 {
-        struct mlx5e_rq *rq;
-        bool is_redirect;
-
-        is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
-        rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
+        u32 xsk_frames = 0;
 
         while (sq->cc != sq->pc) {
-                u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
-                struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+                struct mlx5e_xdp_wqe_info *wi;
+                u16 ci;
 
-                sq->cc++;
+                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+                wi = &sq->db.wqe_info[ci];
 
-                if (is_redirect) {
-                        dma_unmap_single(sq->pdev, xdpi->dma_addr,
-                                         xdpi->xdpf->len, DMA_TO_DEVICE);
-                        xdp_return_frame(xdpi->xdpf);
-                } else {
-                        /* Recycle RX page */
-                        mlx5e_page_release(rq, &xdpi->di, false);
-                }
+                sq->cc += wi->num_wqebbs;
+
+                mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false);
         }
+
+        if (xsk_frames)
+                xsk_tx_completed(sq->xsk_pool, xsk_frames);
 }
 
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -298,28 +506,63 @@
 
         for (i = 0; i < n; i++) {
                 struct xdp_frame *xdpf = frames[i];
+                struct mlx5e_xmit_data xdptxd;
                 struct mlx5e_xdp_info xdpi;
+                bool ret;
 
-                xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
-                                               DMA_TO_DEVICE);
-                if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
+                xdptxd.data = xdpf->data;
+                xdptxd.len = xdpf->len;
+                xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
+                                                 xdptxd.len, DMA_TO_DEVICE);
+
+                if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) {
                         xdp_return_frame_rx_napi(xdpf);
                         drops++;
                         continue;
                 }
 
-                xdpi.xdpf = xdpf;
+                xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
+                xdpi.frame.xdpf = xdpf;
+                xdpi.frame.dma_addr = xdptxd.dma_addr;
 
-                if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
-                        dma_unmap_single(sq->pdev, xdpi.dma_addr,
-                                         xdpf->len, DMA_TO_DEVICE);
+                ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+                                      mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+                if (unlikely(!ret)) {
+                        dma_unmap_single(sq->pdev, xdptxd.dma_addr,
+                                         xdptxd.len, DMA_TO_DEVICE);
                        xdp_return_frame_rx_napi(xdpf);
                         drops++;
                 }
         }
 
-        if (flags & XDP_XMIT_FLUSH)
+        if (flags & XDP_XMIT_FLUSH) {
+                if (sq->mpwqe.wqe)
+                        mlx5e_xdp_mpwqe_complete(sq);
                 mlx5e_xmit_xdp_doorbell(sq);
+        }
 
         return n - drops;
 }
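Both transmit call sites use INDIRECT_CALL_2 instead of a bare indirect call through sq->xmit_xdp_frame. With retpolines enabled, indirect branches are costly in the hot path; the wrapper from include/linux/indirect_call_wrapper.h turns the common cases into direct calls by comparing the pointer against the named candidates, roughly:

        #define INDIRECT_CALL_1(f, f1, ...)                             \
                (likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__))
        #define INDIRECT_CALL_2(f, f2, f1, ...)                         \
                (likely(f == f2) ? f2(__VA_ARGS__) :                    \
                                   INDIRECT_CALL_1(f, f1, __VA_ARGS__))

INDIRECT_CALLABLE_SCOPE on the handler definitions keeps them externally visible when retpolines are enabled (so the comparison is possible) and static otherwise.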
+
+void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
+{
+        struct mlx5e_xdpsq *xdpsq = rq->xdpsq;
+
+        if (xdpsq->mpwqe.wqe)
+                mlx5e_xdp_mpwqe_complete(xdpsq);
+
+        mlx5e_xmit_xdp_doorbell(xdpsq);
+
+        if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
+                xdp_do_flush_map();
+                __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
+        }
+}
+
+void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
+{
+        sq->xmit_xdp_frame_check = is_mpw ?
+                mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check;
+        sq->xmit_xdp_frame = is_mpw ?
+                mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
+}
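The function pointers themselves are fixed once per SQ by mlx5e_set_xmit_fp when the queue is opened, so the fast path never re-reads configuration. A hypothetical wiring sketch (the real call site lives in the channel open path in en_main.c; the MPWQE choice is driven by device capabilities and the xdp_tx_mpwqe private flag):

        /* Hypothetical open-time wiring, for illustration only. */
        static void open_xdpsq_sketch(struct mlx5e_xdpsq *sq,
                                      struct mlx5e_params *params)
        {
                bool is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);

                mlx5e_set_xmit_fp(sq, is_mpw);
        }

Pinning the check and xmit handlers together keeps the pair consistent: the MPWQE xmit handler only ever sees a verdict produced by the MPWQE check (or produces one itself when check_result == 0), never one from the legacy check.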