 .. |  .. |
 15 |  15 | #include <linux/bpf.h>
 16 |  16 | #include <linux/bpf_trace.h>
 17 |  17 | #include <linux/filter.h>
    |  18 | +#include <net/page_pool.h>
 18 |  19 | #include "bnxt_hsi.h"
 19 |  20 | #include "bnxt.h"
 20 |  21 | #include "bnxt_xdp.h"
 21 |  22 |
 22 |     | -void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 23 |     | -		   dma_addr_t mapping, u32 len, u16 rx_prod)
    |  23 | +struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
    |  24 | +				   struct bnxt_tx_ring_info *txr,
    |  25 | +				   dma_addr_t mapping, u32 len)
 24 |  26 | {
 25 |  27 | 	struct bnxt_sw_tx_bd *tx_buf;
 26 |  28 | 	struct tx_bd *txbd;
 .. |  .. |
 29 |  31 |
 30 |  32 | 	prod = txr->tx_prod;
 31 |  33 | 	tx_buf = &txr->tx_buf_ring[prod];
 32 |     | -	tx_buf->rx_prod = rx_prod;
 33 |  34 |
 34 |  35 | 	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
 35 |  36 | 	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
 .. |  .. |
 40 |  41 |
 41 |  42 | 	prod = NEXT_TX(prod);
 42 |  43 | 	txr->tx_prod = prod;
    |  44 | +	return tx_buf;
    |  45 | +}
    |  46 | +
    |  47 | +static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
    |  48 | +			    dma_addr_t mapping, u32 len, u16 rx_prod)
    |  49 | +{
    |  50 | +	struct bnxt_sw_tx_bd *tx_buf;
    |  51 | +
    |  52 | +	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
    |  53 | +	tx_buf->rx_prod = rx_prod;
    |  54 | +	tx_buf->action = XDP_TX;
    |  55 | +}
    |  56 | +
    |  57 | +static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
    |  58 | +				     struct bnxt_tx_ring_info *txr,
    |  59 | +				     dma_addr_t mapping, u32 len,
    |  60 | +				     struct xdp_frame *xdpf)
    |  61 | +{
    |  62 | +	struct bnxt_sw_tx_bd *tx_buf;
    |  63 | +
    |  64 | +	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
    |  65 | +	tx_buf->action = XDP_REDIRECT;
    |  66 | +	tx_buf->xdpf = xdpf;
    |  67 | +	dma_unmap_addr_set(tx_buf, mapping, mapping);
    |  68 | +	dma_unmap_len_set(tx_buf, len, 0);
 43 |  69 | }
 44 |  70 |
 45 |  71 | void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 46 |  72 | {
 47 |  73 | 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
 48 |  74 | 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
    |  75 | +	bool rx_doorbell_needed = false;
 49 |  76 | 	struct bnxt_sw_tx_bd *tx_buf;
 50 |  77 | 	u16 tx_cons = txr->tx_cons;
 51 |  78 | 	u16 last_tx_cons = tx_cons;
 52 |     | -	u16 rx_prod;
 53 |  79 | 	int i;
 54 |  80 |
 55 |  81 | 	for (i = 0; i < nr_pkts; i++) {
 56 |     | -		last_tx_cons = tx_cons;
    |  82 | +		tx_buf = &txr->tx_buf_ring[tx_cons];
    |  83 | +
    |  84 | +		if (tx_buf->action == XDP_REDIRECT) {
    |  85 | +			struct pci_dev *pdev = bp->pdev;
    |  86 | +
    |  87 | +			dma_unmap_single(&pdev->dev,
    |  88 | +					 dma_unmap_addr(tx_buf, mapping),
    |  89 | +					 dma_unmap_len(tx_buf, len),
    |  90 | +					 PCI_DMA_TODEVICE);
    |  91 | +			xdp_return_frame(tx_buf->xdpf);
    |  92 | +			tx_buf->action = 0;
    |  93 | +			tx_buf->xdpf = NULL;
    |  94 | +		} else if (tx_buf->action == XDP_TX) {
    |  95 | +			rx_doorbell_needed = true;
    |  96 | +			last_tx_cons = tx_cons;
    |  97 | +		}
 57 |  98 | 		tx_cons = NEXT_TX(tx_cons);
 58 |  99 | 	}
 59 | 100 | 	txr->tx_cons = tx_cons;
 60 |     | -	if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
 61 |     | -		rx_prod = rxr->rx_prod;
 62 |     | -	} else {
    | 101 | +	if (rx_doorbell_needed) {
 63 | 102 | 		tx_buf = &txr->tx_buf_ring[last_tx_cons];
 64 |     | -		rx_prod = tx_buf->rx_prod;
    | 103 | +		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
 65 | 104 | 	}
 66 |     | -	bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod);
 67 | 105 | }
 68 | 106 |
 69 | 107 | /* returns the following:
 .. |  .. |
 88 | 126 | 		return false;
 89 | 127 |
 90 | 128 | 	pdev = bp->pdev;
 91 |     | -	txr = rxr->bnapi->tx_ring;
 92 | 129 | 	rx_buf = &rxr->rx_buf_ring[cons];
 93 | 130 | 	offset = bp->rx_offset;
 94 | 131 |
    | 132 | +	mapping = rx_buf->mapping - bp->rx_dma_offset;
    | 133 | +	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
    | 134 | +
    | 135 | +	txr = rxr->bnapi->tx_ring;
 95 | 136 | 	xdp.data_hard_start = *data_ptr - offset;
 96 | 137 | 	xdp.data = *data_ptr;
 97 | 138 | 	xdp_set_data_meta_invalid(&xdp);
 98 | 139 | 	xdp.data_end = *data_ptr + *len;
 99 | 140 | 	xdp.rxq = &rxr->xdp_rxq;
    | 141 | +	xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
100 | 142 | 	orig_data = xdp.data;
101 |     | -	mapping = rx_buf->mapping - bp->rx_dma_offset;
102 |     | -
103 |     | -	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
104 | 143 |
105 | 144 | 	rcu_read_lock();
106 | 145 | 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 .. |  .. |
132 | 171 | 		*event = BNXT_TX_EVENT;
133 | 172 | 		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
134 | 173 | 					   bp->rx_dir);
135 |     | -		bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
136 |     | -			      NEXT_RX(rxr->rx_prod));
    | 174 | +		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
    | 175 | +				NEXT_RX(rxr->rx_prod));
137 | 176 | 		bnxt_reuse_rx_data(rxr, cons, page);
138 | 177 | 		return true;
    | 178 | +	case XDP_REDIRECT:
    | 179 | +		/* if we are calling this here then we know that the
    | 180 | +		 * redirect is coming from a frame received by the
    | 181 | +		 * bnxt_en driver.
    | 182 | +		 */
    | 183 | +		dma_unmap_page_attrs(&pdev->dev, mapping,
    | 184 | +				     PAGE_SIZE, bp->rx_dir,
    | 185 | +				     DMA_ATTR_WEAK_ORDERING);
    | 186 | +
    | 187 | +		/* if we are unable to allocate a new buffer, abort and reuse */
    | 188 | +		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
    | 189 | +			trace_xdp_exception(bp->dev, xdp_prog, act);
    | 190 | +			bnxt_reuse_rx_data(rxr, cons, page);
    | 191 | +			return true;
    | 192 | +		}
    | 193 | +
    | 194 | +		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
    | 195 | +			trace_xdp_exception(bp->dev, xdp_prog, act);
    | 196 | +			page_pool_recycle_direct(rxr->page_pool, page);
    | 197 | +			return true;
    | 198 | +		}
    | 199 | +
    | 200 | +		*event |= BNXT_REDIRECT_EVENT;
    | 201 | +		break;
139 | 202 | 	default:
140 | 203 | 		bpf_warn_invalid_xdp_action(act);
141 |     | -		/* Fall thru */
    | 204 | +		fallthrough;
142 | 205 | 	case XDP_ABORTED:
143 | 206 | 		trace_xdp_exception(bp->dev, xdp_prog, act);
144 |     | -		/* Fall thru */
    | 207 | +		fallthrough;
145 | 208 | 	case XDP_DROP:
146 | 209 | 		bnxt_reuse_rx_data(rxr, cons, page);
147 | 210 | 		break;
148 | 211 | 	}
149 | 212 | 	return true;
    | 213 | +}
    | 214 | +
    | 215 | +int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
    | 216 | +		  struct xdp_frame **frames, u32 flags)
    | 217 | +{
    | 218 | +	struct bnxt *bp = netdev_priv(dev);
    | 219 | +	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
    | 220 | +	struct pci_dev *pdev = bp->pdev;
    | 221 | +	struct bnxt_tx_ring_info *txr;
    | 222 | +	dma_addr_t mapping;
    | 223 | +	int drops = 0;
    | 224 | +	int ring;
    | 225 | +	int i;
    | 226 | +
    | 227 | +	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
    | 228 | +	    !bp->tx_nr_rings_xdp ||
    | 229 | +	    !xdp_prog)
    | 230 | +		return -EINVAL;
    | 231 | +
    | 232 | +	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
    | 233 | +	txr = &bp->tx_ring[ring];
    | 234 | +
    | 235 | +	for (i = 0; i < num_frames; i++) {
    | 236 | +		struct xdp_frame *xdp = frames[i];
    | 237 | +
    | 238 | +		if (!txr || !bnxt_tx_avail(bp, txr) ||
    | 239 | +		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
    | 240 | +			xdp_return_frame_rx_napi(xdp);
    | 241 | +			drops++;
    | 242 | +			continue;
    | 243 | +		}
    | 244 | +
    | 245 | +		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
    | 246 | +					 DMA_TO_DEVICE);
    | 247 | +
    | 248 | +		if (dma_mapping_error(&pdev->dev, mapping)) {
    | 249 | +			xdp_return_frame_rx_napi(xdp);
    | 250 | +			drops++;
    | 251 | +			continue;
    | 252 | +		}
    | 253 | +		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
    | 254 | +	}
    | 255 | +
    | 256 | +	if (flags & XDP_XMIT_FLUSH) {
    | 257 | +		/* Sync BD data before updating doorbell */
    | 258 | +		wmb();
    | 259 | +		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
    | 260 | +	}
    | 261 | +
    | 262 | +	return num_frames - drops;
150 | 263 | }
151 | 264 |
152 | 265 | /* Under rtnl_lock */
 .. |  .. |
199 | 312 | 	bp->tx_nr_rings_xdp = tx_xdp;
200 | 313 | 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
201 | 314 | 	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
202 |     | -	bp->num_stat_ctxs = bp->cp_nr_rings;
203 | 315 | 	bnxt_set_tpa_flags(bp);
204 | 316 | 	bnxt_set_ring_params(bp);
205 | 317 |
 .. |  .. |
217 | 329 | 	switch (xdp->command) {
218 | 330 | 	case XDP_SETUP_PROG:
219 | 331 | 		rc = bnxt_xdp_set(bp, xdp->prog);
220 |     | -		break;
221 |     | -	case XDP_QUERY_PROG:
222 |     | -		xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
223 |     | -		rc = 0;
224 | 332 | 		break;
225 | 333 | 	default:
226 | 334 | 		rc = -EINVAL;
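
Not shown in this excerpt is the bnxt.c hunk that exposes the new bnxt_xdp_xmit() entry point to the XDP core. As a minimal sketch, assuming the standard net_device_ops mechanism (the table contents below are an assumption, not part of this diff), the hookup would look roughly like:

	/* Sketch only (assumed, not part of this diff): bnxt_xdp_xmit()
	 * matches the kernel's ndo_xdp_xmit prototype, so registering it
	 * is a one-line addition to the driver's net_device_ops table.
	 */
	static const struct net_device_ops bnxt_netdev_ops = {
		/* ... existing callbacks elided ... */
		.ndo_bpf	= bnxt_xdp,	 /* XDP_SETUP_PROG switch above */
		.ndo_xdp_xmit	= bnxt_xdp_xmit, /* XDP_REDIRECT transmit path */
	};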