  .. |   .. |
     |    1 | +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   1 |    2 |  /* QLogic qede NIC Driver
   2 |    3 |   * Copyright (c) 2015-2017 QLogic Corporation
   3 |      | - *
   4 |      | - * This software is available to you under a choice of one of two
   5 |      | - * licenses. You may choose to be licensed under the terms of the GNU
   6 |      | - * General Public License (GPL) Version 2, available from the file
   7 |      | - * COPYING in the main directory of this source tree, or the
   8 |      | - * OpenIB.org BSD license below:
   9 |      | - *
  10 |      | - * Redistribution and use in source and binary forms, with or
  11 |      | - * without modification, are permitted provided that the following
  12 |      | - * conditions are met:
  13 |      | - *
  14 |      | - * - Redistributions of source code must retain the above
  15 |      | - *   copyright notice, this list of conditions and the following
  16 |      | - *   disclaimer.
  17 |      | - *
  18 |      | - * - Redistributions in binary form must reproduce the above
  19 |      | - *   copyright notice, this list of conditions and the following
  20 |      | - *   disclaimer in the documentation and /or other materials
  21 |      | - *   provided with the distribution.
  22 |      | - *
  23 |      | - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 |      | - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 |      | - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 |      | - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 |      | - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 |      | - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 |      | - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 |      | - * SOFTWARE.
     |    4 | + * Copyright (c) 2019-2020 Marvell International Ltd.
  31 |    5 |   */
     |    6 | +
  32 |    7 |  #include <linux/netdevice.h>
  33 |    8 |  #include <linux/etherdevice.h>
  34 |    9 |  #include <linux/skbuff.h>
  .. |   .. |
 327 |  302 |          wmb();
 328 |  303 |  }
 329 |  304 | 
 330 |      | -static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 331 |      | -                         struct sw_rx_data *metadata, u16 padding, u16 length)
     |  305 | +static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
     |  306 | +                         u16 len, struct page *page, struct xdp_frame *xdpf)
 332 |  307 |  {
 333 |      | -        struct qede_tx_queue *txq = fp->xdp_tx;
 334 |      | -        struct eth_tx_1st_bd *first_bd;
 335 |      | -        u16 idx = txq->sw_tx_prod;
     |  308 | +        struct eth_tx_1st_bd *bd;
     |  309 | +        struct sw_tx_xdp *xdp;
 336 |  310 |          u16 val;
 337 |  311 | 
 338 |      | -        if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
     |  312 | +        if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
     |  313 | +                     txq->num_tx_buffers)) {
 339 |  314 |                  txq->stopped_cnt++;
 340 |  315 |                  return -ENOMEM;
 341 |  316 |          }
 342 |  317 | 
 343 |      | -        first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
     |  318 | +        bd = qed_chain_produce(&txq->tx_pbl);
     |  319 | +        bd->data.nbds = 1;
     |  320 | +        bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
 344 |  321 | 
 345 |      | -        memset(first_bd, 0, sizeof(*first_bd));
 346 |      | -        first_bd->data.bd_flags.bitfields =
 347 |      | -                BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
 348 |      | -
 349 |      | -        val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
     |  322 | +        val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
 350 |  323 |                ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
 351 |  324 | 
 352 |      | -        first_bd->data.bitfields |= cpu_to_le16(val);
 353 |      | -        first_bd->data.nbds = 1;
     |  325 | +        bd->data.bitfields = cpu_to_le16(val);
 354 |  326 | 
 355 |  327 |          /* We can safely ignore the offset, as it's 0 for XDP */
 356 |      | -        BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
     |  328 | +        BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
 357 |  329 | 
 358 |      | -        /* Synchronize the buffer back to device, as program [probably]
 359 |      | -         * has changed it.
 360 |      | -         */
 361 |      | -        dma_sync_single_for_device(&edev->pdev->dev,
 362 |      | -                                   metadata->mapping + padding,
 363 |      | -                                   length, PCI_DMA_TODEVICE);
     |  330 | +        xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
     |  331 | +        xdp->mapping = dma;
     |  332 | +        xdp->page = page;
     |  333 | +        xdp->xdpf = xdpf;
 364 |  334 | 
 365 |      | -        txq->sw_tx_ring.xdp[idx].page = metadata->data;
 366 |      | -        txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
 367 |  335 |          txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 368 |  336 | 
 369 |      | -        /* Mark the fastpath for future XDP doorbell */
 370 |      | -        fp->xdp_xmit = 1;
 371 |      | -
 372 |  337 |          return 0;
     |  338 | +}
     |  339 | +
     |  340 | +int qede_xdp_transmit(struct net_device *dev, int n_frames,
     |  341 | +                      struct xdp_frame **frames, u32 flags)
     |  342 | +{
     |  343 | +        struct qede_dev *edev = netdev_priv(dev);
     |  344 | +        struct device *dmadev = &edev->pdev->dev;
     |  345 | +        struct qede_tx_queue *xdp_tx;
     |  346 | +        struct xdp_frame *xdpf;
     |  347 | +        dma_addr_t mapping;
     |  348 | +        int i, drops = 0;
     |  349 | +        u16 xdp_prod;
     |  350 | +
     |  351 | +        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
     |  352 | +                return -EINVAL;
     |  353 | +
     |  354 | +        if (unlikely(!netif_running(dev)))
     |  355 | +                return -ENETDOWN;
     |  356 | +
     |  357 | +        i = smp_processor_id() % edev->total_xdp_queues;
     |  358 | +        xdp_tx = edev->fp_array[i].xdp_tx;
     |  359 | +
     |  360 | +        spin_lock(&xdp_tx->xdp_tx_lock);
     |  361 | +
     |  362 | +        for (i = 0; i < n_frames; i++) {
     |  363 | +                xdpf = frames[i];
     |  364 | +
     |  365 | +                mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
     |  366 | +                                         DMA_TO_DEVICE);
     |  367 | +                if (unlikely(dma_mapping_error(dmadev, mapping))) {
     |  368 | +                        xdp_return_frame_rx_napi(xdpf);
     |  369 | +                        drops++;
     |  370 | +
     |  371 | +                        continue;
     |  372 | +                }
     |  373 | +
     |  374 | +                if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
     |  375 | +                                           NULL, xdpf))) {
     |  376 | +                        xdp_return_frame_rx_napi(xdpf);
     |  377 | +                        drops++;
     |  378 | +                }
     |  379 | +        }
     |  380 | +
     |  381 | +        if (flags & XDP_XMIT_FLUSH) {
     |  382 | +                xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
     |  383 | +
     |  384 | +                xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
     |  385 | +                qede_update_tx_producer(xdp_tx);
     |  386 | +        }
     |  387 | +
     |  388 | +        spin_unlock(&xdp_tx->xdp_tx_lock);
     |  389 | +
     |  390 | +        return n_frames - drops;
 373 |  391 |  }
 374 |  392 | 
 375 |  393 |  int qede_txq_has_work(struct qede_tx_queue *txq)
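For orientation (not part of this diff): qede_xdp_transmit() is the driver's ndo_xdp_xmit implementation, so frames arrive here from xdp_do_redirect()/devmap flushes on behalf of other interfaces. A minimal sketch of how such a hook is exposed, assuming the usual net_device_ops registration (qede's real ops table lives in qede_main.c):

    /* Sketch: .ndo_xdp_xmit is the standard hook through which the XDP
     * core hands batches of xdp_frames to a driver; XDP_XMIT_FLUSH asks
     * it to ring the doorbell once per batch instead of per frame.
     */
    static const struct net_device_ops qede_netdev_ops = {
            /* ... other hooks elided ... */
            .ndo_xdp_xmit   = qede_xdp_transmit,
    };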
  .. |   .. |
 387 |  405 | 
 388 |  406 |  static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 389 |  407 |  {
 390 |      | -        u16 hw_bd_cons, idx;
     |  408 | +        struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
     |  409 | +        struct device *dev = &edev->pdev->dev;
     |  410 | +        struct xdp_frame *xdpf;
     |  411 | +        u16 hw_bd_cons;
 391 |  412 | 
 392 |  413 |          hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
 393 |  414 |          barrier();
 394 |  415 | 
 395 |  416 |          while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
     |  417 | +                xdp_info = xdp_arr + txq->sw_tx_cons;
     |  418 | +                xdpf = xdp_info->xdpf;
     |  419 | +
     |  420 | +                if (xdpf) {
     |  421 | +                        dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
     |  422 | +                                         DMA_TO_DEVICE);
     |  423 | +                        xdp_return_frame(xdpf);
     |  424 | +
     |  425 | +                        xdp_info->xdpf = NULL;
     |  426 | +                } else {
     |  427 | +                        dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
     |  428 | +                                       DMA_BIDIRECTIONAL);
     |  429 | +                        __free_page(xdp_info->page);
     |  430 | +                }
     |  431 | +
 396 |  432 |                  qed_chain_consume(&txq->tx_pbl);
 397 |      | -                idx = txq->sw_tx_cons;
 398 |      | -
 399 |      | -                dma_unmap_page(&edev->pdev->dev,
 400 |      | -                               txq->sw_tx_ring.xdp[idx].mapping,
 401 |      | -                               PAGE_SIZE, DMA_BIDIRECTIONAL);
 402 |      | -                __free_page(txq->sw_tx_ring.xdp[idx].page);
 403 |      | -
 404 |  433 |                  txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 405 |  434 |                  txq->xmit_pkts++;
 406 |  435 |          }
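A hedged recap (comment only, not patch code) of why the completion loop now has two branches: each sw_tx_xdp entry must be undone exactly the way it was set up at transmit time.

    /* xdpf != NULL: frame arrived via ndo_xdp_xmit and was mapped with
     *               dma_map_single(..., xdpf->len) -> undo with
     *               dma_unmap_single() and return it through
     *               xdp_return_frame().
     * xdpf == NULL: local XDP_TX; the whole Rx page was premapped
     *               DMA_BIDIRECTIONAL -> undo with dma_unmap_page(...,
     *               PAGE_SIZE) and free the page itself.
     */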
  .. |   .. |
 580 |  609 | 
 581 |  610 |          internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
 582 |  611 |                          (u32 *)&rx_prods);
 583 |      | -
 584 |      | -        /* mmiowb is needed to synchronize doorbell writes from more than one
 585 |      | -         * processor. It guarantees that the write arrives to the device before
 586 |      | -         * the napi lock is released and another qede_poll is called (possibly
 587 |      | -         * on another CPU). Without this barrier, the next doorbell can bypass
 588 |      | -         * this doorbell. This is applicable to IA64/Altix systems.
 589 |      | -         */
 590 |      | -        mmiowb();
 591 |  612 |  }
 592 |  613 | 
 593 |  614 |  static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
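One sentence of context, since the diff deletes the rationale along with the call: after the kernel-wide mmiowb() removal, releasing the lock that serializes these producer updates already orders the MMIO write on the architectures that needed the barrier, so the call is redundant rather than merely relocated.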
  .. |   .. |
 731 |  752 |          buf = page_address(bd->data) + bd->page_offset;
 732 |  753 |          skb = build_skb(buf, rxq->rx_buf_seg_size);
 733 |  754 | 
     |  755 | +        if (unlikely(!skb))
     |  756 | +                return NULL;
     |  757 | +
 734 |  758 |          skb_reserve(skb, pad);
 735 |  759 |          skb_put(skb, len);
 736 |  760 | 
  .. |   .. |
 787 |  811 |                          return NULL;
 788 |  812 | 
 789 |  813 |                  skb_reserve(skb, pad);
 790 |      | -                memcpy(skb_put(skb, len),
 791 |      | -                       page_address(bd->data) + offset, len);
     |  814 | +                skb_put_data(skb, page_address(bd->data) + offset, len);
 792 |  815 |                  qede_reuse_page(rxq, bd);
 793 |  816 |                  goto out;
 794 |  817 |          }
  .. |   .. |
 857 |  880 |          qede_set_gro_params(edev, tpa_info->skb, cqe);
 858 |  881 | 
 859 |  882 |  cons_buf: /* We still need to handle bd_len_list to consume buffers */
 860 |      | -        if (likely(cqe->ext_bd_len_list[0]))
     |  883 | +        if (likely(cqe->bw_ext_bd_len_list[0]))
 861 |  884 |                  qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
 862 |      | -                                   le16_to_cpu(cqe->ext_bd_len_list[0]));
     |  885 | +                                   le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
 863 |  886 | 
 864 |      | -        if (unlikely(cqe->ext_bd_len_list[1])) {
     |  887 | +        if (unlikely(cqe->bw_ext_bd_len_list[1])) {
 865 |  888 |                  DP_ERR(edev,
 866 |      | -                       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
     |  889 | +                       "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
 867 |  890 |                  tpa_info->state = QEDE_AGG_STATE_ERROR;
 868 |  891 |          }
 869 |  892 |  }
  .. |   .. |
1075 | 1098 |          xdp_set_data_meta_invalid(&xdp);
1076 | 1099 |          xdp.data_end = xdp.data + *len;
1077 | 1100 |          xdp.rxq = &rxq->xdp_rxq;
     | 1101 | +        xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
1078 | 1102 | 
1079 | 1103 |          /* Queues always have a full reset currently, so for the time
1080 | 1104 |           * being until there's atomic program replace just mark read
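xdp.frame_sz tells the XDP core how much room actually sits behind the packet, which is what lets bpf_xdp_adjust_tail() grow a frame safely. A hypothetical program that depends on it (illustrative only, not from this patch):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_grow_tail(struct xdp_md *ctx)
    {
            /* Grow the tail by 4 bytes; the core can only bounds-check
             * this because the driver reported frame_sz.
             */
            if (bpf_xdp_adjust_tail(ctx, 4))
                    return XDP_ABORTED;

            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";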
  .. |   .. |
1097 | 1121 |          switch (act) {
1098 | 1122 |          case XDP_TX:
1099 | 1123 |                  /* We need the replacement buffer before transmit. */
1100 |      | -                if (qede_alloc_rx_buffer(rxq, true)) {
     | 1124 | +                if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1101 | 1125 |                          qede_recycle_rx_bd_ring(rxq, 1);
     | 1126 | +
1102 | 1127 |                          trace_xdp_exception(edev->ndev, prog, act);
1103 |      | -                        return false;
     | 1128 | +                        break;
1104 | 1129 |                  }
1105 | 1130 | 
1106 | 1131 |                  /* Now if there's a transmission problem, we'd still have to
1107 | 1132 |                   * throw current buffer, as replacement was already allocated.
1108 | 1133 |                   */
1109 |      | -                if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
1110 |      | -                        dma_unmap_page(rxq->dev, bd->mapping,
1111 |      | -                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
     | 1134 | +                if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
     | 1135 | +                                           *data_offset, *len, bd->data,
     | 1136 | +                                           NULL))) {
     | 1137 | +                        dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
     | 1138 | +                                       rxq->data_direction);
1112 | 1139 |                          __free_page(bd->data);
     | 1140 | +
1113 | 1141 |                          trace_xdp_exception(edev->ndev, prog, act);
     | 1142 | +                } else {
     | 1143 | +                        dma_sync_single_for_device(rxq->dev,
     | 1144 | +                                                   bd->mapping + *data_offset,
     | 1145 | +                                                   *len, rxq->data_direction);
     | 1146 | +                        fp->xdp_xmit |= QEDE_XDP_TX;
1114 | 1147 |                  }
1115 | 1148 | 
1116 | 1149 |                  /* Regardless, we've consumed an Rx BD */
1117 | 1150 |                  qede_rx_bd_ring_consume(rxq);
1118 |      | -                return false;
     | 1151 | +                break;
     | 1152 | +        case XDP_REDIRECT:
     | 1153 | +                /* We need the replacement buffer before transmit. */
     | 1154 | +                if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
     | 1155 | +                        qede_recycle_rx_bd_ring(rxq, 1);
1119 | 1156 | 
     | 1157 | +                        trace_xdp_exception(edev->ndev, prog, act);
     | 1158 | +                        break;
     | 1159 | +                }
     | 1160 | +
     | 1161 | +                dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
     | 1162 | +                               rxq->data_direction);
     | 1163 | +
     | 1164 | +                if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
     | 1165 | +                        DP_NOTICE(edev, "Failed to redirect the packet\n");
     | 1166 | +                else
     | 1167 | +                        fp->xdp_xmit |= QEDE_XDP_REDIRECT;
     | 1168 | +
     | 1169 | +                qede_rx_bd_ring_consume(rxq);
     | 1170 | +                break;
1120 | 1171 |          default:
1121 | 1172 |                  bpf_warn_invalid_xdp_action(act);
1122 |      | -                /* Fall through */
     | 1173 | +                fallthrough;
1123 | 1174 |          case XDP_ABORTED:
1124 | 1175 |                  trace_xdp_exception(edev->ndev, prog, act);
1125 |      | -                /* Fall through */
     | 1176 | +                fallthrough;
1126 | 1177 |          case XDP_DROP:
1127 | 1178 |                  qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1128 | 1179 |          }
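The branches above are driven by the verdict of the attached BPF program. A hypothetical program exercising both paths (IFINDEX_OUT is an assumed egress ifindex, not something from this patch); build with clang -O2 -target bpf:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    #define IFINDEX_OUT 2 /* assumption: ifindex of the egress port */

    SEC("xdp")
    int xdp_tx_or_redirect(struct xdp_md *ctx)
    {
            /* Same port: bounce the frame back (XDP_TX branch above). */
            if (ctx->ingress_ifindex == IFINDEX_OUT)
                    return XDP_TX;

            /* Otherwise hand it to another device (XDP_REDIRECT branch). */
            return bpf_redirect(IFINDEX_OUT, 0);
    }

    char _license[] SEC("license") = "GPL";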
  .. |   .. |
1386 | 1437 |                                                    napi);
1387 | 1438 |          struct qede_dev *edev = fp->edev;
1388 | 1439 |          int rx_work_done = 0;
     | 1440 | +        u16 xdp_prod;
     | 1441 | +
     | 1442 | +        fp->xdp_xmit = 0;
1389 | 1443 | 
1390 | 1444 |          if (likely(fp->type & QEDE_FASTPATH_TX)) {
1391 | 1445 |                  int cos;
  .. |   .. |
1402 | 1456 |          rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1403 | 1457 |                          qede_has_rx_work(fp->rxq)) ?
1404 | 1458 |                          qede_rx_int(fp, budget) : 0;
1405 |      | -        if (rx_work_done < budget) {
     | 1459 | +
     | 1460 | +        if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
     | 1461 | +                xdp_do_flush();
     | 1462 | +
     | 1463 | +        /* Handle case where we are called by netpoll with a budget of 0 */
     | 1464 | +        if (rx_work_done < budget || !budget) {
1406 | 1465 |                  if (!qede_poll_is_more_work(fp)) {
1407 | 1466 |                          napi_complete_done(napi, rx_work_done);
1408 | 1467 | 
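A comment-form recap (not patch code) of the ordering the poll loop now relies on:

    /* Per NAPI cycle:
     *   1. qede_rx_int() runs XDP and may set QEDE_XDP_TX and/or
     *      QEDE_XDP_REDIRECT in fp->xdp_xmit (cleared on entry);
     *   2. xdp_do_flush() drains the redirect queues before the poll
     *      is allowed to complete;
     *   3. napi_complete_done() also fires when netpoll calls in with
     *      budget == 0 and no other work is pending;
     *   4. the XDP_TX doorbell below rings once per poll, not per packet.
     */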
1408 | 1467 | |
---|
.. | .. |
---|
1413 | 1472 | } |
---|
1414 | 1473 | } |
---|
1415 | 1474 | |
---|
1416 | | - if (fp->xdp_xmit) { |
---|
1417 | | - u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); |
---|
| 1475 | + if (fp->xdp_xmit & QEDE_XDP_TX) { |
---|
| 1476 | + xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); |
---|
1418 | 1477 | |
---|
1419 | | - fp->xdp_xmit = 0; |
---|
1420 | 1478 | fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); |
---|
1421 | 1479 | qede_update_tx_producer(fp->xdp_tx); |
---|
1422 | 1480 | } |
---|
  .. |   .. |
1466 | 1524 |  #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
1467 | 1525 |          if (qede_pkt_req_lin(skb, xmit_type)) {
1468 | 1526 |                  if (skb_linearize(skb)) {
1469 |      | -                        DP_NOTICE(edev,
1470 |      | -                                  "SKB linearization failed - silently dropping this SKB\n");
     | 1527 | +                        txq->tx_mem_alloc_err++;
     | 1528 | +
1471 | 1529 |                          dev_kfree_skb_any(skb);
1472 | 1530 |                          return NETDEV_TX_OK;
1473 | 1531 |                  }
  .. |   .. |
1672 | 1730 |          txq->tx_db.data.bd_prod =
1673 | 1731 |                  cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
1674 | 1732 | 
1675 |      | -        if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
     | 1733 | +        if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
1676 | 1734 |                  qede_update_tx_producer(txq);
1677 | 1735 | 
1678 | 1736 |          if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
1679 | 1737 |              < (MAX_SKB_FRAGS + 1))) {
1680 |      | -                if (skb->xmit_more)
     | 1738 | +                if (netdev_xmit_more())
1681 | 1739 |                          qede_update_tx_producer(txq);
1682 | 1740 | 
1683 | 1741 |                  netif_tx_stop_queue(netdev_txq);
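For context on the API swap (comment only):

    /* skb->xmit_more no longer exists; the "more frames are queued"
     * hint moved out of struct sk_buff into per-CPU state, and drivers
     * read it with netdev_xmit_more(). The doorbell-coalescing logic
     * here is otherwise unchanged.
     */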
  .. |   .. |
1703 | 1761 |  }
1704 | 1762 | 
1705 | 1763 |  u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
1706 |      | -                      struct net_device *sb_dev,
1707 |      | -                      select_queue_fallback_t fallback)
     | 1764 | +                      struct net_device *sb_dev)
1708 | 1765 |  {
1709 | 1766 |          struct qede_dev *edev = netdev_priv(dev);
1710 | 1767 |          int total_txq;
  .. |   .. |
1712 | 1769 |          total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
1713 | 1770 | 
1714 | 1771 |          return QEDE_TSS_COUNT(edev) ?
1715 |      | -                fallback(dev, skb, NULL) % total_txq : 0;
     | 1772 | +                netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
1716 | 1773 |  }
1717 | 1774 | 
1718 | 1775 |  /* 8B udp header + 8B base tunnel header + 32B option length */
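The matching context for this last hunk (comment only):

    /* ndo_select_queue lost its select_queue_fallback_t parameter; the
     * old fallback is the core's netdev_pick_tx(), which drivers such
     * as qede now call directly.
     */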