From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] qede: add XDP_REDIRECT/ndo_xdp_xmit support and sync fastpath with upstream

---
 kernel/drivers/net/ethernet/qlogic/qede/qede_fp.c |  243 +++++++++++++++++++++++++++++------------------
 1 file changed, 149 insertions(+), 94 deletions(-)

diff --git a/kernel/drivers/net/ethernet/qlogic/qede/qede_fp.c b/kernel/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 9d5c2e3..d210632 100644
--- a/kernel/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/kernel/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qede NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
@@ -327,49 +302,92 @@
 	wmb();
 }
 
-static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
-			 struct sw_rx_data *metadata, u16 padding, u16 length)
+static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
+			 u16 len, struct page *page, struct xdp_frame *xdpf)
 {
-	struct qede_tx_queue *txq = fp->xdp_tx;
-	struct eth_tx_1st_bd *first_bd;
-	u16 idx = txq->sw_tx_prod;
+	struct eth_tx_1st_bd *bd;
+	struct sw_tx_xdp *xdp;
 	u16 val;
 
-	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+	if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
+		     txq->num_tx_buffers)) {
 		txq->stopped_cnt++;
 		return -ENOMEM;
 	}
 
-	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+	bd = qed_chain_produce(&txq->tx_pbl);
+	bd->data.nbds = 1;
+	bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
 
-	memset(first_bd, 0, sizeof(*first_bd));
-	first_bd->data.bd_flags.bitfields =
-	    BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
-
-	val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+	val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
 	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
 
-	first_bd->data.bitfields |= cpu_to_le16(val);
-	first_bd->data.nbds = 1;
+	bd->data.bitfields = cpu_to_le16(val);
 
 	/* We can safely ignore the offset, as it's 0 for XDP */
-	BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+	BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
 
-	/* Synchronize the buffer back to device, as program [probably]
-	 * has changed it.
-	 */
-	dma_sync_single_for_device(&edev->pdev->dev,
-				   metadata->mapping + padding,
-				   length, PCI_DMA_TODEVICE);
+	xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
+	xdp->mapping = dma;
+	xdp->page = page;
+	xdp->xdpf = xdpf;
 
-	txq->sw_tx_ring.xdp[idx].page = metadata->data;
-	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 
-	/* Mark the fastpath for future XDP doorbell */
-	fp->xdp_xmit = 1;
-
 	return 0;
+}
+
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+		      struct xdp_frame **frames, u32 flags)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	struct device *dmadev = &edev->pdev->dev;
+	struct qede_tx_queue *xdp_tx;
+	struct xdp_frame *xdpf;
+	dma_addr_t mapping;
+	int i, drops = 0;
+	u16 xdp_prod;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	if (unlikely(!netif_running(dev)))
+		return -ENETDOWN;
+
+	i = smp_processor_id() % edev->total_xdp_queues;
+	xdp_tx = edev->fp_array[i].xdp_tx;
+
+	spin_lock(&xdp_tx->xdp_tx_lock);
+
+	for (i = 0; i < n_frames; i++) {
+		xdpf = frames[i];
+
+		mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
+					 DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dmadev, mapping))) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+
+			continue;
+		}
+
+		if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
+					   NULL, xdpf))) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	if (flags & XDP_XMIT_FLUSH) {
+		xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
+
+		xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+		qede_update_tx_producer(xdp_tx);
+	}
+
+	spin_unlock(&xdp_tx->xdp_tx_lock);
+
+	return n_frames - drops;
 }
 
 int qede_txq_has_work(struct qede_tx_queue *txq)
@@ -387,20 +405,31 @@
 
 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-	u16 hw_bd_cons, idx;
+	struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
+	struct device *dev = &edev->pdev->dev;
+	struct xdp_frame *xdpf;
+	u16 hw_bd_cons;
 
 	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
 	barrier();
 
 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+		xdp_info = xdp_arr + txq->sw_tx_cons;
+		xdpf = xdp_info->xdpf;
+
+		if (xdpf) {
+			dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
+					 DMA_TO_DEVICE);
+			xdp_return_frame(xdpf);
+
+			xdp_info->xdpf = NULL;
+		} else {
+			dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
+				       DMA_BIDIRECTIONAL);
+			__free_page(xdp_info->page);
+		}
+
 		qed_chain_consume(&txq->tx_pbl);
-		idx = txq->sw_tx_cons;
-
-		dma_unmap_page(&edev->pdev->dev,
-			       txq->sw_tx_ring.xdp[idx].mapping,
-			       PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(txq->sw_tx_ring.xdp[idx].page);
-
 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 		txq->xmit_pkts++;
 	}
@@ -580,14 +609,6 @@
 
 	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
 			(u32 *)&rx_prods);
-
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the napi lock is released and another qede_poll is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
 }
 
 static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
@@ -731,6 +752,9 @@
 	buf = page_address(bd->data) + bd->page_offset;
 	skb = build_skb(buf, rxq->rx_buf_seg_size);
 
+	if (unlikely(!skb))
+		return NULL;
+
 	skb_reserve(skb, pad);
 	skb_put(skb, len);
 
@@ -787,8 +811,7 @@
 			return NULL;
 
 		skb_reserve(skb, pad);
-		memcpy(skb_put(skb, len),
-		       page_address(bd->data) + offset, len);
+		skb_put_data(skb, page_address(bd->data) + offset, len);
 		qede_reuse_page(rxq, bd);
 		goto out;
 	}
@@ -857,13 +880,13 @@
 	qede_set_gro_params(edev, tpa_info->skb, cqe);
 
 cons_buf: /* We still need to handle bd_len_list to consume buffers */
-	if (likely(cqe->ext_bd_len_list[0]))
+	if (likely(cqe->bw_ext_bd_len_list[0]))
 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
-				   le16_to_cpu(cqe->ext_bd_len_list[0]));
+				   le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
 
-	if (unlikely(cqe->ext_bd_len_list[1])) {
+	if (unlikely(cqe->bw_ext_bd_len_list[1])) {
 		DP_ERR(edev,
-		       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+		       "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
 		tpa_info->state = QEDE_AGG_STATE_ERROR;
 	}
 }
@@ -1075,6 +1098,7 @@
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + *len;
 	xdp.rxq = &rxq->xdp_rxq;
+	xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
 
 	/* Queues always have a full reset currently, so for the time
 	 * being until there's atomic program replace just mark read
@@ -1097,32 +1121,59 @@
 	switch (act) {
 	case XDP_TX:
 		/* We need the replacement buffer before transmit. */
-		if (qede_alloc_rx_buffer(rxq, true)) {
+		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
 			qede_recycle_rx_bd_ring(rxq, 1);
+
 			trace_xdp_exception(edev->ndev, prog, act);
-			return false;
+			break;
 		}
 
 		/* Now if there's a transmission problem, we'd still have to
 		 * throw current buffer, as replacement was already allocated.
 		 */
-		if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
-			dma_unmap_page(rxq->dev, bd->mapping,
-				       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
+					   *data_offset, *len, bd->data,
+					   NULL))) {
+			dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+				       rxq->data_direction);
 			__free_page(bd->data);
+
 			trace_xdp_exception(edev->ndev, prog, act);
+		} else {
+			dma_sync_single_for_device(rxq->dev,
+						   bd->mapping + *data_offset,
+						   *len, rxq->data_direction);
+			fp->xdp_xmit |= QEDE_XDP_TX;
 		}
 
 		/* Regardless, we've consumed an Rx BD */
 		qede_rx_bd_ring_consume(rxq);
-		return false;
+		break;
+	case XDP_REDIRECT:
+		/* We need the replacement buffer before transmit. */
+		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+			qede_recycle_rx_bd_ring(rxq, 1);
 
+			trace_xdp_exception(edev->ndev, prog, act);
+			break;
+		}
+
+		dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+			       rxq->data_direction);
+
+		if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
+			DP_NOTICE(edev, "Failed to redirect the packet\n");
+		else
+			fp->xdp_xmit |= QEDE_XDP_REDIRECT;
+
+		qede_rx_bd_ring_consume(rxq);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
-		/* Fall through */
+		fallthrough;
 	case XDP_ABORTED:
 		trace_xdp_exception(edev->ndev, prog, act);
-		/* Fall through */
+		fallthrough;
 	case XDP_DROP:
 		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
 	}
@@ -1386,6 +1437,9 @@
 						napi);
 	struct qede_dev *edev = fp->edev;
 	int rx_work_done = 0;
+	u16 xdp_prod;
+
+	fp->xdp_xmit = 0;
 
 	if (likely(fp->type & QEDE_FASTPATH_TX)) {
 		int cos;
@@ -1413,13 +1467,15 @@
 		}
 	}
 
-	if (fp->xdp_xmit) {
-		u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+	if (fp->xdp_xmit & QEDE_XDP_TX) {
+		xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
 
-		fp->xdp_xmit = 0;
 		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
 		qede_update_tx_producer(fp->xdp_tx);
 	}
+
+	if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+		xdp_do_flush_map();
 
 	return rx_work_done;
 }
@@ -1466,8 +1522,8 @@
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
 	if (qede_pkt_req_lin(skb, xmit_type)) {
 		if (skb_linearize(skb)) {
-			DP_NOTICE(edev,
-				  "SKB linearization failed - silently dropping this SKB\n");
+			txq->tx_mem_alloc_err++;
+
 			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
 		}
@@ -1672,12 +1728,12 @@
 	txq->tx_db.data.bd_prod =
 		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 
-	if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
 		qede_update_tx_producer(txq);
 
 	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
 		      < (MAX_SKB_FRAGS + 1))) {
-		if (skb->xmit_more)
+		if (netdev_xmit_more())
 			qede_update_tx_producer(txq);
 
 		netif_tx_stop_queue(netdev_txq);
@@ -1703,8 +1759,7 @@
 }
 
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      struct net_device *sb_dev,
-		      select_queue_fallback_t fallback)
+		      struct net_device *sb_dev)
 {
 	struct qede_dev *edev = netdev_priv(dev);
 	int total_txq;
@@ -1712,7 +1767,7 @@
 	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
 
 	return QEDE_TSS_COUNT(edev) ?
-		fallback(dev, skb, NULL) % total_txq :  0;
+		netdev_pick_tx(dev, skb, NULL) % total_txq :  0;
 }
 
 /* 8B udp header + 8B base tunnel header + 32B option length */

--
Gitblit v1.6.2