From 071106ecf68c401173c58808b1cf5f68cc50d390 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 05 Jan 2024 08:39:27 +0000
Subject: [PATCH] net: atlantic: update aq_ring.c Rx/Tx ring handling

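Update the Rx/Tx ring handling in aq_ring.c:

- Convert Rx buffers to page-based storage (struct aq_rxpage) with
  page reuse, tracked through the pg_flips/pg_losts/pg_reuses and
  alloc_fails counters.
- Build Rx skbs with napi_alloc_skb()/eth_get_headlen() plus
  skb_add_rx_frag() instead of build_skb()/netdev_alloc_skb(), and
  propagate checksum and error flags across multi-descriptor packets.
- Bound descriptor chains by the ring size and MAX_SKB_FRAGS before
  following the next index.
- Protect ring counters with u64_stats sync sections and add
  aq_ring_fill_stats_data() for per-queue ethtool statistics.
- Add PTP support: aq_ring_hwts_rx_alloc()/aq_ring_hwts_rx_clean(),
  Rx timestamp extraction, and recording of PTP traffic on queue 0.
- Map rings to netdev queues through AQ_NIC_RING2QMAP when stopping
  and waking subqueues.
- Pass Rx VLAN tags to the stack and replace the license boilerplate
  with an SPDX identifier.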
---
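Reviewer note: the per-ring counters added by this patch are written inside
u64_stats update sections on the hot path and read back with a fetch/retry
loop (see aq_ring_fill_stats_data() below). A minimal sketch of that pattern
follows, assuming the usual <linux/u64_stats_sync.h> API; the example_* names
are illustrative only and are not part of the driver.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
	struct u64_stats_sync syncp;	/* guards the 64-bit counters */
	u64 packets;
	u64 bytes;
};

/* Writer side: called from the ring hot path, never concurrently with
 * itself for a given ring (mirrors the updates in aq_ring_rx_clean()).
 */
static void example_update(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a consistent snapshot is observed (mirrors
 * aq_ring_fill_stats_data()). The syncp must have been set up with
 * u64_stats_init() when the ring was initialized.
 */
static void example_read(struct example_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}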
 kernel/drivers/net/ethernet/aquantia/atlantic/aq_ring.c |  436 +++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 332 insertions(+), 104 deletions(-)

diff --git a/kernel/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/kernel/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index b03e5fd..e9c6f1f 100644
--- a/kernel/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/kernel/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -1,10 +1,8 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_ring.c: Definition of functions for Rx/Tx rings. */
@@ -12,9 +10,100 @@
 #include "aq_ring.h"
 #include "aq_nic.h"
 #include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "aq_ptp.h"
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+
+static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
+{
+	unsigned int len = PAGE_SIZE << rxpage->order;
+
+	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
+
+	/* Drop the ref for being in the ring. */
+	__free_pages(rxpage->page, rxpage->order);
+	rxpage->page = NULL;
+}
+
+static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
+			 struct device *dev)
+{
+	struct page *page;
+	int ret = -ENOMEM;
+	dma_addr_t daddr;
+
+	page = dev_alloc_pages(order);
+	if (unlikely(!page))
+		goto err_exit;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
+			     DMA_FROM_DEVICE);
+
+	if (unlikely(dma_mapping_error(dev, daddr)))
+		goto free_page;
+
+	rxpage->page = page;
+	rxpage->daddr = daddr;
+	rxpage->order = order;
+	rxpage->pg_off = 0;
+
+	return 0;
+
+free_page:
+	__free_pages(page, order);
+
+err_exit:
+	return ret;
+}
+
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
+			  int order)
+{
+	int ret;
+
+	if (rxbuf->rxdata.page) {
+		/* One means ring is the only user and can reuse */
+		if (page_ref_count(rxbuf->rxdata.page) > 1) {
+			/* Try reuse buffer */
+			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
+			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
+				(PAGE_SIZE << order)) {
+				u64_stats_update_begin(&self->stats.rx.syncp);
+				self->stats.rx.pg_flips++;
+				u64_stats_update_end(&self->stats.rx.syncp);
+			} else {
+				/* Buffer exhausted. We have other users and
+				 * should release this page and realloc
+				 */
+				aq_free_rxpage(&rxbuf->rxdata,
+					       aq_nic_get_dev(self->aq_nic));
+				u64_stats_update_begin(&self->stats.rx.syncp);
+				self->stats.rx.pg_losts++;
+				u64_stats_update_end(&self->stats.rx.syncp);
+			}
+		} else {
+			rxbuf->rxdata.pg_off = 0;
+			u64_stats_update_begin(&self->stats.rx.syncp);
+			self->stats.rx.pg_reuses++;
+			u64_stats_update_end(&self->stats.rx.syncp);
+		}
+	}
+
+	if (!rxbuf->rxdata.page) {
+		ret = aq_get_rxpage(&rxbuf->rxdata, order,
+				    aq_nic_get_dev(self->aq_nic));
+		if (ret) {
+			u64_stats_update_begin(&self->stats.rx.syncp);
+			self->stats.rx.alloc_fails++;
+			u64_stats_update_end(&self->stats.rx.syncp);
+		}
+		return ret;
+	}
+
+	return 0;
+}
 
 static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
 				       struct aq_nic_s *aq_nic)
@@ -29,8 +118,8 @@
 		goto err_exit;
 	}
 	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
-						self->size * self->dx_size,
-						&self->dx_ring_pa, GFP_KERNEL);
+					   self->size * self->dx_size,
+					   &self->dx_ring_pa, GFP_KERNEL);
 	if (!self->dx_ring) {
 		err = -ENOMEM;
 		goto err_exit;
@@ -41,6 +130,7 @@
 		aq_ring_free(self);
 		self = NULL;
 	}
+
 	return self;
 }
 
@@ -67,6 +157,7 @@
 		aq_ring_free(self);
 		self = NULL;
 	}
+
 	return self;
 }
 
@@ -81,6 +172,11 @@
 	self->idx = idx;
 	self->size = aq_nic_cfg->rxds;
 	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
+			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+
+	if (aq_nic_cfg->rxpageorder > self->page_order)
+		self->page_order = aq_nic_cfg->rxpageorder;
 
 	self = aq_ring_alloc(self, aq_nic);
 	if (!self) {
@@ -93,14 +189,46 @@
 		aq_ring_free(self);
 		self = NULL;
 	}
+
 	return self;
 }
 
-int aq_ring_init(struct aq_ring_s *self)
+struct aq_ring_s *
+aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+		      unsigned int idx, unsigned int size, unsigned int dx_size)
+{
+	struct device *dev = aq_nic_get_dev(aq_nic);
+	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;
+
+	memset(self, 0, sizeof(*self));
+
+	self->aq_nic = aq_nic;
+	self->idx = idx;
+	self->size = size;
+	self->dx_size = dx_size;
+
+	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
+					   GFP_KERNEL);
+	if (!self->dx_ring) {
+		aq_ring_free(self);
+		return NULL;
+	}
+
+	return self;
+}
+
+int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
 {
 	self->hw_head = 0;
 	self->sw_head = 0;
 	self->sw_tail = 0;
+	self->ring_type = ring_type;
+
+	if (self->ring_type == ATL_RING_RX)
+		u64_stats_init(&self->stats.rx.syncp);
+	else
+		u64_stats_init(&self->stats.tx.syncp);
+
 	return 0;
 }
 
@@ -122,9 +250,14 @@
 {
 	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
 
-	if (__netif_subqueue_stopped(ndev, ring->idx)) {
-		netif_wake_subqueue(ndev, ring->idx);
+	if (__netif_subqueue_stopped(ndev,
+				     AQ_NIC_RING2QMAP(ring->aq_nic,
+						      ring->idx))) {
+		netif_wake_subqueue(ndev,
+				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
+		u64_stats_update_begin(&ring->stats.tx.syncp);
 		ring->stats.tx.queue_restarts++;
+		u64_stats_update_end(&ring->stats.tx.syncp);
 	}
 }
 
@@ -132,8 +265,11 @@
 {
 	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
 
-	if (!__netif_subqueue_stopped(ndev, ring->idx))
-		netif_stop_subqueue(ndev, ring->idx);
+	if (!__netif_subqueue_stopped(ndev,
+				      AQ_NIC_RING2QMAP(ring->aq_nic,
+						       ring->idx)))
+		netif_stop_subqueue(ndev,
+				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
 }
 
 bool aq_ring_tx_clean(struct aq_ring_s *self)
@@ -163,8 +299,10 @@
 		}
 
 		if (unlikely(buff->is_eop)) {
-			++self->stats.rx.packets;
+			u64_stats_update_begin(&self->stats.tx.syncp);
+			++self->stats.tx.packets;
 			self->stats.tx.bytes += buff->skb->len;
+			u64_stats_update_end(&self->stats.tx.syncp);
 
 			dev_kfree_skb_any(buff->skb);
 		}
@@ -184,7 +322,9 @@
 		return;
 
 	if (unlikely(buff->is_cso_err)) {
+		u64_stats_update_begin(&self->stats.rx.syncp);
 		++self->stats.rx.errors;
+		u64_stats_update_end(&self->stats.rx.syncp);
 		skb->ip_summed = CHECKSUM_NONE;
 		return;
 	}
@@ -206,91 +346,145 @@
 {
 	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
 	int err = 0;
-	bool is_rsc_completed = true;
 
 	for (; (self->sw_head != self->hw_head) && budget;
 		self->sw_head = aq_ring_next_dx(self, self->sw_head),
 		--budget, ++(*work_done)) {
 		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
+		struct aq_ring_buff_s *buff_ = NULL;
 		struct sk_buff *skb = NULL;
 		unsigned int next_ = 0U;
 		unsigned int i = 0U;
-		struct aq_ring_buff_s *buff_ = NULL;
-
-		if (buff->is_error) {
-			__free_pages(buff->page, 0);
-			continue;
-		}
+		u16 hdr_len;
 
 		if (buff->is_cleaned)
 			continue;
 
 		if (!buff->is_eop) {
-			for (next_ = buff->next,
-			     buff_ = &self->buff_ring[next_]; true;
-			     next_ = buff_->next,
-			     buff_ = &self->buff_ring[next_]) {
+			unsigned int frag_cnt = 0U;
+			buff_ = buff;
+			do {
+				bool is_rsc_completed = true;
+
+				if (buff_->next >= self->size) {
+					err = -EIO;
+					goto err_exit;
+				}
+
+				frag_cnt++;
+				next_ = buff_->next,
+				buff_ = &self->buff_ring[next_];
 				is_rsc_completed =
 					aq_ring_dx_in_range(self->sw_head,
 							    next_,
 							    self->hw_head);
 
-				if (unlikely(!is_rsc_completed)) {
-					is_rsc_completed = false;
-					break;
+				if (unlikely(!is_rsc_completed) ||
+						frag_cnt > MAX_SKB_FRAGS) {
+					err = 0;
+					goto err_exit;
 				}
 
-				if (buff_->is_eop)
-					break;
-			}
+				buff->is_error |= buff_->is_error;
+				buff->is_cso_err |= buff_->is_cso_err;
 
-			if (!is_rsc_completed) {
-				err = 0;
-				goto err_exit;
+			} while (!buff_->is_eop);
+
+			if (buff->is_error ||
+			    (buff->is_lro && buff->is_cso_err)) {
+				buff_ = buff;
+				do {
+					if (buff_->next >= self->size) {
+						err = -EIO;
+						goto err_exit;
+					}
+					next_ = buff_->next,
+					buff_ = &self->buff_ring[next_];
+
+					buff_->is_cleaned = true;
+				} while (!buff_->is_eop);
+
+				u64_stats_update_begin(&self->stats.rx.syncp);
+				++self->stats.rx.errors;
+				u64_stats_update_end(&self->stats.rx.syncp);
+				continue;
 			}
 		}
 
-		/* for single fragment packets use build_skb() */
-		if (buff->is_eop &&
-		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
-			skb = build_skb(page_address(buff->page),
+		if (buff->is_error) {
+			u64_stats_update_begin(&self->stats.rx.syncp);
+			++self->stats.rx.errors;
+			u64_stats_update_end(&self->stats.rx.syncp);
+			continue;
+		}
+
+		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+					      buff->rxdata.daddr,
+					      buff->rxdata.pg_off,
+					      buff->len, DMA_FROM_DEVICE);
+
+		skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
+		if (unlikely(!skb)) {
+			u64_stats_update_begin(&self->stats.rx.syncp);
+			self->stats.rx.skb_alloc_fails++;
+			u64_stats_update_end(&self->stats.rx.syncp);
+			err = -ENOMEM;
+			goto err_exit;
+		}
+		if (is_ptp_ring)
+			buff->len -=
+				aq_ptp_extract_ts(self->aq_nic, skb,
+						  aq_buf_vaddr(&buff->rxdata),
+						  buff->len);
+
+		hdr_len = buff->len;
+		if (hdr_len > AQ_CFG_RX_HDR_SIZE)
+			hdr_len = eth_get_headlen(skb->dev,
+						  aq_buf_vaddr(&buff->rxdata),
+						  AQ_CFG_RX_HDR_SIZE);
+
+		memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
+		       ALIGN(hdr_len, sizeof(long)));
+
+		if (buff->len - hdr_len > 0) {
+			skb_add_rx_frag(skb, i++, buff->rxdata.page,
+					buff->rxdata.pg_off + hdr_len,
+					buff->len - hdr_len,
 					AQ_CFG_RX_FRAME_MAX);
-			if (unlikely(!skb)) {
-				err = -ENOMEM;
-				goto err_exit;
-			}
-
-			skb_put(skb, buff->len);
-		} else {
-			skb = netdev_alloc_skb(ndev, ETH_HLEN);
-			if (unlikely(!skb)) {
-				err = -ENOMEM;
-				goto err_exit;
-			}
-			skb_put(skb, ETH_HLEN);
-			memcpy(skb->data, page_address(buff->page), ETH_HLEN);
-
-			skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
-					buff->len - ETH_HLEN,
-					SKB_TRUESIZE(buff->len - ETH_HLEN));
-
-			if (!buff->is_eop) {
-				for (i = 1U, next_ = buff->next,
-				     buff_ = &self->buff_ring[next_];
-				     true; next_ = buff_->next,
-				     buff_ = &self->buff_ring[next_], ++i) {
-					skb_add_rx_frag(skb, i,
-							buff_->page, 0,
-							buff_->len,
-							SKB_TRUESIZE(buff->len -
-							ETH_HLEN));
-					buff_->is_cleaned = 1;
-
-					if (buff_->is_eop)
-						break;
-				}
-			}
+			page_ref_inc(buff->rxdata.page);
 		}
+
+		if (!buff->is_eop) {
+			buff_ = buff;
+			do {
+				next_ = buff_->next;
+				buff_ = &self->buff_ring[next_];
+
+				dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+							      buff_->rxdata.daddr,
+							      buff_->rxdata.pg_off,
+							      buff_->len,
+							      DMA_FROM_DEVICE);
+				skb_add_rx_frag(skb, i++,
+						buff_->rxdata.page,
+						buff_->rxdata.pg_off,
+						buff_->len,
+						AQ_CFG_RX_FRAME_MAX);
+				page_ref_inc(buff_->rxdata.page);
+				buff_->is_cleaned = 1;
+
+				buff->is_ip_cso &= buff_->is_ip_cso;
+				buff->is_udp_cso &= buff_->is_udp_cso;
+				buff->is_tcp_cso &= buff_->is_tcp_cso;
+				buff->is_cso_err |= buff_->is_cso_err;
+
+			} while (!buff_->is_eop);
+		}
+
+		if (buff->is_vlan)
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       buff->vlan_rx_tag);
 
 		skb->protocol = eth_type_trans(skb, ndev);
 
@@ -299,11 +493,16 @@
 		skb_set_hash(skb, buff->rss_hash,
 			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
 			     PKT_HASH_TYPE_NONE);
+		/* Send all PTP traffic to 0 queue */
+		skb_record_rx_queue(skb,
+				    is_ptp_ring ? 0
+						: AQ_NIC_RING2QMAP(self->aq_nic,
+								   self->idx));
 
-		skb_record_rx_queue(skb, self->idx);
-
+		u64_stats_update_begin(&self->stats.rx.syncp);
 		++self->stats.rx.packets;
 		self->stats.rx.bytes += skb->len;
+		u64_stats_update_end(&self->stats.rx.syncp);
 
 		napi_gro_receive(napi, skb);
 	}
@@ -312,13 +511,33 @@
 	return err;
 }
 
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
+{
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+	while (self->sw_head != self->hw_head) {
+		u64 ns;
+
+		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
+						self->dx_ring +
+						(self->sw_head * self->dx_size),
+						self->dx_size, &ns);
+		aq_ptp_tx_hwtstamp(aq_nic, ns);
+
+		self->sw_head = aq_ring_next_dx(self, self->sw_head);
+	}
+#endif
+}
+
 int aq_ring_rx_fill(struct aq_ring_s *self)
 {
-	unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
-		(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+	unsigned int page_order = self->page_order;
 	struct aq_ring_buff_s *buff = NULL;
 	int err = 0;
 	int i = 0;
+
+	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
+					   self->size / 2))
+		return err;
 
 	for (i = aq_ring_avail_dx(self); i--;
 		self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
@@ -327,55 +546,35 @@
 		buff->flags = 0U;
 		buff->len = AQ_CFG_RX_FRAME_MAX;
 
-		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
-		if (!buff->page) {
-			err = -ENOMEM;
+		err = aq_get_rxpages(self, buff, page_order);
+		if (err)
 			goto err_exit;
-		}
 
-		buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
-					buff->page, 0,
-					AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
-		if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
-			err = -ENOMEM;
-			goto err_exit;
-		}
-
+		buff->pa = aq_buf_daddr(&buff->rxdata);
 		buff = NULL;
 	}
 
 err_exit:
-	if (err < 0) {
-		if (buff && buff->page)
-			__free_pages(buff->page, 0);
-	}
-
 	return err;
 }
 
 void aq_ring_rx_deinit(struct aq_ring_s *self)
 {
 	if (!self)
-		goto err_exit;
+		return;
 
 	for (; self->sw_head != self->sw_tail;
 		self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
 		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 
-		dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
-			       AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
-		__free_pages(buff->page, 0);
+		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
 	}
-
-err_exit:;
 }
 
 void aq_ring_free(struct aq_ring_s *self)
 {
 	if (!self)
-		goto err_exit;
+		return;
 
 	kfree(self->buff_ring);
 
@@ -383,6 +582,35 @@
 		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
 				  self->size * self->dx_size, self->dx_ring,
 				  self->dx_ring_pa);
+}
 
-err_exit:;
+unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
+{
+	unsigned int count;
+	unsigned int start;
+
+	if (self->ring_type == ATL_RING_RX) {
+		/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
+		do {
+			count = 0;
+			start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
+			data[count] = self->stats.rx.packets;
+			data[++count] = self->stats.rx.jumbo_packets;
+			data[++count] = self->stats.rx.lro_packets;
+			data[++count] = self->stats.rx.errors;
+			data[++count] = self->stats.rx.alloc_fails;
+			data[++count] = self->stats.rx.skb_alloc_fails;
+			data[++count] = self->stats.rx.polls;
+		} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
+	} else {
+		/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
+		do {
+			count = 0;
+			start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
+			data[count] = self->stats.tx.packets;
+			data[++count] = self->stats.tx.queue_restarts;
+		} while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
+	}
+
+	return ++count;
 }

--
Gitblit v1.6.2