From 9999e48639b3cecb08ffb37358bcba3b48161b29 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 08:50:17 +0000
Subject: [PATCH] forcedeth: use per-cpu statistics and batch TX kicks

Convert the driver's software xmit/recv statistics to per-cpu counters
and sum them over all online CPUs in ndo_get_stats64(). On the transmit
path, kick the NIC TX register only when the queue is stopped or no
further xmit_more packets are pending, instead of once per packet.
Also convert the license boilerplate to an SPDX identifier, add the
txqueue argument to ndo_tx_timeout(), release the management semaphore
in the probe error path, and free the per-cpu statistics on probe
failure and device removal.
---
 kernel/drivers/net/ethernet/nvidia/forcedeth.c | 222 ++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 152 insertions(+), 70 deletions(-)

diff --git a/kernel/drivers/net/ethernet/nvidia/forcedeth.c b/kernel/drivers/net/ethernet/nvidia/forcedeth.c
index 1d9b0d4..e14dd10 100644
--- a/kernel/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/kernel/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
  *
@@ -14,19 +15,6 @@
  * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
  *	IRQ rate fixes, bigendian fixes, cleanups, verification)
  * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -725,6 +713,21 @@
 	struct nv_skb_map *next_tx_ctx;
 };

+struct nv_txrx_stats {
+	u64 stat_rx_packets;
+	u64 stat_rx_bytes; /* not always available in HW */
+	u64 stat_rx_missed_errors;
+	u64 stat_rx_dropped;
+	u64 stat_tx_packets; /* not always available in HW */
+	u64 stat_tx_bytes;
+	u64 stat_tx_dropped;
+};
+
+#define nv_txrx_stats_inc(member) \
+		__this_cpu_inc(np->txrx_stats->member)
+#define nv_txrx_stats_add(member, count) \
+		__this_cpu_add(np->txrx_stats->member, (count))
+
 /*
  * SMP locking:
  * All hardware access under netdev_priv(dev)->lock, except the performance
@@ -809,10 +812,7 @@

 	/* RX software stats */
 	struct u64_stats_sync swstats_rx_syncp;
-	u64 stat_rx_packets;
-	u64 stat_rx_bytes; /* not always available in HW */
-	u64 stat_rx_missed_errors;
-	u64 stat_rx_dropped;
+	struct nv_txrx_stats __percpu *txrx_stats;

 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -838,9 +838,6 @@

 	/* TX software stats */
 	struct u64_stats_sync swstats_tx_syncp;
-	u64 stat_tx_packets; /* not always available in HW */
-	u64 stat_tx_bytes;
-	u64 stat_tx_dropped;

 	/* msi/msi-x fields */
 	u32 msi_flags;
@@ -1733,6 +1730,39 @@
 	}
 }

+static void nv_get_stats(int cpu, struct fe_priv *np,
+			 struct rtnl_link_stats64 *storage)
+{
+	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
+	unsigned int syncp_start;
+	u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
+	u64 tx_packets, tx_bytes, tx_dropped;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+		rx_packets = src->stat_rx_packets;
+		rx_bytes = src->stat_rx_bytes;
+		rx_dropped = src->stat_rx_dropped;
+		rx_missed_errors = src->stat_rx_missed_errors;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+
+	storage->rx_packets += rx_packets;
+	storage->rx_bytes += rx_bytes;
+	storage->rx_dropped += rx_dropped;
+	storage->rx_missed_errors += rx_missed_errors;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+		tx_packets = src->stat_tx_packets;
+		tx_bytes = src->stat_tx_bytes;
+		tx_dropped = src->stat_tx_dropped;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+
+	storage->tx_packets += tx_packets;
+	storage->tx_bytes += tx_bytes;
+	storage->tx_dropped += tx_dropped;
+}
+
 /*
  * nv_get_stats64: dev->ndo_get_stats64 function
  *	Get latest stats value from the nic.
@@ -1745,7 +1775,7 @@
 	__releases(&netdev_priv(dev)->hwstats_lock)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	unsigned int syncp_start;
+	int cpu;

 	/*
 	 * Note: because HW stats are not always available and for
@@ -1758,20 +1788,8 @@
 	 */

 	/* software stats */
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
-		storage->rx_packets = np->stat_rx_packets;
-		storage->rx_bytes = np->stat_rx_bytes;
-		storage->rx_dropped = np->stat_rx_dropped;
-		storage->rx_missed_errors = np->stat_rx_missed_errors;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
-		storage->tx_packets = np->stat_tx_packets;
-		storage->tx_bytes = np->stat_tx_bytes;
-		storage->tx_dropped = np->stat_tx_dropped;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+	for_each_online_cpu(cpu)
+		nv_get_stats(cpu, np, storage);

 	/* If the nic supports hw counters then retrieve latest values */
 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -1839,7 +1857,7 @@
 	} else {
 packet_dropped:
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_dropped++;
+		nv_txrx_stats_inc(stat_rx_dropped);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 		return 1;
 	}
@@ -1881,7 +1899,7 @@
 	} else {
 packet_dropped:
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_dropped++;
+		nv_txrx_stats_inc(stat_rx_dropped);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 		return 1;
 	}
@@ -2025,7 +2043,7 @@
 		}
 		if (nv_release_txskb(np, &np->tx_skb[i])) {
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		np->tx_skb[i].dma = 0;
@@ -2207,6 +2225,7 @@
 	struct nv_skb_map *prev_tx_ctx;
 	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
 	unsigned long flags;
+	netdev_tx_t ret = NETDEV_TX_OK;

 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -2222,7 +2241,12 @@
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
-		return NETDEV_TX_BUSY;
+
+		/* When normal packets and/or xmit_more packets fill up
+		 * tx_desc, it is necessary to trigger NIC tx reg.
+		 */
+		ret = NETDEV_TX_BUSY;
+		goto txkick;
 	}
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2239,9 +2263,12 @@
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
-			return NETDEV_TX_OK;
+
+			ret = NETDEV_TX_OK;
+
+			goto dma_error;
 		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
@@ -2285,9 +2312,12 @@
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_dropped++;
+				nv_txrx_stats_inc(stat_tx_dropped);
 				u64_stats_update_end(&np->swstats_tx_syncp);
-				return NETDEV_TX_OK;
+
+				ret = NETDEV_TX_OK;
+
+				goto dma_error;
 			}
 			np->put_tx_ctx->dma_len = bcnt;
@@ -2339,8 +2369,15 @@

 	spin_unlock_irqrestore(&np->lock, flags);

-	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-	return NETDEV_TX_OK;
+txkick:
+	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+		u32 txrxctl_kick;
+dma_error:
+		txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+	}
+
+	return ret;
 }

 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
@@ -2363,6 +2400,7 @@
 	struct nv_skb_map *start_tx_ctx = NULL;
 	struct nv_skb_map *tmp_tx_ctx = NULL;
 	unsigned long flags;
+	netdev_tx_t ret = NETDEV_TX_OK;

 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -2378,7 +2416,13 @@
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
-		return NETDEV_TX_BUSY;
+
+		/* When normal packets and/or xmit_more packets fill up
+		 * tx_desc, it is necessary to trigger NIC tx reg.
+		 */
+		ret = NETDEV_TX_BUSY;
+
+		goto txkick;
 	}
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2396,9 +2440,12 @@
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
-			return NETDEV_TX_OK;
+
+			ret = NETDEV_TX_OK;
+
+			goto dma_error;
 		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
@@ -2443,9 +2490,12 @@
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_dropped++;
+				nv_txrx_stats_inc(stat_tx_dropped);
 				u64_stats_update_end(&np->swstats_tx_syncp);
-				return NETDEV_TX_OK;
+
+				ret = NETDEV_TX_OK;
+
+				goto dma_error;
 			}
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
@@ -2524,8 +2574,15 @@

 	spin_unlock_irqrestore(&np->lock, flags);

-	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-	return NETDEV_TX_OK;
+txkick:
+	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+		u32 txrxctl_kick;
+dma_error:
+		txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+	}
+
+	return ret;
 }

 static inline void nv_tx_flip_ownership(struct net_device *dev)
@@ -2572,9 +2629,12 @@
 			    && !(flags & NV_TX_RETRYCOUNT_MASK))
 				nv_legacybackoff_reseed(dev);
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		bytes_compl += np->get_tx_ctx->skb->len;
@@ -2589,9 +2649,12 @@
 			    && !(flags & NV_TX2_RETRYCOUNT_MASK))
 				nv_legacybackoff_reseed(dev);
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		bytes_compl += np->get_tx_ctx->skb->len;
@@ -2639,9 +2702,12 @@
 				nv_legacybackoff_reseed(dev);
 			}
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}

@@ -2673,7 +2739,7 @@
  * nv_tx_timeout: dev->tx_timeout function
  * Called with netif_tx_lock held.
 */
-static void nv_tx_timeout(struct net_device *dev)
+static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	int i;
@@ -2818,6 +2884,15 @@
 	}
 }

+static void rx_missing_handler(u32 flags, struct fe_priv *np)
+{
+	if (flags & NV_RX_MISSEDFRAME) {
+		u64_stats_update_begin(&np->swstats_rx_syncp);
+		nv_txrx_stats_inc(stat_rx_missed_errors);
+		u64_stats_update_end(&np->swstats_rx_syncp);
+	}
+}
+
 static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2860,11 +2935,7 @@
 				}
 				/* the rest are hard errors */
 				else {
-					if (flags & NV_RX_MISSEDFRAME) {
-						u64_stats_update_begin(&np->swstats_rx_syncp);
-						np->stat_rx_missed_errors++;
-						u64_stats_update_end(&np->swstats_rx_syncp);
-					}
+					rx_missing_handler(flags, np);
 					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
@@ -2908,8 +2979,8 @@
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_packets++;
-		np->stat_rx_bytes += len;
+		nv_txrx_stats_inc(stat_rx_packets);
+		nv_txrx_stats_add(stat_rx_bytes, len);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@@ -2994,8 +3065,8 @@
 			}
 			napi_gro_receive(&np->napi, skb);
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_packets++;
-			np->stat_rx_bytes += len;
+			nv_txrx_stats_inc(stat_rx_packets);
+			nv_txrx_stats_add(stat_rx_bytes, len);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 		} else {
 			dev_kfree_skb(skb);
@@ -5663,6 +5734,12 @@
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
 	u64_stats_init(&np->swstats_rx_syncp);
 	u64_stats_init(&np->swstats_tx_syncp);
+	np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
+	if (!np->txrx_stats) {
+		pr_err("np->txrx_stats, alloc memory error.\n");
+		err = -ENOMEM;
+		goto out_alloc_percpu;
+	}

 	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
 	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@@ -6061,6 +6138,7 @@
 	return 0;

 out_error:
+	nv_mgmt_release_sema(dev);
 	if (phystate_orig)
 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
 out_freering:
@@ -6072,6 +6150,8 @@
 out_disable:
 	pci_disable_device(pci_dev);
 out_free:
+	free_percpu(np->txrx_stats);
+out_alloc_percpu:
 	free_netdev(dev);
 out:
 	return err;
@@ -6117,6 +6197,9 @@
 static void nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
+	struct fe_priv *np = netdev_priv(dev);
+
+	free_percpu(np->txrx_stats);

 	unregister_netdev(dev);

@@ -6138,8 +6221,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int nv_suspend(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	int i;
-- 
Gitblit v1.6.2
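
Note on the statistics scheme used above: the hot paths bump per-cpu counters
through nv_txrx_stats_inc()/nv_txrx_stats_add() without taking a lock, and
nv_get_stats64() sums the counters of every online CPU inside a u64_stats_sync
retry loop so 64-bit values read consistently on 32-bit machines. The minimal
standalone sketch below shows the same pattern outside the driver; every
demo_* name is invented for illustration, and it assumes, as the driver does,
that writers on a given syncp are serialized (here by the tx lock and NAPI
poll).

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct demo_stats {
	u64 rx_packets;
};

static struct demo_stats __percpu *demo_stats;
static struct u64_stats_sync demo_syncp;

static int demo_init(void)
{
	demo_stats = alloc_percpu(struct demo_stats);
	if (!demo_stats)
		return -ENOMEM;
	u64_stats_init(&demo_syncp);
	return 0;
}

/* hot path: lock-free increment of this CPU's counter */
static void demo_inc_rx(void)
{
	u64_stats_update_begin(&demo_syncp);
	__this_cpu_inc(demo_stats->rx_packets);
	u64_stats_update_end(&demo_syncp);
}

/* reader: sum every online CPU's counter, retrying torn reads */
static u64 demo_read_rx(void)
{
	unsigned int start;
	u64 total = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct demo_stats *s = per_cpu_ptr(demo_stats, cpu);
		u64 val;

		do {
			start = u64_stats_fetch_begin_irq(&demo_syncp);
			val = s->rx_packets;
		} while (u64_stats_fetch_retry_irq(&demo_syncp, start));
		total += val;
	}
	return total;
}

static void demo_exit(void)
{
	free_percpu(demo_stats);
}

The trade-off is the one the patch makes: increments stay cheap and free of
cache-line bouncing, while the rare reader (ndo_get_stats64) pays for the
cross-CPU summation.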