2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
  *
@@ -14,19 +15,6 @@
  * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
  * IRQ rate fixes, bigendian fixes, cleanups, verification)
  * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -725,6 +713,21 @@
 	struct nv_skb_map *next_tx_ctx;
 };
 
+struct nv_txrx_stats {
+	u64 stat_rx_packets;
+	u64 stat_rx_bytes; /* not always available in HW */
+	u64 stat_rx_missed_errors;
+	u64 stat_rx_dropped;
+	u64 stat_tx_packets; /* not always available in HW */
+	u64 stat_tx_bytes;
+	u64 stat_tx_dropped;
+};
+
+#define nv_txrx_stats_inc(member) \
+		__this_cpu_inc(np->txrx_stats->member)
+#define nv_txrx_stats_add(member, count) \
+		__this_cpu_add(np->txrx_stats->member, (count))
+
 /*
  * SMP locking:
  * All hardware access under netdev_priv(dev)->lock, except the performance
@@ -809,10 +812,7 @@
 
 	/* RX software stats */
 	struct u64_stats_sync swstats_rx_syncp;
-	u64 stat_rx_packets;
-	u64 stat_rx_bytes; /* not always available in HW */
-	u64 stat_rx_missed_errors;
-	u64 stat_rx_dropped;
+	struct nv_txrx_stats __percpu *txrx_stats;
 
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -838,9 +838,6 @@
 
 	/* TX software stats */
 	struct u64_stats_sync swstats_tx_syncp;
-	u64 stat_tx_packets; /* not always available in HW */
-	u64 stat_tx_bytes;
-	u64 stat_tx_dropped;
 
 	/* msi/msi-x fields */
 	u32 msi_flags;
@@ -1733,6 +1730,39 @@
 	}
 }
 
+static void nv_get_stats(int cpu, struct fe_priv *np,
+			 struct rtnl_link_stats64 *storage)
+{
+	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
+	unsigned int syncp_start;
+	u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
+	u64 tx_packets, tx_bytes, tx_dropped;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+		rx_packets = src->stat_rx_packets;
+		rx_bytes = src->stat_rx_bytes;
+		rx_dropped = src->stat_rx_dropped;
+		rx_missed_errors = src->stat_rx_missed_errors;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+
+	storage->rx_packets += rx_packets;
+	storage->rx_bytes += rx_bytes;
+	storage->rx_dropped += rx_dropped;
+	storage->rx_missed_errors += rx_missed_errors;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+		tx_packets = src->stat_tx_packets;
+		tx_bytes = src->stat_tx_bytes;
+		tx_dropped = src->stat_tx_dropped;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+
+	storage->tx_packets += tx_packets;
+	storage->tx_bytes += tx_bytes;
+	storage->tx_dropped += tx_dropped;
+}
+
 /*
  * nv_get_stats64: dev->ndo_get_stats64 function
  * Get latest stats value from the nic.
@@ -1745,7 +1775,7 @@
 	__releases(&netdev_priv(dev)->hwstats_lock)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	unsigned int syncp_start;
+	int cpu;
 
 	/*
 	 * Note: because HW stats are not always available and for
@@ -1758,20 +1788,8 @@
 	 */
 
 	/* software stats */
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
-		storage->rx_packets = np->stat_rx_packets;
-		storage->rx_bytes = np->stat_rx_bytes;
-		storage->rx_dropped = np->stat_rx_dropped;
-		storage->rx_missed_errors = np->stat_rx_missed_errors;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
-		storage->tx_packets = np->stat_tx_packets;
-		storage->tx_bytes = np->stat_tx_bytes;
-		storage->tx_dropped = np->stat_tx_dropped;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+	for_each_online_cpu(cpu)
+		nv_get_stats(cpu, np, storage);
 
 	/* If the nic supports hw counters then retrieve latest values */
 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -1839,7 +1857,7 @@
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_dropped++;
+			nv_txrx_stats_inc(stat_rx_dropped);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 			return 1;
 		}
@@ -1881,7 +1899,7 @@
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_dropped++;
+			nv_txrx_stats_inc(stat_rx_dropped);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 			return 1;
 		}
@@ -2025,7 +2043,7 @@
 		}
 		if (nv_release_txskb(np, &np->tx_skb[i])) {
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		np->tx_skb[i].dma = 0;
@@ -2207,6 +2225,7 @@
 	struct nv_skb_map *prev_tx_ctx;
 	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
 	unsigned long flags;
+	netdev_tx_t ret = NETDEV_TX_OK;
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -2222,7 +2241,12 @@
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
-		return NETDEV_TX_BUSY;
+
+		/* When normal packets and/or xmit_more packets fill up
+		 * tx_desc, it is necessary to trigger NIC tx reg.
+		 */
+		ret = NETDEV_TX_BUSY;
+		goto txkick;
 	}
 	spin_unlock_irqrestore(&np->lock, flags);
 
@@ -2239,9 +2263,12 @@
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
-			return NETDEV_TX_OK;
+
+			ret = NETDEV_TX_OK;
+
+			goto dma_error;
 		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
@@ -2285,9 +2312,12 @@
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_dropped++;
+				nv_txrx_stats_inc(stat_tx_dropped);
 				u64_stats_update_end(&np->swstats_tx_syncp);
-				return NETDEV_TX_OK;
+
+				ret = NETDEV_TX_OK;
+
+				goto dma_error;
 			}
 
 			np->put_tx_ctx->dma_len = bcnt;
@@ -2339,8 +2369,15 @@
 
 	spin_unlock_irqrestore(&np->lock, flags);
 
-	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-	return NETDEV_TX_OK;
+txkick:
+	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+		u32 txrxctl_kick;
+dma_error:
+		txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+	}
+
+	return ret;
 }
 
 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
@@ -2363,6 +2400,7 @@
 	struct nv_skb_map *start_tx_ctx = NULL;
 	struct nv_skb_map *tmp_tx_ctx = NULL;
 	unsigned long flags;
+	netdev_tx_t ret = NETDEV_TX_OK;
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -2378,7 +2416,13 @@
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
-		return NETDEV_TX_BUSY;
+
+		/* When normal packets and/or xmit_more packets fill up
+		 * tx_desc, it is necessary to trigger NIC tx reg.
+		 */
+		ret = NETDEV_TX_BUSY;
+
+		goto txkick;
 	}
 	spin_unlock_irqrestore(&np->lock, flags);
 
@@ -2396,9 +2440,12 @@
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
-			return NETDEV_TX_OK;
+
+			ret = NETDEV_TX_OK;
+
+			goto dma_error;
 		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
@@ -2443,9 +2490,12 @@
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_dropped++;
+				nv_txrx_stats_inc(stat_tx_dropped);
 				u64_stats_update_end(&np->swstats_tx_syncp);
-				return NETDEV_TX_OK;
+
+				ret = NETDEV_TX_OK;
+
+				goto dma_error;
 			}
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
@@ -2524,8 +2574,15 @@
 
 	spin_unlock_irqrestore(&np->lock, flags);
 
-	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-	return NETDEV_TX_OK;
+txkick:
+	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+		u32 txrxctl_kick;
+dma_error:
+		txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+	}
+
+	return ret;
 }
 
 static inline void nv_tx_flip_ownership(struct net_device *dev)
@@ -2572,9 +2629,12 @@
 				    && !(flags & NV_TX_RETRYCOUNT_MASK))
 					nv_legacybackoff_reseed(dev);
 			} else {
+				unsigned int len;
+
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_packets++;
-				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+				nv_txrx_stats_inc(stat_tx_packets);
+				len = np->get_tx_ctx->skb->len;
+				nv_txrx_stats_add(stat_tx_bytes, len);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 			}
 			bytes_compl += np->get_tx_ctx->skb->len;
@@ -2589,9 +2649,12 @@
 				    && !(flags & NV_TX2_RETRYCOUNT_MASK))
 					nv_legacybackoff_reseed(dev);
 			} else {
+				unsigned int len;
+
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_packets++;
-				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+				nv_txrx_stats_inc(stat_tx_packets);
+				len = np->get_tx_ctx->skb->len;
+				nv_txrx_stats_add(stat_tx_bytes, len);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 			}
 			bytes_compl += np->get_tx_ctx->skb->len;
@@ -2639,9 +2702,12 @@
 						nv_legacybackoff_reseed(dev);
 				}
 			} else {
+				unsigned int len;
+
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_packets++;
-				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+				nv_txrx_stats_inc(stat_tx_packets);
+				len = np->get_tx_ctx->skb->len;
+				nv_txrx_stats_add(stat_tx_bytes, len);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 			}
 
@@ -2673,7 +2739,7 @@
  * nv_tx_timeout: dev->tx_timeout function
  * Called with netif_tx_lock held.
  */
-static void nv_tx_timeout(struct net_device *dev)
+static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
@@ -2818,6 +2884,15 @@
 	}
 }
 
+static void rx_missing_handler(u32 flags, struct fe_priv *np)
+{
+	if (flags & NV_RX_MISSEDFRAME) {
+		u64_stats_update_begin(&np->swstats_rx_syncp);
+		nv_txrx_stats_inc(stat_rx_missed_errors);
+		u64_stats_update_end(&np->swstats_rx_syncp);
+	}
+}
+
 static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2860,11 +2935,7 @@
 			}
 			/* the rest are hard errors */
 			else {
-				if (flags & NV_RX_MISSEDFRAME) {
-					u64_stats_update_begin(&np->swstats_rx_syncp);
-					np->stat_rx_missed_errors++;
-					u64_stats_update_end(&np->swstats_rx_syncp);
-				}
+				rx_missing_handler(flags, np);
 				dev_kfree_skb(skb);
 				goto next_pkt;
 			}
@@ -2908,8 +2979,8 @@
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_packets++;
-		np->stat_rx_bytes += len;
+		nv_txrx_stats_inc(stat_rx_packets);
+		nv_txrx_stats_add(stat_rx_bytes, len);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@@ -2994,8 +3065,8 @@
 			}
 			napi_gro_receive(&np->napi, skb);
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_packets++;
-			np->stat_rx_bytes += len;
+			nv_txrx_stats_inc(stat_rx_packets);
+			nv_txrx_stats_add(stat_rx_bytes, len);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 		} else {
 			dev_kfree_skb(skb);
@@ -5663,6 +5734,12 @@
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
 	u64_stats_init(&np->swstats_rx_syncp);
 	u64_stats_init(&np->swstats_tx_syncp);
+	np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
+	if (!np->txrx_stats) {
+		pr_err("np->txrx_stats, alloc memory error.\n");
+		err = -ENOMEM;
+		goto out_alloc_percpu;
+	}
 
 	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
 	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@@ -6061,6 +6138,7 @@
 	return 0;
 
 out_error:
+	nv_mgmt_release_sema(dev);
 	if (phystate_orig)
 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
 out_freering:
@@ -6072,6 +6150,8 @@
 out_disable:
 	pci_disable_device(pci_dev);
 out_free:
+	free_percpu(np->txrx_stats);
+out_alloc_percpu:
 	free_netdev(dev);
 out:
 	return err;
@@ -6117,6 +6197,9 @@
 static void nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
+	struct fe_priv *np = netdev_priv(dev);
+
+	free_percpu(np->txrx_stats);
 
 	unregister_netdev(dev);
 
@@ -6138,8 +6221,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int nv_suspend(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	int i;
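
For reference, the per-CPU counter idiom this patch adopts can be shown in isolation. The sketch below is not code from forcedeth.c; the names pkt_stats, pkt_stats_account and pkt_stats_sum are illustrative only, and the u64_stats_sync protection the driver keeps for 32-bit readers is mentioned but not reproduced here.

/*
 * Minimal sketch of the per-CPU counter pattern: allocate one slot per CPU,
 * update only the local slot on the hot path, and fold all slots together on
 * the (rare) read path. The patch above additionally wraps updates and reads
 * in u64_stats_update_*/u64_stats_fetch_* so 64-bit counters stay consistent
 * on 32-bit machines.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/types.h>

struct pkt_stats {
	u64 packets;
	u64 bytes;
};

static struct pkt_stats __percpu *pkt_stats;

static int pkt_stats_init(void)
{
	/* One zero-initialized counter pair per possible CPU. */
	pkt_stats = alloc_percpu(struct pkt_stats);
	return pkt_stats ? 0 : -ENOMEM;
}

/* Hot path: bump only this CPU's slot; no lock, no shared cache line. */
static void pkt_stats_account(unsigned int len)
{
	__this_cpu_inc(pkt_stats->packets);
	__this_cpu_add(pkt_stats->bytes, len);
}

/* Slow path (e.g. ndo_get_stats64): sum every online CPU's slot. */
static void pkt_stats_sum(u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_online_cpu(cpu) {
		struct pkt_stats *s = per_cpu_ptr(pkt_stats, cpu);

		*packets += s->packets;
		*bytes += s->bytes;
	}
}

static void pkt_stats_exit(void)
{
	free_percpu(pkt_stats);
}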