.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * forcedeth: Ethernet driver for NVIDIA nForce media access controllers. |
---|
3 | 4 | * |
---|
.. | .. |
---|
14 | 15 | * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane |
---|
15 | 16 | * IRQ rate fixes, bigendian fixes, cleanups, verification) |
---|
16 | 17 | * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation |
---|
17 | | - * |
---|
18 | | - * This program is free software; you can redistribute it and/or modify |
---|
19 | | - * it under the terms of the GNU General Public License as published by |
---|
20 | | - * the Free Software Foundation; either version 2 of the License, or |
---|
21 | | - * (at your option) any later version. |
---|
22 | | - * |
---|
23 | | - * This program is distributed in the hope that it will be useful, |
---|
24 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
25 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
26 | | - * GNU General Public License for more details. |
---|
27 | | - * |
---|
28 | | - * You should have received a copy of the GNU General Public License |
---|
29 | | - * along with this program; if not, see <http://www.gnu.org/licenses/>. |
---|
30 | 18 | * |
---|
31 | 19 | * Known bugs: |
---|
32 | 20 | * We suspect that on some hardware no TX done interrupts are generated. |
---|
.. | .. |
---|
725 | 713 | struct nv_skb_map *next_tx_ctx; |
---|
726 | 714 | }; |
---|
727 | 715 | |
---|
/*
 * Per-cpu software TX/RX statistics, kept because the hardware counters
 * are not available on every chip revision (see the "not always
 * available in HW" fields).  One instance is allocated per cpu via
 * alloc_percpu() and aggregated across cpus by nv_get_stats().
 */
struct nv_txrx_stats {
	u64 stat_rx_packets;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;
	u64 stat_rx_dropped;
	u64 stat_tx_packets; /* not always available in HW */
	u64 stat_tx_bytes;
	u64 stat_tx_dropped;
};

/*
 * NOTE: these macros are intentionally non-hygienic: they expand to a
 * reference to a local variable 'np' (struct fe_priv *) which must be in
 * scope at the call site.  The __this_cpu_* variants are used because
 * every caller already runs with preemption disabled (irq/napi context
 * or under np->lock).
 */
#define nv_txrx_stats_inc(member) \
		__this_cpu_inc(np->txrx_stats->member)
#define nv_txrx_stats_add(member, count) \
		__this_cpu_add(np->txrx_stats->member, (count))
---|
728 | 731 | /* |
---|
729 | 732 | * SMP locking: |
---|
730 | 733 | * All hardware access under netdev_priv(dev)->lock, except the performance |
---|
.. | .. |
---|
809 | 812 | |
---|
810 | 813 | /* RX software stats */ |
---|
811 | 814 | struct u64_stats_sync swstats_rx_syncp; |
---|
812 | | - u64 stat_rx_packets; |
---|
813 | | - u64 stat_rx_bytes; /* not always available in HW */ |
---|
814 | | - u64 stat_rx_missed_errors; |
---|
815 | | - u64 stat_rx_dropped; |
---|
| 815 | + struct nv_txrx_stats __percpu *txrx_stats; |
---|
816 | 816 | |
---|
817 | 817 | /* media detection workaround. |
---|
818 | 818 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); |
---|
.. | .. |
---|
838 | 838 | |
---|
839 | 839 | /* TX software stats */ |
---|
840 | 840 | struct u64_stats_sync swstats_tx_syncp; |
---|
841 | | - u64 stat_tx_packets; /* not always available in HW */ |
---|
842 | | - u64 stat_tx_bytes; |
---|
843 | | - u64 stat_tx_dropped; |
---|
844 | 841 | |
---|
845 | 842 | /* msi/msi-x fields */ |
---|
846 | 843 | u32 msi_flags; |
---|
.. | .. |
---|
1733 | 1730 | } |
---|
1734 | 1731 | } |
---|
1735 | 1732 | |
---|
/*
 * nv_get_stats: fold one cpu's per-cpu software counters into @storage.
 * @cpu:     cpu whose nv_txrx_stats instance to read
 * @np:      device private data (owns the per-cpu txrx_stats)
 * @storage: accumulator; every field is added to (+=), so the caller must
 *           zero it before the first call and invoke this once per cpu
 *           (see the for_each_online_cpu() loop in nv_get_stats64()).
 *
 * Each group of counters is snapshotted inside a u64_stats fetch/retry
 * loop so that 32-bit readers never observe a torn 64-bit value while a
 * writer is updating under u64_stats_update_begin/end.
 * NOTE(review): the syncp objects are per-device, not per-cpu — assumed
 * sufficient because writers serialize per counter group; confirm against
 * the writer-side locking.
 */
static void nv_get_stats(int cpu, struct fe_priv *np,
			 struct rtnl_link_stats64 *storage)
{
	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
	unsigned int syncp_start;
	u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
	u64 tx_packets, tx_bytes, tx_dropped;

	/* RX counters: retry until a consistent snapshot is read. */
	do {
		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
		rx_packets = src->stat_rx_packets;
		rx_bytes = src->stat_rx_bytes;
		rx_dropped = src->stat_rx_dropped;
		rx_missed_errors = src->stat_rx_missed_errors;
	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));

	storage->rx_packets += rx_packets;
	storage->rx_bytes += rx_bytes;
	storage->rx_dropped += rx_dropped;
	storage->rx_missed_errors += rx_missed_errors;

	/* TX counters: same protocol, separate seqcount. */
	do {
		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
		tx_packets = src->stat_tx_packets;
		tx_bytes = src->stat_tx_bytes;
		tx_dropped = src->stat_tx_dropped;
	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));

	storage->tx_packets += tx_packets;
	storage->tx_bytes += tx_bytes;
	storage->tx_dropped += tx_dropped;
}
---|
| 1765 | + |
---|
1736 | 1766 | /* |
---|
1737 | 1767 | * nv_get_stats64: dev->ndo_get_stats64 function |
---|
1738 | 1768 | * Get latest stats value from the nic. |
---|
.. | .. |
---|
1745 | 1775 | __releases(&netdev_priv(dev)->hwstats_lock) |
---|
1746 | 1776 | { |
---|
1747 | 1777 | struct fe_priv *np = netdev_priv(dev); |
---|
1748 | | - unsigned int syncp_start; |
---|
| 1778 | + int cpu; |
---|
1749 | 1779 | |
---|
1750 | 1780 | /* |
---|
1751 | 1781 | * Note: because HW stats are not always available and for |
---|
.. | .. |
---|
1758 | 1788 | */ |
---|
1759 | 1789 | |
---|
1760 | 1790 | /* software stats */ |
---|
1761 | | - do { |
---|
1762 | | - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); |
---|
1763 | | - storage->rx_packets = np->stat_rx_packets; |
---|
1764 | | - storage->rx_bytes = np->stat_rx_bytes; |
---|
1765 | | - storage->rx_dropped = np->stat_rx_dropped; |
---|
1766 | | - storage->rx_missed_errors = np->stat_rx_missed_errors; |
---|
1767 | | - } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); |
---|
1768 | | - |
---|
1769 | | - do { |
---|
1770 | | - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); |
---|
1771 | | - storage->tx_packets = np->stat_tx_packets; |
---|
1772 | | - storage->tx_bytes = np->stat_tx_bytes; |
---|
1773 | | - storage->tx_dropped = np->stat_tx_dropped; |
---|
1774 | | - } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); |
---|
| 1791 | + for_each_online_cpu(cpu) |
---|
| 1792 | + nv_get_stats(cpu, np, storage); |
---|
1775 | 1793 | |
---|
1776 | 1794 | /* If the nic supports hw counters then retrieve latest values */ |
---|
1777 | 1795 | if (np->driver_data & DEV_HAS_STATISTICS_V123) { |
---|
.. | .. |
---|
1839 | 1857 | } else { |
---|
1840 | 1858 | packet_dropped: |
---|
1841 | 1859 | u64_stats_update_begin(&np->swstats_rx_syncp); |
---|
1842 | | - np->stat_rx_dropped++; |
---|
| 1860 | + nv_txrx_stats_inc(stat_rx_dropped); |
---|
1843 | 1861 | u64_stats_update_end(&np->swstats_rx_syncp); |
---|
1844 | 1862 | return 1; |
---|
1845 | 1863 | } |
---|
.. | .. |
---|
1881 | 1899 | } else { |
---|
1882 | 1900 | packet_dropped: |
---|
1883 | 1901 | u64_stats_update_begin(&np->swstats_rx_syncp); |
---|
1884 | | - np->stat_rx_dropped++; |
---|
| 1902 | + nv_txrx_stats_inc(stat_rx_dropped); |
---|
1885 | 1903 | u64_stats_update_end(&np->swstats_rx_syncp); |
---|
1886 | 1904 | return 1; |
---|
1887 | 1905 | } |
---|
.. | .. |
---|
2025 | 2043 | } |
---|
2026 | 2044 | if (nv_release_txskb(np, &np->tx_skb[i])) { |
---|
2027 | 2045 | u64_stats_update_begin(&np->swstats_tx_syncp); |
---|
2028 | | - np->stat_tx_dropped++; |
---|
| 2046 | + nv_txrx_stats_inc(stat_tx_dropped); |
---|
2029 | 2047 | u64_stats_update_end(&np->swstats_tx_syncp); |
---|
2030 | 2048 | } |
---|
2031 | 2049 | np->tx_skb[i].dma = 0; |
---|
.. | .. |
---|
2207 | 2225 | struct nv_skb_map *prev_tx_ctx; |
---|
2208 | 2226 | struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL; |
---|
2209 | 2227 | unsigned long flags; |
---|
| 2228 | + netdev_tx_t ret = NETDEV_TX_OK; |
---|
2210 | 2229 | |
---|
2211 | 2230 | /* add fragments to entries count */ |
---|
2212 | 2231 | for (i = 0; i < fragments; i++) { |
---|
.. | .. |
---|
2222 | 2241 | netif_stop_queue(dev); |
---|
2223 | 2242 | np->tx_stop = 1; |
---|
2224 | 2243 | spin_unlock_irqrestore(&np->lock, flags); |
---|
2225 | | - return NETDEV_TX_BUSY; |
---|
| 2244 | + |
---|
| 2245 | + /* When normal packets and/or xmit_more packets fill up |
---|
| 2246 | + * tx_desc, it is necessary to trigger NIC tx reg. |
---|
| 2247 | + */ |
---|
| 2248 | + ret = NETDEV_TX_BUSY; |
---|
| 2249 | + goto txkick; |
---|
2226 | 2250 | } |
---|
2227 | 2251 | spin_unlock_irqrestore(&np->lock, flags); |
---|
2228 | 2252 | |
---|
.. | .. |
---|
2239 | 2263 | /* on DMA mapping error - drop the packet */ |
---|
2240 | 2264 | dev_kfree_skb_any(skb); |
---|
2241 | 2265 | u64_stats_update_begin(&np->swstats_tx_syncp); |
---|
2242 | | - np->stat_tx_dropped++; |
---|
| 2266 | + nv_txrx_stats_inc(stat_tx_dropped); |
---|
2243 | 2267 | u64_stats_update_end(&np->swstats_tx_syncp); |
---|
2244 | | - return NETDEV_TX_OK; |
---|
| 2268 | + |
---|
| 2269 | + ret = NETDEV_TX_OK; |
---|
| 2270 | + |
---|
| 2271 | + goto dma_error; |
---|
2245 | 2272 | } |
---|
2246 | 2273 | np->put_tx_ctx->dma_len = bcnt; |
---|
2247 | 2274 | np->put_tx_ctx->dma_single = 1; |
---|
.. | .. |
---|
2285 | 2312 | dev_kfree_skb_any(skb); |
---|
2286 | 2313 | np->put_tx_ctx = start_tx_ctx; |
---|
2287 | 2314 | u64_stats_update_begin(&np->swstats_tx_syncp); |
---|
2288 | | - np->stat_tx_dropped++; |
---|
| 2315 | + nv_txrx_stats_inc(stat_tx_dropped); |
---|
2289 | 2316 | u64_stats_update_end(&np->swstats_tx_syncp); |
---|
2290 | | - return NETDEV_TX_OK; |
---|
| 2317 | + |
---|
| 2318 | + ret = NETDEV_TX_OK; |
---|
| 2319 | + |
---|
| 2320 | + goto dma_error; |
---|
2291 | 2321 | } |
---|
2292 | 2322 | |
---|
2293 | 2323 | np->put_tx_ctx->dma_len = bcnt; |
---|
.. | .. |
---|
2339 | 2369 | |
---|
2340 | 2370 | spin_unlock_irqrestore(&np->lock, flags); |
---|
2341 | 2371 | |
---|
2342 | | - writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
---|
2343 | | - return NETDEV_TX_OK; |
---|
| 2372 | +txkick: |
---|
| 2373 | + if (netif_queue_stopped(dev) || !netdev_xmit_more()) { |
---|
| 2374 | + u32 txrxctl_kick; |
---|
| 2375 | +dma_error: |
---|
| 2376 | + txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; |
---|
| 2377 | + writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl); |
---|
| 2378 | + } |
---|
| 2379 | + |
---|
| 2380 | + return ret; |
---|
2344 | 2381 | } |
---|
2345 | 2382 | |
---|
2346 | 2383 | static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, |
---|
.. | .. |
---|
2363 | 2400 | struct nv_skb_map *start_tx_ctx = NULL; |
---|
2364 | 2401 | struct nv_skb_map *tmp_tx_ctx = NULL; |
---|
2365 | 2402 | unsigned long flags; |
---|
| 2403 | + netdev_tx_t ret = NETDEV_TX_OK; |
---|
2366 | 2404 | |
---|
2367 | 2405 | /* add fragments to entries count */ |
---|
2368 | 2406 | for (i = 0; i < fragments; i++) { |
---|
.. | .. |
---|
2378 | 2416 | netif_stop_queue(dev); |
---|
2379 | 2417 | np->tx_stop = 1; |
---|
2380 | 2418 | spin_unlock_irqrestore(&np->lock, flags); |
---|
2381 | | - return NETDEV_TX_BUSY; |
---|
| 2419 | + |
---|
| 2420 | + /* When normal packets and/or xmit_more packets fill up |
---|
| 2421 | + * tx_desc, it is necessary to trigger NIC tx reg. |
---|
| 2422 | + */ |
---|
| 2423 | + ret = NETDEV_TX_BUSY; |
---|
| 2424 | + |
---|
| 2425 | + goto txkick; |
---|
2382 | 2426 | } |
---|
2383 | 2427 | spin_unlock_irqrestore(&np->lock, flags); |
---|
2384 | 2428 | |
---|
.. | .. |
---|
2396 | 2440 | /* on DMA mapping error - drop the packet */ |
---|
2397 | 2441 | dev_kfree_skb_any(skb); |
---|
2398 | 2442 | u64_stats_update_begin(&np->swstats_tx_syncp); |
---|
2399 | | - np->stat_tx_dropped++; |
---|
| 2443 | + nv_txrx_stats_inc(stat_tx_dropped); |
---|
2400 | 2444 | u64_stats_update_end(&np->swstats_tx_syncp); |
---|
2401 | | - return NETDEV_TX_OK; |
---|
| 2445 | + |
---|
| 2446 | + ret = NETDEV_TX_OK; |
---|
| 2447 | + |
---|
| 2448 | + goto dma_error; |
---|
2402 | 2449 | } |
---|
2403 | 2450 | np->put_tx_ctx->dma_len = bcnt; |
---|
2404 | 2451 | np->put_tx_ctx->dma_single = 1; |
---|
.. | .. |
---|
2443 | 2490 | dev_kfree_skb_any(skb); |
---|
2444 | 2491 | np->put_tx_ctx = start_tx_ctx; |
---|
2445 | 2492 | u64_stats_update_begin(&np->swstats_tx_syncp); |
---|
2446 | | - np->stat_tx_dropped++; |
---|
| 2493 | + nv_txrx_stats_inc(stat_tx_dropped); |
---|
2447 | 2494 | u64_stats_update_end(&np->swstats_tx_syncp); |
---|
2448 | | - return NETDEV_TX_OK; |
---|
| 2495 | + |
---|
| 2496 | + ret = NETDEV_TX_OK; |
---|
| 2497 | + |
---|
| 2498 | + goto dma_error; |
---|
2449 | 2499 | } |
---|
2450 | 2500 | np->put_tx_ctx->dma_len = bcnt; |
---|
2451 | 2501 | np->put_tx_ctx->dma_single = 0; |
---|
.. | .. |
---|
2524 | 2574 | |
---|
2525 | 2575 | spin_unlock_irqrestore(&np->lock, flags); |
---|
2526 | 2576 | |
---|
2527 | | - writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
---|
2528 | | - return NETDEV_TX_OK; |
---|
| 2577 | +txkick: |
---|
| 2578 | + if (netif_queue_stopped(dev) || !netdev_xmit_more()) { |
---|
| 2579 | + u32 txrxctl_kick; |
---|
| 2580 | +dma_error: |
---|
| 2581 | + txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; |
---|
| 2582 | + writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl); |
---|
| 2583 | + } |
---|
| 2584 | + |
---|
| 2585 | + return ret; |
---|
2529 | 2586 | } |
---|
2530 | 2587 | |
---|
2531 | 2588 | static inline void nv_tx_flip_ownership(struct net_device *dev) |
---|
.. | .. |
---|
2572 | 2629 | && !(flags & NV_TX_RETRYCOUNT_MASK)) |
---|
2573 | 2630 | nv_legacybackoff_reseed(dev); |
---|
2574 | 2631 | } else { |
---|
| 2632 | + unsigned int len; |
---|
| 2633 | + |
---|
2575 | 2634 | u64_stats_update_begin(&np->swstats_tx_syncp); |
---|
2576 | | - np->stat_tx_packets++; |
---|
2577 | | - np->stat_tx_bytes += np->get_tx_ctx->skb->len; |
---|
| 2635 | + nv_txrx_stats_inc(stat_tx_packets); |
---|
| 2636 | + len = np->get_tx_ctx->skb->len; |
---|
| 2637 | + nv_txrx_stats_add(stat_tx_bytes, len); |
---|
2578 | 2638 | u64_stats_update_end(&np->swstats_tx_syncp); |
---|
2579 | 2639 | } |
---|
2580 | 2640 | bytes_compl += np->get_tx_ctx->skb->len; |
---|
.. | .. |
---|
2589 | 2649 | && !(flags & NV_TX2_RETRYCOUNT_MASK)) |
---|
2590 | 2650 | nv_legacybackoff_reseed(dev); |
---|
2591 | 2651 | } else { |
---|
| 2652 | + unsigned int len; |
---|
| 2653 | + |
---|
2592 | 2654 | u64_stats_update_begin(&np->swstats_tx_syncp); |
---|
2593 | | - np->stat_tx_packets++; |
---|
2594 | | - np->stat_tx_bytes += np->get_tx_ctx->skb->len; |
---|
| 2655 | + nv_txrx_stats_inc(stat_tx_packets); |
---|
| 2656 | + len = np->get_tx_ctx->skb->len; |
---|
| 2657 | + nv_txrx_stats_add(stat_tx_bytes, len); |
---|
2595 | 2658 | u64_stats_update_end(&np->swstats_tx_syncp); |
---|
2596 | 2659 | } |
---|
2597 | 2660 | bytes_compl += np->get_tx_ctx->skb->len; |
---|
.. | .. |
---|
2639 | 2702 | nv_legacybackoff_reseed(dev); |
---|
2640 | 2703 | } |
---|
2641 | 2704 | } else { |
---|
| 2705 | + unsigned int len; |
---|
| 2706 | + |
---|
2642 | 2707 | u64_stats_update_begin(&np->swstats_tx_syncp); |
---|
2643 | | - np->stat_tx_packets++; |
---|
2644 | | - np->stat_tx_bytes += np->get_tx_ctx->skb->len; |
---|
| 2708 | + nv_txrx_stats_inc(stat_tx_packets); |
---|
| 2709 | + len = np->get_tx_ctx->skb->len; |
---|
| 2710 | + nv_txrx_stats_add(stat_tx_bytes, len); |
---|
2645 | 2711 | u64_stats_update_end(&np->swstats_tx_syncp); |
---|
2646 | 2712 | } |
---|
2647 | 2713 | |
---|
.. | .. |
---|
2673 | 2739 | * nv_tx_timeout: dev->tx_timeout function |
---|
2674 | 2740 | * Called with netif_tx_lock held. |
---|
2675 | 2741 | */ |
---|
2676 | | -static void nv_tx_timeout(struct net_device *dev) |
---|
| 2742 | +static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue) |
---|
2677 | 2743 | { |
---|
2678 | 2744 | struct fe_priv *np = netdev_priv(dev); |
---|
2679 | 2745 | u8 __iomem *base = get_hwbase(dev); |
---|
.. | .. |
---|
2818 | 2884 | } |
---|
2819 | 2885 | } |
---|
2820 | 2886 | |
---|
| 2887 | +static void rx_missing_handler(u32 flags, struct fe_priv *np) |
---|
| 2888 | +{ |
---|
| 2889 | + if (flags & NV_RX_MISSEDFRAME) { |
---|
| 2890 | + u64_stats_update_begin(&np->swstats_rx_syncp); |
---|
| 2891 | + nv_txrx_stats_inc(stat_rx_missed_errors); |
---|
| 2892 | + u64_stats_update_end(&np->swstats_rx_syncp); |
---|
| 2893 | + } |
---|
| 2894 | +} |
---|
| 2895 | + |
---|
2821 | 2896 | static int nv_rx_process(struct net_device *dev, int limit) |
---|
2822 | 2897 | { |
---|
2823 | 2898 | struct fe_priv *np = netdev_priv(dev); |
---|
.. | .. |
---|
2860 | 2935 | } |
---|
2861 | 2936 | /* the rest are hard errors */ |
---|
2862 | 2937 | else { |
---|
2863 | | - if (flags & NV_RX_MISSEDFRAME) { |
---|
2864 | | - u64_stats_update_begin(&np->swstats_rx_syncp); |
---|
2865 | | - np->stat_rx_missed_errors++; |
---|
2866 | | - u64_stats_update_end(&np->swstats_rx_syncp); |
---|
2867 | | - } |
---|
| 2938 | + rx_missing_handler(flags, np); |
---|
2868 | 2939 | dev_kfree_skb(skb); |
---|
2869 | 2940 | goto next_pkt; |
---|
2870 | 2941 | } |
---|
.. | .. |
---|
2908 | 2979 | skb->protocol = eth_type_trans(skb, dev); |
---|
2909 | 2980 | napi_gro_receive(&np->napi, skb); |
---|
2910 | 2981 | u64_stats_update_begin(&np->swstats_rx_syncp); |
---|
2911 | | - np->stat_rx_packets++; |
---|
2912 | | - np->stat_rx_bytes += len; |
---|
| 2982 | + nv_txrx_stats_inc(stat_rx_packets); |
---|
| 2983 | + nv_txrx_stats_add(stat_rx_bytes, len); |
---|
2913 | 2984 | u64_stats_update_end(&np->swstats_rx_syncp); |
---|
2914 | 2985 | next_pkt: |
---|
2915 | 2986 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) |
---|
.. | .. |
---|
2994 | 3065 | } |
---|
2995 | 3066 | napi_gro_receive(&np->napi, skb); |
---|
2996 | 3067 | u64_stats_update_begin(&np->swstats_rx_syncp); |
---|
2997 | | - np->stat_rx_packets++; |
---|
2998 | | - np->stat_rx_bytes += len; |
---|
| 3068 | + nv_txrx_stats_inc(stat_rx_packets); |
---|
| 3069 | + nv_txrx_stats_add(stat_rx_bytes, len); |
---|
2999 | 3070 | u64_stats_update_end(&np->swstats_rx_syncp); |
---|
3000 | 3071 | } else { |
---|
3001 | 3072 | dev_kfree_skb(skb); |
---|
.. | .. |
---|
5663 | 5734 | SET_NETDEV_DEV(dev, &pci_dev->dev); |
---|
5664 | 5735 | u64_stats_init(&np->swstats_rx_syncp); |
---|
5665 | 5736 | u64_stats_init(&np->swstats_tx_syncp); |
---|
| 5737 | + np->txrx_stats = alloc_percpu(struct nv_txrx_stats); |
---|
| 5738 | + if (!np->txrx_stats) { |
---|
| 5739 | + pr_err("np->txrx_stats, alloc memory error.\n"); |
---|
| 5740 | + err = -ENOMEM; |
---|
| 5741 | + goto out_alloc_percpu; |
---|
| 5742 | + } |
---|
5666 | 5743 | |
---|
5667 | 5744 | timer_setup(&np->oom_kick, nv_do_rx_refill, 0); |
---|
5668 | 5745 | timer_setup(&np->nic_poll, nv_do_nic_poll, 0); |
---|
.. | .. |
---|
6061 | 6138 | return 0; |
---|
6062 | 6139 | |
---|
6063 | 6140 | out_error: |
---|
| 6141 | + nv_mgmt_release_sema(dev); |
---|
6064 | 6142 | if (phystate_orig) |
---|
6065 | 6143 | writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); |
---|
6066 | 6144 | out_freering: |
---|
.. | .. |
---|
6072 | 6150 | out_disable: |
---|
6073 | 6151 | pci_disable_device(pci_dev); |
---|
6074 | 6152 | out_free: |
---|
| 6153 | + free_percpu(np->txrx_stats); |
---|
| 6154 | +out_alloc_percpu: |
---|
6075 | 6155 | free_netdev(dev); |
---|
6076 | 6156 | out: |
---|
6077 | 6157 | return err; |
---|
.. | .. |
---|
6117 | 6197 | static void nv_remove(struct pci_dev *pci_dev) |
---|
6118 | 6198 | { |
---|
6119 | 6199 | struct net_device *dev = pci_get_drvdata(pci_dev); |
---|
| 6200 | + struct fe_priv *np = netdev_priv(dev); |
---|
| 6201 | + |
---|
| 6202 | + free_percpu(np->txrx_stats); |
---|
6120 | 6203 | |
---|
6121 | 6204 | unregister_netdev(dev); |
---|
6122 | 6205 | |
---|
.. | .. |
---|
6138 | 6221 | #ifdef CONFIG_PM_SLEEP |
---|
6139 | 6222 | static int nv_suspend(struct device *device) |
---|
6140 | 6223 | { |
---|
6141 | | - struct pci_dev *pdev = to_pci_dev(device); |
---|
6142 | | - struct net_device *dev = pci_get_drvdata(pdev); |
---|
| 6224 | + struct net_device *dev = dev_get_drvdata(device); |
---|
6143 | 6225 | struct fe_priv *np = netdev_priv(dev); |
---|
6144 | 6226 | u8 __iomem *base = get_hwbase(dev); |
---|
6145 | 6227 | int i; |
---|