2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/net/ethernet/intel/igb/igb_main.c
@@ -30,6 +30,8 @@
 #include <linux/if_ether.h>
 #include <linux/aer.h>
 #include <linux/prefetch.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/pm_runtime.h>
 #include <linux/etherdevice.h>
 #ifdef CONFIG_IGB_DCA
@@ -37,12 +39,6 @@
 #endif
 #include <linux/i2c.h>
 #include "igb.h"
-
-#define MAJ 5
-#define MIN 4
-#define BUILD 0
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-__stringify(BUILD) "-k"
 
 enum queue_mode {
 	QUEUE_MODE_STRICT_PRIORITY,
@@ -55,7 +51,6 @@
 };
 
 char igb_driver_name[] = "igb";
-char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
 				"Intel(R) Gigabit Ethernet Network Driver";
 static const char igb_copyright[] =
@@ -146,7 +141,7 @@
 static bool igb_clean_tx_irq(struct igb_q_vector *, int);
 static int igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
-static void igb_tx_timeout(struct net_device *);
+static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
 static void igb_reset_task(struct work_struct *);
 static void igb_vlan_mode(struct net_device *netdev,
 			  netdev_features_t features);
@@ -239,8 +234,7 @@
 
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL v2");
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 static int debug = -1;
@@ -557,8 +551,7 @@
 
 /**
  *  igb_get_i2c_data - Reads the I2C SDA data bit
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
+ *  @data: opaque pointer to adapter struct
  *
  *  Returns the I2C data bit value
  **/
@@ -666,8 +659,7 @@
 {
 	int ret;
 
-	pr_info("%s - version %s\n",
-		igb_driver_string, igb_driver_version);
+	pr_info("%s\n", igb_driver_string);
 	pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
@@ -720,14 +712,13 @@
 			adapter->rx_ring[i]->reg_idx = rbase_offset +
 						       Q_IDX_82576(i);
 		}
-		/* Fall through */
+		fallthrough;
 	case e1000_82575:
 	case e1000_82580:
 	case e1000_i350:
 	case e1000_i354:
 	case e1000_i210:
 	case e1000_i211:
-		/* Fall through */
 	default:
 		for (; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -753,6 +744,8 @@
 		struct net_device *netdev = igb->netdev;
 		hw->hw_addr = NULL;
 		netdev_err(netdev, "PCIe link lost\n");
+		WARN(pci_device_is_present(igb->pdev),
+		     "igb: Failed to read reg 0x%x!\n", reg);
 	}
 
 	return value;
@@ -1196,15 +1189,15 @@
 {
 	struct igb_q_vector *q_vector;
 	struct igb_ring *ring;
-	int ring_count, size;
+	int ring_count;
+	size_t size;
 
 	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
 	if (txr_count > 1 || rxr_count > 1)
 		return -ENOMEM;
 
 	ring_count = txr_count + rxr_count;
-	size = sizeof(struct igb_q_vector) +
-	       (sizeof(struct igb_ring) * ring_count);
+	size = struct_size(q_vector, ring, ring_count);
 
 	/* allocate q_vector and rings */
 	q_vector = adapter->q_vector[v_idx];
@@ -1858,13 +1851,12 @@
 	 * configuration' in respect to these parameters.
 	 */
 
-	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
-		   idleslope %d sendslope %d hiCredit %d \
-		   locredit %d\n",
-		   (ring->cbs_enable) ? "enabled" : "disabled",
-		   (ring->launchtime_enable) ? "enabled" : "disabled", queue,
-		   ring->idleslope, ring->sendslope, ring->hicredit,
-		   ring->locredit);
+	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+		   ring->cbs_enable ? "enabled" : "disabled",
+		   ring->launchtime_enable ? "enabled" : "disabled",
+		   queue,
+		   ring->idleslope, ring->sendslope,
+		   ring->hicredit, ring->locredit);
 }
 
 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
@@ -1943,7 +1935,7 @@
 
 	val = rd32(E1000_RXPBS);
 	val &= ~I210_RXPBSIZE_MASK;
-	val |= I210_RXPBSIZE_PB_32KB;
+	val |= I210_RXPBSIZE_PB_30KB;
 	wr32(E1000_RXPBS, val);
 
 	/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
@@ -2237,7 +2229,6 @@
 
 void igb_reinit_locked(struct igb_adapter *adapter)
 {
-	WARN_ON(in_interrupt());
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
 		usleep_range(1000, 2000);
 	igb_down(adapter);
@@ -2379,7 +2370,7 @@
 		adapter->ei.get_invariants(hw);
 		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
 	}
-	if ((mac->type == e1000_82575) &&
+	if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
 	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
 		igb_enable_mas(adapter);
 	}
@@ -2490,13 +2481,14 @@
 	else
 		igb_reset(adapter);
 
-	return 0;
+	return 1;
 }
 
 static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 			   struct net_device *dev,
 			   const unsigned char *addr, u16 vid,
-			   u16 flags)
+			   u16 flags,
+			   struct netlink_ext_ack *extack)
 {
 	/* guarantee we can provide a unique filter for the unicast address */
 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
@@ -2524,6 +2516,7 @@
 	if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_HW_VLAN_CTAG_TX |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
@@ -2532,6 +2525,7 @@
 	if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
 
@@ -2586,13 +2580,15 @@
 #define VLAN_PRIO_FULL_MASK (0x07)
 
 static int igb_parse_cls_flower(struct igb_adapter *adapter,
-				struct tc_cls_flower_offload *f,
+				struct flow_cls_offload *f,
 				int traffic_class,
 				struct igb_nfc_filter *input)
 {
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	struct flow_dissector *dissector = rule->match.dissector;
 	struct netlink_ext_ack *extack = f->common.extack;
 
-	if (f->dissector->used_keys &
+	if (dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -2602,78 +2598,61 @@
 		return -EOPNOTSUPP;
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-		struct flow_dissector_key_eth_addrs *key, *mask;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_match_eth_addrs match;
 
-		key = skb_flow_dissector_target(f->dissector,
-						FLOW_DISSECTOR_KEY_ETH_ADDRS,
-						f->key);
-		mask = skb_flow_dissector_target(f->dissector,
-						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
-						 f->mask);
-
-		if (!is_zero_ether_addr(mask->dst)) {
-			if (!is_broadcast_ether_addr(mask->dst)) {
+		flow_rule_match_eth_addrs(rule, &match);
+		if (!is_zero_ether_addr(match.mask->dst)) {
+			if (!is_broadcast_ether_addr(match.mask->dst)) {
 				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
 				return -EINVAL;
 			}
 
 			input->filter.match_flags |=
 				IGB_FILTER_FLAG_DST_MAC_ADDR;
-			ether_addr_copy(input->filter.dst_addr, key->dst);
+			ether_addr_copy(input->filter.dst_addr, match.key->dst);
 		}
 
-		if (!is_zero_ether_addr(mask->src)) {
-			if (!is_broadcast_ether_addr(mask->src)) {
+		if (!is_zero_ether_addr(match.mask->src)) {
+			if (!is_broadcast_ether_addr(match.mask->src)) {
 				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
 				return -EINVAL;
 			}
 
 			input->filter.match_flags |=
 				IGB_FILTER_FLAG_SRC_MAC_ADDR;
-			ether_addr_copy(input->filter.src_addr, key->src);
+			ether_addr_copy(input->filter.src_addr, match.key->src);
 		}
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *key, *mask;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
 
-		key = skb_flow_dissector_target(f->dissector,
-						FLOW_DISSECTOR_KEY_BASIC,
-						f->key);
-		mask = skb_flow_dissector_target(f->dissector,
-						 FLOW_DISSECTOR_KEY_BASIC,
-						 f->mask);
-
-		if (mask->n_proto) {
-			if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
+		flow_rule_match_basic(rule, &match);
+		if (match.mask->n_proto) {
+			if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
 				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
 				return -EINVAL;
 			}
 
 			input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
-			input->filter.etype = key->n_proto;
+			input->filter.etype = match.key->n_proto;
 		}
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-		struct flow_dissector_key_vlan *key, *mask;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
 
-		key = skb_flow_dissector_target(f->dissector,
-						FLOW_DISSECTOR_KEY_VLAN,
-						f->key);
-		mask = skb_flow_dissector_target(f->dissector,
-						 FLOW_DISSECTOR_KEY_VLAN,
-						 f->mask);
-
-		if (mask->vlan_priority) {
-			if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+		flow_rule_match_vlan(rule, &match);
+		if (match.mask->vlan_priority) {
+			if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
 				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
 				return -EINVAL;
 			}
 
 			input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
-			input->filter.vlan_tci = key->vlan_priority;
+			input->filter.vlan_tci =
+				(__force __be16)match.key->vlan_priority;
 		}
 	}
 
@@ -2684,7 +2663,7 @@
 }
 
 static int igb_configure_clsflower(struct igb_adapter *adapter,
-				   struct tc_cls_flower_offload *cls_flower)
+				   struct flow_cls_offload *cls_flower)
 {
 	struct netlink_ext_ack *extack = cls_flower->common.extack;
 	struct igb_nfc_filter *filter, *f;
@@ -2746,7 +2725,7 @@
 }
 
 static int igb_delete_clsflower(struct igb_adapter *adapter,
-				struct tc_cls_flower_offload *cls_flower)
+				struct flow_cls_offload *cls_flower)
 {
 	struct igb_nfc_filter *filter;
 	int err;
@@ -2776,14 +2755,14 @@
 }
 
 static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
-				   struct tc_cls_flower_offload *cls_flower)
+				   struct flow_cls_offload *cls_flower)
 {
 	switch (cls_flower->command) {
-	case TC_CLSFLOWER_REPLACE:
+	case FLOW_CLS_REPLACE:
 		return igb_configure_clsflower(adapter, cls_flower);
-	case TC_CLSFLOWER_DESTROY:
+	case FLOW_CLS_DESTROY:
 		return igb_delete_clsflower(adapter, cls_flower);
-	case TC_CLSFLOWER_STATS:
+	case FLOW_CLS_STATS:
 		return -EOPNOTSUPP;
 	default:
 		return -EOPNOTSUPP;
@@ -2802,25 +2781,6 @@
 	case TC_SETUP_CLSFLOWER:
 		return igb_setup_tc_cls_flower(adapter, type_data);
 
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static int igb_setup_tc_block(struct igb_adapter *adapter,
-			      struct tc_block_offload *f)
-{
-	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		return -EOPNOTSUPP;
-
-	switch (f->command) {
-	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
-					     adapter, adapter, f->extack);
-	case TC_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
-					adapter);
-		return 0;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2849,6 +2809,8 @@
 	return 0;
 }
 
+static LIST_HEAD(igb_block_cb_list);
+
 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			void *type_data)
 {
....@@ -2858,13 +2820,168 @@
28582820 case TC_SETUP_QDISC_CBS:
28592821 return igb_offload_cbs(adapter, type_data);
28602822 case TC_SETUP_BLOCK:
2861
- return igb_setup_tc_block(adapter, type_data);
2823
+ return flow_block_cb_setup_simple(type_data,
2824
+ &igb_block_cb_list,
2825
+ igb_setup_tc_block_cb,
2826
+ adapter, adapter, true);
2827
+
28622828 case TC_SETUP_QDISC_ETF:
28632829 return igb_offload_txtime(adapter, type_data);
28642830
28652831 default:
28662832 return -EOPNOTSUPP;
28672833 }
2834
+}
2835
+
2836
+static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
2837
+{
2838
+ int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
2839
+ struct igb_adapter *adapter = netdev_priv(dev);
2840
+ struct bpf_prog *prog = bpf->prog, *old_prog;
2841
+ bool running = netif_running(dev);
2842
+ bool need_reset;
2843
+
2844
+ /* verify igb ring attributes are sufficient for XDP */
2845
+ for (i = 0; i < adapter->num_rx_queues; i++) {
2846
+ struct igb_ring *ring = adapter->rx_ring[i];
2847
+
2848
+ if (frame_size > igb_rx_bufsz(ring)) {
2849
+ NL_SET_ERR_MSG_MOD(bpf->extack,
2850
+ "The RX buffer size is too small for the frame size");
2851
+ netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
2852
+ igb_rx_bufsz(ring), frame_size);
2853
+ return -EINVAL;
2854
+ }
2855
+ }
2856
+
2857
+ old_prog = xchg(&adapter->xdp_prog, prog);
2858
+ need_reset = (!!prog != !!old_prog);
2859
+
2860
+ /* device is up and bpf is added/removed, must setup the RX queues */
2861
+ if (need_reset && running) {
2862
+ igb_close(dev);
2863
+ } else {
2864
+ for (i = 0; i < adapter->num_rx_queues; i++)
2865
+ (void)xchg(&adapter->rx_ring[i]->xdp_prog,
2866
+ adapter->xdp_prog);
2867
+ }
2868
+
2869
+ if (old_prog)
2870
+ bpf_prog_put(old_prog);
2871
+
2872
+ /* bpf is just replaced, RXQ and MTU are already setup */
2873
+ if (!need_reset)
2874
+ return 0;
2875
+
2876
+ if (running)
2877
+ igb_open(dev);
2878
+
2879
+ return 0;
2880
+}
2881
+
2882
+static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2883
+{
2884
+ switch (xdp->command) {
2885
+ case XDP_SETUP_PROG:
2886
+ return igb_xdp_setup(dev, xdp);
2887
+ default:
2888
+ return -EINVAL;
2889
+ }
2890
+}
2891
+
2892
+static void igb_xdp_ring_update_tail(struct igb_ring *ring)
2893
+{
2894
+ /* Force memory writes to complete before letting h/w know there
2895
+ * are new descriptors to fetch.
2896
+ */
2897
+ wmb();
2898
+ writel(ring->next_to_use, ring->tail);
2899
+}
2900
+
2901
+static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
2902
+{
2903
+ unsigned int r_idx = smp_processor_id();
2904
+
2905
+ if (r_idx >= adapter->num_tx_queues)
2906
+ r_idx = r_idx % adapter->num_tx_queues;
2907
+
2908
+ return adapter->tx_ring[r_idx];
2909
+}
2910
+
2911
+static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
2912
+{
2913
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2914
+ int cpu = smp_processor_id();
2915
+ struct igb_ring *tx_ring;
2916
+ struct netdev_queue *nq;
2917
+ u32 ret;
2918
+
2919
+ if (unlikely(!xdpf))
2920
+ return IGB_XDP_CONSUMED;
2921
+
2922
+ /* During program transitions its possible adapter->xdp_prog is assigned
2923
+ * but ring has not been configured yet. In this case simply abort xmit.
2924
+ */
2925
+ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2926
+ if (unlikely(!tx_ring))
2927
+ return IGB_XDP_CONSUMED;
2928
+
2929
+ nq = txring_txq(tx_ring);
2930
+ __netif_tx_lock(nq, cpu);
2931
+ /* Avoid transmit queue timeout since we share it with the slow path */
2932
+ nq->trans_start = jiffies;
2933
+ ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2934
+ __netif_tx_unlock(nq);
2935
+
2936
+ return ret;
2937
+}
2938
+
2939
+static int igb_xdp_xmit(struct net_device *dev, int n,
2940
+ struct xdp_frame **frames, u32 flags)
2941
+{
2942
+ struct igb_adapter *adapter = netdev_priv(dev);
2943
+ int cpu = smp_processor_id();
2944
+ struct igb_ring *tx_ring;
2945
+ struct netdev_queue *nq;
2946
+ int drops = 0;
2947
+ int i;
2948
+
2949
+ if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
2950
+ return -ENETDOWN;
2951
+
2952
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2953
+ return -EINVAL;
2954
+
2955
+ /* During program transitions its possible adapter->xdp_prog is assigned
2956
+ * but ring has not been configured yet. In this case simply abort xmit.
2957
+ */
2958
+ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2959
+ if (unlikely(!tx_ring))
2960
+ return -ENXIO;
2961
+
2962
+ nq = txring_txq(tx_ring);
2963
+ __netif_tx_lock(nq, cpu);
2964
+
2965
+ /* Avoid transmit queue timeout since we share it with the slow path */
2966
+ nq->trans_start = jiffies;
2967
+
2968
+ for (i = 0; i < n; i++) {
2969
+ struct xdp_frame *xdpf = frames[i];
2970
+ int err;
2971
+
2972
+ err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2973
+ if (err != IGB_XDP_TX) {
2974
+ xdp_return_frame_rx_napi(xdpf);
2975
+ drops++;
2976
+ }
2977
+ }
2978
+
2979
+ __netif_tx_unlock(nq);
2980
+
2981
+ if (unlikely(flags & XDP_XMIT_FLUSH))
2982
+ igb_xdp_ring_update_tail(tx_ring);
2983
+
2984
+ return n - drops;
28682985 }
28692986
28702987 static const struct net_device_ops igb_netdev_ops = {
....@@ -2891,6 +3008,8 @@
28913008 .ndo_fdb_add = igb_ndo_fdb_add,
28923009 .ndo_features_check = igb_features_check,
28933010 .ndo_setup_tc = igb_setup_tc,
3011
+ .ndo_bpf = igb_xdp,
3012
+ .ndo_xdp_xmit = igb_xdp_xmit,
28943013 };
28953014
28963015 /**
....@@ -2915,7 +3034,7 @@
29153034 fw.invm_img_type);
29163035 break;
29173036 }
2918
- /* fall through */
3037
+ fallthrough;
29193038 default:
29203039 /* if option is rom valid, display its version too */
29213040 if (fw.or_valid) {
....@@ -3157,7 +3276,7 @@
31573276 NETIF_F_HW_CSUM;
31583277
31593278 if (hw->mac.type >= e1000_82576)
3160
- netdev->features |= NETIF_F_SCTP_CRC;
3279
+ netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
31613280
31623281 if (hw->mac.type >= e1000_i350)
31633282 netdev->features |= NETIF_F_HW_TC;
....@@ -3431,7 +3550,9 @@
34313550 "Width x1" : "unknown"), netdev->dev_addr);
34323551 }
34333552
3434
- if ((hw->mac.type >= e1000_i210 ||
3553
+ if ((hw->mac.type == e1000_82576 &&
3554
+ rd32(E1000_EECD) & E1000_EECD_PRES) ||
3555
+ (hw->mac.type >= e1000_i210 ||
34353556 igb_get_flash_presence_i210(hw))) {
34363557 ret_val = igb_read_part_string(hw, part_str,
34373558 E1000_PBANUM_LENGTH);
....@@ -3478,7 +3599,7 @@
34783599 }
34793600 }
34803601
3481
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
3602
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
34823603
34833604 pm_runtime_put_noidle(&pdev->dev);
34843605 return 0;
....@@ -3517,6 +3638,7 @@
35173638 struct net_device *netdev = pci_get_drvdata(pdev);
35183639 struct igb_adapter *adapter = netdev_priv(netdev);
35193640 struct e1000_hw *hw = &adapter->hw;
3641
+ unsigned long flags;
35203642
35213643 /* reclaim resources allocated to VFs */
35223644 if (adapter->vf_data) {
....@@ -3529,12 +3651,13 @@
35293651 pci_disable_sriov(pdev);
35303652 msleep(500);
35313653 }
3532
-
3654
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
35333655 kfree(adapter->vf_mac_list);
35343656 adapter->vf_mac_list = NULL;
35353657 kfree(adapter->vf_data);
35363658 adapter->vf_data = NULL;
35373659 adapter->vfs_allocated_count = 0;
3660
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
35383661 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
35393662 wrfl();
35403663 msleep(100);
....@@ -3694,7 +3817,9 @@
36943817 igb_release_hw_control(adapter);
36953818
36963819 #ifdef CONFIG_PCI_IOV
3820
+ rtnl_lock();
36973821 igb_disable_sriov(pdev);
3822
+ rtnl_unlock();
36983823 #endif
36993824
37003825 unregister_netdev(netdev);
....@@ -3767,13 +3892,13 @@
37673892 max_rss_queues = 1;
37683893 break;
37693894 }
3770
- /* fall through */
3895
+ fallthrough;
37713896 case e1000_82576:
37723897 if (!!adapter->vfs_allocated_count) {
37733898 max_rss_queues = 2;
37743899 break;
37753900 }
3776
- /* fall through */
3901
+ fallthrough;
37773902 case e1000_82580:
37783903 case e1000_i354:
37793904 default:
....@@ -3849,12 +3974,14 @@
38493974 /* set default work limits */
38503975 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
38513976
3852
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3853
- VLAN_HLEN;
3977
+ adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
38543978 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
38553979
38563980 spin_lock_init(&adapter->nfc_lock);
38573981 spin_lock_init(&adapter->stats64_lock);
3982
+
3983
+ /* init spinlock to avoid concurrency of VF resources */
3984
+ spin_lock_init(&adapter->vfs_lock);
38583985 #ifdef CONFIG_PCI_IOV
38593986 switch (hw->mac.type) {
38603987 case e1000_82576:
....@@ -3912,6 +4039,7 @@
39124039 /**
39134040 * igb_open - Called when a network interface is made active
39144041 * @netdev: network interface device structure
4042
+ * @resuming: indicates whether we are in a resume call
39154043 *
39164044 * Returns 0 on success, negative value on failure
39174045 *
....@@ -4029,6 +4157,7 @@
40294157 /**
40304158 * igb_close - Disables a network interface
40314159 * @netdev: network interface device structure
4160
+ * @suspending: indicates we are in a suspend call
40324161 *
40334162 * Returns 0, this is not allowed to fail
40344163 *
....@@ -4222,6 +4351,7 @@
42224351 **/
42234352 int igb_setup_rx_resources(struct igb_ring *rx_ring)
42244353 {
4354
+ struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
42254355 struct device *dev = rx_ring->dev;
42264356 int size;
42274357
....@@ -4243,6 +4373,13 @@
42434373 rx_ring->next_to_alloc = 0;
42444374 rx_ring->next_to_clean = 0;
42454375 rx_ring->next_to_use = 0;
4376
+
4377
+ rx_ring->xdp_prog = adapter->xdp_prog;
4378
+
4379
+ /* XDP RX-queue info */
4380
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
4381
+ rx_ring->queue_index) < 0)
4382
+ goto err;
42464383
42474384 return 0;
42484385
....@@ -4362,8 +4499,7 @@
43624499 else
43634500 mrqc |= E1000_MRQC_ENABLE_VMDQ;
43644501 } else {
4365
- if (hw->mac.type != e1000_i211)
4366
- mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4502
+ mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
43674503 }
43684504 igb_vmm_control(adapter);
43694505
....@@ -4502,6 +4638,37 @@
45024638 }
45034639
45044640 /**
4641
+ * igb_setup_srrctl - configure the split and replication receive control
4642
+ * registers
4643
+ * @adapter: Board private structure
4644
+ * @ring: receive ring to be configured
4645
+ **/
4646
+void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4647
+{
4648
+ struct e1000_hw *hw = &adapter->hw;
4649
+ int reg_idx = ring->reg_idx;
4650
+ u32 srrctl = 0;
4651
+
4652
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4653
+ if (ring_uses_large_buffer(ring))
4654
+ srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4655
+ else
4656
+ srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4657
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4658
+ if (hw->mac.type >= e1000_82580)
4659
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
4660
+ /* Only set Drop Enable if VFs allocated, or we are supporting multiple
4661
+ * queues and rx flow control is disabled
4662
+ */
4663
+ if (adapter->vfs_allocated_count ||
4664
+ (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4665
+ adapter->num_rx_queues > 1))
4666
+ srrctl |= E1000_SRRCTL_DROP_EN;
4667
+
4668
+ wr32(E1000_SRRCTL(reg_idx), srrctl);
4669
+}
4670
+
4671
+/**
45054672 * igb_configure_rx_ring - Configure a receive ring after Reset
45064673 * @adapter: board private structure
45074674 * @ring: receive ring to be configured
....@@ -4515,7 +4682,11 @@
45154682 union e1000_adv_rx_desc *rx_desc;
45164683 u64 rdba = ring->dma;
45174684 int reg_idx = ring->reg_idx;
4518
- u32 srrctl = 0, rxdctl = 0;
4685
+ u32 rxdctl = 0;
4686
+
4687
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4688
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4689
+ MEM_TYPE_PAGE_SHARED, NULL));
45194690
45204691 /* disable the queue */
45214692 wr32(E1000_RXDCTL(reg_idx), 0);
....@@ -4533,19 +4704,7 @@
45334704 writel(0, ring->tail);
45344705
45354706 /* set descriptor configuration */
4536
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4537
- if (ring_uses_large_buffer(ring))
4538
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4539
- else
4540
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4541
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4542
- if (hw->mac.type >= e1000_82580)
4543
- srrctl |= E1000_SRRCTL_TIMESTAMP;
4544
- /* Only set Drop Enable if we are supporting multiple queues */
4545
- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4546
- srrctl |= E1000_SRRCTL_DROP_EN;
4547
-
4548
- wr32(E1000_SRRCTL(reg_idx), srrctl);
4707
+ igb_setup_srrctl(adapter, ring);
45494708
45504709 /* set filtering for VMDQ pools */
45514710 igb_set_vmolr(adapter, reg_idx & 0x7, true);
....@@ -4661,8 +4820,11 @@
46614820 while (i != tx_ring->next_to_use) {
46624821 union e1000_adv_tx_desc *eop_desc, *tx_desc;
46634822
4664
- /* Free all the Tx ring sk_buffs */
4665
- dev_kfree_skb_any(tx_buffer->skb);
4823
+ /* Free all the Tx ring sk_buffs or xdp frames */
4824
+ if (tx_buffer->type == IGB_TYPE_SKB)
4825
+ dev_kfree_skb_any(tx_buffer->skb);
4826
+ else
4827
+ xdp_return_frame(tx_buffer->xdpf);
46664828
46674829 /* unmap skb header data */
46684830 dma_unmap_single(tx_ring->dev,
....@@ -4735,6 +4897,8 @@
47354897 {
47364898 igb_clean_rx_ring(rx_ring);
47374899
4900
+ rx_ring->xdp_prog = NULL;
4901
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
47384902 vfree(rx_ring->rx_buffer_info);
47394903 rx_ring->rx_buffer_info = NULL;
47404904
....@@ -4771,8 +4935,7 @@
47714935 {
47724936 u16 i = rx_ring->next_to_clean;
47734937
4774
- if (rx_ring->skb)
4775
- dev_kfree_skb(rx_ring->skb);
4938
+ dev_kfree_skb(rx_ring->skb);
47764939 rx_ring->skb = NULL;
47774940
47784941 /* Free all the Rx ring sk_buffs */
....@@ -4896,14 +5059,14 @@
48965059 /* VLAN filtering needed for VLAN prio filter */
48975060 if (adapter->netdev->features & NETIF_F_NTUPLE)
48985061 break;
4899
- /* fall through */
5062
+ fallthrough;
49005063 case e1000_82576:
49015064 case e1000_82580:
49025065 case e1000_i354:
49035066 /* VLAN filtering needed for pool filtering */
49045067 if (adapter->vfs_allocated_count)
49055068 break;
4906
- /* fall through */
5069
+ fallthrough;
49075070 default:
49085071 return 1;
49095072 }
....@@ -5183,7 +5346,7 @@
51835346 case e1000_media_type_copper:
51845347 if (!hw->mac.get_link_status)
51855348 return true;
5186
- /* fall through */
5349
+ fallthrough;
51875350 case e1000_media_type_internal_serdes:
51885351 hw->mac.ops.check_for_link(hw);
51895352 link_active = !hw->mac.get_link_status;
....@@ -5247,7 +5410,7 @@
52475410
52485411 /**
52495412 * igb_watchdog - Timer Call-back
5250
- * @data: pointer to adapter cast into an unsigned long
5413
+ * @t: pointer to timer_list containing our private info pointer
52515414 **/
52525415 static void igb_watchdog(struct timer_list *t)
52535416 {
....@@ -5346,7 +5509,8 @@
53465509 break;
53475510 }
53485511
5349
- if (adapter->link_speed != SPEED_1000)
5512
+ if (adapter->link_speed != SPEED_1000 ||
5513
+ !hw->phy.ops.read_reg)
53505514 goto no_wait;
53515515
53525516 /* wait for Remote receiver status OK */
....@@ -5714,8 +5878,8 @@
57145878 * should have been handled by the upper layers.
57155879 */
57165880 if (tx_ring->launchtime_enable) {
5717
- ts = ns_to_timespec64(first->skb->tstamp);
5718
- first->skb->tstamp = 0;
5881
+ ts = ktime_to_timespec64(first->skb->tstamp);
5882
+ first->skb->tstamp = ktime_set(0, 0);
57195883 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
57205884 } else {
57215885 context_desc->seqnum_seed = 0;
....@@ -5735,6 +5899,7 @@
57355899 } ip;
57365900 union {
57375901 struct tcphdr *tcp;
5902
+ struct udphdr *udp;
57385903 unsigned char *hdr;
57395904 } l4;
57405905 u32 paylen, l4_offset;
....@@ -5754,7 +5919,8 @@
57545919 l4.hdr = skb_checksum_start(skb);
57555920
57565921 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5757
- type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5922
+ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5923
+ E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
57585924
57595925 /* initialize outer IP header fields */
57605926 if (ip.v4->version == 4) {
....@@ -5782,12 +5948,19 @@
57825948 /* determine offset of inner transport header */
57835949 l4_offset = l4.hdr - skb->data;
57845950
5785
- /* compute length of segmentation header */
5786
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5787
-
57885951 /* remove payload length from inner checksum */
57895952 paylen = skb->len - l4_offset;
5790
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
5953
+ if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
5954
+ /* compute length of segmentation header */
5955
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5956
+ csum_replace_by_diff(&l4.tcp->check,
5957
+ (__force __wsum)htonl(paylen));
5958
+ } else {
5959
+ /* compute length of segmentation header */
5960
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
5961
+ csum_replace_by_diff(&l4.udp->check,
5962
+ (__force __wsum)htonl(paylen));
5963
+ }
57915964
57925965 /* update gso size and bytecount with header size */
57935966 first->gso_segs = skb_shinfo(skb)->gso_segs;
....@@ -5834,7 +6007,7 @@
58346007 switch (skb->csum_offset) {
58356008 case offsetof(struct tcphdr, check):
58366009 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5837
- /* fall through */
6010
+ fallthrough;
58386011 case offsetof(struct udphdr, check):
58396012 break;
58406013 case offsetof(struct sctphdr, checksum):
....@@ -5846,7 +6019,7 @@
58466019 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
58476020 break;
58486021 }
5849
- /* fall through */
6022
+ fallthrough;
58506023 default:
58516024 skb_checksum_help(skb);
58526025 goto csum_failed;
....@@ -5958,7 +6131,7 @@
59586131 struct sk_buff *skb = first->skb;
59596132 struct igb_tx_buffer *tx_buffer;
59606133 union e1000_adv_tx_desc *tx_desc;
5961
- struct skb_frag_struct *frag;
6134
+ skb_frag_t *frag;
59626135 dma_addr_t dma;
59636136 unsigned int data_len, size;
59646137 u32 tx_flags = first->tx_flags;
....@@ -6035,6 +6208,8 @@
60356208 /* set the timestamp */
60366209 first->time_stamp = jiffies;
60376210
6211
+ skb_tx_timestamp(skb);
6212
+
60386213 /* Force memory writes to complete before letting h/w know there
60396214 * are new descriptors to fetch. (Only applicable for weak-ordered
60406215 * memory model archs, such as IA-64).
....@@ -6056,13 +6231,8 @@
60566231 /* Make sure there is space in the ring for the next send. */
60576232 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
60586233
6059
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
6234
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
60606235 writel(i, tx_ring->tail);
6061
-
6062
- /* we need this if more than one processor can write to our tail
6063
- * at a time, it synchronizes IO on IA64/Altix systems
6064
- */
6065
- mmiowb();
60666236 }
60676237 return 0;
60686238
....@@ -6099,6 +6269,80 @@
60996269 return -1;
61006270 }
61016271
6272
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
6273
+ struct igb_ring *tx_ring,
6274
+ struct xdp_frame *xdpf)
6275
+{
6276
+ union e1000_adv_tx_desc *tx_desc;
6277
+ u32 len, cmd_type, olinfo_status;
6278
+ struct igb_tx_buffer *tx_buffer;
6279
+ dma_addr_t dma;
6280
+ u16 i;
6281
+
6282
+ len = xdpf->len;
6283
+
6284
+ if (unlikely(!igb_desc_unused(tx_ring)))
6285
+ return IGB_XDP_CONSUMED;
6286
+
6287
+ dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
6288
+ if (dma_mapping_error(tx_ring->dev, dma))
6289
+ return IGB_XDP_CONSUMED;
6290
+
6291
+ /* record the location of the first descriptor for this packet */
6292
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6293
+ tx_buffer->bytecount = len;
6294
+ tx_buffer->gso_segs = 1;
6295
+ tx_buffer->protocol = 0;
6296
+
6297
+ i = tx_ring->next_to_use;
6298
+ tx_desc = IGB_TX_DESC(tx_ring, i);
6299
+
6300
+ dma_unmap_len_set(tx_buffer, len, len);
6301
+ dma_unmap_addr_set(tx_buffer, dma, dma);
6302
+ tx_buffer->type = IGB_TYPE_XDP;
6303
+ tx_buffer->xdpf = xdpf;
6304
+
6305
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
6306
+
6307
+ /* put descriptor type bits */
6308
+ cmd_type = E1000_ADVTXD_DTYP_DATA |
6309
+ E1000_ADVTXD_DCMD_DEXT |
6310
+ E1000_ADVTXD_DCMD_IFCS;
6311
+ cmd_type |= len | IGB_TXD_DCMD;
6312
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6313
+
6314
+ olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
6315
+ /* 82575 requires a unique index per ring */
6316
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6317
+ olinfo_status |= tx_ring->reg_idx << 4;
6318
+
6319
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6320
+
6321
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
6322
+
6323
+ /* set the timestamp */
6324
+ tx_buffer->time_stamp = jiffies;
6325
+
6326
+ /* Avoid any potential race with xdp_xmit and cleanup */
6327
+ smp_wmb();
6328
+
6329
+ /* set next_to_watch value indicating a packet is present */
6330
+ i++;
6331
+ if (i == tx_ring->count)
6332
+ i = 0;
6333
+
6334
+ tx_buffer->next_to_watch = tx_desc;
6335
+ tx_ring->next_to_use = i;
6336
+
6337
+ /* Make sure there is space in the ring for the next send. */
6338
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6339
+
6340
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6341
+ writel(i, tx_ring->tail);
6342
+
6343
+ return IGB_XDP_TX;
6344
+}
6345
+
61026346 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
61036347 struct igb_ring *tx_ring)
61046348 {
....@@ -6117,7 +6361,8 @@
61176361 * otherwise try next time
61186362 */
61196363 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6120
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6364
+ count += TXD_USE_COUNT(skb_frag_size(
6365
+ &skb_shinfo(skb)->frags[f]));
61216366
61226367 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
61236368 /* this is a hard error */
....@@ -6126,6 +6371,7 @@
61266371
61276372 /* record the location of the first descriptor for this packet */
61286373 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6374
+ first->type = IGB_TYPE_SKB;
61296375 first->skb = skb;
61306376 first->bytecount = skb->len;
61316377 first->gso_segs = 1;
....@@ -6162,8 +6408,6 @@
61626408 goto out_drop;
61636409 else if (!tso)
61646410 igb_tx_csum(tx_ring, first);
6165
-
6166
- skb_tx_timestamp(skb);
61676411
61686412 if (igb_tx_map(tx_ring, first, hdr_len))
61696413 goto cleanup_tx_tstamp;
....@@ -6215,8 +6459,9 @@
62156459 /**
62166460 * igb_tx_timeout - Respond to a Tx Hang
62176461 * @netdev: network interface device structure
6462
+ * @txqueue: number of the Tx queue that hung (unused)
62186463 **/
6219
-static void igb_tx_timeout(struct net_device *netdev)
6464
+static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
62206465 {
62216466 struct igb_adapter *adapter = netdev_priv(netdev);
62226467 struct e1000_hw *hw = &adapter->hw;
....@@ -6277,8 +6522,22 @@
62776522 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
62786523 {
62796524 struct igb_adapter *adapter = netdev_priv(netdev);
6280
- struct pci_dev *pdev = adapter->pdev;
6281
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6525
+ int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
6526
+
6527
+ if (adapter->xdp_prog) {
6528
+ int i;
6529
+
6530
+ for (i = 0; i < adapter->num_rx_queues; i++) {
6531
+ struct igb_ring *ring = adapter->rx_ring[i];
6532
+
6533
+ if (max_frame > igb_rx_bufsz(ring)) {
6534
+ netdev_warn(adapter->netdev,
6535
+ "Requested MTU size is not supported with XDP. Max frame size is %d\n",
6536
+ max_frame);
6537
+ return -EINVAL;
6538
+ }
6539
+ }
6540
+ }
62826541
62836542 /* adjust max frame to be at least the size of a standard frame */
62846543 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
....@@ -6293,8 +6552,8 @@
62936552 if (netif_running(netdev))
62946553 igb_down(adapter);
62956554
6296
- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
6297
- netdev->mtu, new_mtu);
6555
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
6556
+ netdev->mtu, new_mtu);
62986557 netdev->mtu = new_mtu;
62996558
63006559 if (netif_running(netdev))
....@@ -6738,7 +6997,7 @@
67386997 igb_setup_dca(adapter);
67396998 break;
67406999 }
6741
- /* Fall Through since DCA is disabled. */
7000
+ fallthrough; /* since DCA is disabled. */
67427001 case DCA_PROVIDER_REMOVE:
67437002 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
67447003 /* without this a class_device is left
....@@ -7157,7 +7416,7 @@
71577416 {
71587417 struct e1000_hw *hw = &adapter->hw;
71597418 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7160
- u32 reg, msgbuf[3];
7419
+ u32 reg, msgbuf[3] = {};
71617420 u8 *addr = (u8 *)(&msgbuf[1]);
71627421
71637422 /* process all the same items cleared in a function level reset */
....@@ -7191,7 +7450,7 @@
71917450
71927451 for (i = 0; i < hw->mac.rar_entry_count; i++) {
71937452 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7194
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7453
+ eth_zero_addr(adapter->mac_table[i].addr);
71957454 adapter->mac_table[i].queue = 0;
71967455 igb_rar_set_index(adapter, i);
71977456 }
....@@ -7340,7 +7599,7 @@
73407599 } else {
73417600 adapter->mac_table[i].state = 0;
73427601 adapter->mac_table[i].queue = 0;
7343
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7602
+ eth_zero_addr(adapter->mac_table[i].addr);
73447603 }
73457604
73467605 igb_rar_set_index(adapter, i);
....@@ -7600,8 +7859,10 @@
76007859 static void igb_msg_task(struct igb_adapter *adapter)
76017860 {
76027861 struct e1000_hw *hw = &adapter->hw;
7862
+ unsigned long flags;
76037863 u32 vf;
76047864
7865
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
76057866 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
76067867 /* process any reset requests */
76077868 if (!igb_check_for_rst(hw, vf))
....@@ -7615,6 +7876,7 @@
76157876 if (!igb_check_for_ack(hw, vf))
76167877 igb_rcv_ack_from_vf(adapter, vf);
76177878 }
7879
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
76187880 }
76197881
76207882 /**
....@@ -7778,11 +8040,13 @@
77788040 if (!clean_complete)
77798041 return budget;
77808042
7781
- /* If not enough Rx work done, exit the polling mode */
7782
- napi_complete_done(napi, work_done);
7783
- igb_ring_irq_enable(q_vector);
8043
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
8044
+ * poll us due to busy-polling
8045
+ */
8046
+ if (likely(napi_complete_done(napi, work_done)))
8047
+ igb_ring_irq_enable(q_vector);
77848048
7785
- return 0;
8049
+ return work_done;
77868050 }
77878051
77888052 /**
....@@ -7831,7 +8095,10 @@
78318095 total_packets += tx_buffer->gso_segs;
78328096
78338097 /* free the skb */
7834
- napi_consume_skb(tx_buffer->skb, napi_budget);
8098
+ if (tx_buffer->type == IGB_TYPE_SKB)
8099
+ napi_consume_skb(tx_buffer->skb, napi_budget);
8100
+ else
8101
+ xdp_return_frame(tx_buffer->xdpf);
78358102
78368103 /* unmap skb header data */
78378104 dma_unmap_single(tx_ring->dev,
....@@ -7990,7 +8257,8 @@
79908257 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
79918258 }
79928259
7993
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
8260
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
8261
+ int rx_buf_pgcnt)
79948262 {
79958263 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
79968264 struct page *page = rx_buffer->page;
....@@ -8001,7 +8269,7 @@
80018269
80028270 #if (PAGE_SIZE < 8192)
80038271 /* if we are only owner of page we can reuse it */
8004
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
8272
+ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
80058273 return false;
80068274 #else
80078275 #define IGB_LAST_OFFSET \
....@@ -8015,8 +8283,8 @@
80158283 * the pagecnt_bias and page count so that we fully restock the
80168284 * number of references the driver holds.
80178285 */
8018
- if (unlikely(!pagecnt_bias)) {
8019
- page_ref_add(page, USHRT_MAX);
8286
+ if (unlikely(pagecnt_bias == 1)) {
8287
+ page_ref_add(page, USHRT_MAX - 1);
80208288 rx_buffer->pagecnt_bias = USHRT_MAX;
80218289 }
80228290
....@@ -8055,23 +8323,21 @@
80558323
80568324 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
80578325 struct igb_rx_buffer *rx_buffer,
8058
- union e1000_adv_rx_desc *rx_desc,
8059
- unsigned int size)
8326
+ struct xdp_buff *xdp,
8327
+ union e1000_adv_rx_desc *rx_desc)
80608328 {
8061
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
80628329 #if (PAGE_SIZE < 8192)
80638330 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
80648331 #else
8065
- unsigned int truesize = SKB_DATA_ALIGN(size);
8332
+ unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
8333
+ xdp->data_hard_start);
80668334 #endif
8335
+ unsigned int size = xdp->data_end - xdp->data;
80678336 unsigned int headlen;
80688337 struct sk_buff *skb;
80698338
80708339 /* prefetch first cache line of first page */
8071
- prefetch(va);
8072
-#if L1_CACHE_BYTES < 128
8073
- prefetch(va + L1_CACHE_BYTES);
8074
-#endif
8340
+ net_prefetch(xdp->data);
80758341
80768342 /* allocate a skb to store the frags */
80778343 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
....@@ -8079,24 +8345,25 @@
80798345 return NULL;
80808346
80818347 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
8082
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
8083
- va += IGB_TS_HDR_LEN;
8084
- size -= IGB_TS_HDR_LEN;
8348
+ if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
8349
+ xdp->data += IGB_TS_HDR_LEN;
8350
+ size -= IGB_TS_HDR_LEN;
8351
+ }
80858352 }
80868353
80878354 /* Determine available headroom for copy */
80888355 headlen = size;
80898356 if (headlen > IGB_RX_HDR_LEN)
8090
- headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
8357
+ headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
80918358
80928359 /* align pull length to size of long to optimize memcpy performance */
8093
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
8360
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
80948361
80958362 /* update all of the pointers */
80968363 size -= headlen;
80978364 if (size) {
80988365 skb_add_rx_frag(skb, 0, rx_buffer->page,
8099
- (va + headlen) - page_address(rx_buffer->page),
8366
+ (xdp->data + headlen) - page_address(rx_buffer->page),
81008367 size, truesize);
81018368 #if (PAGE_SIZE < 8192)
81028369 rx_buffer->page_offset ^= truesize;
....@@ -8112,37 +8379,38 @@
81128379
81138380 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
81148381 struct igb_rx_buffer *rx_buffer,
8115
- union e1000_adv_rx_desc *rx_desc,
8116
- unsigned int size)
8382
+ struct xdp_buff *xdp,
8383
+ union e1000_adv_rx_desc *rx_desc)
81178384 {
8118
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
81198385 #if (PAGE_SIZE < 8192)
81208386 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
81218387 #else
81228388 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8123
- SKB_DATA_ALIGN(IGB_SKB_PAD + size);
8389
+ SKB_DATA_ALIGN(xdp->data_end -
8390
+ xdp->data_hard_start);
81248391 #endif
8392
+ unsigned int metasize = xdp->data - xdp->data_meta;
81258393 struct sk_buff *skb;
81268394
81278395 /* prefetch first cache line of first page */
8128
- prefetch(va);
8129
-#if L1_CACHE_BYTES < 128
8130
- prefetch(va + L1_CACHE_BYTES);
8131
-#endif
8396
+ net_prefetch(xdp->data_meta);
81328397
81338398 /* build an skb around the page buffer */
8134
- skb = build_skb(va - IGB_SKB_PAD, truesize);
8399
+ skb = build_skb(xdp->data_hard_start, truesize);
81358400 if (unlikely(!skb))
81368401 return NULL;
81378402
81388403 /* update pointers within the skb to store the data */
8139
- skb_reserve(skb, IGB_SKB_PAD);
8140
- __skb_put(skb, size);
8404
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
8405
+ __skb_put(skb, xdp->data_end - xdp->data);
8406
+
8407
+ if (metasize)
8408
+ skb_metadata_set(skb, metasize);
81418409
81428410 /* pull timestamp out of packet data */
81438411 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8144
- igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
8145
- __skb_pull(skb, IGB_TS_HDR_LEN);
8412
+ if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
8413
+ __skb_pull(skb, IGB_TS_HDR_LEN);
81468414 }
81478415
81488416 /* update buffer offset */
....@@ -8153,6 +8421,81 @@
81538421 #endif
81548422
81558423 return skb;
8424
+}
8425
+
8426
+static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
8427
+ struct igb_ring *rx_ring,
8428
+ struct xdp_buff *xdp)
8429
+{
8430
+ int err, result = IGB_XDP_PASS;
8431
+ struct bpf_prog *xdp_prog;
8432
+ u32 act;
8433
+
8434
+ rcu_read_lock();
8435
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
8436
+
8437
+ if (!xdp_prog)
8438
+ goto xdp_out;
8439
+
8440
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
8441
+
8442
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
8443
+ switch (act) {
8444
+ case XDP_PASS:
8445
+ break;
8446
+ case XDP_TX:
8447
+ result = igb_xdp_xmit_back(adapter, xdp);
8448
+ if (result == IGB_XDP_CONSUMED)
8449
+ goto out_failure;
8450
+ break;
8451
+ case XDP_REDIRECT:
8452
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
8453
+ if (err)
8454
+ goto out_failure;
8455
+ result = IGB_XDP_REDIR;
8456
+ break;
8457
+ default:
8458
+ bpf_warn_invalid_xdp_action(act);
8459
+ fallthrough;
8460
+ case XDP_ABORTED:
8461
+out_failure:
8462
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
8463
+ fallthrough;
8464
+ case XDP_DROP:
8465
+ result = IGB_XDP_CONSUMED;
8466
+ break;
8467
+ }
8468
+xdp_out:
8469
+ rcu_read_unlock();
8470
+ return ERR_PTR(-result);
8471
+}
8472
+
8473
+static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
8474
+ unsigned int size)
8475
+{
8476
+ unsigned int truesize;
8477
+
8478
+#if (PAGE_SIZE < 8192)
8479
+ truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
8480
+#else
8481
+ truesize = ring_uses_build_skb(rx_ring) ?
8482
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
8483
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
8484
+ SKB_DATA_ALIGN(size);
8485
+#endif
8486
+ return truesize;
8487
+}
8488
+
8489
+static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
8490
+ struct igb_rx_buffer *rx_buffer,
8491
+ unsigned int size)
8492
+{
8493
+ unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
8494
+#if (PAGE_SIZE < 8192)
8495
+ rx_buffer->page_offset ^= truesize;
8496
+#else
8497
+ rx_buffer->page_offset += truesize;
8498
+#endif
81568499 }
81578500
81588501 static inline void igb_rx_checksum(struct igb_ring *ring,
....@@ -8209,7 +8552,6 @@
82098552 * igb_is_non_eop - process handling of non-EOP buffers
82108553 * @rx_ring: Rx ring being processed
82118554 * @rx_desc: Rx descriptor for current buffer
8212
- * @skb: current socket buffer containing buffer in progress
82138555 *
82148556 * This function updates next to clean. If the buffer is an EOP buffer
82158557 * this function exits returning false, otherwise it will place the
....@@ -8251,6 +8593,10 @@
82518593 union e1000_adv_rx_desc *rx_desc,
82528594 struct sk_buff *skb)
82538595 {
8596
+ /* XDP packets use error pointer so abort at this point */
8597
+ if (IS_ERR(skb))
8598
+ return true;
8599
+
82548600 if (unlikely((igb_test_staterr(rx_desc,
82558601 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
82568602 struct net_device *netdev = rx_ring->netdev;
....@@ -8297,7 +8643,7 @@
82978643
82988644 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
82998645 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8300
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
8646
+ vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
83018647 else
83028648 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
83038649
....@@ -8309,12 +8655,23 @@
83098655 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
83108656 }
83118657
8658
+static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8659
+{
8660
+ return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8661
+}
8662
+
83128663 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8313
- const unsigned int size)
8664
+ const unsigned int size, int *rx_buf_pgcnt)
83148665 {
83158666 struct igb_rx_buffer *rx_buffer;
83168667
83178668 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8669
+ *rx_buf_pgcnt =
8670
+#if (PAGE_SIZE < 8192)
8671
+ page_count(rx_buffer->page);
8672
+#else
8673
+ 0;
8674
+#endif
83188675 prefetchw(rx_buffer->page);
83198676
83208677 /* we are reusing so sync this buffer for CPU use */
....@@ -8330,9 +8687,9 @@
83308687 }
83318688
83328689 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8333
- struct igb_rx_buffer *rx_buffer)
8690
+ struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
83348691 {
8335
- if (igb_can_reuse_rx_page(rx_buffer)) {
8692
+ if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
83368693 /* hand second half of page back to the ring */
83378694 igb_reuse_rx_page(rx_ring, rx_buffer);
83388695 } else {
....@@ -8352,10 +8709,21 @@
83528709
83538710 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
83548711 {
8712
+ struct igb_adapter *adapter = q_vector->adapter;
83558713 struct igb_ring *rx_ring = q_vector->rx.ring;
83568714 struct sk_buff *skb = rx_ring->skb;
83578715 unsigned int total_bytes = 0, total_packets = 0;
83588716 u16 cleaned_count = igb_desc_unused(rx_ring);
8717
+ unsigned int xdp_xmit = 0;
8718
+ struct xdp_buff xdp;
8719
+ int rx_buf_pgcnt;
8720
+
8721
+ xdp.rxq = &rx_ring->xdp_rxq;
8722
+
8723
+ /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
8724
+#if (PAGE_SIZE < 8192)
8725
+ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
8726
+#endif
83598727
83608728 while (likely(total_packets < budget)) {
83618729 union e1000_adv_rx_desc *rx_desc;
....@@ -8379,16 +8747,41 @@
83798747 */
83808748 dma_rmb();
83818749
8382
- rx_buffer = igb_get_rx_buffer(rx_ring, size);
8750
+ rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
83838751
83848752 /* retrieve a buffer from the ring */
8385
- if (skb)
8753
+ if (!skb) {
8754
+ xdp.data = page_address(rx_buffer->page) +
8755
+ rx_buffer->page_offset;
8756
+ xdp.data_meta = xdp.data;
8757
+ xdp.data_hard_start = xdp.data -
8758
+ igb_rx_offset(rx_ring);
8759
+ xdp.data_end = xdp.data + size;
8760
+#if (PAGE_SIZE > 4096)
8761
+ /* At larger PAGE_SIZE, frame_sz depend on len size */
8762
+ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
8763
+#endif
8764
+ skb = igb_run_xdp(adapter, rx_ring, &xdp);
8765
+ }
8766
+
8767
+ if (IS_ERR(skb)) {
8768
+ unsigned int xdp_res = -PTR_ERR(skb);
8769
+
8770
+ if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
8771
+ xdp_xmit |= xdp_res;
8772
+ igb_rx_buffer_flip(rx_ring, rx_buffer, size);
8773
+ } else {
8774
+ rx_buffer->pagecnt_bias++;
8775
+ }
8776
+ total_packets++;
8777
+ total_bytes += size;
8778
+ } else if (skb)
83868779 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
83878780 else if (ring_uses_build_skb(rx_ring))
8388
- skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
8781
+ skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
83898782 else
83908783 skb = igb_construct_skb(rx_ring, rx_buffer,
8391
- rx_desc, size);
8784
+ &xdp, rx_desc);
83928785
83938786 /* exit if we failed to retrieve a buffer */
83948787 if (!skb) {
....@@ -8397,7 +8790,7 @@
83978790 break;
83988791 }
83998792
8400
- igb_put_rx_buffer(rx_ring, rx_buffer);
8793
+ igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
84018794 cleaned_count++;
84028795
84038796 /* fetch next buffer in frame if non-eop */
....@@ -8428,6 +8821,15 @@
84288821 /* place incomplete frames back on ring for completion */
84298822 rx_ring->skb = skb;
84308823
8824
+ if (xdp_xmit & IGB_XDP_REDIR)
8825
+ xdp_do_flush();
8826
+
8827
+ if (xdp_xmit & IGB_XDP_TX) {
8828
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
8829
+
8830
+ igb_xdp_ring_update_tail(tx_ring);
8831
+ }
8832
+
84318833 u64_stats_update_begin(&rx_ring->rx_syncp);
84328834 rx_ring->rx_stats.packets += total_packets;
84338835 rx_ring->rx_stats.bytes += total_bytes;
....@@ -8439,11 +8841,6 @@
84398841 igb_alloc_rx_buffers(rx_ring, cleaned_count);
84408842
84418843 return total_packets;
8442
-}
8443
-
8444
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8445
-{
8446
- return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
84478844 }
84488845
84498846 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
....@@ -8482,14 +8879,16 @@
84828879 bi->dma = dma;
84838880 bi->page = page;
84848881 bi->page_offset = igb_rx_offset(rx_ring);
8485
- bi->pagecnt_bias = 1;
8882
+ page_ref_add(page, USHRT_MAX - 1);
8883
+ bi->pagecnt_bias = USHRT_MAX;
84868884
84878885 return true;
84888886 }
84898887
84908888 /**
8491
- * igb_alloc_rx_buffers - Replace used receive buffers; packet split
8492
- * @adapter: address of board private structure
8889
+ * igb_alloc_rx_buffers - Replace used receive buffers
8890
+ * @rx_ring: rx descriptor ring to allocate new receive buffers
8891
+ * @cleaned_count: count of buffers to allocate
84938892 **/
84948893 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
84958894 {
....@@ -8558,9 +8957,9 @@
85588957
85598958 /**
85608959 * igb_mii_ioctl -
8561
- * @netdev:
8562
- * @ifreq:
8563
- * @cmd:
8960
+ * @netdev: pointer to netdev struct
8961
+ * @ifr: interface structure
8962
+ * @cmd: ioctl command to execute
85648963 **/
85658964 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
85668965 {
....@@ -8588,9 +8987,9 @@
85888987
85898988 /**
85908989 * igb_ioctl -
8591
- * @netdev:
8592
- * @ifreq:
8593
- * @cmd:
8990
+ * @netdev: pointer to netdev struct
8991
+ * @ifr: interface structure
8992
+ * @cmd: ioctl command to execute
85948993 **/
85958994 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
85968995 {
....@@ -8875,7 +9274,7 @@
88759274 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
88769275 }
88779276
8878
-static int __maybe_unused igb_resume(struct device *dev)
9277
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
88799278 {
88809279 struct pci_dev *pdev = to_pci_dev(dev);
88819280 struct net_device *netdev = pci_get_drvdata(pdev);
....@@ -8918,21 +9317,27 @@
89189317
89199318 wr32(E1000_WUS, ~0);
89209319
8921
- rtnl_lock();
9320
+ if (!rpm)
9321
+ rtnl_lock();
89229322 if (!err && netif_running(netdev))
89239323 err = __igb_open(netdev, true);
89249324
89259325 if (!err)
89269326 netif_device_attach(netdev);
8927
- rtnl_unlock();
9327
+ if (!rpm)
9328
+ rtnl_unlock();
89289329
89299330 return err;
89309331 }
89319332
9333
+static int __maybe_unused igb_resume(struct device *dev)
9334
+{
9335
+ return __igb_resume(dev, false);
9336
+}
9337
+
89329338 static int __maybe_unused igb_runtime_idle(struct device *dev)
89339339 {
8934
- struct pci_dev *pdev = to_pci_dev(dev);
8935
- struct net_device *netdev = pci_get_drvdata(pdev);
9340
+ struct net_device *netdev = dev_get_drvdata(dev);
89369341 struct igb_adapter *adapter = netdev_priv(netdev);
89379342
89389343 if (!igb_has_link(adapter))
....@@ -8948,7 +9353,7 @@
89489353
89499354 static int __maybe_unused igb_runtime_resume(struct device *dev)
89509355 {
8951
- return igb_resume(dev);
9356
+ return __igb_resume(dev, true);
89529357 }
89539358
89549359 static void igb_shutdown(struct pci_dev *pdev)
....@@ -9064,7 +9469,7 @@
90649469 * @pdev: Pointer to PCI device
90659470 *
90669471 * Restart the card from scratch, as if from a cold-boot. Implementation
9067
- * resembles the first-half of the igb_resume routine.
9472
+ * resembles the first-half of the __igb_resume routine.
90689473 **/
90699474 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
90709475 {
....@@ -9072,7 +9477,6 @@
90729477 struct igb_adapter *adapter = netdev_priv(netdev);
90739478 struct e1000_hw *hw = &adapter->hw;
90749479 pci_ers_result_t result;
9075
- int err;
90769480
90779481 if (pci_enable_device_mem(pdev)) {
90789482 dev_err(&pdev->dev,
....@@ -9096,14 +9500,6 @@
90969500 result = PCI_ERS_RESULT_RECOVERED;
90979501 }
90989502
9099
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
9100
- if (err) {
9101
- dev_err(&pdev->dev,
9102
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
9103
- err);
9104
- /* non-fatal, continue */
9105
- }
9106
-
91079503 return result;
91089504 }
91099505
....@@ -9113,7 +9509,7 @@
91139509 *
91149510 * This callback is called when the error recovery driver tells us that
91159511 * its OK to resume normal operation. Implementation resembles the
9116
- * second-half of the igb_resume routine.
9512
+ * second-half of the __igb_resume routine.
91179513 */
91189514 static void igb_io_resume(struct pci_dev *pdev)
91199515 {
....@@ -9415,13 +9811,13 @@
94159811 reg = rd32(E1000_DTXCTL);
94169812 reg |= E1000_DTXCTL_VLAN_ADDED;
94179813 wr32(E1000_DTXCTL, reg);
9418
- /* Fall through */
9814
+ fallthrough;
94199815 case e1000_82580:
94209816 /* enable replication vlan tag stripping */
94219817 reg = rd32(E1000_RPLOLR);
94229818 reg |= E1000_RPLOLR_STRVLAN;
94239819 wr32(E1000_RPLOLR, reg);
9424
- /* Fall through */
9820
+ fallthrough;
94259821 case e1000_i350:
94269822 /* none of the above registers are supported by i350 */
94279823 break;
....@@ -9443,11 +9839,10 @@
94439839 struct e1000_hw *hw = &adapter->hw;
94449840 u32 dmac_thr;
94459841 u16 hwm;
9842
+ u32 reg;
94469843
94479844 if (hw->mac.type > e1000_82580) {
94489845 if (adapter->flags & IGB_FLAG_DMAC) {
9449
- u32 reg;
9450
-
94519846 /* force threshold to 0. */
94529847 wr32(E1000_DMCTXTH, 0);
94539848
....@@ -9480,7 +9875,6 @@
94809875 /* Disable BMC-to-OS Watchdog Enable */
94819876 if (hw->mac.type != e1000_i354)
94829877 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9483
-
94849878 wr32(E1000_DMACR, reg);
94859879
94869880 /* no lower threshold to disable
....@@ -9497,12 +9891,12 @@
94979891 */
94989892 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
94999893 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9894
+ }
95009895
9501
- /* make low power state decision controlled
9502
- * by DMA coal
9503
- */
9896
+ if (hw->mac.type >= e1000_i210 ||
9897
+ (adapter->flags & IGB_FLAG_DMAC)) {
95049898 reg = rd32(E1000_PCIEMISC);
9505
- reg &= ~E1000_PCIEMISC_LX_DECISION;
9899
+ reg |= E1000_PCIEMISC_LX_DECISION;
95069900 wr32(E1000_PCIEMISC, reg);
95079901 } /* endif adapter->dmac is not disabled */
95089902 } else if (hw->mac.type == e1000_82580) {