.. | .. |
---|
30 | 30 | #include <linux/if_ether.h> |
---|
31 | 31 | #include <linux/aer.h> |
---|
32 | 32 | #include <linux/prefetch.h> |
---|
| 33 | +#include <linux/bpf.h> |
---|
| 34 | +#include <linux/bpf_trace.h> |
---|
33 | 35 | #include <linux/pm_runtime.h> |
---|
34 | 36 | #include <linux/etherdevice.h> |
---|
35 | 37 | #ifdef CONFIG_IGB_DCA |
---|
.. | .. |
---|
37 | 39 | #endif |
---|
38 | 40 | #include <linux/i2c.h> |
---|
39 | 41 | #include "igb.h" |
---|
40 | | - |
---|
41 | | -#define MAJ 5 |
---|
42 | | -#define MIN 4 |
---|
43 | | -#define BUILD 0 |
---|
44 | | -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ |
---|
45 | | -__stringify(BUILD) "-k" |
---|
46 | 42 | |
---|
47 | 43 | enum queue_mode { |
---|
48 | 44 | QUEUE_MODE_STRICT_PRIORITY, |
---|
.. | .. |
---|
55 | 51 | }; |
---|
56 | 52 | |
---|
57 | 53 | char igb_driver_name[] = "igb"; |
---|
58 | | -char igb_driver_version[] = DRV_VERSION; |
---|
59 | 54 | static const char igb_driver_string[] = |
---|
60 | 55 | "Intel(R) Gigabit Ethernet Network Driver"; |
---|
61 | 56 | static const char igb_copyright[] = |
---|
.. | .. |
---|
146 | 141 | static bool igb_clean_tx_irq(struct igb_q_vector *, int); |
---|
147 | 142 | static int igb_clean_rx_irq(struct igb_q_vector *, int); |
---|
148 | 143 | static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); |
---|
149 | | -static void igb_tx_timeout(struct net_device *); |
---|
| 144 | +static void igb_tx_timeout(struct net_device *, unsigned int txqueue); |
---|
150 | 145 | static void igb_reset_task(struct work_struct *); |
---|
151 | 146 | static void igb_vlan_mode(struct net_device *netdev, |
---|
152 | 147 | netdev_features_t features); |
---|
.. | .. |
---|
239 | 234 | |
---|
240 | 235 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); |
---|
241 | 236 | MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); |
---|
242 | | -MODULE_LICENSE("GPL"); |
---|
243 | | -MODULE_VERSION(DRV_VERSION); |
---|
| 237 | +MODULE_LICENSE("GPL v2"); |
---|
244 | 238 | |
---|
245 | 239 | #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) |
---|
246 | 240 | static int debug = -1; |
---|
.. | .. |
---|
557 | 551 | |
---|
558 | 552 | /** |
---|
559 | 553 | * igb_get_i2c_data - Reads the I2C SDA data bit |
---|
560 | | - * @hw: pointer to hardware structure |
---|
561 | | - * @i2cctl: Current value of I2CCTL register |
---|
| 554 | + * @data: opaque pointer to adapter struct |
---|
562 | 555 | * |
---|
563 | 556 | * Returns the I2C data bit value |
---|
564 | 557 | **/ |
---|
.. | .. |
---|
666 | 659 | { |
---|
667 | 660 | int ret; |
---|
668 | 661 | |
---|
669 | | - pr_info("%s - version %s\n", |
---|
670 | | - igb_driver_string, igb_driver_version); |
---|
| 662 | + pr_info("%s\n", igb_driver_string); |
---|
671 | 663 | pr_info("%s\n", igb_copyright); |
---|
672 | 664 | |
---|
673 | 665 | #ifdef CONFIG_IGB_DCA |
---|
.. | .. |
---|
720 | 712 | adapter->rx_ring[i]->reg_idx = rbase_offset + |
---|
721 | 713 | Q_IDX_82576(i); |
---|
722 | 714 | } |
---|
723 | | - /* Fall through */ |
---|
| 715 | + fallthrough; |
---|
724 | 716 | case e1000_82575: |
---|
725 | 717 | case e1000_82580: |
---|
726 | 718 | case e1000_i350: |
---|
727 | 719 | case e1000_i354: |
---|
728 | 720 | case e1000_i210: |
---|
729 | 721 | case e1000_i211: |
---|
730 | | - /* Fall through */ |
---|
731 | 722 | default: |
---|
732 | 723 | for (; i < adapter->num_rx_queues; i++) |
---|
733 | 724 | adapter->rx_ring[i]->reg_idx = rbase_offset + i; |
---|
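The hunk above (and several later ones) replaces `/* Fall through */` comments with the `fallthrough;` pseudo-keyword, so `-Wimplicit-fallthrough` can verify that each fall-through is intentional while still flagging missing `break`s. A minimal sketch of the pattern, loosely modeled on the queue-count switch later in this patch; `demo_max_rss_queues` and its arguments are hypothetical names, not driver code:

```c
#include <linux/types.h>	/* bool; "fallthrough" itself comes from the
				 * always-included compiler_attributes.h */

/* illustrative helper only, not part of the patch */
static int demo_max_rss_queues(int mac_generation, bool vfs_allocated)
{
	int max_rss_queues;

	switch (mac_generation) {
	case 1:				/* stand-in for an i211-class part */
		max_rss_queues = 2;
		break;
	case 2:				/* stand-in for an 82576-class part */
		if (vfs_allocated) {
			max_rss_queues = 2;
			break;
		}
		fallthrough;		/* no VFs: use the default sizing */
	default:
		max_rss_queues = 4;
		break;
	}

	return max_rss_queues;
}
```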
.. | .. |
---|
753 | 744 | struct net_device *netdev = igb->netdev; |
---|
754 | 745 | hw->hw_addr = NULL; |
---|
755 | 746 | netdev_err(netdev, "PCIe link lost\n"); |
---|
| 747 | + WARN(pci_device_is_present(igb->pdev), |
---|
| 748 | + "igb: Failed to read reg 0x%x!\n", reg); |
---|
756 | 749 | } |
---|
757 | 750 | |
---|
758 | 751 | return value; |
---|
.. | .. |
---|
1196 | 1189 | { |
---|
1197 | 1190 | struct igb_q_vector *q_vector; |
---|
1198 | 1191 | struct igb_ring *ring; |
---|
1199 | | - int ring_count, size; |
---|
| 1192 | + int ring_count; |
---|
| 1193 | + size_t size; |
---|
1200 | 1194 | |
---|
1201 | 1195 | /* igb only supports 1 Tx and/or 1 Rx queue per vector */ |
---|
1202 | 1196 | if (txr_count > 1 || rxr_count > 1) |
---|
1203 | 1197 | return -ENOMEM; |
---|
1204 | 1198 | |
---|
1205 | 1199 | ring_count = txr_count + rxr_count; |
---|
1206 | | - size = sizeof(struct igb_q_vector) + |
---|
1207 | | - (sizeof(struct igb_ring) * ring_count); |
---|
| 1200 | + size = struct_size(q_vector, ring, ring_count); |
---|
1208 | 1201 | |
---|
1209 | 1202 | /* allocate q_vector and rings */ |
---|
1210 | 1203 | q_vector = adapter->q_vector[v_idx]; |
---|
1211 | 1204 | if (!q_vector) { |
---|
1212 | 1205 | q_vector = kzalloc(size, GFP_KERNEL); |
---|
1213 | 1206 | } else if (size > ksize(q_vector)) { |
---|
1214 | | - kfree_rcu(q_vector, rcu); |
---|
1215 | | - q_vector = kzalloc(size, GFP_KERNEL); |
---|
| 1207 | + struct igb_q_vector *new_q_vector; |
---|
| 1208 | + |
---|
| 1209 | + new_q_vector = kzalloc(size, GFP_KERNEL); |
---|
| 1210 | + if (new_q_vector) |
---|
| 1211 | + kfree_rcu(q_vector, rcu); |
---|
| 1212 | + q_vector = new_q_vector; |
---|
1216 | 1213 | } else { |
---|
1217 | 1214 | memset(q_vector, 0, size); |
---|
1218 | 1215 | } |
---|
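The allocation above switches from an open-coded `sizeof(struct) + count * sizeof(elem)` sum to `struct_size()` from `<linux/overflow.h>`, which computes the same size for a structure ending in a flexible array while saturating on arithmetic overflow. A minimal sketch of the idiom, using a hypothetical `demo_vector`/`demo_ring` pair rather than the real `igb_q_vector`:

```c
#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>		/* kzalloc() */

struct demo_ring {
	void *desc;
};

struct demo_vector {
	int ring_count;
	struct demo_ring ring[];	/* flexible array member */
};

static struct demo_vector *demo_vector_alloc(int ring_count)
{
	struct demo_vector *v;

	/* struct_size(v, ring, ring_count) ==
	 *	sizeof(*v) + ring_count * sizeof(v->ring[0]),
	 * except the multiply/add saturate to SIZE_MAX on overflow, so an
	 * absurd ring_count fails the allocation instead of undersizing it.
	 */
	v = kzalloc(struct_size(v, ring, ring_count), GFP_KERNEL);
	if (v)
		v->ring_count = ring_count;
	return v;
}
```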
.. | .. |
---|
1858 | 1855 | * configuration' in respect to these parameters. |
---|
1859 | 1856 | */ |
---|
1860 | 1857 | |
---|
1861 | | - netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \ |
---|
1862 | | - idleslope %d sendslope %d hiCredit %d \ |
---|
1863 | | - locredit %d\n", |
---|
1864 | | - (ring->cbs_enable) ? "enabled" : "disabled", |
---|
1865 | | - (ring->launchtime_enable) ? "enabled" : "disabled", queue, |
---|
1866 | | - ring->idleslope, ring->sendslope, ring->hicredit, |
---|
1867 | | - ring->locredit); |
---|
| 1858 | + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", |
---|
| 1859 | + ring->cbs_enable ? "enabled" : "disabled", |
---|
| 1860 | + ring->launchtime_enable ? "enabled" : "disabled", |
---|
| 1861 | + queue, |
---|
| 1862 | + ring->idleslope, ring->sendslope, |
---|
| 1863 | + ring->hicredit, ring->locredit); |
---|
1868 | 1864 | } |
---|
1869 | 1865 | |
---|
1870 | 1866 | static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, |
---|
.. | .. |
---|
1943 | 1939 | |
---|
1944 | 1940 | val = rd32(E1000_RXPBS); |
---|
1945 | 1941 | val &= ~I210_RXPBSIZE_MASK; |
---|
1946 | | - val |= I210_RXPBSIZE_PB_32KB; |
---|
| 1942 | + val |= I210_RXPBSIZE_PB_30KB; |
---|
1947 | 1943 | wr32(E1000_RXPBS, val); |
---|
1948 | 1944 | |
---|
1949 | 1945 | /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ |
---|
.. | .. |
---|
2237 | 2233 | |
---|
2238 | 2234 | void igb_reinit_locked(struct igb_adapter *adapter) |
---|
2239 | 2235 | { |
---|
2240 | | - WARN_ON(in_interrupt()); |
---|
2241 | 2236 | while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) |
---|
2242 | 2237 | usleep_range(1000, 2000); |
---|
2243 | 2238 | igb_down(adapter); |
---|
.. | .. |
---|
2379 | 2374 | adapter->ei.get_invariants(hw); |
---|
2380 | 2375 | adapter->flags &= ~IGB_FLAG_MEDIA_RESET; |
---|
2381 | 2376 | } |
---|
2382 | | - if ((mac->type == e1000_82575) && |
---|
| 2377 | + if ((mac->type == e1000_82575 || mac->type == e1000_i350) && |
---|
2383 | 2378 | (adapter->flags & IGB_FLAG_MAS_ENABLE)) { |
---|
2384 | 2379 | igb_enable_mas(adapter); |
---|
2385 | 2380 | } |
---|
.. | .. |
---|
2490 | 2485 | else |
---|
2491 | 2486 | igb_reset(adapter); |
---|
2492 | 2487 | |
---|
2493 | | - return 0; |
---|
| 2488 | + return 1; |
---|
2494 | 2489 | } |
---|
2495 | 2490 | |
---|
2496 | 2491 | static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], |
---|
2497 | 2492 | struct net_device *dev, |
---|
2498 | 2493 | const unsigned char *addr, u16 vid, |
---|
2499 | | - u16 flags) |
---|
| 2494 | + u16 flags, |
---|
| 2495 | + struct netlink_ext_ack *extack) |
---|
2500 | 2496 | { |
---|
2501 | 2497 | /* guarantee we can provide a unique filter for the unicast address */ |
---|
2502 | 2498 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { |
---|
.. | .. |
---|
2524 | 2520 | if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) |
---|
2525 | 2521 | return features & ~(NETIF_F_HW_CSUM | |
---|
2526 | 2522 | NETIF_F_SCTP_CRC | |
---|
| 2523 | + NETIF_F_GSO_UDP_L4 | |
---|
2527 | 2524 | NETIF_F_HW_VLAN_CTAG_TX | |
---|
2528 | 2525 | NETIF_F_TSO | |
---|
2529 | 2526 | NETIF_F_TSO6); |
---|
.. | .. |
---|
2532 | 2529 | if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) |
---|
2533 | 2530 | return features & ~(NETIF_F_HW_CSUM | |
---|
2534 | 2531 | NETIF_F_SCTP_CRC | |
---|
| 2532 | + NETIF_F_GSO_UDP_L4 | |
---|
2535 | 2533 | NETIF_F_TSO | |
---|
2536 | 2534 | NETIF_F_TSO6); |
---|
2537 | 2535 | |
---|
.. | .. |
---|
2586 | 2584 | #define VLAN_PRIO_FULL_MASK (0x07) |
---|
2587 | 2585 | |
---|
2588 | 2586 | static int igb_parse_cls_flower(struct igb_adapter *adapter, |
---|
2589 | | - struct tc_cls_flower_offload *f, |
---|
| 2587 | + struct flow_cls_offload *f, |
---|
2590 | 2588 | int traffic_class, |
---|
2591 | 2589 | struct igb_nfc_filter *input) |
---|
2592 | 2590 | { |
---|
| 2591 | + struct flow_rule *rule = flow_cls_offload_flow_rule(f); |
---|
| 2592 | + struct flow_dissector *dissector = rule->match.dissector; |
---|
2593 | 2593 | struct netlink_ext_ack *extack = f->common.extack; |
---|
2594 | 2594 | |
---|
2595 | | - if (f->dissector->used_keys & |
---|
| 2595 | + if (dissector->used_keys & |
---|
2596 | 2596 | ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | |
---|
2597 | 2597 | BIT(FLOW_DISSECTOR_KEY_CONTROL) | |
---|
2598 | 2598 | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | |
---|
.. | .. |
---|
2602 | 2602 | return -EOPNOTSUPP; |
---|
2603 | 2603 | } |
---|
2604 | 2604 | |
---|
2605 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
---|
2606 | | - struct flow_dissector_key_eth_addrs *key, *mask; |
---|
| 2605 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
---|
| 2606 | + struct flow_match_eth_addrs match; |
---|
2607 | 2607 | |
---|
2608 | | - key = skb_flow_dissector_target(f->dissector, |
---|
2609 | | - FLOW_DISSECTOR_KEY_ETH_ADDRS, |
---|
2610 | | - f->key); |
---|
2611 | | - mask = skb_flow_dissector_target(f->dissector, |
---|
2612 | | - FLOW_DISSECTOR_KEY_ETH_ADDRS, |
---|
2613 | | - f->mask); |
---|
2614 | | - |
---|
2615 | | - if (!is_zero_ether_addr(mask->dst)) { |
---|
2616 | | - if (!is_broadcast_ether_addr(mask->dst)) { |
---|
| 2608 | + flow_rule_match_eth_addrs(rule, &match); |
---|
| 2609 | + if (!is_zero_ether_addr(match.mask->dst)) { |
---|
| 2610 | + if (!is_broadcast_ether_addr(match.mask->dst)) { |
---|
2617 | 2611 | NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); |
---|
2618 | 2612 | return -EINVAL; |
---|
2619 | 2613 | } |
---|
2620 | 2614 | |
---|
2621 | 2615 | input->filter.match_flags |= |
---|
2622 | 2616 | IGB_FILTER_FLAG_DST_MAC_ADDR; |
---|
2623 | | - ether_addr_copy(input->filter.dst_addr, key->dst); |
---|
| 2617 | + ether_addr_copy(input->filter.dst_addr, match.key->dst); |
---|
2624 | 2618 | } |
---|
2625 | 2619 | |
---|
2626 | | - if (!is_zero_ether_addr(mask->src)) { |
---|
2627 | | - if (!is_broadcast_ether_addr(mask->src)) { |
---|
| 2620 | + if (!is_zero_ether_addr(match.mask->src)) { |
---|
| 2621 | + if (!is_broadcast_ether_addr(match.mask->src)) { |
---|
2628 | 2622 | NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); |
---|
2629 | 2623 | return -EINVAL; |
---|
2630 | 2624 | } |
---|
2631 | 2625 | |
---|
2632 | 2626 | input->filter.match_flags |= |
---|
2633 | 2627 | IGB_FILTER_FLAG_SRC_MAC_ADDR; |
---|
2634 | | - ether_addr_copy(input->filter.src_addr, key->src); |
---|
| 2628 | + ether_addr_copy(input->filter.src_addr, match.key->src); |
---|
2635 | 2629 | } |
---|
2636 | 2630 | } |
---|
2637 | 2631 | |
---|
2638 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { |
---|
2639 | | - struct flow_dissector_key_basic *key, *mask; |
---|
| 2632 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { |
---|
| 2633 | + struct flow_match_basic match; |
---|
2640 | 2634 | |
---|
2641 | | - key = skb_flow_dissector_target(f->dissector, |
---|
2642 | | - FLOW_DISSECTOR_KEY_BASIC, |
---|
2643 | | - f->key); |
---|
2644 | | - mask = skb_flow_dissector_target(f->dissector, |
---|
2645 | | - FLOW_DISSECTOR_KEY_BASIC, |
---|
2646 | | - f->mask); |
---|
2647 | | - |
---|
2648 | | - if (mask->n_proto) { |
---|
2649 | | - if (mask->n_proto != ETHER_TYPE_FULL_MASK) { |
---|
| 2635 | + flow_rule_match_basic(rule, &match); |
---|
| 2636 | + if (match.mask->n_proto) { |
---|
| 2637 | + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { |
---|
2650 | 2638 | NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); |
---|
2651 | 2639 | return -EINVAL; |
---|
2652 | 2640 | } |
---|
2653 | 2641 | |
---|
2654 | 2642 | input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; |
---|
2655 | | - input->filter.etype = key->n_proto; |
---|
| 2643 | + input->filter.etype = match.key->n_proto; |
---|
2656 | 2644 | } |
---|
2657 | 2645 | } |
---|
2658 | 2646 | |
---|
2659 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { |
---|
2660 | | - struct flow_dissector_key_vlan *key, *mask; |
---|
| 2647 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { |
---|
| 2648 | + struct flow_match_vlan match; |
---|
2661 | 2649 | |
---|
2662 | | - key = skb_flow_dissector_target(f->dissector, |
---|
2663 | | - FLOW_DISSECTOR_KEY_VLAN, |
---|
2664 | | - f->key); |
---|
2665 | | - mask = skb_flow_dissector_target(f->dissector, |
---|
2666 | | - FLOW_DISSECTOR_KEY_VLAN, |
---|
2667 | | - f->mask); |
---|
2668 | | - |
---|
2669 | | - if (mask->vlan_priority) { |
---|
2670 | | - if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) { |
---|
| 2650 | + flow_rule_match_vlan(rule, &match); |
---|
| 2651 | + if (match.mask->vlan_priority) { |
---|
| 2652 | + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { |
---|
2671 | 2653 | NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); |
---|
2672 | 2654 | return -EINVAL; |
---|
2673 | 2655 | } |
---|
2674 | 2656 | |
---|
2675 | 2657 | input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; |
---|
2676 | | - input->filter.vlan_tci = key->vlan_priority; |
---|
| 2658 | + input->filter.vlan_tci = |
---|
| 2659 | + (__force __be16)match.key->vlan_priority; |
---|
2677 | 2660 | } |
---|
2678 | 2661 | } |
---|
2679 | 2662 | |
---|
.. | .. |
---|
2684 | 2667 | } |
---|
2685 | 2668 | |
---|
2686 | 2669 | static int igb_configure_clsflower(struct igb_adapter *adapter, |
---|
2687 | | - struct tc_cls_flower_offload *cls_flower) |
---|
| 2670 | + struct flow_cls_offload *cls_flower) |
---|
2688 | 2671 | { |
---|
2689 | 2672 | struct netlink_ext_ack *extack = cls_flower->common.extack; |
---|
2690 | 2673 | struct igb_nfc_filter *filter, *f; |
---|
.. | .. |
---|
2746 | 2729 | } |
---|
2747 | 2730 | |
---|
2748 | 2731 | static int igb_delete_clsflower(struct igb_adapter *adapter, |
---|
2749 | | - struct tc_cls_flower_offload *cls_flower) |
---|
| 2732 | + struct flow_cls_offload *cls_flower) |
---|
2750 | 2733 | { |
---|
2751 | 2734 | struct igb_nfc_filter *filter; |
---|
2752 | 2735 | int err; |
---|
.. | .. |
---|
2776 | 2759 | } |
---|
2777 | 2760 | |
---|
2778 | 2761 | static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, |
---|
2779 | | - struct tc_cls_flower_offload *cls_flower) |
---|
| 2762 | + struct flow_cls_offload *cls_flower) |
---|
2780 | 2763 | { |
---|
2781 | 2764 | switch (cls_flower->command) { |
---|
2782 | | - case TC_CLSFLOWER_REPLACE: |
---|
| 2765 | + case FLOW_CLS_REPLACE: |
---|
2783 | 2766 | return igb_configure_clsflower(adapter, cls_flower); |
---|
2784 | | - case TC_CLSFLOWER_DESTROY: |
---|
| 2767 | + case FLOW_CLS_DESTROY: |
---|
2785 | 2768 | return igb_delete_clsflower(adapter, cls_flower); |
---|
2786 | | - case TC_CLSFLOWER_STATS: |
---|
| 2769 | + case FLOW_CLS_STATS: |
---|
2787 | 2770 | return -EOPNOTSUPP; |
---|
2788 | 2771 | default: |
---|
2789 | 2772 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
2802 | 2785 | case TC_SETUP_CLSFLOWER: |
---|
2803 | 2786 | return igb_setup_tc_cls_flower(adapter, type_data); |
---|
2804 | 2787 | |
---|
2805 | | - default: |
---|
2806 | | - return -EOPNOTSUPP; |
---|
2807 | | - } |
---|
2808 | | -} |
---|
2809 | | - |
---|
2810 | | -static int igb_setup_tc_block(struct igb_adapter *adapter, |
---|
2811 | | - struct tc_block_offload *f) |
---|
2812 | | -{ |
---|
2813 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
2814 | | - return -EOPNOTSUPP; |
---|
2815 | | - |
---|
2816 | | - switch (f->command) { |
---|
2817 | | - case TC_BLOCK_BIND: |
---|
2818 | | - return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, |
---|
2819 | | - adapter, adapter, f->extack); |
---|
2820 | | - case TC_BLOCK_UNBIND: |
---|
2821 | | - tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, |
---|
2822 | | - adapter); |
---|
2823 | | - return 0; |
---|
2824 | 2788 | default: |
---|
2825 | 2789 | return -EOPNOTSUPP; |
---|
2826 | 2790 | } |
---|
.. | .. |
---|
2849 | 2813 | return 0; |
---|
2850 | 2814 | } |
---|
2851 | 2815 | |
---|
| 2816 | +static LIST_HEAD(igb_block_cb_list); |
---|
| 2817 | + |
---|
2852 | 2818 | static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, |
---|
2853 | 2819 | void *type_data) |
---|
2854 | 2820 | { |
---|
.. | .. |
---|
2858 | 2824 | case TC_SETUP_QDISC_CBS: |
---|
2859 | 2825 | return igb_offload_cbs(adapter, type_data); |
---|
2860 | 2826 | case TC_SETUP_BLOCK: |
---|
2861 | | - return igb_setup_tc_block(adapter, type_data); |
---|
| 2827 | + return flow_block_cb_setup_simple(type_data, |
---|
| 2828 | + &igb_block_cb_list, |
---|
| 2829 | + igb_setup_tc_block_cb, |
---|
| 2830 | + adapter, adapter, true); |
---|
| 2831 | + |
---|
2862 | 2832 | case TC_SETUP_QDISC_ETF: |
---|
2863 | 2833 | return igb_offload_txtime(adapter, type_data); |
---|
2864 | 2834 | |
---|
2865 | 2835 | default: |
---|
2866 | 2836 | return -EOPNOTSUPP; |
---|
2867 | 2837 | } |
---|
| 2838 | +} |
---|
| 2839 | + |
---|
| 2840 | +static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) |
---|
| 2841 | +{ |
---|
| 2842 | + int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; |
---|
| 2843 | + struct igb_adapter *adapter = netdev_priv(dev); |
---|
| 2844 | + struct bpf_prog *prog = bpf->prog, *old_prog; |
---|
| 2845 | + bool running = netif_running(dev); |
---|
| 2846 | + bool need_reset; |
---|
| 2847 | + |
---|
| 2848 | + /* verify igb ring attributes are sufficient for XDP */ |
---|
| 2849 | + for (i = 0; i < adapter->num_rx_queues; i++) { |
---|
| 2850 | + struct igb_ring *ring = adapter->rx_ring[i]; |
---|
| 2851 | + |
---|
| 2852 | + if (frame_size > igb_rx_bufsz(ring)) { |
---|
| 2853 | + NL_SET_ERR_MSG_MOD(bpf->extack, |
---|
| 2854 | + "The RX buffer size is too small for the frame size"); |
---|
| 2855 | + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", |
---|
| 2856 | + igb_rx_bufsz(ring), frame_size); |
---|
| 2857 | + return -EINVAL; |
---|
| 2858 | + } |
---|
| 2859 | + } |
---|
| 2860 | + |
---|
| 2861 | + old_prog = xchg(&adapter->xdp_prog, prog); |
---|
| 2862 | + need_reset = (!!prog != !!old_prog); |
---|
| 2863 | + |
---|
| 2864 | + /* device is up and bpf is added/removed, must setup the RX queues */ |
---|
| 2865 | + if (need_reset && running) { |
---|
| 2866 | + igb_close(dev); |
---|
| 2867 | + } else { |
---|
| 2868 | + for (i = 0; i < adapter->num_rx_queues; i++) |
---|
| 2869 | + (void)xchg(&adapter->rx_ring[i]->xdp_prog, |
---|
| 2870 | + adapter->xdp_prog); |
---|
| 2871 | + } |
---|
| 2872 | + |
---|
| 2873 | + if (old_prog) |
---|
| 2874 | + bpf_prog_put(old_prog); |
---|
| 2875 | + |
---|
| 2876 | + /* bpf is just replaced, RXQ and MTU are already setup */ |
---|
| 2877 | + if (!need_reset) |
---|
| 2878 | + return 0; |
---|
| 2879 | + |
---|
| 2880 | + if (running) |
---|
| 2881 | + igb_open(dev); |
---|
| 2882 | + |
---|
| 2883 | + return 0; |
---|
| 2884 | +} |
---|
| 2885 | + |
---|
| 2886 | +static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
---|
| 2887 | +{ |
---|
| 2888 | + switch (xdp->command) { |
---|
| 2889 | + case XDP_SETUP_PROG: |
---|
| 2890 | + return igb_xdp_setup(dev, xdp); |
---|
| 2891 | + default: |
---|
| 2892 | + return -EINVAL; |
---|
| 2893 | + } |
---|
| 2894 | +} |
---|
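`igb_xdp()` above dispatches `XDP_SETUP_PROG` to `igb_xdp_setup()`, and the hook is registered as `.ndo_bpf` further down, so an XDP program can now be attached to an igb interface in native (driver) mode. Below is a minimal pass-through program as an illustrative sketch only (not part of this patch); the interface name in the comment is a placeholder:

```c
// SPDX-License-Identifier: GPL-2.0
/* Minimal XDP program (illustrative only). Built with clang -target bpf
 * and attached in driver mode, e.g.:
 *	ip link set dev <iface> xdpdrv obj xdp_pass.o sec xdp
 * where <iface> is the igb interface.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_prog(struct xdp_md *ctx)
{
	/* XDP_PASS hands the frame to the regular network stack; returning
	 * XDP_TX instead would bounce it back out through the
	 * igb_xdp_xmit_back() path added later in this patch.
	 */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```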
| 2895 | + |
---|
| 2896 | +static void igb_xdp_ring_update_tail(struct igb_ring *ring) |
---|
| 2897 | +{ |
---|
| 2898 | + /* Force memory writes to complete before letting h/w know there |
---|
| 2899 | + * are new descriptors to fetch. |
---|
| 2900 | + */ |
---|
| 2901 | + wmb(); |
---|
| 2902 | + writel(ring->next_to_use, ring->tail); |
---|
| 2903 | +} |
---|
| 2904 | + |
---|
| 2905 | +static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) |
---|
| 2906 | +{ |
---|
| 2907 | + unsigned int r_idx = smp_processor_id(); |
---|
| 2908 | + |
---|
| 2909 | + if (r_idx >= adapter->num_tx_queues) |
---|
| 2910 | + r_idx = r_idx % adapter->num_tx_queues; |
---|
| 2911 | + |
---|
| 2912 | + return adapter->tx_ring[r_idx]; |
---|
| 2913 | +} |
---|
| 2914 | + |
---|
| 2915 | +static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) |
---|
| 2916 | +{ |
---|
| 2917 | + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); |
---|
| 2918 | + int cpu = smp_processor_id(); |
---|
| 2919 | + struct igb_ring *tx_ring; |
---|
| 2920 | + struct netdev_queue *nq; |
---|
| 2921 | + u32 ret; |
---|
| 2922 | + |
---|
| 2923 | + if (unlikely(!xdpf)) |
---|
| 2924 | + return IGB_XDP_CONSUMED; |
---|
| 2925 | + |
---|
| 2926 | + /* During program transitions it's possible adapter->xdp_prog is assigned |
---|
| 2927 | + * but ring has not been configured yet. In this case simply abort xmit. |
---|
| 2928 | + */ |
---|
| 2929 | + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; |
---|
| 2930 | + if (unlikely(!tx_ring)) |
---|
| 2931 | + return IGB_XDP_CONSUMED; |
---|
| 2932 | + |
---|
| 2933 | + nq = txring_txq(tx_ring); |
---|
| 2934 | + __netif_tx_lock(nq, cpu); |
---|
| 2935 | + /* Avoid transmit queue timeout since we share it with the slow path */ |
---|
| 2936 | + nq->trans_start = jiffies; |
---|
| 2937 | + ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); |
---|
| 2938 | + __netif_tx_unlock(nq); |
---|
| 2939 | + |
---|
| 2940 | + return ret; |
---|
| 2941 | +} |
---|
| 2942 | + |
---|
| 2943 | +static int igb_xdp_xmit(struct net_device *dev, int n, |
---|
| 2944 | + struct xdp_frame **frames, u32 flags) |
---|
| 2945 | +{ |
---|
| 2946 | + struct igb_adapter *adapter = netdev_priv(dev); |
---|
| 2947 | + int cpu = smp_processor_id(); |
---|
| 2948 | + struct igb_ring *tx_ring; |
---|
| 2949 | + struct netdev_queue *nq; |
---|
| 2950 | + int drops = 0; |
---|
| 2951 | + int i; |
---|
| 2952 | + |
---|
| 2953 | + if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) |
---|
| 2954 | + return -ENETDOWN; |
---|
| 2955 | + |
---|
| 2956 | + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
---|
| 2957 | + return -EINVAL; |
---|
| 2958 | + |
---|
| 2959 | + /* During program transitions it's possible adapter->xdp_prog is assigned |
---|
| 2960 | + * but ring has not been configured yet. In this case simply abort xmit. |
---|
| 2961 | + */ |
---|
| 2962 | + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; |
---|
| 2963 | + if (unlikely(!tx_ring)) |
---|
| 2964 | + return -ENXIO; |
---|
| 2965 | + |
---|
| 2966 | + nq = txring_txq(tx_ring); |
---|
| 2967 | + __netif_tx_lock(nq, cpu); |
---|
| 2968 | + |
---|
| 2969 | + /* Avoid transmit queue timeout since we share it with the slow path */ |
---|
| 2970 | + nq->trans_start = jiffies; |
---|
| 2971 | + |
---|
| 2972 | + for (i = 0; i < n; i++) { |
---|
| 2973 | + struct xdp_frame *xdpf = frames[i]; |
---|
| 2974 | + int err; |
---|
| 2975 | + |
---|
| 2976 | + err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); |
---|
| 2977 | + if (err != IGB_XDP_TX) { |
---|
| 2978 | + xdp_return_frame_rx_napi(xdpf); |
---|
| 2979 | + drops++; |
---|
| 2980 | + } |
---|
| 2981 | + } |
---|
| 2982 | + |
---|
| 2983 | + __netif_tx_unlock(nq); |
---|
| 2984 | + |
---|
| 2985 | + if (unlikely(flags & XDP_XMIT_FLUSH)) |
---|
| 2986 | + igb_xdp_ring_update_tail(tx_ring); |
---|
| 2987 | + |
---|
| 2988 | + return n - drops; |
---|
2868 | 2989 | } |
---|
2869 | 2990 | |
---|
2870 | 2991 | static const struct net_device_ops igb_netdev_ops = { |
---|
.. | .. |
---|
2891 | 3012 | .ndo_fdb_add = igb_ndo_fdb_add, |
---|
2892 | 3013 | .ndo_features_check = igb_features_check, |
---|
2893 | 3014 | .ndo_setup_tc = igb_setup_tc, |
---|
| 3015 | + .ndo_bpf = igb_xdp, |
---|
| 3016 | + .ndo_xdp_xmit = igb_xdp_xmit, |
---|
2894 | 3017 | }; |
---|
2895 | 3018 | |
---|
2896 | 3019 | /** |
---|
.. | .. |
---|
2915 | 3038 | fw.invm_img_type); |
---|
2916 | 3039 | break; |
---|
2917 | 3040 | } |
---|
2918 | | - /* fall through */ |
---|
| 3041 | + fallthrough; |
---|
2919 | 3042 | default: |
---|
2920 | 3043 | /* if option is rom valid, display its version too */ |
---|
2921 | 3044 | if (fw.or_valid) { |
---|
.. | .. |
---|
3157 | 3280 | NETIF_F_HW_CSUM; |
---|
3158 | 3281 | |
---|
3159 | 3282 | if (hw->mac.type >= e1000_82576) |
---|
3160 | | - netdev->features |= NETIF_F_SCTP_CRC; |
---|
| 3283 | + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; |
---|
3161 | 3284 | |
---|
3162 | 3285 | if (hw->mac.type >= e1000_i350) |
---|
3163 | 3286 | netdev->features |= NETIF_F_HW_TC; |
---|
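The hunk above advertises `NETIF_F_GSO_UDP_L4` on 82576 and later parts, so UDP GSO super-packets can be segmented by the hardware (the matching `igb_tso()` changes appear further down). As a usage sketch, userspace opts into UDP GSO per socket with the `UDP_SEGMENT` socket option; the snippet below is a hypothetical test fragment, not driver code:

```c
#include <sys/socket.h>
#include <netinet/in.h>		/* IPPROTO_UDP (same value as SOL_UDP) */

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from include/uapi/linux/udp.h */
#endif

/* Ask the kernel to split every send() on this socket into gso_size-byte
 * UDP datagrams; with NETIF_F_GSO_UDP_L4 advertised, that split can be
 * deferred all the way to the NIC.
 */
static int demo_enable_udp_gso(int fd, int gso_size)
{
	return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
			  &gso_size, sizeof(gso_size));
}
```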
.. | .. |
---|
3431 | 3554 | "Width x1" : "unknown"), netdev->dev_addr); |
---|
3432 | 3555 | } |
---|
3433 | 3556 | |
---|
3434 | | - if ((hw->mac.type >= e1000_i210 || |
---|
| 3557 | + if ((hw->mac.type == e1000_82576 && |
---|
| 3558 | + rd32(E1000_EECD) & E1000_EECD_PRES) || |
---|
| 3559 | + (hw->mac.type >= e1000_i210 || |
---|
3435 | 3560 | igb_get_flash_presence_i210(hw))) { |
---|
3436 | 3561 | ret_val = igb_read_part_string(hw, part_str, |
---|
3437 | 3562 | E1000_PBANUM_LENGTH); |
---|
.. | .. |
---|
3478 | 3603 | } |
---|
3479 | 3604 | } |
---|
3480 | 3605 | |
---|
3481 | | - dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); |
---|
| 3606 | + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); |
---|
3482 | 3607 | |
---|
3483 | 3608 | pm_runtime_put_noidle(&pdev->dev); |
---|
3484 | 3609 | return 0; |
---|
.. | .. |
---|
3517 | 3642 | struct net_device *netdev = pci_get_drvdata(pdev); |
---|
3518 | 3643 | struct igb_adapter *adapter = netdev_priv(netdev); |
---|
3519 | 3644 | struct e1000_hw *hw = &adapter->hw; |
---|
| 3645 | + unsigned long flags; |
---|
3520 | 3646 | |
---|
3521 | 3647 | /* reclaim resources allocated to VFs */ |
---|
3522 | 3648 | if (adapter->vf_data) { |
---|
.. | .. |
---|
3529 | 3655 | pci_disable_sriov(pdev); |
---|
3530 | 3656 | msleep(500); |
---|
3531 | 3657 | } |
---|
3532 | | - |
---|
| 3658 | + spin_lock_irqsave(&adapter->vfs_lock, flags); |
---|
3533 | 3659 | kfree(adapter->vf_mac_list); |
---|
3534 | 3660 | adapter->vf_mac_list = NULL; |
---|
3535 | 3661 | kfree(adapter->vf_data); |
---|
3536 | 3662 | adapter->vf_data = NULL; |
---|
3537 | 3663 | adapter->vfs_allocated_count = 0; |
---|
| 3664 | + spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
---|
3538 | 3665 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); |
---|
3539 | 3666 | wrfl(); |
---|
3540 | 3667 | msleep(100); |
---|
.. | .. |
---|
3730 | 3857 | struct pci_dev *pdev = adapter->pdev; |
---|
3731 | 3858 | struct e1000_hw *hw = &adapter->hw; |
---|
3732 | 3859 | |
---|
3733 | | - /* Virtualization features not supported on i210 family. */ |
---|
3734 | | - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) |
---|
| 3860 | + /* Virtualization features not supported on i210 and 82580 family. */ |
---|
| 3861 | + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) || |
---|
| 3862 | + (hw->mac.type == e1000_82580)) |
---|
3735 | 3863 | return; |
---|
3736 | 3864 | |
---|
3737 | 3865 | /* Of the below we really only want the effect of getting |
---|
.. | .. |
---|
3767 | 3895 | max_rss_queues = 1; |
---|
3768 | 3896 | break; |
---|
3769 | 3897 | } |
---|
3770 | | - /* fall through */ |
---|
| 3898 | + fallthrough; |
---|
3771 | 3899 | case e1000_82576: |
---|
3772 | 3900 | if (!!adapter->vfs_allocated_count) { |
---|
3773 | 3901 | max_rss_queues = 2; |
---|
3774 | 3902 | break; |
---|
3775 | 3903 | } |
---|
3776 | | - /* fall through */ |
---|
| 3904 | + fallthrough; |
---|
3777 | 3905 | case e1000_82580: |
---|
3778 | 3906 | case e1000_i354: |
---|
3779 | 3907 | default: |
---|
.. | .. |
---|
3849 | 3977 | /* set default work limits */ |
---|
3850 | 3978 | adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; |
---|
3851 | 3979 | |
---|
3852 | | - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + |
---|
3853 | | - VLAN_HLEN; |
---|
| 3980 | + adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; |
---|
3854 | 3981 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; |
---|
3855 | 3982 | |
---|
3856 | 3983 | spin_lock_init(&adapter->nfc_lock); |
---|
3857 | 3984 | spin_lock_init(&adapter->stats64_lock); |
---|
| 3985 | + |
---|
| 3986 | + /* init spinlock to avoid concurrency of VF resources */ |
---|
| 3987 | + spin_lock_init(&adapter->vfs_lock); |
---|
3858 | 3988 | #ifdef CONFIG_PCI_IOV |
---|
3859 | 3989 | switch (hw->mac.type) { |
---|
3860 | 3990 | case e1000_82576: |
---|
.. | .. |
---|
3912 | 4042 | /** |
---|
3913 | 4043 | * igb_open - Called when a network interface is made active |
---|
3914 | 4044 | * @netdev: network interface device structure |
---|
| 4045 | + * @resuming: indicates whether we are in a resume call |
---|
3915 | 4046 | * |
---|
3916 | 4047 | * Returns 0 on success, negative value on failure |
---|
3917 | 4048 | * |
---|
.. | .. |
---|
4029 | 4160 | /** |
---|
4030 | 4161 | * igb_close - Disables a network interface |
---|
4031 | 4162 | * @netdev: network interface device structure |
---|
| 4163 | + * @suspending: indicates we are in a suspend call |
---|
4032 | 4164 | * |
---|
4033 | 4165 | * Returns 0, this is not allowed to fail |
---|
4034 | 4166 | * |
---|
.. | .. |
---|
4222 | 4354 | **/ |
---|
4223 | 4355 | int igb_setup_rx_resources(struct igb_ring *rx_ring) |
---|
4224 | 4356 | { |
---|
| 4357 | + struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); |
---|
4225 | 4358 | struct device *dev = rx_ring->dev; |
---|
4226 | 4359 | int size; |
---|
4227 | 4360 | |
---|
.. | .. |
---|
4243 | 4376 | rx_ring->next_to_alloc = 0; |
---|
4244 | 4377 | rx_ring->next_to_clean = 0; |
---|
4245 | 4378 | rx_ring->next_to_use = 0; |
---|
| 4379 | + |
---|
| 4380 | + rx_ring->xdp_prog = adapter->xdp_prog; |
---|
| 4381 | + |
---|
| 4382 | + /* XDP RX-queue info */ |
---|
| 4383 | + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, |
---|
| 4384 | + rx_ring->queue_index) < 0) |
---|
| 4385 | + goto err; |
---|
4246 | 4386 | |
---|
4247 | 4387 | return 0; |
---|
4248 | 4388 | |
---|
.. | .. |
---|
4362 | 4502 | else |
---|
4363 | 4503 | mrqc |= E1000_MRQC_ENABLE_VMDQ; |
---|
4364 | 4504 | } else { |
---|
4365 | | - if (hw->mac.type != e1000_i211) |
---|
4366 | | - mrqc |= E1000_MRQC_ENABLE_RSS_MQ; |
---|
| 4505 | + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; |
---|
4367 | 4506 | } |
---|
4368 | 4507 | igb_vmm_control(adapter); |
---|
4369 | 4508 | |
---|
.. | .. |
---|
4502 | 4641 | } |
---|
4503 | 4642 | |
---|
4504 | 4643 | /** |
---|
| 4644 | + * igb_setup_srrctl - configure the split and replication receive control |
---|
| 4645 | + * registers |
---|
| 4646 | + * @adapter: Board private structure |
---|
| 4647 | + * @ring: receive ring to be configured |
---|
| 4648 | + **/ |
---|
| 4649 | +void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) |
---|
| 4650 | +{ |
---|
| 4651 | + struct e1000_hw *hw = &adapter->hw; |
---|
| 4652 | + int reg_idx = ring->reg_idx; |
---|
| 4653 | + u32 srrctl = 0; |
---|
| 4654 | + |
---|
| 4655 | + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; |
---|
| 4656 | + if (ring_uses_large_buffer(ring)) |
---|
| 4657 | + srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; |
---|
| 4658 | + else |
---|
| 4659 | + srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; |
---|
| 4660 | + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; |
---|
| 4661 | + if (hw->mac.type >= e1000_82580) |
---|
| 4662 | + srrctl |= E1000_SRRCTL_TIMESTAMP; |
---|
| 4663 | + /* Only set Drop Enable if VFs allocated, or we are supporting multiple |
---|
| 4664 | + * queues and rx flow control is disabled |
---|
| 4665 | + */ |
---|
| 4666 | + if (adapter->vfs_allocated_count || |
---|
| 4667 | + (!(hw->fc.current_mode & e1000_fc_rx_pause) && |
---|
| 4668 | + adapter->num_rx_queues > 1)) |
---|
| 4669 | + srrctl |= E1000_SRRCTL_DROP_EN; |
---|
| 4670 | + |
---|
| 4671 | + wr32(E1000_SRRCTL(reg_idx), srrctl); |
---|
| 4672 | +} |
---|
| 4673 | + |
---|
| 4674 | +/** |
---|
4505 | 4675 | * igb_configure_rx_ring - Configure a receive ring after Reset |
---|
4506 | 4676 | * @adapter: board private structure |
---|
4507 | 4677 | * @ring: receive ring to be configured |
---|
.. | .. |
---|
4515 | 4685 | union e1000_adv_rx_desc *rx_desc; |
---|
4516 | 4686 | u64 rdba = ring->dma; |
---|
4517 | 4687 | int reg_idx = ring->reg_idx; |
---|
4518 | | - u32 srrctl = 0, rxdctl = 0; |
---|
| 4688 | + u32 rxdctl = 0; |
---|
| 4689 | + |
---|
| 4690 | + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); |
---|
| 4691 | + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, |
---|
| 4692 | + MEM_TYPE_PAGE_SHARED, NULL)); |
---|
4519 | 4693 | |
---|
4520 | 4694 | /* disable the queue */ |
---|
4521 | 4695 | wr32(E1000_RXDCTL(reg_idx), 0); |
---|
.. | .. |
---|
4533 | 4707 | writel(0, ring->tail); |
---|
4534 | 4708 | |
---|
4535 | 4709 | /* set descriptor configuration */ |
---|
4536 | | - srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; |
---|
4537 | | - if (ring_uses_large_buffer(ring)) |
---|
4538 | | - srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; |
---|
4539 | | - else |
---|
4540 | | - srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; |
---|
4541 | | - srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; |
---|
4542 | | - if (hw->mac.type >= e1000_82580) |
---|
4543 | | - srrctl |= E1000_SRRCTL_TIMESTAMP; |
---|
4544 | | - /* Only set Drop Enable if we are supporting multiple queues */ |
---|
4545 | | - if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) |
---|
4546 | | - srrctl |= E1000_SRRCTL_DROP_EN; |
---|
4547 | | - |
---|
4548 | | - wr32(E1000_SRRCTL(reg_idx), srrctl); |
---|
| 4710 | + igb_setup_srrctl(adapter, ring); |
---|
4549 | 4711 | |
---|
4550 | 4712 | /* set filtering for VMDQ pools */ |
---|
4551 | 4713 | igb_set_vmolr(adapter, reg_idx & 0x7, true); |
---|
.. | .. |
---|
4570 | 4732 | static void igb_set_rx_buffer_len(struct igb_adapter *adapter, |
---|
4571 | 4733 | struct igb_ring *rx_ring) |
---|
4572 | 4734 | { |
---|
| 4735 | +#if (PAGE_SIZE < 8192) |
---|
| 4736 | + struct e1000_hw *hw = &adapter->hw; |
---|
| 4737 | +#endif |
---|
| 4738 | + |
---|
4573 | 4739 | /* set build_skb and buffer size flags */ |
---|
4574 | 4740 | clear_ring_build_skb_enabled(rx_ring); |
---|
4575 | 4741 | clear_ring_uses_large_buffer(rx_ring); |
---|
.. | .. |
---|
4580 | 4746 | set_ring_build_skb_enabled(rx_ring); |
---|
4581 | 4747 | |
---|
4582 | 4748 | #if (PAGE_SIZE < 8192) |
---|
4583 | | - if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) |
---|
4584 | | - return; |
---|
4585 | | - |
---|
4586 | | - set_ring_uses_large_buffer(rx_ring); |
---|
| 4749 | + if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB || |
---|
| 4750 | + rd32(E1000_RCTL) & E1000_RCTL_SBP) |
---|
| 4751 | + set_ring_uses_large_buffer(rx_ring); |
---|
4587 | 4752 | #endif |
---|
4588 | 4753 | } |
---|
4589 | 4754 | |
---|
.. | .. |
---|
4661 | 4826 | while (i != tx_ring->next_to_use) { |
---|
4662 | 4827 | union e1000_adv_tx_desc *eop_desc, *tx_desc; |
---|
4663 | 4828 | |
---|
4664 | | - /* Free all the Tx ring sk_buffs */ |
---|
4665 | | - dev_kfree_skb_any(tx_buffer->skb); |
---|
| 4829 | + /* Free all the Tx ring sk_buffs or xdp frames */ |
---|
| 4830 | + if (tx_buffer->type == IGB_TYPE_SKB) |
---|
| 4831 | + dev_kfree_skb_any(tx_buffer->skb); |
---|
| 4832 | + else |
---|
| 4833 | + xdp_return_frame(tx_buffer->xdpf); |
---|
4666 | 4834 | |
---|
4667 | 4835 | /* unmap skb header data */ |
---|
4668 | 4836 | dma_unmap_single(tx_ring->dev, |
---|
.. | .. |
---|
4735 | 4903 | { |
---|
4736 | 4904 | igb_clean_rx_ring(rx_ring); |
---|
4737 | 4905 | |
---|
| 4906 | + rx_ring->xdp_prog = NULL; |
---|
| 4907 | + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); |
---|
4738 | 4908 | vfree(rx_ring->rx_buffer_info); |
---|
4739 | 4909 | rx_ring->rx_buffer_info = NULL; |
---|
4740 | 4910 | |
---|
.. | .. |
---|
4771 | 4941 | { |
---|
4772 | 4942 | u16 i = rx_ring->next_to_clean; |
---|
4773 | 4943 | |
---|
4774 | | - if (rx_ring->skb) |
---|
4775 | | - dev_kfree_skb(rx_ring->skb); |
---|
| 4944 | + dev_kfree_skb(rx_ring->skb); |
---|
4776 | 4945 | rx_ring->skb = NULL; |
---|
4777 | 4946 | |
---|
4778 | 4947 | /* Free all the Rx ring sk_buffs */ |
---|
.. | .. |
---|
4896 | 5065 | /* VLAN filtering needed for VLAN prio filter */ |
---|
4897 | 5066 | if (adapter->netdev->features & NETIF_F_NTUPLE) |
---|
4898 | 5067 | break; |
---|
4899 | | - /* fall through */ |
---|
| 5068 | + fallthrough; |
---|
4900 | 5069 | case e1000_82576: |
---|
4901 | 5070 | case e1000_82580: |
---|
4902 | 5071 | case e1000_i354: |
---|
4903 | 5072 | /* VLAN filtering needed for pool filtering */ |
---|
4904 | 5073 | if (adapter->vfs_allocated_count) |
---|
4905 | 5074 | break; |
---|
4906 | | - /* fall through */ |
---|
| 5075 | + fallthrough; |
---|
4907 | 5076 | default: |
---|
4908 | 5077 | return 1; |
---|
4909 | 5078 | } |
---|
.. | .. |
---|
5183 | 5352 | case e1000_media_type_copper: |
---|
5184 | 5353 | if (!hw->mac.get_link_status) |
---|
5185 | 5354 | return true; |
---|
5186 | | - /* fall through */ |
---|
| 5355 | + fallthrough; |
---|
5187 | 5356 | case e1000_media_type_internal_serdes: |
---|
5188 | 5357 | hw->mac.ops.check_for_link(hw); |
---|
5189 | 5358 | link_active = !hw->mac.get_link_status; |
---|
.. | .. |
---|
5247 | 5416 | |
---|
5248 | 5417 | /** |
---|
5249 | 5418 | * igb_watchdog - Timer Call-back |
---|
5250 | | - * @data: pointer to adapter cast into an unsigned long |
---|
| 5419 | + * @t: pointer to timer_list containing our private info pointer |
---|
5251 | 5420 | **/ |
---|
5252 | 5421 | static void igb_watchdog(struct timer_list *t) |
---|
5253 | 5422 | { |
---|
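The kernel-doc above now documents `igb_watchdog()` as taking a `struct timer_list *`; with the modern timer API the callback recovers its private structure via `from_timer()` (a `container_of()` on the embedded timer). Since the function body itself is outside this hunk, here is a minimal sketch of that pattern with a hypothetical `demo_adapter`:

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_adapter {
	struct timer_list watchdog_timer;
	/* ... driver state ... */
};

static void demo_watchdog(struct timer_list *t)
{
	/* container_of(t, struct demo_adapter, watchdog_timer) */
	struct demo_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* ... inspect link state, then typically re-arm ... */
	mod_timer(&adapter->watchdog_timer, jiffies + HZ);
}

/* at init time:
 *	timer_setup(&adapter->watchdog_timer, demo_watchdog, 0);
 */
```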
.. | .. |
---|
5346 | 5515 | break; |
---|
5347 | 5516 | } |
---|
5348 | 5517 | |
---|
5349 | | - if (adapter->link_speed != SPEED_1000) |
---|
| 5518 | + if (adapter->link_speed != SPEED_1000 || |
---|
| 5519 | + !hw->phy.ops.read_reg) |
---|
5350 | 5520 | goto no_wait; |
---|
5351 | 5521 | |
---|
5352 | 5522 | /* wait for Remote receiver status OK */ |
---|
.. | .. |
---|
5714 | 5884 | * should have been handled by the upper layers. |
---|
5715 | 5885 | */ |
---|
5716 | 5886 | if (tx_ring->launchtime_enable) { |
---|
5717 | | - ts = ns_to_timespec64(first->skb->tstamp); |
---|
5718 | | - first->skb->tstamp = 0; |
---|
| 5887 | + ts = ktime_to_timespec64(first->skb->tstamp); |
---|
| 5888 | + skb_txtime_consumed(first->skb); |
---|
5719 | 5889 | context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); |
---|
5720 | 5890 | } else { |
---|
5721 | 5891 | context_desc->seqnum_seed = 0; |
---|
.. | .. |
---|
5735 | 5905 | } ip; |
---|
5736 | 5906 | union { |
---|
5737 | 5907 | struct tcphdr *tcp; |
---|
| 5908 | + struct udphdr *udp; |
---|
5738 | 5909 | unsigned char *hdr; |
---|
5739 | 5910 | } l4; |
---|
5740 | 5911 | u32 paylen, l4_offset; |
---|
.. | .. |
---|
5754 | 5925 | l4.hdr = skb_checksum_start(skb); |
---|
5755 | 5926 | |
---|
5756 | 5927 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
---|
5757 | | - type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; |
---|
| 5928 | + type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? |
---|
| 5929 | + E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP; |
---|
5758 | 5930 | |
---|
5759 | 5931 | /* initialize outer IP header fields */ |
---|
5760 | 5932 | if (ip.v4->version == 4) { |
---|
.. | .. |
---|
5782 | 5954 | /* determine offset of inner transport header */ |
---|
5783 | 5955 | l4_offset = l4.hdr - skb->data; |
---|
5784 | 5956 | |
---|
5785 | | - /* compute length of segmentation header */ |
---|
5786 | | - *hdr_len = (l4.tcp->doff * 4) + l4_offset; |
---|
5787 | | - |
---|
5788 | 5957 | /* remove payload length from inner checksum */ |
---|
5789 | 5958 | paylen = skb->len - l4_offset; |
---|
5790 | | - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); |
---|
| 5959 | + if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) { |
---|
| 5960 | + /* compute length of segmentation header */ |
---|
| 5961 | + *hdr_len = (l4.tcp->doff * 4) + l4_offset; |
---|
| 5962 | + csum_replace_by_diff(&l4.tcp->check, |
---|
| 5963 | + (__force __wsum)htonl(paylen)); |
---|
| 5964 | + } else { |
---|
| 5965 | + /* compute length of segmentation header */ |
---|
| 5966 | + *hdr_len = sizeof(*l4.udp) + l4_offset; |
---|
| 5967 | + csum_replace_by_diff(&l4.udp->check, |
---|
| 5968 | + (__force __wsum)htonl(paylen)); |
---|
| 5969 | + } |
---|
5791 | 5970 | |
---|
5792 | 5971 | /* update gso size and bytecount with header size */ |
---|
5793 | 5972 | first->gso_segs = skb_shinfo(skb)->gso_segs; |
---|
.. | .. |
---|
5834 | 6013 | switch (skb->csum_offset) { |
---|
5835 | 6014 | case offsetof(struct tcphdr, check): |
---|
5836 | 6015 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; |
---|
5837 | | - /* fall through */ |
---|
| 6016 | + fallthrough; |
---|
5838 | 6017 | case offsetof(struct udphdr, check): |
---|
5839 | 6018 | break; |
---|
5840 | 6019 | case offsetof(struct sctphdr, checksum): |
---|
.. | .. |
---|
5846 | 6025 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; |
---|
5847 | 6026 | break; |
---|
5848 | 6027 | } |
---|
5849 | | - /* fall through */ |
---|
| 6028 | + fallthrough; |
---|
5850 | 6029 | default: |
---|
5851 | 6030 | skb_checksum_help(skb); |
---|
5852 | 6031 | goto csum_failed; |
---|
.. | .. |
---|
5958 | 6137 | struct sk_buff *skb = first->skb; |
---|
5959 | 6138 | struct igb_tx_buffer *tx_buffer; |
---|
5960 | 6139 | union e1000_adv_tx_desc *tx_desc; |
---|
5961 | | - struct skb_frag_struct *frag; |
---|
| 6140 | + skb_frag_t *frag; |
---|
5962 | 6141 | dma_addr_t dma; |
---|
5963 | 6142 | unsigned int data_len, size; |
---|
5964 | 6143 | u32 tx_flags = first->tx_flags; |
---|
.. | .. |
---|
6035 | 6214 | /* set the timestamp */ |
---|
6036 | 6215 | first->time_stamp = jiffies; |
---|
6037 | 6216 | |
---|
| 6217 | + skb_tx_timestamp(skb); |
---|
| 6218 | + |
---|
6038 | 6219 | /* Force memory writes to complete before letting h/w know there |
---|
6039 | 6220 | * are new descriptors to fetch. (Only applicable for weak-ordered |
---|
6040 | 6221 | * memory model archs, such as IA-64). |
---|
.. | .. |
---|
6056 | 6237 | /* Make sure there is space in the ring for the next send. */ |
---|
6057 | 6238 | igb_maybe_stop_tx(tx_ring, DESC_NEEDED); |
---|
6058 | 6239 | |
---|
6059 | | - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { |
---|
| 6240 | + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { |
---|
6060 | 6241 | writel(i, tx_ring->tail); |
---|
6061 | | - |
---|
6062 | | - /* we need this if more than one processor can write to our tail |
---|
6063 | | - * at a time, it synchronizes IO on IA64/Altix systems |
---|
6064 | | - */ |
---|
6065 | | - mmiowb(); |
---|
6066 | 6242 | } |
---|
6067 | 6243 | return 0; |
---|
6068 | 6244 | |
---|
.. | .. |
---|
6099 | 6275 | return -1; |
---|
6100 | 6276 | } |
---|
6101 | 6277 | |
---|
| 6278 | +int igb_xmit_xdp_ring(struct igb_adapter *adapter, |
---|
| 6279 | + struct igb_ring *tx_ring, |
---|
| 6280 | + struct xdp_frame *xdpf) |
---|
| 6281 | +{ |
---|
| 6282 | + union e1000_adv_tx_desc *tx_desc; |
---|
| 6283 | + u32 len, cmd_type, olinfo_status; |
---|
| 6284 | + struct igb_tx_buffer *tx_buffer; |
---|
| 6285 | + dma_addr_t dma; |
---|
| 6286 | + u16 i; |
---|
| 6287 | + |
---|
| 6288 | + len = xdpf->len; |
---|
| 6289 | + |
---|
| 6290 | + if (unlikely(!igb_desc_unused(tx_ring))) |
---|
| 6291 | + return IGB_XDP_CONSUMED; |
---|
| 6292 | + |
---|
| 6293 | + dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE); |
---|
| 6294 | + if (dma_mapping_error(tx_ring->dev, dma)) |
---|
| 6295 | + return IGB_XDP_CONSUMED; |
---|
| 6296 | + |
---|
| 6297 | + /* record the location of the first descriptor for this packet */ |
---|
| 6298 | + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
---|
| 6299 | + tx_buffer->bytecount = len; |
---|
| 6300 | + tx_buffer->gso_segs = 1; |
---|
| 6301 | + tx_buffer->protocol = 0; |
---|
| 6302 | + |
---|
| 6303 | + i = tx_ring->next_to_use; |
---|
| 6304 | + tx_desc = IGB_TX_DESC(tx_ring, i); |
---|
| 6305 | + |
---|
| 6306 | + dma_unmap_len_set(tx_buffer, len, len); |
---|
| 6307 | + dma_unmap_addr_set(tx_buffer, dma, dma); |
---|
| 6308 | + tx_buffer->type = IGB_TYPE_XDP; |
---|
| 6309 | + tx_buffer->xdpf = xdpf; |
---|
| 6310 | + |
---|
| 6311 | + tx_desc->read.buffer_addr = cpu_to_le64(dma); |
---|
| 6312 | + |
---|
| 6313 | + /* put descriptor type bits */ |
---|
| 6314 | + cmd_type = E1000_ADVTXD_DTYP_DATA | |
---|
| 6315 | + E1000_ADVTXD_DCMD_DEXT | |
---|
| 6316 | + E1000_ADVTXD_DCMD_IFCS; |
---|
| 6317 | + cmd_type |= len | IGB_TXD_DCMD; |
---|
| 6318 | + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
---|
| 6319 | + |
---|
| 6320 | + olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT; |
---|
| 6321 | + /* 82575 requires a unique index per ring */ |
---|
| 6322 | + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) |
---|
| 6323 | + olinfo_status |= tx_ring->reg_idx << 4; |
---|
| 6324 | + |
---|
| 6325 | + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
---|
| 6326 | + |
---|
| 6327 | + netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount); |
---|
| 6328 | + |
---|
| 6329 | + /* set the timestamp */ |
---|
| 6330 | + tx_buffer->time_stamp = jiffies; |
---|
| 6331 | + |
---|
| 6332 | + /* Avoid any potential race with xdp_xmit and cleanup */ |
---|
| 6333 | + smp_wmb(); |
---|
| 6334 | + |
---|
| 6335 | + /* set next_to_watch value indicating a packet is present */ |
---|
| 6336 | + i++; |
---|
| 6337 | + if (i == tx_ring->count) |
---|
| 6338 | + i = 0; |
---|
| 6339 | + |
---|
| 6340 | + tx_buffer->next_to_watch = tx_desc; |
---|
| 6341 | + tx_ring->next_to_use = i; |
---|
| 6342 | + |
---|
| 6343 | + /* Make sure there is space in the ring for the next send. */ |
---|
| 6344 | + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); |
---|
| 6345 | + |
---|
| 6346 | + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) |
---|
| 6347 | + writel(i, tx_ring->tail); |
---|
| 6348 | + |
---|
| 6349 | + return IGB_XDP_TX; |
---|
| 6350 | +} |
---|
| 6351 | + |
---|
6102 | 6352 | netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, |
---|
6103 | 6353 | struct igb_ring *tx_ring) |
---|
6104 | 6354 | { |
---|
.. | .. |
---|
6117 | 6367 | * otherwise try next time |
---|
6118 | 6368 | */ |
---|
6119 | 6369 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) |
---|
6120 | | - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); |
---|
| 6370 | + count += TXD_USE_COUNT(skb_frag_size( |
---|
| 6371 | + &skb_shinfo(skb)->frags[f])); |
---|
6121 | 6372 | |
---|
6122 | 6373 | if (igb_maybe_stop_tx(tx_ring, count + 3)) { |
---|
6123 | 6374 | /* this is a hard error */ |
---|
.. | .. |
---|
6126 | 6377 | |
---|
6127 | 6378 | /* record the location of the first descriptor for this packet */ |
---|
6128 | 6379 | first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
---|
| 6380 | + first->type = IGB_TYPE_SKB; |
---|
6129 | 6381 | first->skb = skb; |
---|
6130 | 6382 | first->bytecount = skb->len; |
---|
6131 | 6383 | first->gso_segs = 1; |
---|
.. | .. |
---|
6162 | 6414 | goto out_drop; |
---|
6163 | 6415 | else if (!tso) |
---|
6164 | 6416 | igb_tx_csum(tx_ring, first); |
---|
6165 | | - |
---|
6166 | | - skb_tx_timestamp(skb); |
---|
6167 | 6417 | |
---|
6168 | 6418 | if (igb_tx_map(tx_ring, first, hdr_len)) |
---|
6169 | 6419 | goto cleanup_tx_tstamp; |
---|
.. | .. |
---|
6215 | 6465 | /** |
---|
6216 | 6466 | * igb_tx_timeout - Respond to a Tx Hang |
---|
6217 | 6467 | * @netdev: network interface device structure |
---|
| 6468 | + * @txqueue: number of the Tx queue that hung (unused) |
---|
6218 | 6469 | **/ |
---|
6219 | | -static void igb_tx_timeout(struct net_device *netdev) |
---|
| 6470 | +static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) |
---|
6220 | 6471 | { |
---|
6221 | 6472 | struct igb_adapter *adapter = netdev_priv(netdev); |
---|
6222 | 6473 | struct e1000_hw *hw = &adapter->hw; |
---|
.. | .. |
---|
6277 | 6528 | static int igb_change_mtu(struct net_device *netdev, int new_mtu) |
---|
6278 | 6529 | { |
---|
6279 | 6530 | struct igb_adapter *adapter = netdev_priv(netdev); |
---|
6280 | | - struct pci_dev *pdev = adapter->pdev; |
---|
6281 | | - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
---|
| 6531 | + int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; |
---|
| 6532 | + |
---|
| 6533 | + if (adapter->xdp_prog) { |
---|
| 6534 | + int i; |
---|
| 6535 | + |
---|
| 6536 | + for (i = 0; i < adapter->num_rx_queues; i++) { |
---|
| 6537 | + struct igb_ring *ring = adapter->rx_ring[i]; |
---|
| 6538 | + |
---|
| 6539 | + if (max_frame > igb_rx_bufsz(ring)) { |
---|
| 6540 | + netdev_warn(adapter->netdev, |
---|
| 6541 | + "Requested MTU size is not supported with XDP. Max frame size is %d\n", |
---|
| 6542 | + max_frame); |
---|
| 6543 | + return -EINVAL; |
---|
| 6544 | + } |
---|
| 6545 | + } |
---|
| 6546 | + } |
---|
6282 | 6547 | |
---|
6283 | 6548 | /* adjust max frame to be at least the size of a standard frame */ |
---|
6284 | 6549 | if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) |
---|
.. | .. |
---|
6293 | 6558 | if (netif_running(netdev)) |
---|
6294 | 6559 | igb_down(adapter); |
---|
6295 | 6560 | |
---|
6296 | | - dev_info(&pdev->dev, "changing MTU from %d to %d\n", |
---|
6297 | | - netdev->mtu, new_mtu); |
---|
| 6561 | + netdev_dbg(netdev, "changing MTU from %d to %d\n", |
---|
| 6562 | + netdev->mtu, new_mtu); |
---|
6298 | 6563 | netdev->mtu = new_mtu; |
---|
6299 | 6564 | |
---|
6300 | 6565 | if (netif_running(netdev)) |
---|
.. | .. |
---|
6738 | 7003 | igb_setup_dca(adapter); |
---|
6739 | 7004 | break; |
---|
6740 | 7005 | } |
---|
6741 | | - /* Fall Through since DCA is disabled. */ |
---|
| 7006 | + fallthrough; /* since DCA is disabled. */ |
---|
6742 | 7007 | case DCA_PROVIDER_REMOVE: |
---|
6743 | 7008 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { |
---|
6744 | 7009 | /* without this a class_device is left |
---|
.. | .. |
---|
7157 | 7422 | { |
---|
7158 | 7423 | struct e1000_hw *hw = &adapter->hw; |
---|
7159 | 7424 | unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; |
---|
7160 | | - u32 reg, msgbuf[3]; |
---|
| 7425 | + u32 reg, msgbuf[3] = {}; |
---|
7161 | 7426 | u8 *addr = (u8 *)(&msgbuf[1]); |
---|
7162 | 7427 | |
---|
7163 | 7428 | /* process all the same items cleared in a function level reset */ |
---|
.. | .. |
---|
7191 | 7456 | |
---|
7192 | 7457 | for (i = 0; i < hw->mac.rar_entry_count; i++) { |
---|
7193 | 7458 | adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; |
---|
7194 | | - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); |
---|
| 7459 | + eth_zero_addr(adapter->mac_table[i].addr); |
---|
7195 | 7460 | adapter->mac_table[i].queue = 0; |
---|
7196 | 7461 | igb_rar_set_index(adapter, i); |
---|
7197 | 7462 | } |
---|
.. | .. |
---|
7340 | 7605 | } else { |
---|
7341 | 7606 | adapter->mac_table[i].state = 0; |
---|
7342 | 7607 | adapter->mac_table[i].queue = 0; |
---|
7343 | | - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); |
---|
| 7608 | + eth_zero_addr(adapter->mac_table[i].addr); |
---|
7344 | 7609 | } |
---|
7345 | 7610 | |
---|
7346 | 7611 | igb_rar_set_index(adapter, i); |
---|
.. | .. |
---|
7600 | 7865 | static void igb_msg_task(struct igb_adapter *adapter) |
---|
7601 | 7866 | { |
---|
7602 | 7867 | struct e1000_hw *hw = &adapter->hw; |
---|
| 7868 | + unsigned long flags; |
---|
7603 | 7869 | u32 vf; |
---|
7604 | 7870 | |
---|
| 7871 | + spin_lock_irqsave(&adapter->vfs_lock, flags); |
---|
7605 | 7872 | for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { |
---|
7606 | 7873 | /* process any reset requests */ |
---|
7607 | 7874 | if (!igb_check_for_rst(hw, vf)) |
---|
.. | .. |
---|
7615 | 7882 | if (!igb_check_for_ack(hw, vf)) |
---|
7616 | 7883 | igb_rcv_ack_from_vf(adapter, vf); |
---|
7617 | 7884 | } |
---|
| 7885 | + spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
---|
7618 | 7886 | } |
---|
7619 | 7887 | |
---|
7620 | 7888 | /** |
---|
.. | .. |
---|
7778 | 8046 | if (!clean_complete) |
---|
7779 | 8047 | return budget; |
---|
7780 | 8048 | |
---|
7781 | | - /* If not enough Rx work done, exit the polling mode */ |
---|
7782 | | - napi_complete_done(napi, work_done); |
---|
7783 | | - igb_ring_irq_enable(q_vector); |
---|
| 8049 | + /* Exit the polling mode, but don't re-enable interrupts if stack might |
---|
| 8050 | + * poll us due to busy-polling |
---|
| 8051 | + */ |
---|
| 8052 | + if (likely(napi_complete_done(napi, work_done))) |
---|
| 8053 | + igb_ring_irq_enable(q_vector); |
---|
7784 | 8054 | |
---|
7785 | | - return 0; |
---|
| 8055 | + return work_done; |
---|
7786 | 8056 | } |
---|
7787 | 8057 | |
---|
7788 | 8058 | /** |
---|
.. | .. |
---|
7831 | 8101 | total_packets += tx_buffer->gso_segs; |
---|
7832 | 8102 | |
---|
7833 | 8103 | /* free the skb */ |
---|
7834 | | - napi_consume_skb(tx_buffer->skb, napi_budget); |
---|
| 8104 | + if (tx_buffer->type == IGB_TYPE_SKB) |
---|
| 8105 | + napi_consume_skb(tx_buffer->skb, napi_budget); |
---|
| 8106 | + else |
---|
| 8107 | + xdp_return_frame(tx_buffer->xdpf); |
---|
7835 | 8108 | |
---|
7836 | 8109 | /* unmap skb header data */ |
---|
7837 | 8110 | dma_unmap_single(tx_ring->dev, |
---|
.. | .. |
---|
7990 | 8263 | return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); |
---|
7991 | 8264 | } |
---|
7992 | 8265 | |
---|
7993 | | -static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) |
---|
| 8266 | +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, |
---|
| 8267 | + int rx_buf_pgcnt) |
---|
7994 | 8268 | { |
---|
7995 | 8269 | unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; |
---|
7996 | 8270 | struct page *page = rx_buffer->page; |
---|
.. | .. |
---|
8001 | 8275 | |
---|
8002 | 8276 | #if (PAGE_SIZE < 8192) |
---|
8003 | 8277 | /* if we are only owner of page we can reuse it */ |
---|
8004 | | - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) |
---|
| 8278 | + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) |
---|
8005 | 8279 | return false; |
---|
8006 | 8280 | #else |
---|
8007 | 8281 | #define IGB_LAST_OFFSET \ |
---|
.. | .. |
---|
8015 | 8289 | * the pagecnt_bias and page count so that we fully restock the |
---|
8016 | 8290 | * number of references the driver holds. |
---|
8017 | 8291 | */ |
---|
8018 | | - if (unlikely(!pagecnt_bias)) { |
---|
8019 | | - page_ref_add(page, USHRT_MAX); |
---|
| 8292 | + if (unlikely(pagecnt_bias == 1)) { |
---|
| 8293 | + page_ref_add(page, USHRT_MAX - 1); |
---|
8020 | 8294 | rx_buffer->pagecnt_bias = USHRT_MAX; |
---|
8021 | 8295 | } |
---|
8022 | 8296 | |
---|
.. | .. |
---|
8055 | 8329 | |
---|
8056 | 8330 | static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, |
---|
8057 | 8331 | struct igb_rx_buffer *rx_buffer, |
---|
8058 | | - union e1000_adv_rx_desc *rx_desc, |
---|
8059 | | - unsigned int size) |
---|
| 8332 | + struct xdp_buff *xdp, |
---|
| 8333 | + union e1000_adv_rx_desc *rx_desc) |
---|
8060 | 8334 | { |
---|
8061 | | - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; |
---|
8062 | 8335 | #if (PAGE_SIZE < 8192) |
---|
8063 | 8336 | unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; |
---|
8064 | 8337 | #else |
---|
8065 | | - unsigned int truesize = SKB_DATA_ALIGN(size); |
---|
| 8338 | + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - |
---|
| 8339 | + xdp->data_hard_start); |
---|
8066 | 8340 | #endif |
---|
| 8341 | + unsigned int size = xdp->data_end - xdp->data; |
---|
8067 | 8342 | unsigned int headlen; |
---|
8068 | 8343 | struct sk_buff *skb; |
---|
8069 | 8344 | |
---|
8070 | 8345 | /* prefetch first cache line of first page */ |
---|
8071 | | - prefetch(va); |
---|
8072 | | -#if L1_CACHE_BYTES < 128 |
---|
8073 | | - prefetch(va + L1_CACHE_BYTES); |
---|
8074 | | -#endif |
---|
| 8346 | + net_prefetch(xdp->data); |
---|
8075 | 8347 | |
---|
8076 | 8348 | /* allocate a skb to store the frags */ |
---|
8077 | 8349 | skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); |
---|
.. | .. |
---|
8079 | 8351 | return NULL; |
---|
8080 | 8352 | |
---|
8081 | 8353 | if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { |
---|
8082 | | - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); |
---|
8083 | | - va += IGB_TS_HDR_LEN; |
---|
8084 | | - size -= IGB_TS_HDR_LEN; |
---|
| 8354 | + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) { |
---|
| 8355 | + xdp->data += IGB_TS_HDR_LEN; |
---|
| 8356 | + size -= IGB_TS_HDR_LEN; |
---|
| 8357 | + } |
---|
8085 | 8358 | } |
---|
8086 | 8359 | |
---|
8087 | 8360 | /* Determine available headroom for copy */ |
---|
8088 | 8361 | headlen = size; |
---|
8089 | 8362 | if (headlen > IGB_RX_HDR_LEN) |
---|
8090 | | - headlen = eth_get_headlen(va, IGB_RX_HDR_LEN); |
---|
| 8363 | + headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); |
---|
8091 | 8364 | |
---|
8092 | 8365 | /* align pull length to size of long to optimize memcpy performance */ |
---|
8093 | | - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); |
---|
| 8366 | + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); |
---|
8094 | 8367 | |
---|
8095 | 8368 | /* update all of the pointers */ |
---|
8096 | 8369 | size -= headlen; |
---|
8097 | 8370 | if (size) { |
---|
8098 | 8371 | skb_add_rx_frag(skb, 0, rx_buffer->page, |
---|
8099 | | - (va + headlen) - page_address(rx_buffer->page), |
---|
| 8372 | + (xdp->data + headlen) - page_address(rx_buffer->page), |
---|
8100 | 8373 | size, truesize); |
---|
8101 | 8374 | #if (PAGE_SIZE < 8192) |
---|
8102 | 8375 | rx_buffer->page_offset ^= truesize; |
---|
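igb_construct_skb() now takes the xdp_buff so that any data/data_end adjustment made by an XDP program is respected when the headers are copied out and the remainder of the page is attached as a fragment. The copy-break idea in isolation, with hypothetical my_* names, an assumed 256-byte header budget, and truesize meaning the full buffer footprint charged to the skb:

```c
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/etherdevice.h>
#include <net/xdp.h>

#define MY_RX_HDR_LEN 256	/* assumed copy-break size, like IGB_RX_HDR_LEN */

/* Copy the protocol headers into a fresh skb, keep the payload in place. */
static struct sk_buff *my_construct_skb(struct napi_struct *napi,
					struct page *page,
					struct xdp_buff *xdp,
					unsigned int truesize)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen = size;
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, MY_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Pull only the headers into the linear area. */
	if (headlen > MY_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, xdp->data, MY_RX_HDR_LEN);
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));

	/* Whatever exceeds the headers stays in the page as a fragment. */
	if (size > headlen)
		skb_add_rx_frag(skb, 0, page,
				(xdp->data + headlen) - page_address(page),
				size - headlen, truesize);
	return skb;
}
```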
.. | .. |
---|
8112 | 8385 | |
---|
8113 | 8386 | static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, |
---|
8114 | 8387 | struct igb_rx_buffer *rx_buffer, |
---|
8115 | | - union e1000_adv_rx_desc *rx_desc, |
---|
8116 | | - unsigned int size) |
---|
| 8388 | + struct xdp_buff *xdp, |
---|
| 8389 | + union e1000_adv_rx_desc *rx_desc) |
---|
8117 | 8390 | { |
---|
8118 | | - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; |
---|
8119 | 8391 | #if (PAGE_SIZE < 8192) |
---|
8120 | 8392 | unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; |
---|
8121 | 8393 | #else |
---|
8122 | 8394 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
---|
8123 | | - SKB_DATA_ALIGN(IGB_SKB_PAD + size); |
---|
| 8395 | + SKB_DATA_ALIGN(xdp->data_end - |
---|
| 8396 | + xdp->data_hard_start); |
---|
8124 | 8397 | #endif |
---|
| 8398 | + unsigned int metasize = xdp->data - xdp->data_meta; |
---|
8125 | 8399 | struct sk_buff *skb; |
---|
8126 | 8400 | |
---|
8127 | 8401 | /* prefetch first cache line of first page */ |
---|
8128 | | - prefetch(va); |
---|
8129 | | -#if L1_CACHE_BYTES < 128 |
---|
8130 | | - prefetch(va + L1_CACHE_BYTES); |
---|
8131 | | -#endif |
---|
| 8402 | + net_prefetch(xdp->data_meta); |
---|
8132 | 8403 | |
---|
8133 | 8404 | /* build an skb around the page buffer */ |
---|
8134 | | - skb = build_skb(va - IGB_SKB_PAD, truesize); |
---|
| 8405 | + skb = build_skb(xdp->data_hard_start, truesize); |
---|
8135 | 8406 | if (unlikely(!skb)) |
---|
8136 | 8407 | return NULL; |
---|
8137 | 8408 | |
---|
8138 | 8409 | /* update pointers within the skb to store the data */ |
---|
8139 | | - skb_reserve(skb, IGB_SKB_PAD); |
---|
8140 | | - __skb_put(skb, size); |
---|
| 8410 | + skb_reserve(skb, xdp->data - xdp->data_hard_start); |
---|
| 8411 | + __skb_put(skb, xdp->data_end - xdp->data); |
---|
| 8412 | + |
---|
| 8413 | + if (metasize) |
---|
| 8414 | + skb_metadata_set(skb, metasize); |
---|
8141 | 8415 | |
---|
8142 | 8416 | /* pull timestamp out of packet data */ |
---|
8143 | 8417 | if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { |
---|
8144 | | - igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); |
---|
8145 | | - __skb_pull(skb, IGB_TS_HDR_LEN); |
---|
| 8418 | + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb)) |
---|
| 8419 | + __skb_pull(skb, IGB_TS_HDR_LEN); |
---|
8146 | 8420 | } |
---|
8147 | 8421 | |
---|
8148 | 8422 | /* update buffer offset */ |
---|
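igb_build_skb() gets the same treatment: headroom and length now come from the xdp_buff rather than fixed IGB_SKB_PAD/size values, and metadata that an XDP program placed in front of the packet survives into the skb. A minimal sketch of the zero-copy construction:

```c
#include <linux/skbuff.h>
#include <net/xdp.h>

/* Wrap an skb around an already-filled buffer; no data is copied. */
static struct sk_buff *my_build_skb(struct xdp_buff *xdp, unsigned int truesize)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* Honour the (possibly XDP-adjusted) data/data_end pointers. */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);

	/* Carry any XDP metadata across to the skb. */
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}
```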
.. | .. |
---|
8153 | 8427 | #endif |
---|
8154 | 8428 | |
---|
8155 | 8429 | return skb; |
---|
| 8430 | +} |
---|
| 8431 | + |
---|
| 8432 | +static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, |
---|
| 8433 | + struct igb_ring *rx_ring, |
---|
| 8434 | + struct xdp_buff *xdp) |
---|
| 8435 | +{ |
---|
| 8436 | + int err, result = IGB_XDP_PASS; |
---|
| 8437 | + struct bpf_prog *xdp_prog; |
---|
| 8438 | + u32 act; |
---|
| 8439 | + |
---|
| 8440 | + rcu_read_lock(); |
---|
| 8441 | + xdp_prog = READ_ONCE(rx_ring->xdp_prog); |
---|
| 8442 | + |
---|
| 8443 | + if (!xdp_prog) |
---|
| 8444 | + goto xdp_out; |
---|
| 8445 | + |
---|
| 8446 | + prefetchw(xdp->data_hard_start); /* xdp_frame write */ |
---|
| 8447 | + |
---|
| 8448 | + act = bpf_prog_run_xdp(xdp_prog, xdp); |
---|
| 8449 | + switch (act) { |
---|
| 8450 | + case XDP_PASS: |
---|
| 8451 | + break; |
---|
| 8452 | + case XDP_TX: |
---|
| 8453 | + result = igb_xdp_xmit_back(adapter, xdp); |
---|
| 8454 | + if (result == IGB_XDP_CONSUMED) |
---|
| 8455 | + goto out_failure; |
---|
| 8456 | + break; |
---|
| 8457 | + case XDP_REDIRECT: |
---|
| 8458 | + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); |
---|
| 8459 | + if (err) |
---|
| 8460 | + goto out_failure; |
---|
| 8461 | + result = IGB_XDP_REDIR; |
---|
| 8462 | + break; |
---|
| 8463 | + default: |
---|
| 8464 | + bpf_warn_invalid_xdp_action(act); |
---|
| 8465 | + fallthrough; |
---|
| 8466 | + case XDP_ABORTED: |
---|
| 8467 | +out_failure: |
---|
| 8468 | + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
---|
| 8469 | + fallthrough; |
---|
| 8470 | + case XDP_DROP: |
---|
| 8471 | + result = IGB_XDP_CONSUMED; |
---|
| 8472 | + break; |
---|
| 8473 | + } |
---|
| 8474 | +xdp_out: |
---|
| 8475 | + rcu_read_unlock(); |
---|
| 8476 | + return ERR_PTR(-result); |
---|
| 8477 | +} |
---|
| 8478 | + |
---|
| 8479 | +static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, |
---|
| 8480 | + unsigned int size) |
---|
| 8481 | +{ |
---|
| 8482 | + unsigned int truesize; |
---|
| 8483 | + |
---|
| 8484 | +#if (PAGE_SIZE < 8192) |
---|
| 8485 | + truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ |
---|
| 8486 | +#else |
---|
| 8487 | + truesize = ring_uses_build_skb(rx_ring) ? |
---|
| 8488 | + SKB_DATA_ALIGN(IGB_SKB_PAD + size) + |
---|
| 8489 | + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : |
---|
| 8490 | + SKB_DATA_ALIGN(size); |
---|
| 8491 | +#endif |
---|
| 8492 | + return truesize; |
---|
| 8493 | +} |
---|
| 8494 | + |
---|
| 8495 | +static void igb_rx_buffer_flip(struct igb_ring *rx_ring, |
---|
| 8496 | + struct igb_rx_buffer *rx_buffer, |
---|
| 8497 | + unsigned int size) |
---|
| 8498 | +{ |
---|
| 8499 | + unsigned int truesize = igb_rx_frame_truesize(rx_ring, size); |
---|
| 8500 | +#if (PAGE_SIZE < 8192) |
---|
| 8501 | + rx_buffer->page_offset ^= truesize; |
---|
| 8502 | +#else |
---|
| 8503 | + rx_buffer->page_offset += truesize; |
---|
| 8504 | +#endif |
---|
8156 | 8505 | } |
---|
8157 | 8506 | |
---|
8158 | 8507 | static inline void igb_rx_checksum(struct igb_ring *ring, |
---|
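igb_run_xdp() smuggles its verdict back through the returned pointer: IGB_XDP_PASS is 0, so ERR_PTR(-result) becomes NULL for the pass case and the caller falls through to normal skb construction, while consumed/TX/redirect results come back as small error pointers that IS_ERR() detects and -PTR_ERR() turns back into the flag. A toy illustration of that encoding; the MY_XDP_* values are assumptions in the spirit of the IGB_XDP_* defines in igb.h:

```c
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/skbuff.h>

#define MY_XDP_PASS	0
#define MY_XDP_CONSUMED	BIT(0)
#define MY_XDP_TX	BIT(1)
#define MY_XDP_REDIR	BIT(2)

static struct sk_buff *my_encode_verdict(unsigned int verdict)
{
	/* MY_XDP_PASS (0) encodes to NULL, so the caller simply keeps
	 * going and builds an skb for the frame.
	 */
	return ERR_PTR(-(long)verdict);
}

static unsigned int my_decode_verdict(struct sk_buff *skb)
{
	return IS_ERR(skb) ? -PTR_ERR(skb) : MY_XDP_PASS;
}
```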
.. | .. |
---|
8209 | 8558 | * igb_is_non_eop - process handling of non-EOP buffers |
---|
8210 | 8559 | * @rx_ring: Rx ring being processed |
---|
8211 | 8560 | * @rx_desc: Rx descriptor for current buffer |
---|
8212 | | - * @skb: current socket buffer containing buffer in progress |
---|
8213 | 8561 | * |
---|
8214 | 8562 | * This function updates next to clean. If the buffer is an EOP buffer |
---|
8215 | 8563 | * this function exits returning false, otherwise it will place the |
---|
.. | .. |
---|
8251 | 8599 | union e1000_adv_rx_desc *rx_desc, |
---|
8252 | 8600 | struct sk_buff *skb) |
---|
8253 | 8601 | { |
---|
| 8602 | + /* XDP packets use error pointer so abort at this point */ |
---|
| 8603 | + if (IS_ERR(skb)) |
---|
| 8604 | + return true; |
---|
| 8605 | + |
---|
8254 | 8606 | if (unlikely((igb_test_staterr(rx_desc, |
---|
8255 | 8607 | E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { |
---|
8256 | 8608 | struct net_device *netdev = rx_ring->netdev; |
---|
.. | .. |
---|
8297 | 8649 | |
---|
8298 | 8650 | if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && |
---|
8299 | 8651 | test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) |
---|
8300 | | - vid = be16_to_cpu(rx_desc->wb.upper.vlan); |
---|
| 8652 | + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); |
---|
8301 | 8653 | else |
---|
8302 | 8654 | vid = le16_to_cpu(rx_desc->wb.upper.vlan); |
---|
8303 | 8655 | |
---|
.. | .. |
---|
8309 | 8661 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); |
---|
8310 | 8662 | } |
---|
8311 | 8663 | |
---|
| 8664 | +static unsigned int igb_rx_offset(struct igb_ring *rx_ring) |
---|
| 8665 | +{ |
---|
| 8666 | + return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0; |
---|
| 8667 | +} |
---|
| 8668 | + |
---|
8312 | 8669 | static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, |
---|
8313 | | - const unsigned int size) |
---|
| 8670 | + const unsigned int size, int *rx_buf_pgcnt) |
---|
8314 | 8671 | { |
---|
8315 | 8672 | struct igb_rx_buffer *rx_buffer; |
---|
8316 | 8673 | |
---|
8317 | 8674 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; |
---|
| 8675 | + *rx_buf_pgcnt = |
---|
| 8676 | +#if (PAGE_SIZE < 8192) |
---|
| 8677 | + page_count(rx_buffer->page); |
---|
| 8678 | +#else |
---|
| 8679 | + 0; |
---|
| 8680 | +#endif |
---|
8318 | 8681 | prefetchw(rx_buffer->page); |
---|
8319 | 8682 | |
---|
8320 | 8683 | /* we are reusing so sync this buffer for CPU use */ |
---|
.. | .. |
---|
8330 | 8693 | } |
---|
8331 | 8694 | |
---|
8332 | 8695 | static void igb_put_rx_buffer(struct igb_ring *rx_ring, |
---|
8333 | | - struct igb_rx_buffer *rx_buffer) |
---|
| 8696 | + struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt) |
---|
8334 | 8697 | { |
---|
8335 | | - if (igb_can_reuse_rx_page(rx_buffer)) { |
---|
| 8698 | + if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) { |
---|
8336 | 8699 | /* hand second half of page back to the ring */ |
---|
8337 | 8700 | igb_reuse_rx_page(rx_ring, rx_buffer); |
---|
8338 | 8701 | } else { |
---|
.. | .. |
---|
8352 | 8715 | |
---|
8353 | 8716 | static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) |
---|
8354 | 8717 | { |
---|
| 8718 | + struct igb_adapter *adapter = q_vector->adapter; |
---|
8355 | 8719 | struct igb_ring *rx_ring = q_vector->rx.ring; |
---|
8356 | 8720 | struct sk_buff *skb = rx_ring->skb; |
---|
8357 | 8721 | unsigned int total_bytes = 0, total_packets = 0; |
---|
8358 | 8722 | u16 cleaned_count = igb_desc_unused(rx_ring); |
---|
| 8723 | + unsigned int xdp_xmit = 0; |
---|
| 8724 | + struct xdp_buff xdp; |
---|
| 8725 | + int rx_buf_pgcnt; |
---|
| 8726 | + |
---|
| 8727 | + xdp.rxq = &rx_ring->xdp_rxq; |
---|
| 8728 | + |
---|
| 8729 | + /* Frame size depends on the rx_ring setup when PAGE_SIZE=4K */
---|
| 8730 | +#if (PAGE_SIZE < 8192) |
---|
| 8731 | + xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0); |
---|
| 8732 | +#endif |
---|
8359 | 8733 | |
---|
8360 | 8734 | while (likely(total_packets < budget)) { |
---|
8361 | 8735 | union e1000_adv_rx_desc *rx_desc; |
---|
.. | .. |
---|
8379 | 8753 | */ |
---|
8380 | 8754 | dma_rmb(); |
---|
8381 | 8755 | |
---|
8382 | | - rx_buffer = igb_get_rx_buffer(rx_ring, size); |
---|
| 8756 | + rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); |
---|
8383 | 8757 | |
---|
8384 | 8758 | /* retrieve a buffer from the ring */ |
---|
8385 | | - if (skb) |
---|
| 8759 | + if (!skb) { |
---|
| 8760 | + xdp.data = page_address(rx_buffer->page) + |
---|
| 8761 | + rx_buffer->page_offset; |
---|
| 8762 | + xdp.data_meta = xdp.data; |
---|
| 8763 | + xdp.data_hard_start = xdp.data - |
---|
| 8764 | + igb_rx_offset(rx_ring); |
---|
| 8765 | + xdp.data_end = xdp.data + size; |
---|
| 8766 | +#if (PAGE_SIZE > 4096) |
---|
| 8767 | + /* At larger PAGE_SIZE, frame_sz depends on the frame length */
---|
| 8768 | + xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size); |
---|
| 8769 | +#endif |
---|
| 8770 | + skb = igb_run_xdp(adapter, rx_ring, &xdp); |
---|
| 8771 | + } |
---|
| 8772 | + |
---|
| 8773 | + if (IS_ERR(skb)) { |
---|
| 8774 | + unsigned int xdp_res = -PTR_ERR(skb); |
---|
| 8775 | + |
---|
| 8776 | + if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) { |
---|
| 8777 | + xdp_xmit |= xdp_res; |
---|
| 8778 | + igb_rx_buffer_flip(rx_ring, rx_buffer, size); |
---|
| 8779 | + } else { |
---|
| 8780 | + rx_buffer->pagecnt_bias++; |
---|
| 8781 | + } |
---|
| 8782 | + total_packets++; |
---|
| 8783 | + total_bytes += size; |
---|
| 8784 | + } else if (skb) |
---|
8386 | 8785 | igb_add_rx_frag(rx_ring, rx_buffer, skb, size); |
---|
8387 | 8786 | else if (ring_uses_build_skb(rx_ring)) |
---|
8388 | | - skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size); |
---|
| 8787 | + skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); |
---|
8389 | 8788 | else |
---|
8390 | 8789 | skb = igb_construct_skb(rx_ring, rx_buffer, |
---|
8391 | | - rx_desc, size); |
---|
| 8790 | + &xdp, rx_desc); |
---|
8392 | 8791 | |
---|
8393 | 8792 | /* exit if we failed to retrieve a buffer */ |
---|
8394 | 8793 | if (!skb) { |
---|
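In the Rx loop the on-stack xdp_buff is pointed at the current page fragment by hand: data_hard_start marks the start of the buffer including headroom, data the first packet byte, data_meta starts out equal to data, and data_end bounds the frame; frame_sz (the buffer's truesize) is filled in once per ring for 4K pages or per frame otherwise. A sketch of that setup with placeholder names (later kernels add xdp_init_buff()/xdp_prepare_buff() helpers for the same assignments, an option this tree does not use):

```c
#include <linux/mm.h>
#include <net/xdp.h>

/* Point an on-stack xdp_buff at one received fragment. "headroom" is
 * the space reserved in front of the packet (IGB_SKB_PAD when build_skb
 * is in use, 0 otherwise).
 */
static void my_fill_xdp_buff(struct xdp_buff *xdp, struct page *page,
			     unsigned int page_offset,
			     unsigned int headroom, unsigned int size)
{
	unsigned char *data = page_address(page) + page_offset;

	xdp->data_hard_start = data - headroom;	/* start of the usable buffer */
	xdp->data = data;			/* first byte of the frame */
	xdp->data_meta = data;			/* no metadata until a program adds some */
	xdp->data_end = data + size;		/* one past the last byte */
}
```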
.. | .. |
---|
8397 | 8796 | break; |
---|
8398 | 8797 | } |
---|
8399 | 8798 | |
---|
8400 | | - igb_put_rx_buffer(rx_ring, rx_buffer); |
---|
| 8799 | + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); |
---|
8401 | 8800 | cleaned_count++; |
---|
8402 | 8801 | |
---|
8403 | 8802 | /* fetch next buffer in frame if non-eop */ |
---|
.. | .. |
---|
8428 | 8827 | /* place incomplete frames back on ring for completion */ |
---|
8429 | 8828 | rx_ring->skb = skb; |
---|
8430 | 8829 | |
---|
| 8830 | + if (xdp_xmit & IGB_XDP_REDIR) |
---|
| 8831 | + xdp_do_flush(); |
---|
| 8832 | + |
---|
| 8833 | + if (xdp_xmit & IGB_XDP_TX) { |
---|
| 8834 | + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); |
---|
| 8835 | + |
---|
| 8836 | + igb_xdp_ring_update_tail(tx_ring); |
---|
| 8837 | + } |
---|
| 8838 | + |
---|
8431 | 8839 | u64_stats_update_begin(&rx_ring->rx_syncp); |
---|
8432 | 8840 | rx_ring->rx_stats.packets += total_packets; |
---|
8433 | 8841 | rx_ring->rx_stats.bytes += total_bytes; |
---|
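The loop only records XDP_TX/XDP_REDIRECT outcomes in xdp_xmit; the redirect flush and the Tx tail (doorbell) write are issued once per poll so the MMIO write and the map flush stay off the per-packet path. A sketch of that batched completion step, with the verdict flags reduced to two bools and a placeholder tail register:

```c
#include <linux/filter.h>
#include <linux/io.h>

/* Finish one poll iteration: flush redirects, ring the Tx doorbell once. */
static void my_finish_xdp(bool did_redirect, bool did_tx,
			  void __iomem *tail_reg, u16 next_to_use)
{
	if (did_redirect)
		xdp_do_flush();	/* drain the per-CPU redirect bulk queues */

	if (did_tx) {
		/* Descriptor writes must be visible before the doorbell. */
		wmb();
		writel(next_to_use, tail_reg);
	}
}
```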
.. | .. |
---|
8439 | 8847 | igb_alloc_rx_buffers(rx_ring, cleaned_count); |
---|
8440 | 8848 | |
---|
8441 | 8849 | return total_packets; |
---|
8442 | | -} |
---|
8443 | | - |
---|
8444 | | -static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) |
---|
8445 | | -{ |
---|
8446 | | - return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0; |
---|
8447 | 8850 | } |
---|
8448 | 8851 | |
---|
8449 | 8852 | static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, |
---|
.. | .. |
---|
8482 | 8885 | bi->dma = dma; |
---|
8483 | 8886 | bi->page = page; |
---|
8484 | 8887 | bi->page_offset = igb_rx_offset(rx_ring); |
---|
8485 | | - bi->pagecnt_bias = 1; |
---|
| 8888 | + page_ref_add(page, USHRT_MAX - 1); |
---|
| 8889 | + bi->pagecnt_bias = USHRT_MAX; |
---|
8486 | 8890 | |
---|
8487 | 8891 | return true; |
---|
8488 | 8892 | } |
---|
8489 | 8893 | |
---|
8490 | 8894 | /** |
---|
8491 | | - * igb_alloc_rx_buffers - Replace used receive buffers; packet split |
---|
8492 | | - * @adapter: address of board private structure |
---|
| 8895 | + * igb_alloc_rx_buffers - Replace used receive buffers |
---|
| 8896 | + * @rx_ring: rx descriptor ring to allocate new receive buffers |
---|
| 8897 | + * @cleaned_count: count of buffers to allocate |
---|
8493 | 8898 | **/ |
---|
8494 | 8899 | void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) |
---|
8495 | 8900 | { |
---|
.. | .. |
---|
8558 | 8963 | |
---|
8559 | 8964 | /** |
---|
8560 | 8965 | * igb_mii_ioctl - |
---|
8561 | | - * @netdev: |
---|
8562 | | - * @ifreq: |
---|
8563 | | - * @cmd: |
---|
| 8966 | + * @netdev: pointer to netdev struct |
---|
| 8967 | + * @ifr: interface structure |
---|
| 8968 | + * @cmd: ioctl command to execute |
---|
8564 | 8969 | **/ |
---|
8565 | 8970 | static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
---|
8566 | 8971 | { |
---|
.. | .. |
---|
8588 | 8993 | |
---|
8589 | 8994 | /** |
---|
8590 | 8995 | * igb_ioctl - |
---|
8591 | | - * @netdev: |
---|
8592 | | - * @ifreq: |
---|
8593 | | - * @cmd: |
---|
| 8996 | + * @netdev: pointer to netdev struct |
---|
| 8997 | + * @ifr: interface structure |
---|
| 8998 | + * @cmd: ioctl command to execute |
---|
8594 | 8999 | **/ |
---|
8595 | 9000 | static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
---|
8596 | 9001 | { |
---|
.. | .. |
---|
8875 | 9280 | return __igb_shutdown(to_pci_dev(dev), NULL, 0); |
---|
8876 | 9281 | } |
---|
8877 | 9282 | |
---|
8878 | | -static int __maybe_unused igb_resume(struct device *dev) |
---|
| 9283 | +static int __maybe_unused __igb_resume(struct device *dev, bool rpm) |
---|
8879 | 9284 | { |
---|
8880 | 9285 | struct pci_dev *pdev = to_pci_dev(dev); |
---|
8881 | 9286 | struct net_device *netdev = pci_get_drvdata(pdev); |
---|
.. | .. |
---|
8918 | 9323 | |
---|
8919 | 9324 | wr32(E1000_WUS, ~0); |
---|
8920 | 9325 | |
---|
8921 | | - rtnl_lock(); |
---|
| 9326 | + if (!rpm) |
---|
| 9327 | + rtnl_lock(); |
---|
8922 | 9328 | if (!err && netif_running(netdev)) |
---|
8923 | 9329 | err = __igb_open(netdev, true); |
---|
8924 | 9330 | |
---|
8925 | 9331 | if (!err) |
---|
8926 | 9332 | netif_device_attach(netdev); |
---|
8927 | | - rtnl_unlock(); |
---|
| 9333 | + if (!rpm) |
---|
| 9334 | + rtnl_unlock(); |
---|
8928 | 9335 | |
---|
8929 | 9336 | return err; |
---|
8930 | 9337 | } |
---|
8931 | 9338 | |
---|
| 9339 | +static int __maybe_unused igb_resume(struct device *dev) |
---|
| 9340 | +{ |
---|
| 9341 | + return __igb_resume(dev, false); |
---|
| 9342 | +} |
---|
| 9343 | + |
---|
8932 | 9344 | static int __maybe_unused igb_runtime_idle(struct device *dev) |
---|
8933 | 9345 | { |
---|
8934 | | - struct pci_dev *pdev = to_pci_dev(dev); |
---|
8935 | | - struct net_device *netdev = pci_get_drvdata(pdev); |
---|
| 9346 | + struct net_device *netdev = dev_get_drvdata(dev); |
---|
8936 | 9347 | struct igb_adapter *adapter = netdev_priv(netdev); |
---|
8937 | 9348 | |
---|
8938 | 9349 | if (!igb_has_link(adapter)) |
---|
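The resume body is now shared: system resume still takes RTNL around __igb_open(), but the runtime-PM path may be entered with RTNL already held, so the bool argument skips the lock there to avoid a deadlock. A sketch of how such a pair of thin wrappers is typically hooked into dev_pm_ops; all my_* names and the stub bodies are placeholders, not igb code:

```c
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Illustrative stand-ins for the shared resume body and the suspend hooks. */
static int my_shared_resume(struct device *dev, bool rpm) { return 0; }
static int my_suspend(struct device *dev) { return 0; }
static int my_runtime_suspend(struct device *dev) { return 0; }
static int my_runtime_idle(struct device *dev) { return -EBUSY; }

static int my_resume(struct device *dev)
{
	return my_shared_resume(dev, false);	/* system resume: take rtnl */
}

static int my_runtime_resume(struct device *dev)
{
	return my_shared_resume(dev, true);	/* runtime resume: rtnl may already be held */
}

static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume,
			   my_runtime_idle)
};
```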
.. | .. |
---|
8948 | 9359 | |
---|
8949 | 9360 | static int __maybe_unused igb_runtime_resume(struct device *dev) |
---|
8950 | 9361 | { |
---|
8951 | | - return igb_resume(dev); |
---|
| 9362 | + return __igb_resume(dev, true); |
---|
8952 | 9363 | } |
---|
8953 | 9364 | |
---|
8954 | 9365 | static void igb_shutdown(struct pci_dev *pdev) |
---|
.. | .. |
---|
9046 | 9457 | struct net_device *netdev = pci_get_drvdata(pdev); |
---|
9047 | 9458 | struct igb_adapter *adapter = netdev_priv(netdev); |
---|
9048 | 9459 | |
---|
| 9460 | + if (state == pci_channel_io_normal) { |
---|
| 9461 | + dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n"); |
---|
| 9462 | + return PCI_ERS_RESULT_CAN_RECOVER; |
---|
| 9463 | + } |
---|
| 9464 | + |
---|
9049 | 9465 | netif_device_detach(netdev); |
---|
9050 | 9466 | |
---|
9051 | 9467 | if (state == pci_channel_io_perm_failure) |
---|
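igb_io_error_detected() now covers all three channel states: pci_channel_io_normal reports a non-fatal error with I/O still possible, so recovery is attempted without a reset; a permanently failed channel is disconnected; the frozen case still requests a slot reset. A generic sketch of that state handling (my_* names are placeholders and the device quiescing is elided):

```c
#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	if (state == pci_channel_io_normal)
		return PCI_ERS_RESULT_CAN_RECOVER;	/* non-fatal, no reset needed */

	/* Detach and quiesce the device before the slot is reset. */

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* device is gone for good */

	return PCI_ERS_RESULT_NEED_RESET;		/* pci_channel_io_frozen */
}

static const struct pci_error_handlers my_err_handler = {
	.error_detected	= my_error_detected,
	/* .slot_reset and .resume complete the recovery sequence */
};
```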
.. | .. |
---|
9064 | 9480 | * @pdev: Pointer to PCI device |
---|
9065 | 9481 | * |
---|
9066 | 9482 | * Restart the card from scratch, as if from a cold-boot. Implementation |
---|
9067 | | - * resembles the first-half of the igb_resume routine. |
---|
| 9483 | + * resembles the first-half of the __igb_resume routine. |
---|
9068 | 9484 | **/ |
---|
9069 | 9485 | static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) |
---|
9070 | 9486 | { |
---|
.. | .. |
---|
9072 | 9488 | struct igb_adapter *adapter = netdev_priv(netdev); |
---|
9073 | 9489 | struct e1000_hw *hw = &adapter->hw; |
---|
9074 | 9490 | pci_ers_result_t result; |
---|
9075 | | - int err; |
---|
9076 | 9491 | |
---|
9077 | 9492 | if (pci_enable_device_mem(pdev)) { |
---|
9078 | 9493 | dev_err(&pdev->dev, |
---|
.. | .. |
---|
9096 | 9511 | result = PCI_ERS_RESULT_RECOVERED; |
---|
9097 | 9512 | } |
---|
9098 | 9513 | |
---|
9099 | | - err = pci_cleanup_aer_uncorrect_error_status(pdev); |
---|
9100 | | - if (err) { |
---|
9101 | | - dev_err(&pdev->dev, |
---|
9102 | | - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", |
---|
9103 | | - err); |
---|
9104 | | - /* non-fatal, continue */ |
---|
9105 | | - } |
---|
9106 | | - |
---|
9107 | 9514 | return result; |
---|
9108 | 9515 | } |
---|
9109 | 9516 | |
---|
.. | .. |
---|
9113 | 9520 | * |
---|
9114 | 9521 | * This callback is called when the error recovery driver tells us that |
---|
9115 | 9522 | * its OK to resume normal operation. Implementation resembles the |
---|
9116 | | - * second-half of the igb_resume routine. |
---|
| 9523 | + * second-half of the __igb_resume routine. |
---|
9117 | 9524 | */ |
---|
9118 | 9525 | static void igb_io_resume(struct pci_dev *pdev) |
---|
9119 | 9526 | { |
---|
.. | .. |
---|
9415 | 9822 | reg = rd32(E1000_DTXCTL); |
---|
9416 | 9823 | reg |= E1000_DTXCTL_VLAN_ADDED; |
---|
9417 | 9824 | wr32(E1000_DTXCTL, reg); |
---|
9418 | | - /* Fall through */ |
---|
| 9825 | + fallthrough; |
---|
9419 | 9826 | case e1000_82580: |
---|
9420 | 9827 | /* enable replication vlan tag stripping */ |
---|
9421 | 9828 | reg = rd32(E1000_RPLOLR); |
---|
9422 | 9829 | reg |= E1000_RPLOLR_STRVLAN; |
---|
9423 | 9830 | wr32(E1000_RPLOLR, reg); |
---|
9424 | | - /* Fall through */ |
---|
| 9831 | + fallthrough; |
---|
9425 | 9832 | case e1000_i350: |
---|
9426 | 9833 | /* none of the above registers are supported by i350 */ |
---|
9427 | 9834 | break; |
---|
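The remaining substitutions replace the old /* Fall through */ comments with the fallthrough pseudo-keyword, so the intent is checked by the compiler (-Wimplicit-fallthrough) instead of relying on a comment convention. A trivial illustration:

```c
#include <linux/compiler_attributes.h>

static int my_feature_weight(int mac_type)
{
	int weight = 0;

	switch (mac_type) {
	case 2:
		weight += 10;	/* type 2 gets its own setup ... */
		fallthrough;	/* ... plus everything type 1 gets */
	case 1:
		weight += 1;
		break;
	default:
		break;
	}
	return weight;
}
```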
.. | .. |
---|
9443 | 9850 | struct e1000_hw *hw = &adapter->hw; |
---|
9444 | 9851 | u32 dmac_thr; |
---|
9445 | 9852 | u16 hwm; |
---|
| 9853 | + u32 reg; |
---|
9446 | 9854 | |
---|
9447 | 9855 | if (hw->mac.type > e1000_82580) { |
---|
9448 | 9856 | if (adapter->flags & IGB_FLAG_DMAC) { |
---|
9449 | | - u32 reg; |
---|
9450 | | - |
---|
9451 | 9857 | /* force threshold to 0. */ |
---|
9452 | 9858 | wr32(E1000_DMCTXTH, 0); |
---|
9453 | 9859 | |
---|
.. | .. |
---|
9480 | 9886 | /* Disable BMC-to-OS Watchdog Enable */ |
---|
9481 | 9887 | if (hw->mac.type != e1000_i354) |
---|
9482 | 9888 | reg &= ~E1000_DMACR_DC_BMC2OSW_EN; |
---|
9483 | | - |
---|
9484 | 9889 | wr32(E1000_DMACR, reg); |
---|
9485 | 9890 | |
---|
9486 | 9891 | /* no lower threshold to disable |
---|
.. | .. |
---|
9497 | 9902 | */ |
---|
9498 | 9903 | wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - |
---|
9499 | 9904 | (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); |
---|
| 9905 | + } |
---|
9500 | 9906 | |
---|
9501 | | - /* make low power state decision controlled |
---|
9502 | | - * by DMA coal |
---|
9503 | | - */ |
---|
| 9907 | + if (hw->mac.type >= e1000_i210 || |
---|
| 9908 | + (adapter->flags & IGB_FLAG_DMAC)) { |
---|
9504 | 9909 | reg = rd32(E1000_PCIEMISC); |
---|
9505 | | - reg &= ~E1000_PCIEMISC_LX_DECISION; |
---|
| 9910 | + reg |= E1000_PCIEMISC_LX_DECISION; |
---|
9506 | 9911 | wr32(E1000_PCIEMISC, reg); |
---|
9507 | 9912 | } /* endif adapter->dmac is not disabled */ |
---|
9508 | 9913 | } else if (hw->mac.type == e1000_82580) { |
---|