| .. | .. |
|---|
| 30 | 30 | #include <linux/if_ether.h> |
|---|
| 31 | 31 | #include <linux/aer.h> |
|---|
| 32 | 32 | #include <linux/prefetch.h> |
|---|
| 33 | +#include <linux/bpf.h> |
|---|
| 34 | +#include <linux/bpf_trace.h> |
|---|
| 33 | 35 | #include <linux/pm_runtime.h> |
|---|
| 34 | 36 | #include <linux/etherdevice.h> |
|---|
| 35 | 37 | #ifdef CONFIG_IGB_DCA |
|---|
| .. | .. |
|---|
| 37 | 39 | #endif |
|---|
| 38 | 40 | #include <linux/i2c.h> |
|---|
| 39 | 41 | #include "igb.h" |
|---|
| 40 | | - |
|---|
| 41 | | -#define MAJ 5 |
|---|
| 42 | | -#define MIN 4 |
|---|
| 43 | | -#define BUILD 0 |
|---|
| 44 | | -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ |
|---|
| 45 | | -__stringify(BUILD) "-k" |
|---|
| 46 | 42 | |
|---|
| 47 | 43 | enum queue_mode { |
|---|
| 48 | 44 | QUEUE_MODE_STRICT_PRIORITY, |
|---|
| .. | .. |
|---|
| 55 | 51 | }; |
|---|
| 56 | 52 | |
|---|
| 57 | 53 | char igb_driver_name[] = "igb"; |
|---|
| 58 | | -char igb_driver_version[] = DRV_VERSION; |
|---|
| 59 | 54 | static const char igb_driver_string[] = |
|---|
| 60 | 55 | "Intel(R) Gigabit Ethernet Network Driver"; |
|---|
| 61 | 56 | static const char igb_copyright[] = |
|---|
| .. | .. |
|---|
| 146 | 141 | static bool igb_clean_tx_irq(struct igb_q_vector *, int); |
|---|
| 147 | 142 | static int igb_clean_rx_irq(struct igb_q_vector *, int); |
|---|
| 148 | 143 | static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); |
|---|
| 149 | | -static void igb_tx_timeout(struct net_device *); |
|---|
| 144 | +static void igb_tx_timeout(struct net_device *, unsigned int txqueue); |
|---|
| 150 | 145 | static void igb_reset_task(struct work_struct *); |
|---|
| 151 | 146 | static void igb_vlan_mode(struct net_device *netdev, |
|---|
| 152 | 147 | netdev_features_t features); |
|---|
| .. | .. |
|---|
| 239 | 234 | |
|---|
| 240 | 235 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); |
|---|
| 241 | 236 | MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); |
|---|
| 242 | | -MODULE_LICENSE("GPL"); |
|---|
| 243 | | -MODULE_VERSION(DRV_VERSION); |
|---|
| 237 | +MODULE_LICENSE("GPL v2"); |
|---|
| 244 | 238 | |
|---|
| 245 | 239 | #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) |
|---|
| 246 | 240 | static int debug = -1; |
|---|
| .. | .. |
|---|
| 557 | 551 | |
|---|
| 558 | 552 | /** |
|---|
| 559 | 553 | * igb_get_i2c_data - Reads the I2C SDA data bit |
|---|
| 560 | | - * @hw: pointer to hardware structure |
|---|
| 561 | | - * @i2cctl: Current value of I2CCTL register |
|---|
| 554 | + * @data: opaque pointer to adapter struct |
|---|
| 562 | 555 | * |
|---|
| 563 | 556 | * Returns the I2C data bit value |
|---|
| 564 | 557 | **/ |
|---|
| .. | .. |
|---|
| 666 | 659 | { |
|---|
| 667 | 660 | int ret; |
|---|
| 668 | 661 | |
|---|
| 669 | | - pr_info("%s - version %s\n", |
|---|
| 670 | | - igb_driver_string, igb_driver_version); |
|---|
| 662 | + pr_info("%s\n", igb_driver_string); |
|---|
| 671 | 663 | pr_info("%s\n", igb_copyright); |
|---|
| 672 | 664 | |
|---|
| 673 | 665 | #ifdef CONFIG_IGB_DCA |
|---|
| .. | .. |
|---|
| 720 | 712 | adapter->rx_ring[i]->reg_idx = rbase_offset + |
|---|
| 721 | 713 | Q_IDX_82576(i); |
|---|
| 722 | 714 | } |
|---|
| 723 | | - /* Fall through */ |
|---|
| 715 | + fallthrough; |
|---|
| 724 | 716 | case e1000_82575: |
|---|
| 725 | 717 | case e1000_82580: |
|---|
| 726 | 718 | case e1000_i350: |
|---|
| 727 | 719 | case e1000_i354: |
|---|
| 728 | 720 | case e1000_i210: |
|---|
| 729 | 721 | case e1000_i211: |
|---|
| 730 | | - /* Fall through */ |
|---|
| 731 | 722 | default: |
|---|
| 732 | 723 | for (; i < adapter->num_rx_queues; i++) |
|---|
| 733 | 724 | adapter->rx_ring[i]->reg_idx = rbase_offset + i; |
|---|
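
Throughout the patch, `/* Fall through */` comments are replaced by the `fallthrough;` pseudo-keyword, and redundant ones (like the comment before `default:` above) are simply dropped. The keyword expands to `__attribute__((__fallthrough__))` on compilers that support it, so `-Wimplicit-fallthrough` can flag unannotated case fall-through instead of relying on a comment. A minimal sketch of the construct, with hypothetical case labels:

```c
/* Illustrative only -- not driver code. */
switch (mac_type) {
case TYPE_NEEDS_EXTRA_SETUP:
	do_extra_setup();
	fallthrough;	/* intentionally continue into the common path */
case TYPE_COMMON:
	do_common_setup();
	break;
default:
	break;
}
```
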
| .. | .. |
|---|
| 753 | 744 | struct net_device *netdev = igb->netdev; |
|---|
| 754 | 745 | hw->hw_addr = NULL; |
|---|
| 755 | 746 | netdev_err(netdev, "PCIe link lost\n"); |
|---|
| 747 | + WARN(pci_device_is_present(igb->pdev), |
|---|
| 748 | + "igb: Failed to read reg 0x%x!\n", reg); |
|---|
| 756 | 749 | } |
|---|
| 757 | 750 | |
|---|
| 758 | 751 | return value; |
|---|
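
This hunk sits in the error path of the register-read helper: when a read comes back as all ones the driver assumes the PCIe link is gone, clears `hw->hw_addr`, and now additionally WARNs only if the PCI device is still reported present, i.e. a genuine read failure rather than a surprise removal. A hedged sketch of the surrounding helper, reconstructed from context rather than quoted from the file:

```c
/* Approximate shape of igb_rd32() around the hunk above; details such as
 * the removed-device test and the exact all-ones check are paraphrased.
 */
u32 example_rd32(struct e1000_hw *hw, u32 reg)
{
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (!hw_addr)			/* adapter already marked as removed */
		return value;

	value = readl(&hw_addr[reg]);
	if (unlikely(!(~value))) {	/* all F's: PCIe link likely lost */
		hw->hw_addr = NULL;
		/* warn only when the device is still present on the bus */
	}
	return value;
}
```
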
| .. | .. |
|---|
| 1196 | 1189 | { |
|---|
| 1197 | 1190 | struct igb_q_vector *q_vector; |
|---|
| 1198 | 1191 | struct igb_ring *ring; |
|---|
| 1199 | | - int ring_count, size; |
|---|
| 1192 | + int ring_count; |
|---|
| 1193 | + size_t size; |
|---|
| 1200 | 1194 | |
|---|
| 1201 | 1195 | /* igb only supports 1 Tx and/or 1 Rx queue per vector */ |
|---|
| 1202 | 1196 | if (txr_count > 1 || rxr_count > 1) |
|---|
| 1203 | 1197 | return -ENOMEM; |
|---|
| 1204 | 1198 | |
|---|
| 1205 | 1199 | ring_count = txr_count + rxr_count; |
|---|
| 1206 | | - size = sizeof(struct igb_q_vector) + |
|---|
| 1207 | | - (sizeof(struct igb_ring) * ring_count); |
|---|
| 1200 | + size = struct_size(q_vector, ring, ring_count); |
|---|
| 1208 | 1201 | |
|---|
| 1209 | 1202 | /* allocate q_vector and rings */ |
|---|
| 1210 | 1203 | q_vector = adapter->q_vector[v_idx]; |
|---|
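
The open-coded `sizeof(struct igb_q_vector) + ring_count * sizeof(struct igb_ring)` becomes `struct_size()`, the `<linux/overflow.h>` helper for sizing a structure that ends in a flexible array member, and `size` is widened to `size_t` to match its return type. A small sketch of the idiom with an illustrative structure:

```c
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_vector {
	struct napi_struct napi;
	struct igb_ring ring[];			/* flexible array member */
};

static struct example_vector *alloc_vector(int ring_count)
{
	struct example_vector *v;

	/* conceptually sizeof(*v) + ring_count * sizeof(v->ring[0]),
	 * but the arithmetic saturates instead of wrapping on overflow
	 */
	v = kzalloc(struct_size(v, ring, ring_count), GFP_KERNEL);
	return v;
}
```
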
| .. | .. |
|---|
| 1858 | 1851 | * configuration' in respect to these parameters. |
|---|
| 1859 | 1852 | */ |
|---|
| 1860 | 1853 | |
|---|
| 1861 | | - netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \ |
|---|
| 1862 | | - idleslope %d sendslope %d hiCredit %d \ |
|---|
| 1863 | | - locredit %d\n", |
|---|
| 1864 | | - (ring->cbs_enable) ? "enabled" : "disabled", |
|---|
| 1865 | | - (ring->launchtime_enable) ? "enabled" : "disabled", queue, |
|---|
| 1866 | | - ring->idleslope, ring->sendslope, ring->hicredit, |
|---|
| 1867 | | - ring->locredit); |
|---|
| 1854 | + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", |
|---|
| 1855 | + ring->cbs_enable ? "enabled" : "disabled", |
|---|
| 1856 | + ring->launchtime_enable ? "enabled" : "disabled", |
|---|
| 1857 | + queue, |
|---|
| 1858 | + ring->idleslope, ring->sendslope, |
|---|
| 1859 | + ring->hicredit, ring->locredit); |
|---|
| 1868 | 1860 | } |
|---|
| 1869 | 1861 | |
|---|
| 1870 | 1862 | static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, |
|---|
| .. | .. |
|---|
| 1943 | 1935 | |
|---|
| 1944 | 1936 | val = rd32(E1000_RXPBS); |
|---|
| 1945 | 1937 | val &= ~I210_RXPBSIZE_MASK; |
|---|
| 1946 | | - val |= I210_RXPBSIZE_PB_32KB; |
|---|
| 1938 | + val |= I210_RXPBSIZE_PB_30KB; |
|---|
| 1947 | 1939 | wr32(E1000_RXPBS, val); |
|---|
| 1948 | 1940 | |
|---|
| 1949 | 1941 | /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ |
|---|
| .. | .. |
|---|
| 2237 | 2229 | |
|---|
| 2238 | 2230 | void igb_reinit_locked(struct igb_adapter *adapter) |
|---|
| 2239 | 2231 | { |
|---|
| 2240 | | - WARN_ON(in_interrupt()); |
|---|
| 2241 | 2232 | while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) |
|---|
| 2242 | 2233 | usleep_range(1000, 2000); |
|---|
| 2243 | 2234 | igb_down(adapter); |
|---|
| .. | .. |
|---|
| 2379 | 2370 | adapter->ei.get_invariants(hw); |
|---|
| 2380 | 2371 | adapter->flags &= ~IGB_FLAG_MEDIA_RESET; |
|---|
| 2381 | 2372 | } |
|---|
| 2382 | | - if ((mac->type == e1000_82575) && |
|---|
| 2373 | + if ((mac->type == e1000_82575 || mac->type == e1000_i350) && |
|---|
| 2383 | 2374 | (adapter->flags & IGB_FLAG_MAS_ENABLE)) { |
|---|
| 2384 | 2375 | igb_enable_mas(adapter); |
|---|
| 2385 | 2376 | } |
|---|
| .. | .. |
|---|
| 2490 | 2481 | else |
|---|
| 2491 | 2482 | igb_reset(adapter); |
|---|
| 2492 | 2483 | |
|---|
| 2493 | | - return 0; |
|---|
| 2484 | + return 1; |
|---|
| 2494 | 2485 | } |
|---|
| 2495 | 2486 | |
|---|
| 2496 | 2487 | static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], |
|---|
| 2497 | 2488 | struct net_device *dev, |
|---|
| 2498 | 2489 | const unsigned char *addr, u16 vid, |
|---|
| 2499 | | - u16 flags) |
|---|
| 2490 | + u16 flags, |
|---|
| 2491 | + struct netlink_ext_ack *extack) |
|---|
| 2500 | 2492 | { |
|---|
| 2501 | 2493 | /* guarantee we can provide a unique filter for the unicast address */ |
|---|
| 2502 | 2494 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { |
|---|
| .. | .. |
|---|
| 2524 | 2516 | if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) |
|---|
| 2525 | 2517 | return features & ~(NETIF_F_HW_CSUM | |
|---|
| 2526 | 2518 | NETIF_F_SCTP_CRC | |
|---|
| 2519 | + NETIF_F_GSO_UDP_L4 | |
|---|
| 2527 | 2520 | NETIF_F_HW_VLAN_CTAG_TX | |
|---|
| 2528 | 2521 | NETIF_F_TSO | |
|---|
| 2529 | 2522 | NETIF_F_TSO6); |
|---|
| .. | .. |
|---|
| 2532 | 2525 | if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) |
|---|
| 2533 | 2526 | return features & ~(NETIF_F_HW_CSUM | |
|---|
| 2534 | 2527 | NETIF_F_SCTP_CRC | |
|---|
| 2528 | + NETIF_F_GSO_UDP_L4 | |
|---|
| 2535 | 2529 | NETIF_F_TSO | |
|---|
| 2536 | 2530 | NETIF_F_TSO6); |
|---|
| 2537 | 2531 | |
|---|
| .. | .. |
|---|
| 2586 | 2580 | #define VLAN_PRIO_FULL_MASK (0x07) |
|---|
| 2587 | 2581 | |
|---|
| 2588 | 2582 | static int igb_parse_cls_flower(struct igb_adapter *adapter, |
|---|
| 2589 | | - struct tc_cls_flower_offload *f, |
|---|
| 2583 | + struct flow_cls_offload *f, |
|---|
| 2590 | 2584 | int traffic_class, |
|---|
| 2591 | 2585 | struct igb_nfc_filter *input) |
|---|
| 2592 | 2586 | { |
|---|
| 2587 | + struct flow_rule *rule = flow_cls_offload_flow_rule(f); |
|---|
| 2588 | + struct flow_dissector *dissector = rule->match.dissector; |
|---|
| 2593 | 2589 | struct netlink_ext_ack *extack = f->common.extack; |
|---|
| 2594 | 2590 | |
|---|
| 2595 | | - if (f->dissector->used_keys & |
|---|
| 2591 | + if (dissector->used_keys & |
|---|
| 2596 | 2592 | ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | |
|---|
| 2597 | 2593 | BIT(FLOW_DISSECTOR_KEY_CONTROL) | |
|---|
| 2598 | 2594 | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | |
|---|
| .. | .. |
|---|
| 2602 | 2598 | return -EOPNOTSUPP; |
|---|
| 2603 | 2599 | } |
|---|
| 2604 | 2600 | |
|---|
| 2605 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
|---|
| 2606 | | - struct flow_dissector_key_eth_addrs *key, *mask; |
|---|
| 2601 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
|---|
| 2602 | + struct flow_match_eth_addrs match; |
|---|
| 2607 | 2603 | |
|---|
| 2608 | | - key = skb_flow_dissector_target(f->dissector, |
|---|
| 2609 | | - FLOW_DISSECTOR_KEY_ETH_ADDRS, |
|---|
| 2610 | | - f->key); |
|---|
| 2611 | | - mask = skb_flow_dissector_target(f->dissector, |
|---|
| 2612 | | - FLOW_DISSECTOR_KEY_ETH_ADDRS, |
|---|
| 2613 | | - f->mask); |
|---|
| 2614 | | - |
|---|
| 2615 | | - if (!is_zero_ether_addr(mask->dst)) { |
|---|
| 2616 | | - if (!is_broadcast_ether_addr(mask->dst)) { |
|---|
| 2604 | + flow_rule_match_eth_addrs(rule, &match); |
|---|
| 2605 | + if (!is_zero_ether_addr(match.mask->dst)) { |
|---|
| 2606 | + if (!is_broadcast_ether_addr(match.mask->dst)) { |
|---|
| 2617 | 2607 | NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); |
|---|
| 2618 | 2608 | return -EINVAL; |
|---|
| 2619 | 2609 | } |
|---|
| 2620 | 2610 | |
|---|
| 2621 | 2611 | input->filter.match_flags |= |
|---|
| 2622 | 2612 | IGB_FILTER_FLAG_DST_MAC_ADDR; |
|---|
| 2623 | | - ether_addr_copy(input->filter.dst_addr, key->dst); |
|---|
| 2613 | + ether_addr_copy(input->filter.dst_addr, match.key->dst); |
|---|
| 2624 | 2614 | } |
|---|
| 2625 | 2615 | |
|---|
| 2626 | | - if (!is_zero_ether_addr(mask->src)) { |
|---|
| 2627 | | - if (!is_broadcast_ether_addr(mask->src)) { |
|---|
| 2616 | + if (!is_zero_ether_addr(match.mask->src)) { |
|---|
| 2617 | + if (!is_broadcast_ether_addr(match.mask->src)) { |
|---|
| 2628 | 2618 | NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); |
|---|
| 2629 | 2619 | return -EINVAL; |
|---|
| 2630 | 2620 | } |
|---|
| 2631 | 2621 | |
|---|
| 2632 | 2622 | input->filter.match_flags |= |
|---|
| 2633 | 2623 | IGB_FILTER_FLAG_SRC_MAC_ADDR; |
|---|
| 2634 | | - ether_addr_copy(input->filter.src_addr, key->src); |
|---|
| 2624 | + ether_addr_copy(input->filter.src_addr, match.key->src); |
|---|
| 2635 | 2625 | } |
|---|
| 2636 | 2626 | } |
|---|
| 2637 | 2627 | |
|---|
| 2638 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { |
|---|
| 2639 | | - struct flow_dissector_key_basic *key, *mask; |
|---|
| 2628 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { |
|---|
| 2629 | + struct flow_match_basic match; |
|---|
| 2640 | 2630 | |
|---|
| 2641 | | - key = skb_flow_dissector_target(f->dissector, |
|---|
| 2642 | | - FLOW_DISSECTOR_KEY_BASIC, |
|---|
| 2643 | | - f->key); |
|---|
| 2644 | | - mask = skb_flow_dissector_target(f->dissector, |
|---|
| 2645 | | - FLOW_DISSECTOR_KEY_BASIC, |
|---|
| 2646 | | - f->mask); |
|---|
| 2647 | | - |
|---|
| 2648 | | - if (mask->n_proto) { |
|---|
| 2649 | | - if (mask->n_proto != ETHER_TYPE_FULL_MASK) { |
|---|
| 2631 | + flow_rule_match_basic(rule, &match); |
|---|
| 2632 | + if (match.mask->n_proto) { |
|---|
| 2633 | + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { |
|---|
| 2650 | 2634 | NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); |
|---|
| 2651 | 2635 | return -EINVAL; |
|---|
| 2652 | 2636 | } |
|---|
| 2653 | 2637 | |
|---|
| 2654 | 2638 | input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; |
|---|
| 2655 | | - input->filter.etype = key->n_proto; |
|---|
| 2639 | + input->filter.etype = match.key->n_proto; |
|---|
| 2656 | 2640 | } |
|---|
| 2657 | 2641 | } |
|---|
| 2658 | 2642 | |
|---|
| 2659 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { |
|---|
| 2660 | | - struct flow_dissector_key_vlan *key, *mask; |
|---|
| 2643 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { |
|---|
| 2644 | + struct flow_match_vlan match; |
|---|
| 2661 | 2645 | |
|---|
| 2662 | | - key = skb_flow_dissector_target(f->dissector, |
|---|
| 2663 | | - FLOW_DISSECTOR_KEY_VLAN, |
|---|
| 2664 | | - f->key); |
|---|
| 2665 | | - mask = skb_flow_dissector_target(f->dissector, |
|---|
| 2666 | | - FLOW_DISSECTOR_KEY_VLAN, |
|---|
| 2667 | | - f->mask); |
|---|
| 2668 | | - |
|---|
| 2669 | | - if (mask->vlan_priority) { |
|---|
| 2670 | | - if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) { |
|---|
| 2646 | + flow_rule_match_vlan(rule, &match); |
|---|
| 2647 | + if (match.mask->vlan_priority) { |
|---|
| 2648 | + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { |
|---|
| 2671 | 2649 | NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); |
|---|
| 2672 | 2650 | return -EINVAL; |
|---|
| 2673 | 2651 | } |
|---|
| 2674 | 2652 | |
|---|
| 2675 | 2653 | input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; |
|---|
| 2676 | | - input->filter.vlan_tci = key->vlan_priority; |
|---|
| 2654 | + input->filter.vlan_tci = |
|---|
| 2655 | + (__force __be16)match.key->vlan_priority; |
|---|
| 2677 | 2656 | } |
|---|
| 2678 | 2657 | } |
|---|
| 2679 | 2658 | |
|---|
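
The classifier offload is converted from the old `tc_cls_flower_offload` interface, where drivers pulled each key and mask out of the dissector with `skb_flow_dissector_target()`, to the `flow_cls_offload`/`flow_rule` API: `flow_cls_offload_flow_rule()` yields a `struct flow_rule`, `flow_rule_match_key()` tests for a key, and a single `flow_rule_match_*()` call fills a `match` struct holding both key and mask. A hedged sketch of the same pattern for a key this driver does not parse (IPv4 addresses), purely as illustration:

```c
/* Illustration of the flow_rule match idiom; igb itself only matches
 * Ethernet addresses, EtherType and VLAN priority as shown above.
 */
static int example_parse_ipv4(struct flow_cls_offload *f, __be32 *dst_ip)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ipv4_addrs match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return 0;			/* key absent from this rule */

	flow_rule_match_ipv4_addrs(rule, &match);
	if (match.mask->dst != cpu_to_be32(0xffffffff))
		return -EOPNOTSUPP;		/* only exact matches supported */

	*dst_ip = match.key->dst;
	return 0;
}
```
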
| .. | .. |
|---|
| 2684 | 2663 | } |
|---|
| 2685 | 2664 | |
|---|
| 2686 | 2665 | static int igb_configure_clsflower(struct igb_adapter *adapter, |
|---|
| 2687 | | - struct tc_cls_flower_offload *cls_flower) |
|---|
| 2666 | + struct flow_cls_offload *cls_flower) |
|---|
| 2688 | 2667 | { |
|---|
| 2689 | 2668 | struct netlink_ext_ack *extack = cls_flower->common.extack; |
|---|
| 2690 | 2669 | struct igb_nfc_filter *filter, *f; |
|---|
| .. | .. |
|---|
| 2746 | 2725 | } |
|---|
| 2747 | 2726 | |
|---|
| 2748 | 2727 | static int igb_delete_clsflower(struct igb_adapter *adapter, |
|---|
| 2749 | | - struct tc_cls_flower_offload *cls_flower) |
|---|
| 2728 | + struct flow_cls_offload *cls_flower) |
|---|
| 2750 | 2729 | { |
|---|
| 2751 | 2730 | struct igb_nfc_filter *filter; |
|---|
| 2752 | 2731 | int err; |
|---|
| .. | .. |
|---|
| 2776 | 2755 | } |
|---|
| 2777 | 2756 | |
|---|
| 2778 | 2757 | static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, |
|---|
| 2779 | | - struct tc_cls_flower_offload *cls_flower) |
|---|
| 2758 | + struct flow_cls_offload *cls_flower) |
|---|
| 2780 | 2759 | { |
|---|
| 2781 | 2760 | switch (cls_flower->command) { |
|---|
| 2782 | | - case TC_CLSFLOWER_REPLACE: |
|---|
| 2761 | + case FLOW_CLS_REPLACE: |
|---|
| 2783 | 2762 | return igb_configure_clsflower(adapter, cls_flower); |
|---|
| 2784 | | - case TC_CLSFLOWER_DESTROY: |
|---|
| 2763 | + case FLOW_CLS_DESTROY: |
|---|
| 2785 | 2764 | return igb_delete_clsflower(adapter, cls_flower); |
|---|
| 2786 | | - case TC_CLSFLOWER_STATS: |
|---|
| 2765 | + case FLOW_CLS_STATS: |
|---|
| 2787 | 2766 | return -EOPNOTSUPP; |
|---|
| 2788 | 2767 | default: |
|---|
| 2789 | 2768 | return -EOPNOTSUPP; |
|---|
| .. | .. |
|---|
| 2802 | 2781 | case TC_SETUP_CLSFLOWER: |
|---|
| 2803 | 2782 | return igb_setup_tc_cls_flower(adapter, type_data); |
|---|
| 2804 | 2783 | |
|---|
| 2805 | | - default: |
|---|
| 2806 | | - return -EOPNOTSUPP; |
|---|
| 2807 | | - } |
|---|
| 2808 | | -} |
|---|
| 2809 | | - |
|---|
| 2810 | | -static int igb_setup_tc_block(struct igb_adapter *adapter, |
|---|
| 2811 | | - struct tc_block_offload *f) |
|---|
| 2812 | | -{ |
|---|
| 2813 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
|---|
| 2814 | | - return -EOPNOTSUPP; |
|---|
| 2815 | | - |
|---|
| 2816 | | - switch (f->command) { |
|---|
| 2817 | | - case TC_BLOCK_BIND: |
|---|
| 2818 | | - return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, |
|---|
| 2819 | | - adapter, adapter, f->extack); |
|---|
| 2820 | | - case TC_BLOCK_UNBIND: |
|---|
| 2821 | | - tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, |
|---|
| 2822 | | - adapter); |
|---|
| 2823 | | - return 0; |
|---|
| 2824 | 2784 | default: |
|---|
| 2825 | 2785 | return -EOPNOTSUPP; |
|---|
| 2826 | 2786 | } |
|---|
| .. | .. |
|---|
| 2849 | 2809 | return 0; |
|---|
| 2850 | 2810 | } |
|---|
| 2851 | 2811 | |
|---|
| 2812 | +static LIST_HEAD(igb_block_cb_list); |
|---|
| 2813 | + |
|---|
| 2852 | 2814 | static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, |
|---|
| 2853 | 2815 | void *type_data) |
|---|
| 2854 | 2816 | { |
|---|
| .. | .. |
|---|
| 2858 | 2820 | case TC_SETUP_QDISC_CBS: |
|---|
| 2859 | 2821 | return igb_offload_cbs(adapter, type_data); |
|---|
| 2860 | 2822 | case TC_SETUP_BLOCK: |
|---|
| 2861 | | - return igb_setup_tc_block(adapter, type_data); |
|---|
| 2823 | + return flow_block_cb_setup_simple(type_data, |
|---|
| 2824 | + &igb_block_cb_list, |
|---|
| 2825 | + igb_setup_tc_block_cb, |
|---|
| 2826 | + adapter, adapter, true); |
|---|
| 2827 | + |
|---|
| 2862 | 2828 | case TC_SETUP_QDISC_ETF: |
|---|
| 2863 | 2829 | return igb_offload_txtime(adapter, type_data); |
|---|
| 2864 | 2830 | |
|---|
| 2865 | 2831 | default: |
|---|
| 2866 | 2832 | return -EOPNOTSUPP; |
|---|
| 2867 | 2833 | } |
|---|
| 2834 | +} |
|---|
| 2835 | + |
|---|
| 2836 | +static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) |
|---|
| 2837 | +{ |
|---|
| 2838 | + int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; |
|---|
| 2839 | + struct igb_adapter *adapter = netdev_priv(dev); |
|---|
| 2840 | + struct bpf_prog *prog = bpf->prog, *old_prog; |
|---|
| 2841 | + bool running = netif_running(dev); |
|---|
| 2842 | + bool need_reset; |
|---|
| 2843 | + |
|---|
| 2844 | + /* verify igb ring attributes are sufficient for XDP */ |
|---|
| 2845 | + for (i = 0; i < adapter->num_rx_queues; i++) { |
|---|
| 2846 | + struct igb_ring *ring = adapter->rx_ring[i]; |
|---|
| 2847 | + |
|---|
| 2848 | + if (frame_size > igb_rx_bufsz(ring)) { |
|---|
| 2849 | + NL_SET_ERR_MSG_MOD(bpf->extack, |
|---|
| 2850 | + "The RX buffer size is too small for the frame size"); |
|---|
| 2851 | + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", |
|---|
| 2852 | + igb_rx_bufsz(ring), frame_size); |
|---|
| 2853 | + return -EINVAL; |
|---|
| 2854 | + } |
|---|
| 2855 | + } |
|---|
| 2856 | + |
|---|
| 2857 | + old_prog = xchg(&adapter->xdp_prog, prog); |
|---|
| 2858 | + need_reset = (!!prog != !!old_prog); |
|---|
| 2859 | + |
|---|
| 2860 | + /* device is up and bpf is added/removed, must setup the RX queues */ |
|---|
| 2861 | + if (need_reset && running) { |
|---|
| 2862 | + igb_close(dev); |
|---|
| 2863 | + } else { |
|---|
| 2864 | + for (i = 0; i < adapter->num_rx_queues; i++) |
|---|
| 2865 | + (void)xchg(&adapter->rx_ring[i]->xdp_prog, |
|---|
| 2866 | + adapter->xdp_prog); |
|---|
| 2867 | + } |
|---|
| 2868 | + |
|---|
| 2869 | + if (old_prog) |
|---|
| 2870 | + bpf_prog_put(old_prog); |
|---|
| 2871 | + |
|---|
| 2872 | + /* bpf is just replaced, RXQ and MTU are already setup */ |
|---|
| 2873 | + if (!need_reset) |
|---|
| 2874 | + return 0; |
|---|
| 2875 | + |
|---|
| 2876 | + if (running) |
|---|
| 2877 | + igb_open(dev); |
|---|
| 2878 | + |
|---|
| 2879 | + return 0; |
|---|
| 2880 | +} |
|---|
| 2881 | + |
|---|
| 2882 | +static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
|---|
| 2883 | +{ |
|---|
| 2884 | + switch (xdp->command) { |
|---|
| 2885 | + case XDP_SETUP_PROG: |
|---|
| 2886 | + return igb_xdp_setup(dev, xdp); |
|---|
| 2887 | + default: |
|---|
| 2888 | + return -EINVAL; |
|---|
| 2889 | + } |
|---|
| 2890 | +} |
|---|
| 2891 | + |
|---|
| 2892 | +static void igb_xdp_ring_update_tail(struct igb_ring *ring) |
|---|
| 2893 | +{ |
|---|
| 2894 | + /* Force memory writes to complete before letting h/w know there |
|---|
| 2895 | + * are new descriptors to fetch. |
|---|
| 2896 | + */ |
|---|
| 2897 | + wmb(); |
|---|
| 2898 | + writel(ring->next_to_use, ring->tail); |
|---|
| 2899 | +} |
|---|
| 2900 | + |
|---|
| 2901 | +static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) |
|---|
| 2902 | +{ |
|---|
| 2903 | + unsigned int r_idx = smp_processor_id(); |
|---|
| 2904 | + |
|---|
| 2905 | + if (r_idx >= adapter->num_tx_queues) |
|---|
| 2906 | + r_idx = r_idx % adapter->num_tx_queues; |
|---|
| 2907 | + |
|---|
| 2908 | + return adapter->tx_ring[r_idx]; |
|---|
| 2909 | +} |
|---|
| 2910 | + |
|---|
| 2911 | +static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) |
|---|
| 2912 | +{ |
|---|
| 2913 | + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); |
|---|
| 2914 | + int cpu = smp_processor_id(); |
|---|
| 2915 | + struct igb_ring *tx_ring; |
|---|
| 2916 | + struct netdev_queue *nq; |
|---|
| 2917 | + u32 ret; |
|---|
| 2918 | + |
|---|
| 2919 | + if (unlikely(!xdpf)) |
|---|
| 2920 | + return IGB_XDP_CONSUMED; |
|---|
| 2921 | + |
|---|
| 2922 | + /* During program transitions its possible adapter->xdp_prog is assigned |
|---|
| 2923 | + * but ring has not been configured yet. In this case simply abort xmit. |
|---|
| 2924 | + */ |
|---|
| 2925 | + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; |
|---|
| 2926 | + if (unlikely(!tx_ring)) |
|---|
| 2927 | + return IGB_XDP_CONSUMED; |
|---|
| 2928 | + |
|---|
| 2929 | + nq = txring_txq(tx_ring); |
|---|
| 2930 | + __netif_tx_lock(nq, cpu); |
|---|
| 2931 | + /* Avoid transmit queue timeout since we share it with the slow path */ |
|---|
| 2932 | + nq->trans_start = jiffies; |
|---|
| 2933 | + ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); |
|---|
| 2934 | + __netif_tx_unlock(nq); |
|---|
| 2935 | + |
|---|
| 2936 | + return ret; |
|---|
| 2937 | +} |
|---|
| 2938 | + |
|---|
| 2939 | +static int igb_xdp_xmit(struct net_device *dev, int n, |
|---|
| 2940 | + struct xdp_frame **frames, u32 flags) |
|---|
| 2941 | +{ |
|---|
| 2942 | + struct igb_adapter *adapter = netdev_priv(dev); |
|---|
| 2943 | + int cpu = smp_processor_id(); |
|---|
| 2944 | + struct igb_ring *tx_ring; |
|---|
| 2945 | + struct netdev_queue *nq; |
|---|
| 2946 | + int drops = 0; |
|---|
| 2947 | + int i; |
|---|
| 2948 | + |
|---|
| 2949 | + if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) |
|---|
| 2950 | + return -ENETDOWN; |
|---|
| 2951 | + |
|---|
| 2952 | + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
|---|
| 2953 | + return -EINVAL; |
|---|
| 2954 | + |
|---|
| 2955 | + /* During program transitions its possible adapter->xdp_prog is assigned |
|---|
| 2956 | + * but ring has not been configured yet. In this case simply abort xmit. |
|---|
| 2957 | + */ |
|---|
| 2958 | + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; |
|---|
| 2959 | + if (unlikely(!tx_ring)) |
|---|
| 2960 | + return -ENXIO; |
|---|
| 2961 | + |
|---|
| 2962 | + nq = txring_txq(tx_ring); |
|---|
| 2963 | + __netif_tx_lock(nq, cpu); |
|---|
| 2964 | + |
|---|
| 2965 | + /* Avoid transmit queue timeout since we share it with the slow path */ |
|---|
| 2966 | + nq->trans_start = jiffies; |
|---|
| 2967 | + |
|---|
| 2968 | + for (i = 0; i < n; i++) { |
|---|
| 2969 | + struct xdp_frame *xdpf = frames[i]; |
|---|
| 2970 | + int err; |
|---|
| 2971 | + |
|---|
| 2972 | + err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); |
|---|
| 2973 | + if (err != IGB_XDP_TX) { |
|---|
| 2974 | + xdp_return_frame_rx_napi(xdpf); |
|---|
| 2975 | + drops++; |
|---|
| 2976 | + } |
|---|
| 2977 | + } |
|---|
| 2978 | + |
|---|
| 2979 | + __netif_tx_unlock(nq); |
|---|
| 2980 | + |
|---|
| 2981 | + if (unlikely(flags & XDP_XMIT_FLUSH)) |
|---|
| 2982 | + igb_xdp_ring_update_tail(tx_ring); |
|---|
| 2983 | + |
|---|
| 2984 | + return n - drops; |
|---|
| 2868 | 2985 | } |
|---|
| 2869 | 2986 | |
|---|
| 2870 | 2987 | static const struct net_device_ops igb_netdev_ops = { |
|---|
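
The block above adds the driver's XDP plumbing: `igb_xdp_setup()` verifies that every Rx ring's buffer can hold `MTU + IGB_ETH_PKT_HDR_PAD`, swaps the program pointer, and only bounces the interface when a program is added or removed (a swap just updates the per-ring pointers); `igb_xdp_xmit()` and `igb_xdp_xmit_back()` service XDP_REDIRECT and XDP_TX respectively, sharing the regular Tx queues under the netdev Tx lock. As a hedged userspace illustration (not part of the patch), attaching a program through the new `.ndo_bpf` hook could look like the following; the object file name and libbpf's `bpf_xdp_attach()` (v0.8+) are assumptions:

```c
/* Hypothetical userspace loader; "xdp_pass.o" and "eth0" are placeholders. */
#include <bpf/libbpf.h>
#include <linux/if_link.h>	/* XDP_FLAGS_DRV_MODE */
#include <net/if.h>
#include <stdio.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_pass.o", NULL);
	int ifindex = if_nametoindex("eth0");
	struct bpf_program *prog;
	int err;

	if (!obj || bpf_object__load(obj))
		return 1;
	prog = bpf_object__next_program(obj, NULL);

	/* native (driver) mode: the kernel ends up in igb_xdp_setup() */
	err = bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			     XDP_FLAGS_DRV_MODE, NULL);
	if (err)
		fprintf(stderr, "XDP attach failed: %d\n", err);
	return err ? 1 : 0;
}
```

For a quick test, iproute2 offers roughly the same from the shell, e.g. `ip link set dev eth0 xdpdrv obj xdp_pass.o sec xdp`.
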
| .. | .. |
|---|
| 2891 | 3008 | .ndo_fdb_add = igb_ndo_fdb_add, |
|---|
| 2892 | 3009 | .ndo_features_check = igb_features_check, |
|---|
| 2893 | 3010 | .ndo_setup_tc = igb_setup_tc, |
|---|
| 3011 | + .ndo_bpf = igb_xdp, |
|---|
| 3012 | + .ndo_xdp_xmit = igb_xdp_xmit, |
|---|
| 2894 | 3013 | }; |
|---|
| 2895 | 3014 | |
|---|
| 2896 | 3015 | /** |
|---|
| .. | .. |
|---|
| 2915 | 3034 | fw.invm_img_type); |
|---|
| 2916 | 3035 | break; |
|---|
| 2917 | 3036 | } |
|---|
| 2918 | | - /* fall through */ |
|---|
| 3037 | + fallthrough; |
|---|
| 2919 | 3038 | default: |
|---|
| 2920 | 3039 | /* if option is rom valid, display its version too */ |
|---|
| 2921 | 3040 | if (fw.or_valid) { |
|---|
| .. | .. |
|---|
| 3157 | 3276 | NETIF_F_HW_CSUM; |
|---|
| 3158 | 3277 | |
|---|
| 3159 | 3278 | if (hw->mac.type >= e1000_82576) |
|---|
| 3160 | | - netdev->features |= NETIF_F_SCTP_CRC; |
|---|
| 3279 | + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; |
|---|
| 3161 | 3280 | |
|---|
| 3162 | 3281 | if (hw->mac.type >= e1000_i350) |
|---|
| 3163 | 3282 | netdev->features |= NETIF_F_HW_TC; |
|---|
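
Advertising `NETIF_F_GSO_UDP_L4` on 82576 and later lets the stack pass the driver one large UDP buffer plus a segment size and have `igb_tso()` (reworked later in this patch) cut it into wire-sized datagrams. Userspace opts in per socket with the `UDP_SEGMENT` socket option; a hedged sketch, where the 1400-byte segment size is just an illustrative value:

```c
/* Illustrative only: send one large buffer as many 1400-byte UDP datagrams.
 * On older userspace headers UDP_SEGMENT/SOL_UDP may come from <linux/udp.h>.
 */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

static ssize_t send_udp_gso(int fd, const struct sockaddr *dst, socklen_t dlen,
			    const void *buf, size_t len)
{
	int gso_size = 1400;

	if (setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size)))
		return -1;
	return sendto(fd, buf, len, 0, dst, dlen);	/* up to ~64 KiB at once */
}
```
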
| .. | .. |
|---|
| 3431 | 3550 | "Width x1" : "unknown"), netdev->dev_addr); |
|---|
| 3432 | 3551 | } |
|---|
| 3433 | 3552 | |
|---|
| 3434 | | - if ((hw->mac.type >= e1000_i210 || |
|---|
| 3553 | + if ((hw->mac.type == e1000_82576 && |
|---|
| 3554 | + rd32(E1000_EECD) & E1000_EECD_PRES) || |
|---|
| 3555 | + (hw->mac.type >= e1000_i210 || |
|---|
| 3435 | 3556 | igb_get_flash_presence_i210(hw))) { |
|---|
| 3436 | 3557 | ret_val = igb_read_part_string(hw, part_str, |
|---|
| 3437 | 3558 | E1000_PBANUM_LENGTH); |
|---|
| .. | .. |
|---|
| 3478 | 3599 | } |
|---|
| 3479 | 3600 | } |
|---|
| 3480 | 3601 | |
|---|
| 3481 | | - dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); |
|---|
| 3602 | + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); |
|---|
| 3482 | 3603 | |
|---|
| 3483 | 3604 | pm_runtime_put_noidle(&pdev->dev); |
|---|
| 3484 | 3605 | return 0; |
|---|
| .. | .. |
|---|
| 3517 | 3638 | struct net_device *netdev = pci_get_drvdata(pdev); |
|---|
| 3518 | 3639 | struct igb_adapter *adapter = netdev_priv(netdev); |
|---|
| 3519 | 3640 | struct e1000_hw *hw = &adapter->hw; |
|---|
| 3641 | + unsigned long flags; |
|---|
| 3520 | 3642 | |
|---|
| 3521 | 3643 | /* reclaim resources allocated to VFs */ |
|---|
| 3522 | 3644 | if (adapter->vf_data) { |
|---|
| .. | .. |
|---|
| 3529 | 3651 | pci_disable_sriov(pdev); |
|---|
| 3530 | 3652 | msleep(500); |
|---|
| 3531 | 3653 | } |
|---|
| 3532 | | - |
|---|
| 3654 | + spin_lock_irqsave(&adapter->vfs_lock, flags); |
|---|
| 3533 | 3655 | kfree(adapter->vf_mac_list); |
|---|
| 3534 | 3656 | adapter->vf_mac_list = NULL; |
|---|
| 3535 | 3657 | kfree(adapter->vf_data); |
|---|
| 3536 | 3658 | adapter->vf_data = NULL; |
|---|
| 3537 | 3659 | adapter->vfs_allocated_count = 0; |
|---|
| 3660 | + spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
|---|
| 3538 | 3661 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); |
|---|
| 3539 | 3662 | wrfl(); |
|---|
| 3540 | 3663 | msleep(100); |
|---|
| .. | .. |
|---|
| 3694 | 3817 | igb_release_hw_control(adapter); |
|---|
| 3695 | 3818 | |
|---|
| 3696 | 3819 | #ifdef CONFIG_PCI_IOV |
|---|
| 3820 | + rtnl_lock(); |
|---|
| 3697 | 3821 | igb_disable_sriov(pdev); |
|---|
| 3822 | + rtnl_unlock(); |
|---|
| 3698 | 3823 | #endif |
|---|
| 3699 | 3824 | |
|---|
| 3700 | 3825 | unregister_netdev(netdev); |
|---|
| .. | .. |
|---|
| 3767 | 3892 | max_rss_queues = 1; |
|---|
| 3768 | 3893 | break; |
|---|
| 3769 | 3894 | } |
|---|
| 3770 | | - /* fall through */ |
|---|
| 3895 | + fallthrough; |
|---|
| 3771 | 3896 | case e1000_82576: |
|---|
| 3772 | 3897 | if (!!adapter->vfs_allocated_count) { |
|---|
| 3773 | 3898 | max_rss_queues = 2; |
|---|
| 3774 | 3899 | break; |
|---|
| 3775 | 3900 | } |
|---|
| 3776 | | - /* fall through */ |
|---|
| 3901 | + fallthrough; |
|---|
| 3777 | 3902 | case e1000_82580: |
|---|
| 3778 | 3903 | case e1000_i354: |
|---|
| 3779 | 3904 | default: |
|---|
| .. | .. |
|---|
| 3849 | 3974 | /* set default work limits */ |
|---|
| 3850 | 3975 | adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; |
|---|
| 3851 | 3976 | |
|---|
| 3852 | | - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + |
|---|
| 3853 | | - VLAN_HLEN; |
|---|
| 3977 | + adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; |
|---|
| 3854 | 3978 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; |
|---|
| 3855 | 3979 | |
|---|
| 3856 | 3980 | spin_lock_init(&adapter->nfc_lock); |
|---|
| 3857 | 3981 | spin_lock_init(&adapter->stats64_lock); |
|---|
| 3982 | + |
|---|
| 3983 | + /* init spinlock to avoid concurrency of VF resources */ |
|---|
| 3984 | + spin_lock_init(&adapter->vfs_lock); |
|---|
| 3858 | 3985 | #ifdef CONFIG_PCI_IOV |
|---|
| 3859 | 3986 | switch (hw->mac.type) { |
|---|
| 3860 | 3987 | case e1000_82576: |
|---|
| .. | .. |
|---|
| 3912 | 4039 | /** |
|---|
| 3913 | 4040 | * igb_open - Called when a network interface is made active |
|---|
| 3914 | 4041 | * @netdev: network interface device structure |
|---|
| 4042 | + * @resuming: indicates whether we are in a resume call |
|---|
| 3915 | 4043 | * |
|---|
| 3916 | 4044 | * Returns 0 on success, negative value on failure |
|---|
| 3917 | 4045 | * |
|---|
| .. | .. |
|---|
| 4029 | 4157 | /** |
|---|
| 4030 | 4158 | * igb_close - Disables a network interface |
|---|
| 4031 | 4159 | * @netdev: network interface device structure |
|---|
| 4160 | + * @suspending: indicates we are in a suspend call |
|---|
| 4032 | 4161 | * |
|---|
| 4033 | 4162 | * Returns 0, this is not allowed to fail |
|---|
| 4034 | 4163 | * |
|---|
| .. | .. |
|---|
| 4222 | 4351 | **/ |
|---|
| 4223 | 4352 | int igb_setup_rx_resources(struct igb_ring *rx_ring) |
|---|
| 4224 | 4353 | { |
|---|
| 4354 | + struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); |
|---|
| 4225 | 4355 | struct device *dev = rx_ring->dev; |
|---|
| 4226 | 4356 | int size; |
|---|
| 4227 | 4357 | |
|---|
| .. | .. |
|---|
| 4243 | 4373 | rx_ring->next_to_alloc = 0; |
|---|
| 4244 | 4374 | rx_ring->next_to_clean = 0; |
|---|
| 4245 | 4375 | rx_ring->next_to_use = 0; |
|---|
| 4376 | + |
|---|
| 4377 | + rx_ring->xdp_prog = adapter->xdp_prog; |
|---|
| 4378 | + |
|---|
| 4379 | + /* XDP RX-queue info */ |
|---|
| 4380 | + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, |
|---|
| 4381 | + rx_ring->queue_index) < 0) |
|---|
| 4382 | + goto err; |
|---|
| 4246 | 4383 | |
|---|
| 4247 | 4384 | return 0; |
|---|
| 4248 | 4385 | |
|---|
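
Each Rx ring now carries XDP bookkeeping: it caches the adapter's program pointer and registers an `xdp_rxq_info` against its netdev and queue index. The matching pieces appear in later hunks; `igb_configure_rx_ring()` declares the `MEM_TYPE_PAGE_SHARED` memory model and `igb_free_rx_resources()` unregisters the info when the ring is torn down. A condensed, hedged sketch of that lifecycle with error handling elided:

```c
/* Condensed view of the registration/teardown pairing added by this patch;
 * not a verbatim copy of the driver functions.
 */
static int example_rx_ring_init(struct igb_ring *rx_ring)
{
	int err;

	err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
			       rx_ring->queue_index);
	if (err < 0)
		return err;

	/* tell the XDP core how this ring's buffers are recycled */
	return xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
					  MEM_TYPE_PAGE_SHARED, NULL);
}

static void example_rx_ring_free(struct igb_ring *rx_ring)
{
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);	/* also drops the mem model */
}
```
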
| .. | .. |
|---|
| 4362 | 4499 | else |
|---|
| 4363 | 4500 | mrqc |= E1000_MRQC_ENABLE_VMDQ; |
|---|
| 4364 | 4501 | } else { |
|---|
| 4365 | | - if (hw->mac.type != e1000_i211) |
|---|
| 4366 | | - mrqc |= E1000_MRQC_ENABLE_RSS_MQ; |
|---|
| 4502 | + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; |
|---|
| 4367 | 4503 | } |
|---|
| 4368 | 4504 | igb_vmm_control(adapter); |
|---|
| 4369 | 4505 | |
|---|
| .. | .. |
|---|
| 4502 | 4638 | } |
|---|
| 4503 | 4639 | |
|---|
| 4504 | 4640 | /** |
|---|
| 4641 | + * igb_setup_srrctl - configure the split and replication receive control |
|---|
| 4642 | + * registers |
|---|
| 4643 | + * @adapter: Board private structure |
|---|
| 4644 | + * @ring: receive ring to be configured |
|---|
| 4645 | + **/ |
|---|
| 4646 | +void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) |
|---|
| 4647 | +{ |
|---|
| 4648 | + struct e1000_hw *hw = &adapter->hw; |
|---|
| 4649 | + int reg_idx = ring->reg_idx; |
|---|
| 4650 | + u32 srrctl = 0; |
|---|
| 4651 | + |
|---|
| 4652 | + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; |
|---|
| 4653 | + if (ring_uses_large_buffer(ring)) |
|---|
| 4654 | + srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; |
|---|
| 4655 | + else |
|---|
| 4656 | + srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; |
|---|
| 4657 | + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; |
|---|
| 4658 | + if (hw->mac.type >= e1000_82580) |
|---|
| 4659 | + srrctl |= E1000_SRRCTL_TIMESTAMP; |
|---|
| 4660 | + /* Only set Drop Enable if VFs allocated, or we are supporting multiple |
|---|
| 4661 | + * queues and rx flow control is disabled |
|---|
| 4662 | + */ |
|---|
| 4663 | + if (adapter->vfs_allocated_count || |
|---|
| 4664 | + (!(hw->fc.current_mode & e1000_fc_rx_pause) && |
|---|
| 4665 | + adapter->num_rx_queues > 1)) |
|---|
| 4666 | + srrctl |= E1000_SRRCTL_DROP_EN; |
|---|
| 4667 | + |
|---|
| 4668 | + wr32(E1000_SRRCTL(reg_idx), srrctl); |
|---|
| 4669 | +} |
|---|
| 4670 | + |
|---|
| 4671 | +/** |
|---|
| 4505 | 4672 | * igb_configure_rx_ring - Configure a receive ring after Reset |
|---|
| 4506 | 4673 | * @adapter: board private structure |
|---|
| 4507 | 4674 | * @ring: receive ring to be configured |
|---|
| .. | .. |
|---|
| 4515 | 4682 | union e1000_adv_rx_desc *rx_desc; |
|---|
| 4516 | 4683 | u64 rdba = ring->dma; |
|---|
| 4517 | 4684 | int reg_idx = ring->reg_idx; |
|---|
| 4518 | | - u32 srrctl = 0, rxdctl = 0; |
|---|
| 4685 | + u32 rxdctl = 0; |
|---|
| 4686 | + |
|---|
| 4687 | + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); |
|---|
| 4688 | + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, |
|---|
| 4689 | + MEM_TYPE_PAGE_SHARED, NULL)); |
|---|
| 4519 | 4690 | |
|---|
| 4520 | 4691 | /* disable the queue */ |
|---|
| 4521 | 4692 | wr32(E1000_RXDCTL(reg_idx), 0); |
|---|
| .. | .. |
|---|
| 4533 | 4704 | writel(0, ring->tail); |
|---|
| 4534 | 4705 | |
|---|
| 4535 | 4706 | /* set descriptor configuration */ |
|---|
| 4536 | | - srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; |
|---|
| 4537 | | - if (ring_uses_large_buffer(ring)) |
|---|
| 4538 | | - srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; |
|---|
| 4539 | | - else |
|---|
| 4540 | | - srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; |
|---|
| 4541 | | - srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; |
|---|
| 4542 | | - if (hw->mac.type >= e1000_82580) |
|---|
| 4543 | | - srrctl |= E1000_SRRCTL_TIMESTAMP; |
|---|
| 4544 | | - /* Only set Drop Enable if we are supporting multiple queues */ |
|---|
| 4545 | | - if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) |
|---|
| 4546 | | - srrctl |= E1000_SRRCTL_DROP_EN; |
|---|
| 4547 | | - |
|---|
| 4548 | | - wr32(E1000_SRRCTL(reg_idx), srrctl); |
|---|
| 4707 | + igb_setup_srrctl(adapter, ring); |
|---|
| 4549 | 4708 | |
|---|
| 4550 | 4709 | /* set filtering for VMDQ pools */ |
|---|
| 4551 | 4710 | igb_set_vmolr(adapter, reg_idx & 0x7, true); |
|---|
| .. | .. |
|---|
| 4661 | 4820 | while (i != tx_ring->next_to_use) { |
|---|
| 4662 | 4821 | union e1000_adv_tx_desc *eop_desc, *tx_desc; |
|---|
| 4663 | 4822 | |
|---|
| 4664 | | - /* Free all the Tx ring sk_buffs */ |
|---|
| 4665 | | - dev_kfree_skb_any(tx_buffer->skb); |
|---|
| 4823 | + /* Free all the Tx ring sk_buffs or xdp frames */ |
|---|
| 4824 | + if (tx_buffer->type == IGB_TYPE_SKB) |
|---|
| 4825 | + dev_kfree_skb_any(tx_buffer->skb); |
|---|
| 4826 | + else |
|---|
| 4827 | + xdp_return_frame(tx_buffer->xdpf); |
|---|
| 4666 | 4828 | |
|---|
| 4667 | 4829 | /* unmap skb header data */ |
|---|
| 4668 | 4830 | dma_unmap_single(tx_ring->dev, |
|---|
| .. | .. |
|---|
| 4735 | 4897 | { |
|---|
| 4736 | 4898 | igb_clean_rx_ring(rx_ring); |
|---|
| 4737 | 4899 | |
|---|
| 4900 | + rx_ring->xdp_prog = NULL; |
|---|
| 4901 | + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); |
|---|
| 4738 | 4902 | vfree(rx_ring->rx_buffer_info); |
|---|
| 4739 | 4903 | rx_ring->rx_buffer_info = NULL; |
|---|
| 4740 | 4904 | |
|---|
| .. | .. |
|---|
| 4771 | 4935 | { |
|---|
| 4772 | 4936 | u16 i = rx_ring->next_to_clean; |
|---|
| 4773 | 4937 | |
|---|
| 4774 | | - if (rx_ring->skb) |
|---|
| 4775 | | - dev_kfree_skb(rx_ring->skb); |
|---|
| 4938 | + dev_kfree_skb(rx_ring->skb); |
|---|
| 4776 | 4939 | rx_ring->skb = NULL; |
|---|
| 4777 | 4940 | |
|---|
| 4778 | 4941 | /* Free all the Rx ring sk_buffs */ |
|---|
| .. | .. |
|---|
| 4896 | 5059 | /* VLAN filtering needed for VLAN prio filter */ |
|---|
| 4897 | 5060 | if (adapter->netdev->features & NETIF_F_NTUPLE) |
|---|
| 4898 | 5061 | break; |
|---|
| 4899 | | - /* fall through */ |
|---|
| 5062 | + fallthrough; |
|---|
| 4900 | 5063 | case e1000_82576: |
|---|
| 4901 | 5064 | case e1000_82580: |
|---|
| 4902 | 5065 | case e1000_i354: |
|---|
| 4903 | 5066 | /* VLAN filtering needed for pool filtering */ |
|---|
| 4904 | 5067 | if (adapter->vfs_allocated_count) |
|---|
| 4905 | 5068 | break; |
|---|
| 4906 | | - /* fall through */ |
|---|
| 5069 | + fallthrough; |
|---|
| 4907 | 5070 | default: |
|---|
| 4908 | 5071 | return 1; |
|---|
| 4909 | 5072 | } |
|---|
| .. | .. |
|---|
| 5183 | 5346 | case e1000_media_type_copper: |
|---|
| 5184 | 5347 | if (!hw->mac.get_link_status) |
|---|
| 5185 | 5348 | return true; |
|---|
| 5186 | | - /* fall through */ |
|---|
| 5349 | + fallthrough; |
|---|
| 5187 | 5350 | case e1000_media_type_internal_serdes: |
|---|
| 5188 | 5351 | hw->mac.ops.check_for_link(hw); |
|---|
| 5189 | 5352 | link_active = !hw->mac.get_link_status; |
|---|
| .. | .. |
|---|
| 5247 | 5410 | |
|---|
| 5248 | 5411 | /** |
|---|
| 5249 | 5412 | * igb_watchdog - Timer Call-back |
|---|
| 5250 | | - * @data: pointer to adapter cast into an unsigned long |
|---|
| 5413 | + * @t: pointer to timer_list containing our private info pointer |
|---|
| 5251 | 5414 | **/ |
|---|
| 5252 | 5415 | static void igb_watchdog(struct timer_list *t) |
|---|
| 5253 | 5416 | { |
|---|
| .. | .. |
|---|
| 5346 | 5509 | break; |
|---|
| 5347 | 5510 | } |
|---|
| 5348 | 5511 | |
|---|
| 5349 | | - if (adapter->link_speed != SPEED_1000) |
|---|
| 5512 | + if (adapter->link_speed != SPEED_1000 || |
|---|
| 5513 | + !hw->phy.ops.read_reg) |
|---|
| 5350 | 5514 | goto no_wait; |
|---|
| 5351 | 5515 | |
|---|
| 5352 | 5516 | /* wait for Remote receiver status OK */ |
|---|
| .. | .. |
|---|
| 5714 | 5878 | * should have been handled by the upper layers. |
|---|
| 5715 | 5879 | */ |
|---|
| 5716 | 5880 | if (tx_ring->launchtime_enable) { |
|---|
| 5717 | | - ts = ns_to_timespec64(first->skb->tstamp); |
|---|
| 5718 | | - first->skb->tstamp = 0; |
|---|
| 5881 | + ts = ktime_to_timespec64(first->skb->tstamp); |
|---|
| 5882 | + first->skb->tstamp = ktime_set(0, 0); |
|---|
| 5719 | 5883 | context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); |
|---|
| 5720 | 5884 | } else { |
|---|
| 5721 | 5885 | context_desc->seqnum_seed = 0; |
|---|
| .. | .. |
|---|
| 5735 | 5899 | } ip; |
|---|
| 5736 | 5900 | union { |
|---|
| 5737 | 5901 | struct tcphdr *tcp; |
|---|
| 5902 | + struct udphdr *udp; |
|---|
| 5738 | 5903 | unsigned char *hdr; |
|---|
| 5739 | 5904 | } l4; |
|---|
| 5740 | 5905 | u32 paylen, l4_offset; |
|---|
| .. | .. |
|---|
| 5754 | 5919 | l4.hdr = skb_checksum_start(skb); |
|---|
| 5755 | 5920 | |
|---|
| 5756 | 5921 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
|---|
| 5757 | | - type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; |
|---|
| 5922 | + type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? |
|---|
| 5923 | + E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP; |
|---|
| 5758 | 5924 | |
|---|
| 5759 | 5925 | /* initialize outer IP header fields */ |
|---|
| 5760 | 5926 | if (ip.v4->version == 4) { |
|---|
| .. | .. |
|---|
| 5782 | 5948 | /* determine offset of inner transport header */ |
|---|
| 5783 | 5949 | l4_offset = l4.hdr - skb->data; |
|---|
| 5784 | 5950 | |
|---|
| 5785 | | - /* compute length of segmentation header */ |
|---|
| 5786 | | - *hdr_len = (l4.tcp->doff * 4) + l4_offset; |
|---|
| 5787 | | - |
|---|
| 5788 | 5951 | /* remove payload length from inner checksum */ |
|---|
| 5789 | 5952 | paylen = skb->len - l4_offset; |
|---|
| 5790 | | - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); |
|---|
| 5953 | + if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) { |
|---|
| 5954 | + /* compute length of segmentation header */ |
|---|
| 5955 | + *hdr_len = (l4.tcp->doff * 4) + l4_offset; |
|---|
| 5956 | + csum_replace_by_diff(&l4.tcp->check, |
|---|
| 5957 | + (__force __wsum)htonl(paylen)); |
|---|
| 5958 | + } else { |
|---|
| 5959 | + /* compute length of segmentation header */ |
|---|
| 5960 | + *hdr_len = sizeof(*l4.udp) + l4_offset; |
|---|
| 5961 | + csum_replace_by_diff(&l4.udp->check, |
|---|
| 5962 | + (__force __wsum)htonl(paylen)); |
|---|
| 5963 | + } |
|---|
| 5791 | 5964 | |
|---|
| 5792 | 5965 | /* update gso size and bytecount with header size */ |
|---|
| 5793 | 5966 | first->gso_segs = skb_shinfo(skb)->gso_segs; |
|---|
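
With UDP GSO, the TUCMD type selects `L4T_UDP` instead of `L4T_TCP`, and the header length programmed into the descriptor differs: TCP's header size comes from `doff`, while UDP's is the fixed 8-byte header. A worked example with common header sizes:

```c
/* Worked example (illustrative sizes): Ethernet (14) + IPv4 (20) headers
 * put the L4 header at l4_offset = 34.
 *
 *   TCP GSO: *hdr_len = l4.tcp->doff * 4 + l4_offset = 20 + 34 = 54
 *   UDP GSO: *hdr_len = sizeof(struct udphdr) + l4_offset = 8 + 34 = 42
 *
 * Either way paylen = skb->len - l4_offset, and csum_replace_by_diff()
 * folds it out of the L4 checksum so hardware can re-add the per-segment
 * payload length after splitting.
 */
```
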
| .. | .. |
|---|
| 5834 | 6007 | switch (skb->csum_offset) { |
|---|
| 5835 | 6008 | case offsetof(struct tcphdr, check): |
|---|
| 5836 | 6009 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; |
|---|
| 5837 | | - /* fall through */ |
|---|
| 6010 | + fallthrough; |
|---|
| 5838 | 6011 | case offsetof(struct udphdr, check): |
|---|
| 5839 | 6012 | break; |
|---|
| 5840 | 6013 | case offsetof(struct sctphdr, checksum): |
|---|
| .. | .. |
|---|
| 5846 | 6019 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; |
|---|
| 5847 | 6020 | break; |
|---|
| 5848 | 6021 | } |
|---|
| 5849 | | - /* fall through */ |
|---|
| 6022 | + fallthrough; |
|---|
| 5850 | 6023 | default: |
|---|
| 5851 | 6024 | skb_checksum_help(skb); |
|---|
| 5852 | 6025 | goto csum_failed; |
|---|
| .. | .. |
|---|
| 5958 | 6131 | struct sk_buff *skb = first->skb; |
|---|
| 5959 | 6132 | struct igb_tx_buffer *tx_buffer; |
|---|
| 5960 | 6133 | union e1000_adv_tx_desc *tx_desc; |
|---|
| 5961 | | - struct skb_frag_struct *frag; |
|---|
| 6134 | + skb_frag_t *frag; |
|---|
| 5962 | 6135 | dma_addr_t dma; |
|---|
| 5963 | 6136 | unsigned int data_len, size; |
|---|
| 5964 | 6137 | u32 tx_flags = first->tx_flags; |
|---|
| .. | .. |
|---|
| 6035 | 6208 | /* set the timestamp */ |
|---|
| 6036 | 6209 | first->time_stamp = jiffies; |
|---|
| 6037 | 6210 | |
|---|
| 6211 | + skb_tx_timestamp(skb); |
|---|
| 6212 | + |
|---|
| 6038 | 6213 | /* Force memory writes to complete before letting h/w know there |
|---|
| 6039 | 6214 | * are new descriptors to fetch. (Only applicable for weak-ordered |
|---|
| 6040 | 6215 | * memory model archs, such as IA-64). |
|---|
| .. | .. |
|---|
| 6056 | 6231 | /* Make sure there is space in the ring for the next send. */ |
|---|
| 6057 | 6232 | igb_maybe_stop_tx(tx_ring, DESC_NEEDED); |
|---|
| 6058 | 6233 | |
|---|
| 6059 | | - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { |
|---|
| 6234 | + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { |
|---|
| 6060 | 6235 | writel(i, tx_ring->tail); |
|---|
| 6061 | | - |
|---|
| 6062 | | - /* we need this if more than one processor can write to our tail |
|---|
| 6063 | | - * at a time, it synchronizes IO on IA64/Altix systems |
|---|
| 6064 | | - */ |
|---|
| 6065 | | - mmiowb(); |
|---|
| 6066 | 6236 | } |
|---|
| 6067 | 6237 | return 0; |
|---|
| 6068 | 6238 | |
|---|
| .. | .. |
|---|
| 6099 | 6269 | return -1; |
|---|
| 6100 | 6270 | } |
|---|
| 6101 | 6271 | |
|---|
| 6272 | +int igb_xmit_xdp_ring(struct igb_adapter *adapter, |
|---|
| 6273 | + struct igb_ring *tx_ring, |
|---|
| 6274 | + struct xdp_frame *xdpf) |
|---|
| 6275 | +{ |
|---|
| 6276 | + union e1000_adv_tx_desc *tx_desc; |
|---|
| 6277 | + u32 len, cmd_type, olinfo_status; |
|---|
| 6278 | + struct igb_tx_buffer *tx_buffer; |
|---|
| 6279 | + dma_addr_t dma; |
|---|
| 6280 | + u16 i; |
|---|
| 6281 | + |
|---|
| 6282 | + len = xdpf->len; |
|---|
| 6283 | + |
|---|
| 6284 | + if (unlikely(!igb_desc_unused(tx_ring))) |
|---|
| 6285 | + return IGB_XDP_CONSUMED; |
|---|
| 6286 | + |
|---|
| 6287 | + dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE); |
|---|
| 6288 | + if (dma_mapping_error(tx_ring->dev, dma)) |
|---|
| 6289 | + return IGB_XDP_CONSUMED; |
|---|
| 6290 | + |
|---|
| 6291 | + /* record the location of the first descriptor for this packet */ |
|---|
| 6292 | + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
|---|
| 6293 | + tx_buffer->bytecount = len; |
|---|
| 6294 | + tx_buffer->gso_segs = 1; |
|---|
| 6295 | + tx_buffer->protocol = 0; |
|---|
| 6296 | + |
|---|
| 6297 | + i = tx_ring->next_to_use; |
|---|
| 6298 | + tx_desc = IGB_TX_DESC(tx_ring, i); |
|---|
| 6299 | + |
|---|
| 6300 | + dma_unmap_len_set(tx_buffer, len, len); |
|---|
| 6301 | + dma_unmap_addr_set(tx_buffer, dma, dma); |
|---|
| 6302 | + tx_buffer->type = IGB_TYPE_XDP; |
|---|
| 6303 | + tx_buffer->xdpf = xdpf; |
|---|
| 6304 | + |
|---|
| 6305 | + tx_desc->read.buffer_addr = cpu_to_le64(dma); |
|---|
| 6306 | + |
|---|
| 6307 | + /* put descriptor type bits */ |
|---|
| 6308 | + cmd_type = E1000_ADVTXD_DTYP_DATA | |
|---|
| 6309 | + E1000_ADVTXD_DCMD_DEXT | |
|---|
| 6310 | + E1000_ADVTXD_DCMD_IFCS; |
|---|
| 6311 | + cmd_type |= len | IGB_TXD_DCMD; |
|---|
| 6312 | + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
|---|
| 6313 | + |
|---|
| 6314 | + olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT; |
|---|
| 6315 | + /* 82575 requires a unique index per ring */ |
|---|
| 6316 | + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) |
|---|
| 6317 | + olinfo_status |= tx_ring->reg_idx << 4; |
|---|
| 6318 | + |
|---|
| 6319 | + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
|---|
| 6320 | + |
|---|
| 6321 | + netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount); |
|---|
| 6322 | + |
|---|
| 6323 | + /* set the timestamp */ |
|---|
| 6324 | + tx_buffer->time_stamp = jiffies; |
|---|
| 6325 | + |
|---|
| 6326 | + /* Avoid any potential race with xdp_xmit and cleanup */ |
|---|
| 6327 | + smp_wmb(); |
|---|
| 6328 | + |
|---|
| 6329 | + /* set next_to_watch value indicating a packet is present */ |
|---|
| 6330 | + i++; |
|---|
| 6331 | + if (i == tx_ring->count) |
|---|
| 6332 | + i = 0; |
|---|
| 6333 | + |
|---|
| 6334 | + tx_buffer->next_to_watch = tx_desc; |
|---|
| 6335 | + tx_ring->next_to_use = i; |
|---|
| 6336 | + |
|---|
| 6337 | + /* Make sure there is space in the ring for the next send. */ |
|---|
| 6338 | + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); |
|---|
| 6339 | + |
|---|
| 6340 | + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) |
|---|
| 6341 | + writel(i, tx_ring->tail); |
|---|
| 6342 | + |
|---|
| 6343 | + return IGB_XDP_TX; |
|---|
| 6344 | +} |
|---|
| 6345 | + |
|---|
| 6102 | 6346 | netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, |
|---|
| 6103 | 6347 | struct igb_ring *tx_ring) |
|---|
| 6104 | 6348 | { |
|---|
| .. | .. |
|---|
| 6117 | 6361 | * otherwise try next time |
|---|
| 6118 | 6362 | */ |
|---|
| 6119 | 6363 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) |
|---|
| 6120 | | - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); |
|---|
| 6364 | + count += TXD_USE_COUNT(skb_frag_size( |
|---|
| 6365 | + &skb_shinfo(skb)->frags[f])); |
|---|
| 6121 | 6366 | |
|---|
| 6122 | 6367 | if (igb_maybe_stop_tx(tx_ring, count + 3)) { |
|---|
| 6123 | 6368 | /* this is a hard error */ |
|---|
| .. | .. |
|---|
| 6126 | 6371 | |
|---|
| 6127 | 6372 | /* record the location of the first descriptor for this packet */ |
|---|
| 6128 | 6373 | first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
|---|
| 6374 | + first->type = IGB_TYPE_SKB; |
|---|
| 6129 | 6375 | first->skb = skb; |
|---|
| 6130 | 6376 | first->bytecount = skb->len; |
|---|
| 6131 | 6377 | first->gso_segs = 1; |
|---|
| .. | .. |
|---|
| 6162 | 6408 | goto out_drop; |
|---|
| 6163 | 6409 | else if (!tso) |
|---|
| 6164 | 6410 | igb_tx_csum(tx_ring, first); |
|---|
| 6165 | | - |
|---|
| 6166 | | - skb_tx_timestamp(skb); |
|---|
| 6167 | 6411 | |
|---|
| 6168 | 6412 | if (igb_tx_map(tx_ring, first, hdr_len)) |
|---|
| 6169 | 6413 | goto cleanup_tx_tstamp; |
|---|
| .. | .. |
|---|
| 6215 | 6459 | /** |
|---|
| 6216 | 6460 | * igb_tx_timeout - Respond to a Tx Hang |
|---|
| 6217 | 6461 | * @netdev: network interface device structure |
|---|
| 6462 | + * @txqueue: number of the Tx queue that hung (unused) |
|---|
| 6218 | 6463 | **/ |
|---|
| 6219 | | -static void igb_tx_timeout(struct net_device *netdev) |
|---|
| 6464 | +static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) |
|---|
| 6220 | 6465 | { |
|---|
| 6221 | 6466 | struct igb_adapter *adapter = netdev_priv(netdev); |
|---|
| 6222 | 6467 | struct e1000_hw *hw = &adapter->hw; |
|---|
| .. | .. |
|---|
| 6277 | 6522 | static int igb_change_mtu(struct net_device *netdev, int new_mtu) |
|---|
| 6278 | 6523 | { |
|---|
| 6279 | 6524 | struct igb_adapter *adapter = netdev_priv(netdev); |
|---|
| 6280 | | - struct pci_dev *pdev = adapter->pdev; |
|---|
| 6281 | | - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
|---|
| 6525 | + int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; |
|---|
| 6526 | + |
|---|
| 6527 | + if (adapter->xdp_prog) { |
|---|
| 6528 | + int i; |
|---|
| 6529 | + |
|---|
| 6530 | + for (i = 0; i < adapter->num_rx_queues; i++) { |
|---|
| 6531 | + struct igb_ring *ring = adapter->rx_ring[i]; |
|---|
| 6532 | + |
|---|
| 6533 | + if (max_frame > igb_rx_bufsz(ring)) { |
|---|
| 6534 | + netdev_warn(adapter->netdev, |
|---|
| 6535 | + "Requested MTU size is not supported with XDP. Max frame size is %d\n", |
|---|
| 6536 | + max_frame); |
|---|
| 6537 | + return -EINVAL; |
|---|
| 6538 | + } |
|---|
| 6539 | + } |
|---|
| 6540 | + } |
|---|
| 6282 | 6541 | |
|---|
| 6283 | 6542 | /* adjust max frame to be at least the size of a standard frame */ |
|---|
| 6284 | 6543 | if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) |
|---|
| .. | .. |
|---|
| 6293 | 6552 | if (netif_running(netdev)) |
|---|
| 6294 | 6553 | igb_down(adapter); |
|---|
| 6295 | 6554 | |
|---|
| 6296 | | - dev_info(&pdev->dev, "changing MTU from %d to %d\n", |
|---|
| 6297 | | - netdev->mtu, new_mtu); |
|---|
| 6555 | + netdev_dbg(netdev, "changing MTU from %d to %d\n", |
|---|
| 6556 | + netdev->mtu, new_mtu); |
|---|
| 6298 | 6557 | netdev->mtu = new_mtu; |
|---|
| 6299 | 6558 | |
|---|
| 6300 | 6559 | if (netif_running(netdev)) |
|---|
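
With an XDP program loaded, MTU changes are bounded by the Rx buffer the program sees, since an XDP frame is never scattered across buffers. A worked example of the check, assuming `IGB_ETH_PKT_HDR_PAD` covers Ethernet, FCS and two VLAN tags and that the buffer sizes are the driver's existing `IGB_RXBUFFER_*` constants:

```c
/* Worked example (assumed: IGB_ETH_PKT_HDR_PAD = 14 + 4 + 8 = 26 bytes,
 * default 2048-byte Rx buffer in use):
 *
 *   max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD
 *   new_mtu 1500 -> max_frame 1526 <= 2048 -> allowed
 *   new_mtu 3000 -> max_frame 3026 >  2048 -> -EINVAL unless the ring
 *                                             uses the 3072-byte buffer
 */
```
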
| .. | .. |
|---|
| 6738 | 6997 | igb_setup_dca(adapter); |
|---|
| 6739 | 6998 | break; |
|---|
| 6740 | 6999 | } |
|---|
| 6741 | | - /* Fall Through since DCA is disabled. */ |
|---|
| 7000 | + fallthrough; /* since DCA is disabled. */ |
|---|
| 6742 | 7001 | case DCA_PROVIDER_REMOVE: |
|---|
| 6743 | 7002 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { |
|---|
| 6744 | 7003 | /* without this a class_device is left |
|---|
| .. | .. |
|---|
| 7157 | 7416 | { |
|---|
| 7158 | 7417 | struct e1000_hw *hw = &adapter->hw; |
|---|
| 7159 | 7418 | unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; |
|---|
| 7160 | | - u32 reg, msgbuf[3]; |
|---|
| 7419 | + u32 reg, msgbuf[3] = {}; |
|---|
| 7161 | 7420 | u8 *addr = (u8 *)(&msgbuf[1]); |
|---|
| 7162 | 7421 | |
|---|
| 7163 | 7422 | /* process all the same items cleared in a function level reset */ |
|---|
| .. | .. |
|---|
| 7191 | 7450 | |
|---|
| 7192 | 7451 | for (i = 0; i < hw->mac.rar_entry_count; i++) { |
|---|
| 7193 | 7452 | adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; |
|---|
| 7194 | | - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); |
|---|
| 7453 | + eth_zero_addr(adapter->mac_table[i].addr); |
|---|
| 7195 | 7454 | adapter->mac_table[i].queue = 0; |
|---|
| 7196 | 7455 | igb_rar_set_index(adapter, i); |
|---|
| 7197 | 7456 | } |
|---|
| .. | .. |
|---|
| 7340 | 7599 | } else { |
|---|
| 7341 | 7600 | adapter->mac_table[i].state = 0; |
|---|
| 7342 | 7601 | adapter->mac_table[i].queue = 0; |
|---|
| 7343 | | - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); |
|---|
| 7602 | + eth_zero_addr(adapter->mac_table[i].addr); |
|---|
| 7344 | 7603 | } |
|---|
| 7345 | 7604 | |
|---|
| 7346 | 7605 | igb_rar_set_index(adapter, i); |
|---|
| .. | .. |
|---|
| 7600 | 7859 | static void igb_msg_task(struct igb_adapter *adapter) |
|---|
| 7601 | 7860 | { |
|---|
| 7602 | 7861 | struct e1000_hw *hw = &adapter->hw; |
|---|
| 7862 | + unsigned long flags; |
|---|
| 7603 | 7863 | u32 vf; |
|---|
| 7604 | 7864 | |
|---|
| 7865 | + spin_lock_irqsave(&adapter->vfs_lock, flags); |
|---|
| 7605 | 7866 | for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { |
|---|
| 7606 | 7867 | /* process any reset requests */ |
|---|
| 7607 | 7868 | if (!igb_check_for_rst(hw, vf)) |
|---|
| .. | .. |
|---|
| 7615 | 7876 | if (!igb_check_for_ack(hw, vf)) |
|---|
| 7616 | 7877 | igb_rcv_ack_from_vf(adapter, vf); |
|---|
| 7617 | 7878 | } |
|---|
| 7879 | + spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
|---|
| 7618 | 7880 | } |
|---|
| 7619 | 7881 | |
|---|
| 7620 | 7882 | /** |
|---|
| .. | .. |
|---|
| 7778 | 8040 | if (!clean_complete) |
|---|
| 7779 | 8041 | return budget; |
|---|
| 7780 | 8042 | |
|---|
| 7781 | | - /* If not enough Rx work done, exit the polling mode */ |
|---|
| 7782 | | - napi_complete_done(napi, work_done); |
|---|
| 7783 | | - igb_ring_irq_enable(q_vector); |
|---|
| 8043 | + /* Exit the polling mode, but don't re-enable interrupts if stack might |
|---|
| 8044 | + * poll us due to busy-polling |
|---|
| 8045 | + */ |
|---|
| 8046 | + if (likely(napi_complete_done(napi, work_done))) |
|---|
| 8047 | + igb_ring_irq_enable(q_vector); |
|---|
| 7784 | 8048 | |
|---|
| 7785 | | - return 0; |
|---|
| 8049 | + return work_done; |
|---|
| 7786 | 8050 | } |
|---|
| 7787 | 8051 | |
|---|
| 7788 | 8052 | /** |
|---|
| .. | .. |
|---|
| 7831 | 8095 | total_packets += tx_buffer->gso_segs; |
|---|
| 7832 | 8096 | |
|---|
| 7833 | 8097 | /* free the skb */ |
|---|
| 7834 | | - napi_consume_skb(tx_buffer->skb, napi_budget); |
|---|
| 8098 | + if (tx_buffer->type == IGB_TYPE_SKB) |
|---|
| 8099 | + napi_consume_skb(tx_buffer->skb, napi_budget); |
|---|
| 8100 | + else |
|---|
| 8101 | + xdp_return_frame(tx_buffer->xdpf); |
|---|
| 7835 | 8102 | |
|---|
| 7836 | 8103 | /* unmap skb header data */ |
|---|
| 7837 | 8104 | dma_unmap_single(tx_ring->dev, |
|---|
| .. | .. |
|---|
| 7990 | 8257 | return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); |
|---|
| 7991 | 8258 | } |
|---|
| 7992 | 8259 | |
|---|
| 7993 | | -static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) |
|---|
| 8260 | +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, |
|---|
| 8261 | + int rx_buf_pgcnt) |
|---|
| 7994 | 8262 | { |
|---|
| 7995 | 8263 | unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; |
|---|
| 7996 | 8264 | struct page *page = rx_buffer->page; |
|---|
| .. | .. |
|---|
| 8001 | 8269 | |
|---|
| 8002 | 8270 | #if (PAGE_SIZE < 8192) |
|---|
| 8003 | 8271 | /* if we are only owner of page we can reuse it */ |
|---|
| 8004 | | - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) |
|---|
| 8272 | + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) |
|---|
| 8005 | 8273 | return false; |
|---|
| 8006 | 8274 | #else |
|---|
| 8007 | 8275 | #define IGB_LAST_OFFSET \ |
|---|
| .. | .. |
|---|
| 8015 | 8283 | * the pagecnt_bias and page count so that we fully restock the |
|---|
| 8016 | 8284 | * number of references the driver holds. |
|---|
| 8017 | 8285 | */ |
|---|
| 8018 | | - if (unlikely(!pagecnt_bias)) { |
|---|
| 8019 | | - page_ref_add(page, USHRT_MAX); |
|---|
| 8286 | + if (unlikely(pagecnt_bias == 1)) { |
|---|
| 8287 | + page_ref_add(page, USHRT_MAX - 1); |
|---|
| 8020 | 8288 | rx_buffer->pagecnt_bias = USHRT_MAX; |
|---|
| 8021 | 8289 | } |
|---|
| 8022 | 8290 | |
|---|
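
The Rx page-reuse test now works from a `page_count()` snapshot (`rx_buf_pgcnt`) supplied by the caller, and the bias refill triggers at `pagecnt_bias == 1` rather than 0, so the driver's share of the reference count never reaches zero while the page is still in the ring. A worked example of the reference accounting, with illustrative numbers:

```c
/* Worked example of the pagecnt_bias scheme (numbers are illustrative):
 *
 *   page_ref_count(page) = 1024  (bulk reference taken up front)
 *   pagecnt_bias         = 1024  (driver-owned share of that count)
 *
 * Handing one frame to the stack consumes one bias:  bias  -> 1023
 * The stack freeing its copy drops the page count:   count -> 1023
 *
 * Reuse is allowed while (count - bias) <= 1, i.e. nobody outside the
 * driver still holds the page.  When bias drops to 1, the driver adds
 * USHRT_MAX - 1 references in one go and resets bias to USHRT_MAX, so
 * the refill happens before its share is exhausted.
 */
```
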
| .. | .. |
|---|
| 8055 | 8323 | |
|---|
| 8056 | 8324 | static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, |
|---|
| 8057 | 8325 | struct igb_rx_buffer *rx_buffer, |
|---|
| 8058 | | - union e1000_adv_rx_desc *rx_desc, |
|---|
| 8059 | | - unsigned int size) |
|---|
| 8326 | + struct xdp_buff *xdp, |
|---|
| 8327 | + union e1000_adv_rx_desc *rx_desc) |
|---|
| 8060 | 8328 | { |
|---|
| 8061 | | - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; |
|---|
| 8062 | 8329 | #if (PAGE_SIZE < 8192) |
|---|
| 8063 | 8330 | unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; |
|---|
| 8064 | 8331 | #else |
|---|
| 8065 | | - unsigned int truesize = SKB_DATA_ALIGN(size); |
|---|
| 8332 | + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - |
|---|
| 8333 | + xdp->data_hard_start); |
|---|
| 8066 | 8334 | #endif |
|---|
| 8335 | + unsigned int size = xdp->data_end - xdp->data; |
|---|
| 8067 | 8336 | unsigned int headlen; |
|---|
| 8068 | 8337 | struct sk_buff *skb; |
|---|
| 8069 | 8338 | |
|---|
| 8070 | 8339 | /* prefetch first cache line of first page */ |
|---|
| 8071 | | - prefetch(va); |
|---|
| 8072 | | -#if L1_CACHE_BYTES < 128 |
|---|
| 8073 | | - prefetch(va + L1_CACHE_BYTES); |
|---|
| 8074 | | -#endif |
|---|
| 8340 | + net_prefetch(xdp->data); |
|---|
| 8075 | 8341 | |
|---|
| 8076 | 8342 | /* allocate a skb to store the frags */ |
|---|
| 8077 | 8343 | skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); |
|---|
| .. | .. |
|---|
| 8079 | 8345 | return NULL; |
|---|
| 8080 | 8346 | |
|---|
| 8081 | 8347 | if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { |
|---|
| 8082 | | - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); |
|---|
| 8083 | | - va += IGB_TS_HDR_LEN; |
|---|
| 8084 | | - size -= IGB_TS_HDR_LEN; |
|---|
| 8348 | + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) { |
|---|
| 8349 | + xdp->data += IGB_TS_HDR_LEN; |
|---|
| 8350 | + size -= IGB_TS_HDR_LEN; |
|---|
| 8351 | + } |
|---|
| 8085 | 8352 | } |
|---|
| 8086 | 8353 | |
|---|
| 8087 | 8354 | /* Determine available headroom for copy */ |
|---|
| 8088 | 8355 | headlen = size; |
|---|
| 8089 | 8356 | if (headlen > IGB_RX_HDR_LEN) |
|---|
| 8090 | | - headlen = eth_get_headlen(va, IGB_RX_HDR_LEN); |
|---|
| 8357 | + headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); |
|---|
| 8091 | 8358 | |
|---|
| 8092 | 8359 | /* align pull length to size of long to optimize memcpy performance */ |
|---|
| 8093 | | - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); |
|---|
| 8360 | + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); |
|---|
| 8094 | 8361 | |
|---|
| 8095 | 8362 | /* update all of the pointers */ |
|---|
| 8096 | 8363 | size -= headlen; |
|---|
| 8097 | 8364 | if (size) { |
|---|
| 8098 | 8365 | skb_add_rx_frag(skb, 0, rx_buffer->page, |
|---|
| 8099 | | - (va + headlen) - page_address(rx_buffer->page), |
|---|
| 8366 | + (xdp->data + headlen) - page_address(rx_buffer->page), |
|---|
| 8100 | 8367 | size, truesize); |
|---|
| 8101 | 8368 | #if (PAGE_SIZE < 8192) |
|---|
| 8102 | 8369 | rx_buffer->page_offset ^= truesize; |
|---|
| .. | .. |
|---|
| 8112 | 8379 | |
|---|
| 8113 | 8380 | static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, |
|---|
| 8114 | 8381 | struct igb_rx_buffer *rx_buffer, |
|---|
| 8115 | | - union e1000_adv_rx_desc *rx_desc, |
|---|
| 8116 | | - unsigned int size) |
|---|
| 8382 | + struct xdp_buff *xdp, |
|---|
| 8383 | + union e1000_adv_rx_desc *rx_desc) |
|---|
| 8117 | 8384 | { |
|---|
| 8118 | | - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; |
|---|
| 8119 | 8385 | #if (PAGE_SIZE < 8192) |
|---|
| 8120 | 8386 | unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; |
|---|
| 8121 | 8387 | #else |
|---|
| 8122 | 8388 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
|---|
| 8123 | | - SKB_DATA_ALIGN(IGB_SKB_PAD + size); |
|---|
| 8389 | + SKB_DATA_ALIGN(xdp->data_end - |
|---|
| 8390 | + xdp->data_hard_start); |
|---|
| 8124 | 8391 | #endif |
|---|
| 8392 | + unsigned int metasize = xdp->data - xdp->data_meta; |
|---|
| 8125 | 8393 | struct sk_buff *skb; |
|---|
| 8126 | 8394 | |
|---|
| 8127 | 8395 | /* prefetch first cache line of first page */ |
|---|
| 8128 | | - prefetch(va); |
|---|
| 8129 | | -#if L1_CACHE_BYTES < 128 |
|---|
| 8130 | | - prefetch(va + L1_CACHE_BYTES); |
|---|
| 8131 | | -#endif |
|---|
| 8396 | + net_prefetch(xdp->data_meta); |
|---|
| 8132 | 8397 | |
|---|
| 8133 | 8398 | /* build an skb around the page buffer */ |
|---|
| 8134 | | - skb = build_skb(va - IGB_SKB_PAD, truesize); |
|---|
| 8399 | + skb = build_skb(xdp->data_hard_start, truesize); |
|---|
| 8135 | 8400 | if (unlikely(!skb)) |
|---|
| 8136 | 8401 | return NULL; |
|---|
| 8137 | 8402 | |
|---|
| 8138 | 8403 | /* update pointers within the skb to store the data */ |
|---|
| 8139 | | - skb_reserve(skb, IGB_SKB_PAD); |
|---|
| 8140 | | - __skb_put(skb, size); |
|---|
| 8404 | + skb_reserve(skb, xdp->data - xdp->data_hard_start); |
|---|
| 8405 | + __skb_put(skb, xdp->data_end - xdp->data); |
|---|
| 8406 | + |
|---|
| 8407 | + if (metasize) |
|---|
| 8408 | + skb_metadata_set(skb, metasize); |
|---|
| 8141 | 8409 | |
|---|
| 8142 | 8410 | /* pull timestamp out of packet data */ |
|---|
| 8143 | 8411 | if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { |
|---|
| 8144 | | - igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); |
|---|
| 8145 | | - __skb_pull(skb, IGB_TS_HDR_LEN); |
|---|
| 8412 | + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb)) |
|---|
| 8413 | + __skb_pull(skb, IGB_TS_HDR_LEN); |
|---|
| 8146 | 8414 | } |
|---|
| 8147 | 8415 | |
|---|
| 8148 | 8416 | /* update buffer offset */ |
|---|
| .. | .. |
|---|
| 8153 | 8421 | #endif |
|---|
| 8154 | 8422 | |
|---|
| 8155 | 8423 | return skb; |
|---|
| 8424 | +} |
|---|
| 8425 | + |
|---|
| 8426 | +static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, |
|---|
| 8427 | + struct igb_ring *rx_ring, |
|---|
| 8428 | + struct xdp_buff *xdp) |
|---|
| 8429 | +{ |
|---|
| 8430 | + int err, result = IGB_XDP_PASS; |
|---|
| 8431 | + struct bpf_prog *xdp_prog; |
|---|
| 8432 | + u32 act; |
|---|
| 8433 | + |
|---|
| 8434 | + rcu_read_lock(); |
|---|
| 8435 | + xdp_prog = READ_ONCE(rx_ring->xdp_prog); |
|---|
| 8436 | + |
|---|
| 8437 | + if (!xdp_prog) |
|---|
| 8438 | + goto xdp_out; |
|---|
| 8439 | + |
|---|
| 8440 | + prefetchw(xdp->data_hard_start); /* xdp_frame write */ |
|---|
| 8441 | + |
|---|
| 8442 | + act = bpf_prog_run_xdp(xdp_prog, xdp); |
|---|
| 8443 | + switch (act) { |
|---|
| 8444 | + case XDP_PASS: |
|---|
| 8445 | + break; |
|---|
| 8446 | + case XDP_TX: |
|---|
| 8447 | + result = igb_xdp_xmit_back(adapter, xdp); |
|---|
| 8448 | + if (result == IGB_XDP_CONSUMED) |
|---|
| 8449 | + goto out_failure; |
|---|
| 8450 | + break; |
|---|
| 8451 | + case XDP_REDIRECT: |
|---|
| 8452 | + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); |
|---|
| 8453 | + if (err) |
|---|
| 8454 | + goto out_failure; |
|---|
| 8455 | + result = IGB_XDP_REDIR; |
|---|
| 8456 | + break; |
|---|
| 8457 | + default: |
|---|
| 8458 | + bpf_warn_invalid_xdp_action(act); |
|---|
| 8459 | + fallthrough; |
|---|
| 8460 | + case XDP_ABORTED: |
|---|
| 8461 | +out_failure: |
|---|
| 8462 | + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
|---|
| 8463 | + fallthrough; |
|---|
| 8464 | + case XDP_DROP: |
|---|
| 8465 | + result = IGB_XDP_CONSUMED; |
|---|
| 8466 | + break; |
|---|
| 8467 | + } |
|---|
| 8468 | +xdp_out: |
|---|
| 8469 | + rcu_read_unlock(); |
|---|
| 8470 | + return ERR_PTR(-result); |
|---|
| 8471 | +} |
|---|
| 8472 | + |
|---|
| 8473 | +static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, |
|---|
| 8474 | + unsigned int size) |
|---|
| 8475 | +{ |
|---|
| 8476 | + unsigned int truesize; |
|---|
| 8477 | + |
|---|
| 8478 | +#if (PAGE_SIZE < 8192) |
|---|
| 8479 | + truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ |
|---|
| 8480 | +#else |
|---|
| 8481 | + truesize = ring_uses_build_skb(rx_ring) ? |
|---|
| 8482 | + SKB_DATA_ALIGN(IGB_SKB_PAD + size) + |
|---|
| 8483 | + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : |
|---|
| 8484 | + SKB_DATA_ALIGN(size); |
|---|
| 8485 | +#endif |
|---|
| 8486 | + return truesize; |
|---|
| 8487 | +} |
|---|
| 8488 | + |
|---|
| 8489 | +static void igb_rx_buffer_flip(struct igb_ring *rx_ring, |
|---|
| 8490 | + struct igb_rx_buffer *rx_buffer, |
|---|
| 8491 | + unsigned int size) |
|---|
| 8492 | +{ |
|---|
| 8493 | + unsigned int truesize = igb_rx_frame_truesize(rx_ring, size); |
|---|
| 8494 | +#if (PAGE_SIZE < 8192) |
|---|
| 8495 | + rx_buffer->page_offset ^= truesize; |
|---|
| 8496 | +#else |
|---|
| 8497 | + rx_buffer->page_offset += truesize; |
|---|
| 8498 | +#endif |
|---|
| 8156 | 8499 | } |
|---|
| 8157 | 8500 | |
|---|
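`igb_run_xdp()` above runs whatever program is attached to the ring and maps the standard verdicts onto driver actions: XDP_PASS falls through to normal skb construction, XDP_TX is sent back out through `igb_xdp_xmit_back()`, XDP_REDIRECT goes through `xdp_do_redirect()`, and the aborted/unknown/drop verdicts consume the buffer (with a tracepoint on the failure cases). For context, a minimal XDP program that would exercise the PASS and DROP paths; this is a libbpf-style sketch, not part of the driver:

```c
// SPDX-License-Identifier: GPL-2.0
/* Minimal illustrative XDP program: drop runt frames, pass the rest. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_runts(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Frames shorter than the minimum Ethernet length are dropped
	 * before the stack ever sees them.
	 */
	if (data + ETH_ZLEN > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```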
| 8158 | 8501 | static inline void igb_rx_checksum(struct igb_ring *ring, |
|---|
| .. | .. |
|---|
| 8209 | 8552 | * igb_is_non_eop - process handling of non-EOP buffers |
|---|
| 8210 | 8553 | * @rx_ring: Rx ring being processed |
|---|
| 8211 | 8554 | * @rx_desc: Rx descriptor for current buffer |
|---|
| 8212 | | - * @skb: current socket buffer containing buffer in progress |
|---|
| 8213 | 8555 | * |
|---|
| 8214 | 8556 | * This function updates next to clean. If the buffer is an EOP buffer |
|---|
| 8215 | 8557 | * this function exits returning false, otherwise it will place the |
|---|
| .. | .. |
|---|
| 8251 | 8593 | union e1000_adv_rx_desc *rx_desc, |
|---|
| 8252 | 8594 | struct sk_buff *skb) |
|---|
| 8253 | 8595 | { |
|---|
| 8596 | + /* XDP packets use an error pointer so abort at this point */
|---|
| 8597 | + if (IS_ERR(skb)) |
|---|
| 8598 | + return true; |
|---|
| 8599 | + |
|---|
| 8254 | 8600 | if (unlikely((igb_test_staterr(rx_desc, |
|---|
| 8255 | 8601 | E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { |
|---|
| 8256 | 8602 | struct net_device *netdev = rx_ring->netdev; |
|---|
| .. | .. |
|---|
| 8297 | 8643 | |
|---|
| 8298 | 8644 | if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && |
|---|
| 8299 | 8645 | test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) |
|---|
| 8300 | | - vid = be16_to_cpu(rx_desc->wb.upper.vlan); |
|---|
| 8646 | + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); |
|---|
| 8301 | 8647 | else |
|---|
| 8302 | 8648 | vid = le16_to_cpu(rx_desc->wb.upper.vlan); |
|---|
| 8303 | 8649 | |
|---|
| .. | .. |
|---|
| 8309 | 8655 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); |
|---|
| 8310 | 8656 | } |
|---|
| 8311 | 8657 | |
|---|
| 8658 | +static unsigned int igb_rx_offset(struct igb_ring *rx_ring) |
|---|
| 8659 | +{ |
|---|
| 8660 | + return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0; |
|---|
| 8661 | +} |
|---|
| 8662 | + |
|---|
| 8312 | 8663 | static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, |
|---|
| 8313 | | - const unsigned int size) |
|---|
| 8664 | + const unsigned int size, int *rx_buf_pgcnt) |
|---|
| 8314 | 8665 | { |
|---|
| 8315 | 8666 | struct igb_rx_buffer *rx_buffer; |
|---|
| 8316 | 8667 | |
|---|
| 8317 | 8668 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; |
|---|
| 8669 | + *rx_buf_pgcnt = |
|---|
| 8670 | +#if (PAGE_SIZE < 8192) |
|---|
| 8671 | + page_count(rx_buffer->page); |
|---|
| 8672 | +#else |
|---|
| 8673 | + 0; |
|---|
| 8674 | +#endif |
|---|
| 8318 | 8675 | prefetchw(rx_buffer->page); |
|---|
| 8319 | 8676 | |
|---|
| 8320 | 8677 | /* we are reusing so sync this buffer for CPU use */ |
|---|
| .. | .. |
|---|
| 8330 | 8687 | } |
|---|
| 8331 | 8688 | |
|---|
| 8332 | 8689 | static void igb_put_rx_buffer(struct igb_ring *rx_ring, |
|---|
| 8333 | | - struct igb_rx_buffer *rx_buffer) |
|---|
| 8690 | + struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt) |
|---|
| 8334 | 8691 | { |
|---|
| 8335 | | - if (igb_can_reuse_rx_page(rx_buffer)) { |
|---|
| 8692 | + if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) { |
|---|
| 8336 | 8693 | /* hand second half of page back to the ring */ |
|---|
| 8337 | 8694 | igb_reuse_rx_page(rx_ring, rx_buffer); |
|---|
| 8338 | 8695 | } else { |
|---|
| .. | .. |
|---|
| 8352 | 8709 | |
|---|
| 8353 | 8710 | static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) |
|---|
| 8354 | 8711 | { |
|---|
| 8712 | + struct igb_adapter *adapter = q_vector->adapter; |
|---|
| 8355 | 8713 | struct igb_ring *rx_ring = q_vector->rx.ring; |
|---|
| 8356 | 8714 | struct sk_buff *skb = rx_ring->skb; |
|---|
| 8357 | 8715 | unsigned int total_bytes = 0, total_packets = 0; |
|---|
| 8358 | 8716 | u16 cleaned_count = igb_desc_unused(rx_ring); |
|---|
| 8717 | + unsigned int xdp_xmit = 0; |
|---|
| 8718 | + struct xdp_buff xdp; |
|---|
| 8719 | + int rx_buf_pgcnt; |
|---|
| 8720 | + |
|---|
| 8721 | + xdp.rxq = &rx_ring->xdp_rxq; |
|---|
| 8722 | + |
|---|
| 8723 | + /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
|---|
| 8724 | +#if (PAGE_SIZE < 8192) |
|---|
| 8725 | + xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0); |
|---|
| 8726 | +#endif |
|---|
| 8359 | 8727 | |
|---|
| 8360 | 8728 | while (likely(total_packets < budget)) { |
|---|
| 8361 | 8729 | union e1000_adv_rx_desc *rx_desc; |
|---|
| .. | .. |
|---|
| 8379 | 8747 | */ |
|---|
| 8380 | 8748 | dma_rmb(); |
|---|
| 8381 | 8749 | |
|---|
| 8382 | | - rx_buffer = igb_get_rx_buffer(rx_ring, size); |
|---|
| 8750 | + rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); |
|---|
| 8383 | 8751 | |
|---|
| 8384 | 8752 | /* retrieve a buffer from the ring */ |
|---|
| 8385 | | - if (skb) |
|---|
| 8753 | + if (!skb) { |
|---|
| 8754 | + xdp.data = page_address(rx_buffer->page) + |
|---|
| 8755 | + rx_buffer->page_offset; |
|---|
| 8756 | + xdp.data_meta = xdp.data; |
|---|
| 8757 | + xdp.data_hard_start = xdp.data - |
|---|
| 8758 | + igb_rx_offset(rx_ring); |
|---|
| 8759 | + xdp.data_end = xdp.data + size; |
|---|
| 8760 | +#if (PAGE_SIZE > 4096) |
|---|
| 8761 | + /* At larger PAGE_SIZE, frame_sz depends on the frame length */
|---|
| 8762 | + xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size); |
|---|
| 8763 | +#endif |
|---|
| 8764 | + skb = igb_run_xdp(adapter, rx_ring, &xdp); |
|---|
| 8765 | + } |
|---|
| 8766 | + |
|---|
| 8767 | + if (IS_ERR(skb)) { |
|---|
| 8768 | + unsigned int xdp_res = -PTR_ERR(skb); |
|---|
| 8769 | + |
|---|
| 8770 | + if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) { |
|---|
| 8771 | + xdp_xmit |= xdp_res; |
|---|
| 8772 | + igb_rx_buffer_flip(rx_ring, rx_buffer, size); |
|---|
| 8773 | + } else { |
|---|
| 8774 | + rx_buffer->pagecnt_bias++; |
|---|
| 8775 | + } |
|---|
| 8776 | + total_packets++; |
|---|
| 8777 | + total_bytes += size; |
|---|
| 8778 | + } else if (skb) |
|---|
| 8386 | 8779 | igb_add_rx_frag(rx_ring, rx_buffer, skb, size); |
|---|
| 8387 | 8780 | else if (ring_uses_build_skb(rx_ring)) |
|---|
| 8388 | | - skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size); |
|---|
| 8781 | + skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); |
|---|
| 8389 | 8782 | else |
|---|
| 8390 | 8783 | skb = igb_construct_skb(rx_ring, rx_buffer, |
|---|
| 8391 | | - rx_desc, size); |
|---|
| 8784 | + &xdp, rx_desc); |
|---|
| 8392 | 8785 | |
|---|
| 8393 | 8786 | /* exit if we failed to retrieve a buffer */ |
|---|
| 8394 | 8787 | if (!skb) { |
|---|
| .. | .. |
|---|
| 8397 | 8790 | break; |
|---|
| 8398 | 8791 | } |
|---|
| 8399 | 8792 | |
|---|
| 8400 | | - igb_put_rx_buffer(rx_ring, rx_buffer); |
|---|
| 8793 | + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); |
|---|
| 8401 | 8794 | cleaned_count++; |
|---|
| 8402 | 8795 | |
|---|
| 8403 | 8796 | /* fetch next buffer in frame if non-eop */ |
|---|
| .. | .. |
|---|
| 8428 | 8821 | /* place incomplete frames back on ring for completion */ |
|---|
| 8429 | 8822 | rx_ring->skb = skb; |
|---|
| 8430 | 8823 | |
|---|
| 8824 | + if (xdp_xmit & IGB_XDP_REDIR) |
|---|
| 8825 | + xdp_do_flush(); |
|---|
| 8826 | + |
|---|
| 8827 | + if (xdp_xmit & IGB_XDP_TX) { |
|---|
| 8828 | + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); |
|---|
| 8829 | + |
|---|
| 8830 | + igb_xdp_ring_update_tail(tx_ring); |
|---|
| 8831 | + } |
|---|
| 8832 | + |
|---|
| 8431 | 8833 | u64_stats_update_begin(&rx_ring->rx_syncp); |
|---|
| 8432 | 8834 | rx_ring->rx_stats.packets += total_packets; |
|---|
| 8433 | 8835 | rx_ring->rx_stats.bytes += total_bytes; |
|---|
| .. | .. |
|---|
| 8439 | 8841 | igb_alloc_rx_buffers(rx_ring, cleaned_count); |
|---|
| 8440 | 8842 | |
|---|
| 8441 | 8843 | return total_packets; |
|---|
| 8442 | | -} |
|---|
| 8443 | | - |
|---|
| 8444 | | -static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) |
|---|
| 8445 | | -{ |
|---|
| 8446 | | - return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0; |
|---|
| 8447 | 8844 | } |
|---|
| 8448 | 8845 | |
|---|
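In the rewritten `igb_clean_rx_irq()` above, each descriptor's buffer is wrapped in an `xdp_buff` (data, data_meta, data_hard_start, data_end and `frame_sz`) before the program runs, and XDP_TX/XDP_REDIRECT work is batched: the Tx tail bump and `xdp_do_flush()` happen once per poll, after the loop. Seeding `xdp.rxq` from `rx_ring->xdp_rxq` assumes the ring registered its `xdp_rxq_info` when its resources were set up, which is not shown in this hunk; for kernels of this vintage that registration would look roughly like the following sketch:

```c
#include <net/xdp.h>

/* Sketch only, assuming the igb.h context: expected to run from the Rx
 * resource setup path; error unwinding trimmed.
 */
static int example_register_xdp_rxq(struct igb_ring *rx_ring)
{
	int err;

	err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
			       rx_ring->queue_index);
	if (err < 0)
		return err;

	/* Rx buffers are ordinary driver-owned pages shared with the stack */
	return xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
					  MEM_TYPE_PAGE_SHARED, NULL);
}
```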
| 8449 | 8846 | static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, |
|---|
| .. | .. |
|---|
| 8482 | 8879 | bi->dma = dma; |
|---|
| 8483 | 8880 | bi->page = page; |
|---|
| 8484 | 8881 | bi->page_offset = igb_rx_offset(rx_ring); |
|---|
| 8485 | | - bi->pagecnt_bias = 1; |
|---|
| 8882 | + page_ref_add(page, USHRT_MAX - 1); |
|---|
| 8883 | + bi->pagecnt_bias = USHRT_MAX; |
|---|
| 8486 | 8884 | |
|---|
| 8487 | 8885 | return true; |
|---|
| 8488 | 8886 | } |
|---|
| 8489 | 8887 | |
|---|
| 8490 | 8888 | /** |
|---|
| 8491 | | - * igb_alloc_rx_buffers - Replace used receive buffers; packet split |
|---|
| 8492 | | - * @adapter: address of board private structure |
|---|
| 8889 | + * igb_alloc_rx_buffers - Replace used receive buffers |
|---|
| 8890 | + * @rx_ring: rx descriptor ring to allocate new receive buffers for
|---|
| 8891 | + * @cleaned_count: count of buffers to allocate |
|---|
| 8493 | 8892 | **/ |
|---|
| 8494 | 8893 | void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) |
|---|
| 8495 | 8894 | { |
|---|
| .. | .. |
|---|
| 8558 | 8957 | |
|---|
| 8559 | 8958 | /** |
|---|
| 8560 | 8959 | * igb_mii_ioctl - |
|---|
| 8561 | | - * @netdev: |
|---|
| 8562 | | - * @ifreq: |
|---|
| 8563 | | - * @cmd: |
|---|
| 8960 | + * @netdev: pointer to netdev struct |
|---|
| 8961 | + * @ifr: interface structure |
|---|
| 8962 | + * @cmd: ioctl command to execute |
|---|
| 8564 | 8963 | **/ |
|---|
| 8565 | 8964 | static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
|---|
| 8566 | 8965 | { |
|---|
| .. | .. |
|---|
| 8588 | 8987 | |
|---|
| 8589 | 8988 | /** |
|---|
| 8590 | 8989 | * igb_ioctl - |
|---|
| 8591 | | - * @netdev: |
|---|
| 8592 | | - * @ifreq: |
|---|
| 8593 | | - * @cmd: |
|---|
| 8990 | + * @netdev: pointer to netdev struct |
|---|
| 8991 | + * @ifr: interface structure |
|---|
| 8992 | + * @cmd: ioctl command to execute |
|---|
| 8594 | 8993 | **/ |
|---|
| 8595 | 8994 | static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
|---|
| 8596 | 8995 | { |
|---|
| .. | .. |
|---|
| 8875 | 9274 | return __igb_shutdown(to_pci_dev(dev), NULL, 0); |
|---|
| 8876 | 9275 | } |
|---|
| 8877 | 9276 | |
|---|
| 8878 | | -static int __maybe_unused igb_resume(struct device *dev) |
|---|
| 9277 | +static int __maybe_unused __igb_resume(struct device *dev, bool rpm) |
|---|
| 8879 | 9278 | { |
|---|
| 8880 | 9279 | struct pci_dev *pdev = to_pci_dev(dev); |
|---|
| 8881 | 9280 | struct net_device *netdev = pci_get_drvdata(pdev); |
|---|
| .. | .. |
|---|
| 8918 | 9317 | |
|---|
| 8919 | 9318 | wr32(E1000_WUS, ~0); |
|---|
| 8920 | 9319 | |
|---|
| 8921 | | - rtnl_lock(); |
|---|
| 9320 | + if (!rpm) |
|---|
| 9321 | + rtnl_lock(); |
|---|
| 8922 | 9322 | if (!err && netif_running(netdev)) |
|---|
| 8923 | 9323 | err = __igb_open(netdev, true); |
|---|
| 8924 | 9324 | |
|---|
| 8925 | 9325 | if (!err) |
|---|
| 8926 | 9326 | netif_device_attach(netdev); |
|---|
| 8927 | | - rtnl_unlock(); |
|---|
| 9327 | + if (!rpm) |
|---|
| 9328 | + rtnl_unlock(); |
|---|
| 8928 | 9329 | |
|---|
| 8929 | 9330 | return err; |
|---|
| 8930 | 9331 | } |
|---|
| 8931 | 9332 | |
|---|
| 9333 | +static int __maybe_unused igb_resume(struct device *dev) |
|---|
| 9334 | +{ |
|---|
| 9335 | + return __igb_resume(dev, false); |
|---|
| 9336 | +} |
|---|
| 9337 | + |
|---|
| 8932 | 9338 | static int __maybe_unused igb_runtime_idle(struct device *dev) |
|---|
| 8933 | 9339 | { |
|---|
| 8934 | | - struct pci_dev *pdev = to_pci_dev(dev); |
|---|
| 8935 | | - struct net_device *netdev = pci_get_drvdata(pdev); |
|---|
| 9340 | + struct net_device *netdev = dev_get_drvdata(dev); |
|---|
| 8936 | 9341 | struct igb_adapter *adapter = netdev_priv(netdev); |
|---|
| 8937 | 9342 | |
|---|
| 8938 | 9343 | if (!igb_has_link(adapter)) |
|---|
| .. | .. |
|---|
| 8948 | 9353 | |
|---|
| 8949 | 9354 | static int __maybe_unused igb_runtime_resume(struct device *dev) |
|---|
| 8950 | 9355 | { |
|---|
| 8951 | | - return igb_resume(dev); |
|---|
| 9356 | + return __igb_resume(dev, true); |
|---|
| 8952 | 9357 | } |
|---|
| 8953 | 9358 | |
|---|
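`igb_resume()` is now a thin system-sleep wrapper and the runtime path calls `__igb_resume()` with `rpm` set, skipping the `rtnl_lock()`/`rtnl_unlock()` pair: runtime resume can be triggered with RTNL already held, so retaking it would deadlock, while ordinary resume still serializes `__igb_open()` itself. Both wrappers are assumed to be wired into the driver's `dev_pm_ops` in the usual way, roughly:

```c
/* Sketch of the expected dev_pm_ops wiring; the suspend-side callbacks
 * (igb_suspend, igb_runtime_suspend) are assumed to be defined elsewhere
 * in the file.
 */
static const struct dev_pm_ops example_igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
```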
| 8954 | 9359 | static void igb_shutdown(struct pci_dev *pdev) |
|---|
| .. | .. |
|---|
| 9064 | 9469 | * @pdev: Pointer to PCI device |
|---|
| 9065 | 9470 | * |
|---|
| 9066 | 9471 | * Restart the card from scratch, as if from a cold-boot. Implementation |
|---|
| 9067 | | - * resembles the first-half of the igb_resume routine. |
|---|
| 9472 | + * resembles the first-half of the __igb_resume routine. |
|---|
| 9068 | 9473 | **/ |
|---|
| 9069 | 9474 | static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) |
|---|
| 9070 | 9475 | { |
|---|
| .. | .. |
|---|
| 9072 | 9477 | struct igb_adapter *adapter = netdev_priv(netdev); |
|---|
| 9073 | 9478 | struct e1000_hw *hw = &adapter->hw; |
|---|
| 9074 | 9479 | pci_ers_result_t result; |
|---|
| 9075 | | - int err; |
|---|
| 9076 | 9480 | |
|---|
| 9077 | 9481 | if (pci_enable_device_mem(pdev)) { |
|---|
| 9078 | 9482 | dev_err(&pdev->dev, |
|---|
| .. | .. |
|---|
| 9096 | 9500 | result = PCI_ERS_RESULT_RECOVERED; |
|---|
| 9097 | 9501 | } |
|---|
| 9098 | 9502 | |
|---|
| 9099 | | - err = pci_cleanup_aer_uncorrect_error_status(pdev); |
|---|
| 9100 | | - if (err) { |
|---|
| 9101 | | - dev_err(&pdev->dev, |
|---|
| 9102 | | - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", |
|---|
| 9103 | | - err); |
|---|
| 9104 | | - /* non-fatal, continue */ |
|---|
| 9105 | | - } |
|---|
| 9106 | | - |
|---|
| 9107 | 9503 | return result; |
|---|
| 9108 | 9504 | } |
|---|
| 9109 | 9505 | |
|---|
| .. | .. |
|---|
| 9113 | 9509 | * |
|---|
| 9114 | 9510 | * This callback is called when the error recovery driver tells us that |
|---|
| 9115 | 9511 | * its OK to resume normal operation. Implementation resembles the |
|---|
| 9116 | | - * second-half of the igb_resume routine. |
|---|
| 9512 | + * second-half of the __igb_resume routine. |
|---|
| 9117 | 9513 | */ |
|---|
| 9118 | 9514 | static void igb_io_resume(struct pci_dev *pdev) |
|---|
| 9119 | 9515 | { |
|---|
| .. | .. |
|---|
| 9415 | 9811 | reg = rd32(E1000_DTXCTL); |
|---|
| 9416 | 9812 | reg |= E1000_DTXCTL_VLAN_ADDED; |
|---|
| 9417 | 9813 | wr32(E1000_DTXCTL, reg); |
|---|
| 9418 | | - /* Fall through */ |
|---|
| 9814 | + fallthrough; |
|---|
| 9419 | 9815 | case e1000_82580: |
|---|
| 9420 | 9816 | /* enable replication vlan tag stripping */ |
|---|
| 9421 | 9817 | reg = rd32(E1000_RPLOLR); |
|---|
| 9422 | 9818 | reg |= E1000_RPLOLR_STRVLAN; |
|---|
| 9423 | 9819 | wr32(E1000_RPLOLR, reg); |
|---|
| 9424 | | - /* Fall through */ |
|---|
| 9820 | + fallthrough; |
|---|
| 9425 | 9821 | case e1000_i350: |
|---|
| 9426 | 9822 | /* none of the above registers are supported by i350 */ |
|---|
| 9427 | 9823 | break; |
|---|
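The `/* Fall through */` comments above are replaced with the `fallthrough` pseudo-keyword, which annotates intentional fall-through so `-Wimplicit-fallthrough` can flag the unintentional kind. The pattern in isolation, with hypothetical names:

```c
#include <linux/bits.h>
#include <linux/types.h>

enum example_mac { EXAMPLE_MAC_A, EXAMPLE_MAC_B, EXAMPLE_MAC_OTHER };

/* Illustrative only: MAC A gets its own setup and then deliberately
 * falls into the common MAC B setup.
 */
static u32 example_features(enum example_mac type)
{
	u32 features = 0;

	switch (type) {
	case EXAMPLE_MAC_A:
		features |= BIT(1);	/* A-only feature */
		fallthrough;		/* A also wants the common bits */
	case EXAMPLE_MAC_B:
		features |= BIT(0);	/* common feature */
		break;
	default:
		break;
	}

	return features;
}
```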
| .. | .. |
|---|
| 9443 | 9839 | struct e1000_hw *hw = &adapter->hw; |
|---|
| 9444 | 9840 | u32 dmac_thr; |
|---|
| 9445 | 9841 | u16 hwm; |
|---|
| 9842 | + u32 reg; |
|---|
| 9446 | 9843 | |
|---|
| 9447 | 9844 | if (hw->mac.type > e1000_82580) { |
|---|
| 9448 | 9845 | if (adapter->flags & IGB_FLAG_DMAC) { |
|---|
| 9449 | | - u32 reg; |
|---|
| 9450 | | - |
|---|
| 9451 | 9846 | /* force threshold to 0. */ |
|---|
| 9452 | 9847 | wr32(E1000_DMCTXTH, 0); |
|---|
| 9453 | 9848 | |
|---|
| .. | .. |
|---|
| 9480 | 9875 | /* Disable BMC-to-OS Watchdog Enable */ |
|---|
| 9481 | 9876 | if (hw->mac.type != e1000_i354) |
|---|
| 9482 | 9877 | reg &= ~E1000_DMACR_DC_BMC2OSW_EN; |
|---|
| 9483 | | - |
|---|
| 9484 | 9878 | wr32(E1000_DMACR, reg); |
|---|
| 9485 | 9879 | |
|---|
| 9486 | 9880 | /* no lower threshold to disable |
|---|
| .. | .. |
|---|
| 9497 | 9891 | */ |
|---|
| 9498 | 9892 | wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - |
|---|
| 9499 | 9893 | (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); |
|---|
| 9894 | + } |
|---|
| 9500 | 9895 | |
|---|
| 9501 | | - /* make low power state decision controlled |
|---|
| 9502 | | - * by DMA coal |
|---|
| 9503 | | - */ |
|---|
| 9896 | + if (hw->mac.type >= e1000_i210 || |
|---|
| 9897 | + (adapter->flags & IGB_FLAG_DMAC)) { |
|---|
| 9504 | 9898 | reg = rd32(E1000_PCIEMISC); |
|---|
| 9505 | | - reg &= ~E1000_PCIEMISC_LX_DECISION; |
|---|
| 9899 | + reg |= E1000_PCIEMISC_LX_DECISION; |
|---|
| 9506 | 9900 | wr32(E1000_PCIEMISC, reg); |
|---|
| 9507 | 9901 | } /* endif adapter->dmac is not disabled */ |
|---|
| 9508 | 9902 | } else if (hw->mac.type == e1000_82580) { |
|---|