.. | ..
27 | 27 | #include <linux/bpf.h>
28 | 28 | #include <linux/bpf_trace.h>
29 | 29 | #include <linux/atomic.h>
 | 30 | +#include <linux/numa.h>
 | 31 | +#include <generated/utsrelease.h>
30 | 32 | #include <scsi/fc/fc_fcoe.h>
31 | 33 | #include <net/udp_tunnel.h>
32 | 34 | #include <net/pkt_cls.h>
.. | ..
34 | 36 | #include <net/tc_act/tc_mirred.h>
35 | 37 | #include <net/vxlan.h>
36 | 38 | #include <net/mpls.h>
 | 39 | +#include <net/xdp_sock_drv.h>
37 | 40 | #include <net/xfrm.h>
38 | 41 |
39 | 42 | #include "ixgbe.h"
40 | 43 | #include "ixgbe_common.h"
41 | 44 | #include "ixgbe_dcb_82599.h"
 | 45 | +#include "ixgbe_phy.h"
42 | 46 | #include "ixgbe_sriov.h"
43 | 47 | #include "ixgbe_model.h"
 | 48 | +#include "ixgbe_txrx_common.h"
44 | 49 |
45 | 50 | char ixgbe_driver_name[] = "ixgbe";
46 | 51 | static const char ixgbe_driver_string[] =
.. | ..
52 | 57 | static char ixgbe_default_device_descr[] =
53 | 58 | 			      "Intel(R) 10 Gigabit Network Connection";
54 | 59 | #endif
55 |  | -#define DRV_VERSION "5.1.0-k"
56 |  | -const char ixgbe_driver_version[] = DRV_VERSION;
57 | 60 | static const char ixgbe_copyright[] =
58 | 61 | 				"Copyright (c) 1999-2016 Intel Corporation.";
59 | 62 |
.. | ..
160 | 163 |
161 | 164 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
162 | 165 | MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
163 |  | -MODULE_LICENSE("GPL");
164 |  | -MODULE_VERSION(DRV_VERSION);
 | 166 | +MODULE_LICENSE("GPL v2");
165 | 167 |
166 | 168 | static struct workqueue_struct *ixgbe_wq;
167 | 169 |
.. | ..
894 | 896 | 	}
895 | 897 | }
896 | 898 |
897 |  | -static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
898 |  | -					  u64 qmask)
 | 899 | +void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 | 900 | +			    u64 qmask)
899 | 901 | {
900 | 902 | 	u32 mask;
901 | 903 |
.. | ..
1393 | 1395 | 				IXGBE_DCA_CTRL_DCA_MODE_CB2);
1394 | 1396 | 			break;
1395 | 1397 | 		}
1396 |  | -		/* fall through - DCA is disabled. */
 | 1398 | +		fallthrough; /* DCA is disabled. */
1397 | 1399 | 	case DCA_PROVIDER_REMOVE:
1398 | 1400 | 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1399 | 1401 | 			dca_remove_requester(dev);
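
[Editor's note: this hunk is the first of many in this patch that replace /* fall through */ comments with the fallthrough; pseudo-keyword. In-tree it comes from <linux/compiler_attributes.h>; a minimal out-of-tree sketch of what it expands to is below. The classify() function and its weights are invented for illustration.

	/* Sketch: fallthrough resolves to a compiler attribute where
	 * available, which satisfies -Wimplicit-fallthrough without
	 * relying on comment parsing. */
	#if defined(__has_attribute)
	# if __has_attribute(__fallthrough__)
	#  define fallthrough __attribute__((__fallthrough__))
	# endif
	#endif
	#ifndef fallthrough
	# define fallthrough do {} while (0)	/* no-op fallback */
	#endif

	int classify(int x)
	{
		int weight = 0;

		switch (x) {
		case 2:
			weight += 10;
			fallthrough;	/* deliberate: also add the base weight */
		case 1:
			weight += 1;
			break;
		default:
			weight = -1;
		}
		return weight;
	}

Unlike a comment, a misplaced fallthrough; (one not immediately followed by a case label) is a hard compile error, which is the point of the conversion.]
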
.. | ..
1674 | 1676 |  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
1675 | 1677 |  * other fields within the skb.
1676 | 1678 |  **/
1677 |  | -static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1678 |  | -				     union ixgbe_adv_rx_desc *rx_desc,
1679 |  | -				     struct sk_buff *skb)
 | 1679 | +void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 | 1680 | +			      union ixgbe_adv_rx_desc *rx_desc,
 | 1681 | +			      struct sk_buff *skb)
1680 | 1682 | {
1681 | 1683 | 	struct net_device *dev = rx_ring->netdev;
1682 | 1684 | 	u32 flags = rx_ring->q_vector->adapter->flags;
.. | ..
1709 | 1711 | 	skb->protocol = eth_type_trans(skb, dev);
1710 | 1712 | }
1711 | 1713 |
1712 |  | -static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1713 |  | -			 struct sk_buff *skb)
 | 1714 | +void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 | 1715 | +		  struct sk_buff *skb)
1714 | 1716 | {
1715 | 1717 | 	napi_gro_receive(&q_vector->napi, skb);
1716 | 1718 | }
.. | ..
1782 | 1784 | static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1783 | 1785 | 			    struct sk_buff *skb)
1784 | 1786 | {
1785 |  | -	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 | 1787 | +	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1786 | 1788 | 	unsigned char *va;
1787 | 1789 | 	unsigned int pull_len;
1788 | 1790 |
.. | ..
1797 | 1799 | 	 * we need the header to contain the greater of either ETH_HLEN or
1798 | 1800 | 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
1799 | 1801 | 	 */
1800 |  | -	pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
 | 1802 | +	pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
1801 | 1803 |
1802 | 1804 | 	/* align pull length to size of long to optimize memcpy performance */
1803 | 1805 | 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1804 | 1806 |
1805 | 1807 | 	/* update all of the pointers */
1806 | 1808 | 	skb_frag_size_sub(frag, pull_len);
1807 |  | -	frag->page_offset += pull_len;
 | 1809 | +	skb_frag_off_add(frag, pull_len);
1808 | 1810 | 	skb->data_len -= pull_len;
1809 | 1811 | 	skb->tail += pull_len;
1810 | 1812 | }
.. | ..
1832 | 1834 | 					      skb_headlen(skb),
1833 | 1835 | 					      DMA_FROM_DEVICE);
1834 | 1836 | 	} else {
1835 |  | -		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 | 1837 | +		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1836 | 1838 |
1837 | 1839 | 		dma_sync_single_range_for_cpu(rx_ring->dev,
1838 | 1840 | 					      IXGBE_CB(skb)->dma,
1839 |  | -					      frag->page_offset,
 | 1841 | +					      skb_frag_off(frag),
1840 | 1842 | 					      skb_frag_size(frag),
1841 | 1843 | 					      DMA_FROM_DEVICE);
1842 | 1844 | 	}
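
[Editor's note: the hunks above track the mainline change where struct skb_frag_struct became the skb_frag_t alias for struct bio_vec, so open-coded page_offset arithmetic had to move behind accessors. A hedged sketch of the accessor pattern; consume_header() is a made-up name:

	#include <linux/skbuff.h>

	/* Advance a fragment past pull_len bytes that were copied into
	 * the skb's linear area, without touching bio_vec fields directly. */
	static void consume_header(skb_frag_t *frag, unsigned int pull_len)
	{
		skb_frag_size_sub(frag, pull_len);  /* was: frag->size -= pull_len */
		skb_frag_off_add(frag, pull_len);   /* was: frag->page_offset += pull_len */
	}

Keeping all access behind skb_frag_off()/skb_frag_size() lets the underlying layout change without touching every driver again.]
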
.. | ..
1872 | 1874 |  *
1873 | 1875 |  * Returns true if an error was encountered and skb was freed.
1874 | 1876 |  **/
1875 |  | -static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1876 |  | -				  union ixgbe_adv_rx_desc *rx_desc,
1877 |  | -				  struct sk_buff *skb)
 | 1877 | +bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 | 1878 | +			   union ixgbe_adv_rx_desc *rx_desc,
 | 1879 | +			   struct sk_buff *skb)
1878 | 1880 | {
1879 | 1881 | 	struct net_device *netdev = rx_ring->netdev;
1880 | 1882 |
.. | ..
2103 | 2105 | 	struct sk_buff *skb;
2104 | 2106 |
2105 | 2107 | 	/* prefetch first cache line of first page */
2106 |  | -	prefetch(xdp->data);
2107 |  | -#if L1_CACHE_BYTES < 128
2108 |  | -	prefetch(xdp->data + L1_CACHE_BYTES);
2109 |  | -#endif
 | 2108 | +	net_prefetch(xdp->data);
 | 2109 | +
2110 | 2110 | 	/* Note, we get here by enabling legacy-rx via:
2111 | 2111 | 	 *
2112 | 2112 | 	 * ethtool --set-priv-flags <dev> legacy-rx on
.. | ..
2169 | 2169 | 	 * likely have a consumer accessing first few bytes of meta
2170 | 2170 | 	 * data, and then actual data.
2171 | 2171 | 	 */
2172 |  | -	prefetch(xdp->data_meta);
2173 |  | -#if L1_CACHE_BYTES < 128
2174 |  | -	prefetch(xdp->data_meta + L1_CACHE_BYTES);
2175 |  | -#endif
 | 2172 | +	net_prefetch(xdp->data_meta);
2176 | 2173 |
2177 | 2174 | 	/* build an skb to around the page buffer */
2178 | 2175 | 	skb = build_skb(xdp->data_hard_start, truesize);
.. | ..
2199 | 2196 | 	return skb;
2200 | 2197 | }
2201 | 2198 |
2202 |  | -#define IXGBE_XDP_PASS		0
2203 |  | -#define IXGBE_XDP_CONSUMED	BIT(0)
2204 |  | -#define IXGBE_XDP_TX		BIT(1)
2205 |  | -#define IXGBE_XDP_REDIR		BIT(2)
2206 |  | -
2207 |  | -static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
2208 |  | -			       struct xdp_frame *xdpf);
2209 |  | -
2210 | 2199 | static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2211 | 2200 | 				     struct ixgbe_ring *rx_ring,
2212 | 2201 | 				     struct xdp_buff *xdp)
.. | ..
2229 | 2218 | 	case XDP_PASS:
2230 | 2219 | 		break;
2231 | 2220 | 	case XDP_TX:
2232 |  | -		xdpf = convert_to_xdp_frame(xdp);
2233 |  | -		if (unlikely(!xdpf)) {
2234 |  | -			result = IXGBE_XDP_CONSUMED;
2235 |  | -			break;
2236 |  | -		}
 | 2221 | +		xdpf = xdp_convert_buff_to_frame(xdp);
 | 2222 | +		if (unlikely(!xdpf))
 | 2223 | +			goto out_failure;
2237 | 2224 | 		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
 | 2225 | +		if (result == IXGBE_XDP_CONSUMED)
 | 2226 | +			goto out_failure;
2238 | 2227 | 		break;
2239 | 2228 | 	case XDP_REDIRECT:
2240 | 2229 | 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2241 |  | -		if (!err)
2242 |  | -			result = IXGBE_XDP_REDIR;
2243 |  | -		else
2244 |  | -			result = IXGBE_XDP_CONSUMED;
 | 2230 | +		if (err)
 | 2231 | +			goto out_failure;
 | 2232 | +		result = IXGBE_XDP_REDIR;
2245 | 2233 | 		break;
2246 | 2234 | 	default:
2247 | 2235 | 		bpf_warn_invalid_xdp_action(act);
2248 |  | -		/* fallthrough */
 | 2236 | +		fallthrough;
2249 | 2237 | 	case XDP_ABORTED:
 | 2238 | +out_failure:
2250 | 2239 | 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2251 |  | -		/* fallthrough -- handle aborts by dropping packet */
 | 2240 | +		fallthrough; /* handle aborts by dropping packet */
2252 | 2241 | 	case XDP_DROP:
2253 | 2242 | 		result = IXGBE_XDP_CONSUMED;
2254 | 2243 | 		break;
.. | ..
2258 | 2247 | 	return ERR_PTR(-result);
2259 | 2248 | }
2260 | 2249 |
 | 2250 | +static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
 | 2251 | +					    unsigned int size)
 | 2252 | +{
 | 2253 | +	unsigned int truesize;
 | 2254 | +
 | 2255 | +#if (PAGE_SIZE < 8192)
 | 2256 | +	truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
 | 2257 | +#else
 | 2258 | +	truesize = ring_uses_build_skb(rx_ring) ?
 | 2259 | +		   SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
 | 2260 | +		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
 | 2261 | +		   SKB_DATA_ALIGN(size);
 | 2262 | +#endif
 | 2263 | +	return truesize;
 | 2264 | +}
 | 2265 | +
2261 | 2266 | static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2262 | 2267 | 				 struct ixgbe_rx_buffer *rx_buffer,
2263 | 2268 | 				 unsigned int size)
2264 | 2269 | {
 | 2270 | +	unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
2265 | 2271 | #if (PAGE_SIZE < 8192)
2266 |  | -	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2267 |  | -
2268 | 2272 | 	rx_buffer->page_offset ^= truesize;
2269 | 2273 | #else
2270 |  | -	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2271 |  | -				SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
2272 |  | -				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2273 |  | -				SKB_DATA_ALIGN(size);
2274 |  | -
2275 | 2274 | 	rx_buffer->page_offset += truesize;
2276 | 2275 | #endif
2277 | 2276 | }
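
[Editor's note: the new ixgbe_rx_frame_truesize() helper above feeds both the buffer-flip arithmetic and xdp_buff::frame_sz (set in the next hunk). A standalone sketch of why frame_sz is set once on 4K pages but per-frame on larger ones; the alignment and padding constants here are illustrative, not the driver's:

	#include <stdio.h>

	#define SKB_DATA_ALIGN(x)	(((x) + 63) & ~63UL)	/* assume 64B cachelines */
	#define SKB_PAD			64			/* headroom, illustrative */
	#define SHINFO			320			/* shared-info size, illustrative */

	static unsigned long frame_truesize(unsigned long page_size, unsigned int size)
	{
		if (page_size < 8192)
			return page_size / 2;	/* fixed power-of-2 half page */
		return SKB_DATA_ALIGN(SKB_PAD + size) + SKB_DATA_ALIGN(SHINFO);
	}

	int main(void)
	{
		printf("%lu\n", frame_truesize(4096, 1500));	/* 2048, size-independent */
		printf("%lu\n", frame_truesize(65536, 1500));	/* grows with frame size */
		return 0;
	}

On sub-8K pages the ring halves each page into two fixed buffers, so truesize never depends on the received length; on 64K pages only the aligned payload plus shared-info overhead is charged, so it must be recomputed per descriptor.]
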
.. | ..
2304 | 2303 | 	struct xdp_buff xdp;
2305 | 2304 |
2306 | 2305 | 	xdp.rxq = &rx_ring->xdp_rxq;
 | 2306 | +
 | 2307 | +	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
 | 2308 | +#if (PAGE_SIZE < 8192)
 | 2309 | +	xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
 | 2310 | +#endif
2307 | 2311 |
2308 | 2312 | 	while (likely(total_rx_packets < budget)) {
2309 | 2313 | 		union ixgbe_adv_rx_desc *rx_desc;
.. | ..
2339 | 2343 | 			xdp.data_hard_start = xdp.data -
2340 | 2344 | 					      ixgbe_rx_offset(rx_ring);
2341 | 2345 | 			xdp.data_end = xdp.data + size;
2342 |  | -
 | 2346 | +#if (PAGE_SIZE > 4096)
 | 2347 | +			/* At larger PAGE_SIZE, frame_sz depends on frame length */
 | 2348 | +			xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
 | 2349 | +#endif
2343 | 2350 | 			skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2344 | 2351 | 		}
2345 | 2352 |
.. | ..
2970 | 2977 | 	/* skip the flush */
2971 | 2978 | }
2972 | 2979 |
2973 |  | -static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2974 |  | -					    u64 qmask)
2975 |  | -{
2976 |  | -	u32 mask;
2977 |  | -	struct ixgbe_hw *hw = &adapter->hw;
2978 |  | -
2979 |  | -	switch (hw->mac.type) {
2980 |  | -	case ixgbe_mac_82598EB:
2981 |  | -		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2982 |  | -		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2983 |  | -		break;
2984 |  | -	case ixgbe_mac_82599EB:
2985 |  | -	case ixgbe_mac_X540:
2986 |  | -	case ixgbe_mac_X550:
2987 |  | -	case ixgbe_mac_X550EM_x:
2988 |  | -	case ixgbe_mac_x550em_a:
2989 |  | -		mask = (qmask & 0xFFFFFFFF);
2990 |  | -		if (mask)
2991 |  | -			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2992 |  | -		mask = (qmask >> 32);
2993 |  | -		if (mask)
2994 |  | -			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2995 |  | -		break;
2996 |  | -	default:
2997 |  | -		break;
2998 |  | -	}
2999 |  | -	/* skip the flush */
3000 |  | -}
3001 |  | -
3002 | 2980 | /**
3003 | 2981 |  * ixgbe_irq_enable - Enable default interrupt generation settings
3004 | 2982 |  * @adapter: board private structure
.. | ..
3035 | 3013 | 	case ixgbe_mac_82599EB:
3036 | 3014 | 		mask |= IXGBE_EIMS_GPI_SDP1(hw);
3037 | 3015 | 		mask |= IXGBE_EIMS_GPI_SDP2(hw);
3038 |  | -		/* fall through */
 | 3016 | +		fallthrough;
3039 | 3017 | 	case ixgbe_mac_X540:
3040 | 3018 | 	case ixgbe_mac_X550:
3041 | 3019 | 	case ixgbe_mac_X550EM_x:
.. | ..
3184 | 3162 | #endif
3185 | 3163 |
3186 | 3164 | 	ixgbe_for_each_ring(ring, q_vector->tx) {
3187 |  | -		if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
 | 3165 | +		bool wd = ring->xsk_pool ?
 | 3166 | +			  ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
 | 3167 | +			  ixgbe_clean_tx_irq(q_vector, ring, budget);
 | 3168 | +
 | 3169 | +		if (!wd)
3188 | 3170 | 			clean_complete = false;
3189 | 3171 | 	}
3190 | 3172 |
.. | ..
3200 | 3182 | 		per_ring_budget = budget;
3201 | 3183 |
3202 | 3184 | 	ixgbe_for_each_ring(ring, q_vector->rx) {
3203 |  | -		int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
 | 3185 | +		int cleaned = ring->xsk_pool ?
 | 3186 | +			      ixgbe_clean_rx_irq_zc(q_vector, ring,
 | 3187 | +						    per_ring_budget) :
 | 3188 | +			      ixgbe_clean_rx_irq(q_vector, ring,
3204 | 3189 | 						 per_ring_budget);
3205 | 3190 |
3206 | 3191 | 		work_done += cleaned;
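
[Editor's note: the poll-loop hunks above fork the Tx and Rx datapaths on ring->xsk_pool, which the TXDCTL/RXDCTL hunks below populate per ring. For context, the lookup helper lives in ixgbe_xsk.c, not in this file; it looks approximately like the sketch below — treat the field names as assumptions:

	static struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
						    struct ixgbe_ring *ring)
	{
		bool xdp_on = READ_ONCE(adapter->xdp_prog);
		int qid = ring->ring_idx;

		/* zero-copy only when an XDP prog is attached and the queue
		 * was marked in the af_xdp_zc_qps bitmap (allocated in
		 * ixgbe_sw_init() further down in this patch) */
		if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
			return NULL;

		return xsk_get_pool_from_qid(adapter->netdev, qid);
	}

Because the pointer is re-read at ring configuration time, attaching or detaching an AF_XDP socket only requires bouncing the affected queue pair.]
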
.. | ..
3334 | 3319 | 	switch (hw->mac.type) {
3335 | 3320 | 	case ixgbe_mac_82599EB:
3336 | 3321 | 		ixgbe_check_sfp_event(adapter, eicr);
3337 |  | -		/* Fall through */
 | 3322 | +		fallthrough;
3338 | 3323 | 	case ixgbe_mac_X540:
3339 | 3324 | 	case ixgbe_mac_X550:
3340 | 3325 | 	case ixgbe_mac_X550EM_x:
.. | ..
3491 | 3476 | 	int wait_loop = 10;
3492 | 3477 | 	u32 txdctl = IXGBE_TXDCTL_ENABLE;
3493 | 3478 | 	u8 reg_idx = ring->reg_idx;
 | 3479 | +
 | 3480 | +	ring->xsk_pool = NULL;
 | 3481 | +	if (ring_is_xdp(ring))
 | 3482 | +		ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
3494 | 3483 |
3495 | 3484 | 	/* disable queue to avoid issues while updating state */
3496 | 3485 | 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
.. | ..
3730 | 3719 | 	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3731 | 3720 |
3732 | 3721 | 	/* configure the packet buffer length */
3733 |  | -	if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
 | 3722 | +	if (rx_ring->xsk_pool) {
 | 3723 | +		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
 | 3724 | +
 | 3725 | +		/* If the MAC supports setting RXDCTL.RLPML, the
 | 3726 | +		 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
 | 3727 | +		 * RXDCTL.RLPML is set to the actual UMEM buffer
 | 3728 | +		 * size. If not, then we are stuck with a 1k buffer
 | 3729 | +		 * size resolution. In this case frames larger than
 | 3730 | +		 * the UMEM buffer size viewed in a 1k resolution will
 | 3731 | +		 * be dropped.
 | 3732 | +		 */
 | 3733 | +		if (hw->mac.type != ixgbe_mac_82599EB)
 | 3734 | +			srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 | 3735 | +		else
 | 3736 | +			srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 | 3737 | +	} else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3734 | 3738 | 		srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3735 |  | -	else
 | 3739 | +	} else {
3736 | 3740 | 		srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 | 3741 | +	}
3737 | 3742 |
3738 | 3743 | 	/* configure descriptor type */
3739 | 3744 | 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
.. | ..
4059 | 4064 | 	u32 rxdctl;
4060 | 4065 | 	u8 reg_idx = ring->reg_idx;
4061 | 4066 |
 | 4067 | +	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 | 4068 | +	ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
 | 4069 | +	if (ring->xsk_pool) {
 | 4070 | +		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 | 4071 | +						   MEM_TYPE_XSK_BUFF_POOL,
 | 4072 | +						   NULL));
 | 4073 | +		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
 | 4074 | +	} else {
 | 4075 | +		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 | 4076 | +						   MEM_TYPE_PAGE_SHARED, NULL));
 | 4077 | +	}
 | 4078 | +
4062 | 4079 | 	/* disable queue to avoid use of these values while updating state */
4063 | 4080 | 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4064 | 4081 | 	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
.. | ..
4108 | 4125 | #endif
4109 | 4126 | 	}
4110 | 4127 |
 | 4128 | +	if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
 | 4129 | +		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 | 4130 | +
 | 4131 | +		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
 | 4132 | +			    IXGBE_RXDCTL_RLPML_EN);
 | 4133 | +		rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
 | 4134 | +
 | 4135 | +		ring->rx_buf_len = xsk_buf_len;
 | 4136 | +	}
 | 4137 | +
4111 | 4138 | 	/* initialize rx_buffer_info */
4112 | 4139 | 	memset(ring->rx_buffer_info, 0,
4113 | 4140 | 	       sizeof(struct ixgbe_rx_buffer) * ring->count);
.. | ..
4121 | 4148 | 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4122 | 4149 |
4123 | 4150 | 	ixgbe_rx_desc_queue_enable(adapter, ring);
4124 |  | -	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 | 4151 | +	if (ring->xsk_pool)
 | 4152 | +		ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
 | 4153 | +	else
 | 4154 | +		ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4125 | 4155 | }
4126 | 4156 |
4127 | 4157 | static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
.. | ..
4271 | 4301 | 		if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4272 | 4302 | 			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4273 | 4303 |
4274 |  | -		clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4275 | 4304 | 		if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4276 | 4305 | 			continue;
4277 | 4306 |
.. | ..
4312 | 4341 | 	case ixgbe_mac_x550em_a:
4313 | 4342 | 		if (adapter->num_vfs)
4314 | 4343 | 			rdrxctl |= IXGBE_RDRXCTL_PSP;
4315 |  | -		/* fall through */
 | 4344 | +		fallthrough;
4316 | 4345 | 	case ixgbe_mac_82599EB:
4317 | 4346 | 	case ixgbe_mac_X540:
4318 | 4347 | 		/* Disable RSC for ACK packets */
.. | ..
4971 | 5000 | 		napi_disable(&adapter->q_vector[q_idx]->napi);
4972 | 5001 | }
4973 | 5002 |
4974 |  | -static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
 | 5003 | +static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
4975 | 5004 | {
 | 5005 | +	struct ixgbe_adapter *adapter = netdev_priv(dev);
4976 | 5006 | 	struct ixgbe_hw *hw = &adapter->hw;
4977 |  | -	u32 vxlanctrl;
 | 5007 | +	struct udp_tunnel_info ti;
4978 | 5008 |
4979 |  | -	if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
4980 |  | -				IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
4981 |  | -		return;
 | 5009 | +	udp_tunnel_nic_get_port(dev, table, 0, &ti);
 | 5010 | +	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
 | 5011 | +		adapter->vxlan_port = ti.port;
 | 5012 | +	else
 | 5013 | +		adapter->geneve_port = ti.port;
4982 | 5014 |
4983 |  | -	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
4984 |  | -	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
4985 |  | -
4986 |  | -	if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
4987 |  | -		adapter->vxlan_port = 0;
4988 |  | -
4989 |  | -	if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
4990 |  | -		adapter->geneve_port = 0;
 | 5015 | +	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
 | 5016 | +			ntohs(adapter->vxlan_port) |
 | 5017 | +			ntohs(adapter->geneve_port) <<
 | 5018 | +				IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
 | 5019 | +	return 0;
4991 | 5020 | }
 | 5021 | +
 | 5022 | +static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
 | 5023 | +	.sync_table	= ixgbe_udp_tunnel_sync,
 | 5024 | +	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
 | 5025 | +	.tables		= {
 | 5026 | +		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
 | 5027 | +	},
 | 5028 | +};
 | 5029 | +
 | 5030 | +static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
 | 5031 | +	.sync_table	= ixgbe_udp_tunnel_sync,
 | 5032 | +	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
 | 5033 | +	.tables		= {
 | 5034 | +		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
 | 5035 | +		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
 | 5036 | +	},
 | 5037 | +};
4992 | 5038 |
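
[Editor's note: the hunk above replaces the driver's hand-rolled VXLAN/GENEVE port tracking with the core udp_tunnel_nic tables; the core now remembers offloaded ports and calls ixgbe_udp_tunnel_sync() whenever a table slot changes. The tables only take effect once they are hung off the netdev at probe time, which happens outside this section; a sketch of that wiring, assuming the usual pattern:

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
		break;
	case ixgbe_mac_x550em_a:
		netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
		break;
	default:
		break;
	}

With that in place, udp_tunnel_nic_reset_ntf() (used in ixgbe_open() further down) asks the core to replay every offloaded port after a hardware reset, replacing the IXGBE_FLAG2_UDP_TUN_REREG_NEEDED dance removed near the end of this patch.]
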
4993 | 5039 | #ifdef CONFIG_IXGBE_DCB
4994 | 5040 | /**
.. | ..
5252 | 5298 | 	u16 i = rx_ring->next_to_clean;
5253 | 5299 | 	struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5254 | 5300 |
 | 5301 | +	if (rx_ring->xsk_pool) {
 | 5302 | +		ixgbe_xsk_clean_rx_ring(rx_ring);
 | 5303 | +		goto skip_free;
 | 5304 | +	}
 | 5305 | +
5255 | 5306 | 	/* Free all the Rx ring sk_buffs */
5256 | 5307 | 	while (i != rx_ring->next_to_alloc) {
5257 | 5308 | 		if (rx_buffer->skb) {
.. | ..
5290 | 5341 | 		}
5291 | 5342 | 	}
5292 | 5343 |
 | 5344 | +skip_free:
5293 | 5345 | 	rx_ring->next_to_alloc = 0;
5294 | 5346 | 	rx_ring->next_to_clean = 0;
5295 | 5347 | 	rx_ring->next_to_use = 0;
.. | ..
5350 | 5402 | 		return err;
5351 | 5403 | }
5352 | 5404 |
5353 |  | -static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
 | 5405 | +static int ixgbe_macvlan_up(struct net_device *vdev,
 | 5406 | +			    struct netdev_nested_priv *priv)
5354 | 5407 | {
5355 |  | -	struct ixgbe_adapter *adapter = data;
 | 5408 | +	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
5356 | 5409 | 	struct ixgbe_fwd_adapter *accel;
5357 | 5410 |
5358 | 5411 | 	if (!netif_is_macvlan(vdev))
.. | ..
5369 | 5422 |
5370 | 5423 | static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5371 | 5424 | {
 | 5425 | +	struct netdev_nested_priv priv = {
 | 5426 | +		.data = (void *)adapter,
 | 5427 | +	};
 | 5428 | +
5372 | 5429 | 	netdev_walk_all_upper_dev_rcu(adapter->netdev,
5373 |  | -				      ixgbe_macvlan_up, adapter);
 | 5430 | +				      ixgbe_macvlan_up, &priv);
5374 | 5431 | }
5375 | 5432 |
5376 | 5433 | static void ixgbe_configure(struct ixgbe_adapter *adapter)
.. | ..
5472 | 5529 | 		return ret;
5473 | 5530 |
5474 | 5531 | 	speed = hw->phy.autoneg_advertised;
5475 |  | -	if ((!speed) && (hw->mac.ops.get_link_capabilities))
 | 5532 | +	if (!speed && hw->mac.ops.get_link_capabilities) {
5476 | 5533 | 		ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5477 | 5534 | 							&autoneg);
 | 5535 | +		/* remove NBASE-T speeds from default autonegotiation
 | 5536 | +		 * to accommodate broken network switches in the field
 | 5537 | +		 * which cannot cope with advertised NBASE-T speeds
 | 5538 | +		 */
 | 5539 | +		speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
 | 5540 | +			   IXGBE_LINK_SPEED_2_5GB_FULL);
 | 5541 | +	}
 | 5542 | +
5478 | 5543 | 	if (ret)
5479 | 5544 | 		return ret;
5480 | 5545 |
.. | ..
5627 | 5692 |
5628 | 5693 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5629 | 5694 | {
5630 |  | -	WARN_ON(in_interrupt());
5631 | 5695 | 	/* put off any impending NetWatchDogTimeout */
5632 | 5696 | 	netif_trans_update(adapter->netdev);
5633 | 5697 |
.. | ..
5856 | 5920 | 			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5857 | 5921 | 					(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5858 | 5922 | 					 ~IXGBE_DMATXCTL_TE));
5859 |  | -		/* fall through */
 | 5923 | +		fallthrough;
5860 | 5924 | 	default:
5861 | 5925 | 		break;
5862 | 5926 | 	}
.. | ..
5934 | 5998 | 	u16 i = tx_ring->next_to_clean;
5935 | 5999 | 	struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5936 | 6000 |
 | 6001 | +	if (tx_ring->xsk_pool) {
 | 6002 | +		ixgbe_xsk_clean_tx_ring(tx_ring);
 | 6003 | +		goto out;
 | 6004 | +	}
 | 6005 | +
5937 | 6006 | 	while (i != tx_ring->next_to_use) {
5938 | 6007 | 		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5939 | 6008 |
.. | ..
5985 | 6054 | 	if (!ring_is_xdp(tx_ring))
5986 | 6055 | 		netdev_tx_reset_queue(txring_txq(tx_ring));
5987 | 6056 |
 | 6057 | +out:
5988 | 6058 | 	/* reset next_to_use and next_to_clean */
5989 | 6059 | 	tx_ring->next_to_use = 0;
5990 | 6060 | 	tx_ring->next_to_clean = 0;
.. | ..
6053 | 6123 | 	/* Disable Rx */
6054 | 6124 | 	ixgbe_disable_rx(adapter);
6055 | 6125 |
6056 |  | -	/* synchronize_sched() needed for pending XDP buffers to drain */
 | 6126 | +	/* synchronize_rcu() needed for pending XDP buffers to drain */
6057 | 6127 | 	if (adapter->xdp_ring[0])
6058 |  | -		synchronize_sched();
 | 6128 | +		synchronize_rcu();
6059 | 6129 |
6060 | 6130 | 	ixgbe_irq_disable(adapter);
6061 | 6131 |
.. | ..
6124 | 6194 | /**
6125 | 6195 |  * ixgbe_tx_timeout - Respond to a Tx Hang
6126 | 6196 |  * @netdev: network interface device structure
 | 6197 | + * @txqueue: queue number that timed out
6127 | 6198 |  **/
6128 |  | -static void ixgbe_tx_timeout(struct net_device *netdev)
 | 6199 | +static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6129 | 6200 | {
6130 | 6201 | 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
6131 | 6202 |
.. | ..
6259 | 6330 | 	if (ixgbe_init_rss_key(adapter))
6260 | 6331 | 		return -ENOMEM;
6261 | 6332 |
 | 6333 | +	adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
 | 6334 | +	if (!adapter->af_xdp_zc_qps)
 | 6335 | +		return -ENOMEM;
 | 6336 | +
6262 | 6337 | 	/* Set MAC specific capability flags and exceptions */
6263 | 6338 | 	switch (hw->mac.type) {
6264 | 6339 | 	case ixgbe_mac_82598EB:
.. | ..
6289 | 6364 | 		adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6290 | 6365 | 		break;
6291 | 6366 | 	case ixgbe_mac_x550em_a:
6292 |  | -		adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
6293 | 6367 | 		switch (hw->device_id) {
6294 | 6368 | 		case IXGBE_DEV_ID_X550EM_A_1G_T:
6295 | 6369 | 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
.. | ..
6298 | 6372 | 		default:
6299 | 6373 | 			break;
6300 | 6374 | 		}
6301 |  | -		/* fall through */
 | 6375 | +		fallthrough;
6302 | 6376 | 	case ixgbe_mac_X550EM_x:
6303 | 6377 | #ifdef CONFIG_IXGBE_DCB
6304 | 6378 | 		adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
.. | ..
6309 | 6383 | 		adapter->fcoe.up = 0;
6310 | 6384 | #endif /* IXGBE_DCB */
6311 | 6385 | #endif /* IXGBE_FCOE */
6312 |  | -		/* Fall Through */
 | 6386 | +		fallthrough;
6313 | 6387 | 	case ixgbe_mac_X550:
6314 | 6388 | 		if (hw->mac.type == ixgbe_mac_X550)
6315 | 6389 | 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6316 | 6390 | #ifdef CONFIG_IXGBE_DCA
6317 | 6391 | 		adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6318 | 6392 | #endif
6319 |  | -		adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
6320 | 6393 | 		break;
6321 | 6394 | 	default:
6322 | 6395 | 		break;
.. | ..
6329 | 6402 | #endif
6330 | 6403 | 	/* n-tuple support exists, always init our spinlock */
6331 | 6404 | 	spin_lock_init(&adapter->fdir_perfect_lock);
 | 6405 | +
 | 6406 | +	/* init spinlock to avoid concurrency of VF resources */
 | 6407 | +	spin_lock_init(&adapter->vfs_lock);
6332 | 6408 |
6333 | 6409 | #ifdef CONFIG_IXGBE_DCB
6334 | 6410 | 	ixgbe_init_dcb(adapter);
.. | ..
6390 | 6466 | {
6391 | 6467 | 	struct device *dev = tx_ring->dev;
6392 | 6468 | 	int orig_node = dev_to_node(dev);
6393 |  | -	int ring_node = -1;
 | 6469 | +	int ring_node = NUMA_NO_NODE;
6394 | 6470 | 	int size;
6395 | 6471 |
6396 | 6472 | 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
.. | ..
6484 | 6560 | {
6485 | 6561 | 	struct device *dev = rx_ring->dev;
6486 | 6562 | 	int orig_node = dev_to_node(dev);
6487 |  | -	int ring_node = -1;
6488 |  | -	int size, err;
 | 6563 | +	int ring_node = NUMA_NO_NODE;
 | 6564 | +	int size;
6489 | 6565 |
6490 | 6566 | 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6491 | 6567 |
.. | ..
6521 | 6597 | 	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6522 | 6598 | 			     rx_ring->queue_index) < 0)
6523 | 6599 | 		goto err;
6524 |  | -
6525 |  | -	err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
6526 |  | -					 MEM_TYPE_PAGE_SHARED, NULL);
6527 |  | -	if (err) {
6528 |  | -		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6529 |  | -		goto err;
6530 |  | -	}
6531 | 6600 |
6532 | 6601 | 	rx_ring->xdp_prog = adapter->xdp_prog;
6533 | 6602 |
.. | ..
6695 | 6764 | 	    (new_mtu > ETH_DATA_LEN))
6696 | 6765 | 		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6697 | 6766 |
6698 |  | -	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
 | 6767 | +	netdev_dbg(netdev, "changing MTU from %d to %d\n",
 | 6768 | +		   netdev->mtu, new_mtu);
6699 | 6769 |
6700 | 6770 | 	/* must set new MTU before calling down or up */
6701 | 6771 | 	netdev->mtu = new_mtu;
.. | ..
6761 | 6831 |
6762 | 6832 | 	ixgbe_up_complete(adapter);
6763 | 6833 |
6764 |  | -	ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6765 |  | -	udp_tunnel_get_rx_info(netdev);
 | 6834 | +	udp_tunnel_nic_reset_ntf(netdev);
6766 | 6835 |
6767 | 6836 | 	return 0;
6768 | 6837 |
.. | ..
6826 | 6895 | 	return 0;
6827 | 6896 | }
6828 | 6897 |
6829 |  | -#ifdef CONFIG_PM
6830 |  | -static int ixgbe_resume(struct pci_dev *pdev)
 | 6898 | +static int __maybe_unused ixgbe_resume(struct device *dev_d)
6831 | 6899 | {
 | 6900 | +	struct pci_dev *pdev = to_pci_dev(dev_d);
6832 | 6901 | 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6833 | 6902 | 	struct net_device *netdev = adapter->netdev;
6834 | 6903 | 	u32 err;
6835 | 6904 |
6836 | 6905 | 	adapter->hw.hw_addr = adapter->io_addr;
6837 |  | -	pci_set_power_state(pdev, PCI_D0);
6838 |  | -	pci_restore_state(pdev);
6839 |  | -	/*
6840 |  | -	 * pci_restore_state clears dev->state_saved so call
6841 |  | -	 * pci_save_state to restore it.
6842 |  | -	 */
6843 |  | -	pci_save_state(pdev);
6844 | 6906 |
6845 | 6907 | 	err = pci_enable_device_mem(pdev);
6846 | 6908 | 	if (err) {
.. | ..
6851 | 6913 | 	clear_bit(__IXGBE_DISABLED, &adapter->state);
6852 | 6914 | 	pci_set_master(pdev);
6853 | 6915 |
6854 |  | -	pci_wake_from_d3(pdev, false);
 | 6916 | +	device_wakeup_disable(dev_d);
6855 | 6917 |
6856 | 6918 | 	ixgbe_reset(adapter);
6857 | 6919 |
.. | ..
6869 | 6931 |
6870 | 6932 | 	return err;
6871 | 6933 | }
6872 |  | -#endif /* CONFIG_PM */
6873 | 6934 |
6874 | 6935 | static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6875 | 6936 | {
.. | ..
6878 | 6939 | 	struct ixgbe_hw *hw = &adapter->hw;
6879 | 6940 | 	u32 ctrl;
6880 | 6941 | 	u32 wufc = adapter->wol;
6881 |  | -#ifdef CONFIG_PM
6882 |  | -	int retval = 0;
6883 |  | -#endif
6884 | 6942 |
6885 | 6943 | 	rtnl_lock();
6886 | 6944 | 	netif_device_detach(netdev);
.. | ..
6891 | 6949 | 	ixgbe_clear_interrupt_scheme(adapter);
6892 | 6950 | 	rtnl_unlock();
6893 | 6951 |
6894 |  | -#ifdef CONFIG_PM
6895 |  | -	retval = pci_save_state(pdev);
6896 |  | -	if (retval)
6897 |  | -		return retval;
6898 |  | -
6899 |  | -#endif
6900 | 6952 | 	if (hw->mac.ops.stop_link_on_d3)
6901 | 6953 | 		hw->mac.ops.stop_link_on_d3(hw);
6902 | 6954 |
.. | ..
6951 | 7003 | 	return 0;
6952 | 7004 | }
6953 | 7005 |
6954 |  | -#ifdef CONFIG_PM
6955 |  | -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
 | 7006 | +static int __maybe_unused ixgbe_suspend(struct device *dev_d)
6956 | 7007 | {
 | 7008 | +	struct pci_dev *pdev = to_pci_dev(dev_d);
6957 | 7009 | 	int retval;
6958 | 7010 | 	bool wake;
6959 | 7011 |
6960 | 7012 | 	retval = __ixgbe_shutdown(pdev, &wake);
6961 |  | -	if (retval)
6962 |  | -		return retval;
6963 | 7013 |
6964 |  | -	if (wake) {
6965 |  | -		pci_prepare_to_sleep(pdev);
6966 |  | -	} else {
6967 |  | -		pci_wake_from_d3(pdev, false);
6968 |  | -		pci_set_power_state(pdev, PCI_D3hot);
6969 |  | -	}
 | 7014 | +	device_set_wakeup_enable(dev_d, wake);
6970 | 7015 |
6971 |  | -	return 0;
 | 7016 | +	return retval;
6972 | 7017 | }
6973 |  | -#endif /* CONFIG_PM */
6974 | 7018 |
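
[Editor's note: the suspend/resume hunks above convert the legacy pci_driver .suspend/.resume pair (struct pci_dev plus pm_message_t) to generic PM callbacks taking a struct device; __maybe_unused replaces the CONFIG_PM #ifdefs, and the PCI core now performs the state save/restore and power-state transitions that the deleted pci_save_state()/pci_set_power_state() calls did by hand. The callbacks get wired up roughly like this — the pci_driver hunk is not shown in this section, so treat the exact field layout as assumed:

	static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);

	static struct pci_driver ixgbe_driver = {
		.name      = ixgbe_driver_name,
		.id_table  = ixgbe_pci_tbl,
		.probe     = ixgbe_probe,
		.remove    = ixgbe_remove,
		.driver.pm = &ixgbe_pm_ops,	/* replaces .suspend/.resume */
		.shutdown  = ixgbe_shutdown,
		/* ... error handlers, sriov_configure, etc. ... */
	};

SIMPLE_DEV_PM_OPS builds a dev_pm_ops that routes system suspend, hibernate freeze, and poweroff through the same two functions, which is why the driver-side CONFIG_PM plumbing could be deleted wholesale.]
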
6975 | 7019 | static void ixgbe_shutdown(struct pci_dev *pdev)
6976 | 7020 | {
.. | ..
7135 | 7179 | 		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7136 | 7180 | 		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7137 | 7181 | 		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7138 |  | -		/* fall through */
 | 7182 | +		fallthrough;
7139 | 7183 | 	case ixgbe_mac_82599EB:
7140 | 7184 | 		for (i = 0; i < 16; i++)
7141 | 7185 | 			adapter->hw_rx_no_dma_resources +=
.. | ..
7834 | 7878 | }
7835 | 7879 |
7836 | 7880 | /**
 | 7881 | + * ixgbe_check_fw_error - Check firmware for errors
 | 7882 | + * @adapter: the adapter private structure
 | 7883 | + *
 | 7884 | + * Check firmware errors in register FWSM
 | 7885 | + */
 | 7886 | +static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
 | 7887 | +{
 | 7888 | +	struct ixgbe_hw *hw = &adapter->hw;
 | 7889 | +	u32 fwsm;
 | 7890 | +
 | 7891 | +	/* read fwsm.ext_err_ind register and log errors */
 | 7892 | +	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
 | 7893 | +
 | 7894 | +	if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
 | 7895 | +	    !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
 | 7896 | +		e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
 | 7897 | +			   fwsm);
 | 7898 | +
 | 7899 | +	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
 | 7900 | +		e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
 | 7901 | +		return true;
 | 7902 | +	}
 | 7903 | +
 | 7904 | +	return false;
 | 7905 | +}
 | 7906 | +
 | 7907 | +/**
7837 | 7908 |  * ixgbe_service_task - manages and runs subtasks
7838 | 7909 |  * @work: pointer to work_struct containing our data
7839 | 7910 |  **/
.. | ..
7851 | 7922 | 		ixgbe_service_event_complete(adapter);
7852 | 7923 | 		return;
7853 | 7924 | 	}
7854 |  | -	if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7855 |  | -		rtnl_lock();
7856 |  | -		adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7857 |  | -		udp_tunnel_get_rx_info(adapter->netdev);
7858 |  | -		rtnl_unlock();
 | 7925 | +	if (ixgbe_check_fw_error(adapter)) {
 | 7926 | +		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 | 7927 | +			unregister_netdev(adapter->netdev);
 | 7928 | +		ixgbe_service_event_complete(adapter);
 | 7929 | +		return;
7859 | 7930 | 	}
7860 | 7931 | 	ixgbe_reset_subtask(adapter);
7861 | 7932 | 	ixgbe_phy_interrupt_subtask(adapter);
.. | ..
7890 | 7961 | 	} ip;
7891 | 7962 | 	union {
7892 | 7963 | 		struct tcphdr *tcp;
 | 7964 | +		struct udphdr *udp;
7893 | 7965 | 		unsigned char *hdr;
7894 | 7966 | 	} l4;
7895 | 7967 | 	u32 paylen, l4_offset;
.. | ..
7913 | 7985 | 	l4.hdr = skb_checksum_start(skb);
7914 | 7986 |
7915 | 7987 | 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
7916 |  | -	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 | 7988 | +	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
 | 7989 | +		     IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
7917 | 7990 |
7918 | 7991 | 	/* initialize outer IP header fields */
7919 | 7992 | 	if (ip.v4->version == 4) {
.. | ..
7943 | 8016 | 	/* determine offset of inner transport header */
7944 | 8017 | 	l4_offset = l4.hdr - skb->data;
7945 | 8018 |
7946 |  | -	/* compute length of segmentation header */
7947 |  | -	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
7948 |  | -
7949 | 8019 | 	/* remove payload length from inner checksum */
7950 | 8020 | 	paylen = skb->len - l4_offset;
7951 |  | -	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
 | 8021 | +
 | 8022 | +	if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
 | 8023 | +		/* compute length of segmentation header */
 | 8024 | +		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
 | 8025 | +		csum_replace_by_diff(&l4.tcp->check,
 | 8026 | +				     (__force __wsum)htonl(paylen));
 | 8027 | +	} else {
 | 8028 | +		/* compute length of segmentation header */
 | 8029 | +		*hdr_len = sizeof(*l4.udp) + l4_offset;
 | 8030 | +		csum_replace_by_diff(&l4.udp->check,
 | 8031 | +				     (__force __wsum)htonl(paylen));
 | 8032 | +	}
7952 | 8033 |
7953 | 8034 | 	/* update gso size and bytecount with header size */
7954 | 8035 | 	first->gso_segs = skb_shinfo(skb)->gso_segs;
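
[Editor's note: the TSO hunks above add NETIF_F_GSO_UDP_L4 handling — the same pseudo-header checksum trick as TCP, but with a fixed 8-byte UDP header instead of TCP's doff*4. On the transmit side this pairs with UDP_SEGMENT on the sending socket; a userspace sketch, with the constant guarded in case the libc headers predate it (it comes from linux/udp.h):

	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef UDP_SEGMENT
	#define UDP_SEGMENT 103		/* from linux/udp.h */
	#endif

	/* Ask the stack to split each large send into gso_size-byte
	 * datagrams; on a NIC advertising NETIF_F_GSO_UDP_L4 the split
	 * happens in hardware rather than in software GSO. */
	static int enable_udp_gso(int fd, int gso_size)
	{
		return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
				  &gso_size, sizeof(gso_size));
	}

The driver-side checksum adjustment works because the stack's partial checksum covers the pseudo-header including the total length: the hardware re-adds each segment's own length, so the original payload length must be subtracted first via csum_replace_by_diff().]
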
.. | ..
8001 | 8082 | 	switch (skb->csum_offset) {
8002 | 8083 | 	case offsetof(struct tcphdr, check):
8003 | 8084 | 		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8004 |  | -		/* fall through */
 | 8085 | +		fallthrough;
8005 | 8086 | 	case offsetof(struct udphdr, check):
8006 | 8087 | 		break;
8007 | 8088 | 	case offsetof(struct sctphdr, checksum):
.. | ..
8013 | 8094 | 			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
8014 | 8095 | 			break;
8015 | 8096 | 		}
8016 |  | -		/* fall through */
 | 8097 | +		fallthrough;
8017 | 8098 | 	default:
8018 | 8099 | 		skb_checksum_help(skb);
8019 | 8100 | 		goto csum_failed;
.. | ..
8125 | 8206 | 	return __ixgbe_maybe_stop_tx(tx_ring, size);
8126 | 8207 | }
8127 | 8208 |
8128 |  | -#define IXGBE_TXD_CMD	(IXGBE_TXD_CMD_EOP | \
8129 |  | -			 IXGBE_TXD_CMD_RS)
8130 |  | -
8131 | 8209 | static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8132 | 8210 | 			struct ixgbe_tx_buffer *first,
8133 | 8211 | 			const u8 hdr_len)
.. | ..
8135 | 8213 | 	struct sk_buff *skb = first->skb;
8136 | 8214 | 	struct ixgbe_tx_buffer *tx_buffer;
8137 | 8215 | 	union ixgbe_adv_tx_desc *tx_desc;
8138 |  | -	struct skb_frag_struct *frag;
 | 8216 | +	skb_frag_t *frag;
8139 | 8217 | 	dma_addr_t dma;
8140 | 8218 | 	unsigned int data_len, size;
8141 | 8219 | 	u32 tx_flags = first->tx_flags;
.. | ..
8227 | 8305 | 	/* set the timestamp */
8228 | 8306 | 	first->time_stamp = jiffies;
8229 | 8307 |
 | 8308 | +	skb_tx_timestamp(skb);
 | 8309 | +
8230 | 8310 | 	/*
8231 | 8311 | 	 * Force memory writes to complete before letting h/w know there
8232 | 8312 | 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
.. | ..
8248 | 8328 |
8249 | 8329 | 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8250 | 8330 |
8251 |  | -	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 | 8331 | +	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8252 | 8332 | 		writel(i, tx_ring->tail);
8253 |  | -
8254 |  | -		/* we need this if more than one processor can write to our tail
8255 |  | -		 * at a time, it synchronizes IO on IA64/Altix systems
8256 |  | -		 */
8257 |  | -		mmiowb();
8258 | 8333 | 	}
8259 | 8334 |
8260 | 8335 | 	return 0;
.. | ..
8434 | 8509 |
8435 | 8510 | #ifdef IXGBE_FCOE
8436 | 8511 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8437 |  | -			      struct net_device *sb_dev,
8438 |  | -			      select_queue_fallback_t fallback)
 | 8512 | +			      struct net_device *sb_dev)
8439 | 8513 | {
8440 | 8514 | 	struct ixgbe_adapter *adapter;
8441 | 8515 | 	struct ixgbe_ring_feature *f;
.. | ..
8463 | 8537 |
8464 | 8538 | 		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
8465 | 8539 | 			break;
8466 |  | -		/* fall through */
 | 8540 | +		fallthrough;
8467 | 8541 | 	default:
8468 |  | -		return fallback(dev, skb, sb_dev);
 | 8542 | +		return netdev_pick_tx(dev, skb, sb_dev);
8469 | 8543 | 	}
8470 | 8544 |
8471 | 8545 | 	f = &adapter->ring_feature[RING_F_FCOE];
.. | ..
8480 | 8554 | }
8481 | 8555 |
8482 | 8556 | #endif
8483 |  | -static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8484 |  | -			       struct xdp_frame *xdpf)
 | 8557 | +int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 | 8558 | +			struct xdp_frame *xdpf)
8485 | 8559 | {
8486 | 8560 | 	struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8487 | 8561 | 	struct ixgbe_tx_buffer *tx_buffer;
.. | ..
8558 | 8632 | 	 * otherwise try next time
8559 | 8633 | 	 */
8560 | 8634 | 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8561 |  | -		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 | 8635 | +		count += TXD_USE_COUNT(skb_frag_size(
 | 8636 | +						&skb_shinfo(skb)->frags[f]));
8562 | 8637 |
8563 | 8638 | 	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8564 | 8639 | 		tx_ring->tx_stats.tx_busy++;
.. | ..
8604 | 8679 | 			adapter->tx_hwtstamp_skipped++;
8605 | 8680 | 		}
8606 | 8681 | 	}
8607 |  | -
8608 |  | -	skb_tx_timestamp(skb);
8609 | 8682 |
8610 | 8683 | #ifdef CONFIG_PCI_IOV
8611 | 8684 | 	/*
.. | ..
8653 | 8726 |
8654 | 8727 | #endif /* IXGBE_FCOE */
8655 | 8728 |
8656 |  | -#ifdef CONFIG_XFRM_OFFLOAD
 | 8729 | +#ifdef CONFIG_IXGBE_IPSEC
8657 | 8730 | 	if (xfrm_offload(skb) &&
8658 | 8731 | 	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8659 | 8732 | 		goto out_drop;
.. | ..
8704 | 8777 | 	if (skb_put_padto(skb, 17))
8705 | 8778 | 		return NETDEV_TX_OK;
8706 | 8779 |
8707 |  | -	tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
 | 8780 | +	tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
 | 8781 | +	if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
 | 8782 | +		return NETDEV_TX_BUSY;
8708 | 8783 |
8709 | 8784 | 	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8710 | 8785 | }
.. | ..
8747 | 8822 | 	u16 value;
8748 | 8823 | 	int rc;
8749 | 8824 |
 | 8825 | +	if (adapter->mii_bus) {
 | 8826 | +		int regnum = addr;
 | 8827 | +
 | 8828 | +		if (devad != MDIO_DEVAD_NONE)
 | 8829 | +			regnum |= (devad << 16) | MII_ADDR_C45;
 | 8830 | +
 | 8831 | +		return mdiobus_read(adapter->mii_bus, prtad, regnum);
 | 8832 | +	}
 | 8833 | +
8750 | 8834 | 	if (prtad != hw->phy.mdio.prtad)
8751 | 8835 | 		return -EINVAL;
8752 | 8836 | 	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
.. | ..
8760 | 8844 | {
8761 | 8845 | 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8762 | 8846 | 	struct ixgbe_hw *hw = &adapter->hw;
 | 8847 | +
 | 8848 | +	if (adapter->mii_bus) {
 | 8849 | +		int regnum = addr;
 | 8850 | +
 | 8851 | +		if (devad != MDIO_DEVAD_NONE)
 | 8852 | +			regnum |= (devad << 16) | MII_ADDR_C45;
 | 8853 | +
 | 8854 | +		return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
 | 8855 | +	}
8763 | 8856 |
8764 | 8857 | 	if (prtad != hw->phy.mdio.prtad)
8765 | 8858 | 		return -EINVAL;
.. | ..
8778 | 8871 | 	case SIOCGMIIPHY:
8779 | 8872 | 		if (!adapter->hw.phy.ops.read_reg)
8780 | 8873 | 			return -EOPNOTSUPP;
8781 |  | -		/* fall through */
 | 8874 | +		fallthrough;
8782 | 8875 | 	default:
8783 | 8876 | 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8784 | 8877 | 	}
.. | ..
8953 | 9046 | }
8954 | 9047 |
8955 | 9048 | #endif /* CONFIG_IXGBE_DCB */
8956 |  | -static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
 | 9049 | +static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
 | 9050 | +				       struct netdev_nested_priv *priv)
8957 | 9051 | {
8958 |  | -	struct ixgbe_adapter *adapter = data;
 | 9052 | +	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
8959 | 9053 | 	struct ixgbe_fwd_adapter *accel;
8960 | 9054 | 	int pool;
8961 | 9055 |
.. | ..
8992 | 9086 | static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
8993 | 9087 | {
8994 | 9088 | 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 | 9089 | +	struct netdev_nested_priv priv = {
 | 9090 | +		.data = (void *)adapter,
 | 9091 | +	};
8995 | 9092 |
8996 | 9093 | 	/* flush any stale bits out of the fwd bitmask */
8997 | 9094 | 	bitmap_clear(adapter->fwd_bitmask, 1, 63);
8998 | 9095 |
8999 | 9096 | 	/* walk through upper devices reassigning pools */
9000 | 9097 | 	netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9001 |  | -				      adapter);
 | 9098 | +				      &priv);
9002 | 9099 | }
9003 | 9100 |
9004 | 9101 | /**
.. | ..
9172 | 9269 | 	u8 queue;
9173 | 9270 | };
9174 | 9271 |
9175 |  | -static int get_macvlan_queue(struct net_device *upper, void *_data)
 | 9272 | +static int get_macvlan_queue(struct net_device *upper,
 | 9273 | +			     struct netdev_nested_priv *priv)
9176 | 9274 | {
9177 | 9275 | 	if (netif_is_macvlan(upper)) {
9178 | 9276 | 		struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9179 |  | -		struct upper_walk_data *data = _data;
9180 |  | -		struct ixgbe_adapter *adapter = data->adapter;
9181 |  | -		int ifindex = data->ifindex;
 | 9277 | +		struct ixgbe_adapter *adapter;
 | 9278 | +		struct upper_walk_data *data;
 | 9279 | +		int ifindex;
9182 | 9280 |
 | 9281 | +		data = (struct upper_walk_data *)priv->data;
 | 9282 | +		ifindex = data->ifindex;
 | 9283 | +		adapter = data->adapter;
9183 | 9284 | 		if (vadapter && upper->ifindex == ifindex) {
9184 | 9285 | 			data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9185 | 9286 | 			data->action = data->queue;
.. | ..
9195 | 9296 | {
9196 | 9297 | 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9197 | 9298 | 	unsigned int num_vfs = adapter->num_vfs, vf;
 | 9299 | +	struct netdev_nested_priv priv;
9198 | 9300 | 	struct upper_walk_data data;
9199 | 9301 | 	struct net_device *upper;
9200 | 9302 |
.. | ..
9214 | 9316 | 	data.ifindex = ifindex;
9215 | 9317 | 	data.action = 0;
9216 | 9318 | 	data.queue = 0;
 | 9319 | +	priv.data = (void *)&data;
9217 | 9320 | 	if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9218 |  | -					  get_macvlan_queue, &data)) {
 | 9321 | +					  get_macvlan_queue, &priv)) {
9219 | 9322 | 		*action = data.action;
9220 | 9323 | 		*queue = data.queue;
9221 | 9324 |
.. | ..
9426 | 9529 | 			jump->mat = nexthdr[i].jump;
9427 | 9530 | 			adapter->jump_tables[link_uhtid] = jump;
9428 | 9531 | 			break;
 | 9532 | +		} else {
 | 9533 | +			kfree(mask);
 | 9534 | +			kfree(input);
 | 9535 | +			kfree(jump);
9429 | 9536 | 		}
9430 | 9537 | 	}
9431 | 9538 | 	return 0;
.. | ..
9545 | 9652 | 	}
9546 | 9653 | }
9547 | 9654 |
9548 |  | -static int ixgbe_setup_tc_block(struct net_device *dev,
9549 |  | -				struct tc_block_offload *f)
9550 |  | -{
9551 |  | -	struct ixgbe_adapter *adapter = netdev_priv(dev);
9552 |  | -
9553 |  | -	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9554 |  | -		return -EOPNOTSUPP;
9555 |  | -
9556 |  | -	switch (f->command) {
9557 |  | -	case TC_BLOCK_BIND:
9558 |  | -		return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
9559 |  | -					     adapter, adapter, f->extack);
9560 |  | -	case TC_BLOCK_UNBIND:
9561 |  | -		tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
9562 |  | -					adapter);
9563 |  | -		return 0;
9564 |  | -	default:
9565 |  | -		return -EOPNOTSUPP;
9566 |  | -	}
9567 |  | -}
9568 |  | -
9569 | 9655 | static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9570 | 9656 | 				 struct tc_mqprio_qopt *mqprio)
9571 | 9657 | {
.. | ..
9573 | 9659 | 	return ixgbe_setup_tc(dev, mqprio->num_tc);
9574 | 9660 | }
9575 | 9661 |
 | 9662 | +static LIST_HEAD(ixgbe_block_cb_list);
 | 9663 | +
9576 | 9664 | static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9577 | 9665 | 			    void *type_data)
9578 | 9666 | {
 | 9667 | +	struct ixgbe_adapter *adapter = netdev_priv(dev);
 | 9668 | +
9579 | 9669 | 	switch (type) {
9580 | 9670 | 	case TC_SETUP_BLOCK:
9581 |  | -		return ixgbe_setup_tc_block(dev, type_data);
 | 9671 | +		return flow_block_cb_setup_simple(type_data,
 | 9672 | +						  &ixgbe_block_cb_list,
 | 9673 | +						  ixgbe_setup_tc_block_cb,
 | 9674 | +						  adapter, adapter, true);
9582 | 9675 | 	case TC_SETUP_QDISC_MQPRIO:
9583 | 9676 | 		return ixgbe_setup_tc_mqprio(dev, type_data);
9584 | 9677 | 	default:
.. | ..
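
[Editor's note: the hunk above retires the driver-local ixgbe_setup_tc_block() in favor of flow_block_cb_setup_simple(), which handles FLOW_BLOCK_BIND/UNBIND bookkeeping against the static ixgbe_block_cb_list; the final `true` argument requests the ingress-only binding that the deleted function checked by hand. The callback it registers is the pre-existing ixgbe_setup_tc_block_cb(), elided from this section; its shape is roughly as below (a sketch — the real function also validates chain 0 and offload capability):

	static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
					   void *cb_priv)
	{
		struct ixgbe_adapter *adapter = cb_priv;

		switch (type) {
		case TC_SETUP_CLSU32:
			return ixgbe_setup_tc_cls_u32(adapter, type_data);
		default:
			return -EOPNOTSUPP;
		}
	}

Keeping the callback list in the driver lets the flow_block core share one registration across all ixgbe netdevs.]
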
9704 | 9797 | |
---|
9705 | 9798 | netdev->features = features; |
---|
9706 | 9799 | |
---|
9707 | | - if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { |
---|
9708 | | - if (features & NETIF_F_RXCSUM) { |
---|
9709 | | - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; |
---|
9710 | | - } else { |
---|
9711 | | - u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; |
---|
9712 | | - |
---|
9713 | | - ixgbe_clear_udp_tunnel_port(adapter, port_mask); |
---|
9714 | | - } |
---|
9715 | | - } |
---|
9716 | | - |
---|
9717 | | - if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) { |
---|
9718 | | - if (features & NETIF_F_RXCSUM) { |
---|
9719 | | - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; |
---|
9720 | | - } else { |
---|
9721 | | - u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; |
---|
9722 | | - |
---|
9723 | | - ixgbe_clear_udp_tunnel_port(adapter, port_mask); |
---|
9724 | | - } |
---|
9725 | | - } |
---|
9726 | | - |
---|
9727 | 9800 | if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) |
---|
9728 | 9801 | ixgbe_reset_l2fw_offload(adapter); |
---|
9729 | 9802 | else if (need_reset) |
---|
.. | .. |
---|
9732 | 9805 | NETIF_F_HW_VLAN_CTAG_FILTER)) |
---|
9733 | 9806 | ixgbe_set_rx_mode(netdev); |
---|
9734 | 9807 | |
---|
9735 | | - return 0; |
---|
9736 | | -} |
---|
9737 | | - |
---|
9738 | | -/** |
---|
9739 | | - * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports |
---|
9740 | | - * @dev: The port's netdev |
---|
9741 | | - * @ti: Tunnel endpoint information |
---|
9742 | | - **/ |
---|
9743 | | -static void ixgbe_add_udp_tunnel_port(struct net_device *dev, |
---|
9744 | | - struct udp_tunnel_info *ti) |
---|
9745 | | -{ |
---|
9746 | | - struct ixgbe_adapter *adapter = netdev_priv(dev); |
---|
9747 | | - struct ixgbe_hw *hw = &adapter->hw; |
---|
9748 | | - __be16 port = ti->port; |
---|
9749 | | - u32 port_shift = 0; |
---|
9750 | | - u32 reg; |
---|
9751 | | - |
---|
9752 | | - if (ti->sa_family != AF_INET) |
---|
9753 | | - return; |
---|
9754 | | - |
---|
9755 | | - switch (ti->type) { |
---|
9756 | | - case UDP_TUNNEL_TYPE_VXLAN: |
---|
9757 | | - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) |
---|
9758 | | - return; |
---|
9759 | | - |
---|
9760 | | - if (adapter->vxlan_port == port) |
---|
9761 | | - return; |
---|
9762 | | - |
---|
9763 | | - if (adapter->vxlan_port) { |
---|
9764 | | - netdev_info(dev, |
---|
9765 | | - "VXLAN port %d set, not adding port %d\n", |
---|
9766 | | - ntohs(adapter->vxlan_port), |
---|
9767 | | - ntohs(port)); |
---|
9768 | | - return; |
---|
9769 | | - } |
---|
9770 | | - |
---|
9771 | | - adapter->vxlan_port = port; |
---|
9772 | | - break; |
---|
9773 | | - case UDP_TUNNEL_TYPE_GENEVE: |
---|
9774 | | - if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) |
---|
9775 | | - return; |
---|
9776 | | - |
---|
9777 | | - if (adapter->geneve_port == port) |
---|
9778 | | - return; |
---|
9779 | | - |
---|
9780 | | - if (adapter->geneve_port) { |
---|
9781 | | - netdev_info(dev, |
---|
9782 | | - "GENEVE port %d set, not adding port %d\n", |
---|
9783 | | - ntohs(adapter->geneve_port), |
---|
9784 | | - ntohs(port)); |
---|
9785 | | - return; |
---|
9786 | | - } |
---|
9787 | | - |
---|
9788 | | - port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; |
---|
9789 | | - adapter->geneve_port = port; |
---|
9790 | | - break; |
---|
9791 | | - default: |
---|
9792 | | - return; |
---|
9793 | | - } |
---|
9794 | | - |
---|
9795 | | - reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; |
---|
9796 | | - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); |
---|
9797 | | -} |
---|
9798 | | - |
---|
9799 | | -/** |
---|
9800 | | - * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports |
---|
9801 | | - * @dev: The port's netdev |
---|
9802 | | - * @ti: Tunnel endpoint information |
---|
9803 | | - **/ |
---|
9804 | | -static void ixgbe_del_udp_tunnel_port(struct net_device *dev, |
---|
9805 | | - struct udp_tunnel_info *ti) |
---|
9806 | | -{ |
---|
9807 | | - struct ixgbe_adapter *adapter = netdev_priv(dev); |
---|
9808 | | - u32 port_mask; |
---|
9809 | | - |
---|
9810 | | - if (ti->type != UDP_TUNNEL_TYPE_VXLAN && |
---|
9811 | | - ti->type != UDP_TUNNEL_TYPE_GENEVE) |
---|
9812 | | - return; |
---|
9813 | | - |
---|
9814 | | - if (ti->sa_family != AF_INET) |
---|
9815 | | - return; |
---|
9816 | | - |
---|
9817 | | - switch (ti->type) { |
---|
9818 | | - case UDP_TUNNEL_TYPE_VXLAN: |
---|
9819 | | - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) |
---|
9820 | | - return; |
---|
9821 | | - |
---|
9822 | | - if (adapter->vxlan_port != ti->port) { |
---|
9823 | | - netdev_info(dev, "VXLAN port %d not found\n", |
---|
9824 | | - ntohs(ti->port)); |
---|
9825 | | - return; |
---|
9826 | | - } |
---|
9827 | | - |
---|
9828 | | - port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; |
---|
9829 | | - break; |
---|
9830 | | - case UDP_TUNNEL_TYPE_GENEVE: |
---|
9831 | | - if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) |
---|
9832 | | - return; |
---|
9833 | | - |
---|
9834 | | - if (adapter->geneve_port != ti->port) { |
---|
9835 | | - netdev_info(dev, "GENEVE port %d not found\n", |
---|
9836 | | - ntohs(ti->port)); |
---|
9837 | | - return; |
---|
9838 | | - } |
---|
9839 | | - |
---|
9840 | | - port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; |
---|
9841 | | - break; |
---|
9842 | | - default: |
---|
9843 | | - return; |
---|
9844 | | - } |
---|
9845 | | - |
---|
9846 | | - ixgbe_clear_udp_tunnel_port(adapter, port_mask); |
---|
9847 | | - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; |
---|
| 9808 | + return 1; |
---|
9848 | 9809 | } |
---|
9849 | 9810 | |
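Editor's note: the open-coded ndo_udp_tunnel_add/del handlers deleted above are replaced by the core udp_tunnel_nic infrastructure — the driver now only declares its port tables and one sync callback, and the core handles duplicate ports, sharing rules, and replaying ports after RXCSUM toggles (which is also why the hand-rolled IXGBE_FLAG2_UDP_TUN_REREG_NEEDED block earlier in ixgbe_set_features could go). A hedged sketch of what the ixgbe_udp_tunnel_sync callback and the ixgbe_udp_tunnels_x550* tables referenced later in this patch can look like; details are illustrative, not part of this hunk:

```c
#include <net/udp_tunnel.h>

/* Sketch: one sync callback writes whatever the core holds in the
 * given table into VXLANCTRL; an unused entry reads back as port 0.
 */
static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct udp_tunnel_info ti;

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
		adapter->vxlan_port = ti.port;
	else
		adapter->geneve_port = ti.port;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL,
			ntohs(adapter->vxlan_port) |
			ntohs(adapter->geneve_port) <<
				IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
	return 0;
}

static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
	.sync_table	= ixgbe_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
	.sync_table	= ixgbe_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};
```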
---|
9850 | 9811 | static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], |
---|
9851 | 9812 | struct net_device *dev, |
---|
9852 | 9813 | const unsigned char *addr, u16 vid, |
---|
9853 | | - u16 flags) |
---|
| 9814 | + u16 flags, |
---|
| 9815 | + struct netlink_ext_ack *extack) |
---|
9854 | 9816 | { |
---|
9855 | 9817 | /* guarantee we can provide a unique filter for the unicast address */ |
---|
9856 | 9818 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { |
---|
.. | .. |
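Editor's note: ndo_fdb_add (here) and ndo_bridge_setlink (below) both gain a struct netlink_ext_ack *extack parameter from a netdev API change. This patch only updates the signatures, but the pointer lets a callback attach a human-readable error string to the netlink reply. A hedged illustration — the function name, condition, and message are invented for the example:

```c
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int example_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 vid,
			   u16 flags, struct netlink_ext_ack *extack)
{
	if (vid) {
		/* Reported back to user space alongside the errno. */
		NL_SET_ERR_MSG_MOD(extack, "VLAN-tagged FDB entries not supported");
		return -EOPNOTSUPP;
	}
	/* Fall back to the stock software FDB handling. */
	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
```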
---|
9939 | 9901 | } |
---|
9940 | 9902 | |
---|
9941 | 9903 | static int ixgbe_ndo_bridge_setlink(struct net_device *dev, |
---|
9942 | | - struct nlmsghdr *nlh, u16 flags) |
---|
| 9904 | + struct nlmsghdr *nlh, u16 flags, |
---|
| 9905 | + struct netlink_ext_ack *extack) |
---|
9943 | 9906 | { |
---|
9944 | 9907 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
---|
9945 | 9908 | struct nlattr *attr, *br_spec; |
---|
.. | .. |
---|
10133 | 10096 | if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) |
---|
10134 | 10097 | return features & ~(NETIF_F_HW_CSUM | |
---|
10135 | 10098 | NETIF_F_SCTP_CRC | |
---|
| 10099 | + NETIF_F_GSO_UDP_L4 | |
---|
10136 | 10100 | NETIF_F_HW_VLAN_CTAG_TX | |
---|
10137 | 10101 | NETIF_F_TSO | |
---|
10138 | 10102 | NETIF_F_TSO6); |
---|
.. | .. |
---|
10141 | 10105 | if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) |
---|
10142 | 10106 | return features & ~(NETIF_F_HW_CSUM | |
---|
10143 | 10107 | NETIF_F_SCTP_CRC | |
---|
| 10108 | + NETIF_F_GSO_UDP_L4 | |
---|
10144 | 10109 | NETIF_F_TSO | |
---|
10145 | 10110 | NETIF_F_TSO6); |
---|
10146 | 10111 | |
---|
.. | .. |
---|
10150 | 10115 | * the TSO, so it's the exception. |
---|
10151 | 10116 | */ |
---|
10152 | 10117 | if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { |
---|
10153 | | -#ifdef CONFIG_XFRM_OFFLOAD |
---|
10154 | | - if (!skb->sp) |
---|
| 10118 | +#ifdef CONFIG_IXGBE_IPSEC |
---|
| 10119 | + if (!secpath_exists(skb)) |
---|
10155 | 10120 | #endif |
---|
10156 | 10121 | features &= ~NETIF_F_TSO; |
---|
10157 | 10122 | } |
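Editor's note: with skb extensions, the secure path is no longer a plain skb->sp pointer, so the test moves to the secpath_exists() accessor (and the guard becomes CONFIG_IXGBE_IPSEC, the option that actually controls this driver's IPsec offload). For context, the accessor in <net/xfrm.h> has roughly this shape:

```c
/* Approximate shape of the <net/xfrm.h> helper: the secure path now
 * lives in the generic skb extension area instead of skb->sp.
 */
static inline bool secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
	return false;
#endif
}
```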
---|
.. | .. |
---|
10164 | 10129 | int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
---|
10165 | 10130 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
---|
10166 | 10131 | struct bpf_prog *old_prog; |
---|
| 10132 | + bool need_reset; |
---|
| 10133 | + int num_queues; |
---|
10167 | 10134 | |
---|
10168 | 10135 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
---|
10169 | 10136 | return -EINVAL; |
---|
.. | .. |
---|
10186 | 10153 | return -ENOMEM; |
---|
10187 | 10154 | |
---|
10188 | 10155 | old_prog = xchg(&adapter->xdp_prog, prog); |
---|
| 10156 | + need_reset = (!!prog != !!old_prog); |
---|
10189 | 10157 | |
---|
10190 | 10158 | /* If transitioning XDP modes reconfigure rings */ |
---|
10191 | | - if (!!prog != !!old_prog) { |
---|
10192 | | - int err = ixgbe_setup_tc(dev, adapter->hw_tcs); |
---|
| 10159 | + if (need_reset) { |
---|
| 10160 | + int err; |
---|
| 10161 | + |
---|
| 10162 | + if (!prog) |
---|
| 10163 | + /* Wait until ndo_xsk_wakeup completes. */ |
---|
| 10164 | + synchronize_rcu(); |
---|
| 10165 | + err = ixgbe_setup_tc(dev, adapter->hw_tcs); |
---|
10193 | 10166 | |
---|
10194 | 10167 | if (err) { |
---|
10195 | 10168 | rcu_assign_pointer(adapter->xdp_prog, old_prog); |
---|
.. | .. |
---|
10204 | 10177 | if (old_prog) |
---|
10205 | 10178 | bpf_prog_put(old_prog); |
---|
10206 | 10179 | |
---|
| 10180 | + /* Kick start the NAPI context if there is an AF_XDP socket open |
---|
| 10181 | + * on that queue id. This is so that receiving will start. |
---|
| 10182 | + */ |
---|
| 10183 | + if (need_reset && prog) { |
---|
| 10184 | + num_queues = min_t(int, adapter->num_rx_queues, |
---|
| 10185 | + adapter->num_xdp_queues); |
---|
| 10186 | + for (i = 0; i < num_queues; i++) |
---|
| 10187 | + if (adapter->xdp_ring[i]->xsk_pool) |
---|
| 10188 | + (void)ixgbe_xsk_wakeup(adapter->netdev, i, |
---|
| 10189 | + XDP_WAKEUP_RX); |
---|
| 10190 | + } |
---|
| 10191 | + |
---|
10207 | 10192 | return 0; |
---|
10208 | 10193 | } |
---|
10209 | 10194 | |
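Editor's note: the synchronize_rcu() added above pairs with the wakeup path. When the XDP program is being removed, an ndo_xsk_wakeup call still in flight may have observed the old program, so the setup path waits for those read-side sections to drain before reconfiguring the rings. A hedged sketch of the reader side (the real ixgbe_xsk_wakeup lives in ixgbe_xsk.c and performs more validation):

```c
/* Sketch of the read side that the synchronize_rcu() above waits for. */
int example_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	/* Sees either the old or the new program pointer; once
	 * synchronize_rcu() returns, nobody can still see the old one.
	 */
	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	ring = adapter->xdp_ring[qid];
	if (!ring->xsk_pool)
		return -ENXIO;

	/* If NAPI is not already running, schedule it via the IRQ path. */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		ixgbe_irq_rearm_queues(adapter, BIT_ULL(ring->q_vector->v_idx));

	return 0;
}
```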
---|
.. | .. |
---|
10214 | 10199 | switch (xdp->command) { |
---|
10215 | 10200 | case XDP_SETUP_PROG: |
---|
10216 | 10201 | return ixgbe_xdp_setup(dev, xdp->prog); |
---|
10217 | | - case XDP_QUERY_PROG: |
---|
10218 | | - xdp->prog_id = adapter->xdp_prog ? |
---|
10219 | | - adapter->xdp_prog->aux->id : 0; |
---|
10220 | | - return 0; |
---|
| 10202 | + case XDP_SETUP_XSK_POOL: |
---|
| 10203 | + return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool, |
---|
| 10204 | + xdp->xsk.queue_id); |
---|
| 10205 | + |
---|
10221 | 10206 | default: |
---|
10222 | 10207 | return -EINVAL; |
---|
10223 | 10208 | } |
---|
10224 | 10209 | } |
---|
10225 | 10210 | |
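Editor's note: XDP_QUERY_PROG disappears because the core now tracks the attached program ID itself; the new XDP_SETUP_XSK_POOL command is the entry point for AF_XDP zero-copy binds. The referenced ixgbe_xsk_pool_setup (in ixgbe_xsk.c) is essentially a dispatcher; a hedged sketch, assuming enable/disable halves named along these lines:

```c
/* Sketch: a non-NULL pool means "enable zero-copy on this queue id",
 * NULL means "tear it down".
 */
int example_xsk_pool_setup(struct ixgbe_adapter *adapter,
			   struct xsk_buff_pool *pool, u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		      ixgbe_xsk_pool_disable(adapter, qid);
}
```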
---|
10226 | | -static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) |
---|
| 10211 | +void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) |
---|
10227 | 10212 | { |
---|
10228 | 10213 | /* Force memory writes to complete before letting h/w know there |
---|
10229 | 10214 | * are new descriptors to fetch. |
---|
.. | .. |
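Editor's note: ixgbe_xdp_ring_update_tail loses its static qualifier so the AF_XDP Tx path in ixgbe_xsk.c can reuse it. The body is elided by this hunk; for reference, it follows the classic descriptor-ring tail bump, roughly:

```c
void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
{
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}
```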
---|
10251 | 10236 | */ |
---|
10252 | 10237 | ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; |
---|
10253 | 10238 | if (unlikely(!ring)) |
---|
| 10239 | + return -ENXIO; |
---|
| 10240 | + |
---|
| 10241 | + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) |
---|
10254 | 10242 | return -ENXIO; |
---|
10255 | 10243 | |
---|
10256 | 10244 | for (i = 0; i < n; i++) { |
---|
.. | .. |
---|
10309 | 10297 | .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, |
---|
10310 | 10298 | .ndo_dfwd_add_station = ixgbe_fwd_add, |
---|
10311 | 10299 | .ndo_dfwd_del_station = ixgbe_fwd_del, |
---|
10312 | | - .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, |
---|
10313 | | - .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, |
---|
| 10300 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
---|
| 10301 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
---|
10314 | 10302 | .ndo_features_check = ixgbe_features_check, |
---|
10315 | 10303 | .ndo_bpf = ixgbe_xdp, |
---|
10316 | 10304 | .ndo_xdp_xmit = ixgbe_xdp_xmit, |
---|
| 10305 | + .ndo_xsk_wakeup = ixgbe_xsk_wakeup, |
---|
10317 | 10306 | }; |
---|
| 10307 | + |
---|
| 10308 | +static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, |
---|
| 10309 | + struct ixgbe_ring *tx_ring) |
---|
| 10310 | +{ |
---|
| 10311 | + unsigned long wait_delay, delay_interval; |
---|
| 10312 | + struct ixgbe_hw *hw = &adapter->hw; |
---|
| 10313 | + u8 reg_idx = tx_ring->reg_idx; |
---|
| 10314 | + int wait_loop; |
---|
| 10315 | + u32 txdctl; |
---|
| 10316 | + |
---|
| 10317 | + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); |
---|
| 10318 | + |
---|
| 10319 | + /* delay mechanism from ixgbe_disable_tx */ |
---|
| 10320 | + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; |
---|
| 10321 | + |
---|
| 10322 | + wait_loop = IXGBE_MAX_RX_DESC_POLL; |
---|
| 10323 | + wait_delay = delay_interval; |
---|
| 10324 | + |
---|
| 10325 | + while (wait_loop--) { |
---|
| 10326 | + usleep_range(wait_delay, wait_delay + 10); |
---|
| 10327 | + wait_delay += delay_interval * 2; |
---|
| 10328 | + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
---|
| 10329 | + |
---|
| 10330 | + if (!(txdctl & IXGBE_TXDCTL_ENABLE)) |
---|
| 10331 | + return; |
---|
| 10332 | + } |
---|
| 10333 | + |
---|
| 10334 | + e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n"); |
---|
| 10335 | +} |
---|
| 10336 | + |
---|
| 10337 | +static void ixgbe_disable_txr(struct ixgbe_adapter *adapter, |
---|
| 10338 | + struct ixgbe_ring *tx_ring) |
---|
| 10339 | +{ |
---|
| 10340 | + set_bit(__IXGBE_TX_DISABLED, &tx_ring->state); |
---|
| 10341 | + ixgbe_disable_txr_hw(adapter, tx_ring); |
---|
| 10342 | +} |
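Editor's note on ordering: ixgbe_disable_txr sets the __IXGBE_TX_DISABLED bit before the hardware flush, so the check added to ixgbe_xdp_xmit earlier in this patch stops XDP redirect traffic from targeting the ring while TXDCTL is still draining.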
---|
| 10343 | + |
---|
| 10344 | +static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter, |
---|
| 10345 | + struct ixgbe_ring *rx_ring) |
---|
| 10346 | +{ |
---|
| 10347 | + unsigned long wait_delay, delay_interval; |
---|
| 10348 | + struct ixgbe_hw *hw = &adapter->hw; |
---|
| 10349 | + u8 reg_idx = rx_ring->reg_idx; |
---|
| 10350 | + int wait_loop; |
---|
| 10351 | + u32 rxdctl; |
---|
| 10352 | + |
---|
| 10353 | + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
---|
| 10354 | + rxdctl &= ~IXGBE_RXDCTL_ENABLE; |
---|
| 10355 | + rxdctl |= IXGBE_RXDCTL_SWFLSH; |
---|
| 10356 | + |
---|
| 10357 | + /* write value back with RXDCTL.ENABLE bit cleared */ |
---|
| 10358 | + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); |
---|
| 10359 | + |
---|
| 10360 | + /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ |
---|
| 10361 | + if (hw->mac.type == ixgbe_mac_82598EB && |
---|
| 10362 | + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) |
---|
| 10363 | + return; |
---|
| 10364 | + |
---|
| 10365 | + /* delay mechanism from ixgbe_disable_rx */ |
---|
| 10366 | + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; |
---|
| 10367 | + |
---|
| 10368 | + wait_loop = IXGBE_MAX_RX_DESC_POLL; |
---|
| 10369 | + wait_delay = delay_interval; |
---|
| 10370 | + |
---|
| 10371 | + while (wait_loop--) { |
---|
| 10372 | + usleep_range(wait_delay, wait_delay + 10); |
---|
| 10373 | + wait_delay += delay_interval * 2; |
---|
| 10374 | + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
---|
| 10375 | + |
---|
| 10376 | + if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) |
---|
| 10377 | + return; |
---|
| 10378 | + } |
---|
| 10379 | + |
---|
| 10380 | + e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n"); |
---|
| 10381 | +} |
---|
| 10382 | + |
---|
| 10383 | +static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring) |
---|
| 10384 | +{ |
---|
| 10385 | + memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); |
---|
| 10386 | + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); |
---|
| 10387 | +} |
---|
| 10388 | + |
---|
| 10389 | +static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) |
---|
| 10390 | +{ |
---|
| 10391 | + memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); |
---|
| 10392 | + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); |
---|
| 10393 | +} |
---|
| 10394 | + |
---|
| 10395 | +/** |
---|
| 10396 | + * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings |
---|
| 10397 | + * @adapter: adapter structure |
---|
| 10398 | + * @ring: ring index |
---|
| 10399 | + * |
---|
| 10400 | + * This function disables the specified Rx/Tx/XDP Tx ring. The function |
---|
| 10401 | + * assumes that the netdev is running. |
---|
| 10402 | + **/ |
---|
| 10403 | +void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) |
---|
| 10404 | +{ |
---|
| 10405 | + struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; |
---|
| 10406 | + |
---|
| 10407 | + rx_ring = adapter->rx_ring[ring]; |
---|
| 10408 | + tx_ring = adapter->tx_ring[ring]; |
---|
| 10409 | + xdp_ring = adapter->xdp_ring[ring]; |
---|
| 10410 | + |
---|
| 10411 | + ixgbe_disable_txr(adapter, tx_ring); |
---|
| 10412 | + if (xdp_ring) |
---|
| 10413 | + ixgbe_disable_txr(adapter, xdp_ring); |
---|
| 10414 | + ixgbe_disable_rxr_hw(adapter, rx_ring); |
---|
| 10415 | + |
---|
| 10416 | + if (xdp_ring) |
---|
| 10417 | + synchronize_rcu(); |
---|
| 10418 | + |
---|
| 10419 | + /* Rx/Tx/XDP Tx share the same napi context. */ |
---|
| 10420 | + napi_disable(&rx_ring->q_vector->napi); |
---|
| 10421 | + |
---|
| 10422 | + ixgbe_clean_tx_ring(tx_ring); |
---|
| 10423 | + if (xdp_ring) |
---|
| 10424 | + ixgbe_clean_tx_ring(xdp_ring); |
---|
| 10425 | + ixgbe_clean_rx_ring(rx_ring); |
---|
| 10426 | + |
---|
| 10427 | + ixgbe_reset_txr_stats(tx_ring); |
---|
| 10428 | + if (xdp_ring) |
---|
| 10429 | + ixgbe_reset_txr_stats(xdp_ring); |
---|
| 10430 | + ixgbe_reset_rxr_stats(rx_ring); |
---|
| 10431 | +} |
---|
| 10432 | + |
---|
| 10433 | +/** |
---|
| 10434 | + * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings |
---|
| 10435 | + * @adapter: adapter structure |
---|
| 10436 | + * @ring: ring index |
---|
| 10437 | + * |
---|
| 10438 | + * This function enables the specified Rx/Tx/XDP Tx ring. The function |
---|
| 10439 | + * assumes that the netdev is running. |
---|
| 10440 | + **/ |
---|
| 10441 | +void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) |
---|
| 10442 | +{ |
---|
| 10443 | + struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; |
---|
| 10444 | + |
---|
| 10445 | + rx_ring = adapter->rx_ring[ring]; |
---|
| 10446 | + tx_ring = adapter->tx_ring[ring]; |
---|
| 10447 | + xdp_ring = adapter->xdp_ring[ring]; |
---|
| 10448 | + |
---|
| 10449 | + /* Rx/Tx/XDP Tx share the same napi context. */ |
---|
| 10450 | + napi_enable(&rx_ring->q_vector->napi); |
---|
| 10451 | + |
---|
| 10452 | + ixgbe_configure_tx_ring(adapter, tx_ring); |
---|
| 10453 | + if (xdp_ring) |
---|
| 10454 | + ixgbe_configure_tx_ring(adapter, xdp_ring); |
---|
| 10455 | + ixgbe_configure_rx_ring(adapter, rx_ring); |
---|
| 10456 | + |
---|
| 10457 | + clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); |
---|
| 10458 | + if (xdp_ring) |
---|
| 10459 | + clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); |
---|
| 10460 | +} |
---|
10318 | 10461 | |
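Editor's note: these enable/disable helpers exist so the AF_XDP zero-copy setup path can quiesce a single queue triple instead of bouncing the whole interface. A simplified, hedged sketch of how the pool-enable path in ixgbe_xsk.c pairs them — the real_num_*_queues checks, error unwinding, and DMA unmap path are omitted, IXGBE_RX_DMA_ATTR is the driver's Rx DMA attribute mask, and the READ_ONCE(adapter->xdp_prog) test stands in for the driver's own "is XDP active" check:

```c
#include <net/xdp_sock_drv.h>

static int example_xsk_pool_enable(struct ixgbe_adapter *adapter,
				   struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     READ_ONCE(adapter->xdp_prog);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	/* Mark the queue pair as zero-copy; ring setup consults this. */
	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);
		/* Kick the NAPI context so receive processing starts. */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
	}

	return err;
}
```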
---|
10319 | 10462 | /** |
---|
10320 | 10463 | * ixgbe_enumerate_functions - Get the number of ports this device has |
---|
.. | .. |
---|
10398 | 10541 | /* only support first port */ |
---|
10399 | 10542 | if (hw->bus.func != 0) |
---|
10400 | 10543 | break; |
---|
10401 | | - /* fall through */ |
---|
| 10544 | + fallthrough; |
---|
10402 | 10545 | case IXGBE_SUBDEV_ID_82599_SP_560FLR: |
---|
10403 | 10546 | case IXGBE_SUBDEV_ID_82599_SFP: |
---|
10404 | 10547 | case IXGBE_SUBDEV_ID_82599_RNDC: |
---|
.. | .. |
---|
10600 | 10743 | if (err) |
---|
10601 | 10744 | goto err_sw_init; |
---|
10602 | 10745 | |
---|
| 10746 | + switch (adapter->hw.mac.type) { |
---|
| 10747 | + case ixgbe_mac_X550: |
---|
| 10748 | + case ixgbe_mac_X550EM_x: |
---|
| 10749 | + netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550; |
---|
| 10750 | + break; |
---|
| 10751 | + case ixgbe_mac_x550em_a: |
---|
| 10752 | + netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a; |
---|
| 10753 | + break; |
---|
| 10754 | + default: |
---|
| 10755 | + break; |
---|
| 10756 | + } |
---|
| 10757 | + |
---|
10603 | 10758 | /* Make sure the SWFW semaphore is in a valid state */ |
---|
10604 | 10759 | if (hw->mac.ops.init_swfw_sync) |
---|
10605 | 10760 | hw->mac.ops.init_swfw_sync(hw); |
---|
.. | .. |
---|
10677 | 10832 | IXGBE_GSO_PARTIAL_FEATURES; |
---|
10678 | 10833 | |
---|
10679 | 10834 | if (hw->mac.type >= ixgbe_mac_82599EB) |
---|
10680 | | - netdev->features |= NETIF_F_SCTP_CRC; |
---|
| 10835 | + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; |
---|
10681 | 10836 | |
---|
10682 | | -#ifdef CONFIG_XFRM_OFFLOAD |
---|
| 10837 | +#ifdef CONFIG_IXGBE_IPSEC |
---|
10683 | 10838 | #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ |
---|
10684 | 10839 | NETIF_F_HW_ESP_TX_CSUM | \ |
---|
10685 | 10840 | NETIF_F_GSO_ESP) |
---|
.. | .. |
---|
10753 | 10908 | netdev->hw_features |= NETIF_F_LRO; |
---|
10754 | 10909 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
---|
10755 | 10910 | netdev->features |= NETIF_F_LRO; |
---|
| 10911 | + |
---|
| 10912 | + if (ixgbe_check_fw_error(adapter)) { |
---|
| 10913 | + err = -EIO; |
---|
| 10914 | + goto err_sw_init; |
---|
| 10915 | + } |
---|
10756 | 10916 | |
---|
10757 | 10917 | /* make sure the EEPROM is good */ |
---|
10758 | 10918 | if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { |
---|
.. | .. |
---|
10889 | 11049 | */ |
---|
10890 | 11050 | if (hw->mac.ops.set_fw_drv_ver) |
---|
10891 | 11051 | hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, |
---|
10892 | | - sizeof(ixgbe_driver_version) - 1, |
---|
10893 | | - ixgbe_driver_version); |
---|
| 11052 | + sizeof(UTS_RELEASE) - 1, |
---|
| 11053 | + UTS_RELEASE); |
---|
10894 | 11054 | |
---|
10895 | 11055 | /* add san mac addr to netdev */ |
---|
10896 | 11056 | ixgbe_add_sanmac_netdev(netdev); |
---|
.. | .. |
---|
10910 | 11070 | IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, |
---|
10911 | 11071 | true); |
---|
10912 | 11072 | |
---|
| 11073 | + err = ixgbe_mii_bus_init(hw); |
---|
| 11074 | + if (err) |
---|
| 11075 | + goto err_netdev; |
---|
| 11076 | + |
---|
10913 | 11077 | return 0; |
---|
10914 | 11078 | |
---|
| 11079 | +err_netdev: |
---|
| 11080 | + unregister_netdev(netdev); |
---|
10915 | 11081 | err_register: |
---|
10916 | 11082 | ixgbe_release_hw_control(adapter); |
---|
10917 | 11083 | ixgbe_clear_interrupt_scheme(adapter); |
---|
.. | .. |
---|
10922 | 11088 | kfree(adapter->jump_tables[0]); |
---|
10923 | 11089 | kfree(adapter->mac_table); |
---|
10924 | 11090 | kfree(adapter->rss_key); |
---|
| 11091 | + bitmap_free(adapter->af_xdp_zc_qps); |
---|
10925 | 11092 | err_ioremap: |
---|
10926 | 11093 | disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); |
---|
10927 | 11094 | free_netdev(netdev); |
---|
.. | .. |
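Editor's note: both unwind paths now free adapter->af_xdp_zc_qps, the per-queue bitmap tracking which queue pairs run in AF_XDP zero-copy mode. Its allocation is outside this hunk; presumably it happens during ixgbe_sw_init(), along these lines (placement and the MAX_XDP_QUEUES bound from ixgbe.h are assumptions):

```c
	/* In ixgbe_sw_init(), alongside the mac_table/rss_key
	 * allocations (sketch; not shown in this diff):
	 */
	adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
	if (!adapter->af_xdp_zc_qps)
		return -ENOMEM;
```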
---|
10961 | 11128 | set_bit(__IXGBE_REMOVING, &adapter->state); |
---|
10962 | 11129 | cancel_work_sync(&adapter->service_task); |
---|
10963 | 11130 | |
---|
| 11131 | + if (adapter->mii_bus) |
---|
| 11132 | + mdiobus_unregister(adapter->mii_bus); |
---|
10964 | 11133 | |
---|
10965 | 11134 | #ifdef CONFIG_IXGBE_DCA |
---|
10966 | 11135 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
---|
.. | .. |
---|
11009 | 11178 | |
---|
11010 | 11179 | kfree(adapter->mac_table); |
---|
11011 | 11180 | kfree(adapter->rss_key); |
---|
| 11181 | + bitmap_free(adapter->af_xdp_zc_qps); |
---|
11012 | 11182 | disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); |
---|
11013 | 11183 | free_netdev(netdev); |
---|
11014 | 11184 | |
---|
.. | .. |
---|
11114 | 11284 | /* Free device reference count */ |
---|
11115 | 11285 | pci_dev_put(vfdev); |
---|
11116 | 11286 | } |
---|
11117 | | - |
---|
11118 | | - pci_cleanup_aer_uncorrect_error_status(pdev); |
---|
11119 | 11287 | } |
---|
11120 | 11288 | |
---|
11121 | 11289 | /* |
---|
.. | .. |
---|
11165 | 11333 | { |
---|
11166 | 11334 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
---|
11167 | 11335 | pci_ers_result_t result; |
---|
11168 | | - int err; |
---|
11169 | 11336 | |
---|
11170 | 11337 | if (pci_enable_device_mem(pdev)) { |
---|
11171 | 11338 | e_err(probe, "Cannot re-enable PCI device after reset.\n"); |
---|
.. | .. |
---|
11183 | 11350 | ixgbe_reset(adapter); |
---|
11184 | 11351 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
---|
11185 | 11352 | result = PCI_ERS_RESULT_RECOVERED; |
---|
11186 | | - } |
---|
11187 | | - |
---|
11188 | | - err = pci_cleanup_aer_uncorrect_error_status(pdev); |
---|
11189 | | - if (err) { |
---|
11190 | | - e_dev_err("pci_cleanup_aer_uncorrect_error_status " |
---|
11191 | | - "failed 0x%0x\n", err); |
---|
11192 | | - /* non-fatal, continue */ |
---|
11193 | 11353 | } |
---|
11194 | 11354 | |
---|
11195 | 11355 | return result; |
---|
.. | .. |
---|
11229 | 11389 | .resume = ixgbe_io_resume, |
---|
11230 | 11390 | }; |
---|
11231 | 11391 | |
---|
| 11392 | +static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume); |
---|
| 11393 | + |
---|
11232 | 11394 | static struct pci_driver ixgbe_driver = { |
---|
11233 | | - .name = ixgbe_driver_name, |
---|
11234 | | - .id_table = ixgbe_pci_tbl, |
---|
11235 | | - .probe = ixgbe_probe, |
---|
11236 | | - .remove = ixgbe_remove, |
---|
11237 | | -#ifdef CONFIG_PM |
---|
11238 | | - .suspend = ixgbe_suspend, |
---|
11239 | | - .resume = ixgbe_resume, |
---|
11240 | | -#endif |
---|
11241 | | - .shutdown = ixgbe_shutdown, |
---|
| 11395 | + .name = ixgbe_driver_name, |
---|
| 11396 | + .id_table = ixgbe_pci_tbl, |
---|
| 11397 | + .probe = ixgbe_probe, |
---|
| 11398 | + .remove = ixgbe_remove, |
---|
| 11399 | + .driver.pm = &ixgbe_pm_ops, |
---|
| 11400 | + .shutdown = ixgbe_shutdown, |
---|
11242 | 11401 | .sriov_configure = ixgbe_pci_sriov_configure, |
---|
11243 | 11402 | .err_handler = &ixgbe_err_handler |
---|
11244 | 11403 | }; |
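Editor's note: the legacy PCI .suspend/.resume hooks and their CONFIG_PM guard are replaced by a dev_pm_ops table wired through driver.pm, the generic power-management interface (ixgbe_suspend/ixgbe_resume take a struct device * under this scheme). For orientation, SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume) expands to roughly the following — and to an empty table when CONFIG_PM_SLEEP is off:

```c
static const struct dev_pm_ops ixgbe_pm_ops = {
	/* SET_SYSTEM_SLEEP_PM_OPS() reuses one suspend/resume pair for
	 * every system-sleep transition.
	 */
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
	.freeze   = ixgbe_suspend,
	.thaw     = ixgbe_resume,
	.poweroff = ixgbe_suspend,
	.restore  = ixgbe_resume,
};
```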
---|
.. | .. |
---|
11252 | 11411 | static int __init ixgbe_init_module(void) |
---|
11253 | 11412 | { |
---|
11254 | 11413 | int ret; |
---|
11255 | | - pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); |
---|
| 11414 | + pr_info("%s\n", ixgbe_driver_string); |
---|
11256 | 11415 | pr_info("%s\n", ixgbe_copyright); |
---|
11257 | 11416 | |
---|
11258 | 11417 | ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); |
---|