2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
....@@ -27,6 +27,8 @@
2727 #include <linux/bpf.h>
2828 #include <linux/bpf_trace.h>
2929 #include <linux/atomic.h>
30
+#include <linux/numa.h>
31
+#include <generated/utsrelease.h>
3032 #include <scsi/fc/fc_fcoe.h>
3133 #include <net/udp_tunnel.h>
3234 #include <net/pkt_cls.h>
....@@ -34,13 +36,16 @@
3436 #include <net/tc_act/tc_mirred.h>
3537 #include <net/vxlan.h>
3638 #include <net/mpls.h>
39
+#include <net/xdp_sock_drv.h>
3740 #include <net/xfrm.h>
3841
3942 #include "ixgbe.h"
4043 #include "ixgbe_common.h"
4144 #include "ixgbe_dcb_82599.h"
45
+#include "ixgbe_phy.h"
4246 #include "ixgbe_sriov.h"
4347 #include "ixgbe_model.h"
48
+#include "ixgbe_txrx_common.h"
4449
4550 char ixgbe_driver_name[] = "ixgbe";
4651 static const char ixgbe_driver_string[] =
....@@ -52,8 +57,6 @@
5257 static char ixgbe_default_device_descr[] =
5358 "Intel(R) 10 Gigabit Network Connection";
5459 #endif
55
-#define DRV_VERSION "5.1.0-k"
56
-const char ixgbe_driver_version[] = DRV_VERSION;
5760 static const char ixgbe_copyright[] =
5861 "Copyright (c) 1999-2016 Intel Corporation.";
5962
....@@ -160,8 +163,7 @@
160163
161164 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
162165 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
163
-MODULE_LICENSE("GPL");
164
-MODULE_VERSION(DRV_VERSION);
166
+MODULE_LICENSE("GPL v2");
165167
166168 static struct workqueue_struct *ixgbe_wq;
167169
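
This hunk retires the driver-private version: DRV_VERSION and MODULE_VERSION are gone, MODULE_LICENSE is tightened to "GPL v2" to match the file's GPL-2.0 SPDX tag, and <generated/utsrelease.h> supplies the kernel release instead (used for the firmware driver-version handshake near the end of this patch). A minimal sketch of the substitution; the function name is illustrative, not driver code:

    #include <generated/utsrelease.h>	/* UTS_RELEASE, e.g. "5.10.0" */
    #include <linux/printk.h>

    static void example_report_version(void)
    {
        /* the driver now identifies itself by the kernel it ships in */
        pr_info("ixgbe: kernel %s\n", UTS_RELEASE);
    }
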
....@@ -894,8 +896,8 @@
894896 }
895897 }
896898
897
-static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
898
- u64 qmask)
899
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
900
+ u64 qmask)
899901 {
900902 u32 mask;
901903
....@@ -1393,7 +1395,7 @@
13931395 IXGBE_DCA_CTRL_DCA_MODE_CB2);
13941396 break;
13951397 }
1396
- /* fall through - DCA is disabled. */
1398
+ fallthrough; /* DCA is disabled. */
13971399 case DCA_PROVIDER_REMOVE:
13981400 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
13991401 dca_remove_requester(dev);
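
This is the first of many conversions in this patch from /* fall through */ comments to the fallthrough pseudo-keyword (<linux/compiler_attributes.h>), which expands to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough can verify the annotation instead of pattern-matching comments. Pattern sketch; the identifiers are illustrative:

    static int example_classify(int code)
    {
        switch (code) {
        case 1:
            code += 10;
            fallthrough;	/* deliberate: continue into case 2 */
        case 2:
            return code + 1;
        default:
            return 0;
        }
    }
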
....@@ -1674,9 +1676,9 @@
16741676 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
16751677 * other fields within the skb.
16761678 **/
1677
-static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1678
- union ixgbe_adv_rx_desc *rx_desc,
1679
- struct sk_buff *skb)
1679
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1680
+ union ixgbe_adv_rx_desc *rx_desc,
1681
+ struct sk_buff *skb)
16801682 {
16811683 struct net_device *dev = rx_ring->netdev;
16821684 u32 flags = rx_ring->q_vector->adapter->flags;
....@@ -1709,8 +1711,8 @@
17091711 skb->protocol = eth_type_trans(skb, dev);
17101712 }
17111713
1712
-static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1713
- struct sk_buff *skb)
1714
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1715
+ struct sk_buff *skb)
17141716 {
17151717 napi_gro_receive(&q_vector->napi, skb);
17161718 }
....@@ -1782,7 +1784,7 @@
17821784 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
17831785 struct sk_buff *skb)
17841786 {
1785
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1787
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
17861788 unsigned char *va;
17871789 unsigned int pull_len;
17881790
....@@ -1797,14 +1799,14 @@
17971799 * we need the header to contain the greater of either ETH_HLEN or
17981800 * 60 bytes if the skb->len is less than 60 for skb_pad.
17991801 */
1800
- pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1802
+ pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
18011803
18021804 /* align pull length to size of long to optimize memcpy performance */
18031805 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
18041806
18051807 /* update all of the pointers */
18061808 skb_frag_size_sub(frag, pull_len);
1807
- frag->page_offset += pull_len;
1809
+ skb_frag_off_add(frag, pull_len);
18081810 skb->data_len -= pull_len;
18091811 skb->tail += pull_len;
18101812 }
....@@ -1832,11 +1834,11 @@
18321834 skb_headlen(skb),
18331835 DMA_FROM_DEVICE);
18341836 } else {
1835
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1837
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
18361838
18371839 dma_sync_single_range_for_cpu(rx_ring->dev,
18381840 IXGBE_CB(skb)->dma,
1839
- frag->page_offset,
1841
+ skb_frag_off(frag),
18401842 skb_frag_size(frag),
18411843 DMA_FROM_DEVICE);
18421844 }
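
Both hunks above track core API changes rather than driver logic: eth_get_headlen() grew a net_device argument for flow dissection, and skb_frag_t's page_offset field is now reached only through accessors (skb_frag_off(), skb_frag_off_add()). A hedged sketch combining the two; the 256-byte cap is illustrative:

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    static unsigned int example_pull_header(struct sk_buff *skb)
    {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        void *va = skb_frag_address(frag);
        unsigned int len = eth_get_headlen(skb->dev, va, 256);

        skb_frag_size_sub(frag, len);
        skb_frag_off_add(frag, len);	/* was: frag->page_offset += len */
        return len;
    }
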
....@@ -1872,9 +1874,9 @@
18721874 *
18731875 * Returns true if an error was encountered and skb was freed.
18741876 **/
1875
-static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1876
- union ixgbe_adv_rx_desc *rx_desc,
1877
- struct sk_buff *skb)
1877
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1878
+ union ixgbe_adv_rx_desc *rx_desc,
1879
+ struct sk_buff *skb)
18781880 {
18791881 struct net_device *netdev = rx_ring->netdev;
18801882
....@@ -2103,10 +2105,8 @@
21032105 struct sk_buff *skb;
21042106
21052107 /* prefetch first cache line of first page */
2106
- prefetch(xdp->data);
2107
-#if L1_CACHE_BYTES < 128
2108
- prefetch(xdp->data + L1_CACHE_BYTES);
2109
-#endif
2108
+ net_prefetch(xdp->data);
2109
+
21102110 /* Note, we get here by enabling legacy-rx via:
21112111 *
21122112 * ethtool --set-priv-flags <dev> legacy-rx on
....@@ -2169,10 +2169,7 @@
21692169 * likely have a consumer accessing first few bytes of meta
21702170 * data, and then actual data.
21712171 */
2172
- prefetch(xdp->data_meta);
2173
-#if L1_CACHE_BYTES < 128
2174
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
2175
-#endif
2172
+ net_prefetch(xdp->data_meta);
21762173
21772174 /* build an skb to around the page buffer */
21782175 skb = build_skb(xdp->data_hard_start, truesize);
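
net_prefetch() in <linux/netdevice.h> folds away the open-coded pair of prefetches in both hunks above; its shape is roughly the following (paraphrased from the helper as of this kernel, not part of this patch):

    static inline void net_prefetch_sketch(void *p)
    {
        prefetch(p);
    #if L1_CACHE_BYTES < 128
        /* headers usually span two cache lines on 64-byte machines */
        prefetch((u8 *)p + L1_CACHE_BYTES);
    #endif
    }
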
....@@ -2199,14 +2196,6 @@
21992196 return skb;
22002197 }
22012198
2202
-#define IXGBE_XDP_PASS 0
2203
-#define IXGBE_XDP_CONSUMED BIT(0)
2204
-#define IXGBE_XDP_TX BIT(1)
2205
-#define IXGBE_XDP_REDIR BIT(2)
2206
-
2207
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
2208
- struct xdp_frame *xdpf);
2209
-
22102199 static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
22112200 struct ixgbe_ring *rx_ring,
22122201 struct xdp_buff *xdp)
....@@ -2229,26 +2218,26 @@
22292218 case XDP_PASS:
22302219 break;
22312220 case XDP_TX:
2232
- xdpf = convert_to_xdp_frame(xdp);
2233
- if (unlikely(!xdpf)) {
2234
- result = IXGBE_XDP_CONSUMED;
2235
- break;
2236
- }
2221
+ xdpf = xdp_convert_buff_to_frame(xdp);
2222
+ if (unlikely(!xdpf))
2223
+ goto out_failure;
22372224 result = ixgbe_xmit_xdp_ring(adapter, xdpf);
2225
+ if (result == IXGBE_XDP_CONSUMED)
2226
+ goto out_failure;
22382227 break;
22392228 case XDP_REDIRECT:
22402229 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2241
- if (!err)
2242
- result = IXGBE_XDP_REDIR;
2243
- else
2244
- result = IXGBE_XDP_CONSUMED;
2230
+ if (err)
2231
+ goto out_failure;
2232
+ result = IXGBE_XDP_REDIR;
22452233 break;
22462234 default:
22472235 bpf_warn_invalid_xdp_action(act);
2248
- /* fallthrough */
2236
+ fallthrough;
22492237 case XDP_ABORTED:
2238
+out_failure:
22502239 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2251
- /* fallthrough -- handle aborts by dropping packet */
2240
+ fallthrough; /* handle aborts by dropping packet */
22522241 case XDP_DROP:
22532242 result = IXGBE_XDP_CONSUMED;
22542243 break;
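
xdp_convert_buff_to_frame() is the renamed convert_to_xdp_frame(); it returns NULL when the buffer cannot host the frame metadata. The rewrite also funnels every failing verdict through the new out_failure label, so trace_xdp_exception() now fires for failed XDP_TX and XDP_REDIRECT as well, not only XDP_ABORTED. Usage sketch:

    struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

    if (unlikely(!xdpf))
        goto out_failure;	/* traced like an aborted program */
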
....@@ -2258,20 +2247,30 @@
22582247 return ERR_PTR(-result);
22592248 }
22602249
2250
+static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
2251
+ unsigned int size)
2252
+{
2253
+ unsigned int truesize;
2254
+
2255
+#if (PAGE_SIZE < 8192)
2256
+ truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
2257
+#else
2258
+ truesize = ring_uses_build_skb(rx_ring) ?
2259
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
2260
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2261
+ SKB_DATA_ALIGN(size);
2262
+#endif
2263
+ return truesize;
2264
+}
2265
+
22612266 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
22622267 struct ixgbe_rx_buffer *rx_buffer,
22632268 unsigned int size)
22642269 {
2270
+ unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
22652271 #if (PAGE_SIZE < 8192)
2266
- unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2267
-
22682272 rx_buffer->page_offset ^= truesize;
22692273 #else
2270
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2271
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
2272
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2273
- SKB_DATA_ALIGN(size);
2274
-
22752274 rx_buffer->page_offset += truesize;
22762275 #endif
22772276 }
....@@ -2304,6 +2303,11 @@
23042303 struct xdp_buff xdp;
23052304
23062305 xdp.rxq = &rx_ring->xdp_rxq;
2306
+
2307
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
2308
+#if (PAGE_SIZE < 8192)
2309
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
2310
+#endif
23072311
23082312 while (likely(total_rx_packets < budget)) {
23092313 union ixgbe_adv_rx_desc *rx_desc;
....@@ -2339,7 +2343,10 @@
23392343 xdp.data_hard_start = xdp.data -
23402344 ixgbe_rx_offset(rx_ring);
23412345 xdp.data_end = xdp.data + size;
2342
-
2346
+#if (PAGE_SIZE > 4096)
2347
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
2348
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
2349
+#endif
23432350 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
23442351 }
23452352
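
xdp.frame_sz tells the XDP core (bpf_xdp_adjust_tail() and friends) how much of the buffer a frame may legally grow into. A worked example of the two code paths above, assuming the truesize helper introduced earlier:

    /* PAGE_SIZE == 4096: buffers are fixed half pages, so frame_sz is a
     * ring constant and is set once before the while loop:
     *
     *     ixgbe_rx_frame_truesize(rx_ring, 0) == PAGE_SIZE / 2 == 2048
     *
     * Larger pages: truesize varies with the received length, so it is
     * recomputed per descriptor; e.g. a 1500-byte frame with build_skb:
     *
     *     SKB_DATA_ALIGN(IXGBE_SKB_PAD + 1500) +
     *     SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
     */
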
....@@ -2970,35 +2977,6 @@
29702977 /* skip the flush */
29712978 }
29722979
2973
-static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2974
- u64 qmask)
2975
-{
2976
- u32 mask;
2977
- struct ixgbe_hw *hw = &adapter->hw;
2978
-
2979
- switch (hw->mac.type) {
2980
- case ixgbe_mac_82598EB:
2981
- mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2982
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2983
- break;
2984
- case ixgbe_mac_82599EB:
2985
- case ixgbe_mac_X540:
2986
- case ixgbe_mac_X550:
2987
- case ixgbe_mac_X550EM_x:
2988
- case ixgbe_mac_x550em_a:
2989
- mask = (qmask & 0xFFFFFFFF);
2990
- if (mask)
2991
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2992
- mask = (qmask >> 32);
2993
- if (mask)
2994
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2995
- break;
2996
- default:
2997
- break;
2998
- }
2999
- /* skip the flush */
3000
-}
3001
-
30022980 /**
30032981 * ixgbe_irq_enable - Enable default interrupt generation settings
30042982 * @adapter: board private structure
....@@ -3035,7 +3013,7 @@
30353013 case ixgbe_mac_82599EB:
30363014 mask |= IXGBE_EIMS_GPI_SDP1(hw);
30373015 mask |= IXGBE_EIMS_GPI_SDP2(hw);
3038
- /* fall through */
3016
+ fallthrough;
30393017 case ixgbe_mac_X540:
30403018 case ixgbe_mac_X550:
30413019 case ixgbe_mac_X550EM_x:
....@@ -3184,7 +3162,11 @@
31843162 #endif
31853163
31863164 ixgbe_for_each_ring(ring, q_vector->tx) {
3187
- if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
3165
+ bool wd = ring->xsk_pool ?
3166
+ ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3167
+ ixgbe_clean_tx_irq(q_vector, ring, budget);
3168
+
3169
+ if (!wd)
31883170 clean_complete = false;
31893171 }
31903172
....@@ -3200,7 +3182,10 @@
32003182 per_ring_budget = budget;
32013183
32023184 ixgbe_for_each_ring(ring, q_vector->rx) {
3203
- int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
3185
+ int cleaned = ring->xsk_pool ?
3186
+ ixgbe_clean_rx_irq_zc(q_vector, ring,
3187
+ per_ring_budget) :
3188
+ ixgbe_clean_rx_irq(q_vector, ring,
32043189 per_ring_budget);
32053190
32063191 work_done += cleaned;
....@@ -3334,7 +3319,7 @@
33343319 switch (hw->mac.type) {
33353320 case ixgbe_mac_82599EB:
33363321 ixgbe_check_sfp_event(adapter, eicr);
3337
- /* Fall through */
3322
+ fallthrough;
33383323 case ixgbe_mac_X540:
33393324 case ixgbe_mac_X550:
33403325 case ixgbe_mac_X550EM_x:
....@@ -3491,6 +3476,10 @@
34913476 int wait_loop = 10;
34923477 u32 txdctl = IXGBE_TXDCTL_ENABLE;
34933478 u8 reg_idx = ring->reg_idx;
3479
+
3480
+ ring->xsk_pool = NULL;
3481
+ if (ring_is_xdp(ring))
3482
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
34943483
34953484 /* disable queue to avoid issues while updating state */
34963485 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
....@@ -3730,10 +3719,26 @@
37303719 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
37313720
37323721 /* configure the packet buffer length */
3733
- if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
3722
+ if (rx_ring->xsk_pool) {
3723
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
3724
+
3725
+ /* If the MAC supports setting RXDCTL.RLPML, the
3726
+ * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
3727
+ * RXDCTL.RLPML is set to the actual UMEM buffer
3728
+ * size. If not, then we are stuck with a 1k buffer
3729
+ * size resolution. In this case frames larger than
3730
+ * the UMEM buffer size viewed in a 1k resolution will
3731
+ * be dropped.
3732
+ */
3733
+ if (hw->mac.type != ixgbe_mac_82599EB)
3734
+ srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3735
+ else
3736
+ srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3737
+ } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
37343738 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3735
- else
3739
+ } else {
37363740 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3741
+ }
37373742
37383743 /* configure descriptor type */
37393744 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
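
Worked example of the buffer-size programming above, assuming 4 KiB pages and a 3072-byte XSK frame (values illustrative): SRRCTL.BSIZEPKT counts 1 KiB units, while RXDCTL.RLPML (written in a later hunk) limits packet length in bytes, which is why RLPML-capable MACs can advertise a full page here:

    u32 srrctl = 0, rxdctl = 0;
    u32 xsk_buf_len = 3072;	/* xsk_pool_get_rx_frame_size() */

    /* non-82599: page-sized BSIZEPKT, exact byte limit via RLPML */
    srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;	/* 4096 >> 10 = 4 */
    rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
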
....@@ -4059,6 +4064,18 @@
40594064 u32 rxdctl;
40604065 u8 reg_idx = ring->reg_idx;
40614066
4067
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4068
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
4069
+ if (ring->xsk_pool) {
4070
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4071
+ MEM_TYPE_XSK_BUFF_POOL,
4072
+ NULL));
4073
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
4074
+ } else {
4075
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4076
+ MEM_TYPE_PAGE_SHARED, NULL));
4077
+ }
4078
+
40624079 /* disable queue to avoid use of these values while updating state */
40634080 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
40644081 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
....@@ -4108,6 +4125,16 @@
41084125 #endif
41094126 }
41104127
4128
+ if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
4129
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
4130
+
4131
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4132
+ IXGBE_RXDCTL_RLPML_EN);
4133
+ rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4134
+
4135
+ ring->rx_buf_len = xsk_buf_len;
4136
+ }
4137
+
41114138 /* initialize rx_buffer_info */
41124139 memset(ring->rx_buffer_info, 0,
41134140 sizeof(struct ixgbe_rx_buffer) * ring->count);
....@@ -4121,7 +4148,10 @@
41214148 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
41224149
41234150 ixgbe_rx_desc_queue_enable(adapter, ring);
4124
- ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4151
+ if (ring->xsk_pool)
4152
+ ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4153
+ else
4154
+ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
41254155 }
41264156
41274157 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
....@@ -4271,7 +4301,6 @@
42714301 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
42724302 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
42734303
4274
- clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
42754304 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
42764305 continue;
42774306
....@@ -4312,7 +4341,7 @@
43124341 case ixgbe_mac_x550em_a:
43134342 if (adapter->num_vfs)
43144343 rdrxctl |= IXGBE_RDRXCTL_PSP;
4315
- /* fall through */
4344
+ fallthrough;
43164345 case ixgbe_mac_82599EB:
43174346 case ixgbe_mac_X540:
43184347 /* Disable RSC for ACK packets */
....@@ -4971,24 +5000,41 @@
49715000 napi_disable(&adapter->q_vector[q_idx]->napi);
49725001 }
49735002
4974
-static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
5003
+static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
49755004 {
5005
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
49765006 struct ixgbe_hw *hw = &adapter->hw;
4977
- u32 vxlanctrl;
5007
+ struct udp_tunnel_info ti;
49785008
4979
- if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
4980
- IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
4981
- return;
5009
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
5010
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
5011
+ adapter->vxlan_port = ti.port;
5012
+ else
5013
+ adapter->geneve_port = ti.port;
49825014
4983
- vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
4984
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
4985
-
4986
- if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
4987
- adapter->vxlan_port = 0;
4988
-
4989
- if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
4990
- adapter->geneve_port = 0;
5015
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
5016
+ ntohs(adapter->vxlan_port) |
5017
+ ntohs(adapter->geneve_port) <<
5018
+ IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
5019
+ return 0;
49915020 }
5021
+
5022
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
5023
+ .sync_table = ixgbe_udp_tunnel_sync,
5024
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5025
+ .tables = {
5026
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
5027
+ },
5028
+};
5029
+
5030
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
5031
+ .sync_table = ixgbe_udp_tunnel_sync,
5032
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5033
+ .tables = {
5034
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
5035
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
5036
+ },
5037
+};
49925038
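
These tables hand UDP tunnel port tracking to the udp_tunnel_nic core, replacing the open-coded ndo_udp_tunnel_add/del handlers deleted later in this patch: the core bookkeeps offloaded ports and invokes ->sync_table() on changes, and the driver callback only mirrors the single tracked VXLAN and GENEVE ports into VXLANCTRL. Register packing sketch:

    /* VXLAN port in bits 15:0, GENEVE port in bits 31:16 */
    u32 vxlanctrl = ntohs(adapter->vxlan_port) |
                    (ntohs(adapter->geneve_port) <<
                     IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
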
49935039 #ifdef CONFIG_IXGBE_DCB
49945040 /**
....@@ -5252,6 +5298,11 @@
52525298 u16 i = rx_ring->next_to_clean;
52535299 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
52545300
5301
+ if (rx_ring->xsk_pool) {
5302
+ ixgbe_xsk_clean_rx_ring(rx_ring);
5303
+ goto skip_free;
5304
+ }
5305
+
52555306 /* Free all the Rx ring sk_buffs */
52565307 while (i != rx_ring->next_to_alloc) {
52575308 if (rx_buffer->skb) {
....@@ -5290,6 +5341,7 @@
52905341 }
52915342 }
52925343
5344
+skip_free:
52935345 rx_ring->next_to_alloc = 0;
52945346 rx_ring->next_to_clean = 0;
52955347 rx_ring->next_to_use = 0;
....@@ -5350,9 +5402,10 @@
53505402 return err;
53515403 }
53525404
5353
-static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
5405
+static int ixgbe_macvlan_up(struct net_device *vdev,
5406
+ struct netdev_nested_priv *priv)
53545407 {
5355
- struct ixgbe_adapter *adapter = data;
5408
+ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
53565409 struct ixgbe_fwd_adapter *accel;
53575410
53585411 if (!netif_is_macvlan(vdev))
....@@ -5369,8 +5422,12 @@
53695422
53705423 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
53715424 {
5425
+ struct netdev_nested_priv priv = {
5426
+ .data = (void *)adapter,
5427
+ };
5428
+
53725429 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5373
- ixgbe_macvlan_up, adapter);
5430
+ ixgbe_macvlan_up, &priv);
53745431 }
53755432
53765433 static void ixgbe_configure(struct ixgbe_adapter *adapter)
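
netdev_walk_all_upper_dev_rcu() callbacks now receive a struct netdev_nested_priv wrapper instead of a raw void *, with the caller's argument carried in ->data (the wrapper also holds a flags word used by core callers). Call-site sketch mirroring the conversions in this patch:

    struct netdev_nested_priv priv = {
        .data = (void *)adapter,
    };

    netdev_walk_all_upper_dev_rcu(adapter->netdev, ixgbe_macvlan_up, &priv);
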
....@@ -5472,9 +5529,17 @@
54725529 return ret;
54735530
54745531 speed = hw->phy.autoneg_advertised;
5475
- if ((!speed) && (hw->mac.ops.get_link_capabilities))
5532
+ if (!speed && hw->mac.ops.get_link_capabilities) {
54765533 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
54775534 &autoneg);
5535
+ /* remove NBASE-T speeds from default autonegotiation
5536
+ * to accommodate broken network switches in the field
5537
+ * which cannot cope with advertised NBASE-T speeds
5538
+ */
5539
+ speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
5540
+ IXGBE_LINK_SPEED_2_5GB_FULL);
5541
+ }
5542
+
54785543 if (ret)
54795544 return ret;
54805545
....@@ -5627,7 +5692,6 @@
56275692
56285693 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
56295694 {
5630
- WARN_ON(in_interrupt());
56315695 /* put off any impending NetWatchDogTimeout */
56325696 netif_trans_update(adapter->netdev);
56335697
....@@ -5856,7 +5920,7 @@
58565920 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
58575921 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
58585922 ~IXGBE_DMATXCTL_TE));
5859
- /* fall through */
5923
+ fallthrough;
58605924 default:
58615925 break;
58625926 }
....@@ -5934,6 +5998,11 @@
59345998 u16 i = tx_ring->next_to_clean;
59355999 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
59366000
6001
+ if (tx_ring->xsk_pool) {
6002
+ ixgbe_xsk_clean_tx_ring(tx_ring);
6003
+ goto out;
6004
+ }
6005
+
59376006 while (i != tx_ring->next_to_use) {
59386007 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
59396008
....@@ -5985,6 +6054,7 @@
59856054 if (!ring_is_xdp(tx_ring))
59866055 netdev_tx_reset_queue(txring_txq(tx_ring));
59876056
6057
+out:
59886058 /* reset next_to_use and next_to_clean */
59896059 tx_ring->next_to_use = 0;
59906060 tx_ring->next_to_clean = 0;
....@@ -6053,9 +6123,9 @@
60536123 /* Disable Rx */
60546124 ixgbe_disable_rx(adapter);
60556125
6056
- /* synchronize_sched() needed for pending XDP buffers to drain */
6126
+ /* synchronize_rcu() needed for pending XDP buffers to drain */
60576127 if (adapter->xdp_ring[0])
6058
- synchronize_sched();
6128
+ synchronize_rcu();
60596129
60606130 ixgbe_irq_disable(adapter);
60616131
....@@ -6124,8 +6194,9 @@
61246194 /**
61256195 * ixgbe_tx_timeout - Respond to a Tx Hang
61266196 * @netdev: network interface device structure
6197
+ * @txqueue: queue number that timed out
61276198 **/
6128
-static void ixgbe_tx_timeout(struct net_device *netdev)
6199
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
61296200 {
61306201 struct ixgbe_adapter *adapter = netdev_priv(netdev);
61316202
....@@ -6259,6 +6330,10 @@
62596330 if (ixgbe_init_rss_key(adapter))
62606331 return -ENOMEM;
62616332
6333
+ adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
6334
+ if (!adapter->af_xdp_zc_qps)
6335
+ return -ENOMEM;
6336
+
62626337 /* Set MAC specific capability flags and exceptions */
62636338 switch (hw->mac.type) {
62646339 case ixgbe_mac_82598EB:
....@@ -6289,7 +6364,6 @@
62896364 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
62906365 break;
62916366 case ixgbe_mac_x550em_a:
6292
- adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
62936367 switch (hw->device_id) {
62946368 case IXGBE_DEV_ID_X550EM_A_1G_T:
62956369 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
....@@ -6298,7 +6372,7 @@
62986372 default:
62996373 break;
63006374 }
6301
- /* fall through */
6375
+ fallthrough;
63026376 case ixgbe_mac_X550EM_x:
63036377 #ifdef CONFIG_IXGBE_DCB
63046378 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
....@@ -6309,14 +6383,13 @@
63096383 adapter->fcoe.up = 0;
63106384 #endif /* IXGBE_DCB */
63116385 #endif /* IXGBE_FCOE */
6312
- /* Fall Through */
6386
+ fallthrough;
63136387 case ixgbe_mac_X550:
63146388 if (hw->mac.type == ixgbe_mac_X550)
63156389 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
63166390 #ifdef CONFIG_IXGBE_DCA
63176391 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
63186392 #endif
6319
- adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
63206393 break;
63216394 default:
63226395 break;
....@@ -6329,6 +6402,9 @@
63296402 #endif
63306403 /* n-tuple support exists, always init our spinlock */
63316404 spin_lock_init(&adapter->fdir_perfect_lock);
6405
+
6406
+ /* init spinlock to serialize access to VF resources */
6407
+ spin_lock_init(&adapter->vfs_lock);
63326408
63336409 #ifdef CONFIG_IXGBE_DCB
63346410 ixgbe_init_dcb(adapter);
....@@ -6390,7 +6466,7 @@
63906466 {
63916467 struct device *dev = tx_ring->dev;
63926468 int orig_node = dev_to_node(dev);
6393
- int ring_node = -1;
6469
+ int ring_node = NUMA_NO_NODE;
63946470 int size;
63956471
63966472 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
....@@ -6484,8 +6560,8 @@
64846560 {
64856561 struct device *dev = rx_ring->dev;
64866562 int orig_node = dev_to_node(dev);
6487
- int ring_node = -1;
6488
- int size, err;
6563
+ int ring_node = NUMA_NO_NODE;
6564
+ int size;
64896565
64906566 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
64916567
....@@ -6521,13 +6597,6 @@
65216597 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
65226598 rx_ring->queue_index) < 0)
65236599 goto err;
6524
-
6525
- err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
6526
- MEM_TYPE_PAGE_SHARED, NULL);
6527
- if (err) {
6528
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6529
- goto err;
6530
- }
65316600
65326601 rx_ring->xdp_prog = adapter->xdp_prog;
65336602
....@@ -6695,7 +6764,8 @@
66956764 (new_mtu > ETH_DATA_LEN))
66966765 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
66976766
6698
- e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
6767
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
6768
+ netdev->mtu, new_mtu);
66996769
67006770 /* must set new MTU before calling down or up */
67016771 netdev->mtu = new_mtu;
....@@ -6761,8 +6831,7 @@
67616831
67626832 ixgbe_up_complete(adapter);
67636833
6764
- ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6765
- udp_tunnel_get_rx_info(netdev);
6834
+ udp_tunnel_nic_reset_ntf(netdev);
67666835
67676836 return 0;
67686837
....@@ -6826,21 +6895,14 @@
68266895 return 0;
68276896 }
68286897
6829
-#ifdef CONFIG_PM
6830
-static int ixgbe_resume(struct pci_dev *pdev)
6898
+static int __maybe_unused ixgbe_resume(struct device *dev_d)
68316899 {
6900
+ struct pci_dev *pdev = to_pci_dev(dev_d);
68326901 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
68336902 struct net_device *netdev = adapter->netdev;
68346903 u32 err;
68356904
68366905 adapter->hw.hw_addr = adapter->io_addr;
6837
- pci_set_power_state(pdev, PCI_D0);
6838
- pci_restore_state(pdev);
6839
- /*
6840
- * pci_restore_state clears dev->state_saved so call
6841
- * pci_save_state to restore it.
6842
- */
6843
- pci_save_state(pdev);
68446906
68456907 err = pci_enable_device_mem(pdev);
68466908 if (err) {
....@@ -6851,7 +6913,7 @@
68516913 clear_bit(__IXGBE_DISABLED, &adapter->state);
68526914 pci_set_master(pdev);
68536915
6854
- pci_wake_from_d3(pdev, false);
6916
+ device_wakeup_disable(dev_d);
68556917
68566918 ixgbe_reset(adapter);
68576919
....@@ -6869,7 +6931,6 @@
68696931
68706932 return err;
68716933 }
6872
-#endif /* CONFIG_PM */
68736934
68746935 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
68756936 {
....@@ -6878,9 +6939,6 @@
68786939 struct ixgbe_hw *hw = &adapter->hw;
68796940 u32 ctrl;
68806941 u32 wufc = adapter->wol;
6881
-#ifdef CONFIG_PM
6882
- int retval = 0;
6883
-#endif
68846942
68856943 rtnl_lock();
68866944 netif_device_detach(netdev);
....@@ -6891,12 +6949,6 @@
68916949 ixgbe_clear_interrupt_scheme(adapter);
68926950 rtnl_unlock();
68936951
6894
-#ifdef CONFIG_PM
6895
- retval = pci_save_state(pdev);
6896
- if (retval)
6897
- return retval;
6898
-
6899
-#endif
69006952 if (hw->mac.ops.stop_link_on_d3)
69016953 hw->mac.ops.stop_link_on_d3(hw);
69026954
....@@ -6951,26 +7003,18 @@
69517003 return 0;
69527004 }
69537005
6954
-#ifdef CONFIG_PM
6955
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
7006
+static int __maybe_unused ixgbe_suspend(struct device *dev_d)
69567007 {
7008
+ struct pci_dev *pdev = to_pci_dev(dev_d);
69577009 int retval;
69587010 bool wake;
69597011
69607012 retval = __ixgbe_shutdown(pdev, &wake);
6961
- if (retval)
6962
- return retval;
69637013
6964
- if (wake) {
6965
- pci_prepare_to_sleep(pdev);
6966
- } else {
6967
- pci_wake_from_d3(pdev, false);
6968
- pci_set_power_state(pdev, PCI_D3hot);
6969
- }
7014
+ device_set_wakeup_enable(dev_d, wake);
69707015
6971
- return 0;
7016
+ return retval;
69727017 }
6973
-#endif /* CONFIG_PM */
69747018
69757019 static void ixgbe_shutdown(struct pci_dev *pdev)
69767020 {
....@@ -7135,7 +7179,7 @@
71357179 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
71367180 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
71377181 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7138
- /* fall through */
7182
+ fallthrough;
71397183 case ixgbe_mac_82599EB:
71407184 for (i = 0; i < 16; i++)
71417185 adapter->hw_rx_no_dma_resources +=
....@@ -7834,6 +7878,33 @@
78347878 }
78357879
78367880 /**
7881
+ * ixgbe_check_fw_error - Check firmware for errors
7882
+ * @adapter: the adapter private structure
7883
+ *
7884
+ * Check the FWSM register for firmware errors
7885
+ */
7886
+static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
7887
+{
7888
+ struct ixgbe_hw *hw = &adapter->hw;
7889
+ u32 fwsm;
7890
+
7891
+ /* read fwsm.ext_err_ind register and log errors */
7892
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
7893
+
7894
+ if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
7895
+ !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
7896
+ e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
7897
+ fwsm);
7898
+
7899
+ if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
7900
+ e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
7901
+ return true;
7902
+ }
7903
+
7904
+ return false;
7905
+}
7906
+
7907
+/**
78377908 * ixgbe_service_task - manages and runs subtasks
78387909 * @work: pointer to work_struct containing our data
78397910 **/
....@@ -7851,11 +7922,11 @@
78517922 ixgbe_service_event_complete(adapter);
78527923 return;
78537924 }
7854
- if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7855
- rtnl_lock();
7856
- adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7857
- udp_tunnel_get_rx_info(adapter->netdev);
7858
- rtnl_unlock();
7925
+ if (ixgbe_check_fw_error(adapter)) {
7926
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
7927
+ unregister_netdev(adapter->netdev);
7928
+ ixgbe_service_event_complete(adapter);
7929
+ return;
78597930 }
78607931 ixgbe_reset_subtask(adapter);
78617932 ixgbe_phy_interrupt_subtask(adapter);
....@@ -7890,6 +7961,7 @@
78907961 } ip;
78917962 union {
78927963 struct tcphdr *tcp;
7964
+ struct udphdr *udp;
78937965 unsigned char *hdr;
78947966 } l4;
78957967 u32 paylen, l4_offset;
....@@ -7913,7 +7985,8 @@
79137985 l4.hdr = skb_checksum_start(skb);
79147986
79157987 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
7916
- type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7988
+ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
7989
+ IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
79177990
79187991 /* initialize outer IP header fields */
79197992 if (ip.v4->version == 4) {
....@@ -7943,12 +8016,20 @@
79438016 /* determine offset of inner transport header */
79448017 l4_offset = l4.hdr - skb->data;
79458018
7946
- /* compute length of segmentation header */
7947
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
7948
-
79498019 /* remove payload length from inner checksum */
79508020 paylen = skb->len - l4_offset;
7951
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
8021
+
8022
+ if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
8023
+ /* compute length of segmentation header */
8024
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
8025
+ csum_replace_by_diff(&l4.tcp->check,
8026
+ (__force __wsum)htonl(paylen));
8027
+ } else {
8028
+ /* compute length of segmentation header */
8029
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
8030
+ csum_replace_by_diff(&l4.udp->check,
8031
+ (__force __wsum)htonl(paylen));
8032
+ }
79528033
79538034 /* update gso size and bytecount with header size */
79548035 first->gso_segs = skb_shinfo(skb)->gso_segs;
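
With NETIF_F_GSO_UDP_L4 the TSO path must distinguish header lengths: TCP's is variable (doff * 4), UDP's is the fixed sizeof(struct udphdr). Worked example assuming a plain IPv4 packet, so l4_offset = 14 + 20 = 34:

    /* TCP with a 20-byte header: *hdr_len = 34 + 20 = 54 bytes are
     * replicated per segment; UDP: *hdr_len = 34 + 8 = 42.  Either way
     * the payload length is folded out of the pseudo-header checksum:
     */
    csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
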
....@@ -8001,7 +8082,7 @@
80018082 switch (skb->csum_offset) {
80028083 case offsetof(struct tcphdr, check):
80038084 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8004
- /* fall through */
8085
+ fallthrough;
80058086 case offsetof(struct udphdr, check):
80068087 break;
80078088 case offsetof(struct sctphdr, checksum):
....@@ -8013,7 +8094,7 @@
80138094 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
80148095 break;
80158096 }
8016
- /* fall through */
8097
+ fallthrough;
80178098 default:
80188099 skb_checksum_help(skb);
80198100 goto csum_failed;
....@@ -8125,9 +8206,6 @@
81258206 return __ixgbe_maybe_stop_tx(tx_ring, size);
81268207 }
81278208
8128
-#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
8129
- IXGBE_TXD_CMD_RS)
8130
-
81318209 static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
81328210 struct ixgbe_tx_buffer *first,
81338211 const u8 hdr_len)
....@@ -8135,7 +8213,7 @@
81358213 struct sk_buff *skb = first->skb;
81368214 struct ixgbe_tx_buffer *tx_buffer;
81378215 union ixgbe_adv_tx_desc *tx_desc;
8138
- struct skb_frag_struct *frag;
8216
+ skb_frag_t *frag;
81398217 dma_addr_t dma;
81408218 unsigned int data_len, size;
81418219 u32 tx_flags = first->tx_flags;
....@@ -8227,6 +8305,8 @@
82278305 /* set the timestamp */
82288306 first->time_stamp = jiffies;
82298307
8308
+ skb_tx_timestamp(skb);
8309
+
82308310 /*
82318311 * Force memory writes to complete before letting h/w know there
82328312 * are new descriptors to fetch. (Only applicable for weak-ordered
....@@ -8248,13 +8328,8 @@
82488328
82498329 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
82508330
8251
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
8331
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
82528332 writel(i, tx_ring->tail);
8253
-
8254
- /* we need this if more than one processor can write to our tail
8255
- * at a time, it synchronizes IO on IA64/Altix systems
8256
- */
8257
- mmiowb();
82588333 }
82598334
82608335 return 0;
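
Two core changes meet in this hunk: skb->xmit_more became the per-CPU netdev_xmit_more() helper, and mmiowb() was dropped tree-wide because spin_unlock() on the queue lock now provides the I/O ordering the explicit barrier used to supply. The doorbell reduces to:

    if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
        writel(i, tx_ring->tail);	/* publish new descriptors */
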
....@@ -8434,8 +8509,7 @@
84348509
84358510 #ifdef IXGBE_FCOE
84368511 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8437
- struct net_device *sb_dev,
8438
- select_queue_fallback_t fallback)
8512
+ struct net_device *sb_dev)
84398513 {
84408514 struct ixgbe_adapter *adapter;
84418515 struct ixgbe_ring_feature *f;
....@@ -8463,9 +8537,9 @@
84638537
84648538 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
84658539 break;
8466
- /* fall through */
8540
+ fallthrough;
84678541 default:
8468
- return fallback(dev, skb, sb_dev);
8542
+ return netdev_pick_tx(dev, skb, sb_dev);
84698543 }
84708544
84718545 f = &adapter->ring_feature[RING_F_FCOE];
....@@ -8480,8 +8554,8 @@
84808554 }
84818555
84828556 #endif
8483
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8484
- struct xdp_frame *xdpf)
8557
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8558
+ struct xdp_frame *xdpf)
84858559 {
84868560 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
84878561 struct ixgbe_tx_buffer *tx_buffer;
....@@ -8558,7 +8632,8 @@
85588632 * otherwise try next time
85598633 */
85608634 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8561
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
8635
+ count += TXD_USE_COUNT(skb_frag_size(
8636
+ &skb_shinfo(skb)->frags[f]));
85628637
85638638 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
85648639 tx_ring->tx_stats.tx_busy++;
....@@ -8604,8 +8679,6 @@
86048679 adapter->tx_hwtstamp_skipped++;
86058680 }
86068681 }
8607
-
8608
- skb_tx_timestamp(skb);
86098682
86108683 #ifdef CONFIG_PCI_IOV
86118684 /*
....@@ -8653,7 +8726,7 @@
86538726
86548727 #endif /* IXGBE_FCOE */
86558728
8656
-#ifdef CONFIG_XFRM_OFFLOAD
8729
+#ifdef CONFIG_IXGBE_IPSEC
86578730 if (xfrm_offload(skb) &&
86588731 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
86598732 goto out_drop;
....@@ -8704,7 +8777,9 @@
87048777 if (skb_put_padto(skb, 17))
87058778 return NETDEV_TX_OK;
87068779
8707
- tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
8780
+ tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
8781
+ if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8782
+ return NETDEV_TX_BUSY;
87088783
87098784 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
87108785 }
....@@ -8747,6 +8822,15 @@
87478822 u16 value;
87488823 int rc;
87498824
8825
+ if (adapter->mii_bus) {
8826
+ int regnum = addr;
8827
+
8828
+ if (devad != MDIO_DEVAD_NONE)
8829
+ regnum |= (devad << 16) | MII_ADDR_C45;
8830
+
8831
+ return mdiobus_read(adapter->mii_bus, prtad, regnum);
8832
+ }
8833
+
87508834 if (prtad != hw->phy.mdio.prtad)
87518835 return -EINVAL;
87528836 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
....@@ -8760,6 +8844,15 @@
87608844 {
87618845 struct ixgbe_adapter *adapter = netdev_priv(netdev);
87628846 struct ixgbe_hw *hw = &adapter->hw;
8847
+
8848
+ if (adapter->mii_bus) {
8849
+ int regnum = addr;
8850
+
8851
+ if (devad != MDIO_DEVAD_NONE)
8852
+ regnum |= (devad << 16) | MII_ADDR_C45;
8853
+
8854
+ return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8855
+ }
87638856
87648857 if (prtad != hw->phy.mdio.prtad)
87658858 return -EINVAL;
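
With the MDIO bus registered in probe (ixgbe_mii_bus_init(), later in this patch), the MII ioctls route through mdiobus_read()/mdiobus_write(). Those take a single regnum, so clause-45 accesses encode the device address into it; in this kernel the convention is the MII_ADDR_C45 flag plus devad in bits 20:16. Encoding sketch, mirroring both converted call sites:

    int regnum = addr;

    if (devad != MDIO_DEVAD_NONE)
        regnum |= (devad << 16) | MII_ADDR_C45;	/* clause-45 access */

    value = mdiobus_read(adapter->mii_bus, prtad, regnum);
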
....@@ -8778,7 +8871,7 @@
87788871 case SIOCGMIIPHY:
87798872 if (!adapter->hw.phy.ops.read_reg)
87808873 return -EOPNOTSUPP;
8781
- /* fall through */
8874
+ fallthrough;
87828875 default:
87838876 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
87848877 }
....@@ -8953,9 +9046,10 @@
89539046 }
89549047
89559048 #endif /* CONFIG_IXGBE_DCB */
8956
-static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
9049
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
9050
+ struct netdev_nested_priv *priv)
89579051 {
8958
- struct ixgbe_adapter *adapter = data;
9052
+ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
89599053 struct ixgbe_fwd_adapter *accel;
89609054 int pool;
89619055
....@@ -8992,13 +9086,16 @@
89929086 static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
89939087 {
89949088 struct ixgbe_adapter *adapter = netdev_priv(dev);
9089
+ struct netdev_nested_priv priv = {
9090
+ .data = (void *)adapter,
9091
+ };
89959092
89969093 /* flush any stale bits out of the fwd bitmask */
89979094 bitmap_clear(adapter->fwd_bitmask, 1, 63);
89989095
89999096 /* walk through upper devices reassigning pools */
90009097 netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9001
- adapter);
9098
+ &priv);
90029099 }
90039100
90049101 /**
....@@ -9172,14 +9269,18 @@
91729269 u8 queue;
91739270 };
91749271
9175
-static int get_macvlan_queue(struct net_device *upper, void *_data)
9272
+static int get_macvlan_queue(struct net_device *upper,
9273
+ struct netdev_nested_priv *priv)
91769274 {
91779275 if (netif_is_macvlan(upper)) {
91789276 struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9179
- struct upper_walk_data *data = _data;
9180
- struct ixgbe_adapter *adapter = data->adapter;
9181
- int ifindex = data->ifindex;
9277
+ struct ixgbe_adapter *adapter;
9278
+ struct upper_walk_data *data;
9279
+ int ifindex;
91829280
9281
+ data = (struct upper_walk_data *)priv->data;
9282
+ ifindex = data->ifindex;
9283
+ adapter = data->adapter;
91839284 if (vadapter && upper->ifindex == ifindex) {
91849285 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
91859286 data->action = data->queue;
....@@ -9195,6 +9296,7 @@
91959296 {
91969297 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
91979298 unsigned int num_vfs = adapter->num_vfs, vf;
9299
+ struct netdev_nested_priv priv;
91989300 struct upper_walk_data data;
91999301 struct net_device *upper;
92009302
....@@ -9214,8 +9316,9 @@
92149316 data.ifindex = ifindex;
92159317 data.action = 0;
92169318 data.queue = 0;
9319
+ priv.data = (void *)&data;
92179320 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9218
- get_macvlan_queue, &data)) {
9321
+ get_macvlan_queue, &priv)) {
92199322 *action = data.action;
92209323 *queue = data.queue;
92219324
....@@ -9426,6 +9529,10 @@
94269529 jump->mat = nexthdr[i].jump;
94279530 adapter->jump_tables[link_uhtid] = jump;
94289531 break;
9532
+ } else {
9533
+ kfree(mask);
9534
+ kfree(input);
9535
+ kfree(jump);
94299536 }
94309537 }
94319538 return 0;
....@@ -9545,27 +9652,6 @@
95459652 }
95469653 }
95479654
9548
-static int ixgbe_setup_tc_block(struct net_device *dev,
9549
- struct tc_block_offload *f)
9550
-{
9551
- struct ixgbe_adapter *adapter = netdev_priv(dev);
9552
-
9553
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9554
- return -EOPNOTSUPP;
9555
-
9556
- switch (f->command) {
9557
- case TC_BLOCK_BIND:
9558
- return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
9559
- adapter, adapter, f->extack);
9560
- case TC_BLOCK_UNBIND:
9561
- tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
9562
- adapter);
9563
- return 0;
9564
- default:
9565
- return -EOPNOTSUPP;
9566
- }
9567
-}
9568
-
95699655 static int ixgbe_setup_tc_mqprio(struct net_device *dev,
95709656 struct tc_mqprio_qopt *mqprio)
95719657 {
....@@ -9573,12 +9659,19 @@
95739659 return ixgbe_setup_tc(dev, mqprio->num_tc);
95749660 }
95759661
9662
+static LIST_HEAD(ixgbe_block_cb_list);
9663
+
95769664 static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
95779665 void *type_data)
95789666 {
9667
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
9668
+
95799669 switch (type) {
95809670 case TC_SETUP_BLOCK:
9581
- return ixgbe_setup_tc_block(dev, type_data);
9671
+ return flow_block_cb_setup_simple(type_data,
9672
+ &ixgbe_block_cb_list,
9673
+ ixgbe_setup_tc_block_cb,
9674
+ adapter, adapter, true);
95829675 case TC_SETUP_QDISC_MQPRIO:
95839676 return ixgbe_setup_tc_mqprio(dev, type_data);
95849677 default:
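
flow_block_cb_setup_simple() absorbs the per-driver TC_BLOCK_BIND/TC_BLOCK_UNBIND boilerplate deleted above: given a driver-owned callback list it validates the binder type, allocates or releases the flow_block_cb, and (un)registers the classifier callback; the trailing true restricts binding to clsact ingress. Call shape:

    static LIST_HEAD(example_block_cb_list);

    err = flow_block_cb_setup_simple(type_data, &example_block_cb_list,
                                     ixgbe_setup_tc_block_cb,
                                     adapter /* cb_ident */,
                                     adapter /* cb_priv */, true);
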
....@@ -9704,26 +9797,6 @@
97049797
97059798 netdev->features = features;
97069799
9707
- if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9708
- if (features & NETIF_F_RXCSUM) {
9709
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9710
- } else {
9711
- u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9712
-
9713
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9714
- }
9715
- }
9716
-
9717
- if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9718
- if (features & NETIF_F_RXCSUM) {
9719
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9720
- } else {
9721
- u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9722
-
9723
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9724
- }
9725
- }
9726
-
97279800 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
97289801 ixgbe_reset_l2fw_offload(adapter);
97299802 else if (need_reset)
....@@ -9732,125 +9805,14 @@
97329805 NETIF_F_HW_VLAN_CTAG_FILTER))
97339806 ixgbe_set_rx_mode(netdev);
97349807
9735
- return 0;
9736
-}
9737
-
9738
-/**
9739
- * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
9740
- * @dev: The port's netdev
9741
- * @ti: Tunnel endpoint information
9742
- **/
9743
-static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9744
- struct udp_tunnel_info *ti)
9745
-{
9746
- struct ixgbe_adapter *adapter = netdev_priv(dev);
9747
- struct ixgbe_hw *hw = &adapter->hw;
9748
- __be16 port = ti->port;
9749
- u32 port_shift = 0;
9750
- u32 reg;
9751
-
9752
- if (ti->sa_family != AF_INET)
9753
- return;
9754
-
9755
- switch (ti->type) {
9756
- case UDP_TUNNEL_TYPE_VXLAN:
9757
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9758
- return;
9759
-
9760
- if (adapter->vxlan_port == port)
9761
- return;
9762
-
9763
- if (adapter->vxlan_port) {
9764
- netdev_info(dev,
9765
- "VXLAN port %d set, not adding port %d\n",
9766
- ntohs(adapter->vxlan_port),
9767
- ntohs(port));
9768
- return;
9769
- }
9770
-
9771
- adapter->vxlan_port = port;
9772
- break;
9773
- case UDP_TUNNEL_TYPE_GENEVE:
9774
- if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9775
- return;
9776
-
9777
- if (adapter->geneve_port == port)
9778
- return;
9779
-
9780
- if (adapter->geneve_port) {
9781
- netdev_info(dev,
9782
- "GENEVE port %d set, not adding port %d\n",
9783
- ntohs(adapter->geneve_port),
9784
- ntohs(port));
9785
- return;
9786
- }
9787
-
9788
- port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9789
- adapter->geneve_port = port;
9790
- break;
9791
- default:
9792
- return;
9793
- }
9794
-
9795
- reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9796
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9797
-}
9798
-
9799
-/**
9800
- * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
9801
- * @dev: The port's netdev
9802
- * @ti: Tunnel endpoint information
9803
- **/
9804
-static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9805
- struct udp_tunnel_info *ti)
9806
-{
9807
- struct ixgbe_adapter *adapter = netdev_priv(dev);
9808
- u32 port_mask;
9809
-
9810
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9811
- ti->type != UDP_TUNNEL_TYPE_GENEVE)
9812
- return;
9813
-
9814
- if (ti->sa_family != AF_INET)
9815
- return;
9816
-
9817
- switch (ti->type) {
9818
- case UDP_TUNNEL_TYPE_VXLAN:
9819
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9820
- return;
9821
-
9822
- if (adapter->vxlan_port != ti->port) {
9823
- netdev_info(dev, "VXLAN port %d not found\n",
9824
- ntohs(ti->port));
9825
- return;
9826
- }
9827
-
9828
- port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9829
- break;
9830
- case UDP_TUNNEL_TYPE_GENEVE:
9831
- if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9832
- return;
9833
-
9834
- if (adapter->geneve_port != ti->port) {
9835
- netdev_info(dev, "GENEVE port %d not found\n",
9836
- ntohs(ti->port));
9837
- return;
9838
- }
9839
-
9840
- port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9841
- break;
9842
- default:
9843
- return;
9844
- }
9845
-
9846
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9847
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9808
+ return 1;
98489809 }
98499810
98509811 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
98519812 struct net_device *dev,
98529813 const unsigned char *addr, u16 vid,
9853
- u16 flags)
9814
+ u16 flags,
9815
+ struct netlink_ext_ack *extack)
98549816 {
98559817 /* guarantee we can provide a unique filter for the unicast address */
98569818 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
....@@ -9939,7 +9901,8 @@
99399901 }
99409902
99419903 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9942
- struct nlmsghdr *nlh, u16 flags)
9904
+ struct nlmsghdr *nlh, u16 flags,
9905
+ struct netlink_ext_ack *extack)
99439906 {
99449907 struct ixgbe_adapter *adapter = netdev_priv(dev);
99459908 struct nlattr *attr, *br_spec;
....@@ -10133,6 +10096,7 @@
1013310096 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
1013410097 return features & ~(NETIF_F_HW_CSUM |
1013510098 NETIF_F_SCTP_CRC |
10099
+ NETIF_F_GSO_UDP_L4 |
1013610100 NETIF_F_HW_VLAN_CTAG_TX |
1013710101 NETIF_F_TSO |
1013810102 NETIF_F_TSO6);
....@@ -10141,6 +10105,7 @@
1014110105 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
1014210106 return features & ~(NETIF_F_HW_CSUM |
1014310107 NETIF_F_SCTP_CRC |
10108
+ NETIF_F_GSO_UDP_L4 |
1014410109 NETIF_F_TSO |
1014510110 NETIF_F_TSO6);
1014610111
....@@ -10150,8 +10115,8 @@
1015010115 * the TSO, so it's the exception.
1015110116 */
1015210117 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10153
-#ifdef CONFIG_XFRM_OFFLOAD
10154
- if (!skb->sp)
10118
+#ifdef CONFIG_IXGBE_IPSEC
10119
+ if (!secpath_exists(skb))
1015510120 #endif
1015610121 features &= ~NETIF_F_TSO;
1015710122 }
....@@ -10164,6 +10129,8 @@
1016410129 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1016510130 struct ixgbe_adapter *adapter = netdev_priv(dev);
1016610131 struct bpf_prog *old_prog;
10132
+ bool need_reset;
10133
+ int num_queues;
1016710134
1016810135 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
1016910136 return -EINVAL;
....@@ -10186,10 +10153,16 @@
1018610153 return -ENOMEM;
1018710154
1018810155 old_prog = xchg(&adapter->xdp_prog, prog);
10156
+ need_reset = (!!prog != !!old_prog);
1018910157
1019010158 /* If transitioning XDP modes reconfigure rings */
10191
- if (!!prog != !!old_prog) {
10192
- int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10159
+ if (need_reset) {
10160
+ int err;
10161
+
10162
+ if (!prog)
10163
+ /* Wait until ndo_xsk_wakeup completes. */
10164
+ synchronize_rcu();
10165
+ err = ixgbe_setup_tc(dev, adapter->hw_tcs);
1019310166
1019410167 if (err) {
1019510168 rcu_assign_pointer(adapter->xdp_prog, old_prog);
....@@ -10204,6 +10177,18 @@
1020410177 if (old_prog)
1020510178 bpf_prog_put(old_prog);
1020610179
10180
+ /* Kick start the NAPI context if there is an AF_XDP socket open
10181
+ * on that queue id. This is so that receiving will start.
10182
+ */
10183
+ if (need_reset && prog) {
10184
+ num_queues = min_t(int, adapter->num_rx_queues,
10185
+ adapter->num_xdp_queues);
10186
+ for (i = 0; i < num_queues; i++)
10187
+ if (adapter->xdp_ring[i]->xsk_pool)
10188
+ (void)ixgbe_xsk_wakeup(adapter->netdev, i,
10189
+ XDP_WAKEUP_RX);
10190
+ }
10191
+
1020710192 return 0;
1020810193 }
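
The need_reset handling closes two AF_XDP races: detaching a program waits out in-flight ndo_xsk_wakeup() callers via synchronize_rcu() before the rings are torn down, and attaching one kicks NAPI on every queue that already has a pool bound so receive restarts without a userspace wakeup. Kick sketch:

    /* one NAPI schedule per already-bound AF_XDP queue id */
    (void)ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
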
1020910194
....@@ -10214,16 +10199,16 @@
1021410199 switch (xdp->command) {
1021510200 case XDP_SETUP_PROG:
1021610201 return ixgbe_xdp_setup(dev, xdp->prog);
10217
- case XDP_QUERY_PROG:
10218
- xdp->prog_id = adapter->xdp_prog ?
10219
- adapter->xdp_prog->aux->id : 0;
10220
- return 0;
10202
+ case XDP_SETUP_XSK_POOL:
10203
+ return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
10204
+ xdp->xsk.queue_id);
10205
+
1022110206 default:
1022210207 return -EINVAL;
1022310208 }
1022410209 }
1022510210
10226
-static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10211
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
1022710212 {
1022810213 /* Force memory writes to complete before letting h/w know there
1022910214 * are new descriptors to fetch.
....@@ -10251,6 +10236,9 @@
1025110236 */
1025210237 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
1025310238 if (unlikely(!ring))
10239
+ return -ENXIO;
10240
+
10241
+ if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
1025410242 return -ENXIO;
1025510243
1025610244 for (i = 0; i < n; i++) {
....@@ -10309,12 +10297,167 @@
1030910297 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
1031010298 .ndo_dfwd_add_station = ixgbe_fwd_add,
1031110299 .ndo_dfwd_del_station = ixgbe_fwd_del,
10312
- .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
10313
- .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
10300
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
10301
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
1031410302 .ndo_features_check = ixgbe_features_check,
1031510303 .ndo_bpf = ixgbe_xdp,
1031610304 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10305
+ .ndo_xsk_wakeup = ixgbe_xsk_wakeup,
1031710306 };
10307
+
10308
+static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10309
+ struct ixgbe_ring *tx_ring)
10310
+{
10311
+ unsigned long wait_delay, delay_interval;
10312
+ struct ixgbe_hw *hw = &adapter->hw;
10313
+ u8 reg_idx = tx_ring->reg_idx;
10314
+ int wait_loop;
10315
+ u32 txdctl;
10316
+
10317
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
10318
+
10319
+ /* delay mechanism from ixgbe_disable_tx */
10320
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10321
+
10322
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
10323
+ wait_delay = delay_interval;
10324
+
10325
+ while (wait_loop--) {
10326
+ usleep_range(wait_delay, wait_delay + 10);
10327
+ wait_delay += delay_interval * 2;
10328
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10329
+
10330
+ if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10331
+ return;
10332
+ }
10333
+
10334
+ e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10335
+}
10336
+
10337
+static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10338
+ struct ixgbe_ring *tx_ring)
10339
+{
10340
+ set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10341
+ ixgbe_disable_txr_hw(adapter, tx_ring);
10342
+}
10343
+
10344
+static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10345
+ struct ixgbe_ring *rx_ring)
10346
+{
10347
+ unsigned long wait_delay, delay_interval;
10348
+ struct ixgbe_hw *hw = &adapter->hw;
10349
+ u8 reg_idx = rx_ring->reg_idx;
10350
+ int wait_loop;
10351
+ u32 rxdctl;
10352
+
10353
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10354
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10355
+ rxdctl |= IXGBE_RXDCTL_SWFLSH;
10356
+
10357
+ /* write value back with RXDCTL.ENABLE bit cleared */
10358
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10359
+
10360
+ /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
10361
+ if (hw->mac.type == ixgbe_mac_82598EB &&
10362
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10363
+ return;
10364
+
10365
+ /* delay mechanism from ixgbe_disable_rx */
10366
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10367
+
10368
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
10369
+ wait_delay = delay_interval;
10370
+
10371
+ while (wait_loop--) {
10372
+ usleep_range(wait_delay, wait_delay + 10);
10373
+ wait_delay += delay_interval * 2;
10374
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10375
+
10376
+ if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10377
+ return;
10378
+ }
10379
+
10380
+ e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10381
+}
10382
+
10383
+static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10384
+{
10385
+ memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10386
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10387
+}
10388
+
10389
+static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10390
+{
10391
+ memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10392
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10393
+}
10394
+
10395
+/**
10396
+ * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
10397
+ * @adapter: adapter structure
10398
+ * @ring: ring index
10399
+ *
10400
+ * This function disables a certain Rx/Tx/XDP Tx ring. The function
10401
+ * assumes that the netdev is running.
10402
+ **/
10403
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10404
+{
10405
+ struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10406
+
10407
+ rx_ring = adapter->rx_ring[ring];
10408
+ tx_ring = adapter->tx_ring[ring];
10409
+ xdp_ring = adapter->xdp_ring[ring];
10410
+
10411
+ ixgbe_disable_txr(adapter, tx_ring);
10412
+ if (xdp_ring)
10413
+ ixgbe_disable_txr(adapter, xdp_ring);
10414
+ ixgbe_disable_rxr_hw(adapter, rx_ring);
10415
+
10416
+ if (xdp_ring)
10417
+ synchronize_rcu();
10418
+
10419
+ /* Rx/Tx/XDP Tx share the same napi context. */
10420
+ napi_disable(&rx_ring->q_vector->napi);
10421
+
10422
+ ixgbe_clean_tx_ring(tx_ring);
10423
+ if (xdp_ring)
10424
+ ixgbe_clean_tx_ring(xdp_ring);
10425
+ ixgbe_clean_rx_ring(rx_ring);
10426
+
10427
+ ixgbe_reset_txr_stats(tx_ring);
10428
+ if (xdp_ring)
10429
+ ixgbe_reset_txr_stats(xdp_ring);
10430
+ ixgbe_reset_rxr_stats(rx_ring);
10431
+}
10432
+
10433
+/**
10434
+ * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
10435
+ * @adapter: adapter structure
10436
+ * @ring: ring index
10437
+ *
10438
+ * This function enables a certain Rx/Tx/XDP Tx ring. The function
10439
+ * assumes that the netdev is running.
10440
+ **/
10441
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10442
+{
10443
+ struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10444
+
10445
+ rx_ring = adapter->rx_ring[ring];
10446
+ tx_ring = adapter->tx_ring[ring];
10447
+ xdp_ring = adapter->xdp_ring[ring];
10448
+
10449
+ /* Rx/Tx/XDP Tx share the same napi context. */
10450
+ napi_enable(&rx_ring->q_vector->napi);
10451
+
10452
+ ixgbe_configure_tx_ring(adapter, tx_ring);
10453
+ if (xdp_ring)
10454
+ ixgbe_configure_tx_ring(adapter, xdp_ring);
10455
+ ixgbe_configure_rx_ring(adapter, rx_ring);
10456
+
10457
+ clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10458
+ if (xdp_ring)
10459
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10460
+}
1031810461
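
These helpers let the AF_XDP pool setup quiesce a single ring triple (Rx, Tx, XDP Tx) without a full ixgbe_down()/ixgbe_up(): Tx is disabled before Rx, and an RCU grace period flushes outstanding XDP transmits before the rings are cleaned. Assumed caller shape (the pool-setup path lives in ixgbe_xsk.c, not this file; attach_pool() is an illustrative placeholder):

    if (if_running)
        ixgbe_txrx_ring_disable(adapter, qid);

    err = attach_pool(adapter, pool, qid);	/* illustrative helper */

    if (if_running)
        ixgbe_txrx_ring_enable(adapter, qid);
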
1031910462 /**
1032010463 * ixgbe_enumerate_functions - Get the number of ports this device has
....@@ -10398,7 +10541,7 @@
1039810541 /* only support first port */
1039910542 if (hw->bus.func != 0)
1040010543 break;
10401
- /* fall through */
10544
+ fallthrough;
1040210545 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
1040310546 case IXGBE_SUBDEV_ID_82599_SFP:
1040410547 case IXGBE_SUBDEV_ID_82599_RNDC:
....@@ -10600,6 +10743,18 @@
1060010743 if (err)
1060110744 goto err_sw_init;
1060210745
10746
+ switch (adapter->hw.mac.type) {
10747
+ case ixgbe_mac_X550:
10748
+ case ixgbe_mac_X550EM_x:
10749
+ netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
10750
+ break;
10751
+ case ixgbe_mac_x550em_a:
10752
+ netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
10753
+ break;
10754
+ default:
10755
+ break;
10756
+ }
10757
+
1060310758 /* Make sure the SWFW semaphore is in a valid state */
1060410759 if (hw->mac.ops.init_swfw_sync)
1060510760 hw->mac.ops.init_swfw_sync(hw);
....@@ -10677,9 +10832,9 @@
1067710832 IXGBE_GSO_PARTIAL_FEATURES;
1067810833
1067910834 if (hw->mac.type >= ixgbe_mac_82599EB)
10680
- netdev->features |= NETIF_F_SCTP_CRC;
10835
+ netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
1068110836
10682
-#ifdef CONFIG_XFRM_OFFLOAD
10837
+#ifdef CONFIG_IXGBE_IPSEC
1068310838 #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
1068410839 NETIF_F_HW_ESP_TX_CSUM | \
1068510840 NETIF_F_GSO_ESP)
....@@ -10753,6 +10908,11 @@
1075310908 netdev->hw_features |= NETIF_F_LRO;
1075410909 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
1075510910 netdev->features |= NETIF_F_LRO;
10911
+
10912
+ if (ixgbe_check_fw_error(adapter)) {
10913
+ err = -EIO;
10914
+ goto err_sw_init;
10915
+ }
1075610916
1075710917 /* make sure the EEPROM is good */
1075810918 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
....@@ -10889,8 +11049,8 @@
1088911049 */
1089011050 if (hw->mac.ops.set_fw_drv_ver)
1089111051 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
10892
- sizeof(ixgbe_driver_version) - 1,
10893
- ixgbe_driver_version);
11052
+ sizeof(UTS_RELEASE) - 1,
11053
+ UTS_RELEASE);
1089411054
1089511055 /* add san mac addr to netdev */
1089611056 ixgbe_add_sanmac_netdev(netdev);
....@@ -10910,8 +11070,14 @@
1091011070 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
1091111071 true);
1091211072
11073
+ err = ixgbe_mii_bus_init(hw);
11074
+ if (err)
11075
+ goto err_netdev;
11076
+
1091311077 return 0;
1091411078
11079
+err_netdev:
11080
+ unregister_netdev(netdev);
1091511081 err_register:
1091611082 ixgbe_release_hw_control(adapter);
1091711083 ixgbe_clear_interrupt_scheme(adapter);
....@@ -10922,6 +11088,7 @@
1092211088 kfree(adapter->jump_tables[0]);
1092311089 kfree(adapter->mac_table);
1092411090 kfree(adapter->rss_key);
11091
+ bitmap_free(adapter->af_xdp_zc_qps);
1092511092 err_ioremap:
1092611093 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
1092711094 free_netdev(netdev);
....@@ -10961,6 +11128,8 @@
1096111128 set_bit(__IXGBE_REMOVING, &adapter->state);
1096211129 cancel_work_sync(&adapter->service_task);
1096311130
11131
+ if (adapter->mii_bus)
11132
+ mdiobus_unregister(adapter->mii_bus);
1096411133
1096511134 #ifdef CONFIG_IXGBE_DCA
1096611135 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
....@@ -11009,6 +11178,7 @@
1100911178
1101011179 kfree(adapter->mac_table);
1101111180 kfree(adapter->rss_key);
11181
+ bitmap_free(adapter->af_xdp_zc_qps);
1101211182 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
1101311183 free_netdev(netdev);
1101411184
....@@ -11114,8 +11284,6 @@
1111411284 /* Free device reference count */
1111511285 pci_dev_put(vfdev);
1111611286 }
11117
-
11118
- pci_cleanup_aer_uncorrect_error_status(pdev);
1111911287 }
1112011288
1112111289 /*
....@@ -11165,7 +11333,6 @@
1116511333 {
1116611334 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
1116711335 pci_ers_result_t result;
11168
- int err;
1116911336
1117011337 if (pci_enable_device_mem(pdev)) {
1117111338 e_err(probe, "Cannot re-enable PCI device after reset.\n");
....@@ -11183,13 +11350,6 @@
1118311350 ixgbe_reset(adapter);
1118411351 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
1118511352 result = PCI_ERS_RESULT_RECOVERED;
11186
- }
11187
-
11188
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
11189
- if (err) {
11190
- e_dev_err("pci_cleanup_aer_uncorrect_error_status "
11191
- "failed 0x%0x\n", err);
11192
- /* non-fatal, continue */
1119311353 }
1119411354
1119511355 return result;
....@@ -11229,16 +11389,15 @@
1122911389 .resume = ixgbe_io_resume,
1123011390 };
1123111391
11392
+static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
11393
+
1123211394 static struct pci_driver ixgbe_driver = {
11233
- .name = ixgbe_driver_name,
11234
- .id_table = ixgbe_pci_tbl,
11235
- .probe = ixgbe_probe,
11236
- .remove = ixgbe_remove,
11237
-#ifdef CONFIG_PM
11238
- .suspend = ixgbe_suspend,
11239
- .resume = ixgbe_resume,
11240
-#endif
11241
- .shutdown = ixgbe_shutdown,
11395
+ .name = ixgbe_driver_name,
11396
+ .id_table = ixgbe_pci_tbl,
11397
+ .probe = ixgbe_probe,
11398
+ .remove = ixgbe_remove,
11399
+ .driver.pm = &ixgbe_pm_ops,
11400
+ .shutdown = ixgbe_shutdown,
1124211401 .sriov_configure = ixgbe_pci_sriov_configure,
1124311402 .err_handler = &ixgbe_err_handler
1124411403 };
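
SIMPLE_DEV_PM_OPS builds a dev_pm_ops that reuses one suspend/resume pair for all system-sleep transitions, which is why ixgbe_suspend()/ixgbe_resume() now take a struct device and are __maybe_unused (only referenced under CONFIG_PM_SLEEP), and why the hand-rolled pci_save_state()/pci_set_power_state() calls could go: the PCI core performs those around the callbacks. Rough expansion:

    /* what SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume)
     * yields when CONFIG_PM_SLEEP is set:
     */
    static const struct dev_pm_ops ixgbe_pm_ops_sketch = {
        .suspend  = ixgbe_suspend,  .resume  = ixgbe_resume,
        .freeze   = ixgbe_suspend,  .thaw    = ixgbe_resume,
        .poweroff = ixgbe_suspend,  .restore = ixgbe_resume,
    };
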
....@@ -11252,7 +11411,7 @@
1125211411 static int __init ixgbe_init_module(void)
1125311412 {
1125411413 int ret;
11255
- pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
11414
+ pr_info("%s\n", ixgbe_driver_string);
1125611415 pr_info("%s\n", ixgbe_copyright);
1125711416
1125811417 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);