2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -30,6 +30,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/atomic.h>
+#include <net/xfrm.h>

 #include "ixgbevf.h"

@@ -37,10 +38,8 @@
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

-#define DRV_VERSION "4.1.0-k"
-const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
-	"Copyright (c) 2009 - 2015 Intel Corporation.";
+	"Copyright (c) 2009 - 2018 Intel Corporation.";

 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
 	[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -79,8 +78,7 @@

 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL v2");

 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 static int debug = -1;
@@ -248,8 +246,9 @@
 /**
  * ixgbevf_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
+ * @txqueue: transmit queue hanging (unused)
  **/
-static void ixgbevf_tx_timeout(struct net_device *netdev)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

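Note: the added parameter follows the tree-wide change to the .ndo_tx_timeout callback, which now reports which transmit queue stalled; ixgbevf resets the whole function regardless, hence __always_unused. A minimal sketch of the callback wiring (all names below are illustrative, not taken from this driver):

#include <linux/netdevice.h>

static void example_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	/* txqueue identifies the hung Tx queue; log it and trigger recovery */
	netdev_err(netdev, "Tx queue %u hung, scheduling reset\n", txqueue);
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_tx_timeout = example_tx_timeout,
};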
@@ -268,7 +267,7 @@
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbevf_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
-	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
 	unsigned int budget = tx_ring->count / 2;
 	unsigned int i = tx_ring->next_to_clean;

@@ -299,6 +298,8 @@
 		/* update the statistics for this packet */
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
+		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
+			total_ipsec++;

 		/* free the skb */
 		if (ring_is_xdp(tx_ring))
@@ -361,6 +362,7 @@
 	u64_stats_update_end(&tx_ring->syncp);
 	q_vector->tx.total_bytes += total_bytes;
 	q_vector->tx.total_packets += total_packets;
+	adapter->tx_ipsec += total_ipsec;

 	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
 		struct ixgbe_hw *hw = &adapter->hw;
@@ -515,6 +517,9 @@
 		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 	}
+
+	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
+		ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);

 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
@@ -862,10 +867,8 @@
 	struct sk_buff *skb;

 	/* prefetch first cache line of first page */
-	prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data);
+
 	/* Note, we get here by enabling legacy-rx via:
 	 *
 	 *    ethtool --set-priv-flags <dev> legacy-rx on
@@ -889,7 +892,8 @@
 	/* Determine available headroom for copy */
 	headlen = size;
 	if (headlen > IXGBEVF_RX_HDR_SIZE)
-		headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
+		headlen = eth_get_headlen(skb->dev, xdp->data,
+					  IXGBEVF_RX_HDR_SIZE);

 	/* align pull length to size of long to optimize memcpy performance */
 	memcpy(__skb_put(skb, headlen), xdp->data,
@@ -942,10 +946,7 @@
 	 * have a consumer accessing first few bytes of meta data,
 	 * and then actual data.
 	 */
-	prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data_meta);

 	/* build an skb around the page buffer */
 	skb = build_skb(xdp->data_hard_start, truesize);
@@ -1012,7 +1013,7 @@
 	context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
 	context_desc->vlan_macip_lens =
 		cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
-	context_desc->seqnum_seed = 0;
+	context_desc->fceof_saidx = 0;
 	context_desc->type_tucmd_mlhl =
 		cpu_to_le32(IXGBE_TXD_CMD_DEXT |
 			    IXGBE_ADVTXD_DTYP_CTXT);
@@ -1076,11 +1077,11 @@
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
-		/* fallthrough */
+		fallthrough;
 	case XDP_ABORTED:
out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-		/* fallthrough -- handle aborts by dropping packet */
+		fallthrough; /* handle aborts by dropping packet */
 	case XDP_DROP:
 		result = IXGBEVF_XDP_CONSUMED;
 		break;
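Note: fallthrough is the pseudo-keyword from include/linux/compiler_attributes.h that replaced /* fall through */ comments, making deliberate case fall-through visible to the compiler's implicit-fallthrough warning. A simplified view of its definition (the real macro carries more feature tests):

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif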
@@ -1090,19 +1091,31 @@
 	return ERR_PTR(-result);
 }

+static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
+					      unsigned int size)
+{
+	unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+	truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+	truesize = ring_uses_build_skb(rx_ring) ?
+		   SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+		   SKB_DATA_ALIGN(size);
+#endif
+	return truesize;
+}
+
 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
 				   struct ixgbevf_rx_buffer *rx_buffer,
 				   unsigned int size)
 {
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+	unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);

+#if (PAGE_SIZE < 8192)
 	rx_buffer->page_offset ^= truesize;
 #else
-	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
-				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
-				SKB_DATA_ALIGN(size);
-
 	rx_buffer->page_offset += truesize;
 #endif
 }
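Note: the new helper centralizes the buffer truesize computation so the XDP path below can reuse it for xdp.frame_sz. A worked example of the small-page case (a sketch, assuming PAGE_SIZE == 4096 and a one-page ring buffer): truesize is 4096 / 2 = 2048, a power of two, so the XOR flip alternates between the two page halves:

	unsigned int page_offset = 0;

	page_offset ^= 2048;	/* 1st flip: second half of the page */
	page_offset ^= 2048;	/* 2nd flip: back to the first half  */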
@@ -1119,6 +1132,11 @@
 	struct xdp_buff xdp;

 	xdp.rxq = &rx_ring->xdp_rxq;
+
+	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+	xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+#endif

 	while (likely(total_rx_packets < budget)) {
 		struct ixgbevf_rx_buffer *rx_buffer;
@@ -1152,7 +1170,10 @@
 			xdp.data_hard_start = xdp.data -
 					      ixgbevf_rx_offset(rx_ring);
 			xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depend on len size */
+			xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
+#endif
 			skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
 		}

@@ -1290,16 +1311,20 @@
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
-	/* all work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-	if (adapter->rx_itr_setting == 1)
-		ixgbevf_set_itr(q_vector);
-	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
-	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
-		ixgbevf_irq_enable_queues(adapter,
-					  BIT(q_vector->v_idx));

-	return 0;
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done))) {
+		if (adapter->rx_itr_setting == 1)
+			ixgbevf_set_itr(q_vector);
+		if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+		    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
+			ixgbevf_irq_enable_queues(adapter,
+						  BIT(q_vector->v_idx));
+	}
+
+	return min(work_done, budget - 1);
 }

 /**
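Note: napi_complete_done() returns false while the stack is busy-polling this queue, in which case the interrupt must stay masked, and a poll routine signals completion by returning strictly less than budget, hence min(work_done, budget - 1). The contract reduced to a skeleton (driver specifics elided):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... clean Tx/Rx rings here, accumulating work_done up to budget ... */

	if (work_done == budget)
		return budget;	/* work remains: stay in polling mode */

	/* Re-arm the interrupt only when nobody is busy-polling us */
	if (likely(napi_complete_done(napi, work_done))) {
		/* ... re-enable this queue's interrupt ... */
	}

	return min(work_done, budget - 1);
}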
@@ -1415,6 +1440,9 @@
 	 */
 	/* what was last interrupt timeslice? */
 	timepassed_us = q_vector->itr >> 2;
+	if (timepassed_us == 0)
+		return;
+
 	bytes_perint = bytes / timepassed_us; /* bytes/usec */

 	switch (itr_setting) {
@@ -2199,6 +2227,7 @@
 	ixgbevf_set_rx_mode(adapter->netdev);

 	ixgbevf_restore_vlan(adapter);
+	ixgbevf_ipsec_restore(adapter);

 	ixgbevf_configure_tx(adapter);
 	ixgbevf_configure_rx(adapter);
@@ -2245,11 +2274,14 @@
 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int api[] = { ixgbe_mbox_api_13,
-		      ixgbe_mbox_api_12,
-		      ixgbe_mbox_api_11,
-		      ixgbe_mbox_api_10,
-		      ixgbe_mbox_api_unknown };
+	static const int api[] = {
+		ixgbe_mbox_api_14,
+		ixgbe_mbox_api_13,
+		ixgbe_mbox_api_12,
+		ixgbe_mbox_api_11,
+		ixgbe_mbox_api_10,
+		ixgbe_mbox_api_unknown
+	};
 	int err, idx = 0;

 	spin_lock_bh(&adapter->mbx_lock);
@@ -2494,12 +2526,11 @@

 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
 {
-	WARN_ON(in_interrupt());
-
 	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
 		msleep(1);

 	ixgbevf_down(adapter);
+	pci_set_master(adapter->pdev);
 	ixgbevf_up(adapter);

 	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
@@ -2569,7 +2600,7 @@
  * important, starting with the "most" number of features turned on at once,
  * and ending with the smallest set of features. This way large combinations
  * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
+ * fall through conditions.
  *
  **/
 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
@@ -2604,6 +2635,7 @@
 	case ixgbe_mbox_api_11:
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
+	case ixgbe_mbox_api_14:
 		if (adapter->xdp_prog &&
 		    hw->mac.max_tx_queues == rss)
 			rss = rss > 3 ? 2 : 1;
@@ -3699,8 +3731,8 @@
 }

 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
-				u32 vlan_macip_lens, u32 type_tucmd,
-				u32 mss_l4len_idx)
+				u32 vlan_macip_lens, u32 fceof_saidx,
+				u32 type_tucmd, u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	u16 i = tx_ring->next_to_use;
@@ -3714,14 +3746,15 @@
 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

 	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-	context_desc->seqnum_seed = 0;
+	context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
 	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 }

 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 		       struct ixgbevf_tx_buffer *first,
-		       u8 *hdr_len)
+		       u8 *hdr_len,
+		       struct ixgbevf_ipsec_tx_data *itd)
 {
 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
@@ -3735,6 +3768,7 @@
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
+	u32 fceof_saidx = 0;
 	int err;

 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3760,13 +3794,15 @@
 	if (ip.v4->version == 4) {
 		unsigned char *csum_start = skb_checksum_start(skb);
 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+		int len = csum_start - trans_start;

 		/* IP header will have to cancel out any data that
-		 * is not a part of the outer IP header
+		 * is not a part of the outer IP header, so set to
+		 * a reverse csum if needed, else init check to 0.
 		 */
-		ip.v4->check = csum_fold(csum_partial(trans_start,
-						      csum_start - trans_start,
-						      0));
+		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
+			       csum_fold(csum_partial(trans_start,
+						      len, 0)) : 0;
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

 		ip.v4->tot_len = 0;
@@ -3798,13 +3834,16 @@
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);

+	fceof_saidx |= itd->pfsa;
+	type_tucmd |= itd->flags | itd->trailer_len;
+
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
 	vlan_macip_lens = l4.hdr - ip.hdr;
 	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

-	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
-			    type_tucmd, mss_l4len_idx);
+	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
+			    mss_l4len_idx);

 	return 1;
 }
@@ -3819,10 +3858,12 @@
 }

 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
-			    struct ixgbevf_tx_buffer *first)
+			    struct ixgbevf_tx_buffer *first,
+			    struct ixgbevf_ipsec_tx_data *itd)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
+	u32 fceof_saidx = 0;
 	u32 type_tucmd = 0;

 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3831,7 +3872,7 @@
 	switch (skb->csum_offset) {
 	case offsetof(struct tcphdr, check):
 		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		/* fall through */
+		fallthrough;
 	case offsetof(struct udphdr, check):
 		break;
 	case offsetof(struct sctphdr, checksum):
@@ -3843,7 +3884,7 @@
 			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	default:
 		skb_checksum_help(skb);
 		goto no_csum;
@@ -3861,7 +3902,11 @@
 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

-	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+	fceof_saidx |= itd->pfsa;
+	type_tucmd |= itd->flags | itd->trailer_len;
+
+	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+			    fceof_saidx, type_tucmd, 0);
 }

 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3895,8 +3940,12 @@
 	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

-	/* use index 1 context for TSO/FSO/FCOE */
-	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+	/* enable IPsec */
+	if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
+
+	/* use index 1 context for TSO/FSO/FCOE/IPSEC */
+	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
 		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);

 	/* Check Context must be set if Tx switch is enabled, which it
@@ -3914,7 +3963,7 @@
 	struct sk_buff *skb = first->skb;
 	struct ixgbevf_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 	dma_addr_t dma;
 	unsigned int data_len, size;
 	u32 tx_flags = first->tx_flags;
@@ -3988,6 +4037,8 @@

 	/* set the timestamp */
 	first->time_stamp = jiffies;
+
+	skb_tx_timestamp(skb);

 	/* Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch. (Only applicable for weak-ordered
@@ -4078,6 +4129,7 @@
 	int tso;
 	u32 tx_flags = 0;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 	unsigned short f;
 #endif
@@ -4096,8 +4148,11 @@
 	 * otherwise try next time
 	 */
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		count += TXD_USE_COUNT(skb_frag_size(frag));
+	}
 #else
 	count += skb_shinfo(skb)->nr_frags;
 #endif
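Note: struct skb_frag_struct became skb_frag_t, a typedef over struct bio_vec, so frag sizes are read through skb_frag_size() instead of the removed .size member; the rewritten loop above is the mechanical conversion. Illustrative accessor usage:

static unsigned int example_frag_bytes(const struct sk_buff *skb)
{
	unsigned int f, bytes = 0;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		bytes += skb_frag_size(frag);	/* was frag->size */
	}

	return bytes;
}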
@@ -4122,11 +4177,15 @@
 	first->tx_flags = tx_flags;
 	first->protocol = vlan_get_protocol(skb);

-	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+#ifdef CONFIG_IXGBEVF_IPSEC
+	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+		goto out_drop;
+#endif
+	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
-		ixgbevf_tx_csum(tx_ring, first);
+		ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);

 	ixgbevf_tx_map(tx_ring, first, hdr_len);

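Note: xfrm_offload() comes from net/xfrm.h (the include added at the top of this patch); it returns the skb's crypto-offload state, or NULL when the stack did not request hardware IPsec for the packet, so only offloaded frames pay for the SA lookup in ixgbevf_ipsec_tx(). The check in isolation (illustrative name):

#include <net/xfrm.h>

/* True when the stack expects the NIC to encrypt/authenticate this frame */
static bool example_wants_ipsec_offload(struct sk_buff *skb)
{
	return xfrm_offload(skb) != NULL;
}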
@@ -4236,13 +4295,10 @@
 	return 0;
 }

-static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct net_device *netdev = dev_get_drvdata(dev_d);
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
-	int retval = 0;
-#endif

 	rtnl_lock();
 	netif_device_detach(netdev);
@@ -4253,36 +4309,15 @@
 	ixgbevf_clear_interrupt_scheme(adapter);
 	rtnl_unlock();

-#ifdef CONFIG_PM
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
-#endif
-	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
-		pci_disable_device(pdev);
-
 	return 0;
 }

-#ifdef CONFIG_PM
-static int ixgbevf_resume(struct pci_dev *pdev)
+static int __maybe_unused ixgbevf_resume(struct device *dev_d)
 {
+	struct pci_dev *pdev = to_pci_dev(dev_d);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	u32 err;
-
-	pci_restore_state(pdev);
-	/* pci_restore_state clears dev->state_saved so call
-	 * pci_save_state to restore it.
-	 */
-	pci_save_state(pdev);
-
-	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
-		return err;
-	}

 	adapter->hw.hw_addr = adapter->io_addr;
 	smp_mb__before_atomic();
@@ -4304,10 +4339,9 @@
 	return err;
 }

-#endif /* CONFIG_PM */
 static void ixgbevf_shutdown(struct pci_dev *pdev)
 {
-	ixgbevf_suspend(pdev, PMSG_SUSPEND);
+	ixgbevf_suspend(&pdev->dev);
 }

 static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
@@ -4441,15 +4475,9 @@

 static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 {
-	struct ixgbevf_adapter *adapter = netdev_priv(dev);
-
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return ixgbevf_xdp_setup(dev, xdp->prog);
-	case XDP_QUERY_PROG:
-		xdp->prog_id = adapter->xdp_prog ?
-			       adapter->xdp_prog->aux->id : 0;
-		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -4616,6 +4644,7 @@
 	case ixgbe_mbox_api_11:
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
+	case ixgbe_mbox_api_14:
 		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
 				  (ETH_HLEN + ETH_FCS_LEN);
 		break;
@@ -4651,6 +4680,7 @@

 	pci_set_drvdata(pdev, netdev);
 	netif_carrier_off(netdev);
+	ixgbevf_init_ipsec_offload(adapter);

 	ixgbevf_init_last_counter_stats(adapter);

@@ -4717,6 +4747,7 @@
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);

+	ixgbevf_stop_ipsec_offload(adapter);
 	ixgbevf_clear_interrupt_scheme(adapter);
 	ixgbevf_reset_interrupt_capability(adapter);

@@ -4824,16 +4855,17 @@
 	.resume = ixgbevf_io_resume,
 };

+static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+
 static struct pci_driver ixgbevf_driver = {
 	.name = ixgbevf_driver_name,
 	.id_table = ixgbevf_pci_tbl,
 	.probe = ixgbevf_probe,
 	.remove = ixgbevf_remove,
-#ifdef CONFIG_PM
+
 	/* Power Management Hooks */
-	.suspend = ixgbevf_suspend,
-	.resume = ixgbevf_resume,
-#endif
+	.driver.pm = &ixgbevf_pm_ops,
+
 	.shutdown = ixgbevf_shutdown,
 	.err_handler = &ixgbevf_err_handler
 };
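Note: SIMPLE_DEV_PM_OPS builds a dev_pm_ops that routes all system-sleep transitions (suspend/resume, freeze/thaw, poweroff/restore) to the two callbacks, which is what lets the legacy pci_driver .suspend/.resume hooks and their CONFIG_PM ifdefs go away; __maybe_unused on the callbacks avoids unused-function warnings when CONFIG_PM_SLEEP is off. Simplified expansion (see include/linux/pm.h for the real macro):

#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn)		\
	const struct dev_pm_ops name = {			\
		SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)	\
	}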
@@ -4846,9 +4878,9 @@
  **/
 static int __init ixgbevf_init_module(void)
 {
-	pr_info("%s - version %s\n", ixgbevf_driver_string,
-		ixgbevf_driver_version);
+	int err;

+	pr_info("%s\n", ixgbevf_driver_string);
 	pr_info("%s\n", ixgbevf_copyright);
 	ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
 	if (!ixgbevf_wq) {
@@ -4856,7 +4888,13 @@
 		return -ENOMEM;
 	}

-	return pci_register_driver(&ixgbevf_driver);
+	err = pci_register_driver(&ixgbevf_driver);
+	if (err) {
+		destroy_workqueue(ixgbevf_wq);
+		return err;
+	}
+
+	return 0;
 }

 module_init(ixgbevf_init_module);
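Note: the module init path now unwinds in reverse order of setup, so a pci_register_driver() failure no longer leaks the workqueue. The general pattern (example_driver stands in for any pci_driver):

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	int err;

	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	err = pci_register_driver(&example_driver);
	if (err)
		destroy_workqueue(example_wq);	/* undo in reverse order */

	return err;
}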