.. | ..
30 | 30 | #include <linux/bpf.h>
31 | 31 | #include <linux/bpf_trace.h>
32 | 32 | #include <linux/atomic.h>
| 33 | +#include <net/xfrm.h>
33 | 34 |
34 | 35 | #include "ixgbevf.h"
35 | 36 |
.. | ..
37 | 38 | static const char ixgbevf_driver_string[] =
38 | 39 | 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
39 | 40 |
40 | | -#define DRV_VERSION "4.1.0-k"
41 | | -const char ixgbevf_driver_version[] = DRV_VERSION;
42 | 41 | static char ixgbevf_copyright[] =
43 | | -	"Copyright (c) 2009 - 2015 Intel Corporation.";
| 42 | +	"Copyright (c) 2009 - 2018 Intel Corporation.";
44 | 43 |
45 | 44 | static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
46 | 45 | 	[board_82599_vf] = &ixgbevf_82599_vf_info,
.. | ..
79 | 78 |
80 | 79 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
81 | 80 | MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
82 | | -MODULE_LICENSE("GPL");
83 | | -MODULE_VERSION(DRV_VERSION);
| 81 | +MODULE_LICENSE("GPL v2");
84 | 82 |
85 | 83 | #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
86 | 84 | static int debug = -1;
.. | ..
248 | 246 | /**
249 | 247 |  * ixgbevf_tx_timeout - Respond to a Tx Hang
250 | 248 |  * @netdev: network interface device structure
| 249 | + * @txqueue: transmit queue hanging (unused)
251 | 250 |  **/
252 | | -static void ixgbevf_tx_timeout(struct net_device *netdev)
| 251 | +static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
253 | 252 | {
254 | 253 | 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
255 | 254 |
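The new `txqueue` parameter tracks the kernel-wide v5.6 change that passes the index of the hanging queue to the `ndo_tx_timeout` hook. A minimal sketch of wiring a driver to the new prototype; the `example_` names are hypothetical:

```c
#include <linux/netdevice.h>

/* Hypothetical hook matching the post-v5.6 ndo_tx_timeout prototype;
 * a real driver would log the queue and schedule a reset task. */
static void example_tx_timeout(struct net_device *netdev,
			       unsigned int txqueue)
{
	netdev_warn(netdev, "TX hang on queue %u, scheduling reset\n",
		    txqueue);
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_tx_timeout = example_tx_timeout,
};
```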
.. | ..
268 | 267 | 	struct ixgbevf_adapter *adapter = q_vector->adapter;
269 | 268 | 	struct ixgbevf_tx_buffer *tx_buffer;
270 | 269 | 	union ixgbe_adv_tx_desc *tx_desc;
271 | | -	unsigned int total_bytes = 0, total_packets = 0;
| 270 | +	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
272 | 271 | 	unsigned int budget = tx_ring->count / 2;
273 | 272 | 	unsigned int i = tx_ring->next_to_clean;
274 | 273 |
.. | ..
299 | 298 | 		/* update the statistics for this packet */
300 | 299 | 		total_bytes += tx_buffer->bytecount;
301 | 300 | 		total_packets += tx_buffer->gso_segs;
| 301 | +		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
| 302 | +			total_ipsec++;
302 | 303 |
303 | 304 | 		/* free the skb */
304 | 305 | 		if (ring_is_xdp(tx_ring))
.. | ..
361 | 362 | 	u64_stats_update_end(&tx_ring->syncp);
362 | 363 | 	q_vector->tx.total_bytes += total_bytes;
363 | 364 | 	q_vector->tx.total_packets += total_packets;
| 365 | +	adapter->tx_ipsec += total_ipsec;
364 | 366 |
365 | 367 | 	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
366 | 368 | 		struct ixgbe_hw *hw = &adapter->hw;
.. | ..
515 | 517 | 		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
516 | 518 | 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
517 | 519 | 	}
| 520 | +
| 521 | +	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
| 522 | +		ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
518 | 523 |
519 | 524 | 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
520 | 525 | }
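The new Rx hook fires only when the descriptor's SECP (security processing) status bit is set. For reference, the bit test it relies on is a masked compare against the little-endian status word; a hedged sketch of the helper (the real one lives in ixgbevf.h):

```c
/* Hedged sketch of ixgbevf_test_staterr(): the constant is converted
 * to little-endian once instead of byte-swapping the descriptor's
 * status_error field on every test. */
static inline bool example_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return !!(rx_desc->wb.upper.status_error &
		  cpu_to_le32(stat_err_bits));
}
```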
.. | ..
862 | 867 | 	struct sk_buff *skb;
863 | 868 |
864 | 869 | 	/* prefetch first cache line of first page */
865 | | -	prefetch(xdp->data);
866 | | -#if L1_CACHE_BYTES < 128
867 | | -	prefetch(xdp->data + L1_CACHE_BYTES);
868 | | -#endif
| 870 | +	net_prefetch(xdp->data);
| 871 | +
869 | 872 | 	/* Note, we get here by enabling legacy-rx via:
870 | 873 | 	 *
871 | 874 | 	 *    ethtool --set-priv-flags <dev> legacy-rx on
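`net_prefetch()` centralizes the open-coded two-line prefetch being deleted here (and again further below). A sketch of the helper as I understand it from `<linux/netdevice.h>` in recent kernels:

```c
/* Prefetch the first 128 bytes of frame data: one cache line always,
 * a second one only where cache lines are shorter than 128 bytes. */
static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}
```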
.. | ..
889 | 892 | 	/* Determine available headroom for copy */
890 | 893 | 	headlen = size;
891 | 894 | 	if (headlen > IXGBEVF_RX_HDR_SIZE)
892 | | -		headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
| 895 | +		headlen = eth_get_headlen(skb->dev, xdp->data,
| 896 | +					  IXGBEVF_RX_HDR_SIZE);
893 | 897 |
894 | 898 | 	/* align pull length to size of long to optimize memcpy performance */
895 | 899 | 	memcpy(__skb_put(skb, headlen), xdp->data,
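The extra argument mirrors the v5.2 change that gave `eth_get_headlen()` access to the receiving device, so the flow dissector can consult a device-attached BPF flow-dissector program when sizing the header pull. A hedged usage sketch with a hypothetical wrapper:

```c
/* Hedged sketch: size the linear (copied) part of a received frame.
 * 'data' points at the frame; 'max' caps the pull (IXGBEVF_RX_HDR_SIZE
 * in this driver). */
static unsigned int example_headlen(struct net_device *dev, void *data,
				    unsigned int size, unsigned int max)
{
	if (size <= max)
		return size;		/* whole frame fits */

	return eth_get_headlen(dev, data, max);	/* pull headers only */
}
```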
.. | ..
942 | 946 | 	 * have a consumer accessing first few bytes of meta data,
943 | 947 | 	 * and then actual data.
944 | 948 | 	 */
945 | | -	prefetch(xdp->data_meta);
946 | | -#if L1_CACHE_BYTES < 128
947 | | -	prefetch(xdp->data_meta + L1_CACHE_BYTES);
948 | | -#endif
| 949 | +	net_prefetch(xdp->data_meta);
949 | 950 |
950 | 951 | 	/* build an skb around the page buffer */
951 | 952 | 	skb = build_skb(xdp->data_hard_start, truesize);
.. | ..
1012 | 1013 | 	context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
1013 | 1014 | 	context_desc->vlan_macip_lens =
1014 | 1015 | 		cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
1015 | | -	context_desc->seqnum_seed = 0;
| 1016 | +	context_desc->fceof_saidx = 0;
1016 | 1017 | 	context_desc->type_tucmd_mlhl =
1017 | 1018 | 		cpu_to_le32(IXGBE_TXD_CMD_DEXT |
1018 | 1019 | 			    IXGBE_ADVTXD_DTYP_CTXT);
.. | ..
1076 | 1077 | 		break;
1077 | 1078 | 	default:
1078 | 1079 | 		bpf_warn_invalid_xdp_action(act);
1079 | | -		/* fallthrough */
| 1080 | +		fallthrough;
1080 | 1081 | 	case XDP_ABORTED:
1081 | 1082 | out_failure:
1082 | 1083 | 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1083 | | -		/* fallthrough -- handle aborts by dropping packet */
| 1084 | +		fallthrough; /* handle aborts by dropping packet */
1084 | 1085 | 	case XDP_DROP:
1085 | 1086 | 		result = IXGBEVF_XDP_CONSUMED;
1086 | 1087 | 		break;
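The comment-to-statement conversions in this hunk (and the two in `ixgbevf_tx_csum()` further down) use the `fallthrough` pseudo-keyword the kernel introduced in v5.4, so compilers, not just readers, can verify that a missing `break` is intentional. Roughly, and hedged, the definition in `<linux/compiler_attributes.h>`:

```c
/* Hedged sketch of the kernel's definition: the compiler attribute
 * when available, otherwise a harmless empty statement. */
#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif
```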
.. | ..
1090 | 1091 | 	return ERR_PTR(-result);
1091 | 1092 | }
1092 | 1093 |
| 1094 | +static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
| 1095 | +					      unsigned int size)
| 1096 | +{
| 1097 | +	unsigned int truesize;
| 1098 | +
| 1099 | +#if (PAGE_SIZE < 8192)
| 1100 | +	truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
| 1101 | +#else
| 1102 | +	truesize = ring_uses_build_skb(rx_ring) ?
| 1103 | +		   SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
| 1104 | +		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
| 1105 | +		   SKB_DATA_ALIGN(size);
| 1106 | +#endif
| 1107 | +	return truesize;
| 1108 | +}
| 1109 | +
1093 | 1110 | static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1094 | 1111 | 				   struct ixgbevf_rx_buffer *rx_buffer,
1095 | 1112 | 				   unsigned int size)
1096 | 1113 | {
1097 | | -#if (PAGE_SIZE < 8192)
1098 | | -	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
| 1114 | +	unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
1099 | 1115 |
| 1116 | +#if (PAGE_SIZE < 8192)
1100 | 1117 | 	rx_buffer->page_offset ^= truesize;
1101 | 1118 | #else
1102 | | -	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1103 | | -				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
1104 | | -				SKB_DATA_ALIGN(size);
1105 | | -
1106 | 1119 | 	rx_buffer->page_offset += truesize;
1107 | 1120 | #endif
1108 | 1121 | }
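To make the truesize math concrete: on 4 KiB pages the driver halves each page, so truesize is a constant power of two regardless of frame length; on larger pages it grows with the frame. A worked example under those assumptions:

```c
/* Worked example (assumes the driver's default 4 KiB page split, then
 * an 8 KiB+ page with build_skb and a 1500-byte frame):
 *
 * PAGE_SIZE == 4096:
 *	truesize = ixgbevf_rx_pg_size(rx_ring) / 2 = 2048
 *	(constant, so xdp.frame_sz can be set once per poll, see below)
 *
 * PAGE_SIZE >= 8192, build_skb, size == 1500:
 *	truesize = SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + 1500) +
 *		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *	(varies per frame, so xdp.frame_sz is refreshed per descriptor)
 */
```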
.. | ..
1119 | 1132 | 	struct xdp_buff xdp;
1120 | 1133 |
1121 | 1134 | 	xdp.rxq = &rx_ring->xdp_rxq;
| 1135 | +
| 1136 | +	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
| 1137 | +#if (PAGE_SIZE < 8192)
| 1138 | +	xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
| 1139 | +#endif
1122 | 1140 |
1123 | 1141 | 	while (likely(total_rx_packets < budget)) {
1124 | 1142 | 		struct ixgbevf_rx_buffer *rx_buffer;
.. | ..
1152 | 1170 | 			xdp.data_hard_start = xdp.data -
1153 | 1171 | 					      ixgbevf_rx_offset(rx_ring);
1154 | 1172 | 			xdp.data_end = xdp.data + size;
1155 | | -
| 1173 | +#if (PAGE_SIZE > 4096)
| 1174 | +			/* At larger PAGE_SIZE, frame_sz depend on len size */
| 1175 | +			xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
| 1176 | +#endif
1156 | 1177 | 			skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1157 | 1178 | 		}
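`xdp.frame_sz` tells the XDP core how large the whole underlying buffer is (headroom + data + tailroom), which it needs to validate `bpf_xdp_adjust_tail()` growth and to convert the buffer into an `xdp_frame`. A hedged sketch of the per-buffer setup pattern used above, with hypothetical `example_` naming:

```c
#include <net/xdp.h>

/* Hedged sketch: frame_sz covers the whole buffer the frame sits in,
 * not just the received length, so XDP knows how far data_end may
 * legally grow. */
static void example_xdp_buff_init(struct xdp_buff *xdp,
				  struct xdp_rxq_info *rxq, void *buf,
				  unsigned int headroom,
				  unsigned int rcv_len,
				  unsigned int truesize)
{
	xdp->rxq = rxq;			/* per-ring registration */
	xdp->frame_sz = truesize;	/* headroom + data + tailroom */
	xdp->data_hard_start = buf;	/* start of the DMA buffer */
	xdp->data = buf + headroom;	/* first byte of the frame */
	xdp->data_end = xdp->data + rcv_len;
}
```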
1158 | 1179 |
.. | ..
1290 | 1311 | 	/* If all work not completed, return budget and keep polling */
1291 | 1312 | 	if (!clean_complete)
1292 | 1313 | 		return budget;
1293 | | -	/* all work done, exit the polling mode */
1294 | | -	napi_complete_done(napi, work_done);
1295 | | -	if (adapter->rx_itr_setting == 1)
1296 | | -		ixgbevf_set_itr(q_vector);
1297 | | -	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1298 | | -	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1299 | | -		ixgbevf_irq_enable_queues(adapter,
1300 | | -					  BIT(q_vector->v_idx));
1301 | 1314 |
1302 | | -	return 0;
| 1315 | +	/* Exit the polling mode, but don't re-enable interrupts if stack might
| 1316 | +	 * poll us due to busy-polling
| 1317 | +	 */
| 1318 | +	if (likely(napi_complete_done(napi, work_done))) {
| 1319 | +		if (adapter->rx_itr_setting == 1)
| 1320 | +			ixgbevf_set_itr(q_vector);
| 1321 | +		if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
| 1322 | +		    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
| 1323 | +			ixgbevf_irq_enable_queues(adapter,
| 1324 | +						  BIT(q_vector->v_idx));
| 1325 | +	}
| 1326 | +
| 1327 | +	return min(work_done, budget - 1);
1303 | 1328 | }
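This is the now-canonical busy-poll-aware NAPI completion: `napi_complete_done()` returns false when the core keeps the instance scheduled for busy polling, in which case interrupts must stay masked, and returning at most `budget - 1` tells the core the budget was not exhausted. The generic shape, as a hedged sketch with placeholder helpers:

```c
/* Hedged sketch of the completion pattern used above; example_*()
 * helpers are placeholders for the driver's ring cleanup and IRQ
 * re-enable paths. */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_clean_rings(napi, budget);

	if (work_done == budget)
		return budget;		/* more work: stay scheduled */

	if (likely(napi_complete_done(napi, work_done)))
		example_irq_enable(napi);	/* safe to re-arm IRQs */

	return min(work_done, budget - 1);
}
```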
1304 | 1329 |
1305 | 1330 | /**
.. | ..
1415 | 1440 | 	 */
1416 | 1441 | 	/* what was last interrupt timeslice? */
1417 | 1442 | 	timepassed_us = q_vector->itr >> 2;
| 1443 | +	if (timepassed_us == 0)
| 1444 | +		return;
| 1445 | +
1418 | 1446 | 	bytes_perint = bytes / timepassed_us; /* bytes/usec */
1419 | 1447 |
1420 | 1448 | 	switch (itr_setting) {
.. | ..
2199 | 2227 | 	ixgbevf_set_rx_mode(adapter->netdev);
2200 | 2228 |
2201 | 2229 | 	ixgbevf_restore_vlan(adapter);
| 2230 | +	ixgbevf_ipsec_restore(adapter);
2202 | 2231 |
2203 | 2232 | 	ixgbevf_configure_tx(adapter);
2204 | 2233 | 	ixgbevf_configure_rx(adapter);
.. | ..
2245 | 2274 | static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2246 | 2275 | {
2247 | 2276 | 	struct ixgbe_hw *hw = &adapter->hw;
2248 | | -	int api[] = { ixgbe_mbox_api_13,
2249 | | -		      ixgbe_mbox_api_12,
2250 | | -		      ixgbe_mbox_api_11,
2251 | | -		      ixgbe_mbox_api_10,
2252 | | -		      ixgbe_mbox_api_unknown };
| 2277 | +	static const int api[] = {
| 2278 | +		ixgbe_mbox_api_14,
| 2279 | +		ixgbe_mbox_api_13,
| 2280 | +		ixgbe_mbox_api_12,
| 2281 | +		ixgbe_mbox_api_11,
| 2282 | +		ixgbe_mbox_api_10,
| 2283 | +		ixgbe_mbox_api_unknown
| 2284 | +	};
2253 | 2285 | 	int err, idx = 0;
2254 | 2286 |
2255 | 2287 | 	spin_lock_bh(&adapter->mbx_lock);
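The table is ordered newest-first so the VF settles on the highest mailbox API the PF will accept (api_14 carries the VF/PF IPsec offload messages used elsewhere in this patch); making it `static const` moves it off the stack into rodata. The consuming loop, unchanged by this hunk, is roughly:

```c
/* Hedged, lightly simplified sketch; example_negotiate() stands in
 * for the driver's mailbox negotiate call. */
while (api[idx] != ixgbe_mbox_api_unknown) {
	err = example_negotiate(hw, api[idx]);
	if (!err)
		break;		/* PF accepted this version */
	idx++;
}
```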
.. | ..
2494 | 2526 |
2495 | 2527 | void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2496 | 2528 | {
2497 | | -	WARN_ON(in_interrupt());
2498 | | -
2499 | 2529 | 	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2500 | 2530 | 		msleep(1);
2501 | 2531 |
2502 | 2532 | 	ixgbevf_down(adapter);
| 2533 | +	pci_set_master(adapter->pdev);
2503 | 2534 | 	ixgbevf_up(adapter);
2504 | 2535 |
2505 | 2536 | 	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
.. | ..
2569 | 2600 |  * important, starting with the "most" number of features turned on at once,
2570 | 2601 |  * and ending with the smallest set of features. This way large combinations
2571 | 2602 |  * can be allocated if they're turned on, and smaller combinations are the
2572 | | - * fallthrough conditions.
| 2603 | + * fall through conditions.
2573 | 2604 |  *
2574 | 2605 |  **/
2575 | 2606 | static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
.. | ..
2604 | 2635 | 	case ixgbe_mbox_api_11:
2605 | 2636 | 	case ixgbe_mbox_api_12:
2606 | 2637 | 	case ixgbe_mbox_api_13:
| 2638 | +	case ixgbe_mbox_api_14:
2607 | 2639 | 		if (adapter->xdp_prog &&
2608 | 2640 | 		    hw->mac.max_tx_queues == rss)
2609 | 2641 | 			rss = rss > 3 ? 2 : 1;
.. | ..
3699 | 3731 | }
3700 | 3732 |
3701 | 3733 | static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3702 | | -				u32 vlan_macip_lens, u32 type_tucmd,
3703 | | -				u32 mss_l4len_idx)
| 3734 | +				u32 vlan_macip_lens, u32 fceof_saidx,
| 3735 | +				u32 type_tucmd, u32 mss_l4len_idx)
3704 | 3736 | {
3705 | 3737 | 	struct ixgbe_adv_tx_context_desc *context_desc;
3706 | 3738 | 	u16 i = tx_ring->next_to_use;
.. | ..
3714 | 3746 | 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3715 | 3747 |
3716 | 3748 | 	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3717 | | -	context_desc->seqnum_seed = 0;
| 3749 | +	context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
3718 | 3750 | 	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3719 | 3751 | 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3720 | 3752 | }
3721 | 3753 |
3722 | 3754 | static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3723 | 3755 | 		       struct ixgbevf_tx_buffer *first,
3724 | | -		       u8 *hdr_len)
| 3756 | +		       u8 *hdr_len,
| 3757 | +		       struct ixgbevf_ipsec_tx_data *itd)
3725 | 3758 | {
3726 | 3759 | 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3727 | 3760 | 	struct sk_buff *skb = first->skb;
.. | ..
3735 | 3768 | 		unsigned char *hdr;
3736 | 3769 | 	} l4;
3737 | 3770 | 	u32 paylen, l4_offset;
| 3771 | +	u32 fceof_saidx = 0;
3738 | 3772 | 	int err;
3739 | 3773 |
3740 | 3774 | 	if (skb->ip_summed != CHECKSUM_PARTIAL)
.. | ..
3760 | 3794 | 	if (ip.v4->version == 4) {
3761 | 3795 | 		unsigned char *csum_start = skb_checksum_start(skb);
3762 | 3796 | 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
| 3797 | +		int len = csum_start - trans_start;
3763 | 3798 |
3764 | 3799 | 		/* IP header will have to cancel out any data that
3765 | | -		 * is not a part of the outer IP header
| 3800 | +		 * is not a part of the outer IP header, so set to
| 3801 | +		 * a reverse csum if needed, else init check to 0.
3766 | 3802 | 		 */
3767 | | -		ip.v4->check = csum_fold(csum_partial(trans_start,
3768 | | -						      csum_start - trans_start,
3769 | | -						      0));
| 3803 | +		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
| 3804 | +			       csum_fold(csum_partial(trans_start,
| 3805 | +						      len, 0)) : 0;
3770 | 3806 | 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3771 | 3807 |
3772 | 3808 | 		ip.v4->tot_len = 0;
.. | ..
3798 | 3834 | 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3799 | 3835 | 	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3800 | 3836 |
| 3837 | +	fceof_saidx |= itd->pfsa;
| 3838 | +	type_tucmd |= itd->flags | itd->trailer_len;
| 3839 | +
3801 | 3840 | 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3802 | 3841 | 	vlan_macip_lens = l4.hdr - ip.hdr;
3803 | 3842 | 	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3804 | 3843 | 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3805 | 3844 |
3806 | | -	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3807 | | -			    type_tucmd, mss_l4len_idx);
| 3845 | +	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
| 3846 | +			    mss_l4len_idx);
3808 | 3847 |
3809 | 3848 | 	return 1;
3810 | 3849 | }
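Both `ixgbevf_tso()` and `ixgbevf_tx_csum()` now thread a per-packet IPsec block through to the context descriptor: `pfsa` lands in the new `fceof_saidx` field, and the flag and trailer-length bits are ORed into `type_tucmd`. A hedged sketch of that block, inferred from the fields used in this patch (the authoritative definition lives in the driver's IPsec header):

```c
/* Hedged sketch: per-packet IPsec Tx metadata filled by the offload
 * path and merged into the advanced context descriptor. */
struct example_ipsec_tx_data {
	u32 flags;		/* descriptor type bits for type_tucmd */
	u16 trailer_len;	/* ESP trailer length, also into type_tucmd */
	u16 pfsa;		/* PF's SA table index -> fceof_saidx */
};
```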
.. | ..
3819 | 3858 | }
3820 | 3859 |
3821 | 3860 | static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3822 | | -			    struct ixgbevf_tx_buffer *first)
| 3861 | +			    struct ixgbevf_tx_buffer *first,
| 3862 | +			    struct ixgbevf_ipsec_tx_data *itd)
3823 | 3863 | {
3824 | 3864 | 	struct sk_buff *skb = first->skb;
3825 | 3865 | 	u32 vlan_macip_lens = 0;
| 3866 | +	u32 fceof_saidx = 0;
3826 | 3867 | 	u32 type_tucmd = 0;
3827 | 3868 |
3828 | 3869 | 	if (skb->ip_summed != CHECKSUM_PARTIAL)
.. | ..
3831 | 3872 | 	switch (skb->csum_offset) {
3832 | 3873 | 	case offsetof(struct tcphdr, check):
3833 | 3874 | 		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3834 | | -		/* fall through */
| 3875 | +		fallthrough;
3835 | 3876 | 	case offsetof(struct udphdr, check):
3836 | 3877 | 		break;
3837 | 3878 | 	case offsetof(struct sctphdr, checksum):
.. | ..
3843 | 3884 | 			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3844 | 3885 | 			break;
3845 | 3886 | 		}
3846 | | -		/* fall through */
| 3887 | +		fallthrough;
3847 | 3888 | 	default:
3848 | 3889 | 		skb_checksum_help(skb);
3849 | 3890 | 		goto no_csum;
.. | ..
3861 | 3902 | 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3862 | 3903 | 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3863 | 3904 |
3864 | | -	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
| 3905 | +	fceof_saidx |= itd->pfsa;
| 3906 | +	type_tucmd |= itd->flags | itd->trailer_len;
| 3907 | +
| 3908 | +	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
| 3909 | +			    fceof_saidx, type_tucmd, 0);
3865 | 3910 | }
3866 | 3911 |
3867 | 3912 | static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
.. | ..
3895 | 3940 | 	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3896 | 3941 | 		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3897 | 3942 |
3898 | | -	/* use index 1 context for TSO/FSO/FCOE */
3899 | | -	if (tx_flags & IXGBE_TX_FLAGS_TSO)
| 3943 | +	/* enable IPsec */
| 3944 | +	if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
| 3945 | +		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
| 3946 | +
| 3947 | +	/* use index 1 context for TSO/FSO/FCOE/IPSEC */
| 3948 | +	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
3900 | 3949 | 		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3901 | 3950 |
3902 | 3951 | 	/* Check Context must be set if Tx switch is enabled, which it
.. | ..
3914 | 3963 | 	struct sk_buff *skb = first->skb;
3915 | 3964 | 	struct ixgbevf_tx_buffer *tx_buffer;
3916 | 3965 | 	union ixgbe_adv_tx_desc *tx_desc;
3917 | | -	struct skb_frag_struct *frag;
| 3966 | +	skb_frag_t *frag;
3918 | 3967 | 	dma_addr_t dma;
3919 | 3968 | 	unsigned int data_len, size;
3920 | 3969 | 	u32 tx_flags = first->tx_flags;
.. | ..
3988 | 4037 |
3989 | 4038 | 	/* set the timestamp */
3990 | 4039 | 	first->time_stamp = jiffies;
| 4040 | +
| 4041 | +	skb_tx_timestamp(skb);
3991 | 4042 |
3992 | 4043 | 	/* Force memory writes to complete before letting h/w know there
3993 | 4044 | 	 * are new descriptors to fetch. (Only applicable for weak-ordered
.. | ..
4078 | 4129 | 	int tso;
4079 | 4130 | 	u32 tx_flags = 0;
4080 | 4131 | 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
| 4132 | +	struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
4081 | 4133 | #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4082 | 4134 | 	unsigned short f;
4083 | 4135 | #endif
.. | ..
4096 | 4148 | 	 * otherwise try next time
4097 | 4149 | 	 */
4098 | 4150 | #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4099 | | -	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4100 | | -		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
| 4151 | +	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
| 4152 | +		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
| 4153 | +
| 4154 | +		count += TXD_USE_COUNT(skb_frag_size(frag));
| 4155 | +	}
4101 | 4156 | #else
4102 | 4157 | 	count += skb_shinfo(skb)->nr_frags;
4103 | 4158 | #endif
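Both frag hunks in this function follow the kernel's conversion (around v5.4) of `skb_frag_t` from the old `struct skb_frag_struct` to an alias of `struct bio_vec`: direct member access such as `frags[f].size` disappears in favor of accessors. A hedged sketch of the accessor style:

```c
/* Hedged sketch: frags are touched only through accessors now that
 * skb_frag_t aliases struct bio_vec. */
static unsigned int example_first_frag_size(const struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

	return skb_frag_size(frag);	/* replaces frag->size */
}
```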
.. | ..
4122 | 4177 | 	first->tx_flags = tx_flags;
4123 | 4178 | 	first->protocol = vlan_get_protocol(skb);
4124 | 4179 |
4125 | | -	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
| 4180 | +#ifdef CONFIG_IXGBEVF_IPSEC
| 4181 | +	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
| 4182 | +		goto out_drop;
| 4183 | +#endif
| 4184 | +	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
4126 | 4185 | 	if (tso < 0)
4127 | 4186 | 		goto out_drop;
4128 | 4187 | 	else if (!tso)
4129 | | -		ixgbevf_tx_csum(tx_ring, first);
| 4188 | +		ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
4130 | 4189 |
4131 | 4190 | 	ixgbevf_tx_map(tx_ring, first, hdr_len);
4132 | 4191 |
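`xfrm_offload()` is what the new `<net/xfrm.h>` include at the top of the patch provides: it returns the skb's xfrm offload state, non-NULL only when the stack marked the packet for hardware crypto, so ordinary traffic pays a single branch here. A hedged sketch of the gate:

```c
#include <net/xfrm.h>

/* Hedged sketch: divert to IPsec descriptor setup only for frames the
 * xfrm stack tagged for offload. */
static bool example_needs_ipsec_offload(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	return xo != NULL;	/* NULL for non-ESP-offload traffic */
}
```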
.. | ..
4236 | 4295 | 	return 0;
4237 | 4296 | }
4238 | 4297 |
4239 | | -static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
| 4298 | +static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
4240 | 4299 | {
4241 | | -	struct net_device *netdev = pci_get_drvdata(pdev);
| 4300 | +	struct net_device *netdev = dev_get_drvdata(dev_d);
4242 | 4301 | 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4243 | | -#ifdef CONFIG_PM
4244 | | -	int retval = 0;
4245 | | -#endif
4246 | 4302 |
4247 | 4303 | 	rtnl_lock();
4248 | 4304 | 	netif_device_detach(netdev);
.. | ..
4253 | 4309 | 	ixgbevf_clear_interrupt_scheme(adapter);
4254 | 4310 | 	rtnl_unlock();
4255 | 4311 |
4256 | | -#ifdef CONFIG_PM
4257 | | -	retval = pci_save_state(pdev);
4258 | | -	if (retval)
4259 | | -		return retval;
4260 | | -
4261 | | -#endif
4262 | | -	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4263 | | -		pci_disable_device(pdev);
4264 | | -
4265 | 4312 | 	return 0;
4266 | 4313 | }
4267 | 4314 |
4268 | | -#ifdef CONFIG_PM
4269 | | -static int ixgbevf_resume(struct pci_dev *pdev)
| 4315 | +static int __maybe_unused ixgbevf_resume(struct device *dev_d)
4270 | 4316 | {
| 4317 | +	struct pci_dev *pdev = to_pci_dev(dev_d);
4271 | 4318 | 	struct net_device *netdev = pci_get_drvdata(pdev);
4272 | 4319 | 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4273 | 4320 | 	u32 err;
4274 | | -
4275 | | -	pci_restore_state(pdev);
4276 | | -	/* pci_restore_state clears dev->state_saved so call
4277 | | -	 * pci_save_state to restore it.
4278 | | -	 */
4279 | | -	pci_save_state(pdev);
4280 | | -
4281 | | -	err = pci_enable_device_mem(pdev);
4282 | | -	if (err) {
4283 | | -		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
4284 | | -		return err;
4285 | | -	}
4286 | 4321 |
4287 | 4322 | 	adapter->hw.hw_addr = adapter->io_addr;
4288 | 4323 | 	smp_mb__before_atomic();
.. | ..
4304 | 4339 | 	return err;
4305 | 4340 | }
4306 | 4341 |
4307 | | -#endif /* CONFIG_PM */
4308 | 4342 | static void ixgbevf_shutdown(struct pci_dev *pdev)
4309 | 4343 | {
4310 | | -	ixgbevf_suspend(pdev, PMSG_SUSPEND);
| 4344 | +	ixgbevf_suspend(&pdev->dev);
4311 | 4345 | }
4312 | 4346 |
4313 | 4347 | static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
.. | ..
4441 | 4475 |
4442 | 4476 | static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4443 | 4477 | {
4444 | | -	struct ixgbevf_adapter *adapter = netdev_priv(dev);
4445 | | -
4446 | 4478 | 	switch (xdp->command) {
4447 | 4479 | 	case XDP_SETUP_PROG:
4448 | 4480 | 		return ixgbevf_xdp_setup(dev, xdp->prog);
4449 | | -	case XDP_QUERY_PROG:
4450 | | -		xdp->prog_id = adapter->xdp_prog ?
4451 | | -			       adapter->xdp_prog->aux->id : 0;
4452 | | -		return 0;
4453 | 4481 | 	default:
4454 | 4482 | 		return -EINVAL;
4455 | 4483 | 	}
.. | ..
4616 | 4644 | 	case ixgbe_mbox_api_11:
4617 | 4645 | 	case ixgbe_mbox_api_12:
4618 | 4646 | 	case ixgbe_mbox_api_13:
| 4647 | +	case ixgbe_mbox_api_14:
4619 | 4648 | 		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4620 | 4649 | 				  (ETH_HLEN + ETH_FCS_LEN);
4621 | 4650 | 		break;
.. | ..
4651 | 4680 |
4652 | 4681 | 	pci_set_drvdata(pdev, netdev);
4653 | 4682 | 	netif_carrier_off(netdev);
| 4683 | +	ixgbevf_init_ipsec_offload(adapter);
4654 | 4684 |
4655 | 4685 | 	ixgbevf_init_last_counter_stats(adapter);
4656 | 4686 |
.. | ..
4717 | 4747 | 	if (netdev->reg_state == NETREG_REGISTERED)
4718 | 4748 | 		unregister_netdev(netdev);
4719 | 4749 |
| 4750 | +	ixgbevf_stop_ipsec_offload(adapter);
4720 | 4751 | 	ixgbevf_clear_interrupt_scheme(adapter);
4721 | 4752 | 	ixgbevf_reset_interrupt_capability(adapter);
4722 | 4753 |
.. | ..
4824 | 4855 | 	.resume = ixgbevf_io_resume,
4825 | 4856 | };
4826 | 4857 |
| 4858 | +static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
| 4859 | +
4827 | 4860 | static struct pci_driver ixgbevf_driver = {
4828 | 4861 | 	.name = ixgbevf_driver_name,
4829 | 4862 | 	.id_table = ixgbevf_pci_tbl,
4830 | 4863 | 	.probe = ixgbevf_probe,
4831 | 4864 | 	.remove = ixgbevf_remove,
4832 | | -#ifdef CONFIG_PM
| 4865 | +
4833 | 4866 | 	/* Power Management Hooks */
4834 | | -	.suspend = ixgbevf_suspend,
4835 | | -	.resume = ixgbevf_resume,
4836 | | -#endif
| 4867 | +	.driver.pm = &ixgbevf_pm_ops,
| 4868 | +
4837 | 4869 | 	.shutdown = ixgbevf_shutdown,
4838 | 4870 | 	.err_handler = &ixgbevf_err_handler
4839 | 4871 | };
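Together with the `ixgbevf_suspend()`/`ixgbevf_resume()` hunks above, this completes the conversion from legacy PCI power-management hooks to generic `dev_pm_ops`: the PCI core now saves and restores config space and handles device power state, so the driver callbacks shrink to device-specific work and the `#ifdef CONFIG_PM` guards become `__maybe_unused` annotations. The generic shape of such a conversion, as a hedged sketch with placeholder names:

```c
/* Hedged sketch of a legacy-to-generic PM conversion; example_* names
 * are placeholders. The PCI core handles config-space save/restore. */
static int __maybe_unused example_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	netif_device_detach(netdev);
	/* quiesce rings, free IRQs, ... */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	/* re-initialize hardware, request IRQs, ... */
	netif_device_attach(netdev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
```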
.. | ..
4846 | 4878 |  **/
4847 | 4879 | static int __init ixgbevf_init_module(void)
4848 | 4880 | {
4849 | | -	pr_info("%s - version %s\n", ixgbevf_driver_string,
4850 | | -		ixgbevf_driver_version);
| 4881 | +	int err;
4851 | 4882 |
| 4883 | +	pr_info("%s\n", ixgbevf_driver_string);
4852 | 4884 | 	pr_info("%s\n", ixgbevf_copyright);
4853 | 4885 | 	ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4854 | 4886 | 	if (!ixgbevf_wq) {
.. | ..
4856 | 4888 | 		return -ENOMEM;
4857 | 4889 | 	}
4858 | 4890 |
4859 | | -	return pci_register_driver(&ixgbevf_driver);
| 4891 | +	err = pci_register_driver(&ixgbevf_driver);
| 4892 | +	if (err) {
| 4893 | +		destroy_workqueue(ixgbevf_wq);
| 4894 | +		return err;
| 4895 | +	}
| 4896 | +
| 4897 | +	return 0;
4860 | 4898 | }
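The init fix above applies the standard unwind-on-error rule: a resource acquired by an earlier step (the workqueue) must be released if a later step (driver registration) fails, otherwise a failed module load leaks it. The pattern, as a hedged sketch with placeholder names (the `example_driver` pci_driver is assumed to be defined at file scope):

```c
static struct workqueue_struct *example_wq;

/* Hedged sketch: release earlier resources in reverse order when a
 * later init step fails. */
static int __init example_init(void)
{
	int err;

	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	err = pci_register_driver(&example_driver);
	if (err)
		destroy_workqueue(example_wq);	/* undo step 1 */

	return err;
}
```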
4861 | 4899 |
4862 | 4900 | module_init(ixgbevf_init_module);