2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
 
 #include <linux/types.h>
 #include <linux/module.h>
@@ -11,18 +11,15 @@
 
 #include "fm10k.h"
 
-#define DRV_VERSION	"0.23.4-k"
 #define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
-const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
 static const char fm10k_driver_string[] = DRV_SUMMARY;
 static const char fm10k_copyright[] =
-	"Copyright(c) 2013 - 2018 Intel Corporation.";
+	"Copyright(c) 2013 - 2019 Intel Corporation.";
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL v2");
 
 /* single workqueue for entire fm10k driver */
 struct workqueue_struct *fm10k_workqueue;
@@ -35,7 +32,9 @@
 **/
 static int __init fm10k_init_module(void)
 {
-	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
+	int ret;
+
+	pr_info("%s\n", fm10k_driver_string);
 	pr_info("%s\n", fm10k_copyright);
 
 	/* create driver workqueue */
@@ -46,7 +45,13 @@
 
 	fm10k_dbg_init();
 
-	return fm10k_register_pci_driver();
+	ret = fm10k_register_pci_driver();
+	if (ret) {
+		fm10k_dbg_exit();
+		destroy_workqueue(fm10k_workqueue);
+	}
+
+	return ret;
 }
 module_init(fm10k_init_module);
 
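The init hunk above turns a tail call into register-then-unwind error handling: if fm10k_register_pci_driver() fails, the debugfs state and the driver workqueue created earlier are torn down in reverse order instead of being leaked. A minimal sketch of the same pattern for a generic PCI driver; every example_* name is hypothetical:

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;	/* hypothetical */
    static struct pci_driver example_pci_driver;	/* hypothetical, assumed filled in elsewhere */

    static int __init example_init(void)
    {
    	int ret;

    	/* step 1: allocate resources that later steps depend on */
    	example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
    	if (!example_wq)
    		return -ENOMEM;

    	/* step 2: register; on failure, unwind step 1 in reverse order */
    	ret = pci_register_driver(&example_pci_driver);
    	if (ret)
    		destroy_workqueue(example_wq);

    	return ret;
    }
    module_init(example_init);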
@@ -280,7 +285,7 @@
 	/* we need the header to contain the greater of either ETH_HLEN or
 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
 	 */
-	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+	pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);
 
 	/* align pull length to size of long to optimize memcpy performance */
 	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
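eth_get_headlen() grew a net_device argument (mainline around v5.2) so the flow dissector underneath it can honor a BPF flow-dissector program attached to the receiving device; callers now pass the netdev alongside the buffer. A sketch of the call in an rx path, mirroring the hunk above; the function name and parameters are hypothetical:

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    /* Copy only the real protocol headers into the skb linear area,
     * sizing the pull with eth_get_headlen() as the hunk above does.
     */
    static void example_pull_headers(struct sk_buff *skb, void *va,
    				 unsigned int hdr_budget)
    {
    	unsigned int pull_len;

    	/* the dev argument is what the updated API adds */
    	pull_len = eth_get_headlen(skb->dev, va, hdr_budget);

    	/* aligned copy mirrors the driver's memcpy optimization */
    	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
    }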
@@ -313,10 +318,7 @@
 			 rx_buffer->page_offset;
 
 	/* prefetch first cache line of first page */
-	prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-	prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+	net_prefetch(page_addr);
 
 	/* allocate a skb to store the frags */
 	skb = napi_alloc_skb(&rx_ring->q_vector->napi,
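net_prefetch() folds the open-coded pair above into one helper: it always prefetches the first cache line and, on architectures whose L1 lines are smaller than 128 bytes, the next line too, on the theory that a network header plus the start of payload spans roughly 128 bytes. Paraphrasing its <linux/netdevice.h> definition (added around v5.9):

    static inline void net_prefetch(void *p)
    {
    	prefetch(p);
    #if L1_CACHE_BYTES < 128
    	/* second line, so ~128B of headers are covered either way */
    	prefetch((u8 *)p + L1_CACHE_BYTES);
    #endif
    }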
@@ -638,15 +640,8 @@
 static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
 {
 	struct fm10k_intfc *interface = netdev_priv(skb->dev);
-	struct fm10k_udp_port *vxlan_port;
 
-	/* we can only offload a vxlan if we recognize it as such */
-	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
-					      struct fm10k_udp_port, list);
-
-	if (!vxlan_port)
-		return NULL;
-	if (vxlan_port->port != udp_hdr(skb)->dest)
+	if (interface->vxlan_port != udp_hdr(skb)->dest)
 		return NULL;
 
 	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
@@ -859,7 +854,7 @@
 	case IPPROTO_GRE:
 		if (skb->encapsulation)
 			break;
-		/* fall through */
+		fallthrough;
 	default:
 		if (unlikely(net_ratelimit())) {
 			dev_warn(tx_ring->dev,
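fallthrough; is the pseudo-keyword from <linux/compiler_attributes.h>; where the compiler supports it, it expands to __attribute__((__fallthrough__)), so intentional case fall-through satisfies -Wimplicit-fallthrough instead of relying on a comment the compiler cannot check. A sketch of its use in a switch; the function and its handling are hypothetical:

    #include <linux/in.h>
    #include <linux/printk.h>

    static void example_classify(int proto, bool encapsulated)
    {
    	switch (proto) {
    	case IPPROTO_GRE:
    		if (encapsulated)
    			break;
    		fallthrough;	/* deliberate: unencapsulated GRE is unexpected */
    	default:
    		/* hypothetical handling of the unexpected protocol */
    		pr_warn_once("unexpected protocol %d\n", proto);
    	}
    }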
@@ -946,7 +941,7 @@
 	struct sk_buff *skb = first->skb;
 	struct fm10k_tx_buffer *tx_buffer;
 	struct fm10k_tx_desc *tx_desc;
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 	unsigned char *data;
 	dma_addr_t dma;
 	unsigned int data_len, size;
@@ -1037,13 +1032,8 @@
 	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	/* notify HW of packet */
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
 	}
 
 	return;
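Two independent API shifts meet in this hunk. skb->xmit_more moved out of the sk_buff into per-CPU state read via netdev_xmit_more() (around v5.2), freeing a bit in every skb; and mmiowb() was retired once its ordering guarantee became implied by the spinlock release protecting the queue. A sketch of the resulting doorbell pattern; struct example_ring and its fields are hypothetical:

    #include <linux/netdevice.h>
    #include <linux/io.h>

    struct example_ring {			/* hypothetical ring metadata */
    	struct net_device *netdev;
    	void __iomem *tail;		/* MMIO doorbell register */
    	u16 queue_index;
    };

    static void example_ring_doorbell(struct example_ring *ring, u16 next_to_use)
    {
    	struct netdev_queue *txq =
    		netdev_get_tx_queue(ring->netdev, ring->queue_index);

    	/* only touch MMIO on the last skb of a batch, or if the queue
    	 * stopped; ordering no longer needs an explicit mmiowb()
    	 */
    	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
    		writel(next_to_use, ring->tail);
    }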
@@ -1078,8 +1068,11 @@
 	 * + 2 desc gap to keep tail from touching head
 	 * otherwise try next time
 	 */
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		count += TXD_USE_COUNT(skb_frag_size(frag));
+	}
 
 	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
 		tx_ring->tx_stats.tx_busy++;
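The direct frags[f].size access breaks once skb_frag_t is converted to alias struct bio_vec (its fields were renamed), which is why this hunk and the earlier declaration change switch to the skb_frag_size() accessor. A sketch of descriptor counting through the accessor; the per-descriptor limit and function name are illustrative, not fm10k's:

    #include <linux/skbuff.h>

    #define EXAMPLE_MAX_DATA_PER_TXD	16384	/* hypothetical HW limit */

    static unsigned int example_count_descs(const struct sk_buff *skb)
    {
    	unsigned int count = 0;
    	unsigned int f;

    	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
    		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

    		/* accessor keeps us independent of skb_frag_t's layout */
    		count += DIV_ROUND_UP(skb_frag_size(frag),
    				      EXAMPLE_MAX_DATA_PER_TXD);
    	}
    	return count;
    }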
@@ -1467,11 +1460,11 @@
 	if (!clean_complete)
 		return budget;
 
-	/* all work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-
-	/* re-enable the q_vector */
-	fm10k_qv_enable(q_vector);
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		fm10k_qv_enable(q_vector);
 
 	return min(work_done, budget - 1);
 }
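napi_complete_done() returns bool: false means the stack kept ownership of this NAPI context for busy polling, in which case the driver must leave interrupts masked or an IRQ could race the busy-poll loop. The hunk above adopts the canonical poll-routine tail; a sketch with hypothetical example_* helpers:

    #include <linux/netdevice.h>

    static int example_poll(struct napi_struct *napi, int budget)
    {
    	/* hypothetical: clean tx/rx rings, returning packets processed */
    	int work_done = example_clean_rings(napi, budget);

    	if (work_done >= budget)
    		return budget;		/* more work: stay in polling mode */

    	/* re-arm interrupts only if busy polling isn't holding us */
    	if (likely(napi_complete_done(napi, work_done)))
    		example_irq_enable(napi);	/* hypothetical re-arm */

    	return min(work_done, budget - 1);
    }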
@@ -1559,7 +1552,7 @@
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features. This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
+ * fall through conditions.
 *
 **/
 static void fm10k_set_num_queues(struct fm10k_intfc *interface)
@@ -1605,14 +1598,12 @@
 {
 	struct fm10k_q_vector *q_vector;
 	struct fm10k_ring *ring;
-	int ring_count, size;
+	int ring_count;
 
 	ring_count = txr_count + rxr_count;
-	size = sizeof(struct fm10k_q_vector) +
-	       (sizeof(struct fm10k_ring) * ring_count);
 
 	/* allocate q_vector and rings */
-	q_vector = kzalloc(size, GFP_KERNEL);
+	q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
 	if (!q_vector)
 		return -ENOMEM;
 
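struct_size() from <linux/overflow.h> replaces the hand-rolled sizeof(header) + n * sizeof(element): it computes the size of a struct with n trailing flexible-array elements and saturates to SIZE_MAX on overflow, so kzalloc() fails cleanly instead of returning an undersized buffer. A sketch with hypothetical types:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct example_elem {			/* hypothetical element type */
    	void *desc;
    };

    struct example_q_vector {		/* hypothetical container */
    	unsigned int ring_count;
    	struct example_elem ring[];	/* flexible array member */
    };

    static struct example_q_vector *example_alloc(unsigned int ring_count)
    {
    	struct example_q_vector *q;

    	/* struct_size(q, ring, ring_count) == sizeof(*q) +
    	 * ring_count * sizeof(q->ring[0]), saturating on overflow
    	 */
    	q = kzalloc(struct_size(q, ring, ring_count), GFP_KERNEL);
    	if (!q)
    		return NULL;

    	q->ring_count = ring_count;
    	return q;
    }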
@@ -1830,7 +1821,7 @@
 	v_budget = min_t(u16, v_budget, num_online_cpus());
 
 	/* account for vectors not related to queues */
-	v_budget += NON_Q_VECTORS(hw);
+	v_budget += NON_Q_VECTORS;
 
 	/* At the same time, hardware can only support a maximum of
 	 * hw.mac->max_msix_vectors vectors. With features
@@ -1862,7 +1853,7 @@
 	}
 
 	/* record the number of queues available for q_vectors */
-	interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
+	interface->num_q_vectors = v_budget - NON_Q_VECTORS;
 
 	return 0;
 }
@@ -1876,7 +1867,7 @@
 static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
 {
 	struct net_device *dev = interface->netdev;
-	int pc, offset, rss_i, i, q_idx;
+	int pc, offset, rss_i, i;
 	u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
 	u8 num_pcs = netdev_get_num_tc(dev);
 
@@ -1886,7 +1877,8 @@
 	rss_i = interface->ring_feature[RING_F_RSS].indices;
 
 	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
-		q_idx = pc;
+		int q_idx = pc;
+
 		for (i = 0; i < rss_i; i++) {
 			interface->tx_ring[offset + i]->reg_idx = q_idx;
 			interface->tx_ring[offset + i]->qos_pc = pc;