2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5,11 +5,14 @@
 #include <linux/of_net.h>
 #include <linux/pci.h>
 #include <linux/bpf.h>
+#include <generated/utsrelease.h>
 
 /* Local includes */
 #include "i40e.h"
 #include "i40e_diag.h"
+#include "i40e_xsk.h"
 #include <net/udp_tunnel.h>
+#include <net/xdp_sock_drv.h>
 /* All i40e tracepoints are defined by the include below, which
  * must be included exactly once across the whole kernel with
  * CREATE_TRACE_POINTS defined
@@ -21,23 +24,14 @@
 static const char i40e_driver_string[] =
 	"Intel(R) Ethernet Connection XL710 Network Driver";
 
-#define DRV_KERN "-k"
-
-#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
-	     __stringify(DRV_VERSION_MINOR) "." \
-	     __stringify(DRV_VERSION_BUILD) DRV_KERN
-const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
 
 /* a bit of forward declarations */
 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
 static int i40e_add_vsi(struct i40e_vsi *vsi);
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
@@ -46,11 +40,15 @@
 			      bool lock_acquired);
 static int i40e_reset(struct i40e_pf *pf);
 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
+static bool i40e_check_recovery_mode(struct i40e_pf *pf);
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
 static int i40e_get_capabilities(struct i40e_pf *pf,
 				 enum i40e_admin_queue_opc list_type);
-
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
 
 /* i40e_pci_tbl - PCI Device ID Table
  *
@@ -69,6 +67,9 @@
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
@@ -77,6 +78,8 @@
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
 	/* required last entry */
@@ -91,8 +94,7 @@
 
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL v2");
 
 static struct workqueue_struct *i40e_wq;
 
@@ -127,8 +129,8 @@
 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
 
 	mem->size = ALIGN(size, alignment);
-	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
-				      &mem->pa, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+				     GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 
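[Reviewer note, not part of the patch] dma_zalloc_coherent() was removed once dma_alloc_coherent() began returning zeroed memory (kernel v5.0), so the hunk above is a mechanical rename with unchanged behavior. A minimal sketch of the alloc/free pairing such wrappers use, with hypothetical names:

	struct dma_mem {
		void *va;      /* kernel virtual address of the buffer */
		dma_addr_t pa; /* device-visible DMA address */
		u32 size;
	};

	static int dma_mem_alloc(struct device *dev, struct dma_mem *m, u32 size)
	{
		m->size = ALIGN(size, 4096);
		/* dma_alloc_coherent() returns zero-filled memory */
		m->va = dma_alloc_coherent(dev, m->size, &m->pa, GFP_KERNEL);
		return m->va ? 0 : -ENOMEM;
	}

	static void dma_mem_free(struct device *dev, struct dma_mem *m)
	{
		dma_free_coherent(dev, m->size, m->va, m->pa);
	}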
@@ -302,56 +304,39 @@
 **/
 void i40e_service_event_schedule(struct i40e_pf *pf)
 {
-	if (!test_bit(__I40E_DOWN, pf->state) &&
-	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+	if ((!test_bit(__I40E_DOWN, pf->state) &&
+	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
+	    test_bit(__I40E_RECOVERY_MODE, pf->state))
 		queue_work(i40e_wq, &pf->service_task);
 }
 
 /**
  * i40e_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
+ * @txqueue: queue number timing out
  *
  * If any port has noticed a Tx timeout, it is likely that the whole
  * device is munged, not just the one netdev port, so go for the full
  * reset.
 **/
-static void i40e_tx_timeout(struct net_device *netdev)
+static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_ring *tx_ring = NULL;
-	unsigned int i, hung_queue = 0;
+	unsigned int i;
 	u32 head, val;
 
 	pf->tx_timeout_count++;
 
-	/* find the stopped queue the same way the stack does */
-	for (i = 0; i < netdev->num_tx_queues; i++) {
-		struct netdev_queue *q;
-		unsigned long trans_start;
-
-		q = netdev_get_tx_queue(netdev, i);
-		trans_start = q->trans_start;
-		if (netif_xmit_stopped(q) &&
-		    time_after(jiffies,
-			       (trans_start + netdev->watchdog_timeo))) {
-			hung_queue = i;
-			break;
-		}
-	}
-
-	if (i == netdev->num_tx_queues) {
-		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
-	} else {
-		/* now that we have an index, find the tx_ring struct */
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
-				if (hung_queue ==
-				    vsi->tx_rings[i]->queue_index) {
-					tx_ring = vsi->tx_rings[i];
-					break;
-				}
+	/* with txqueue index, find the tx_ring struct */
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+			if (txqueue ==
+			    vsi->tx_rings[i]->queue_index) {
+				tx_ring = vsi->tx_rings[i];
+				break;
 			}
 		}
 	}
@@ -377,14 +362,14 @@
 		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
 		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
-			    vsi->seid, hung_queue, tx_ring->next_to_clean,
+			    vsi->seid, txqueue, tx_ring->next_to_clean,
 			    head, tx_ring->next_to_use,
 			    readl(tx_ring->tail), val);
 	}
 
 	pf->tx_timeout_last_recovery = jiffies;
-	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
-		    pf->tx_timeout_recovery_level, hung_queue);
+	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+		    pf->tx_timeout_recovery_level, txqueue);
 
 	switch (pf->tx_timeout_recovery_level) {
 	case 1:
@@ -397,7 +382,9 @@
 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
 		break;
 	default:
-		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+		set_bit(__I40E_DOWN_REQUESTED, pf->state);
+		set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
 		break;
 	}
 
@@ -553,10 +540,55 @@
 				       sizeof(pf->veb[i]->stats));
 			memset(&pf->veb[i]->stats_offsets, 0,
 			       sizeof(pf->veb[i]->stats_offsets));
+			memset(&pf->veb[i]->tc_stats, 0,
+			       sizeof(pf->veb[i]->tc_stats));
+			memset(&pf->veb[i]->tc_stats_offsets, 0,
+			       sizeof(pf->veb[i]->tc_stats_offsets));
 			pf->veb[i]->stat_offsets_loaded = false;
 		}
 	}
 	pf->hw_csum_rx_error = 0;
+}
+
+/**
+ * i40e_compute_pci_to_hw_id - compute index form PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+	int pf_count = i40e_get_pf_count(hw);
+
+	if (vsi->type == I40E_VSI_SRIOV)
+		return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+	return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+			       bool offset_loaded, u64 *offset, u64 *stat)
+{
+	u64 new_data;
+
+	new_data = rd64(hw, loreg);
+
+	if (!offset_loaded || new_data < *offset)
+		*offset = new_data;
+	*stat = new_data - *offset;
 }
 
 /**
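[Reviewer note, not part of the patch] i40e_stat_update64() follows the driver's usual offset scheme: hardware counters are not cleared by a PF reset, so the first value read becomes a baseline that is subtracted from every later read. A small illustration of the same idea, with hypothetical names:

	/* Hypothetical illustration of the baseline/offset scheme. */
	static u64 stat_from_raw(u64 raw, bool *loaded, u64 *offset)
	{
		/* first read, or counter went backwards (wrap/reset): re-baseline */
		if (!*loaded || raw < *offset) {
			*offset = raw;
			*loaded = true;
		}
		return raw - *offset; /* reported stat counts from zero */
	}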
@@ -631,6 +663,34 @@
 }
 
 /**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+			      int stat_idx, bool offset_loaded,
+			      struct i40e_eth_stats *stat_offset,
+			      struct i40e_eth_stats *stat)
+{
+	u64 rx_rdpc, rx_rxerr;
+
+	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+			   &stat_offset->rx_discards, &rx_rdpc);
+	i40e_stat_update64(hw,
+			   I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+			   I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+			   offset_loaded, &stat_offset->rx_discards_other,
+			   &rx_rxerr);
+
+	stat->rx_discards = rx_rdpc + rx_rxerr;
+}
+
+/**
  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
  * @vsi: the VSI to be updated
  **/
@@ -655,9 +715,6 @@
 	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
 			   vsi->stat_offsets_loaded,
 			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
-			   vsi->stat_offsets_loaded,
-			   &oes->tx_errors, &es->tx_errors);
 
 	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
 			   I40E_GLV_GORCL(stat_idx),
@@ -692,6 +749,10 @@
 			   I40E_GLV_BPTCL(stat_idx),
 			   vsi->stat_offsets_loaded,
 			   &oes->tx_broadcast, &es->tx_broadcast);
+
+	i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+				      vsi->stat_offsets_loaded, oes, es);
+
 	vsi->stat_offsets_loaded = true;
 }
 
@@ -699,7 +760,7 @@
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
-static void i40e_update_veb_stats(struct i40e_veb *veb)
+void i40e_update_veb_stats(struct i40e_veb *veb)
 {
 	struct i40e_pf *pf = veb->pf;
 	struct i40e_hw *hw = &pf->hw;
@@ -852,6 +913,25 @@
 		rx_p += packets;
 		rx_buf += p->rx_stats.alloc_buff_failed;
 		rx_page += p->rx_stats.alloc_page_failed;
+
+		if (i40e_enabled_xdp_vsi(vsi)) {
+			/* locate XDP ring */
+			p = READ_ONCE(vsi->xdp_rings[q]);
+			if (!p)
+				continue;
+
+			do {
+				start = u64_stats_fetch_begin_irq(&p->syncp);
+				packets = p->stats.packets;
+				bytes = p->stats.bytes;
+			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+			tx_b += bytes;
+			tx_p += packets;
+			tx_restart += p->tx_stats.restart_queue;
+			tx_busy += p->tx_stats.tx_busy;
+			tx_linearize += p->tx_stats.tx_linearize;
+			tx_force_wb += p->tx_stats.tx_force_wb;
+		}
 	}
 	rcu_read_unlock();
 	vsi->tx_restart = tx_restart;
@@ -1129,6 +1209,25 @@
 	i40e_update_pf_stats(pf);
 
 	i40e_update_vsi_stats(vsi);
+}
+
+/**
+ * i40e_count_filters - counts VSI mac filters
+ * @vsi: the VSI to be searched
+ *
+ * Returns count of mac filters
+ **/
+int i40e_count_filters(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+	struct hlist_node *h;
+	int bkt;
+	int cnt = 0;
+
+	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+		++cnt;
+
+	return cnt;
 }
 
 /**
@@ -1530,8 +1629,7 @@
 	bool found = false;
 	int bkt;
 
-	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
-	     "Missing mac_filter_hash_lock\n");
+	lockdep_assert_held(&vsi->mac_filter_hash_lock);
 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
 		if (ether_addr_equal(macaddr, f->macaddr)) {
 			__i40e_del_filter(vsi, f);
@@ -1569,8 +1667,8 @@
 		return 0;
 	}
 
-	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
+	if (test_bit(__I40E_DOWN, pf->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		return -EADDRNOTAVAIL;
 
 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1594,8 +1692,7 @@
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
 
-		ret = i40e_aq_mac_address_write(&vsi->back->hw,
-						I40E_AQC_WRITE_TYPE_LAA_WOL,
+		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
 						addr->sa_data, NULL);
 		if (ret)
 			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
@@ -1606,7 +1703,7 @@
 	/* schedule our worker thread which will take care of
 	 * applying the new filter changes
 	 */
-	i40e_service_event_schedule(vsi->back);
+	i40e_service_event_schedule(pf);
 	return 0;
 }
 
@@ -1614,6 +1711,8 @@
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
+ * @lut: pointer to lookup table of lut_size
+ * @lut_size: size of the lookup table
 **/
 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
 			      u8 *lut, u16 lut_size)
@@ -1635,7 +1734,7 @@
 		}
 	}
 	if (lut) {
-		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+		bool pf_lut = vsi->type == I40E_VSI_MAIN;
 
 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
 		if (ret) {
@@ -1800,6 +1899,8 @@
 
 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
 	offset = 0;
+	/* zero out queue mapping, it will get updated on the end of the function */
+	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
 
 	if (vsi->type == I40E_VSI_MAIN) {
 		/* This code helps add more queue to the VSI if we have
@@ -1808,18 +1909,24 @@
 		 * non-zero req_queue_pairs says that user requested a new
 		 * queue count via ethtool's set_channels, so use this
 		 * value for queues distribution across traffic classes
+		 * We need at least one queue pair for the interface
+		 * to be usable as we see in else statement.
 		 */
 		if (vsi->req_queue_pairs > 0)
 			vsi->num_queue_pairs = vsi->req_queue_pairs;
 		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
 			vsi->num_queue_pairs = pf->num_lan_msix;
+		else
+			vsi->num_queue_pairs = 1;
 	}
 
 	/* Number of queues per enabled TC */
-	if (vsi->type == I40E_VSI_MAIN)
+	if (vsi->type == I40E_VSI_MAIN ||
+	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
 		num_tc_qps = vsi->num_queue_pairs;
 	else
 		num_tc_qps = vsi->alloc_queue_pairs;
+
 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
 		/* Find numtc from enabled TC bitmap */
 		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1858,7 +1965,7 @@
 					  num_tc_qps);
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	case I40E_VSI_FDIR:
 	case I40E_VSI_SRIOV:
 	case I40E_VSI_VMDQ2:
@@ -1897,10 +2004,12 @@
 		}
 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
 	}
-	/* Do not change previously set num_queue_pairs for PFs */
+	/* Do not change previously set num_queue_pairs for PFs and VFs*/
 	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
-	    vsi->type != I40E_VSI_MAIN)
+	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
+	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
 		vsi->num_queue_pairs = offset;
+
 	/* Scheduler section valid can only be set for ADD VSI */
 	if (is_add) {
 		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
@@ -2154,11 +2263,22 @@
 	fcnt = i40e_update_filter_state(num_add, list, add_head);
 
 	if (fcnt != num_add) {
-		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
-		dev_warn(&vsi->back->pdev->dev,
-			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
-			 i40e_aq_str(hw, aq_err),
-			 vsi_name);
+		if (vsi->type == I40E_VSI_MAIN) {
+			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+			dev_warn(&vsi->back->pdev->dev,
+				 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+				 i40e_aq_str(hw, aq_err), vsi_name);
+		} else if (vsi->type == I40E_VSI_SRIOV ||
+			   vsi->type == I40E_VSI_VMDQ1 ||
+			   vsi->type == I40E_VSI_VMDQ2) {
+			dev_warn(&vsi->back->pdev->dev,
+				 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
+				 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+		} else {
+			dev_warn(&vsi->back->pdev->dev,
+				 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
+				 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+		}
 	}
 }
 
@@ -2565,6 +2685,9 @@
 				    vsi_name,
 				    i40e_stat_str(hw, aq_ret),
 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+		} else {
+			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
+				 cur_multipromisc ? "entering" : "leaving");
 		}
 	}
 
@@ -2665,14 +2788,14 @@
 	struct i40e_pf *pf = vsi->back;
 
 	if (i40e_enabled_xdp_vsi(vsi)) {
-		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+		int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
 
 		if (frame_size > i40e_max_xdp_frame_size(vsi))
 			return -EINVAL;
 	}
 
-	netdev_info(netdev, "changing MTU from %d to %d\n",
-		    netdev->mtu, new_mtu);
+	netdev_dbg(netdev, "changing MTU from %d to %d\n",
+		   netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 	if (netif_running(netdev))
 		i40e_vsi_reinit_locked(vsi);
@@ -3014,9 +3137,9 @@
 **/
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
 {
-	i40e_vlan_stripping_disable(vsi);
-
 	vsi->info.pvid = 0;
+
+	i40e_vlan_stripping_disable(vsi);
 }
 
 /**
@@ -3129,6 +3252,26 @@
 }
 
 /**
+ * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the AF_XDP buffer pool or NULL.
+ **/
+static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
+{
+	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+	int qid = ring->queue_index;
+
+	if (ring_is_xdp(ring))
+		qid -= ring->vsi->alloc_queue_pairs;
+
+	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+		return NULL;
+
+	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+}
+
+/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
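[Reviewer note, not part of the patch] In i40e_xsk_pool() the XDP Tx rings sit behind the regular rings in the VSI's ring array, while AF_XDP sockets are keyed by the netdev queue id, hence the rebase before the bitmap test. The mapping, restated as a hypothetical helper:

	/* Hypothetical restatement of the queue-id rebasing above. */
	static int ring_to_af_xdp_qid(const struct i40e_ring *ring)
	{
		int qid = ring->queue_index;

		/* XDP rings use indices alloc_queue_pairs..2N-1; rebase to 0..N-1 */
		if (ring_is_xdp(ring))
			qid -= ring->vsi->alloc_queue_pairs;
		return qid;
	}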
@@ -3142,6 +3285,9 @@
 	struct i40e_hmc_obj_txq tx_ctx;
 	i40e_status err = 0;
 	u32 qtx_ctl = 0;
+
+	if (ring_is_xdp(ring))
+		ring->xsk_pool = i40e_xsk_pool(ring);
 
 	/* some ATR related tx ring init */
 	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
@@ -3252,13 +3398,45 @@
 	struct i40e_hw *hw = &vsi->back->hw;
 	struct i40e_hmc_obj_rxq rx_ctx;
 	i40e_status err = 0;
+	bool ok;
+	int ret;
 
 	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
 
 	/* clear the context structure first */
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
 
-	ring->rx_buf_len = vsi->rx_buf_len;
+	if (ring->vsi->type == I40E_VSI_MAIN)
+		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+	ring->xsk_pool = i40e_xsk_pool(ring);
+	if (ring->xsk_pool) {
+		ring->rx_buf_len =
+			xsk_pool_get_rx_frame_size(ring->xsk_pool);
+		/* For AF_XDP ZC, we disallow packets to span on
+		 * multiple buffers, thus letting us skip that
+		 * handling in the fast-path.
+		 */
+		chain_len = 1;
+		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+						 MEM_TYPE_XSK_BUFF_POOL,
+						 NULL);
+		if (ret)
+			return ret;
+		dev_info(&vsi->back->pdev->dev,
+			 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+			 ring->queue_index);
+
+	} else {
+		ring->rx_buf_len = vsi->rx_buf_len;
+		if (ring->vsi->type == I40E_VSI_MAIN) {
+			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+							 MEM_TYPE_PAGE_SHARED,
+							 NULL);
+			if (ret)
+				return ret;
+		}
+	}
 
 	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
 				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
@@ -3266,8 +3444,8 @@
 	rx_ctx.base = (ring->dma / 128);
 	rx_ctx.qlen = ring->count;
 
-	/* use 32 byte descriptors */
-	rx_ctx.dsize = 1;
+	/* use 16 byte descriptors */
+	rx_ctx.dsize = 0;
 
 	/* descriptor type is always zero
 	 * rx_ctx.dtype = 0;
@@ -3314,7 +3492,21 @@
 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
-	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+	if (ring->xsk_pool) {
+		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+		ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
+	} else {
+		ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+	}
+	if (!ok) {
+		/* Log this in case the user has forgotten to give the kernel
		 * any buffers, even later in the application.
		 */
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
+			 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
+			 ring->queue_index, pf_q);
+	}
 
 	return 0;
 }
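[Reviewer note, not part of the patch] The `ok` handling above exists because the two fill routines use opposite conventions: i40e_alloc_rx_buffers_zc() returns true when the ring was fully populated, while i40e_alloc_rx_buffers() returns a failure indicator, hence the negation on one arm only. A generic, self-contained illustration in plain C:

	#include <stdbool.h>

	static bool fill_zc(int n)    { return n > 0; }          /* true on success */
	static int  fill_pages(int n) { return n > 0 ? 0 : -1; } /* 0 on success   */

	/* Normalize both conventions behind a single "ok" boolean. */
	static bool fill_ring(bool zero_copy, int budget)
	{
		return zero_copy ? fill_zc(budget) : !fill_pages(budget);
	}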
@@ -3333,7 +3525,7 @@
 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
 		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
 
-	if (!i40e_enabled_xdp_vsi(vsi))
+	if (err || !i40e_enabled_xdp_vsi(vsi))
 		return err;
 
 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
@@ -3631,7 +3823,7 @@
 			      (I40E_QUEUE_TYPE_TX
 			       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
 
-			 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+			wr32(hw, I40E_QINT_TQCTL(nextqp), val);
 		}
 
 		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
@@ -4006,7 +4198,8 @@
 enable_intr:
 	/* re-enable interrupt causes */
 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-	if (!test_bit(__I40E_DOWN, pf->state)) {
+	if (!test_bit(__I40E_DOWN, pf->state) ||
+	    test_bit(__I40E_RECOVERY_MODE, pf->state)) {
 		i40e_service_event_schedule(pf);
 		i40e_irq_dynamic_enable_icr0(pf);
 	}
@@ -5321,6 +5514,58 @@
 }
 
 /**
+ * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
+ * @vsi: the VSI being reconfigured
+ * @vsi_offset: offset from main VF VSI
+ */
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+{
+	struct i40e_vsi_context ctxt = {};
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	int ret;
+
+	if (!vsi)
+		return I40E_ERR_PARAM;
+	pf = vsi->back;
+	hw = &pf->hw;
+
+	ctxt.seid = vsi->seid;
+	ctxt.pf_num = hw->pf_id;
+	ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
+	ctxt.uplink_seid = vsi->uplink_seid;
+	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+	ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+	ctxt.info = vsi->info;
+
+	i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
+				 false);
+	if (vsi->reconfig_rss) {
+		vsi->rss_size = min_t(int, pf->alloc_rss_size,
+				      vsi->num_queue_pairs);
+		ret = i40e_vsi_config_rss(vsi);
+		if (ret) {
+			dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
+			return ret;
+		}
+		vsi->reconfig_rss = false;
+	}
+
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
+		return ret;
+	}
+	/* update the local VSI info with updated queue map */
+	i40e_vsi_update_queue_map(vsi, &ctxt);
+	vsi->info.valid_sections = 0;
+
+	return ret;
+}
+
+/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
@@ -5482,6 +5727,26 @@
 }
 
 /**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+	if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+		dev_warn(&vsi->back->pdev->dev,
+			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
+		max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+	} else {
+		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+	}
+
+	return max_tx_rate;
+}
+
+/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
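[Reviewer note, not part of the patch] mqprio hands the driver max_rate in bytes per second, while the firmware BW-limit command takes Mbit/s in 50 Mbps credits. Assuming the usual i40e definitions (I40E_BW_MBPS_DIVISOR = 125000 bytes/s per Mbit/s, I40E_BW_CREDIT_DIVISOR = 50), a worked example of the conversion i40e_bw_bytes_to_mbits() performs:

	/* Hypothetical walk-through, not driver code. */
	static unsigned long long bytes_to_mbits(unsigned long long rate)
	{
		return rate / 125000; /* 625000000 bytes/s -> 5000 Mbit/s */
	}
	/* i40e_set_bw_limit() then programs 5000 / 50 = 100 credits of 50 Mbps. */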
@@ -5503,10 +5768,10 @@
 			max_tx_rate, seid);
 		return -EINVAL;
 	}
-	if (max_tx_rate && max_tx_rate < 50) {
+	if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
 		dev_warn(&pf->pdev->dev,
 			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
-		max_tx_rate = 50;
+		max_tx_rate = I40E_BW_CREDIT_DIVISOR;
 	}
 
 	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -5608,24 +5873,6 @@
 		kfree(ch);
 	}
 	INIT_LIST_HEAD(&vsi->ch_list);
-}
-
-/**
- * i40e_is_any_channel - channel exist or not
- * @vsi: ptr to VSI to which channels are associated with
- *
- * Returns true or false if channel(s) exist for associated VSI or not
- **/
-static bool i40e_is_any_channel(struct i40e_vsi *vsi)
-{
-	struct i40e_channel *ch, *ch_tmp;
-
-	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
-		if (ch->initialized)
-			return true;
-	}
-
-	return false;
 }
 
 /**
@@ -5764,7 +6011,6 @@
 /**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
- * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
@@ -5852,11 +6098,13 @@
 		return -ENOENT;
 	}
 
-	/* Success, update channel */
-	ch->enabled_tc = enabled_tc;
+	/* Success, update channel, set enabled_tc only if the channel
	 * is not a macvlan
	 */
+	ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
 	ch->seid = ctxt.seid;
 	ch->vsi_number = ctxt.vsi_number;
-	ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
+	ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
 
 	/* copy just the sections touched not the entire info
 	 * since not all sections are valid as returned by
@@ -6005,8 +6253,7 @@
 /**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
- * @type: type of channel to be created (VMDq2/VF)
- * @uplink_seid: underlying HW switching element (VEB) ID
+ * @vsi: pointer to the VSI to set up the channel within
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
@@ -6133,26 +6380,15 @@
 	/* By default we are in VEPA mode, if this is the first VF/VMDq
 	 * VSI to be added switch to VEB mode.
 	 */
-	if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
-	    (!i40e_is_any_channel(vsi))) {
-		if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
-			dev_dbg(&pf->pdev->dev,
-				"Failed to create channel. Override queues (%u) not power of 2\n",
-				vsi->tc_config.tc_info[0].qcount);
-			return -EINVAL;
-		}
 
-		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
-			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+	if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
 
-			if (vsi->type == I40E_VSI_MAIN) {
-				if (pf->flags & I40E_FLAG_TC_MQPRIO)
-					i40e_do_reset(pf, I40E_PF_RESET_FLAG,
-						      true);
-				else
-					i40e_do_reset_safe(pf,
-							   I40E_PF_RESET_FLAG);
-			}
+		if (vsi->type == I40E_VSI_MAIN) {
+			if (pf->flags & I40E_FLAG_TC_MQPRIO)
+				i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+			else
+				i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
 	}
 	/* now onwards for main VSI, number of queues will be value
@@ -6416,11 +6652,13 @@
 	 * Also do not enable DCBx if FW LLDP agent is disabled
 	 */
 	if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
-	    (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
+	    (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
+		dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
+		err = I40E_NOT_SUPPORTED;
 		goto out;
+	}
 
-	/* Get the initial DCB configuration */
-	err = i40e_init_dcb(hw);
+	err = i40e_init_dcb(hw, true);
 	if (!err) {
 		/* Device/Function is not DCBX capable */
 		if ((!hw->func_caps.dcb) ||
@@ -6457,8 +6695,7 @@
 	return err;
 }
 #endif /* CONFIG_I40E_DCB */
-#define SPEED_SIZE 14
-#define FC_SIZE 8
+
 /**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
@@ -6474,7 +6711,10 @@
 	char *req_fec = "";
 	char *an = "";
 
-	new_speed = pf->hw.phy.link_info.link_speed;
+	if (isup)
+		new_speed = pf->hw.phy.link_info.link_speed;
+	else
+		new_speed = I40E_LINK_SPEED_UNKNOWN;
 
 	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
 		return;
@@ -6507,6 +6747,12 @@
 	case I40E_LINK_SPEED_10GB:
 		speed = "10 G";
 		break;
+	case I40E_LINK_SPEED_5GB:
+		speed = "5 G";
+		break;
+	case I40E_LINK_SPEED_2_5GB:
+		speed = "2.5 G";
+		break;
 	case I40E_LINK_SPEED_1GB:
 		speed = "1000 M";
 		break;
@@ -6533,19 +6779,19 @@
 	}
 
 	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
-		req_fec = ", Requested FEC: None";
-		fec = ", FEC: None";
-		an = ", Autoneg: False";
+		req_fec = "None";
+		fec = "None";
+		an = "False";
 
 		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
-			an = ", Autoneg: True";
+			an = "True";
 
 		if (pf->hw.phy.link_info.fec_info &
 		    I40E_AQ_CONFIG_FEC_KR_ENA)
-			fec = ", FEC: CL74 FC-FEC/BASE-R";
+			fec = "CL74 FC-FEC/BASE-R";
 		else if (pf->hw.phy.link_info.fec_info &
 			 I40E_AQ_CONFIG_FEC_RS_ENA)
-			fec = ", FEC: CL108 RS-FEC";
+			fec = "CL108 RS-FEC";
 
 		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
 		 * both RS and FC are requested
@@ -6554,14 +6800,38 @@
 		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
 			if (vsi->back->hw.phy.link_info.req_fec_info &
 			    I40E_AQ_REQUEST_FEC_RS)
-				req_fec = ", Requested FEC: CL108 RS-FEC";
+				req_fec = "CL108 RS-FEC";
 			else
-				req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
+				req_fec = "CL74 FC-FEC/BASE-R";
 		}
+		netdev_info(vsi->netdev,
+			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+			    speed, req_fec, fec, an, fc);
+	} else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
+		req_fec = "None";
+		fec = "None";
+		an = "False";
+
+		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+			an = "True";
+
+		if (pf->hw.phy.link_info.fec_info &
+		    I40E_AQ_CONFIG_FEC_KR_ENA)
+			fec = "CL74 FC-FEC/BASE-R";
+
+		if (pf->hw.phy.link_info.req_fec_info &
+		    I40E_AQ_REQUEST_FEC_KR)
+			req_fec = "CL74 FC-FEC/BASE-R";
+
+		netdev_info(vsi->netdev,
+			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+			    speed, req_fec, fec, an, fc);
+	} else {
+		netdev_info(vsi->netdev,
+			    "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
+			    speed, fc);
 	}
 
-	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
-		    speed, req_fec, fec, an, fc);
 }
 
 /**
@@ -6622,28 +6892,12 @@
 {
 	struct i40e_pf *pf = vsi->back;
 
-	WARN_ON(in_interrupt());
 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
 		usleep_range(1000, 2000);
 	i40e_down(vsi);
 
 	i40e_up(vsi);
 	clear_bit(__I40E_CONFIG_BUSY, pf->state);
-}
-
-/**
- * i40e_up - Bring the connection back up after being down
- * @vsi: the VSI being configured
- **/
-int i40e_up(struct i40e_vsi *vsi)
-{
-	int err;
-
-	err = i40e_vsi_configure(vsi);
-	if (!err)
-		err = i40e_up_complete(vsi);
-
-	return err;
 }
 
 /**
@@ -6655,6 +6909,7 @@
 {
 	struct i40e_aq_get_phy_abilities_resp abilities;
 	struct i40e_aq_set_phy_config config = {0};
+	bool non_zero_phy_type = is_up;
 	struct i40e_hw *hw = &pf->hw;
 	i40e_status err;
 	u64 mask;
@@ -6690,8 +6945,11 @@
 
 	/* If link needs to go up, but was not forced to go down,
 	 * and its speed values are OK, no need for a flap
+	 * if non_zero_phy_type was set, still need to force up
 	 */
-	if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
+	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+		non_zero_phy_type = true;
+	else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
 		return I40E_SUCCESS;
 
 	/* To force link we need to set bits for all supported PHY types,
@@ -6699,10 +6957,18 @@
 	 * across two fields.
 	 */
 	mask = I40E_PHY_TYPES_BITMASK;
-	config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
-	config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
+	config.phy_type =
+		non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
+	config.phy_type_ext =
+		non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
 	/* Copy the old settings, except of phy_type */
 	config.abilities = abilities.abilities;
+	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
+		if (is_up)
+			config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
+		else
+			config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
+	}
 	if (abilities.link_speed != 0)
 		config.link_speed = abilities.link_speed;
 	else
@@ -6733,9 +6999,29 @@
 		i40e_update_link_info(hw);
 	}
 
-	i40e_aq_set_link_restart_an(hw, true, NULL);
+	i40e_aq_set_link_restart_an(hw, is_up, NULL);
 
 	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+	int err;
+
+	if (vsi->type == I40E_VSI_MAIN &&
+	    (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+	     vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+		i40e_force_link_state(vsi->back, true);
+
+	err = i40e_vsi_configure(vsi);
+	if (!err)
+		err = i40e_up_complete(vsi);
+
+	return err;
 }
 
 /**
@@ -6756,14 +7042,20 @@
 	i40e_vsi_disable_irq(vsi);
 	i40e_vsi_stop_rings(vsi);
 	if (vsi->type == I40E_VSI_MAIN &&
-	    vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
+	    (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+	     vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
 		i40e_force_link_state(vsi->back, false);
 	i40e_napi_disable_all(vsi);
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		i40e_clean_tx_ring(vsi->tx_rings[i]);
-		if (i40e_enabled_xdp_vsi(vsi))
+		if (i40e_enabled_xdp_vsi(vsi)) {
+			/* Make sure that in-progress ndo_xdp_xmit and
			 * ndo_xsk_wakeup calls are completed.
			 */
+			synchronize_rcu();
 			i40e_clean_tx_ring(vsi->xdp_rings[i]);
+		}
 		i40e_clean_rx_ring(vsi->rx_rings[i]);
 	}
 
@@ -6841,6 +7133,491 @@
 		else
 			vsi->tc_config.tc_info[i].qcount = 1;
 		vsi->tc_config.tc_info[i].netdev_tc = 0;
+	}
+}
+
+/**
+ * i40e_del_macvlan_filter
+ * @hw: pointer to the HW structure
+ * @seid: seid of the channel VSI
+ * @macaddr: the mac address to apply as a filter
+ * @aq_err: store the admin Q error
+ *
+ * This function deletes a mac filter on the channel VSI which serves as the
+ * macvlan. Returns 0 on success.
+ **/
+static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+					   const u8 *macaddr, int *aq_err)
+{
+	struct i40e_aqc_remove_macvlan_element_data element;
+	i40e_status status;
+
+	memset(&element, 0, sizeof(element));
+	ether_addr_copy(element.mac_addr, macaddr);
+	element.vlan_tag = 0;
+	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+	status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
+	*aq_err = hw->aq.asq_last_status;
+
+	return status;
+}
+
+/**
+ * i40e_add_macvlan_filter
+ * @hw: pointer to the HW structure
+ * @seid: seid of the channel VSI
+ * @macaddr: the mac address to apply as a filter
+ * @aq_err: store the admin Q error
+ *
+ * This function adds a mac filter on the channel VSI which serves as the
+ * macvlan. Returns 0 on success.
+ **/
+static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+					   const u8 *macaddr, int *aq_err)
+{
+	struct i40e_aqc_add_macvlan_element_data element;
+	i40e_status status;
+	u16 cmd_flags = 0;
+
+	ether_addr_copy(element.mac_addr, macaddr);
+	element.vlan_tag = 0;
+	element.queue_number = 0;
+	element.match_method = I40E_AQC_MM_ERR_NO_RES;
+	cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+	element.flags = cpu_to_le16(cmd_flags);
+	status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
+	*aq_err = hw->aq.asq_last_status;
+
+	return status;
+}
+
+/**
+ * i40e_reset_ch_rings - Reset the queue contexts in a channel
+ * @vsi: the VSI we want to access
+ * @ch: the channel we want to access
+ */
+static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
+{
+	struct i40e_ring *tx_ring, *rx_ring;
+	u16 pf_q;
+	int i;
+
+	for (i = 0; i < ch->num_queue_pairs; i++) {
+		pf_q = ch->base_queue + i;
+		tx_ring = vsi->tx_rings[pf_q];
+		tx_ring->ch = NULL;
+		rx_ring = vsi->rx_rings[pf_q];
+		rx_ring->ch = NULL;
+	}
+}
+
+/**
+ * i40e_free_macvlan_channels
+ * @vsi: the VSI we want to access
+ *
+ * This function frees the Qs of the channel VSI from
+ * the stack and also deletes the channel VSIs which
+ * serve as macvlans.
+ */
+static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
+{
+	struct i40e_channel *ch, *ch_tmp;
+	int ret;
+
+	if (list_empty(&vsi->macvlan_list))
+		return;
+
+	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+		struct i40e_vsi *parent_vsi;
+
+		if (i40e_is_channel_macvlan(ch)) {
+			i40e_reset_ch_rings(vsi, ch);
+			clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+			netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
+			netdev_set_sb_channel(ch->fwd->netdev, 0);
+			kfree(ch->fwd);
+			ch->fwd = NULL;
+		}
+
+		list_del(&ch->list);
+		parent_vsi = ch->parent_vsi;
+		if (!parent_vsi || !ch->initialized) {
+			kfree(ch);
+			continue;
+		}
+
+		/* remove the VSI */
+		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
+					     NULL);
+		if (ret)
+			dev_err(&vsi->back->pdev->dev,
+				"unable to remove channel (%d) for parent VSI(%d)\n",
+				ch->seid, parent_vsi->seid);
+		kfree(ch);
+	}
+	vsi->macvlan_cnt = 0;
+}
+
+/**
+ * i40e_fwd_ring_up - bring the macvlan device up
+ * @vsi: the VSI we want to access
+ * @vdev: macvlan netdevice
+ * @fwd: the private fwd structure
+ */
+static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
+			    struct i40e_fwd_adapter *fwd)
+{
+	struct i40e_channel *ch = NULL, *ch_tmp, *iter;
+	int ret = 0, num_tc = 1, i, aq_err;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+
+	/* Go through the list and find an available channel */
+	list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+		if (!i40e_is_channel_macvlan(iter)) {
+			iter->fwd = fwd;
+			/* record configuration for macvlan interface in vdev */
+			for (i = 0; i < num_tc; i++)
+				netdev_bind_sb_channel_queue(vsi->netdev, vdev,
+							     i,
+							     iter->num_queue_pairs,
+							     iter->base_queue);
+			for (i = 0; i < iter->num_queue_pairs; i++) {
+				struct i40e_ring *tx_ring, *rx_ring;
+				u16 pf_q;
+
+				pf_q = iter->base_queue + i;
+
+				/* Get to TX ring ptr */
+				tx_ring = vsi->tx_rings[pf_q];
+				tx_ring->ch = iter;
+
+				/* Get the RX ring ptr */
+				rx_ring = vsi->rx_rings[pf_q];
+				rx_ring->ch = iter;
+			}
+			ch = iter;
+			break;
+		}
+	}
+
+	if (!ch)
+		return -EINVAL;
+
+	/* Guarantee all rings are updated before we update the
+	 * MAC address filter.
+	 */
+	wmb();
+
+	/* Add a mac filter */
+	ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
+	if (ret) {
+		/* if we cannot add the MAC rule then disable the offload */
+		macvlan_release_l2fw_offload(vdev);
+		for (i = 0; i < ch->num_queue_pairs; i++) {
+			struct i40e_ring *rx_ring;
+			u16 pf_q;
+
+			pf_q = ch->base_queue + i;
+			rx_ring = vsi->rx_rings[pf_q];
+			rx_ring->netdev = NULL;
+		}
+		dev_info(&pf->pdev->dev,
+			 "Error adding mac filter on macvlan err %s, aq_err %s\n",
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, aq_err));
+		netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_setup_macvlans - create the channels which will be macvlans
+ * @vsi: the VSI we want to access
+ * @macvlan_cnt: no. of macvlans to be setup
+ * @qcnt: no. of Qs per macvlan
+ * @vdev: macvlan netdevice
+ */
+static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
+			       struct net_device *vdev)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vsi_context ctxt;
+	u16 sections, qmap, num_qps;
+	struct i40e_channel *ch;
+	int i, pow, ret = 0;
+	u8 offset = 0;
+
+	if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
+		return -EINVAL;
+
+	num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
+
+	/* find the next higher power-of-2 of num queue pairs */
+	pow = fls(roundup_pow_of_two(num_qps) - 1);
+
+	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+	/* Setup context bits for the main VSI */
+	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+	memset(&ctxt, 0, sizeof(ctxt));
+	ctxt.seid = vsi->seid;
+	ctxt.pf_num = vsi->back->hw.pf_id;
+	ctxt.vf_num = 0;
+	ctxt.uplink_seid = vsi->uplink_seid;
+	ctxt.info = vsi->info;
+	ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
+	ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+	ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+	ctxt.info.valid_sections |= cpu_to_le16(sections);
+
+	/* Reconfigure RSS for main VSI with new max queue count */
+	vsi->rss_size = max_t(u16, num_qps, qcnt);
+	ret = i40e_vsi_config_rss(vsi);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Failed to reconfig RSS for num_queues (%u)\n",
+			 vsi->rss_size);
+		return ret;
+	}
+	vsi->reconfig_rss = true;
+	dev_dbg(&vsi->back->pdev->dev,
+		"Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
+	vsi->next_base_queue = num_qps;
+	vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
+
+	/* Update the VSI after updating the VSI queue-mapping
	 * information
	 */
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Update vsi tc config failed, err %s aq_err %s\n",
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
+		return ret;
+	}
+	/* update the local VSI info with updated queue map */
+	i40e_vsi_update_queue_map(vsi, &ctxt);
+	vsi->info.valid_sections = 0;
+
+	/* Create channels for macvlans */
+	INIT_LIST_HEAD(&vsi->macvlan_list);
+	for (i = 0; i < macvlan_cnt; i++) {
+		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+		if (!ch) {
+			ret = -ENOMEM;
+			goto err_free;
+		}
+		INIT_LIST_HEAD(&ch->list);
+		ch->num_queue_pairs = qcnt;
+		if (!i40e_setup_channel(pf, vsi, ch)) {
+			ret = -EINVAL;
+			kfree(ch);
+			goto err_free;
+		}
+		ch->parent_vsi = vsi;
+		vsi->cnt_q_avail -= ch->num_queue_pairs;
+		vsi->macvlan_cnt++;
+		list_add_tail(&ch->list, &vsi->macvlan_list);
+	}
+
+	return ret;
+
+err_free:
+	dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
+	i40e_free_macvlan_channels(vsi);
+
+	return ret;
+}
+
+/**
+ * i40e_fwd_add - configure macvlans
+ * @netdev: net device to configure
+ * @vdev: macvlan netdevice
+ **/
+static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_fwd_adapter *fwd;
+	int avail_macvlan, ret;
+
+	if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
+		netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
+		netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
+		netdev_info(netdev, "Not enough vectors available to support macvlans\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* The macvlan device has to be a single Q device so that the
	 * tc_to_txq field can be reused to pick the tx queue.
	 */
+	if (netif_is_multiqueue(vdev))
+		return ERR_PTR(-ERANGE);
+
+	if (!vsi->macvlan_cnt) {
+		/* reserve bit 0 for the pf device */
+		set_bit(0, vsi->fwd_bitmask);
+
+		/* Try to reserve as many queues as possible for macvlans. First
		 * reserve 3/4th of max vectors, then half, then quarter and
		 * calculate Qs per macvlan as you go
		 */
+		vectors = pf->num_lan_msix;
+		if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
+			/* allocate 4 Qs per macvlan and 32 Qs to the PF*/
+			q_per_macvlan = 4;
+			macvlan_cnt = (vectors - 32) / 4;
+		} else if (vectors <= 64 && vectors > 32) {
+			/* allocate 2 Qs per macvlan and 16 Qs to the PF*/
+			q_per_macvlan = 2;
+			macvlan_cnt = (vectors - 16) / 2;
+		} else if (vectors <= 32 && vectors > 16) {
+			/* allocate 1 Q per macvlan and 16 Qs to the PF*/
+			q_per_macvlan = 1;
+			macvlan_cnt = vectors - 16;
+		} else if (vectors <= 16 && vectors > 8) {
+			/* allocate 1 Q per macvlan and 8 Qs to the PF */
+			q_per_macvlan = 1;
+			macvlan_cnt = vectors - 8;
+		} else {
+			/* allocate 1 Q per macvlan and 1 Q to the PF */
+			q_per_macvlan = 1;
+			macvlan_cnt = vectors - 1;
+		}
+
+		if (macvlan_cnt == 0)
+			return ERR_PTR(-EBUSY);
+
+		/* Quiesce VSI queues */
+		i40e_quiesce_vsi(vsi);
+
+		/* sets up the macvlans but does not "enable" them */
+		ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
+					  vdev);
+		if (ret)
+			return ERR_PTR(ret);
+
+		/* Unquiesce VSI */
+		i40e_unquiesce_vsi(vsi);
+	}
+	avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
+					    vsi->macvlan_cnt);
+	if (avail_macvlan >= I40E_MAX_MACVLANS)
+		return ERR_PTR(-EBUSY);
+
+	/* create the fwd struct */
+	fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
+	if (!fwd)
+		return ERR_PTR(-ENOMEM);
+
+	set_bit(avail_macvlan, vsi->fwd_bitmask);
+	fwd->bit_no = avail_macvlan;
+	netdev_set_sb_channel(vdev, avail_macvlan);
+	fwd->netdev = vdev;
+
+	if (!netif_running(netdev))
+		return fwd;
+
+	/* Set fwd ring up */
+	ret = i40e_fwd_ring_up(vsi, vdev, fwd);
+	if (ret) {
+		/* unbind the queues and drop the subordinate channel config */
+		netdev_unbind_sb_channel(netdev, vdev);
+		netdev_set_sb_channel(vdev, 0);
+
+		kfree(fwd);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return fwd;
+}
+
+/**
+ * i40e_del_all_macvlans - Delete all the mac filters on the channels
+ * @vsi: the VSI we want to access
+ */
+static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
+{
+	struct i40e_channel *ch, *ch_tmp;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int aq_err, ret = 0;
+
+	if (list_empty(&vsi->macvlan_list))
+		return;
+
+	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+		if (i40e_is_channel_macvlan(ch)) {
+			ret = i40e_del_macvlan_filter(hw, ch->seid,
+						      i40e_channel_mac(ch),
+						      &aq_err);
+			if (!ret) {
+				/* Reset queue contexts */
+				i40e_reset_ch_rings(vsi, ch);
+				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+				netdev_unbind_sb_channel(vsi->netdev,
+							 ch->fwd->netdev);
+				netdev_set_sb_channel(ch->fwd->netdev, 0);
+				kfree(ch->fwd);
+				ch->fwd = NULL;
+			}
+		}
+	}
+}
+
+/**
+ * i40e_fwd_del - delete macvlan interfaces
+ * @netdev: net device to configure
+ * @vdev: macvlan netdevice
+ */
+static void i40e_fwd_del(struct net_device *netdev, void *vdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_fwd_adapter *fwd = vdev;
+	struct i40e_channel *ch, *ch_tmp;
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int aq_err, ret = 0;
+
+	/* Find the channel associated with the macvlan and del mac filter */
+	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+		if (i40e_is_channel_macvlan(ch) &&
+		    ether_addr_equal(i40e_channel_mac(ch),
+				     fwd->netdev->dev_addr)) {
+			ret = i40e_del_macvlan_filter(hw, ch->seid,
+						      i40e_channel_mac(ch),
+						      &aq_err);
+			if (!ret) {
+				/* Reset queue contexts */
+				i40e_reset_ch_rings(vsi, ch);
+				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+				netdev_unbind_sb_channel(netdev, fwd->netdev);
+				netdev_set_sb_channel(fwd->netdev, 0);
+				kfree(ch->fwd);
+				ch->fwd = NULL;
+			} else {
+				dev_info(&pf->pdev->dev,
+					 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
+					 i40e_stat_str(hw, ret),
+					 i40e_aq_str(hw, aq_err));
+			}
+			break;
+		}
 	}
 }
 
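[Reviewer note, not part of the patch] The ladder in i40e_fwd_add() trades PF queues against macvlan count as the vector budget shrinks. A worked example under the branch conditions shown above:

	/* Hypothetical walk-through: with 48 MSI-X vectors, 32 < 48 <= 64,
	 * so the second branch applies.
	 */
	unsigned int vectors = 48;
	unsigned int q_per_macvlan = 2;                /* 2 queues per macvlan */
	unsigned int macvlan_cnt = (vectors - 16) / 2; /* = 16 macvlans; 16 queues stay with the PF */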
....@@ -6939,13 +7716,25 @@
69397716 vsi->seid);
69407717 need_reset = true;
69417718 goto exit;
7719
+ } else if (enabled_tc &&
7720
+ (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
7721
+ netdev_info(netdev,
7722
+ "Failed to create channel. Override queues (%u) not power of 2\n",
7723
+ vsi->tc_config.tc_info[0].qcount);
7724
+ ret = -EINVAL;
7725
+ need_reset = true;
7726
+ goto exit;
69427727 }
7728
+
7729
+ dev_info(&vsi->back->pdev->dev,
7730
+ "Setup channel (id:%u) utilizing num_queues %d\n",
7731
+ vsi->seid, vsi->tc_config.tc_info[0].qcount);
69437732
69447733 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
69457734 if (vsi->mqprio_qopt.max_rate[0]) {
6946
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7735
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
7736
+ vsi->mqprio_qopt.max_rate[0]);
69477737
6948
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
69497738 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
69507739 if (!ret) {
69517740 u64 credits = max_tx_rate;
....@@ -6994,8 +7783,8 @@
69947783 i40e_set_cld_element(struct i40e_cloud_filter *filter,
69957784 struct i40e_aqc_cloud_filters_element_data *cld)
69967785 {
6997
- int i, j;
69987786 u32 ipa;
7787
+ int i;
69997788
70007789 memset(cld, 0, sizeof(*cld));
70017790 ether_addr_copy(cld->outer_mac, filter->dst_mac);
....@@ -7006,14 +7795,14 @@
70067795
70077796 if (filter->n_proto == ETH_P_IPV6) {
70087797 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7009
- for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7010
- i++, j += 2) {
7798
+ for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
70117799 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7012
- ipa = cpu_to_le32(ipa);
7013
- memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7800
+
7801
+ *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
70147802 }
70157803 } else {
70167804 ipa = be32_to_cpu(filter->dst_ipv4);
7805
+
70177806 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
70187807 }
70197808
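
The rewritten IPv6 copy above replaces the old memcpy through a temporary with a direct little-endian store; a self-contained sketch of the same transform, assuming the destination is an array of eight __le16 halfwords as in the admin-queue element:

static void example_copy_v6(__le16 dst[8], const __be32 src[4])
{
	int i;

	for (i = 0; i < 4; i++) {
		/* reverse the word order and convert big- to little-endian */
		u32 host = be32_to_cpu(src[3 - i]);

		*(__le32 *)&dst[i * 2] = cpu_to_le32(host);
	}
}
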
....@@ -7208,19 +7997,21 @@
72087997 /**
72097998 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
72107999 * @vsi: Pointer to VSI
7211
- * @cls_flower: Pointer to struct tc_cls_flower_offload
8000
+ * @f: Pointer to struct flow_cls_offload
72128001 * @filter: Pointer to cloud filter structure
72138002 *
72148003 **/
72158004 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7216
- struct tc_cls_flower_offload *f,
8005
+ struct flow_cls_offload *f,
72178006 struct i40e_cloud_filter *filter)
72188007 {
8008
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8009
+ struct flow_dissector *dissector = rule->match.dissector;
72198010 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
72208011 struct i40e_pf *pf = vsi->back;
72218012 u8 field_flags = 0;
72228013
7223
- if (f->dissector->used_keys &
8014
+ if (dissector->used_keys &
72248015 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
72258016 BIT(FLOW_DISSECTOR_KEY_BASIC) |
72268017 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
....@@ -7230,143 +8021,109 @@
72308021 BIT(FLOW_DISSECTOR_KEY_PORTS) |
72318022 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
72328023 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7233
- f->dissector->used_keys);
8024
+ dissector->used_keys);
72348025 return -EOPNOTSUPP;
72358026 }
72368027
7237
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7238
- struct flow_dissector_key_keyid *key =
7239
- skb_flow_dissector_target(f->dissector,
7240
- FLOW_DISSECTOR_KEY_ENC_KEYID,
7241
- f->key);
8028
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8029
+ struct flow_match_enc_keyid match;
72428030
7243
- struct flow_dissector_key_keyid *mask =
7244
- skb_flow_dissector_target(f->dissector,
7245
- FLOW_DISSECTOR_KEY_ENC_KEYID,
7246
- f->mask);
7247
-
7248
- if (mask->keyid != 0)
8031
+ flow_rule_match_enc_keyid(rule, &match);
8032
+ if (match.mask->keyid != 0)
72498033 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
72508034
7251
- filter->tenant_id = be32_to_cpu(key->keyid);
8035
+ filter->tenant_id = be32_to_cpu(match.key->keyid);
72528036 }
72538037
7254
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
7255
- struct flow_dissector_key_basic *key =
7256
- skb_flow_dissector_target(f->dissector,
7257
- FLOW_DISSECTOR_KEY_BASIC,
7258
- f->key);
8038
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8039
+ struct flow_match_basic match;
72598040
7260
- struct flow_dissector_key_basic *mask =
7261
- skb_flow_dissector_target(f->dissector,
7262
- FLOW_DISSECTOR_KEY_BASIC,
7263
- f->mask);
7264
-
7265
- n_proto_key = ntohs(key->n_proto);
7266
- n_proto_mask = ntohs(mask->n_proto);
8041
+ flow_rule_match_basic(rule, &match);
8042
+ n_proto_key = ntohs(match.key->n_proto);
8043
+ n_proto_mask = ntohs(match.mask->n_proto);
72678044
72688045 if (n_proto_key == ETH_P_ALL) {
72698046 n_proto_key = 0;
72708047 n_proto_mask = 0;
72718048 }
72728049 filter->n_proto = n_proto_key & n_proto_mask;
7273
- filter->ip_proto = key->ip_proto;
8050
+ filter->ip_proto = match.key->ip_proto;
72748051 }
72758052
7276
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7277
- struct flow_dissector_key_eth_addrs *key =
7278
- skb_flow_dissector_target(f->dissector,
7279
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
7280
- f->key);
8053
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8054
+ struct flow_match_eth_addrs match;
72818055
7282
- struct flow_dissector_key_eth_addrs *mask =
7283
- skb_flow_dissector_target(f->dissector,
7284
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
7285
- f->mask);
8056
+ flow_rule_match_eth_addrs(rule, &match);
72868057
72878058 /* use is_broadcast and is_zero to check for all 0xf or 0 */
7288
- if (!is_zero_ether_addr(mask->dst)) {
7289
- if (is_broadcast_ether_addr(mask->dst)) {
8059
+ if (!is_zero_ether_addr(match.mask->dst)) {
8060
+ if (is_broadcast_ether_addr(match.mask->dst)) {
72908061 field_flags |= I40E_CLOUD_FIELD_OMAC;
72918062 } else {
72928063 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7293
- mask->dst);
8064
+ match.mask->dst);
72948065 return I40E_ERR_CONFIG;
72958066 }
72968067 }
72978068
7298
- if (!is_zero_ether_addr(mask->src)) {
7299
- if (is_broadcast_ether_addr(mask->src)) {
8069
+ if (!is_zero_ether_addr(match.mask->src)) {
8070
+ if (is_broadcast_ether_addr(match.mask->src)) {
73008071 field_flags |= I40E_CLOUD_FIELD_IMAC;
73018072 } else {
73028073 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7303
- mask->src);
8074
+ match.mask->src);
73048075 return I40E_ERR_CONFIG;
73058076 }
73068077 }
7307
- ether_addr_copy(filter->dst_mac, key->dst);
7308
- ether_addr_copy(filter->src_mac, key->src);
8078
+ ether_addr_copy(filter->dst_mac, match.key->dst);
8079
+ ether_addr_copy(filter->src_mac, match.key->src);
73098080 }
73108081
7311
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
7312
- struct flow_dissector_key_vlan *key =
7313
- skb_flow_dissector_target(f->dissector,
7314
- FLOW_DISSECTOR_KEY_VLAN,
7315
- f->key);
7316
- struct flow_dissector_key_vlan *mask =
7317
- skb_flow_dissector_target(f->dissector,
7318
- FLOW_DISSECTOR_KEY_VLAN,
7319
- f->mask);
8082
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8083
+ struct flow_match_vlan match;
73208084
7321
- if (mask->vlan_id) {
7322
- if (mask->vlan_id == VLAN_VID_MASK) {
8085
+ flow_rule_match_vlan(rule, &match);
8086
+ if (match.mask->vlan_id) {
8087
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
73238088 field_flags |= I40E_CLOUD_FIELD_IVLAN;
73248089
73258090 } else {
73268091 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7327
- mask->vlan_id);
8092
+ match.mask->vlan_id);
73288093 return I40E_ERR_CONFIG;
73298094 }
73308095 }
73318096
7332
- filter->vlan_id = cpu_to_be16(key->vlan_id);
8097
+ filter->vlan_id = cpu_to_be16(match.key->vlan_id);
73338098 }
73348099
7335
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
7336
- struct flow_dissector_key_control *key =
7337
- skb_flow_dissector_target(f->dissector,
7338
- FLOW_DISSECTOR_KEY_CONTROL,
7339
- f->key);
8100
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8101
+ struct flow_match_control match;
73408102
7341
- addr_type = key->addr_type;
8103
+ flow_rule_match_control(rule, &match);
8104
+ addr_type = match.key->addr_type;
73428105 }
73438106
73448107 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7345
- struct flow_dissector_key_ipv4_addrs *key =
7346
- skb_flow_dissector_target(f->dissector,
7347
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7348
- f->key);
7349
- struct flow_dissector_key_ipv4_addrs *mask =
7350
- skb_flow_dissector_target(f->dissector,
7351
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7352
- f->mask);
8108
+ struct flow_match_ipv4_addrs match;
73538109
7354
- if (mask->dst) {
7355
- if (mask->dst == cpu_to_be32(0xffffffff)) {
8110
+ flow_rule_match_ipv4_addrs(rule, &match);
8111
+ if (match.mask->dst) {
8112
+ if (match.mask->dst == cpu_to_be32(0xffffffff)) {
73568113 field_flags |= I40E_CLOUD_FIELD_IIP;
73578114 } else {
73588115 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7359
- &mask->dst);
8116
+ &match.mask->dst);
73608117 return I40E_ERR_CONFIG;
73618118 }
73628119 }
73638120
7364
- if (mask->src) {
7365
- if (mask->src == cpu_to_be32(0xffffffff)) {
8121
+ if (match.mask->src) {
8122
+ if (match.mask->src == cpu_to_be32(0xffffffff)) {
73668123 field_flags |= I40E_CLOUD_FIELD_IIP;
73678124 } else {
73688125 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7369
- &mask->src);
8126
+ &match.mask->src);
73708127 return I40E_ERR_CONFIG;
73718128 }
73728129 }
....@@ -7375,70 +8132,60 @@
73758132 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
73768133 return I40E_ERR_CONFIG;
73778134 }
7378
- filter->dst_ipv4 = key->dst;
7379
- filter->src_ipv4 = key->src;
8135
+ filter->dst_ipv4 = match.key->dst;
8136
+ filter->src_ipv4 = match.key->src;
73808137 }
73818138
73828139 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7383
- struct flow_dissector_key_ipv6_addrs *key =
7384
- skb_flow_dissector_target(f->dissector,
7385
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7386
- f->key);
7387
- struct flow_dissector_key_ipv6_addrs *mask =
7388
- skb_flow_dissector_target(f->dissector,
7389
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7390
- f->mask);
8140
+ struct flow_match_ipv6_addrs match;
8141
+
8142
+ flow_rule_match_ipv6_addrs(rule, &match);
73918143
73928144 /* src and dest IPV6 address should not be LOOPBACK
73938145 * (0:0:0:0:0:0:0:1), which can be represented as ::1
73948146 */
7395
- if (ipv6_addr_loopback(&key->dst) ||
7396
- ipv6_addr_loopback(&key->src)) {
8147
+ if (ipv6_addr_loopback(&match.key->dst) ||
8148
+ ipv6_addr_loopback(&match.key->src)) {
73978149 dev_err(&pf->pdev->dev,
73988150 "Bad ipv6, addr is LOOPBACK\n");
73998151 return I40E_ERR_CONFIG;
74008152 }
7401
- if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
8153
+ if (!ipv6_addr_any(&match.mask->dst) ||
8154
+ !ipv6_addr_any(&match.mask->src))
74028155 field_flags |= I40E_CLOUD_FIELD_IIP;
74038156
7404
- memcpy(&filter->src_ipv6, &key->src.s6_addr32,
8157
+ memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
74058158 sizeof(filter->src_ipv6));
7406
- memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
8159
+ memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
74078160 sizeof(filter->dst_ipv6));
74088161 }
74098162
7410
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
7411
- struct flow_dissector_key_ports *key =
7412
- skb_flow_dissector_target(f->dissector,
7413
- FLOW_DISSECTOR_KEY_PORTS,
7414
- f->key);
7415
- struct flow_dissector_key_ports *mask =
7416
- skb_flow_dissector_target(f->dissector,
7417
- FLOW_DISSECTOR_KEY_PORTS,
7418
- f->mask);
8163
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8164
+ struct flow_match_ports match;
74198165
7420
- if (mask->src) {
7421
- if (mask->src == cpu_to_be16(0xffff)) {
8166
+ flow_rule_match_ports(rule, &match);
8167
+ if (match.mask->src) {
8168
+ if (match.mask->src == cpu_to_be16(0xffff)) {
74228169 field_flags |= I40E_CLOUD_FIELD_IIP;
74238170 } else {
74248171 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7425
- be16_to_cpu(mask->src));
8172
+ be16_to_cpu(match.mask->src));
74268173 return I40E_ERR_CONFIG;
74278174 }
74288175 }
74298176
7430
- if (mask->dst) {
7431
- if (mask->dst == cpu_to_be16(0xffff)) {
8177
+ if (match.mask->dst) {
8178
+ if (match.mask->dst == cpu_to_be16(0xffff)) {
74328179 field_flags |= I40E_CLOUD_FIELD_IIP;
74338180 } else {
74348181 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7435
- be16_to_cpu(mask->dst));
8182
+ be16_to_cpu(match.mask->dst));
74368183 return I40E_ERR_CONFIG;
74378184 }
74388185 }
74398186
7440
- filter->dst_port = key->dst;
7441
- filter->src_port = key->src;
8187
+ filter->dst_port = match.key->dst;
8188
+ filter->src_port = match.key->src;
74428189
74438190 switch (filter->ip_proto) {
74448191 case IPPROTO_TCP:
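
The conversion in this hunk follows the generic flow_rule pattern from <net/flow_offload.h>: test for a key with flow_rule_match_key(), then fetch key and mask through the typed flow_rule_match_*() helper. A minimal standalone consumer, for illustration only:

static int example_parse(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		/* match.key holds the values, match.mask the valid bits */
		if (match.mask->dst == cpu_to_be16(0xffff))
			return be16_to_cpu(match.key->dst);
	}
	return -EOPNOTSUPP;
}
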
....@@ -7492,11 +8239,11 @@
74928239 /**
74938240 * i40e_configure_clsflower - Configure tc flower filters
74948241 * @vsi: Pointer to VSI
7495
- * @cls_flower: Pointer to struct tc_cls_flower_offload
8242
+ * @cls_flower: Pointer to struct flow_cls_offload
74968243 *
74978244 **/
74988245 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7499
- struct tc_cls_flower_offload *cls_flower)
8246
+ struct flow_cls_offload *cls_flower)
75008247 {
75018248 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
75028249 struct i40e_cloud_filter *filter = NULL;
....@@ -7506,6 +8253,11 @@
75068253 if (tc < 0) {
75078254 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
75088255 return -EOPNOTSUPP;
8256
+ }
8257
+
8258
+ if (!tc) {
8259
+ dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination\n");
8260
+ return -EINVAL;
75098261 }
75108262
75118263 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
....@@ -7587,11 +8339,11 @@
75878339 /**
75888340 * i40e_delete_clsflower - Remove tc flower filters
75898341 * @vsi: Pointer to VSI
7590
- * @cls_flower: Pointer to struct tc_cls_flower_offload
8342
+ * @cls_flower: Pointer to struct flow_cls_offload
75918343 *
75928344 **/
75938345 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7594
- struct tc_cls_flower_offload *cls_flower)
8346
+ struct flow_cls_offload *cls_flower)
75958347 {
75968348 struct i40e_cloud_filter *filter = NULL;
75978349 struct i40e_pf *pf = vsi->back;
....@@ -7630,20 +8382,20 @@
76308382
76318383 /**
76328384 * i40e_setup_tc_cls_flower - flower classifier offloads
7633
- * @netdev: net device to configure
7634
- * @type_data: offload data
8385
+ * @np: net device private structure
8386
+ * @cls_flower: offload data
76358387 **/
76368388 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7637
- struct tc_cls_flower_offload *cls_flower)
8389
+ struct flow_cls_offload *cls_flower)
76388390 {
76398391 struct i40e_vsi *vsi = np->vsi;
76408392
76418393 switch (cls_flower->command) {
7642
- case TC_CLSFLOWER_REPLACE:
8394
+ case FLOW_CLS_REPLACE:
76438395 return i40e_configure_clsflower(vsi, cls_flower);
7644
- case TC_CLSFLOWER_DESTROY:
8396
+ case FLOW_CLS_DESTROY:
76458397 return i40e_delete_clsflower(vsi, cls_flower);
7646
- case TC_CLSFLOWER_STATS:
8398
+ case FLOW_CLS_STATS:
76478399 return -EOPNOTSUPP;
76488400 default:
76498401 return -EOPNOTSUPP;
....@@ -7667,34 +8419,21 @@
76678419 }
76688420 }
76698421
7670
-static int i40e_setup_tc_block(struct net_device *dev,
7671
- struct tc_block_offload *f)
7672
-{
7673
- struct i40e_netdev_priv *np = netdev_priv(dev);
7674
-
7675
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7676
- return -EOPNOTSUPP;
7677
-
7678
- switch (f->command) {
7679
- case TC_BLOCK_BIND:
7680
- return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
7681
- np, np, f->extack);
7682
- case TC_BLOCK_UNBIND:
7683
- tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7684
- return 0;
7685
- default:
7686
- return -EOPNOTSUPP;
7687
- }
7688
-}
8422
+static LIST_HEAD(i40e_block_cb_list);
76898423
76908424 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
76918425 void *type_data)
76928426 {
8427
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
8428
+
76938429 switch (type) {
76948430 case TC_SETUP_QDISC_MQPRIO:
76958431 return i40e_setup_tc(netdev, type_data);
76968432 case TC_SETUP_BLOCK:
7697
- return i40e_setup_tc_block(netdev, type_data);
8433
+ return flow_block_cb_setup_simple(type_data,
8434
+ &i40e_block_cb_list,
8435
+ i40e_setup_tc_block_cb,
8436
+ np, np, true);
76988437 default:
76998438 return -EOPNOTSUPP;
77008439 }
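
flow_block_cb_setup_simple() replaces the hand-rolled bind/unbind logic deleted above; as understood here, its arguments map onto the old code roughly like this (annotation only, see net/core/flow_offload.c for the authoritative definition):

/*
 * flow_block_cb_setup_simple(f,            block offload request
 *                            driver_list,  per-driver list of bound cbs
 *                            cb,           the classifier callback
 *                            cb_ident,     identity compared on unbind
 *                            cb_priv,      argument handed back to cb
 *                            ingress_only) true => reject egress binds,
 *                                          like the old binder_type check
 */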
....@@ -7962,9 +8701,6 @@
79628701 {
79638702 u32 val;
79648703
7965
- WARN_ON(in_interrupt());
7966
-
7967
-
79688704 /* do the biggest reset indicated */
79698705 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
79708706
....@@ -8013,6 +8749,10 @@
80138749 */
80148750 i40e_prep_for_reset(pf, lock_acquired);
80158751 i40e_reset_and_rebuild(pf, true, lock_acquired);
8752
+ dev_info(&pf->pdev->dev,
8753
+ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
8754
+ "FW LLDP is disabled\n" :
8755
+ "FW LLDP is enabled\n");
80168756
80178757 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
80188758 int v;
....@@ -8202,8 +8942,8 @@
82028942 i40e_service_event_schedule(pf);
82038943 } else {
82048944 i40e_pf_unquiesce_all_vsi(pf);
8205
- set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8206
- set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8945
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8946
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
82078947 }
82088948
82098949 exit:
....@@ -8489,13 +9229,6 @@
84899229 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
84909230 }
84919231
8492
-/* We can see up to 256 filter programming desc in transit if the filters are
8493
- * being applied really fast; before we see the first
8494
- * filter miss error on Rx queue 0. Accumulating enough error messages before
8495
- * reacting will make sure we don't cause flush too often.
8496
- */
8497
-#define I40E_MAX_FD_PROGRAM_ERROR 256
8498
-
84999232 /**
85009233 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
85019234 * @pf: board private structure
....@@ -8585,14 +9318,9 @@
85859318 i40e_status status;
85869319 bool new_link, old_link;
85879320
8588
- /* save off old link status information */
8589
- pf->hw.phy.link_info_old = pf->hw.phy.link_info;
8590
-
85919321 /* set this to force the get_link_status call to refresh state */
85929322 pf->hw.phy.get_link_info = true;
8593
-
85949323 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
8595
-
85969324 status = i40e_get_link_status(&pf->hw, &new_link);
85979325
85989326 /* On success, disable temp link polling */
....@@ -8622,7 +9350,7 @@
86229350 /* Notify the base of the switch tree connected to
86239351 * the link. Floating VEBs are not notified.
86249352 */
8625
- if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
9353
+ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
86269354 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
86279355 else
86289356 i40e_vsi_link_event(vsi, new_link);
....@@ -9100,6 +9828,7 @@
91009828 /**
91019829 * i40e_get_capabilities - get info about the HW
91029830 * @pf: the PF struct
9831
+ * @list_type: AQ capability to be queried
91039832 **/
91049833 static int i40e_get_capabilities(struct i40e_pf *pf,
91059834 enum i40e_admin_queue_opc list_type)
....@@ -9337,6 +10066,21 @@
933710066 }
933810067
933910068 /**
10069
+ * i40e_clean_xps_state - clean xps state for every tx_ring
10070
+ * @vsi: ptr to the VSI
10071
+ **/
10072
+static void i40e_clean_xps_state(struct i40e_vsi *vsi)
10073
+{
10074
+ int i;
10075
+
10076
+ if (vsi->tx_rings)
10077
+ for (i = 0; i < vsi->num_queue_pairs; i++)
10078
+ if (vsi->tx_rings[i])
10079
+ clear_bit(__I40E_TX_XPS_INIT_DONE,
10080
+ vsi->tx_rings[i]->state);
10081
+}
10082
+
10083
+/**
934010084 * i40e_prep_for_reset - prep for the core to reset
934110085 * @pf: board private structure
934210086 * @lock_acquired: indicates whether or not the lock has been acquired
....@@ -9367,8 +10111,10 @@
936710111 rtnl_unlock();
936810112
936910113 for (v = 0; v < pf->num_alloc_vsi; v++) {
9370
- if (pf->vsi[v])
10114
+ if (pf->vsi[v]) {
10115
+ i40e_clean_xps_state(pf->vsi[v]);
937110116 pf->vsi[v]->seid = 0;
10117
+ }
937210118 }
937310119
937410120 i40e_shutdown_adminq(&pf->hw);
....@@ -9380,6 +10126,11 @@
938010126 dev_warn(&pf->pdev->dev,
938110127 "shutdown_lan_hmc failed: %d\n", ret);
938210128 }
10129
+
10130
+ /* Save the current PTP time so that we can restore the time after the
10131
+ * reset completes.
10132
+ */
10133
+ i40e_ptp_save_hw_time(pf);
938310134 }
938410135
938510136 /**
....@@ -9390,11 +10141,11 @@
939010141 {
939110142 struct i40e_driver_version dv;
939210143
9393
- dv.major_version = DRV_VERSION_MAJOR;
9394
- dv.minor_version = DRV_VERSION_MINOR;
9395
- dv.build_version = DRV_VERSION_BUILD;
10144
+ dv.major_version = 0xff;
10145
+ dv.minor_version = 0xff;
10146
+ dv.build_version = 0xff;
939610147 dv.subbuild_version = 0;
9397
- strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
10148
+ strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
939810149 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
939910150 }
940010151
....@@ -9472,13 +10223,19 @@
947210223 **/
947310224 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
947410225 {
10226
+ const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
947510227 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
947610228 struct i40e_hw *hw = &pf->hw;
947710229 i40e_status ret;
947810230 u32 val;
947910231 int v;
948010232
9481
- if (test_bit(__I40E_DOWN, pf->state))
10233
+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10234
+ is_recovery_mode_reported)
10235
+ i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10236
+
10237
+ if (test_bit(__I40E_DOWN, pf->state) &&
10238
+ !test_bit(__I40E_RECOVERY_MODE, pf->state))
948210239 goto clear_recovery;
948310240 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
948410241
....@@ -9500,6 +10257,43 @@
950010257 /* re-verify the eeprom if we just had an EMP reset */
950110258 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
950210259 i40e_verify_eeprom(pf);
10260
+
10261
+ /* if we are going out of or into recovery mode we have to act
10262
+ * accordingly with regard to resource initialization
10263
+ * and deinitialization
10264
+ */
10265
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10266
+ if (i40e_get_capabilities(pf,
10267
+ i40e_aqc_opc_list_func_capabilities))
10268
+ goto end_unlock;
10269
+
10270
+ if (is_recovery_mode_reported) {
10271
+ /* we're staying in recovery mode so we'll reinitialize
10272
+ * misc vector here
10273
+ */
10274
+ if (i40e_setup_misc_vector_for_recovery_mode(pf))
10275
+ goto end_unlock;
10276
+ } else {
10277
+ if (!lock_acquired)
10278
+ rtnl_lock();
10279
+ /* we're going out of recovery mode so we'll free
10280
+ * the IRQ allocated specifically for recovery mode
10281
+ * and restore the interrupt scheme
10282
+ */
10283
+ free_irq(pf->pdev->irq, pf);
10284
+ i40e_clear_interrupt_scheme(pf);
10285
+ if (i40e_restore_interrupt_scheme(pf))
10286
+ goto end_unlock;
10287
+ }
10288
+
10289
+ /* tell the firmware that we're starting */
10290
+ i40e_send_version(pf);
10291
+
10292
+ /* bail out in case recovery mode was detected, as there is
10293
+ * no need for further configuration.
10294
+ */
10295
+ goto end_unlock;
10296
+ }
950310297
950410298 i40e_clear_pxe_mode(hw);
950510299 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
....@@ -9532,7 +10326,7 @@
953210326 /* do basic switch setup */
953310327 if (!lock_acquired)
953410328 rtnl_lock();
9535
- ret = i40e_setup_pf_switch(pf, reinit);
10329
+ ret = i40e_setup_pf_switch(pf, reinit, true);
953610330 if (ret)
953710331 goto end_unlock;
953810332
....@@ -9602,10 +10396,10 @@
960210396 }
960310397
960410398 if (vsi->mqprio_qopt.max_rate[0]) {
9605
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10399
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
10400
+ vsi->mqprio_qopt.max_rate[0]);
960610401 u64 credits = 0;
960710402
9608
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
960910403 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
961010404 if (ret)
961110405 goto end_unlock;
....@@ -9654,8 +10448,11 @@
965410448 pf->hw.aq.asq_last_status));
965510449 }
965610450 /* reinit the misc interrupt */
9657
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10451
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
965810452 ret = i40e_setup_misc_vector(pf);
10453
+ if (ret)
10454
+ goto end_unlock;
10455
+ }
965910456
966010457 /* Add a filter to drop all Flow control frames from any VSI from being
966110458 * transmitted. By doing so we stop a malicious VF from sending out
....@@ -9745,7 +10542,6 @@
974510542 {
974610543 struct i40e_hw *hw = &pf->hw;
974710544 bool mdd_detected = false;
9748
- bool pf_mdd_detected = false;
974910545 struct i40e_vf *vf;
975010546 u32 reg;
975110547 int i;
....@@ -9791,19 +10587,12 @@
979110587 reg = rd32(hw, I40E_PF_MDET_TX);
979210588 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
979310589 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
9794
- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
9795
- pf_mdd_detected = true;
10590
+ dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
979610591 }
979710592 reg = rd32(hw, I40E_PF_MDET_RX);
979810593 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
979910594 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
9800
- dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
9801
- pf_mdd_detected = true;
9802
- }
9803
- /* Queue belongs to the PF, initiate a reset */
9804
- if (pf_mdd_detected) {
9805
- set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9806
- i40e_service_event_schedule(pf);
10595
+ dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
980710596 }
980810597 }
980910598
....@@ -9816,6 +10605,9 @@
981610605 vf->num_mdd_events++;
981710606 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
981810607 i);
10608
+ dev_info(&pf->pdev->dev,
10609
+ "Use PF Control I/F to re-enable the VF\n");
10610
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
981910611 }
982010612
982110613 reg = rd32(hw, I40E_VP_MDET_RX(i));
....@@ -9824,11 +10616,6 @@
982410616 vf->num_mdd_events++;
982510617 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
982610618 i);
9827
- }
9828
-
9829
- if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
9830
- dev_info(&pf->pdev->dev,
9831
- "Too many MDD events on VF %d, disabled\n", i);
983210619 dev_info(&pf->pdev->dev,
983310620 "Use PF Control I/F to re-enable the VF\n");
983410621 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
....@@ -9843,106 +10630,6 @@
984310630 i40e_flush(hw);
984410631 }
984510632
9846
-static const char *i40e_tunnel_name(u8 type)
9847
-{
9848
- switch (type) {
9849
- case UDP_TUNNEL_TYPE_VXLAN:
9850
- return "vxlan";
9851
- case UDP_TUNNEL_TYPE_GENEVE:
9852
- return "geneve";
9853
- default:
9854
- return "unknown";
9855
- }
9856
-}
9857
-
9858
-/**
9859
- * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
9860
- * @pf: board private structure
9861
- **/
9862
-static void i40e_sync_udp_filters(struct i40e_pf *pf)
9863
-{
9864
- int i;
9865
-
9866
- /* loop through and set pending bit for all active UDP filters */
9867
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9868
- if (pf->udp_ports[i].port)
9869
- pf->pending_udp_bitmap |= BIT_ULL(i);
9870
- }
9871
-
9872
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
9873
-}
9874
-
9875
-/**
9876
- * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
9877
- * @pf: board private structure
9878
- **/
9879
-static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9880
-{
9881
- struct i40e_hw *hw = &pf->hw;
9882
- u8 filter_index, type;
9883
- u16 port;
9884
- int i;
9885
-
9886
- if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
9887
- return;
9888
-
9889
- /* acquire RTNL to maintain state of flags and port requests */
9890
- rtnl_lock();
9891
-
9892
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9893
- if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9894
- struct i40e_udp_port_config *udp_port;
9895
- i40e_status ret = 0;
9896
-
9897
- udp_port = &pf->udp_ports[i];
9898
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
9899
-
9900
- port = READ_ONCE(udp_port->port);
9901
- type = READ_ONCE(udp_port->type);
9902
- filter_index = READ_ONCE(udp_port->filter_index);
9903
-
9904
- /* release RTNL while we wait on AQ command */
9905
- rtnl_unlock();
9906
-
9907
- if (port)
9908
- ret = i40e_aq_add_udp_tunnel(hw, port,
9909
- type,
9910
- &filter_index,
9911
- NULL);
9912
- else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
9913
- ret = i40e_aq_del_udp_tunnel(hw, filter_index,
9914
- NULL);
9915
-
9916
- /* reacquire RTNL so we can update filter_index */
9917
- rtnl_lock();
9918
-
9919
- if (ret) {
9920
- dev_info(&pf->pdev->dev,
9921
- "%s %s port %d, index %d failed, err %s aq_err %s\n",
9922
- i40e_tunnel_name(type),
9923
- port ? "add" : "delete",
9924
- port,
9925
- filter_index,
9926
- i40e_stat_str(&pf->hw, ret),
9927
- i40e_aq_str(&pf->hw,
9928
- pf->hw.aq.asq_last_status));
9929
- if (port) {
9930
- /* failed to add, just reset port,
9931
- * drop pending bit for any deletion
9932
- */
9933
- udp_port->port = 0;
9934
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
9935
- }
9936
- } else if (port) {
9937
- /* record filter index on success */
9938
- udp_port->filter_index = filter_index;
9939
- }
9940
- }
9941
- }
9942
-
9943
- rtnl_unlock();
9944
-}
9945
-
994610633 /**
994710634 * i40e_service_task - Run the driver's async subtasks
994810635 * @work: pointer to work_struct containing our data
....@@ -9955,31 +10642,37 @@
995510642 unsigned long start_time = jiffies;
995610643
995710644 /* don't bother with service tasks if a reset is in progress */
9958
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10645
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
10646
+ test_bit(__I40E_SUSPENDED, pf->state))
995910647 return;
996010648
996110649 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
996210650 return;
996310651
9964
- i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
9965
- i40e_sync_filters_subtask(pf);
9966
- i40e_reset_subtask(pf);
9967
- i40e_handle_mdd_event(pf);
9968
- i40e_vc_process_vflr_event(pf);
9969
- i40e_watchdog_subtask(pf);
9970
- i40e_fdir_reinit_subtask(pf);
9971
- if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
9972
- /* Client subtask will reopen next time through. */
9973
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
10652
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10653
+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
10654
+ i40e_sync_filters_subtask(pf);
10655
+ i40e_reset_subtask(pf);
10656
+ i40e_handle_mdd_event(pf);
10657
+ i40e_vc_process_vflr_event(pf);
10658
+ i40e_watchdog_subtask(pf);
10659
+ i40e_fdir_reinit_subtask(pf);
10660
+ if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
10661
+ /* Client subtask will reopen next time through. */
10662
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
10663
+ true);
10664
+ } else {
10665
+ i40e_client_subtask(pf);
10666
+ if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
10667
+ pf->state))
10668
+ i40e_notify_client_of_l2_param_changes(
10669
+ pf->vsi[pf->lan_vsi]);
10670
+ }
10671
+ i40e_sync_filters_subtask(pf);
997410672 } else {
9975
- i40e_client_subtask(pf);
9976
- if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
9977
- pf->state))
9978
- i40e_notify_client_of_l2_param_changes(
9979
- pf->vsi[pf->lan_vsi]);
10673
+ i40e_reset_subtask(pf);
998010674 }
9981
- i40e_sync_filters_subtask(pf);
9982
- i40e_sync_udp_filters_subtask(pf);
10675
+
998310676 i40e_clean_adminq_subtask(pf);
998410677
998510678 /* flush memory to make sure state is correct before next watchdog */
....@@ -9999,7 +10692,7 @@
999910692
1000010693 /**
1000110694 * i40e_service_timer - timer callback
10002
- * @data: pointer to PF struct
10695
+ * @t: timer list pointer
1000310696 **/
1000410697 static void i40e_service_timer(struct timer_list *t)
1000510698 {
....@@ -10021,8 +10714,12 @@
1002110714 switch (vsi->type) {
1002210715 case I40E_VSI_MAIN:
1002310716 vsi->alloc_queue_pairs = pf->num_lan_qps;
10024
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10025
- I40E_REQ_DESCRIPTOR_MULTIPLE);
10717
+ if (!vsi->num_tx_desc)
10718
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10719
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
10720
+ if (!vsi->num_rx_desc)
10721
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10722
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
1002610723 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1002710724 vsi->num_q_vectors = pf->num_lan_msix;
1002810725 else
....@@ -10032,22 +10729,32 @@
1003210729
1003310730 case I40E_VSI_FDIR:
1003410731 vsi->alloc_queue_pairs = 1;
10035
- vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
10036
- I40E_REQ_DESCRIPTOR_MULTIPLE);
10732
+ vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10733
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
10734
+ vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10735
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
1003710736 vsi->num_q_vectors = pf->num_fdsb_msix;
1003810737 break;
1003910738
1004010739 case I40E_VSI_VMDQ2:
1004110740 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
10042
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10043
- I40E_REQ_DESCRIPTOR_MULTIPLE);
10741
+ if (!vsi->num_tx_desc)
10742
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10743
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
10744
+ if (!vsi->num_rx_desc)
10745
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10746
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
1004410747 vsi->num_q_vectors = pf->num_vmdq_msix;
1004510748 break;
1004610749
1004710750 case I40E_VSI_SRIOV:
1004810751 vsi->alloc_queue_pairs = pf->num_vf_qps;
10049
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10050
- I40E_REQ_DESCRIPTOR_MULTIPLE);
10752
+ if (!vsi->num_tx_desc)
10753
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10754
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
10755
+ if (!vsi->num_rx_desc)
10756
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10757
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
1005110758 break;
1005210759
1005310760 default:
....@@ -10160,6 +10867,12 @@
1016010867 hash_init(vsi->mac_filter_hash);
1016110868 vsi->irqs_ready = false;
1016210869
10870
+ if (type == I40E_VSI_MAIN) {
10871
+ vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10872
+ if (!vsi->af_xdp_zc_qps)
10873
+ goto err_rings;
10874
+ }
10875
+
1016310876 ret = i40e_set_num_rings_in_vsi(vsi);
1016410877 if (ret)
1016510878 goto err_rings;
....@@ -10178,6 +10891,7 @@
1017810891 goto unlock_pf;
1017910892
1018010893 err_rings:
10894
+ bitmap_free(vsi->af_xdp_zc_qps);
1018110895 pf->next_vsi = i - 1;
1018210896 kfree(vsi);
1018310897 unlock_pf:
....@@ -10258,6 +10972,7 @@
1025810972 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
1025910973 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
1026010974
10975
+ bitmap_free(vsi->af_xdp_zc_qps);
1026110976 i40e_vsi_free_arrays(vsi, true);
1026210977 i40e_clear_rss_config_user(vsi);
1026310978
....@@ -10315,7 +11030,7 @@
1031511030 ring->vsi = vsi;
1031611031 ring->netdev = vsi->netdev;
1031711032 ring->dev = &pf->pdev->dev;
10318
- ring->count = vsi->num_desc;
11033
+ ring->count = vsi->num_tx_desc;
1031911034 ring->size = 0;
1032011035 ring->dcb_tc = 0;
1032111036 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
....@@ -10332,7 +11047,7 @@
1033211047 ring->vsi = vsi;
1033311048 ring->netdev = NULL;
1033411049 ring->dev = &pf->pdev->dev;
10335
- ring->count = vsi->num_desc;
11050
+ ring->count = vsi->num_tx_desc;
1033611051 ring->size = 0;
1033711052 ring->dcb_tc = 0;
1033811053 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
....@@ -10348,7 +11063,7 @@
1034811063 ring->vsi = vsi;
1034911064 ring->netdev = vsi->netdev;
1035011065 ring->dev = &pf->pdev->dev;
10351
- ring->count = vsi->num_desc;
11066
+ ring->count = vsi->num_rx_desc;
1035211067 ring->size = 0;
1035311068 ring->dcb_tc = 0;
1035411069 ring->itr_setting = pf->rx_itr_default;
....@@ -10616,11 +11331,10 @@
1061611331 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
1061711332 * @vsi: the VSI being configured
1061811333 * @v_idx: index of the vector in the vsi struct
10619
- * @cpu: cpu to be used on affinity_mask
1062011334 *
1062111335 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1062211336 **/
10623
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
11337
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
1062411338 {
1062511339 struct i40e_q_vector *q_vector;
1062611340
....@@ -10653,7 +11367,7 @@
1065311367 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
1065411368 {
1065511369 struct i40e_pf *pf = vsi->back;
10656
- int err, v_idx, num_q_vectors, current_cpu;
11370
+ int err, v_idx, num_q_vectors;
1065711371
1065811372 /* if not MSIX, give the one vector only to the LAN VSI */
1065911373 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
....@@ -10663,15 +11377,10 @@
1066311377 else
1066411378 return -EINVAL;
1066511379
10666
- current_cpu = cpumask_first(cpu_online_mask);
10667
-
1066811380 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
10669
- err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
11381
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
1067011382 if (err)
1067111383 goto err_out;
10672
- current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10673
- if (unlikely(current_cpu >= nr_cpu_ids))
10674
- current_cpu = cpumask_first(cpu_online_mask);
1067511384 }
1067611385
1067711386 return 0;
....@@ -10793,6 +11502,48 @@
1079311502 }
1079411503
1079511504 /**
11505
+ * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11506
+ * non-queue events in recovery mode
11507
+ * @pf: board private structure
11508
+ *
11509
+ * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11510
+ * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11511
+ * This is handled differently from the normal path since no Tx/Rx
11512
+ * resources are being allocated in recovery mode.
11513
+ **/
11514
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11515
+{
11516
+ int err;
11517
+
11518
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11519
+ err = i40e_setup_misc_vector(pf);
11520
+
11521
+ if (err) {
11522
+ dev_info(&pf->pdev->dev,
11523
+ "MSI-X misc vector request failed, error %d\n",
11524
+ err);
11525
+ return err;
11526
+ }
11527
+ } else {
11528
+ u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11529
+
11530
+ err = request_irq(pf->pdev->irq, i40e_intr, flags,
11531
+ pf->int_name, pf);
11532
+
11533
+ if (err) {
11534
+ dev_info(&pf->pdev->dev,
11535
+ "MSI/legacy misc vector request failed, error %d\n",
11536
+ err);
11537
+ return err;
11538
+ }
11539
+ i40e_enable_misc_int_causes(pf);
11540
+ i40e_irq_dynamic_enable_icr0(pf);
11541
+ }
11542
+
11543
+ return 0;
11544
+}
11545
+
11546
+/**
1079611547 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
1079711548 * @pf: board private structure
1079811549 *
....@@ -10861,7 +11612,7 @@
1086111612 }
1086211613
1086311614 if (lut) {
10864
- bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
11615
+ bool pf_lut = vsi->type == I40E_VSI_MAIN;
1086511616
1086611617 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1086711618 if (ret) {
....@@ -11103,6 +11854,7 @@
1110311854 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
1110411855 return 0;
1110511856
11857
+ queue_count = min_t(int, queue_count, num_online_cpus());
1110611858 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
1110711859
1110811860 if (queue_count != vsi->num_queue_pairs) {
....@@ -11264,6 +12016,58 @@
1126412016 }
1126512017
1126612018 /**
12019
+ * i40e_is_total_port_shutdown_enabled - read NVM and return true
12020
+ * if the total port shutdown feature is enabled for this PF
12021
+ * @pf: board private structure
12022
+ **/
12023
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12024
+{
12025
+#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
12026
+#define I40E_FEATURES_ENABLE_PTR 0x2A
12027
+#define I40E_CURRENT_SETTING_PTR 0x2B
12028
+#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
12029
+#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
12030
+#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
12031
+#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
12032
+ i40e_status read_status = I40E_SUCCESS;
12033
+ u16 sr_emp_sr_settings_ptr = 0;
12034
+ u16 features_enable = 0;
12035
+ u16 link_behavior = 0;
12036
+ bool ret = false;
12037
+
12038
+ read_status = i40e_read_nvm_word(&pf->hw,
12039
+ I40E_SR_EMP_SR_SETTINGS_PTR,
12040
+ &sr_emp_sr_settings_ptr);
12041
+ if (read_status)
12042
+ goto err_nvm;
12043
+ read_status = i40e_read_nvm_word(&pf->hw,
12044
+ sr_emp_sr_settings_ptr +
12045
+ I40E_FEATURES_ENABLE_PTR,
12046
+ &features_enable);
12047
+ if (read_status)
12048
+ goto err_nvm;
12049
+ if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12050
+ read_status = i40e_read_nvm_module_data(&pf->hw,
12051
+ I40E_SR_EMP_SR_SETTINGS_PTR,
12052
+ I40E_CURRENT_SETTING_PTR,
12053
+ I40E_LINK_BEHAVIOR_WORD_OFFSET,
12054
+ I40E_LINK_BEHAVIOR_WORD_LENGTH,
12055
+ &link_behavior);
12056
+ if (read_status)
12057
+ goto err_nvm;
12058
+ link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12059
+ ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
12060
+ }
12061
+ return ret;
12062
+
12063
+err_nvm:
12064
+ dev_warn(&pf->pdev->dev,
12065
+ "total-port-shutdown feature is off due to read nvm error: %s\n",
12066
+ i40e_stat_str(&pf->hw, read_status));
12067
+ return ret;
12068
+}
12069
+
12070
+/**
1126712071 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
1126812072 * @pf: board private structure to initialize
1126912073 *
....@@ -11405,16 +12209,15 @@
1140512209 /* IWARP needs one extra vector for CQP just like MISC.*/
1140612210 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
1140712211 }
11408
- /* Stopping the FW LLDP engine is only supported on the
11409
- * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
11410
- * engine is not supported if NPAR is functioning on this
11411
- * part
12212
+ /* Stopping FW LLDP engine is supported on XL710 and X722
12213
+ * starting from FW versions determined in i40e_init_adminq.
12214
+ * Stopping the FW LLDP engine is not supported on XL710
12215
+ * if NPAR is functioning, so unset this hw flag in this case.
1141212216 */
1141312217 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11414
- !pf->hw.func_caps.npar_enable &&
11415
- (pf->hw.aq.api_maj_ver > 1 ||
11416
- (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
11417
- pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
12218
+ pf->hw.func_caps.npar_enable &&
12219
+ (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12220
+ pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
1141812221
1141912222 #ifdef CONFIG_PCI_IOV
1142012223 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
....@@ -11444,6 +12247,16 @@
1144412247
1144512248 pf->tx_timeout_recovery_level = 1;
1144612249
12250
+ if (pf->hw.mac.type != I40E_MAC_X722 &&
12251
+ i40e_is_total_port_shutdown_enabled(pf)) {
12252
+ /* Link down on close must be on when total port shutdown
12253
+ * is enabled for a given port
12254
+ */
12255
+ pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12256
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12257
+ dev_info(&pf->pdev->dev,
12258
+ "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12259
+ }
1144712260 mutex_init(&pf->switch_mutex);
1144812261
1144912262 sw_init_done:
....@@ -11550,6 +12363,9 @@
1155012363 return -EINVAL;
1155112364 }
1155212365
12366
+ if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12367
+ i40e_del_all_macvlans(vsi);
12368
+
1155312369 need_reset = i40e_set_ntuple(pf, features);
1155412370
1155512371 if (need_reset)
....@@ -11558,131 +12374,48 @@
1155812374 return 0;
1155912375 }
1156012376
11561
-/**
11562
- * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
11563
- * @pf: board private structure
11564
- * @port: The UDP port to look up
11565
- *
11566
- * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11567
- **/
11568
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11569
-{
11570
- u8 i;
11571
-
11572
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11573
- /* Do not report ports with pending deletions as
11574
- * being available.
11575
- */
11576
- if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
11577
- continue;
11578
- if (pf->udp_ports[i].port == port)
11579
- return i;
11580
- }
11581
-
11582
- return i;
11583
-}
11584
-
11585
-/**
11586
- * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
11587
- * @netdev: This physical port's netdev
11588
- * @ti: Tunnel endpoint information
11589
- **/
11590
-static void i40e_udp_tunnel_add(struct net_device *netdev,
11591
- struct udp_tunnel_info *ti)
12377
+static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12378
+ unsigned int table, unsigned int idx,
12379
+ struct udp_tunnel_info *ti)
1159212380 {
1159312381 struct i40e_netdev_priv *np = netdev_priv(netdev);
11594
- struct i40e_vsi *vsi = np->vsi;
11595
- struct i40e_pf *pf = vsi->back;
11596
- u16 port = ntohs(ti->port);
11597
- u8 next_idx;
11598
- u8 idx;
12382
+ struct i40e_hw *hw = &np->vsi->back->hw;
12383
+ u8 type, filter_index;
12384
+ i40e_status ret;
1159912385
11600
- idx = i40e_get_udp_port_idx(pf, port);
12386
+ type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12387
+ I40E_AQC_TUNNEL_TYPE_NGE;
1160112388
11602
- /* Check if port already exists */
11603
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11604
- netdev_info(netdev, "port %d already offloaded\n", port);
11605
- return;
12389
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12390
+ NULL);
12391
+ if (ret) {
12392
+ netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12393
+ i40e_stat_str(hw, ret),
12394
+ i40e_aq_str(hw, hw->aq.asq_last_status));
12395
+ return -EIO;
1160612396 }
1160712397
11608
- /* Now check if there is space to add the new port */
11609
- next_idx = i40e_get_udp_port_idx(pf, 0);
11610
-
11611
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11612
- netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11613
- port);
11614
- return;
11615
- }
11616
-
11617
- switch (ti->type) {
11618
- case UDP_TUNNEL_TYPE_VXLAN:
11619
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11620
- break;
11621
- case UDP_TUNNEL_TYPE_GENEVE:
11622
- if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
11623
- return;
11624
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11625
- break;
11626
- default:
11627
- return;
11628
- }
11629
-
11630
- /* New port: add it and mark its index in the bitmap */
11631
- pf->udp_ports[next_idx].port = port;
11632
- pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
11633
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11634
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12398
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
12399
+ return 0;
1163512400 }
1163612401
11637
-/**
11638
- * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
11639
- * @netdev: This physical port's netdev
11640
- * @ti: Tunnel endpoint information
11641
- **/
11642
-static void i40e_udp_tunnel_del(struct net_device *netdev,
11643
- struct udp_tunnel_info *ti)
12402
+static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12403
+ unsigned int table, unsigned int idx,
12404
+ struct udp_tunnel_info *ti)
1164412405 {
1164512406 struct i40e_netdev_priv *np = netdev_priv(netdev);
11646
- struct i40e_vsi *vsi = np->vsi;
11647
- struct i40e_pf *pf = vsi->back;
11648
- u16 port = ntohs(ti->port);
11649
- u8 idx;
12407
+ struct i40e_hw *hw = &np->vsi->back->hw;
12408
+ i40e_status ret;
1165012409
11651
- idx = i40e_get_udp_port_idx(pf, port);
11652
-
11653
- /* Check if port already exists */
11654
- if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11655
- goto not_found;
11656
-
11657
- switch (ti->type) {
11658
- case UDP_TUNNEL_TYPE_VXLAN:
11659
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11660
- goto not_found;
11661
- break;
11662
- case UDP_TUNNEL_TYPE_GENEVE:
11663
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11664
- goto not_found;
11665
- break;
11666
- default:
11667
- goto not_found;
12410
+ ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12411
+ if (ret) {
12412
+ netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12413
+ i40e_stat_str(hw, ret),
12414
+ i40e_aq_str(hw, hw->aq.asq_last_status));
12415
+ return -EIO;
1166812416 }
1166912417
11670
- /* if port exists, set it to 0 (mark for deletion)
11671
- * and make it pending
11672
- */
11673
- pf->udp_ports[idx].port = 0;
11674
-
11675
- /* Toggle pending bit instead of setting it. This way if we are
11676
- * deleting a port that has yet to be added we just clear the pending
11677
- * bit and don't have to worry about it.
11678
- */
11679
- pf->pending_udp_bitmap ^= BIT_ULL(idx);
11680
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11681
-
11682
- return;
11683
-not_found:
11684
- netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
11685
- port);
12418
+ return 0;
1168612419 }
1168712420
1168812421 static int i40e_get_phys_port_id(struct net_device *netdev,
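
These two callbacks plug into the core udp_tunnel_nic infrastructure, which now owns the port table that the deleted code tracked by hand. A sketch of the registration the driver is expected to do at probe time; the table size of 16 is an assumption here, matching I40E_MAX_PF_UDP_OFFLOAD_PORTS:

static const struct udp_tunnel_nic_info example_udp_tunnels = {
	.set_port	= i40e_udp_tunnel_set_port,
	.unset_port	= i40e_udp_tunnel_unset_port,
	.tables		= {
		{
			.n_entries	= 16,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
					  UDP_TUNNEL_TYPE_GENEVE,
		},
	},
};

/* at probe time: netdev->udp_tunnel_nic_info = &example_udp_tunnels; */
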
....@@ -11709,11 +12442,13 @@
1170912442 * @addr: the MAC address entry being added
1171012443 * @vid: VLAN ID
1171112444 * @flags: instructions from stack about fdb operation
12445
+ * @extack: netlink extended ack, unused currently
1171212446 */
1171312447 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1171412448 struct net_device *dev,
1171512449 const unsigned char *addr, u16 vid,
11716
- u16 flags)
12450
+ u16 flags,
12451
+ struct netlink_ext_ack *extack)
1171712452 {
1171812453 struct i40e_netdev_priv *np = netdev_priv(dev);
1171912454 struct i40e_pf *pf = np->vsi->back;
....@@ -11754,6 +12489,7 @@
1175412489 * @dev: the netdev being configured
1175512490 * @nlh: RTNL message
1175612491 * @flags: bridge flags
12492
+ * @extack: netlink extended ack
1175712493 *
1175812494 * Inserts a new hardware bridge if not already created and
1175912495 * enables the bridging mode requested (VEB or VEPA). If the
....@@ -11766,7 +12502,8 @@
1176612502 **/
1176712503 static int i40e_ndo_bridge_setlink(struct net_device *dev,
1176812504 struct nlmsghdr *nlh,
11769
- u16 flags)
12505
+ u16 flags,
12506
+ struct netlink_ext_ack *extack)
1177012507 {
1177112508 struct i40e_netdev_priv *np = netdev_priv(dev);
1177212509 struct i40e_vsi *vsi = np->vsi;
....@@ -11786,6 +12523,8 @@
1178612523 }
1178712524
1178812525 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12526
+ if (!br_spec)
12527
+ return -EINVAL;
1178912528
1179012529 nla_for_each_nested(attr, br_spec, rem) {
1179112530 __u16 mode;
....@@ -11953,8 +12692,20 @@
1195312692
1195412693 old_prog = xchg(&vsi->xdp_prog, prog);
1195512694
11956
- if (need_reset)
12695
+ if (need_reset) {
12696
+ if (!prog)
12697
+ /* Wait until ndo_xsk_wakeup completes. */
12698
+ synchronize_rcu();
1195712699 i40e_reset_and_rebuild(pf, true, true);
12700
+ }
12701
+
12702
+ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
12703
+ if (i40e_realloc_rx_bi_zc(vsi, true))
12704
+ return -ENOMEM;
12705
+ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
12706
+ if (i40e_realloc_rx_bi_zc(vsi, false))
12707
+ return -ENOMEM;
12708
+ }
1195812709
1195912710 for (i = 0; i < vsi->num_queue_pairs; i++)
1196012711 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
....@@ -11962,7 +12713,272 @@
1196212713 if (old_prog)
1196312714 bpf_prog_put(old_prog);
1196412715
12716
+ /* Kick start the NAPI context if there is an AF_XDP socket open
12717
+ * on that queue id. This is so that receiving will start.
12718
+ */
12719
+ if (need_reset && prog)
12720
+ for (i = 0; i < vsi->num_queue_pairs; i++)
12721
+ if (vsi->xdp_rings[i]->xsk_pool)
12722
+ (void)i40e_xsk_wakeup(vsi->netdev, i,
12723
+ XDP_WAKEUP_RX);
12724
+
1196512725 return 0;
12726
+}
12727
+
12728
+/**
12729
+ * i40e_enter_busy_conf - Enters busy config state
12730
+ * @vsi: vsi
12731
+ *
12732
+ * Returns 0 on success, <0 for failure.
12733
+ **/
12734
+static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12735
+{
12736
+ struct i40e_pf *pf = vsi->back;
12737
+ int timeout = 50;
12738
+
12739
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12740
+ timeout--;
12741
+ if (!timeout)
12742
+ return -EBUSY;
12743
+ usleep_range(1000, 2000);
12744
+ }
12745
+
12746
+ return 0;
12747
+}
12748
+
12749
+/**
12750
+ * i40e_exit_busy_conf - Exits busy config state
12751
+ * @vsi: vsi
12752
+ **/
12753
+static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12754
+{
12755
+ struct i40e_pf *pf = vsi->back;
12756
+
12757
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
12758
+}
12759
+
12760
+/**
12761
+ * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
12762
+ * @vsi: vsi
12763
+ * @queue_pair: queue pair
12764
+ **/
12765
+static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
12766
+{
12767
+ memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
12768
+ sizeof(vsi->rx_rings[queue_pair]->rx_stats));
12769
+ memset(&vsi->tx_rings[queue_pair]->stats, 0,
12770
+ sizeof(vsi->tx_rings[queue_pair]->stats));
12771
+ if (i40e_enabled_xdp_vsi(vsi)) {
12772
+ memset(&vsi->xdp_rings[queue_pair]->stats, 0,
12773
+ sizeof(vsi->xdp_rings[queue_pair]->stats));
12774
+ }
12775
+}
12776
+
12777
+/**
12778
+ * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
12779
+ * @vsi: vsi
12780
+ * @queue_pair: queue pair
12781
+ **/
12782
+static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
12783
+{
12784
+ i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
12785
+ if (i40e_enabled_xdp_vsi(vsi)) {
12786
+ /* Make sure that in-progress ndo_xdp_xmit calls are
12787
+ * completed.
12788
+ */
12789
+ synchronize_rcu();
12790
+ i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
12791
+ }
12792
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12793
+}
12794
+
12795
+/**
12796
+ * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
12797
+ * @vsi: vsi
12798
+ * @queue_pair: queue pair
12799
+ * @enable: true for enable, false for disable
12800
+ **/
12801
+static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
12802
+ bool enable)
12803
+{
12804
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12805
+ struct i40e_q_vector *q_vector = rxr->q_vector;
12806
+
12807
+ if (!vsi->netdev)
12808
+ return;
12809
+
12810
+ /* All rings in a qp belong to the same qvector. */
12811
+ if (q_vector->rx.ring || q_vector->tx.ring) {
12812
+ if (enable)
12813
+ napi_enable(&q_vector->napi);
12814
+ else
12815
+ napi_disable(&q_vector->napi);
12816
+ }
12817
+}
12818
+
12819
+/**
12820
+ * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
12821
+ * @vsi: vsi
12822
+ * @queue_pair: queue pair
12823
+ * @enable: true for enable, false for disable
12824
+ *
12825
+ * Returns 0 on success, <0 on failure.
12826
+ **/
12827
+static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
12828
+ bool enable)
12829
+{
12830
+ struct i40e_pf *pf = vsi->back;
12831
+ int pf_q, ret = 0;
12832
+
12833
+ pf_q = vsi->base_queue + queue_pair;
12834
+ ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12835
+ false /*is xdp*/, enable);
12836
+ if (ret) {
12837
+ dev_info(&pf->pdev->dev,
12838
+ "VSI seid %d Tx ring %d %sable timeout\n",
12839
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
12840
+ return ret;
12841
+ }
12842
+
12843
+ i40e_control_rx_q(pf, pf_q, enable);
12844
+ ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12845
+ if (ret) {
12846
+ dev_info(&pf->pdev->dev,
12847
+ "VSI seid %d Rx ring %d %sable timeout\n",
12848
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
12849
+ return ret;
12850
+ }
12851
+
12852
+ /* Due to HW errata, on Rx disable only, the register can
12853
+ * indicate done before it really is. Needs 50ms to be sure
12854
+ */
12855
+ if (!enable)
12856
+ mdelay(50);
12857
+
12858
+ if (!i40e_enabled_xdp_vsi(vsi))
12859
+ return ret;
12860
+
12861
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
12862
+ pf_q + vsi->alloc_queue_pairs,
12863
+ true /*is xdp*/, enable);
12864
+ if (ret) {
12865
+ dev_info(&pf->pdev->dev,
12866
+ "VSI seid %d XDP Tx ring %d %sable timeout\n",
12867
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
12868
+ }
12869
+
12870
+ return ret;
12871
+}
12872
+
12873
+/**
12874
+ * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
12875
+ * @vsi: vsi
12876
+ * @queue_pair: queue_pair
12877
+ **/
12878
+static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12879
+{
12880
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12881
+ struct i40e_pf *pf = vsi->back;
12882
+ struct i40e_hw *hw = &pf->hw;
12883
+
12884
+ /* All rings in a qp belong to the same qvector. */
12885
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12886
+ i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12887
+ else
12888
+ i40e_irq_dynamic_enable_icr0(pf);
12889
+
12890
+ i40e_flush(hw);
12891
+}
12892
+
12893
+/**
12894
+ * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
12895
+ * @vsi: vsi
12896
+ * @queue_pair: queue_pair
12897
+ **/
12898
+static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12899
+{
12900
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12901
+ struct i40e_pf *pf = vsi->back;
12902
+ struct i40e_hw *hw = &pf->hw;
12903
+
12904
+ /* For simplicity, instead of removing the qp interrupt causes
12905
+ * from the interrupt linked list, we simply disable the interrupt and
12906
+ * leave the list intact.
12907
+ *
12908
+ * All rings in a qp belong to the same qvector.
12909
+ */
12910
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12911
+ u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12912
+
12913
+ wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12914
+ i40e_flush(hw);
12915
+ synchronize_irq(pf->msix_entries[intpf].vector);
12916
+ } else {
12917
+ /* Legacy and MSI mode - this stops all interrupt handling */
12918
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12919
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12920
+ i40e_flush(hw);
12921
+ synchronize_irq(pf->pdev->irq);
12922
+ }
12923
+}
12924
+
12925
+/**
12926
+ * i40e_queue_pair_disable - Disables a queue pair
12927
+ * @vsi: the VSI being configured
12928
+ * @queue_pair: queue pair index
12929
+ *
12930
+ * Returns 0 on success, <0 on failure.
12931
+ **/
12932
+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12933
+{
12934
+ int err;
12935
+
12936
+ err = i40e_enter_busy_conf(vsi);
12937
+ if (err)
12938
+ return err;
12939
+
12940
+ i40e_queue_pair_disable_irq(vsi, queue_pair);
12941
+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
12942
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12943
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
12944
+ i40e_queue_pair_clean_rings(vsi, queue_pair);
12945
+ i40e_queue_pair_reset_stats(vsi, queue_pair);
12946
+
12947
+ return err;
12948
+}
12949
+
12950
+/**
12951
+ * i40e_queue_pair_enable - Enables a queue pair
12952
+ * @vsi: the VSI being configured
12953
+ * @queue_pair: queue pair index
12954
+ *
12955
+ * Returns 0 on success, <0 on failure.
12956
+ **/
12957
+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12958
+{
12959
+ int err;
12960
+
12961
+ err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12962
+ if (err)
12963
+ return err;
12964
+
12965
+ if (i40e_enabled_xdp_vsi(vsi)) {
12966
+ err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12967
+ if (err)
12968
+ return err;
12969
+ }
12970
+
12971
+ err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12972
+ if (err)
12973
+ return err;
12974
+
12975
+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
12976
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
12977
+ i40e_queue_pair_enable_irq(vsi, queue_pair);
12978
+
12979
+ i40e_exit_busy_conf(vsi);
12980
+
12981
+ return err;
1196612982 }
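/* Illustrative usage sketch, not part of the patch: the two helpers above
 * exist so that a single queue pair can be quiesced and brought back up
 * without disturbing the rest of the VSI. A hypothetical caller
 * reconfiguring one queue might look like this; i40e_xsk_pool_setup() in
 * i40e_xsk.c follows the same pattern when an AF_XDP pool is attached to
 * a running interface.
 */
static int i40e_reconfigure_queue_sketch(struct i40e_vsi *vsi, int queue_pair)
{
	int err;

	/* stops the IRQ, rings and NAPI, then cleans and resets the rings */
	err = i40e_queue_pair_disable(vsi, queue_pair);
	if (err)
		return err;

	/* ... swap buffer pools / rewrite the ring configuration here ... */

	/* reprograms Tx/Rx (and XDP) rings, restarts NAPI and the IRQ */
	return i40e_queue_pair_enable(vsi, queue_pair);
}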
1196712983
1196812984 /**
....@@ -11982,9 +12998,9 @@
1198212998 switch (xdp->command) {
1198312999 case XDP_SETUP_PROG:
1198413000 return i40e_xdp_setup(vsi, xdp->prog);
11985
- case XDP_QUERY_PROG:
11986
- xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
11987
- return 0;
13001
+ case XDP_SETUP_XSK_POOL:
13002
+ return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13003
+ xdp->xsk.queue_id);
1198813004 default:
1198913005 return -EINVAL;
1199013006 }
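/* Context sketch (assumption, userspace side): the new XDP_SETUP_XSK_POOL
 * command is not invoked directly; the XSK core issues it through ndo_bpf
 * when an AF_XDP socket is bound to this netdev/queue, roughly:
 */
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int bind_xsk_sketch(int xsk_fd, int ifindex, __u32 queue_id)
{
	struct sockaddr_xdp sxdp = {
		.sxdp_family = AF_XDP,
		.sxdp_ifindex = ifindex,
		.sxdp_queue_id = queue_id,
	};

	/* the kernel's xsk_bind() ends up calling i40e_xdp() above */
	return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}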
....@@ -12007,16 +13023,18 @@
1200713023 .ndo_poll_controller = i40e_netpoll,
1200813024 #endif
1200913025 .ndo_setup_tc = __i40e_setup_tc,
13026
+ .ndo_select_queue = i40e_lan_select_queue,
1201013027 .ndo_set_features = i40e_set_features,
1201113028 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
1201213029 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13030
+ .ndo_get_vf_stats = i40e_get_vf_stats,
1201313031 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
1201413032 .ndo_get_vf_config = i40e_ndo_get_vf_config,
1201513033 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
1201613034 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
1201713035 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12018
- .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12019
- .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
13036
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
13037
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
1202013038 .ndo_get_phys_port_id = i40e_get_phys_port_id,
1202113039 .ndo_fdb_add = i40e_ndo_fdb_add,
1202213040 .ndo_features_check = i40e_features_check,
....@@ -12024,6 +13042,9 @@
1202413042 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
1202513043 .ndo_bpf = i40e_xdp,
1202613044 .ndo_xdp_xmit = i40e_xdp_xmit,
13045
+ .ndo_xsk_wakeup = i40e_xsk_wakeup,
13046
+ .ndo_dfwd_add_station = i40e_fwd_add,
13047
+ .ndo_dfwd_del_station = i40e_fwd_del,
1202713048 };
1202813049
1202913050 /**
....@@ -12068,6 +13089,7 @@
1206813089 NETIF_F_GSO_IPXIP6 |
1206913090 NETIF_F_GSO_UDP_TUNNEL |
1207013091 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13092
+ NETIF_F_GSO_UDP_L4 |
1207113093 NETIF_F_SCTP_CRC |
1207213094 NETIF_F_RXHASH |
1207313095 NETIF_F_RXCSUM |
....@@ -12076,6 +13098,8 @@
1207613098 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
1207713099 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1207813100
13101
+ netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13102
+
1207913103 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1208013104
1208113105 netdev->hw_enc_features |= hw_enc_features;
....@@ -12083,12 +13107,15 @@
1208313107 /* record features VLANs can make use of */
1208413108 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
1208513109
12086
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12087
- netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13110
+ /* enable macvlan offloads */
13111
+ netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
1208813112
1208913113 hw_features = hw_enc_features |
1209013114 NETIF_F_HW_VLAN_CTAG_TX |
1209113115 NETIF_F_HW_VLAN_CTAG_RX;
13116
+
13117
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13118
+ hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
1209213119
1209313120 netdev->hw_features |= hw_features;
1209413121
....@@ -12195,7 +13222,7 @@
1219513222 struct i40e_pf *pf = vsi->back;
1219613223
1219713224 /* Uplink is not a bridge so default to VEB */
12198
- if (vsi->veb_idx == I40E_NO_VEB)
13225
+ if (vsi->veb_idx >= I40E_MAX_VEB)
1219913226 return 1;
1220013227
1220113228 veb = pf->veb[vsi->veb_idx];
....@@ -12434,15 +13461,15 @@
1243413461 vsi->id = ctxt.vsi_number;
1243513462 }
1243613463
12437
- vsi->active_filters = 0;
12438
- clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
1243913464 spin_lock_bh(&vsi->mac_filter_hash_lock);
13465
+ vsi->active_filters = 0;
1244013466 /* If macvlan filters already exist, force them to get loaded */
1244113467 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1244213468 f->state = I40E_FILTER_NEW;
1244313469 f_count++;
1244413470 }
1244513471 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13472
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
1244613473
1244713474 if (f_count) {
1244813475 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
....@@ -12836,8 +13863,7 @@
1283613863 /* Setup DCB netlink interface */
1283713864 i40e_dcbnl_setup(vsi);
1283813865 #endif /* CONFIG_I40E_DCB */
12839
- /* fall through */
12840
-
13866
+ fallthrough;
1284113867 case I40E_VSI_FDIR:
1284213868 /* set up vectors and rings if needed */
1284313869 ret = i40e_vsi_setup_vectors(vsi);
....@@ -12853,7 +13879,6 @@
1285313879
1285413880 i40e_vsi_reset_stats(vsi);
1285513881 break;
12856
-
1285713882 default:
1285813883 /* no netdev or rings for the other VSI types */
1285913884 break;
....@@ -13179,7 +14204,7 @@
1317914204 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
1318014205 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
1318114206 break;
13182
- if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
14207
+ if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
1318314208 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
1318414209 vsi_seid);
1318514210 return NULL;
....@@ -13256,7 +14281,7 @@
1325614281 /* Main VEB? */
1325714282 if (uplink_seid != pf->mac_seid)
1325814283 break;
13259
- if (pf->lan_veb == I40E_NO_VEB) {
14284
+ if (pf->lan_veb >= I40E_MAX_VEB) {
1326014285 int v;
1326114286
1326214287 /* find existing or else empty VEB */
....@@ -13266,13 +14291,15 @@
1326614291 break;
1326714292 }
1326814293 }
13269
- if (pf->lan_veb == I40E_NO_VEB) {
14294
+ if (pf->lan_veb >= I40E_MAX_VEB) {
1327014295 v = i40e_veb_mem_alloc(pf);
1327114296 if (v < 0)
1327214297 break;
1327314298 pf->lan_veb = v;
1327414299 }
1327514300 }
14301
+ if (pf->lan_veb >= I40E_MAX_VEB)
14302
+ break;
1327614303
1327714304 pf->veb[pf->lan_veb]->seid = seid;
1327814305 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
....@@ -13370,10 +14397,11 @@
1337014397 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
1337114398 * @pf: board private structure
1337214399 * @reinit: if the Main VSI needs to be re-initialized.
14400
+ * @lock_acquired: indicates whether or not the RTNL lock has been acquired
1337314401 *
1337414402 * Returns 0 on success, negative value on failure
1337514403 **/
13376
-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14404
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
1337714405 {
1337814406 u16 flags = 0;
1337914407 int ret;
....@@ -13426,7 +14454,7 @@
1342614454 /* Set up the PF VSI associated with the PF's main VSI
1342714455 * that is already in the HW switch
1342814456 */
13429
- if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
14457
+ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
1343014458 uplink_seid = pf->veb[pf->lan_veb]->seid;
1343114459 else
1343214460 uplink_seid = pf->mac_seid;
....@@ -13475,8 +14503,14 @@
1347514503
1347614504 i40e_ptp_init(pf);
1347714505
14506
+ if (!lock_acquired)
14507
+ rtnl_lock();
14508
+
1347814509 /* repopulate tunnel port filters */
13479
- i40e_sync_udp_filters(pf);
14510
+ udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
14511
+
14512
+ if (!lock_acquired)
14513
+ rtnl_unlock();
1348014514
1348114515 return ret;
1348214516 }
....@@ -13626,29 +14660,29 @@
1362614660
1362714661 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
1362814662 #ifdef CONFIG_PCI_IOV
13629
- i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
14663
+ i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
1363014664 #endif
13631
- i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
14665
+ i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
1363214666 pf->hw.func_caps.num_vsis,
1363314667 pf->vsi[pf->lan_vsi]->num_queue_pairs);
1363414668 if (pf->flags & I40E_FLAG_RSS_ENABLED)
13635
- i += snprintf(&buf[i], REMAIN(i), " RSS");
14669
+ i += scnprintf(&buf[i], REMAIN(i), " RSS");
1363614670 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
13637
- i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
14671
+ i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
1363814672 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13639
- i += snprintf(&buf[i], REMAIN(i), " FD_SB");
13640
- i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
14673
+ i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
14674
+ i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
1364114675 }
1364214676 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
13643
- i += snprintf(&buf[i], REMAIN(i), " DCB");
13644
- i += snprintf(&buf[i], REMAIN(i), " VxLAN");
13645
- i += snprintf(&buf[i], REMAIN(i), " Geneve");
14677
+ i += scnprintf(&buf[i], REMAIN(i), " DCB");
14678
+ i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
14679
+ i += scnprintf(&buf[i], REMAIN(i), " Geneve");
1364614680 if (pf->flags & I40E_FLAG_PTP)
13647
- i += snprintf(&buf[i], REMAIN(i), " PTP");
14681
+ i += scnprintf(&buf[i], REMAIN(i), " PTP");
1364814682 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
13649
- i += snprintf(&buf[i], REMAIN(i), " VEB");
14683
+ i += scnprintf(&buf[i], REMAIN(i), " VEB");
1365014684 else
13651
- i += snprintf(&buf[i], REMAIN(i), " VEPA");
14685
+ i += scnprintf(&buf[i], REMAIN(i), " VEPA");
1365214686
1365314687 dev_info(&pf->pdev->dev, "%s\n", buf);
1365414688 kfree(buf);
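/* Editor's note on why this hunk switches to scnprintf() (illustrative):
 * snprintf() returns the length the string *would* have had, so chained
 * "i += snprintf(&buf[i], REMAIN(i), ...)" can push i past the end of the
 * buffer once it fills, leaving &buf[i] out of bounds. scnprintf() returns
 * the number of bytes actually written (0 when no space is left), so the
 * running index stays valid:
 */
static void scnprintf_chain_demo(char *buf, size_t len)
{
	int i = 0;

	i += scnprintf(&buf[i], len - i, "Features:");
	i += scnprintf(&buf[i], len - i, " RSS");	/* i never exceeds len */
}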
....@@ -13669,6 +14703,237 @@
1366914703 {
1367014704 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
1367114705 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
14706
+}
14707
+
14708
+/**
14709
+ * i40e_set_fec_in_flags - helper function for setting FEC options in flags
14710
+ * @fec_cfg: FEC option to set in flags
14711
+ * @flags: ptr to flags in which we set FEC option
14712
+ **/
14713
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
14714
+{
14715
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
14716
+ *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
14717
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
14718
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
14719
+ *flags |= I40E_FLAG_RS_FEC;
14720
+ *flags &= ~I40E_FLAG_BASE_R_FEC;
14721
+ }
14722
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
14723
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
14724
+ *flags |= I40E_FLAG_BASE_R_FEC;
14725
+ *flags &= ~I40E_FLAG_RS_FEC;
14726
+ }
14727
+ if (fec_cfg == 0)
14728
+ *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
14729
+}
14730
+
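/* Worked example (illustrative) of the flag mapping implemented by
 * i40e_set_fec_in_flags() above: the AQ "auto" bit selects both FEC
 * modes, a specific request/ability bit selects exactly one, and 0
 * clears both.
 */
static void i40e_fec_flags_example(void)
{
	u32 flags = I40E_FLAG_BASE_R_FEC;	/* stale Base-R bit */

	i40e_set_fec_in_flags(I40E_AQ_SET_FEC_REQUEST_RS, &flags);
	/* now I40E_FLAG_RS_FEC is set and I40E_FLAG_BASE_R_FEC is cleared */

	i40e_set_fec_in_flags(0, &flags);
	/* fec_cfg == 0 clears both FEC flags again */
}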
14731
+/**
14732
+ * i40e_check_recovery_mode - check if we are running transition firmware
14733
+ * @pf: board private structure
14734
+ *
14735
+ * Check the registers indicating whether the firmware runs in recovery
14736
+ * mode, and set the appropriate driver state.
14737
+ *
14738
+ * Returns true if the recovery mode was detected, false otherwise
14739
+ **/
14740
+static bool i40e_check_recovery_mode(struct i40e_pf *pf)
14741
+{
14742
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
14743
+
14744
+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
14745
+ dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
14746
+ dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
14747
+ set_bit(__I40E_RECOVERY_MODE, pf->state);
14748
+
14749
+ return true;
14750
+ }
14751
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14752
+ dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
14753
+
14754
+ return false;
14755
+}
14756
+
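/* Illustrative reduction of the check above: any non-zero value in the
 * FWS1B field of I40E_GL_FWSTS means the firmware is running in
 * recovery/transition mode.
 */
static bool i40e_fw_in_recovery_sketch(struct i40e_hw *hw)
{
	return !!(rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK);
}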
14757
+/**
14758
+ * i40e_pf_loop_reset - perform reset in a loop.
14759
+ * @pf: board private structure
14760
+ *
14761
+ * This function is useful when a NIC is about to enter recovery mode.
14762
+ * When a NIC's internal data structures are corrupted the NIC's
14763
+ * firmware is going to enter recovery mode.
14764
+ * Right after a POR it takes about 7 minutes for firmware to enter
14765
+ * recovery mode. Until that time a NIC is in some kind of intermediate
14766
+ * state. After that time period the NIC almost surely enters
14767
+ * recovery mode. The only way for a driver to detect intermediate
14768
+ * state is to issue a series of pf-resets and check a return value.
14769
+ * If a PF reset returns success then the firmware could be in recovery
14770
+ * mode so the caller of this code needs to check for recovery mode
14771
+ * if this function returns success. There is a little chance that
14772
+ * firmware will hang in intermediate state forever.
14773
+ * Since waiting 7 minutes is quite a lot of time this function waits
14774
+ * 10 seconds and then gives up by returning an error.
14775
+ *
14776
+ * Return 0 on success, negative on failure.
14777
+ **/
14778
+static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
14779
+{
14780
+ /* wait max 10 seconds for PF reset to succeed */
14781
+ const unsigned long time_end = jiffies + 10 * HZ;
14782
+
14783
+ struct i40e_hw *hw = &pf->hw;
14784
+ i40e_status ret;
14785
+
14786
+ ret = i40e_pf_reset(hw);
14787
+ while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
14788
+ usleep_range(10000, 20000);
14789
+ ret = i40e_pf_reset(hw);
14790
+ }
14791
+
14792
+ if (ret == I40E_SUCCESS)
14793
+ pf->pfr_count++;
14794
+ else
14795
+ dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
14796
+
14797
+ return ret;
14798
+}
14799
+
14800
+/**
14801
+ * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
14802
+ * @pf: board private structure
14803
+ *
14804
+ * Check FW registers to determine if FW issued unexpected EMP Reset.
14805
+ * Every time when unexpected EMP Reset occurs the FW increments
14806
+ * a counter of unexpected EMP Resets. When the counter reaches 10
14807
+ * the FW should enter the Recovery mode
14808
+ *
14809
+ * Returns true if FW issued unexpected EMP Reset
14810
+ **/
14811
+static bool i40e_check_fw_empr(struct i40e_pf *pf)
14812
+{
14813
+ const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
14814
+ I40E_GL_FWSTS_FWS1B_MASK;
14815
+ return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
14816
+ (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
14817
+}
14818
+
14819
+/**
14820
+ * i40e_handle_resets - handle EMP resets and PF resets
14821
+ * @pf: board private structure
14822
+ *
14823
+ * Handle both EMP resets and PF resets and conclude whether there are
14824
+ * any issues regarding these resets. If there are any issues,
14825
+ * generate a log entry.
14826
+ *
14827
+ * Return 0 if the NIC is healthy, or a negative value when there
14828
+ * are issues with the resets.
14829
+ **/
14830
+static i40e_status i40e_handle_resets(struct i40e_pf *pf)
14831
+{
14832
+ const i40e_status pfr = i40e_pf_loop_reset(pf);
14833
+ const bool is_empr = i40e_check_fw_empr(pf);
14834
+
14835
+ if (is_empr || pfr != I40E_SUCCESS)
14836
+ dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
14837
+
14838
+ return is_empr ? I40E_ERR_RESET_FAILED : pfr;
14839
+}
14840
+
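/* Usage sketch: how probe-time code combines the reset helpers (the
 * i40e_probe() hunk further below does exactly this). Note that even a
 * successful i40e_handle_resets() does not rule out recovery mode, hence
 * the follow-up check.
 */
static int i40e_probe_reset_sketch(struct i40e_pf *pf)
{
	int err = i40e_handle_resets(pf);

	if (err)
		return err;	/* NIC stuck in intermediate state or EMPR storm */

	i40e_check_recovery_mode(pf);	/* sets __I40E_RECOVERY_MODE if needed */
	return 0;
}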
14841
+/**
14842
+ * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
14843
+ * @pf: board private structure
14844
+ * @hw: ptr to the hardware info
14845
+ *
14846
+ * This function does a minimal setup of all subsystems needed for running
14847
+ * recovery mode.
14848
+ *
14849
+ * Returns 0 on success, negative on failure
14850
+ **/
14851
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
14852
+{
14853
+ struct i40e_vsi *vsi;
14854
+ int err;
14855
+ int v_idx;
14856
+
14857
+ pci_set_drvdata(pf->pdev, pf);
14858
+ pci_save_state(pf->pdev);
14859
+
14860
+ /* set up periodic task facility */
14861
+ timer_setup(&pf->service_timer, i40e_service_timer, 0);
14862
+ pf->service_timer_period = HZ;
14863
+
14864
+ INIT_WORK(&pf->service_task, i40e_service_task);
14865
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
14866
+
14867
+ err = i40e_init_interrupt_scheme(pf);
14868
+ if (err)
14869
+ goto err_switch_setup;
14870
+
14871
+ /* The number of VSIs reported by the FW is the minimum guaranteed
14872
+ * to us; HW supports far more and we share the remaining pool with
14873
+ * the other PFs. We allocate space for more than the guarantee with
14874
+ * the understanding that we might not get them all later.
14875
+ */
14876
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14877
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14878
+ else
14879
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14880
+
14881
+ /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
14882
+ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14883
+ GFP_KERNEL);
14884
+ if (!pf->vsi) {
14885
+ err = -ENOMEM;
14886
+ goto err_switch_setup;
14887
+ }
14888
+
14889
+ /* We allocate one VSI, which is the absolute minimum needed
14890
+ * in order to register the netdev
14891
+ */
14892
+ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
14893
+ if (v_idx < 0) {
14894
+ err = v_idx;
14895
+ goto err_switch_setup;
14896
+ }
14897
+ pf->lan_vsi = v_idx;
14898
+ vsi = pf->vsi[v_idx];
14899
+ if (!vsi) {
14900
+ err = -EFAULT;
14901
+ goto err_switch_setup;
14902
+ }
14903
+ vsi->alloc_queue_pairs = 1;
14904
+ err = i40e_config_netdev(vsi);
14905
+ if (err)
14906
+ goto err_switch_setup;
14907
+ err = register_netdev(vsi->netdev);
14908
+ if (err)
14909
+ goto err_switch_setup;
14910
+ vsi->netdev_registered = true;
14911
+ i40e_dbg_pf_init(pf);
14912
+
14913
+ err = i40e_setup_misc_vector_for_recovery_mode(pf);
14914
+ if (err)
14915
+ goto err_switch_setup;
14916
+
14917
+ /* tell the firmware that we're starting */
14918
+ i40e_send_version(pf);
14919
+
14920
+ /* since everything's happy, start the service_task timer */
14921
+ mod_timer(&pf->service_timer,
14922
+ round_jiffies(jiffies + pf->service_timer_period));
14923
+
14924
+ return 0;
14925
+
14926
+err_switch_setup:
14927
+ i40e_reset_interrupt_capability(pf);
14928
+ del_timer_sync(&pf->service_timer);
14929
+ i40e_shutdown_adminq(hw);
14930
+ iounmap(hw->hw_addr);
14931
+ pci_disable_pcie_error_reporting(pf->pdev);
14932
+ pci_release_mem_regions(pf->pdev);
14933
+ pci_disable_device(pf->pdev);
14934
+ kfree(pf);
14935
+
14936
+ return err;
1367214937 }
1367314938
1367414939 /**
....@@ -13739,7 +15004,17 @@
1373915004
1374015005 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
1374115006 I40E_MAX_CSR_SPACE);
13742
-
15007
+ /* We believe that the highest register to read is
15008
+ * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
15009
+ * is at least that large before mapping, to prevent a
15010
+ * kernel panic.
15011
+ */
15012
+ if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15013
+ dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15014
+ pf->ioremap_len);
15015
+ err = -ENOMEM;
15016
+ goto err_ioremap;
15017
+ }
1374315018 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
1374415019 if (!hw->hw_addr) {
1374515020 err = -EIO;
....@@ -13767,6 +15042,7 @@
1376715042
1376815043 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
1376915044 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15045
+ INIT_LIST_HEAD(&pf->ddp_old_prof);
1377015046
1377115047 /* set up the locks for the AQ, do this only once in probe
1377215048 * and destroy them only once in remove
....@@ -13794,12 +15070,19 @@
1379415070
1379515071 /* Reset here to make sure all is clean and to define PF 'n' */
1379615072 i40e_clear_hw(hw);
13797
- err = i40e_pf_reset(hw);
15073
+
15074
+ err = i40e_set_mac_type(hw);
1379815075 if (err) {
13799
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
15076
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15077
+ err);
1380015078 goto err_pf_reset;
1380115079 }
13802
- pf->pfr_count++;
15080
+
15081
+ err = i40e_handle_resets(pf);
15082
+ if (err)
15083
+ goto err_pf_reset;
15084
+
15085
+ i40e_check_recovery_mode(pf);
1380315086
1380415087 hw->aq.num_arq_entries = I40E_AQ_LEN;
1380515088 hw->aq.num_asq_entries = I40E_AQ_LEN;
....@@ -13825,7 +15108,11 @@
1382515108 if (err) {
1382615109 if (err == I40E_ERR_FIRMWARE_API_VERSION)
1382715110 dev_info(&pdev->dev,
13828
- "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
15111
+ "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15112
+ hw->aq.api_maj_ver,
15113
+ hw->aq.api_min_ver,
15114
+ I40E_FW_API_VERSION_MAJOR,
15115
+ I40E_FW_MINOR_VERSION(hw));
1382915116 else
1383015117 dev_info(&pdev->dev,
1383115118 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
....@@ -13834,19 +15121,28 @@
1383415121 }
1383515122 i40e_get_oem_version(hw);
1383615123
13837
- /* provide nvm, fw, api versions */
13838
- dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
15124
+ /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
15125
+ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
1383915126 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
1384015127 hw->aq.api_maj_ver, hw->aq.api_min_ver,
13841
- i40e_nvm_version_str(hw));
15128
+ i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
15129
+ hw->subsystem_vendor_id, hw->subsystem_device_id);
1384215130
1384315131 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1384415132 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
13845
- dev_info(&pdev->dev,
13846
- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
15133
+ dev_dbg(&pdev->dev,
15134
+ "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
15135
+ hw->aq.api_maj_ver,
15136
+ hw->aq.api_min_ver,
15137
+ I40E_FW_API_VERSION_MAJOR,
15138
+ I40E_FW_MINOR_VERSION(hw));
1384715139 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
1384815140 dev_info(&pdev->dev,
13849
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
15141
+ "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15142
+ hw->aq.api_maj_ver,
15143
+ hw->aq.api_min_ver,
15144
+ I40E_FW_API_VERSION_MAJOR,
15145
+ I40E_FW_MINOR_VERSION(hw));
1385015146
1385115147 i40e_verify_eeprom(pf);
1385215148
....@@ -13855,6 +15151,7 @@
1385515151 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
1385615152
1385715153 i40e_clear_pxe_mode(hw);
15154
+
1385815155 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
1385915156 if (err)
1386015157 goto err_adminq_setup;
....@@ -13864,6 +15161,9 @@
1386415161 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
1386515162 goto err_sw_init;
1386615163 }
15164
+
15165
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15166
+ return i40e_init_recovery_mode(pf, hw);
1386715167
1386815168 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1386915169 hw->func_caps.num_rx_qp, 0, 0);
....@@ -13885,7 +15185,7 @@
1388515185 */
1388615186 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
1388715187 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
13888
- i40e_aq_stop_lldp(hw, true, NULL);
15188
+ i40e_aq_stop_lldp(hw, true, false, NULL);
1388915189 }
1389015190
1389115191 /* allow a platform config to override the HW addr */
....@@ -13904,6 +15204,11 @@
1390415204
1390515205 pci_set_drvdata(pdev, pf);
1390615206 pci_save_state(pdev);
15207
+
15208
+ dev_info(&pdev->dev,
15209
+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
15210
+ "FW LLDP is disabled\n" :
15211
+ "FW LLDP is enabled\n");
1390715212
1390815213 /* Enable FW to write default DCB config on link-up */
1390915214 i40e_aq_set_dcb_parameters(hw, true, NULL);
....@@ -13938,6 +15243,14 @@
1393815243 if (err)
1393915244 goto err_switch_setup;
1394015245
15246
+ pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15247
+ pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15248
+ pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15249
+ pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15250
+ pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15251
+ pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15252
+ UDP_TUNNEL_TYPE_GENEVE;
15253
+
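/* Sketch (assumption; the real callbacks live elsewhere in this file) of
 * the set_port shape the table configuration above expects. The signature
 * follows struct udp_tunnel_nic_info; the AQ call is the same one the old
 * ndo_udp_tunnel_add path used.
 */
static int i40e_udp_tunnel_set_port_sketch(struct net_device *netdev,
					   unsigned int table, unsigned int idx,
					   struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	u8 filter_index;
	u8 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ?
		  I40E_AQC_TUNNEL_TYPE_VXLAN : I40E_AQC_TUNNEL_TYPE_NGE;

	/* program the UDP port into the HW tunnel filter table */
	if (i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type,
				   &filter_index, NULL))
		return -EIO;
	return 0;
}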
1394115254 /* The number of VSIs reported by the FW is the minimum guaranteed
1394215255 * to us; HW supports far more and we share the remaining pool with
1394315256 * the other PFs. We allocate space for more than the guarantee with
....@@ -13947,6 +15260,12 @@
1394715260 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
1394815261 else
1394915262 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15263
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15264
+ dev_warn(&pf->pdev->dev,
15265
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15266
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15267
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15268
+ }
1395015269
1395115270 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
1395215271 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
....@@ -13965,7 +15284,7 @@
1396515284 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1396615285 }
1396715286 #endif
13968
- err = i40e_setup_pf_switch(pf, false);
15287
+ err = i40e_setup_pf_switch(pf, false, false);
1396915288 if (err) {
1397015289 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
1397115290 goto err_vsis;
....@@ -14106,23 +15425,23 @@
1410615425
1410715426 switch (hw->bus.speed) {
1410815427 case i40e_bus_speed_8000:
14109
- strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
15428
+ strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
1411015429 case i40e_bus_speed_5000:
14111
- strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
15430
+ strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
1411215431 case i40e_bus_speed_2500:
14113
- strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
15432
+ strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
1411415433 default:
1411515434 break;
1411615435 }
1411715436 switch (hw->bus.width) {
1411815437 case i40e_bus_width_pcie_x8:
14119
- strncpy(width, "8", PCI_WIDTH_SIZE); break;
15438
+ strlcpy(width, "8", PCI_WIDTH_SIZE); break;
1412015439 case i40e_bus_width_pcie_x4:
14121
- strncpy(width, "4", PCI_WIDTH_SIZE); break;
15440
+ strlcpy(width, "4", PCI_WIDTH_SIZE); break;
1412215441 case i40e_bus_width_pcie_x2:
14123
- strncpy(width, "2", PCI_WIDTH_SIZE); break;
15442
+ strlcpy(width, "2", PCI_WIDTH_SIZE); break;
1412415443 case i40e_bus_width_pcie_x1:
14125
- strncpy(width, "1", PCI_WIDTH_SIZE); break;
15444
+ strlcpy(width, "1", PCI_WIDTH_SIZE); break;
1412615445 default:
1412715446 break;
1412815447 }
....@@ -14145,12 +15464,23 @@
1414515464 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
1414615465 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
1414715466
15467
+ /* set the FEC config based on the board capabilities */
15468
+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15469
+
1414815470 /* get the supported phy types from the fw */
1414915471 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
1415015472 if (err)
1415115473 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
1415215474 i40e_stat_str(&pf->hw, err),
1415315475 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15476
+
15477
+ /* make sure the MFS hasn't been set lower than the default */
15478
+#define MAX_FRAME_SIZE_DEFAULT 0x2600
15479
+ val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
15480
+ I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
15481
+ if (val < MAX_FRAME_SIZE_DEFAULT)
15482
+ dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
15483
+ pf->hw.port, val);
1415415484
1415515485 /* Add a filter to drop all Flow control frames from any VSI from being
1415615486 * transmitted. By doing so we stop a malicious VF from sending out
....@@ -14239,6 +15569,19 @@
1423915569 if (pf->service_task.func)
1424015570 cancel_work_sync(&pf->service_task);
1424115571
15572
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
15573
+ struct i40e_vsi *vsi = pf->vsi[0];
15574
+
15575
+ /* We know that we have allocated only one vsi for this PF,
15576
+ * it was just for registering netdevice, so the interface
15577
+ * could be visible in the 'ifconfig' output
15578
+ */
15579
+ unregister_netdev(vsi->netdev);
15580
+ free_netdev(vsi->netdev);
15581
+
15582
+ goto unmap;
15583
+ }
15584
+
1424215585 /* Client close must be called explicitly here because the timer
1424315586 * has been stopped.
1424415587 */
....@@ -14283,6 +15626,12 @@
1428315626 ret_code);
1428415627 }
1428515628
15629
+unmap:
15630
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
15631
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15632
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15633
+ free_irq(pf->pdev->irq, pf);
15634
+
1428615635 /* shutdown the adminq */
1428715636 i40e_shutdown_adminq(hw);
1428815637
....@@ -14295,7 +15644,8 @@
1429515644 i40e_clear_interrupt_scheme(pf);
1429615645 for (i = 0; i < pf->num_alloc_vsi; i++) {
1429715646 if (pf->vsi[i]) {
14298
- i40e_vsi_clear_rings(pf->vsi[i]);
15647
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
15648
+ i40e_vsi_clear_rings(pf->vsi[i]);
1429915649 i40e_vsi_clear(pf->vsi[i]);
1430015650 pf->vsi[i] = NULL;
1430115651 }
....@@ -14328,7 +15678,7 @@
1432815678 * remediation.
1432915679 **/
1433015680 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
14331
- enum pci_channel_state error)
15681
+ pci_channel_state_t error)
1433215682 {
1433315683 struct i40e_pf *pf = pci_get_drvdata(pdev);
1433415684
....@@ -14361,7 +15711,6 @@
1436115711 {
1436215712 struct i40e_pf *pf = pci_get_drvdata(pdev);
1436315713 pci_ers_result_t result;
14364
- int err;
1436515714 u32 reg;
1436615715
1436715716 dev_dbg(&pdev->dev, "%s\n", __func__);
....@@ -14380,14 +15729,6 @@
1438015729 result = PCI_ERS_RESULT_RECOVERED;
1438115730 else
1438215731 result = PCI_ERS_RESULT_DISCONNECT;
14383
- }
14384
-
14385
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
14386
- if (err) {
14387
- dev_info(&pdev->dev,
14388
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
14389
- err);
14390
- /* non-fatal, continue */
1439115732 }
1439215733
1439315734 return result;
....@@ -14512,6 +15853,11 @@
1451215853 wr32(hw, I40E_PFPM_WUFC,
1451315854 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
1451415855
15856
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
15857
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15858
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15859
+ free_irq(pf->pdev->irq, pf);
15860
+
1451515861 /* Since we're going to destroy queues during the
1451615862 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
1451715863 * whole section
....@@ -14532,8 +15878,7 @@
1453215878 **/
1453315879 static int __maybe_unused i40e_suspend(struct device *dev)
1453415880 {
14535
- struct pci_dev *pdev = to_pci_dev(dev);
14536
- struct i40e_pf *pf = pci_get_drvdata(pdev);
15881
+ struct i40e_pf *pf = dev_get_drvdata(dev);
1453715882 struct i40e_hw *hw = &pf->hw;
1453815883
1453915884 /* If we're already suspended, then there is nothing to do */
....@@ -14583,8 +15928,7 @@
1458315928 **/
1458415929 static int __maybe_unused i40e_resume(struct device *dev)
1458515930 {
14586
- struct pci_dev *pdev = to_pci_dev(dev);
14587
- struct i40e_pf *pf = pci_get_drvdata(pdev);
15931
+ struct i40e_pf *pf = dev_get_drvdata(dev);
1458815932 int err;
1458915933
1459015934 /* If we're not suspended, then there is nothing to do */
....@@ -14601,7 +15945,7 @@
1460115945 */
1460215946 err = i40e_restore_interrupt_scheme(pf);
1460315947 if (err) {
14604
- dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
15948
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
1460515949 err);
1460615950 }
1460715951
....@@ -14651,8 +15995,9 @@
1465115995 **/
1465215996 static int __init i40e_init_module(void)
1465315997 {
14654
- pr_info("%s: %s - version %s\n", i40e_driver_name,
14655
- i40e_driver_string, i40e_driver_version_str);
15998
+ int err;
15999
+
16000
+ pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
1465616001 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
1465716002
1465816003 /* There is no need to throttle the number of active tasks because
....@@ -14669,7 +16014,14 @@
1466916014 }
1467016015
1467116016 i40e_dbg_init();
14672
- return pci_register_driver(&i40e_driver);
16017
+ err = pci_register_driver(&i40e_driver);
16018
+ if (err) {
16019
+ destroy_workqueue(i40e_wq);
16020
+ i40e_dbg_exit();
16021
+ return err;
16022
+ }
16023
+
16024
+ return 0;
1467316025 }
1467416026 module_init(i40e_init_module);
1467516027