
hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -62,10 +62,13 @@
6262 #include <net/netevent.h>
6363 #include <net/addrconf.h>
6464 #include <net/bonding.h>
65
-#include <net/addrconf.h>
6665 #include <linux/uaccess.h>
6766 #include <linux/crash_dump.h>
6867 #include <net/udp_tunnel.h>
68
+#include <net/xfrm.h>
69
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
70
+#include <net/tls.h>
71
+#endif
6972
7073 #include "cxgb4.h"
7174 #include "cxgb4_filter.h"
@@ -83,16 +86,13 @@
8386 #include "sched.h"
8487 #include "cxgb4_tc_u32.h"
8588 #include "cxgb4_tc_flower.h"
89
+#include "cxgb4_tc_mqprio.h"
90
+#include "cxgb4_tc_matchall.h"
8691 #include "cxgb4_ptp.h"
8792 #include "cxgb4_cudbg.h"
8893
8994 char cxgb4_driver_name[] = KBUILD_MODNAME;
9095
91
-#ifdef DRV_VERSION
92
-#undef DRV_VERSION
93
-#endif
94
-#define DRV_VERSION "2.0.0-ko"
95
-const char cxgb4_driver_version[] = DRV_VERSION;
9696 #define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
9797
9898 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
@@ -135,7 +135,6 @@
135135 MODULE_DESCRIPTION(DRV_DESC);
136136 MODULE_AUTHOR("Chelsio Communications");
137137 MODULE_LICENSE("Dual BSD/GPL");
138
-MODULE_VERSION(DRV_VERSION);
139138 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
140139 MODULE_FIRMWARE(FW4_FNAME);
141140 MODULE_FIRMWARE(FW5_FNAME);
@@ -184,6 +183,9 @@
184183
185184 LIST_HEAD(adapter_list);
186185 DEFINE_MUTEX(uld_mutex);
186
+LIST_HEAD(uld_list);
187
+
188
+static int cfg_queues(struct adapter *adap);
187189
188190 static void link_report(struct net_device *dev)
189191 {
@@ -367,13 +369,19 @@
367369 int ret;
368370 u64 mhash = 0;
369371 u64 uhash = 0;
372
+ /* idx stores the index of allocated filters,
373
+ * its size should be modified based on the number of
374
+ * MAC addresses that we allocate filters for
375
+ */
376
+
377
+ u16 idx[1] = {};
370378 bool free = false;
371379 bool ucast = is_unicast_ether_addr(mac_addr);
372380 const u8 *maclist[1] = {mac_addr};
373381 struct hash_mac_addr *new_entry;
374382
375
- ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
376
- NULL, ucast ? &uhash : &mhash, false);
383
+ ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
384
+ idx, ucast ? &uhash : &mhash, false);
377385 if (ret < 0)
378386 goto out;
379387 /* if hash != 0, then add the addr to hash addr list
@@ -411,7 +419,7 @@
411419 }
412420 }
413421
414
- ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
422
+ ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
415423 return ret < 0 ? -EINVAL : 0;
416424 }
417425
@@ -427,13 +435,67 @@
427435 __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
428436 __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
429437
430
- return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
431
- (dev->flags & IFF_PROMISC) ? 1 : 0,
438
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
439
+ mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
432440 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
433441 sleep_ok);
434442 }
435443
436444 /**
445
+ * cxgb4_change_mac - Update match filter for a MAC address.
446
+ * @pi: the port_info
447
+ * @viid: the VI id
448
+ * @tcam_idx: TCAM index of existing filter for old value of MAC address,
449
+ * or -1
450
+ * @addr: the new MAC address value
451
+ * @persist: whether a new MAC allocation should be persistent
452
+ * @smt_idx: the destination to store the new SMT index.
453
+ *
454
+ * Modifies an MPS filter and sets it to the new MAC address if
455
+ * @tcam_idx >= 0, or adds the MAC address to a new filter if
456
+ * @tcam_idx < 0. In the latter case the address is added persistently
457
+ * if @persist is %true.
458
+ * Addresses are programmed to hash region, if tcam runs out of entries.
459
+ *
460
+ */
461
+int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
462
+ int *tcam_idx, const u8 *addr, bool persist,
463
+ u8 *smt_idx)
464
+{
465
+ struct adapter *adapter = pi->adapter;
466
+ struct hash_mac_addr *entry, *new_entry;
467
+ int ret;
468
+
469
+ ret = t4_change_mac(adapter, adapter->mbox, viid,
470
+ *tcam_idx, addr, persist, smt_idx);
471
+ /* We ran out of TCAM entries. try programming hash region. */
472
+ if (ret == -ENOMEM) {
473
+ /* If the MAC address to be updated is in the hash addr
474
+ * list, update it from the list
475
+ */
476
+ list_for_each_entry(entry, &adapter->mac_hlist, list) {
477
+ if (entry->iface_mac) {
478
+ ether_addr_copy(entry->addr, addr);
479
+ goto set_hash;
480
+ }
481
+ }
482
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
483
+ if (!new_entry)
484
+ return -ENOMEM;
485
+ ether_addr_copy(new_entry->addr, addr);
486
+ new_entry->iface_mac = true;
487
+ list_add_tail(&new_entry->list, &adapter->mac_hlist);
488
+set_hash:
489
+ ret = cxgb4_set_addr_hash(pi);
490
+ } else if (ret >= 0) {
491
+ *tcam_idx = ret;
492
+ ret = 0;
493
+ }
494
+
495
+ return ret;
496
+}
497
+
498
+/*
437499 * link_start - enable a port
438500 * @dev: the port to enable
439501 *
@@ -441,25 +503,20 @@
441503 */
442504 static int link_start(struct net_device *dev)
443505 {
444
- int ret;
445506 struct port_info *pi = netdev_priv(dev);
446
- unsigned int mb = pi->adapter->pf;
507
+ unsigned int mb = pi->adapter->mbox;
508
+ int ret;
447509
448510 /*
449511 * We do not set address filters and promiscuity here, the stack does
450512 * that step explicitly.
451513 */
452
- ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
514
+ ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
515
+ dev->mtu, -1, -1, -1,
453516 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
454
- if (ret == 0) {
455
- ret = t4_change_mac(pi->adapter, mb, pi->viid,
456
- pi->xact_addr_filt, dev->dev_addr, true,
457
- true);
458
- if (ret >= 0) {
459
- pi->xact_addr_filt = ret;
460
- ret = 0;
461
- }
462
- }
517
+ if (ret == 0)
518
+ ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
519
+ dev->dev_addr, true, &pi->smt_idx);
463520 if (ret == 0)
464521 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
465522 &pi->link_cfg);
@@ -528,7 +585,7 @@
528585 struct sge_eth_txq *eq;
529586
530587 eq = container_of(txq, struct sge_eth_txq, q);
531
- netif_tx_wake_queue(eq->txq);
588
+ t4_sge_eth_txq_egress_update(q->adap, eq, -1);
532589 } else {
533590 struct sge_uld_txq *oq;
534591
@@ -604,12 +661,12 @@
604661
605662 static void disable_msi(struct adapter *adapter)
606663 {
607
- if (adapter->flags & USING_MSIX) {
664
+ if (adapter->flags & CXGB4_USING_MSIX) {
608665 pci_disable_msix(adapter->pdev);
609
- adapter->flags &= ~USING_MSIX;
610
- } else if (adapter->flags & USING_MSI) {
666
+ adapter->flags &= ~CXGB4_USING_MSIX;
667
+ } else if (adapter->flags & CXGB4_USING_MSI) {
611668 pci_disable_msi(adapter->pdev);
612
- adapter->flags &= ~USING_MSI;
669
+ adapter->flags &= ~CXGB4_USING_MSI;
613670 }
614671 }
615672
@@ -625,74 +682,170 @@
625682 adap->swintr = 1;
626683 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
627684 }
628
- if (adap->flags & MASTER_PF)
685
+ if (adap->flags & CXGB4_MASTER_PF)
629686 t4_slow_intr_handler(adap);
630687 return IRQ_HANDLED;
631688 }
632689
633
-/*
634
- * Name the MSI-X interrupts.
635
- */
636
-static void name_msix_vecs(struct adapter *adap)
690
+int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
691
+ cpumask_var_t *aff_mask, int idx)
637692 {
638
- int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
693
+ int rv;
639694
640
- /* non-data interrupts */
641
- snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
642
-
643
- /* FW events */
644
- snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
645
- adap->port[0]->name);
646
-
647
- /* Ethernet queues */
648
- for_each_port(adap, j) {
649
- struct net_device *d = adap->port[j];
650
- const struct port_info *pi = netdev_priv(d);
651
-
652
- for (i = 0; i < pi->nqsets; i++, msi_idx++)
653
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
654
- d->name, i);
695
+ if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
696
+ dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
697
+ return -ENOMEM;
655698 }
699
+
700
+ cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
701
+ *aff_mask);
702
+
703
+ rv = irq_set_affinity_hint(vec, *aff_mask);
704
+ if (rv)
705
+ dev_warn(adap->pdev_dev,
706
+ "irq_set_affinity_hint %u failed %d\n",
707
+ vec, rv);
708
+
709
+ return 0;
710
+}
711
+
712
+void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
713
+{
714
+ irq_set_affinity_hint(vec, NULL);
715
+ free_cpumask_var(aff_mask);
656716 }
657717
658718 static int request_msix_queue_irqs(struct adapter *adap)
659719 {
660720 struct sge *s = &adap->sge;
721
+ struct msix_info *minfo;
661722 int err, ethqidx;
662
- int msi_index = 2;
663723
664
- err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
665
- adap->msix_info[1].desc, &s->fw_evtq);
724
+ if (s->fwevtq_msix_idx < 0)
725
+ return -ENOMEM;
726
+
727
+ err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
728
+ t4_sge_intr_msix, 0,
729
+ adap->msix_info[s->fwevtq_msix_idx].desc,
730
+ &s->fw_evtq);
666731 if (err)
667732 return err;
668733
669734 for_each_ethrxq(s, ethqidx) {
670
- err = request_irq(adap->msix_info[msi_index].vec,
735
+ minfo = s->ethrxq[ethqidx].msix;
736
+ err = request_irq(minfo->vec,
671737 t4_sge_intr_msix, 0,
672
- adap->msix_info[msi_index].desc,
738
+ minfo->desc,
673739 &s->ethrxq[ethqidx].rspq);
674740 if (err)
675741 goto unwind;
676
- msi_index++;
742
+
743
+ cxgb4_set_msix_aff(adap, minfo->vec,
744
+ &minfo->aff_mask, ethqidx);
677745 }
678746 return 0;
679747
680748 unwind:
681
- while (--ethqidx >= 0)
682
- free_irq(adap->msix_info[--msi_index].vec,
683
- &s->ethrxq[ethqidx].rspq);
684
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
749
+ while (--ethqidx >= 0) {
750
+ minfo = s->ethrxq[ethqidx].msix;
751
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
752
+ free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
753
+ }
754
+ free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
685755 return err;
686756 }
687757
688758 static void free_msix_queue_irqs(struct adapter *adap)
689759 {
690
- int i, msi_index = 2;
691760 struct sge *s = &adap->sge;
761
+ struct msix_info *minfo;
762
+ int i;
692763
693
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
694
- for_each_ethrxq(s, i)
695
- free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
764
+ free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
765
+ for_each_ethrxq(s, i) {
766
+ minfo = s->ethrxq[i].msix;
767
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
768
+ free_irq(minfo->vec, &s->ethrxq[i].rspq);
769
+ }
770
+}
771
+
772
+static int setup_ppod_edram(struct adapter *adap)
773
+{
774
+ unsigned int param, val;
775
+ int ret;
776
+
777
+ /* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
778
+ * if firmware supports ppod edram feature or not. If firmware
779
+ * returns 1, then driver can enable this feature by sending
780
+ * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to
781
+ * enable ppod edram feature.
782
+ */
783
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
784
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));
785
+
786
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
787
+ if (ret < 0) {
788
+ dev_warn(adap->pdev_dev,
789
+ "querying PPOD_EDRAM support failed: %d\n",
790
+ ret);
791
+ return -1;
792
+ }
793
+
794
+ if (val != 1)
795
+ return -1;
796
+
797
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
798
+ if (ret < 0) {
799
+ dev_err(adap->pdev_dev,
800
+ "setting PPOD_EDRAM failed: %d\n", ret);
801
+ return -1;
802
+ }
803
+ return 0;
804
+}
805
+
806
+static void adap_config_hpfilter(struct adapter *adapter)
807
+{
808
+ u32 param, val = 0;
809
+ int ret;
810
+
811
+ /* Enable HP filter region. Older fw will fail this request and
812
+ * it is fine.
813
+ */
814
+ param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
815
+ ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
816
+ 1, &param, &val);
817
+
818
+ /* An error means FW doesn't know about HP filter support,
819
+ * it's not a problem, don't return an error.
820
+ */
821
+ if (ret < 0)
822
+ dev_err(adapter->pdev_dev,
823
+ "HP filter region isn't supported by FW\n");
824
+}
825
+
826
+static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
827
+ u16 rss_size, u16 viid)
828
+{
829
+ struct adapter *adap = pi->adapter;
830
+ int ret;
831
+
832
+ ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
833
+ rss_size);
834
+ if (ret)
835
+ return ret;
836
+
837
+ /* If Tunnel All Lookup isn't specified in the global RSS
838
+ * Configuration, then we need to specify a default Ingress
839
+ * Queue for any ingress packets which aren't hashed. We'll
840
+ * use our first ingress queue ...
841
+ */
842
+ return t4_config_vi_rss(adap, adap->mbox, viid,
843
+ FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
844
+ FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
845
+ FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
846
+ FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
847
+ FW_RSS_VI_CONFIG_CMD_UDPEN_F,
848
+ rss[0]);
696849 }
697850
698851 /**
@@ -706,10 +859,10 @@
706859 */
707860 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
708861 {
709
- u16 *rss;
710
- int i, err;
711862 struct adapter *adapter = pi->adapter;
712863 const struct sge_eth_rxq *rxq;
864
+ int i, err;
865
+ u16 *rss;
713866
714867 rxq = &adapter->sge.ethrxq[pi->first_qset];
715868 rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
@@ -720,21 +873,7 @@
720873 for (i = 0; i < pi->rss_size; i++, queues++)
721874 rss[i] = rxq[*queues].rspq.abs_id;
722875
723
- err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
724
- pi->rss_size, rss, pi->rss_size);
725
- /* If Tunnel All Lookup isn't specified in the global RSS
726
- * Configuration, then we need to specify a default Ingress
727
- * Queue for any ingress packets which aren't hashed. We'll
728
- * use our first ingress queue ...
729
- */
730
- if (!err)
731
- err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
732
- FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
733
- FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
734
- FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
735
- FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
736
- FW_RSS_VI_CONFIG_CMD_UDPEN_F,
737
- rss[0]);
876
+ err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
738877 kfree(rss);
739878 return err;
740879 }
@@ -772,6 +911,12 @@
772911 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
773912 }
774913
914
+void cxgb4_quiesce_rx(struct sge_rspq *q)
915
+{
916
+ if (q->handler)
917
+ napi_disable(&q->napi);
918
+}
919
+
775920 /*
776921 * Wait until all NAPI handlers are descheduled.
777922 */
@@ -782,24 +927,40 @@
782927 for (i = 0; i < adap->sge.ingr_sz; i++) {
783928 struct sge_rspq *q = adap->sge.ingr_map[i];
784929
785
- if (q && q->handler)
786
- napi_disable(&q->napi);
930
+ if (!q)
931
+ continue;
932
+
933
+ cxgb4_quiesce_rx(q);
787934 }
788935 }
789936
790937 /* Disable interrupt and napi handler */
791938 static void disable_interrupts(struct adapter *adap)
792939 {
793
- if (adap->flags & FULL_INIT_DONE) {
940
+ struct sge *s = &adap->sge;
941
+
942
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
794943 t4_intr_disable(adap);
795
- if (adap->flags & USING_MSIX) {
944
+ if (adap->flags & CXGB4_USING_MSIX) {
796945 free_msix_queue_irqs(adap);
797
- free_irq(adap->msix_info[0].vec, adap);
946
+ free_irq(adap->msix_info[s->nd_msix_idx].vec,
947
+ adap);
798948 } else {
799949 free_irq(adap->pdev->irq, adap);
800950 }
801951 quiesce_rx(adap);
802952 }
953
+}
954
+
955
+void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
956
+{
957
+ if (q->handler)
958
+ napi_enable(&q->napi);
959
+
960
+ /* 0-increment GTS to start the timer and enable interrupts */
961
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
962
+ SEINTARM_V(q->intr_params) |
963
+ INGRESSQID_V(q->cntxt_id));
803964 }
804965
805966 /*
@@ -814,37 +975,63 @@
814975
815976 if (!q)
816977 continue;
817
- if (q->handler)
818
- napi_enable(&q->napi);
819978
820
- /* 0-increment GTS to start the timer and enable interrupts */
821
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
822
- SEINTARM_V(q->intr_params) |
823
- INGRESSQID_V(q->cntxt_id));
979
+ cxgb4_enable_rx(adap, q);
824980 }
825981 }
826982
983
+static int setup_non_data_intr(struct adapter *adap)
984
+{
985
+ int msix;
986
+
987
+ adap->sge.nd_msix_idx = -1;
988
+ if (!(adap->flags & CXGB4_USING_MSIX))
989
+ return 0;
990
+
991
+ /* Request MSI-X vector for non-data interrupt */
992
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
993
+ if (msix < 0)
994
+ return -ENOMEM;
995
+
996
+ snprintf(adap->msix_info[msix].desc,
997
+ sizeof(adap->msix_info[msix].desc),
998
+ "%s", adap->port[0]->name);
999
+
1000
+ adap->sge.nd_msix_idx = msix;
1001
+ return 0;
1002
+}
8271003
8281004 static int setup_fw_sge_queues(struct adapter *adap)
8291005 {
8301006 struct sge *s = &adap->sge;
831
- int err = 0;
1007
+ int msix, err = 0;
8321008
8331009 bitmap_zero(s->starving_fl, s->egr_sz);
8341010 bitmap_zero(s->txq_maperr, s->egr_sz);
8351011
836
- if (adap->flags & USING_MSIX)
837
- adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
838
- else {
1012
+ if (adap->flags & CXGB4_USING_MSIX) {
1013
+ s->fwevtq_msix_idx = -1;
1014
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
1015
+ if (msix < 0)
1016
+ return -ENOMEM;
1017
+
1018
+ snprintf(adap->msix_info[msix].desc,
1019
+ sizeof(adap->msix_info[msix].desc),
1020
+ "%s-FWeventq", adap->port[0]->name);
1021
+ } else {
8391022 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
8401023 NULL, NULL, NULL, -1);
8411024 if (err)
8421025 return err;
843
- adap->msi_idx = -((int)s->intrq.abs_id + 1);
1026
+ msix = -((int)s->intrq.abs_id + 1);
8441027 }
8451028
8461029 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
847
- adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
1030
+ msix, NULL, fwevtq_handler, NULL, -1);
1031
+ if (err && msix >= 0)
1032
+ cxgb4_free_msix_idx_in_bmap(adap, msix);
1033
+
1034
+ s->fwevtq_msix_idx = msix;
8481035 return err;
8491036 }
8501037
@@ -858,13 +1045,16 @@
8581045 */
8591046 static int setup_sge_queues(struct adapter *adap)
8601047 {
861
- int err, i, j;
862
- struct sge *s = &adap->sge;
8631048 struct sge_uld_rxq_info *rxq_info = NULL;
1049
+ struct sge *s = &adap->sge;
8641050 unsigned int cmplqid = 0;
1051
+ int err, i, j, msix = 0;
8651052
8661053 if (is_uld(adap))
8671054 rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
1055
+
1056
+ if (!(adap->flags & CXGB4_USING_MSIX))
1057
+ msix = -((int)s->intrq.abs_id + 1);
8681058
8691059 for_each_port(adap, i) {
8701060 struct net_device *dev = adap->port[i];
@@ -873,10 +1063,21 @@
8731063 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
8741064
8751065 for (j = 0; j < pi->nqsets; j++, q++) {
876
- if (adap->msi_idx > 0)
877
- adap->msi_idx++;
1066
+ if (msix >= 0) {
1067
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
1068
+ if (msix < 0) {
1069
+ err = msix;
1070
+ goto freeout;
1071
+ }
1072
+
1073
+ snprintf(adap->msix_info[msix].desc,
1074
+ sizeof(adap->msix_info[msix].desc),
1075
+ "%s-Rx%d", dev->name, j);
1076
+ q->msix = &adap->msix_info[msix];
1077
+ }
1078
+
8781079 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
879
- adap->msi_idx, &q->fl,
1080
+ msix, &q->fl,
8801081 t4_ethrx_handler,
8811082 NULL,
8821083 t4_get_tp_ch_map(adap,
@@ -886,10 +1087,13 @@
8861087 q->rspq.idx = j;
8871088 memset(&q->stats, 0, sizeof(q->stats));
8881089 }
889
- for (j = 0; j < pi->nqsets; j++, t++) {
1090
+
1091
+ q = &s->ethrxq[pi->first_qset];
1092
+ for (j = 0; j < pi->nqsets; j++, t++, q++) {
8901093 err = t4_sge_alloc_eth_txq(adap, t, dev,
8911094 netdev_get_tx_queue(dev, j),
892
- s->fw_evtq.cntxt_id);
1095
+ q->rspq.cntxt_id,
1096
+ !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
8931097 if (err)
8941098 goto freeout;
8951099 }
@@ -911,7 +1115,7 @@
9111115 if (!is_t4(adap->params.chip)) {
9121116 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
9131117 netdev_get_tx_queue(adap->port[0], 0)
914
- , s->fw_evtq.cntxt_id);
1118
+ , s->fw_evtq.cntxt_id, false);
9151119 if (err)
9161120 goto freeout;
9171121 }
@@ -929,8 +1133,7 @@
9291133 }
9301134
9311135 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
932
- struct net_device *sb_dev,
933
- select_queue_fallback_t fallback)
1136
+ struct net_device *sb_dev)
9341137 {
9351138 int txq;
9361139
@@ -961,6 +1164,25 @@
9611164 }
9621165 #endif /* CONFIG_CHELSIO_T4_DCB */
9631166
1167
+ if (dev->num_tc) {
1168
+ struct port_info *pi = netdev2pinfo(dev);
1169
+ u8 ver, proto;
1170
+
1171
+ ver = ip_hdr(skb)->version;
1172
+ proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
1173
+ ip_hdr(skb)->protocol;
1174
+
1175
+ /* Send unsupported traffic pattern to normal NIC queues. */
1176
+ txq = netdev_pick_tx(dev, skb, sb_dev);
1177
+ if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
1178
+ skb->encapsulation ||
1179
+ cxgb4_is_ktls_skb(skb) ||
1180
+ (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
1181
+ txq = txq % pi->nqsets;
1182
+
1183
+ return txq;
1184
+ }
1185
+
9641186 if (select_queue) {
9651187 txq = (skb_rx_queue_recorded(skb)
9661188 ? skb_get_rx_queue(skb)
@@ -972,7 +1194,7 @@
9721194 return txq;
9731195 }
9741196
975
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
1197
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
9761198 }
9771199
9781200 static int closest_timer(const struct sge *s, int time)
....@@ -1050,15 +1272,15 @@
10501272
10511273 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
10521274 {
1053
- const struct port_info *pi = netdev_priv(dev);
10541275 netdev_features_t changed = dev->features ^ features;
1276
+ const struct port_info *pi = netdev_priv(dev);
10551277 int err;
10561278
10571279 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
10581280 return 0;
10591281
1060
- err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
1061
- -1, -1, -1,
1282
+ err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
1283
+ pi->viid_mirror, -1, -1, -1, -1,
10621284 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
10631285 if (unlikely(err))
10641286 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
@@ -1074,6 +1296,292 @@
10741296 t4_setup_debugfs(adap);
10751297 #endif
10761298 return 0;
1299
+}
1300
+
1301
+static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
1302
+ struct sge_eth_rxq *mirror_rxq)
1303
+{
1304
+ if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
1305
+ !(adap->flags & CXGB4_SHUTTING_DOWN))
1306
+ cxgb4_quiesce_rx(&mirror_rxq->rspq);
1307
+
1308
+ if (adap->flags & CXGB4_USING_MSIX) {
1309
+ cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
1310
+ mirror_rxq->msix->aff_mask);
1311
+ free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
1312
+ cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1313
+ }
1314
+
1315
+ free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1316
+}
1317
+
1318
+static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
1319
+{
1320
+ struct port_info *pi = netdev2pinfo(dev);
1321
+ struct adapter *adap = netdev2adap(dev);
1322
+ struct sge_eth_rxq *mirror_rxq;
1323
+ struct sge *s = &adap->sge;
1324
+ int ret = 0, msix = 0;
1325
+ u16 i, rxqid;
1326
+ u16 *rss;
1327
+
1328
+ if (!pi->vi_mirror_count)
1329
+ return 0;
1330
+
1331
+ if (s->mirror_rxq[pi->port_id])
1332
+ return 0;
1333
+
1334
+ mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
1335
+ if (!mirror_rxq)
1336
+ return -ENOMEM;
1337
+
1338
+ s->mirror_rxq[pi->port_id] = mirror_rxq;
1339
+
1340
+ if (!(adap->flags & CXGB4_USING_MSIX))
1341
+ msix = -((int)adap->sge.intrq.abs_id + 1);
1342
+
1343
+ for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
1344
+ mirror_rxq = &s->mirror_rxq[pi->port_id][i];
1345
+
1346
+ /* Allocate Mirror Rxqs */
1347
+ if (msix >= 0) {
1348
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
1349
+ if (msix < 0) {
1350
+ ret = msix;
1351
+ goto out_free_queues;
1352
+ }
1353
+
1354
+ mirror_rxq->msix = &adap->msix_info[msix];
1355
+ snprintf(mirror_rxq->msix->desc,
1356
+ sizeof(mirror_rxq->msix->desc),
1357
+ "%s-mirrorrxq%d", dev->name, i);
1358
+ }
1359
+
1360
+ init_rspq(adap, &mirror_rxq->rspq,
1361
+ CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
1362
+ CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
1363
+ CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
1364
+ CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
1365
+
1366
+ mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
1367
+
1368
+ ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
1369
+ dev, msix, &mirror_rxq->fl,
1370
+ t4_ethrx_handler, NULL, 0);
1371
+ if (ret)
1372
+ goto out_free_msix_idx;
1373
+
1374
+ /* Setup MSI-X vectors for Mirror Rxqs */
1375
+ if (adap->flags & CXGB4_USING_MSIX) {
1376
+ ret = request_irq(mirror_rxq->msix->vec,
1377
+ t4_sge_intr_msix, 0,
1378
+ mirror_rxq->msix->desc,
1379
+ &mirror_rxq->rspq);
1380
+ if (ret)
1381
+ goto out_free_rxq;
1382
+
1383
+ cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
1384
+ &mirror_rxq->msix->aff_mask, i);
1385
+ }
1386
+
1387
+ /* Start NAPI for Mirror Rxqs */
1388
+ cxgb4_enable_rx(adap, &mirror_rxq->rspq);
1389
+ }
1390
+
1391
+ /* Setup RSS for Mirror Rxqs */
1392
+ rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
1393
+ if (!rss) {
1394
+ ret = -ENOMEM;
1395
+ goto out_free_queues;
1396
+ }
1397
+
1398
+ mirror_rxq = &s->mirror_rxq[pi->port_id][0];
1399
+ for (i = 0; i < pi->rss_size; i++)
1400
+ rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
1401
+
1402
+ ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
1403
+ kfree(rss);
1404
+ if (ret)
1405
+ goto out_free_queues;
1406
+
1407
+ return 0;
1408
+
1409
+out_free_rxq:
1410
+ free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1411
+
1412
+out_free_msix_idx:
1413
+ cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1414
+
1415
+out_free_queues:
1416
+ while (rxqid-- > 0)
1417
+ cxgb4_port_mirror_free_rxq(adap,
1418
+ &s->mirror_rxq[pi->port_id][rxqid]);
1419
+
1420
+ kfree(s->mirror_rxq[pi->port_id]);
1421
+ s->mirror_rxq[pi->port_id] = NULL;
1422
+ return ret;
1423
+}
1424
+
1425
+static void cxgb4_port_mirror_free_queues(struct net_device *dev)
1426
+{
1427
+ struct port_info *pi = netdev2pinfo(dev);
1428
+ struct adapter *adap = netdev2adap(dev);
1429
+ struct sge *s = &adap->sge;
1430
+ u16 i;
1431
+
1432
+ if (!pi->vi_mirror_count)
1433
+ return;
1434
+
1435
+ if (!s->mirror_rxq[pi->port_id])
1436
+ return;
1437
+
1438
+ for (i = 0; i < pi->nmirrorqsets; i++)
1439
+ cxgb4_port_mirror_free_rxq(adap,
1440
+ &s->mirror_rxq[pi->port_id][i]);
1441
+
1442
+ kfree(s->mirror_rxq[pi->port_id]);
1443
+ s->mirror_rxq[pi->port_id] = NULL;
1444
+}
1445
+
1446
+static int cxgb4_port_mirror_start(struct net_device *dev)
1447
+{
1448
+ struct port_info *pi = netdev2pinfo(dev);
1449
+ struct adapter *adap = netdev2adap(dev);
1450
+ int ret, idx = -1;
1451
+
1452
+ if (!pi->vi_mirror_count)
1453
+ return 0;
1454
+
1455
+ /* Mirror VIs can be created dynamically after stack had
1456
+ * already setup Rx modes like MTU, promisc, allmulti, etc.
1457
+ * on main VI. So, parse what the stack had setup on the
1458
+ * main VI and update the same on the mirror VI.
1459
+ */
1460
+ ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
1461
+ dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
1462
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
1463
+ !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
1464
+ if (ret) {
1465
+ dev_err(adap->pdev_dev,
1466
+ "Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
1467
+ pi->viid_mirror, ret);
1468
+ return ret;
1469
+ }
1470
+
1471
+ /* Enable replication bit for the device's MAC address
1472
+ * in MPS TCAM, so that the packets for the main VI are
1473
+ * replicated to mirror VI.
1474
+ */
1475
+ ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
1476
+ dev->dev_addr, true, NULL);
1477
+ if (ret) {
1478
+ dev_err(adap->pdev_dev,
1479
+ "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
1480
+ pi->viid_mirror, ret);
1481
+ return ret;
1482
+ }
1483
+
1484
+ /* Enabling a Virtual Interface can result in an interrupt
1485
+ * during the processing of the VI Enable command and, in some
1486
+ * paths, result in an attempt to issue another command in the
1487
+ * interrupt context. Thus, we disable interrupts during the
1488
+ * course of the VI Enable command ...
1489
+ */
1490
+ local_bh_disable();
1491
+ ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
1492
+ false);
1493
+ local_bh_enable();
1494
+ if (ret)
1495
+ dev_err(adap->pdev_dev,
1496
+ "Failed starting Mirror VI 0x%x, ret: %d\n",
1497
+ pi->viid_mirror, ret);
1498
+
1499
+ return ret;
1500
+}
1501
+
1502
+static void cxgb4_port_mirror_stop(struct net_device *dev)
1503
+{
1504
+ struct port_info *pi = netdev2pinfo(dev);
1505
+ struct adapter *adap = netdev2adap(dev);
1506
+
1507
+ if (!pi->vi_mirror_count)
1508
+ return;
1509
+
1510
+ t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
1511
+ false);
1512
+}
1513
+
1514
+int cxgb4_port_mirror_alloc(struct net_device *dev)
1515
+{
1516
+ struct port_info *pi = netdev2pinfo(dev);
1517
+ struct adapter *adap = netdev2adap(dev);
1518
+ int ret = 0;
1519
+
1520
+ if (!pi->nmirrorqsets)
1521
+ return -EOPNOTSUPP;
1522
+
1523
+ mutex_lock(&pi->vi_mirror_mutex);
1524
+ if (pi->viid_mirror) {
1525
+ pi->vi_mirror_count++;
1526
+ goto out_unlock;
1527
+ }
1528
+
1529
+ ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
1530
+ &pi->viid_mirror);
1531
+ if (ret)
1532
+ goto out_unlock;
1533
+
1534
+ pi->vi_mirror_count = 1;
1535
+
1536
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
1537
+ ret = cxgb4_port_mirror_alloc_queues(dev);
1538
+ if (ret)
1539
+ goto out_free_vi;
1540
+
1541
+ ret = cxgb4_port_mirror_start(dev);
1542
+ if (ret)
1543
+ goto out_free_queues;
1544
+ }
1545
+
1546
+ mutex_unlock(&pi->vi_mirror_mutex);
1547
+ return 0;
1548
+
1549
+out_free_queues:
1550
+ cxgb4_port_mirror_free_queues(dev);
1551
+
1552
+out_free_vi:
1553
+ pi->vi_mirror_count = 0;
1554
+ t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1555
+ pi->viid_mirror = 0;
1556
+
1557
+out_unlock:
1558
+ mutex_unlock(&pi->vi_mirror_mutex);
1559
+ return ret;
1560
+}
1561
+
1562
+void cxgb4_port_mirror_free(struct net_device *dev)
1563
+{
1564
+ struct port_info *pi = netdev2pinfo(dev);
1565
+ struct adapter *adap = netdev2adap(dev);
1566
+
1567
+ mutex_lock(&pi->vi_mirror_mutex);
1568
+ if (!pi->viid_mirror)
1569
+ goto out_unlock;
1570
+
1571
+ if (pi->vi_mirror_count > 1) {
1572
+ pi->vi_mirror_count--;
1573
+ goto out_unlock;
1574
+ }
1575
+
1576
+ cxgb4_port_mirror_stop(dev);
1577
+ cxgb4_port_mirror_free_queues(dev);
1578
+
1579
+ pi->vi_mirror_count = 0;
1580
+ t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1581
+ pi->viid_mirror = 0;
1582
+
1583
+out_unlock:
1584
+ mutex_unlock(&pi->vi_mirror_mutex);
10771585 }
10781586
10791587 /*
@@ -1236,8 +1744,8 @@
12361744 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
12371745 unsigned int tid)
12381746 {
1239
- void **p = &t->tid_tab[tid];
12401747 struct adapter *adap = container_of(t, struct adapter, tids);
1748
+ void **p = &t->tid_tab[tid - t->tid_base];
12411749
12421750 spin_lock_bh(&adap->tid_release_lock);
12431751 *p = adap->tid_release_head;
@@ -1289,13 +1797,13 @@
12891797 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
12901798 unsigned short family)
12911799 {
1292
- struct sk_buff *skb;
12931800 struct adapter *adap = container_of(t, struct adapter, tids);
1801
+ struct sk_buff *skb;
12941802
1295
- WARN_ON(tid >= t->ntids);
1803
+ WARN_ON(tid_out_of_range(&adap->tids, tid));
12961804
1297
- if (t->tid_tab[tid]) {
1298
- t->tid_tab[tid] = NULL;
1805
+ if (t->tid_tab[tid - adap->tids.tid_base]) {
1806
+ t->tid_tab[tid - adap->tids.tid_base] = NULL;
12991807 atomic_dec(&t->conns_in_use);
13001808 if (t->hash_base && (tid >= t->hash_base)) {
13011809 if (family == AF_INET6)
@@ -1327,19 +1835,27 @@
13271835 struct adapter *adap = container_of(t, struct adapter, tids);
13281836 unsigned int max_ftids = t->nftids + t->nsftids;
13291837 unsigned int natids = t->natids;
1838
+ unsigned int hpftid_bmap_size;
1839
+ unsigned int eotid_bmap_size;
13301840 unsigned int stid_bmap_size;
13311841 unsigned int ftid_bmap_size;
13321842 size_t size;
13331843
13341844 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
13351845 ftid_bmap_size = BITS_TO_LONGS(t->nftids);
1846
+ hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
1847
+ eotid_bmap_size = BITS_TO_LONGS(t->neotids);
13361848 size = t->ntids * sizeof(*t->tid_tab) +
13371849 natids * sizeof(*t->atid_tab) +
13381850 t->nstids * sizeof(*t->stid_tab) +
13391851 t->nsftids * sizeof(*t->stid_tab) +
13401852 stid_bmap_size * sizeof(long) +
1853
+ t->nhpftids * sizeof(*t->hpftid_tab) +
1854
+ hpftid_bmap_size * sizeof(long) +
13411855 max_ftids * sizeof(*t->ftid_tab) +
1342
- ftid_bmap_size * sizeof(long);
1856
+ ftid_bmap_size * sizeof(long) +
1857
+ t->neotids * sizeof(*t->eotid_tab) +
1858
+ eotid_bmap_size * sizeof(long);
13431859
13441860 t->tid_tab = kvzalloc(size, GFP_KERNEL);
13451861 if (!t->tid_tab)
@@ -1348,8 +1864,12 @@
13481864 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
13491865 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
13501866 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1351
- t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1867
+ t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1868
+ t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
1869
+ t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
13521870 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1871
+ t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
1872
+ t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
13531873 spin_lock_init(&t->stid_lock);
13541874 spin_lock_init(&t->atid_lock);
13551875 spin_lock_init(&t->ftid_lock);
@@ -1362,6 +1882,7 @@
13621882 atomic_set(&t->tids_in_use, 0);
13631883 atomic_set(&t->conns_in_use, 0);
13641884 atomic_set(&t->hash_tids_in_use, 0);
1885
+ atomic_set(&t->eotids_in_use, 0);
13651886
13661887 /* Setup the free list for atid_tab and clear the stid bitmap. */
13671888 if (natids) {
@@ -1376,8 +1897,13 @@
13761897 if (!t->stid_base &&
13771898 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
13781899 __set_bit(0, t->stid_bmap);
1900
+
1901
+ if (t->neotids)
1902
+ bitmap_zero(t->eotid_bmap, t->neotids);
13791903 }
13801904
1905
+ if (t->nhpftids)
1906
+ bitmap_zero(t->hpftid_bmap, t->nhpftids);
13811907 bitmap_zero(t->ftid_bmap, t->nftids);
13821908 return 0;
13831909 }
@@ -1388,6 +1914,7 @@
13881914 * @stid: the server TID
13891915 * @sip: local IP address to bind server to
13901916 * @sport: the server's TCP port
1917
+ * @vlan: the VLAN header information
13911918 * @queue: queue to direct messages from this server to
13921919 *
13931920 * Create an IP server for the given port and address.
@@ -1586,28 +2113,6 @@
15862113 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
15872114
15882115 /**
1589
- * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
1590
- * @chip: chip type
1591
- * @viid: VI id of the given port
1592
- *
1593
- * Return the SMT index for this VI.
1594
- */
1595
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
1596
-{
1597
- /* In T4/T5, SMT contains 256 SMAC entries organized in
1598
- * 128 rows of 2 entries each.
1599
- * In T6, SMT contains 256 SMAC entries in 256 rows.
1600
- * TODO: The below code needs to be updated when we add support
1601
- * for 256 VFs.
1602
- */
1603
- if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1604
- return ((viid & 0x7f) << 1);
1605
- else
1606
- return (viid & 0x7f);
1607
-}
1608
-EXPORT_SYMBOL(cxgb4_tp_smt_idx);
1609
-
1610
-/**
16112116 * cxgb4_port_chan - get the HW channel of a port
16122117 * @dev: the net device for the port
16132118 *
@@ -1618,6 +2123,18 @@
16182123 return netdev2pinfo(dev)->tx_chan;
16192124 }
16202125 EXPORT_SYMBOL(cxgb4_port_chan);
2126
+
2127
+/**
2128
+ * cxgb4_port_e2cchan - get the HW c-channel of a port
2129
+ * @dev: the net device for the port
2130
+ *
2131
+ * Return the HW RX c-channel of the given port.
2132
+ */
2133
+unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
2134
+{
2135
+ return netdev2pinfo(dev)->rx_cchan;
2136
+}
2137
+EXPORT_SYMBOL(cxgb4_port_e2cchan);
16212138
16222139 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
16232140 {
@@ -2126,6 +2643,9 @@
21262643 {
21272644 unsigned int i;
21282645
2646
+ if (!is_uld(adap))
2647
+ return;
2648
+
21292649 mutex_lock(&uld_mutex);
21302650 list_del(&adap->list_node);
21312651
@@ -2242,6 +2762,7 @@
22422762 */
22432763 static int cxgb_up(struct adapter *adap)
22442764 {
2765
+ struct sge *s = &adap->sge;
22452766 int err;
22462767
22472768 mutex_lock(&uld_mutex);
@@ -2252,20 +2773,25 @@
22522773 if (err)
22532774 goto freeq;
22542775
2255
- if (adap->flags & USING_MSIX) {
2256
- name_msix_vecs(adap);
2257
- err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2258
- adap->msix_info[0].desc, adap);
2259
- if (err)
2260
- goto irq_err;
2261
- err = request_msix_queue_irqs(adap);
2262
- if (err) {
2263
- free_irq(adap->msix_info[0].vec, adap);
2776
+ if (adap->flags & CXGB4_USING_MSIX) {
2777
+ if (s->nd_msix_idx < 0) {
2778
+ err = -ENOMEM;
22642779 goto irq_err;
22652780 }
2781
+
2782
+ err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2783
+ t4_nondata_intr, 0,
2784
+ adap->msix_info[s->nd_msix_idx].desc, adap);
2785
+ if (err)
2786
+ goto irq_err;
2787
+
2788
+ err = request_msix_queue_irqs(adap);
2789
+ if (err)
2790
+ goto irq_err_free_nd_msix;
22662791 } else {
22672792 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2268
- (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2793
+ (adap->flags & CXGB4_USING_MSI) ? 0
2794
+ : IRQF_SHARED,
22692795 adap->port[0]->name, adap);
22702796 if (err)
22712797 goto irq_err;
@@ -2274,7 +2800,7 @@
22742800 enable_rx(adap);
22752801 t4_sge_start(adap);
22762802 t4_intr_enable(adap);
2277
- adap->flags |= FULL_INIT_DONE;
2803
+ adap->flags |= CXGB4_FULL_INIT_DONE;
22782804 mutex_unlock(&uld_mutex);
22792805
22802806 notify_ulds(adap, CXGB4_STATE_UP);
@@ -2283,11 +2809,13 @@
22832809 #endif
22842810 return err;
22852811
2286
- irq_err:
2812
+irq_err_free_nd_msix:
2813
+ free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2814
+irq_err:
22872815 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2288
- freeq:
2816
+freeq:
22892817 t4_free_sge_resources(adap);
2290
- rel_lock:
2818
+rel_lock:
22912819 mutex_unlock(&uld_mutex);
22922820 return err;
22932821 }
@@ -2303,7 +2831,7 @@
23032831 t4_sge_stop(adapter);
23042832 t4_free_sge_resources(adapter);
23052833
2306
- adapter->flags &= ~FULL_INIT_DONE;
2834
+ adapter->flags &= ~CXGB4_FULL_INIT_DONE;
23072835 }
23082836
23092837 /*
@@ -2311,13 +2839,13 @@
23112839 */
23122840 static int cxgb_open(struct net_device *dev)
23132841 {
2314
- int err;
23152842 struct port_info *pi = netdev_priv(dev);
23162843 struct adapter *adapter = pi->adapter;
2844
+ int err;
23172845
23182846 netif_carrier_off(dev);
23192847
2320
- if (!(adapter->flags & FULL_INIT_DONE)) {
2848
+ if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
23212849 err = cxgb_up(adapter);
23222850 if (err < 0)
23232851 return err;
@@ -2331,8 +2859,29 @@
23312859 return err;
23322860
23332861 err = link_start(dev);
2334
- if (!err)
2335
- netif_tx_start_all_queues(dev);
2862
+ if (err)
2863
+ return err;
2864
+
2865
+ if (pi->nmirrorqsets) {
2866
+ mutex_lock(&pi->vi_mirror_mutex);
2867
+ err = cxgb4_port_mirror_alloc_queues(dev);
2868
+ if (err)
2869
+ goto out_unlock;
2870
+
2871
+ err = cxgb4_port_mirror_start(dev);
2872
+ if (err)
2873
+ goto out_free_queues;
2874
+ mutex_unlock(&pi->vi_mirror_mutex);
2875
+ }
2876
+
2877
+ netif_tx_start_all_queues(dev);
2878
+ return 0;
2879
+
2880
+out_free_queues:
2881
+ cxgb4_port_mirror_free_queues(dev);
2882
+
2883
+out_unlock:
2884
+ mutex_unlock(&pi->vi_mirror_mutex);
23362885 return err;
23372886 }
23382887
@@ -2350,7 +2899,17 @@
23502899 cxgb4_dcb_reset(dev);
23512900 dcb_tx_queue_prio_enable(dev, false);
23522901 #endif
2353
- return ret;
2902
+ if (ret)
2903
+ return ret;
2904
+
2905
+ if (pi->nmirrorqsets) {
2906
+ mutex_lock(&pi->vi_mirror_mutex);
2907
+ cxgb4_port_mirror_stop(dev);
2908
+ cxgb4_port_mirror_free_queues(dev);
2909
+ mutex_unlock(&pi->vi_mirror_mutex);
2910
+ }
2911
+
2912
+ return 0;
23542913 }
23552914
23562915 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
@@ -2384,7 +2943,7 @@
23842943
23852944 /* Clear out filter specifications */
23862945 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2387
- f->fs.val.lport = cpu_to_be16(sport);
2946
+ f->fs.val.lport = be16_to_cpu(sport);
23882947 f->fs.mask.lport = ~0;
23892948 val = (u8 *)&sip;
23902949 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
@@ -2616,11 +3175,11 @@
26163175
26173176 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
26183177 {
2619
- int ret;
26203178 struct port_info *pi = netdev_priv(dev);
3179
+ int ret;
26213180
2622
- ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
2623
- -1, -1, -1, true);
3181
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
3182
+ pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
26243183 if (!ret)
26253184 dev->mtu = new_mtu;
26263185 return ret;
@@ -2669,7 +3228,7 @@
26693228
26703229 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
26713230 vf < nvfs; vf++) {
2672
- macaddr[5] = adap->pf * 16 + vf;
3231
+ macaddr[5] = adap->pf * nvfs + vf;
26733232 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
26743233 }
26753234 }
@@ -2712,6 +3271,7 @@
27123271 ivi->min_tx_rate = 0;
27133272 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
27143273 ivi->vlan = vfinfo->vlan;
3274
+ ivi->linkstate = vfinfo->link_state;
27153275 return 0;
27163276 }
27173277
@@ -2748,6 +3308,27 @@
27483308 return -EINVAL;
27493309 }
27503310
3311
+ if (max_tx_rate == 0) {
3312
+ /* unbind VF to to any Traffic Class */
3313
+ fw_pfvf =
3314
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3315
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3316
+ fw_class = 0xffffffff;
3317
+ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3318
+ &fw_pfvf, &fw_class);
3319
+ if (ret) {
3320
+ dev_err(adap->pdev_dev,
3321
+ "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
3322
+ ret, adap->pf, vf);
3323
+ return -EINVAL;
3324
+ }
3325
+ dev_info(adap->pdev_dev,
3326
+ "PF %d VF %d is unbound from TX Rate Limiting\n",
3327
+ adap->pf, vf);
3328
+ adap->vfinfo[vf].tx_rate = 0;
3329
+ return 0;
3330
+ }
3331
+
27513332 ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
27523333 if (ret != FW_SUCCESS) {
27533334 dev_err(adap->pdev_dev,
@@ -2779,7 +3360,7 @@
27793360 SCHED_CLASS_RATEUNIT_BITS,
27803361 SCHED_CLASS_RATEMODE_ABS,
27813362 pi->tx_chan, class_id, 0,
2782
- max_tx_rate * 1000, 0, pktsize);
3363
+ max_tx_rate * 1000, 0, pktsize, 0);
27833364 if (ret) {
27843365 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
27853366 ret);
@@ -2797,8 +3378,8 @@
27973378 &fw_class);
27983379 if (ret) {
27993380 dev_err(adap->pdev_dev,
2800
- "Err %d in binding VF %d to Traffic Class %d\n",
2801
- ret, vf, class_id);
3381
+ "Err %d in binding PF %d VF %d to Traffic Class %d\n",
3382
+ ret, adap->pf, vf, class_id);
28023383 return -EINVAL;
28033384 }
28043385 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
@@ -2830,6 +3411,49 @@
28303411 ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
28313412 return ret;
28323413 }
3414
+
3415
+static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3416
+ int link)
3417
+{
3418
+ struct port_info *pi = netdev_priv(dev);
3419
+ struct adapter *adap = pi->adapter;
3420
+ u32 param, val;
3421
+ int ret = 0;
3422
+
3423
+ if (vf >= adap->num_vfs)
3424
+ return -EINVAL;
3425
+
3426
+ switch (link) {
3427
+ case IFLA_VF_LINK_STATE_AUTO:
3428
+ val = FW_VF_LINK_STATE_AUTO;
3429
+ break;
3430
+
3431
+ case IFLA_VF_LINK_STATE_ENABLE:
3432
+ val = FW_VF_LINK_STATE_ENABLE;
3433
+ break;
3434
+
3435
+ case IFLA_VF_LINK_STATE_DISABLE:
3436
+ val = FW_VF_LINK_STATE_DISABLE;
3437
+ break;
3438
+
3439
+ default:
3440
+ return -EINVAL;
3441
+ }
3442
+
3443
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3444
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3445
+ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3446
+ &param, &val);
3447
+ if (ret) {
3448
+ dev_err(adap->pdev_dev,
3449
+ "Error %d in setting PF %d VF %d link state\n",
3450
+ ret, adap->pf, vf);
3451
+ return -EINVAL;
3452
+ }
3453
+
3454
+ adap->vfinfo[vf].link_state = link;
3455
+ return ret;
3456
+}
28333457 #endif /* CONFIG_PCI_IOV */
28343458
28353459 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2841,13 +3465,12 @@
28413465 if (!is_valid_ether_addr(addr->sa_data))
28423466 return -EADDRNOTAVAIL;
28433467
2844
- ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
2845
- pi->xact_addr_filt, addr->sa_data, true, true);
3468
+ ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3469
+ addr->sa_data, true, &pi->smt_idx);
28463470 if (ret < 0)
28473471 return ret;
28483472
28493473 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2850
- pi->xact_addr_filt = ret;
28513474 return 0;
28523475 }
28533476
@@ -2857,7 +3480,7 @@
28573480 struct port_info *pi = netdev_priv(dev);
28583481 struct adapter *adap = pi->adapter;
28593482
2860
- if (adap->flags & USING_MSIX) {
3483
+ if (adap->flags & CXGB4_USING_MSIX) {
28613484 int i;
28623485 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
28633486
@@ -2872,9 +3495,9 @@
28723495 {
28733496 struct port_info *pi = netdev_priv(dev);
28743497 struct adapter *adap = pi->adapter;
3498
+ struct ch_sched_queue qe = { 0 };
3499
+ struct ch_sched_params p = { 0 };
28753500 struct sched_class *e;
2876
- struct ch_sched_params p;
2877
- struct ch_sched_queue qe;
28783501 u32 req_rate;
28793502 int err = 0;
28803503
@@ -2884,11 +3507,20 @@
28843507 if (index < 0 || index > pi->nqsets - 1)
28853508 return -EINVAL;
28863509
2887
- if (!(adap->flags & FULL_INIT_DONE)) {
3510
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
28883511 dev_err(adap->pdev_dev,
28893512 "Failed to rate limit on queue %d. Link Down?\n",
28903513 index);
28913514 return -EINVAL;
3515
+ }
3516
+
3517
+ qe.queue = index;
3518
+ e = cxgb4_sched_queue_lookup(dev, &qe);
3519
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3520
+ dev_err(adap->pdev_dev,
3521
+ "Queue %u already bound to class %u of type: %u\n",
3522
+ index, e->idx, e->info.u.params.level);
3523
+ return -EBUSY;
28923524 }
28933525
28943526 /* Convert from Mbps to Kbps */
@@ -2920,7 +3552,6 @@
29203552 return 0;
29213553
29223554 /* Fetch any available unused or matching scheduling class */
2923
- memset(&p, 0, sizeof(p));
29243555 p.type = SCHED_CLASS_TYPE_PACKET;
29253556 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
29263557 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
@@ -2950,14 +3581,14 @@
29503581 }
29513582
29523583 static int cxgb_setup_tc_flower(struct net_device *dev,
2953
- struct tc_cls_flower_offload *cls_flower)
3584
+ struct flow_cls_offload *cls_flower)
29543585 {
29553586 switch (cls_flower->command) {
2956
- case TC_CLSFLOWER_REPLACE:
3587
+ case FLOW_CLS_REPLACE:
29573588 return cxgb4_tc_flower_replace(dev, cls_flower);
2958
- case TC_CLSFLOWER_DESTROY:
3589
+ case FLOW_CLS_DESTROY:
29593590 return cxgb4_tc_flower_destroy(dev, cls_flower);
2960
- case TC_CLSFLOWER_STATS:
3591
+ case FLOW_CLS_STATS:
29613592 return cxgb4_tc_flower_stats(dev, cls_flower);
29623593 default:
29633594 return -EOPNOTSUPP;
@@ -2978,14 +3609,39 @@
29783609 }
29793610 }
29803611
2981
-static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2982
- void *cb_priv)
3612
+static int cxgb_setup_tc_matchall(struct net_device *dev,
3613
+ struct tc_cls_matchall_offload *cls_matchall,
3614
+ bool ingress)
3615
+{
3616
+ struct adapter *adap = netdev2adap(dev);
3617
+
3618
+ if (!adap->tc_matchall)
3619
+ return -ENOMEM;
3620
+
3621
+ switch (cls_matchall->command) {
3622
+ case TC_CLSMATCHALL_REPLACE:
3623
+ return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
3624
+ case TC_CLSMATCHALL_DESTROY:
3625
+ return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
3626
+ case TC_CLSMATCHALL_STATS:
3627
+ if (ingress)
3628
+ return cxgb4_tc_matchall_stats(dev, cls_matchall);
3629
+ break;
3630
+ default:
3631
+ break;
3632
+ }
3633
+
3634
+ return -EOPNOTSUPP;
3635
+}
3636
+
3637
+static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
3638
+ void *type_data, void *cb_priv)
29833639 {
29843640 struct net_device *dev = cb_priv;
29853641 struct port_info *pi = netdev2pinfo(dev);
29863642 struct adapter *adap = netdev2adap(dev);
29873643
2988
- if (!(adap->flags & FULL_INIT_DONE)) {
3644
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
29893645 dev_err(adap->pdev_dev,
29903646 "Failed to setup tc on port %d. Link Down?\n",
29913647 pi->port_id);
@@ -3000,35 +3656,79 @@
30003656 return cxgb_setup_tc_cls_u32(dev, type_data);
30013657 case TC_SETUP_CLSFLOWER:
30023658 return cxgb_setup_tc_flower(dev, type_data);
3659
+ case TC_SETUP_CLSMATCHALL:
3660
+ return cxgb_setup_tc_matchall(dev, type_data, true);
30033661 default:
30043662 return -EOPNOTSUPP;
30053663 }
30063664 }
30073665
3008
-static int cxgb_setup_tc_block(struct net_device *dev,
3009
- struct tc_block_offload *f)
3666
+static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
3667
+ void *type_data, void *cb_priv)
30103668 {
3669
+ struct net_device *dev = cb_priv;
30113670 struct port_info *pi = netdev2pinfo(dev);
3671
+ struct adapter *adap = netdev2adap(dev);
30123672
3013
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3014
- return -EOPNOTSUPP;
3015
-
3016
- switch (f->command) {
3017
- case TC_BLOCK_BIND:
3018
- return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
3019
- pi, dev, f->extack);
3020
- case TC_BLOCK_UNBIND:
3021
- tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
3022
- return 0;
3023
- default:
3024
- return -EOPNOTSUPP;
3673
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3674
+ dev_err(adap->pdev_dev,
3675
+ "Failed to setup tc on port %d. Link Down?\n",
3676
+ pi->port_id);
3677
+ return -EINVAL;
30253678 }
3679
+
3680
+ if (!tc_cls_can_offload_and_chain0(dev, type_data))
3681
+ return -EOPNOTSUPP;
3682
+
3683
+ switch (type) {
3684
+ case TC_SETUP_CLSMATCHALL:
3685
+ return cxgb_setup_tc_matchall(dev, type_data, false);
3686
+ default:
3687
+ break;
3688
+ }
3689
+
3690
+ return -EOPNOTSUPP;
3691
+}
3692
+
3693
+static int cxgb_setup_tc_mqprio(struct net_device *dev,
3694
+ struct tc_mqprio_qopt_offload *mqprio)
3695
+{
3696
+ struct adapter *adap = netdev2adap(dev);
3697
+
3698
+ if (!is_ethofld(adap) || !adap->tc_mqprio)
3699
+ return -ENOMEM;
3700
+
3701
+ return cxgb4_setup_tc_mqprio(dev, mqprio);
3702
+}
3703
+
3704
+static LIST_HEAD(cxgb_block_cb_list);
3705
+
3706
+static int cxgb_setup_tc_block(struct net_device *dev,
3707
+ struct flow_block_offload *f)
3708
+{
3709
+ struct port_info *pi = netdev_priv(dev);
3710
+ flow_setup_cb_t *cb;
3711
+ bool ingress_only;
3712
+
3713
+ pi->tc_block_shared = f->block_shared;
3714
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3715
+ cb = cxgb_setup_tc_block_egress_cb;
3716
+ ingress_only = false;
3717
+ } else {
3718
+ cb = cxgb_setup_tc_block_ingress_cb;
3719
+ ingress_only = true;
3720
+ }
3721
+
3722
+ return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
3723
+ cb, pi, dev, ingress_only);
30263724 }
30273725
30283726 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
30293727 void *type_data)
30303728 {
30313729 switch (type) {
3730
+ case TC_SETUP_QDISC_MQPRIO:
3731
+ return cxgb_setup_tc_mqprio(dev, type_data);
30323732 case TC_SETUP_BLOCK:
30333733 return cxgb_setup_tc_block(dev, type_data);
30343734 default:
@@ -3036,131 +3736,71 @@
30363736 }
30373737 }
30383738
3039
-static void cxgb_del_udp_tunnel(struct net_device *netdev,
3040
- struct udp_tunnel_info *ti)
3739
+static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
3740
+ unsigned int table, unsigned int entry,
3741
+ struct udp_tunnel_info *ti)
30413742 {
30423743 struct port_info *pi = netdev_priv(netdev);
30433744 struct adapter *adapter = pi->adapter;
3044
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
30453745 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
30463746 int ret = 0, i;
30473747
3048
- if (chip_ver < CHELSIO_T6)
3049
- return;
3050
-
30513748 switch (ti->type) {
30523749 case UDP_TUNNEL_TYPE_VXLAN:
3053
- if (!adapter->vxlan_port_cnt ||
3054
- adapter->vxlan_port != ti->port)
3055
- return; /* Invalid VxLAN destination port */
3056
-
3057
- adapter->vxlan_port_cnt--;
3058
- if (adapter->vxlan_port_cnt)
3059
- return;
3060
-
30613750 adapter->vxlan_port = 0;
30623751 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
30633752 break;
30643753 case UDP_TUNNEL_TYPE_GENEVE:
3065
- if (!adapter->geneve_port_cnt ||
3066
- adapter->geneve_port != ti->port)
3067
- return; /* Invalid GENEVE destination port */
3068
-
3069
- adapter->geneve_port_cnt--;
3070
- if (adapter->geneve_port_cnt)
3071
- return;
3072
-
30733754 adapter->geneve_port = 0;
30743755 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
30753756 break;
30763757 default:
3077
- return;
3758
+ return -EINVAL;
30783759 }
30793760
30803761 /* Matchall mac entries can be deleted only after all tunnel ports
30813762 * are brought down or removed.
30823763 */
30833764 if (!adapter->rawf_cnt)
3084
- return;
3765
+ return 0;
30853766 for_each_port(adapter, i) {
30863767 pi = adap2pinfo(adapter, i);
30873768 ret = t4_free_raw_mac_filt(adapter, pi->viid,
30883769 match_all_mac, match_all_mac,
3089
- adapter->rawf_start +
3090
- pi->port_id,
3770
+ adapter->rawf_start + pi->port_id,
30913771 1, pi->port_id, false);
30923772 if (ret < 0) {
30933773 netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
30943774 i);
3095
- return;
3775
+ return ret;
30963776 }
3097
- atomic_dec(&adapter->mps_encap[adapter->rawf_start +
3098
- pi->port_id].refcnt);
30993777 }
3778
+
3779
+ return 0;
31003780 }
31013781
3102
-static void cxgb_add_udp_tunnel(struct net_device *netdev,
3103
- struct udp_tunnel_info *ti)
3782
+static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
3783
+ unsigned int table, unsigned int entry,
3784
+ struct udp_tunnel_info *ti)
31043785 {
31053786 struct port_info *pi = netdev_priv(netdev);
31063787 struct adapter *adapter = pi->adapter;
3107
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
31083788 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
31093789 int i, ret;
31103790
3111
- if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
3112
- return;
3113
-
31143791 switch (ti->type) {
31153792 case UDP_TUNNEL_TYPE_VXLAN:
3116
- /* Callback for adding vxlan port can be called with the same
3117
- * port for both IPv4 and IPv6. We should not disable the
3118
- * offloading when the same port for both protocols is added
3119
- * and later one of them is removed.
3120
- */
3121
- if (adapter->vxlan_port_cnt &&
3122
- adapter->vxlan_port == ti->port) {
3123
- adapter->vxlan_port_cnt++;
3124
- return;
3125
- }
3126
-
3127
- /* We will support only one VxLAN port */
3128
- if (adapter->vxlan_port_cnt) {
3129
- netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3130
- be16_to_cpu(adapter->vxlan_port),
3131
- be16_to_cpu(ti->port));
3132
- return;
3133
- }
3134
-
31353793 adapter->vxlan_port = ti->port;
3136
- adapter->vxlan_port_cnt = 1;
3137
-
31383794 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
31393795 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
31403796 break;
31413797 case UDP_TUNNEL_TYPE_GENEVE:
3142
- if (adapter->geneve_port_cnt &&
3143
- adapter->geneve_port == ti->port) {
3144
- adapter->geneve_port_cnt++;
3145
- return;
3146
- }
3147
-
3148
- /* We will support only one GENEVE port */
3149
- if (adapter->geneve_port_cnt) {
3150
- netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3151
- be16_to_cpu(adapter->geneve_port),
3152
- be16_to_cpu(ti->port));
3153
- return;
3154
- }
3155
-
31563798 adapter->geneve_port = ti->port;
3157
- adapter->geneve_port_cnt = 1;
3158
-
31593799 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
31603800 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
31613801 break;
31623802 default:
3163
- return;
3803
+ return -EINVAL;
31643804 }
31653805
31663806 /* Create a 'match all' mac filter entry for inner mac,
....@@ -3175,18 +3815,26 @@
31753815 ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
31763816 match_all_mac,
31773817 match_all_mac,
3178
- adapter->rawf_start +
3179
- pi->port_id,
3818
+ adapter->rawf_start + pi->port_id,
31803819 1, pi->port_id, false);
31813820 if (ret < 0) {
31823821 netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
31833822 be16_to_cpu(ti->port));
3184
- cxgb_del_udp_tunnel(netdev, ti);
3185
- return;
3823
+ return ret;
31863824 }
3187
- atomic_inc(&adapter->mps_encap[ret].refcnt);
31883825 }
3826
+
3827
+ return 0;
31893828 }
3829
+
3830
+static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
3831
+ .set_port = cxgb_udp_tunnel_set_port,
3832
+ .unset_port = cxgb_udp_tunnel_unset_port,
3833
+ .tables = {
3834
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
3835
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
3836
+ },
3837
+};
31903838
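/* With the udp_tunnel_nic infrastructure, the core now tracks how many
 * sockets reference each VXLAN/GENEVE UDP port and only calls ->set_port /
 * ->unset_port when the single hardware entry per tunnel type actually
 * needs (re)programming, which is why the driver-private vxlan_port_cnt /
 * geneve_port_cnt bookkeeping removed above is no longer needed.  The
 * table is attached to each net_device further down in this patch,
 * roughly:
 *
 *	if (adapter->rawf_cnt)
 *		netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
 */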
31913839 static netdev_features_t cxgb_features_check(struct sk_buff *skb,
31923840 struct net_device *dev,
....@@ -3237,20 +3885,21 @@
32373885 #endif /* CONFIG_CHELSIO_T4_FCOE */
32383886 .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
32393887 .ndo_setup_tc = cxgb_setup_tc,
3240
- .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
3241
- .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
3888
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
3889
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
32423890 .ndo_features_check = cxgb_features_check,
32433891 .ndo_fix_features = cxgb_fix_features,
32443892 };
32453893
32463894 #ifdef CONFIG_PCI_IOV
32473895 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3248
- .ndo_open = cxgb4_mgmt_open,
3249
- .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
3250
- .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
3251
- .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
3252
- .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
3253
- .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
3896
+ .ndo_open = cxgb4_mgmt_open,
3897
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
3898
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
3899
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
3900
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
3901
+ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
3902
+ .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
32543903 };
32553904 #endif
32563905
....@@ -3260,8 +3909,6 @@
32603909 struct adapter *adapter = netdev2adap(dev);
32613910
32623911 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3263
- strlcpy(info->version, cxgb4_driver_version,
3264
- sizeof(info->version));
32653912 strlcpy(info->bus_info, pci_name(adapter->pdev),
32663913 sizeof(info->bus_info));
32673914 }
....@@ -3784,8 +4431,7 @@
37844431
37854432 /* Load PHY Firmware onto adapter.
37864433 */
3787
- ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3788
- phy_info->phy_fw_version,
4434
+ ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
37894435 (u8 *)phyf->data, phyf->size);
37904436 if (ret < 0)
37914437 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
....@@ -3811,14 +4457,14 @@
38114457 */
38124458 static int adap_init0_config(struct adapter *adapter, int reset)
38134459 {
3814
- struct fw_caps_config_cmd caps_cmd;
3815
- const struct firmware *cf;
3816
- unsigned long mtype = 0, maddr = 0;
3817
- u32 finiver, finicsum, cfcsum;
3818
- int ret;
3819
- int config_issued = 0;
38204460 char *fw_config_file, fw_config_file_path[256];
4461
+ u32 finiver, finicsum, cfcsum, param, val;
4462
+ struct fw_caps_config_cmd caps_cmd;
4463
+ unsigned long mtype = 0, maddr = 0;
4464
+ const struct firmware *cf;
38214465 char *config_name = NULL;
4466
+ int config_issued = 0;
4467
+ int ret;
38224468
38234469 /*
38244470 * Reset device if necessary.
....@@ -3926,6 +4572,24 @@
39264572 goto bye;
39274573 }
39284574
4575
+ val = 0;
4576
+
4577
+ /* Ofld + Hash filter is supported. Older fw will fail this request and
4578
+ * it is fine.
4579
+ */
4580
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4581
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
4582
+ ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
4583
+ 1, &param, &val);
4584
+
4585
+ /* FW doesn't know about Hash filter + ofld support,
4586
+ * it's not a problem, don't return an error.
4587
+ */
4588
+ if (ret < 0) {
4589
+ dev_warn(adapter->pdev_dev,
4590
+ "Hash filter with ofld is not supported by FW\n");
4591
+ }
4592
+
39294593 /*
39304594 * Issue a Capability Configuration command to the firmware to get it
39314595 * to parse the Configuration File. We don't use t4_fw_config_file()
....@@ -4001,6 +4665,14 @@
40014665 if (ret)
40024666 dev_err(adapter->pdev_dev,
40034667 "HMA configuration failed with error %d\n", ret);
4668
+
4669
+ if (is_t6(adapter->params.chip)) {
4670
+ adap_config_hpfilter(adapter);
4671
+ ret = setup_ppod_edram(adapter);
4672
+ if (!ret)
4673
+ dev_info(adapter->pdev_dev, "Successfully enabled "
4674
+ "ppod edram feature\n");
4675
+ }
40044676
40054677 /*
40064678 * And finally tell the firmware to initialize itself using the
....@@ -4091,14 +4763,14 @@
40914763 /*
40924764 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
40934765 */
4094
-static int adap_init0(struct adapter *adap)
4766
+static int adap_init0(struct adapter *adap, int vpd_skip)
40954767 {
4096
- int ret;
4097
- u32 v, port_vec;
4098
- enum dev_state state;
4099
- u32 params[7], val[7];
41004768 struct fw_caps_config_cmd caps_cmd;
4769
+ u32 params[7], val[7];
4770
+ enum dev_state state;
4771
+ u32 v, port_vec;
41014772 int reset = 1;
4773
+ int ret;
41024774
41034775 /* Grab Firmware Device Log parameters as early as possible so we have
41044776 * access to it for debugging, etc.
....@@ -4116,7 +4788,7 @@
41164788 return ret;
41174789 }
41184790 if (ret == adap->mbox)
4119
- adap->flags |= MASTER_PF;
4791
+ adap->flags |= CXGB4_MASTER_PF;
41204792
41214793 /*
41224794 * If we're the Master PF Driver and the device is uninitialized,
....@@ -4131,7 +4803,7 @@
41314803 /* If firmware is too old (not supported by driver) force an update. */
41324804 if (ret)
41334805 state = DEV_STATE_UNINIT;
4134
- if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4806
+ if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
41354807 struct fw_info *fw_info;
41364808 struct fw_hdr *card_fw;
41374809 const struct firmware *fw;
....@@ -4193,7 +4865,7 @@
41934865 ret);
41944866 dev_info(adap->pdev_dev, "Coming up as %s: "\
41954867 "Adapter already initialized\n",
4196
- adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4868
+ adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
41974869 } else {
41984870 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
41994871 "Initializing adapter\n");
....@@ -4253,9 +4925,11 @@
42534925 * could have FLASHed a new VPD which won't be read by the firmware
42544926 * until we do the RESET ...
42554927 */
4256
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
4257
- if (ret < 0)
4258
- goto bye;
4928
+ if (!vpd_skip) {
4929
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
4930
+ if (ret < 0)
4931
+ goto bye;
4932
+ }
42594933
42604934 /* Find out what ports are available to us. Note that we need to do
42614935 * this before calling adap_init0_no_config() since it needs nports
....@@ -4279,22 +4953,30 @@
42794953 if (ret < 0)
42804954 goto bye;
42814955
4956
+ /* Grab the SGE Doorbell Queue Timer values. If successful, that
4957
+ * indicates that the Firmware and Hardware support this.
4958
+ */
4959
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4960
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
4961
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4962
+ 1, params, val);
4963
+
4964
+ if (!ret) {
4965
+ adap->sge.dbqtimer_tick = val[0];
4966
+ ret = t4_read_sge_dbqtimers(adap,
4967
+ ARRAY_SIZE(adap->sge.dbqtimer_val),
4968
+ adap->sge.dbqtimer_val);
4969
+ }
4970
+
4971
+ if (!ret)
4972
+ adap->flags |= CXGB4_SGE_DBQ_TIMER;
4973
+
42824974 if (is_bypass_device(adap->pdev->device))
42834975 adap->params.bypass = 1;
42844976
42854977 /*
42864978 * Grab some of our basic fundamental operating parameters.
42874979 */
4288
-#define FW_PARAM_DEV(param) \
4289
- (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
4290
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
4291
-
4292
-#define FW_PARAM_PFVF(param) \
4293
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
4294
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
4295
- FW_PARAMS_PARAM_Y_V(0) | \
4296
- FW_PARAMS_PARAM_Z_V(0)
4297
-
42984980 params[0] = FW_PARAM_PFVF(EQ_START);
42994981 params[1] = FW_PARAM_PFVF(L2T_START);
43004982 params[2] = FW_PARAM_PFVF(L2T_END);
....@@ -4312,6 +4994,16 @@
43124994 adap->sge.ingr_start = val[5];
43134995
43144996 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4997
+ params[0] = FW_PARAM_PFVF(HPFILTER_START);
4998
+ params[1] = FW_PARAM_PFVF(HPFILTER_END);
4999
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5000
+ params, val);
5001
+ if (ret < 0)
5002
+ goto bye;
5003
+
5004
+ adap->tids.hpftid_base = val[0];
5005
+ adap->tids.nhpftids = val[1] - val[0] + 1;
5006
+
43155007 /* Read the raw mps entries. In T6, the last 2 tcam entries
43165008 * are reserved for raw mac addresses (rawf = 2, one per port).
43175009 */
....@@ -4323,6 +5015,9 @@
43235015 adap->rawf_start = val[0];
43245016 adap->rawf_cnt = val[1] - val[0] + 1;
43255017 }
5018
+
5019
+ adap->tids.tid_base =
5020
+ t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
43265021 }
43275022
43285023 /* qids (ingress/egress) returned from firmware can be anywhere
....@@ -4377,6 +5072,7 @@
43775072 ret = -ENOMEM;
43785073 goto bye;
43795074 }
5075
+ bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
43805076 #endif
43815077
43825078 params[0] = FW_PARAM_PFVF(CLIP_START);
....@@ -4387,11 +5083,18 @@
43875083 adap->clipt_start = val[0];
43885084 adap->clipt_end = val[1];
43895085
4390
- /* We don't yet have a PARAMs calls to retrieve the number of Traffic
4391
- * Classes supported by the hardware/firmware so we hard code it here
4392
- * for now.
4393
- */
4394
- adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5086
+ /* Get the supported number of traffic classes */
5087
+ params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5088
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5089
+ if (ret < 0) {
5090
+ /* We couldn't retrieve the number of Traffic Classes
5091
+ * supported by the hardware/firmware. So we hard
5092
+ * code it here.
5093
+ */
5094
+ adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5095
+ } else {
5096
+ adap->params.nsched_cls = val[0];
5097
+ }
43955098
43965099 /* query params related to active filter region */
43975100 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
....@@ -4401,7 +5104,7 @@
44015104 * offload connection through firmware work request
44025105 */
44035106 if ((val[0] != val[1]) && (ret >= 0)) {
4404
- adap->flags |= FW_OFLD_CONN;
5107
+ adap->flags |= CXGB4_FW_OFLD_CONN;
44055108 adap->tids.aftid_base = val[0];
44065109 adap->tids.aftid_end = val[1];
44075110 }
....@@ -4446,6 +5149,15 @@
44465149 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
44475150 }
44485151
5152
+ /* Check if FW supports returning vin and smt index.
5153
+ * If this is not supported, driver will interpret
5154
+ * these values from viid.
5155
+ */
5156
+ params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
5157
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5158
+ 1, params, val);
5159
+ adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
5160
+
44495161 /*
44505162 * Get device capabilities so we can determine what resources we need
44515163 * to manage.
....@@ -4459,8 +5171,16 @@
44595171 if (ret < 0)
44605172 goto bye;
44615173
5174
+ /* hash filter has some mandatory register settings to be tested and for
5175
+ * that it needs to test whether offload is enabled or not, hence
5176
+ * checking and setting it here.
5177
+ */
5178
+ if (caps_cmd.ofldcaps)
5179
+ adap->params.offload = 1;
5180
+
44625181 if (caps_cmd.ofldcaps ||
4463
- (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
5182
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
5183
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
44645184 /* query offload-related parameters */
44655185 params[0] = FW_PARAM_DEV(NTID);
44665186 params[1] = FW_PARAM_PFVF(SERVER_START);
....@@ -4485,7 +5205,7 @@
44855205	 * 2. Server filter: These are special filters which are used
44865206 * to redirect SYN packets to offload queue.
44875207 */
4488
- if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5208
+ if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
44895209 adap->tids.sftid_base = adap->tids.ftid_base +
44905210 DIV_ROUND_UP(adap->tids.nftids, 3);
44915211 adap->tids.nsftids = adap->tids.nftids -
....@@ -4498,12 +5218,22 @@
44985218 adap->params.ofldq_wr_cred = val[5];
44995219
45005220 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4501
- ret = init_hash_filter(adap);
4502
- if (ret < 0)
4503
- goto bye;
5221
+ init_hash_filter(adap);
45045222 } else {
4505
- adap->params.offload = 1;
45065223 adap->num_ofld_uld += 1;
5224
+ }
5225
+
5226
+ if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
5227
+ params[0] = FW_PARAM_PFVF(ETHOFLD_START);
5228
+ params[1] = FW_PARAM_PFVF(ETHOFLD_END);
5229
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5230
+ params, val);
5231
+ if (!ret) {
5232
+ adap->tids.eotid_base = val[0];
5233
+ adap->tids.neotids = min_t(u32, MAX_ATIDS,
5234
+ val[1] - val[0] + 1);
5235
+ adap->params.ethofld = 1;
5236
+ }
45075237 }
45085238 }
45095239 if (caps_cmd.rdmacaps) {
....@@ -4594,6 +5324,22 @@
45945324 goto bye;
45955325 adap->vres.iscsi.start = val[0];
45965326 adap->vres.iscsi.size = val[1] - val[0] + 1;
5327
+ if (is_t6(adap->params.chip)) {
5328
+ params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
5329
+ params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
5330
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5331
+ params, val);
5332
+ if (!ret) {
5333
+ adap->vres.ppod_edram.start = val[0];
5334
+ adap->vres.ppod_edram.size =
5335
+ val[1] - val[0] + 1;
5336
+
5337
+ dev_info(adap->pdev_dev,
5338
+ "ppod edram start 0x%x end 0x%x size 0x%x\n",
5339
+ val[0], val[1],
5340
+ adap->vres.ppod_edram.size);
5341
+ }
5342
+ }
45975343	/* LIO target and cxgb4i initiator */
45985344 adap->num_ofld_uld += 2;
45995345 }
....@@ -4625,8 +5371,6 @@
46255371 }
46265372 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
46275373 }
4628
-#undef FW_PARAM_PFVF
4629
-#undef FW_PARAM_DEV
46305374
46315375 /* The MTU/MSS Table is initialized by now, so load their values. If
46325376 * we're initializing the adapter, then we'll make any modifications
....@@ -4664,7 +5408,7 @@
46645408 adap->params.b_wnd);
46655409 }
46665410 t4_init_sge_params(adap);
4667
- adap->flags |= FW_OK;
5411
+ adap->flags |= CXGB4_FW_OK;
46685412 t4_init_tp_params(adap, true);
46695413 return 0;
46705414
....@@ -4699,7 +5443,7 @@
46995443 goto out;
47005444
47015445 rtnl_lock();
4702
- adap->flags &= ~FW_OK;
5446
+ adap->flags &= ~CXGB4_FW_OK;
47035447 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
47045448 spin_lock(&adap->stats_lock);
47055449 for_each_port(adap, i) {
....@@ -4711,12 +5455,12 @@
47115455 }
47125456 spin_unlock(&adap->stats_lock);
47135457 disable_interrupts(adap);
4714
- if (adap->flags & FULL_INIT_DONE)
5458
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
47155459 cxgb_down(adap);
47165460 rtnl_unlock();
4717
- if ((adap->flags & DEV_ENABLED)) {
5461
+ if ((adap->flags & CXGB4_DEV_ENABLED)) {
47185462 pci_disable_device(pdev);
4719
- adap->flags &= ~DEV_ENABLED;
5463
+ adap->flags &= ~CXGB4_DEV_ENABLED;
47205464 }
47215465 out: return state == pci_channel_io_perm_failure ?
47225466 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
....@@ -4734,37 +5478,48 @@
47345478 return PCI_ERS_RESULT_RECOVERED;
47355479 }
47365480
4737
- if (!(adap->flags & DEV_ENABLED)) {
5481
+ if (!(adap->flags & CXGB4_DEV_ENABLED)) {
47385482 if (pci_enable_device(pdev)) {
47395483 dev_err(&pdev->dev, "Cannot reenable PCI "
47405484 "device after reset\n");
47415485 return PCI_ERS_RESULT_DISCONNECT;
47425486 }
4743
- adap->flags |= DEV_ENABLED;
5487
+ adap->flags |= CXGB4_DEV_ENABLED;
47445488 }
47455489
47465490 pci_set_master(pdev);
47475491 pci_restore_state(pdev);
47485492 pci_save_state(pdev);
4749
- pci_cleanup_aer_uncorrect_error_status(pdev);
47505493
47515494 if (t4_wait_dev_ready(adap->regs) < 0)
47525495 return PCI_ERS_RESULT_DISCONNECT;
47535496 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
47545497 return PCI_ERS_RESULT_DISCONNECT;
4755
- adap->flags |= FW_OK;
5498
+ adap->flags |= CXGB4_FW_OK;
47565499 if (adap_init1(adap, &c))
47575500 return PCI_ERS_RESULT_DISCONNECT;
47585501
47595502 for_each_port(adap, i) {
4760
- struct port_info *p = adap2pinfo(adap, i);
5503
+ struct port_info *pi = adap2pinfo(adap, i);
5504
+ u8 vivld = 0, vin = 0;
47615505
4762
- ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
4763
- NULL, NULL);
5506
+ ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5507
+ NULL, NULL, &vivld, &vin);
47645508 if (ret < 0)
47655509 return PCI_ERS_RESULT_DISCONNECT;
4766
- p->viid = ret;
4767
- p->xact_addr_filt = -1;
5510
+ pi->viid = ret;
5511
+ pi->xact_addr_filt = -1;
5512
+ /* If fw supports returning the VIN as part of FW_VI_CMD,
5513
+ * save the returned values.
5514
+ */
5515
+ if (adap->params.viid_smt_extn_support) {
5516
+ pi->vivld = vivld;
5517
+ pi->vin = vin;
5518
+ } else {
5519
+ /* Retrieve the values from VIID */
5520
+ pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5521
+ pi->vin = FW_VIID_VIN_G(pi->viid);
5522
+ }
47685523 }
47695524
47705525 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
....@@ -4797,10 +5552,93 @@
47975552 rtnl_unlock();
47985553 }
47995554
5555
+static void eeh_reset_prepare(struct pci_dev *pdev)
5556
+{
5557
+ struct adapter *adapter = pci_get_drvdata(pdev);
5558
+ int i;
5559
+
5560
+ if (adapter->pf != 4)
5561
+ return;
5562
+
5563
+ adapter->flags &= ~CXGB4_FW_OK;
5564
+
5565
+ notify_ulds(adapter, CXGB4_STATE_DOWN);
5566
+
5567
+ for_each_port(adapter, i)
5568
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5569
+ cxgb_close(adapter->port[i]);
5570
+
5571
+ disable_interrupts(adapter);
5572
+ cxgb4_free_mps_ref_entries(adapter);
5573
+
5574
+ adap_free_hma_mem(adapter);
5575
+
5576
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
5577
+ cxgb_down(adapter);
5578
+}
5579
+
5580
+static void eeh_reset_done(struct pci_dev *pdev)
5581
+{
5582
+ struct adapter *adapter = pci_get_drvdata(pdev);
5583
+ int err, i;
5584
+
5585
+ if (adapter->pf != 4)
5586
+ return;
5587
+
5588
+ err = t4_wait_dev_ready(adapter->regs);
5589
+ if (err < 0) {
5590
+ dev_err(adapter->pdev_dev,
5591
+ "Device not ready, err %d", err);
5592
+ return;
5593
+ }
5594
+
5595
+ setup_memwin(adapter);
5596
+
5597
+ err = adap_init0(adapter, 1);
5598
+ if (err) {
5599
+ dev_err(adapter->pdev_dev,
5600
+ "Adapter init failed, err %d", err);
5601
+ return;
5602
+ }
5603
+
5604
+ setup_memwin_rdma(adapter);
5605
+
5606
+ if (adapter->flags & CXGB4_FW_OK) {
5607
+ err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5608
+ if (err) {
5609
+ dev_err(adapter->pdev_dev,
5610
+ "Port init failed, err %d", err);
5611
+ return;
5612
+ }
5613
+ }
5614
+
5615
+ err = cfg_queues(adapter);
5616
+ if (err) {
5617
+ dev_err(adapter->pdev_dev,
5618
+ "Config queues failed, err %d", err);
5619
+ return;
5620
+ }
5621
+
5622
+ cxgb4_init_mps_ref_entries(adapter);
5623
+
5624
+ err = setup_fw_sge_queues(adapter);
5625
+ if (err) {
5626
+ dev_err(adapter->pdev_dev,
5627
+ "FW sge queue allocation failed, err %d", err);
5628
+ return;
5629
+ }
5630
+
5631
+ for_each_port(adapter, i)
5632
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5633
+ cxgb_open(adapter->port[i]);
5634
+}
5635
+
48005636 static const struct pci_error_handlers cxgb4_eeh = {
48015637 .error_detected = eeh_err_detected,
48025638 .slot_reset = eeh_slot_reset,
48035639 .resume = eeh_resume,
5640
+ .reset_prepare = eeh_reset_prepare,
5641
+ .reset_done = eeh_reset_done,
48045642 };
48055643
48065644 /* Return true if the Link Configuration supports "High Speeds" (those greater
....@@ -4817,26 +5655,24 @@
48175655 return high_speeds != 0;
48185656 }
48195657
4820
-/*
4821
- * Perform default configuration of DMA queues depending on the number and type
5658
+/* Perform default configuration of DMA queues depending on the number and type
48225659 * of ports we found and the number of available CPUs. Most settings can be
48235660 * modified by the admin prior to actual use.
48245661 */
48255662 static int cfg_queues(struct adapter *adap)
48265663 {
5664
+ u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
5665
+ u32 ncpus = num_online_cpus();
5666
+ u32 niqflint, neq, num_ulds;
48275667 struct sge *s = &adap->sge;
4828
- int i, n10g = 0, qidx = 0;
4829
- int niqflint, neq, avail_eth_qsets;
4830
- int max_eth_qsets = 32;
4831
-#ifndef CONFIG_CHELSIO_T4_DCB
4832
- int q10g = 0;
4833
-#endif
5668
+ u32 i, n10g = 0, qidx = 0;
5669
+ u32 q10g = 0, q1g;
48345670
4835
- /* Reduce memory usage in kdump environment, disable all offload.
4836
- */
5671
+ /* Reduce memory usage in kdump environment, disable all offload. */
48375672 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
48385673 adap->params.offload = 0;
48395674 adap->params.crypto = 0;
5675
+ adap->params.ethofld = 0;
48405676 }
48415677
48425678 /* Calculate the number of Ethernet Queue Sets available based on
....@@ -4852,17 +5688,14 @@
48525688 * at all is problematic ...
48535689 */
48545690 niqflint = adap->params.pfres.niqflint - 1;
4855
- if (!(adap->flags & USING_MSIX))
5691
+ if (!(adap->flags & CXGB4_USING_MSIX))
48565692 niqflint--;
48575693 neq = adap->params.pfres.neq / 2;
4858
- avail_eth_qsets = min(niqflint, neq);
5694
+ avail_qsets = min(niqflint, neq);
48595695
4860
- if (avail_eth_qsets > max_eth_qsets)
4861
- avail_eth_qsets = max_eth_qsets;
4862
-
4863
- if (avail_eth_qsets < adap->params.nports) {
5696
+ if (avail_qsets < adap->params.nports) {
48645697 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
4865
- avail_eth_qsets, adap->params.nports);
5698
+ avail_qsets, adap->params.nports);
48665699 return -ENOMEM;
48675700 }
48685701
....@@ -4870,62 +5703,102 @@
48705703 for_each_port(adap, i)
48715704 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
48725705
5706
+ avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
5707
+
5708
+ /* We default to 1 queue per non-10G port and up to # of cores queues
5709
+ * per 10G port.
5710
+ */
5711
+ if (n10g)
5712
+ q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
5713
+
48735714 #ifdef CONFIG_CHELSIO_T4_DCB
48745715 /* For Data Center Bridging support we need to be able to support up
48755716 * to 8 Traffic Priorities; each of which will be assigned to its
48765717 * own TX Queue in order to prevent Head-Of-Line Blocking.
48775718 */
5719
+ q1g = 8;
48785720 if (adap->params.nports * 8 > avail_eth_qsets) {
48795721 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
48805722 avail_eth_qsets, adap->params.nports * 8);
48815723 return -ENOMEM;
48825724 }
48835725
4884
- for_each_port(adap, i) {
4885
- struct port_info *pi = adap2pinfo(adap, i);
5726
+ if (adap->params.nports * ncpus < avail_eth_qsets)
5727
+ q10g = max(8U, ncpus);
5728
+ else
5729
+ q10g = max(8U, q10g);
48865730
4887
- pi->first_qset = qidx;
4888
- pi->nqsets = is_kdump_kernel() ? 1 : 8;
4889
- qidx += pi->nqsets;
4890
- }
5731
+ while ((q10g * n10g) >
5732
+ (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
5733
+ q10g--;
5734
+
48915735 #else /* !CONFIG_CHELSIO_T4_DCB */
4892
- /*
4893
- * We default to 1 queue per non-10G port and up to # of cores queues
4894
- * per 10G port.
4895
- */
4896
- if (n10g)
4897
- q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
4898
- if (q10g > netif_get_num_default_rss_queues())
4899
- q10g = netif_get_num_default_rss_queues();
4900
-
4901
- if (is_kdump_kernel())
5736
+ q1g = 1;
5737
+ q10g = min(q10g, ncpus);
5738
+#endif /* !CONFIG_CHELSIO_T4_DCB */
5739
+ if (is_kdump_kernel()) {
49025740 q10g = 1;
5741
+ q1g = 1;
5742
+ }
49035743
49045744 for_each_port(adap, i) {
49055745 struct port_info *pi = adap2pinfo(adap, i);
49065746
49075747 pi->first_qset = qidx;
4908
- pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
5748
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
49095749 qidx += pi->nqsets;
49105750 }
4911
-#endif /* !CONFIG_CHELSIO_T4_DCB */
49125751
49135752 s->ethqsets = qidx;
49145753 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5754
+ avail_qsets -= qidx;
49155755
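	/* Worked example of the Ethernet queue-set sizing above (numbers are
	 * illustrative only): a non-DCB build with two ports, both 10G or
	 * faster (n10g = 2), avail_eth_qsets = 64 and ncpus = 16 gives
	 * q10g = (64 - 0) / 2 = 32, clamped to min(32, ncpus) = 16, with
	 * q1g = 1; each port therefore gets 16 queue sets and qidx ends up
	 * at 32 before the remaining queue sets are handed to the ULDs.
	 */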
49165756 if (is_uld(adap)) {
4917
- /*
4918
- * For offload we use 1 queue/channel if all ports are up to 1G,
5757
+ /* For offload we use 1 queue/channel if all ports are up to 1G,
49195758 * otherwise we divide all available queues amongst the channels
49205759 * capped by the number of available cores.
49215760 */
4922
- if (n10g) {
4923
- i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
4924
- s->ofldqsets = roundup(i, adap->params.nports);
4925
- } else {
5761
+ num_ulds = adap->num_uld + adap->num_ofld_uld;
5762
+ i = min_t(u32, MAX_OFLD_QSETS, ncpus);
5763
+ avail_uld_qsets = roundup(i, adap->params.nports);
5764
+ if (avail_qsets < num_ulds * adap->params.nports) {
5765
+ adap->params.offload = 0;
5766
+ adap->params.crypto = 0;
5767
+ s->ofldqsets = 0;
5768
+ } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
49265769 s->ofldqsets = adap->params.nports;
5770
+ } else {
5771
+ s->ofldqsets = avail_uld_qsets;
49275772 }
5773
+
5774
+ avail_qsets -= num_ulds * s->ofldqsets;
49285775 }
5776
+
5777
+ /* ETHOFLD Queues used for QoS offload should follow same
5778
+ * allocation scheme as normal Ethernet Queues.
5779
+ */
5780
+ if (is_ethofld(adap)) {
5781
+ if (avail_qsets < s->max_ethqsets) {
5782
+ adap->params.ethofld = 0;
5783
+ s->eoqsets = 0;
5784
+ } else {
5785
+ s->eoqsets = s->max_ethqsets;
5786
+ }
5787
+ avail_qsets -= s->eoqsets;
5788
+ }
5789
+
5790
+ /* Mirror queues must follow same scheme as normal Ethernet
5791
+ * Queues, when there are enough queues available. Otherwise,
5792
+ * allocate at least 1 queue per port. If even 1 queue is not
5793
+ * available, then disable mirror queues support.
5794
+ */
5795
+ if (avail_qsets >= s->max_ethqsets)
5796
+ s->mirrorqsets = s->max_ethqsets;
5797
+ else if (avail_qsets >= adap->params.nports)
5798
+ s->mirrorqsets = adap->params.nports;
5799
+ else
5800
+ s->mirrorqsets = 0;
5801
+ avail_qsets -= s->mirrorqsets;
49295802
49305803 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
49315804 struct sge_eth_rxq *r = &s->ethrxq[i];
....@@ -4977,42 +5850,62 @@
49775850 }
49785851 }
49795852
4980
-static int get_msix_info(struct adapter *adap)
5853
+static int alloc_msix_info(struct adapter *adap, u32 num_vec)
49815854 {
4982
- struct uld_msix_info *msix_info;
4983
- unsigned int max_ingq = 0;
5855
+ struct msix_info *msix_info;
49845856
4985
- if (is_offload(adap))
4986
- max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
4987
- if (is_pci_uld(adap))
4988
- max_ingq += MAX_OFLD_QSETS * adap->num_uld;
4989
-
4990
- if (!max_ingq)
4991
- goto out;
4992
-
4993
- msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
5857
+ msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
49945858 if (!msix_info)
49955859 return -ENOMEM;
49965860
4997
- adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
4998
- sizeof(long), GFP_KERNEL);
4999
- if (!adap->msix_bmap_ulds.msix_bmap) {
5861
+ adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
5862
+ sizeof(long), GFP_KERNEL);
5863
+ if (!adap->msix_bmap.msix_bmap) {
50005864 kfree(msix_info);
50015865 return -ENOMEM;
50025866 }
5003
- spin_lock_init(&adap->msix_bmap_ulds.lock);
5004
- adap->msix_info_ulds = msix_info;
5005
-out:
5867
+
5868
+ spin_lock_init(&adap->msix_bmap.lock);
5869
+ adap->msix_bmap.mapsize = num_vec;
5870
+
5871
+ adap->msix_info = msix_info;
50065872 return 0;
50075873 }
50085874
50095875 static void free_msix_info(struct adapter *adap)
50105876 {
5011
- if (!(adap->num_uld && adap->num_ofld_uld))
5012
- return;
5877
+ kfree(adap->msix_bmap.msix_bmap);
5878
+ kfree(adap->msix_info);
5879
+}
50135880
5014
- kfree(adap->msix_info_ulds);
5015
- kfree(adap->msix_bmap_ulds.msix_bmap);
5881
+int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
5882
+{
5883
+ struct msix_bmap *bmap = &adap->msix_bmap;
5884
+ unsigned int msix_idx;
5885
+ unsigned long flags;
5886
+
5887
+ spin_lock_irqsave(&bmap->lock, flags);
5888
+ msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5889
+ if (msix_idx < bmap->mapsize) {
5890
+ __set_bit(msix_idx, bmap->msix_bmap);
5891
+ } else {
5892
+ spin_unlock_irqrestore(&bmap->lock, flags);
5893
+ return -ENOSPC;
5894
+ }
5895
+
5896
+ spin_unlock_irqrestore(&bmap->lock, flags);
5897
+ return msix_idx;
5898
+}
5899
+
5900
+void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
5901
+ unsigned int msix_idx)
5902
+{
5903
+ struct msix_bmap *bmap = &adap->msix_bmap;
5904
+ unsigned long flags;
5905
+
5906
+ spin_lock_irqsave(&bmap->lock, flags);
5907
+ __clear_bit(msix_idx, bmap->msix_bmap);
5908
+ spin_unlock_irqrestore(&bmap->lock, flags);
50165909 }
50175910
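/* The two helpers above form a single adapter-wide MSI-X index allocator
 * protected by bmap->lock.  A caller is expected to do something along
 * these lines (sketch only):
 *
 *	idx = cxgb4_get_msix_idx_from_bmap(adap);
 *	if (idx < 0)
 *		return idx;
 *	ret = request_irq(adap->msix_info[idx].vec, handler, 0, name, q);
 *	if (ret)
 *		cxgb4_free_msix_idx_in_bmap(adap, idx);
 */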
50185911 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
....@@ -5020,88 +5913,194 @@
50205913
50215914 static int enable_msix(struct adapter *adap)
50225915 {
5023
- int ofld_need = 0, uld_need = 0;
5024
- int i, j, want, need, allocated;
5916
+ u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
5917
+ u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
5918
+ u8 num_uld = 0, nchan = adap->params.nports;
5919
+ u32 i, want, need, num_vec;
50255920 struct sge *s = &adap->sge;
5026
- unsigned int nchan = adap->params.nports;
50275921 struct msix_entry *entries;
5028
- int max_ingq = MAX_INGQ;
5922
+ struct port_info *pi;
5923
+ int allocated, ret;
50295924
5030
- if (is_pci_uld(adap))
5031
- max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
5032
- if (is_offload(adap))
5033
- max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
5034
- entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
5035
- GFP_KERNEL);
5036
- if (!entries)
5037
- return -ENOMEM;
5038
-
5039
- /* map for msix */
5040
- if (get_msix_info(adap)) {
5041
- adap->params.offload = 0;
5042
- adap->params.crypto = 0;
5043
- }
5044
-
5045
- for (i = 0; i < max_ingq + 1; ++i)
5046
- entries[i].entry = i;
5047
-
5048
- want = s->max_ethqsets + EXTRA_VECS;
5049
- if (is_offload(adap)) {
5050
- want += adap->num_ofld_uld * s->ofldqsets;
5051
- ofld_need = adap->num_ofld_uld * nchan;
5052
- }
5053
- if (is_pci_uld(adap)) {
5054
- want += adap->num_uld * s->ofldqsets;
5055
- uld_need = adap->num_uld * nchan;
5056
- }
5925
+ want = s->max_ethqsets;
50575926 #ifdef CONFIG_CHELSIO_T4_DCB
50585927 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
50595928 * each port.
50605929 */
5061
- need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
5930
+ need = 8 * nchan;
50625931 #else
5063
- need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
5932
+ need = nchan;
50645933 #endif
5934
+ eth_need = need;
5935
+ if (is_uld(adap)) {
5936
+ num_uld = adap->num_ofld_uld + adap->num_uld;
5937
+ want += num_uld * s->ofldqsets;
5938
+ uld_need = num_uld * nchan;
5939
+ need += uld_need;
5940
+ }
5941
+
5942
+ if (is_ethofld(adap)) {
5943
+ want += s->eoqsets;
5944
+ ethofld_need = eth_need;
5945
+ need += ethofld_need;
5946
+ }
5947
+
5948
+ if (s->mirrorqsets) {
5949
+ want += s->mirrorqsets;
5950
+ mirror_need = nchan;
5951
+ need += mirror_need;
5952
+ }
5953
+
5954
+ want += EXTRA_VECS;
5955
+ need += EXTRA_VECS;
5956
+
5957
+ entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5958
+ if (!entries)
5959
+ return -ENOMEM;
5960
+
5961
+ for (i = 0; i < want; i++)
5962
+ entries[i].entry = i;
5963
+
50655964 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
50665965 if (allocated < 0) {
5067
- dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
5068
- " not using MSI-X\n");
5069
- kfree(entries);
5070
- return allocated;
5071
- }
5072
-
5073
- /* Distribute available vectors to the various queue groups.
5074
- * Every group gets its minimum requirement and NIC gets top
5075
- * priority for leftovers.
5076
- */
5077
- i = allocated - EXTRA_VECS - ofld_need - uld_need;
5078
- if (i < s->max_ethqsets) {
5079
- s->max_ethqsets = i;
5080
- if (i < s->ethqsets)
5081
- reduce_ethqs(adap, i);
5082
- }
5083
- if (is_uld(adap)) {
5084
- if (allocated < want)
5085
- s->nqs_per_uld = nchan;
5086
- else
5087
- s->nqs_per_uld = s->ofldqsets;
5088
- }
5089
-
5090
- for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
5091
- adap->msix_info[i].vec = entries[i].vector;
5092
- if (is_uld(adap)) {
5093
- for (j = 0 ; i < allocated; ++i, j++) {
5094
- adap->msix_info_ulds[j].vec = entries[i].vector;
5095
- adap->msix_info_ulds[j].idx = i;
5966
+ /* Disable offload and attempt to get vectors for NIC
5967
+ * only mode.
5968
+ */
5969
+ want = s->max_ethqsets + EXTRA_VECS;
5970
+ need = eth_need + EXTRA_VECS;
5971
+ allocated = pci_enable_msix_range(adap->pdev, entries,
5972
+ need, want);
5973
+ if (allocated < 0) {
5974
+ dev_info(adap->pdev_dev,
5975
+ "Disabling MSI-X due to insufficient MSI-X vectors\n");
5976
+ ret = allocated;
5977
+ goto out_free;
50965978 }
5097
- adap->msix_bmap_ulds.mapsize = j;
5979
+
5980
+ dev_info(adap->pdev_dev,
5981
+ "Disabling offload due to insufficient MSI-X vectors\n");
5982
+ adap->params.offload = 0;
5983
+ adap->params.crypto = 0;
5984
+ adap->params.ethofld = 0;
5985
+ s->ofldqsets = 0;
5986
+ s->eoqsets = 0;
5987
+ s->mirrorqsets = 0;
5988
+ uld_need = 0;
5989
+ ethofld_need = 0;
5990
+ mirror_need = 0;
50985991 }
5099
- dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
5100
- "nic %d per uld %d\n",
5101
- allocated, s->max_ethqsets, s->nqs_per_uld);
5992
+
5993
+ num_vec = allocated;
5994
+ if (num_vec < want) {
5995
+ /* Distribute available vectors to the various queue groups.
5996
+ * Every group gets its minimum requirement and NIC gets top
5997
+ * priority for leftovers.
5998
+ */
5999
+ ethqsets = eth_need;
6000
+ if (is_uld(adap))
6001
+ ofldqsets = nchan;
6002
+ if (is_ethofld(adap))
6003
+ eoqsets = ethofld_need;
6004
+ if (s->mirrorqsets)
6005
+ mirrorqsets = mirror_need;
6006
+
6007
+ num_vec -= need;
6008
+ while (num_vec) {
6009
+ if (num_vec < eth_need + ethofld_need ||
6010
+ ethqsets > s->max_ethqsets)
6011
+ break;
6012
+
6013
+ for_each_port(adap, i) {
6014
+ pi = adap2pinfo(adap, i);
6015
+ if (pi->nqsets < 2)
6016
+ continue;
6017
+
6018
+ ethqsets++;
6019
+ num_vec--;
6020
+ if (ethofld_need) {
6021
+ eoqsets++;
6022
+ num_vec--;
6023
+ }
6024
+ }
6025
+ }
6026
+
6027
+ if (is_uld(adap)) {
6028
+ while (num_vec) {
6029
+ if (num_vec < uld_need ||
6030
+ ofldqsets > s->ofldqsets)
6031
+ break;
6032
+
6033
+ ofldqsets++;
6034
+ num_vec -= uld_need;
6035
+ }
6036
+ }
6037
+
6038
+ if (s->mirrorqsets) {
6039
+ while (num_vec) {
6040
+ if (num_vec < mirror_need ||
6041
+ mirrorqsets > s->mirrorqsets)
6042
+ break;
6043
+
6044
+ mirrorqsets++;
6045
+ num_vec -= mirror_need;
6046
+ }
6047
+ }
6048
+ } else {
6049
+ ethqsets = s->max_ethqsets;
6050
+ if (is_uld(adap))
6051
+ ofldqsets = s->ofldqsets;
6052
+ if (is_ethofld(adap))
6053
+ eoqsets = s->eoqsets;
6054
+ if (s->mirrorqsets)
6055
+ mirrorqsets = s->mirrorqsets;
6056
+ }
6057
+
6058
+ if (ethqsets < s->max_ethqsets) {
6059
+ s->max_ethqsets = ethqsets;
6060
+ reduce_ethqs(adap, ethqsets);
6061
+ }
6062
+
6063
+ if (is_uld(adap)) {
6064
+ s->ofldqsets = ofldqsets;
6065
+ s->nqs_per_uld = s->ofldqsets;
6066
+ }
6067
+
6068
+ if (is_ethofld(adap))
6069
+ s->eoqsets = eoqsets;
6070
+
6071
+ if (s->mirrorqsets) {
6072
+ s->mirrorqsets = mirrorqsets;
6073
+ for_each_port(adap, i) {
6074
+ pi = adap2pinfo(adap, i);
6075
+ pi->nmirrorqsets = s->mirrorqsets / nchan;
6076
+ mutex_init(&pi->vi_mirror_mutex);
6077
+ }
6078
+ }
6079
+
6080
+ /* map for msix */
6081
+ ret = alloc_msix_info(adap, allocated);
6082
+ if (ret)
6083
+ goto out_disable_msix;
6084
+
6085
+ for (i = 0; i < allocated; i++) {
6086
+ adap->msix_info[i].vec = entries[i].vector;
6087
+ adap->msix_info[i].idx = i;
6088
+ }
6089
+
6090
+ dev_info(adap->pdev_dev,
6091
+ "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
6092
+ allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
6093
+ s->mirrorqsets);
51026094
51036095 kfree(entries);
51046096 return 0;
6097
+
6098
+out_disable_msix:
6099
+ pci_disable_msix(adap->pdev);
6100
+
6101
+out_free:
6102
+ kfree(entries);
6103
+ return ret;
51056104 }
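/* Worked example of the want/need sizing in enable_msix() (numbers are
 * illustrative only): a two-port, non-DCB adapter with max_ethqsets = 16,
 * two ULDs with ofldqsets = 16, eoqsets = 16 and mirrorqsets = 16 asks for
 * want = 16 + 2*16 + 16 + 16 + 2 = 82 vectors but can fall back to
 * need = 2 + 2*2 + 2 + 2 + 2 = 12; anything granted in between is spread
 * back over the queue groups, with the Ethernet queues given priority.
 */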
51066105
51076106 #undef EXTRA_VECS
....@@ -5134,8 +6133,8 @@
51346133 /* Software/Hardware configuration */
51356134 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
51366135 is_offload(adapter) ? "R" : "",
5137
- ((adapter->flags & USING_MSIX) ? "MSI-X" :
5138
- (adapter->flags & USING_MSI) ? "MSI" : ""),
6136
+ ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
6137
+ (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
51396138 is_offload(adapter) ? "Offload" : "non-Offload");
51406139 }
51416140
....@@ -5183,14 +6182,16 @@
51836182 {
51846183 unsigned int i;
51856184
5186
- kvfree(adapter->mps_encap);
51876185 kvfree(adapter->smt);
51886186 kvfree(adapter->l2t);
51896187 kvfree(adapter->srq);
51906188 t4_cleanup_sched(adapter);
51916189 kvfree(adapter->tids.tid_tab);
6190
+ cxgb4_cleanup_tc_matchall(adapter);
6191
+ cxgb4_cleanup_tc_mqprio(adapter);
51926192 cxgb4_cleanup_tc_flower(adapter);
51936193 cxgb4_cleanup_tc_u32(adapter);
6194
+ cxgb4_cleanup_ethtool_filters(adapter);
51946195 kfree(adapter->sge.egr_map);
51956196 kfree(adapter->sge.ingr_map);
51966197 kfree(adapter->sge.starving_fl);
....@@ -5210,13 +6211,14 @@
52106211 kfree(adap2pinfo(adapter, i)->rss);
52116212 free_netdev(adapter->port[i]);
52126213 }
5213
- if (adapter->flags & FW_OK)
6214
+ if (adapter->flags & CXGB4_FW_OK)
52146215 t4_fw_bye(adapter, adapter->pf);
52156216 }
52166217
5217
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6218
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
6219
+ NETIF_F_GSO_UDP_L4)
52186220 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5219
- NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6221
+ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
52206222 #define SEGMENT_SIZE 128
52216223
52226224 static int t4_get_chip_type(struct adapter *adap, int ver)
....@@ -5309,7 +6311,6 @@
53096311 char name[IFNAMSIZ];
53106312 u32 devcap2;
53116313 u16 flags;
5312
- int pos;
53136314
53146315 /* If we want to instantiate Virtual Functions, then our
53156316 * parent bridge's PCI-E needs to support Alternative Routing
....@@ -5317,9 +6318,8 @@
53176318 * and above.
53186319 */
53196320 pbridge = pdev->bus->self;
5320
- pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
5321
- pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
5322
- pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
6321
+ pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
6322
+ pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
53236323
53246324 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
53256325 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
....@@ -5399,6 +6399,213 @@
53996399 }
54006400 #endif /* CONFIG_PCI_IOV */
54016401
6402
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6403
+
6404
+static int chcr_offload_state(struct adapter *adap,
6405
+ enum cxgb4_netdev_tls_ops op_val)
6406
+{
6407
+ switch (op_val) {
6408
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6409
+ case CXGB4_TLSDEV_OPS:
6410
+ if (!adap->uld[CXGB4_ULD_KTLS].handle) {
6411
+ dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
6412
+ return -EOPNOTSUPP;
6413
+ }
6414
+ if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
6415
+ dev_dbg(adap->pdev_dev,
6416
+ "ch_ktls driver has no registered tlsdev_ops\n");
6417
+ return -EOPNOTSUPP;
6418
+ }
6419
+ break;
6420
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
6421
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6422
+ case CXGB4_XFRMDEV_OPS:
6423
+ if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
6424
+ dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n");
6425
+ return -EOPNOTSUPP;
6426
+ }
6427
+ if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
6428
+ dev_dbg(adap->pdev_dev,
6429
+ "chipsec driver has no registered xfrmdev_ops\n");
6430
+ return -EOPNOTSUPP;
6431
+ }
6432
+ break;
6433
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6434
+ default:
6435
+ dev_dbg(adap->pdev_dev,
6436
+ "driver has no support for offload %d\n", op_val);
6437
+ return -EOPNOTSUPP;
6438
+ }
6439
+
6440
+ return 0;
6441
+}
6442
+
6443
+#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
6444
+
6445
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6446
+
6447
+static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
6448
+ enum tls_offload_ctx_dir direction,
6449
+ struct tls_crypto_info *crypto_info,
6450
+ u32 tcp_sn)
6451
+{
6452
+ struct adapter *adap = netdev2adap(netdev);
6453
+ int ret;
6454
+
6455
+ mutex_lock(&uld_mutex);
6456
+ ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
6457
+ if (ret)
6458
+ goto out_unlock;
6459
+
6460
+ ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6461
+ if (ret)
6462
+ goto out_unlock;
6463
+
6464
+ ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
6465
+ direction,
6466
+ crypto_info,
6467
+ tcp_sn);
6468
+ /* if there is a failure, clear the refcount */
6469
+ if (ret)
6470
+ cxgb4_set_ktls_feature(adap,
6471
+ FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6472
+out_unlock:
6473
+ mutex_unlock(&uld_mutex);
6474
+ return ret;
6475
+}
6476
+
6477
+static void cxgb4_ktls_dev_del(struct net_device *netdev,
6478
+ struct tls_context *tls_ctx,
6479
+ enum tls_offload_ctx_dir direction)
6480
+{
6481
+ struct adapter *adap = netdev2adap(netdev);
6482
+
6483
+ mutex_lock(&uld_mutex);
6484
+ if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
6485
+ goto out_unlock;
6486
+
6487
+ adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6488
+ direction);
6489
+
6490
+out_unlock:
6491
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6492
+ mutex_unlock(&uld_mutex);
6493
+}
6494
+
6495
+static const struct tlsdev_ops cxgb4_ktls_ops = {
6496
+ .tls_dev_add = cxgb4_ktls_dev_add,
6497
+ .tls_dev_del = cxgb4_ktls_dev_del,
6498
+};
6499
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
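/* The tlsdev_ops above are thin wrappers: they take uld_mutex, use
 * chcr_offload_state() to verify that the ch_ktls ULD is loaded and has
 * registered its own tlsdev_ops, adjust the kTLS enable refcount through
 * cxgb4_set_ktls_feature(), and forward the call to the ULD.  They are
 * hooked up further down in this patch when the firmware advertises
 * FW_CAPS_CONFIG_TLS_HW, roughly:
 *
 *	netdev->hw_features |= NETIF_F_HW_TLS_TX;
 *	netdev->tlsdev_ops = &cxgb4_ktls_ops;
 */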
6500
+
6501
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6502
+
6503
+static int cxgb4_xfrm_add_state(struct xfrm_state *x)
6504
+{
6505
+ struct adapter *adap = netdev2adap(x->xso.dev);
6506
+ int ret;
6507
+
6508
+ if (!mutex_trylock(&uld_mutex)) {
6509
+ dev_dbg(adap->pdev_dev,
6510
+ "crypto uld critical resource is under use\n");
6511
+ return -EBUSY;
6512
+ }
6513
+ ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
6514
+ if (ret)
6515
+ goto out_unlock;
6516
+
6517
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
6518
+
6519
+out_unlock:
6520
+ mutex_unlock(&uld_mutex);
6521
+
6522
+ return ret;
6523
+}
6524
+
6525
+static void cxgb4_xfrm_del_state(struct xfrm_state *x)
6526
+{
6527
+ struct adapter *adap = netdev2adap(x->xso.dev);
6528
+
6529
+ if (!mutex_trylock(&uld_mutex)) {
6530
+ dev_dbg(adap->pdev_dev,
6531
+ "crypto uld critical resource is under use\n");
6532
+ return;
6533
+ }
6534
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6535
+ goto out_unlock;
6536
+
6537
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
6538
+
6539
+out_unlock:
6540
+ mutex_unlock(&uld_mutex);
6541
+}
6542
+
6543
+static void cxgb4_xfrm_free_state(struct xfrm_state *x)
6544
+{
6545
+ struct adapter *adap = netdev2adap(x->xso.dev);
6546
+
6547
+ if (!mutex_trylock(&uld_mutex)) {
6548
+ dev_dbg(adap->pdev_dev,
6549
+ "crypto uld critical resource is under use\n");
6550
+ return;
6551
+ }
6552
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6553
+ goto out_unlock;
6554
+
6555
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
6556
+
6557
+out_unlock:
6558
+ mutex_unlock(&uld_mutex);
6559
+}
6560
+
6561
+static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
6562
+{
6563
+ struct adapter *adap = netdev2adap(x->xso.dev);
6564
+ bool ret = false;
6565
+
6566
+ if (!mutex_trylock(&uld_mutex)) {
6567
+ dev_dbg(adap->pdev_dev,
6568
+ "crypto uld critical resource is under use\n");
6569
+ return ret;
6570
+ }
6571
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6572
+ goto out_unlock;
6573
+
6574
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
6575
+
6576
+out_unlock:
6577
+ mutex_unlock(&uld_mutex);
6578
+ return ret;
6579
+}
6580
+
6581
+static void cxgb4_advance_esn_state(struct xfrm_state *x)
6582
+{
6583
+ struct adapter *adap = netdev2adap(x->xso.dev);
6584
+
6585
+ if (!mutex_trylock(&uld_mutex)) {
6586
+ dev_dbg(adap->pdev_dev,
6587
+ "crypto uld critical resource is under use\n");
6588
+ return;
6589
+ }
6590
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6591
+ goto out_unlock;
6592
+
6593
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
6594
+
6595
+out_unlock:
6596
+ mutex_unlock(&uld_mutex);
6597
+}
6598
+
6599
+static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
6600
+ .xdo_dev_state_add = cxgb4_xfrm_add_state,
6601
+ .xdo_dev_state_delete = cxgb4_xfrm_del_state,
6602
+ .xdo_dev_state_free = cxgb4_xfrm_free_state,
6603
+ .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
6604
+ .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
6605
+};
6606
+
6607
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
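/* The xfrmdev_ops wrappers above follow the same forwarding scheme as the
 * kTLS ones, except that they use mutex_trylock() and simply back off
 * (the add path returns -EBUSY) when uld_mutex is already held, as the
 * dev_dbg messages note.  They are attached further down in this patch
 * when the firmware advertises FW_CAPS_CONFIG_IPSEC_INLINE, roughly:
 *
 *	netdev->hw_enc_features |= NETIF_F_HW_ESP;
 *	netdev->features |= NETIF_F_HW_ESP;
 *	netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
 */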
6608
+
54026609 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
54036610 {
54046611 struct net_device *netdev;
....@@ -5413,8 +6620,6 @@
54136620 u16 device_id;
54146621 int i, err;
54156622 u32 whoami;
5416
-
5417
- printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
54186623
54196624 err = pci_request_regions(pdev, KBUILD_MODNAME);
54206625 if (err) {
....@@ -5514,7 +6719,7 @@
55146719 }
55156720
55166721 /* PCI device has been enabled */
5517
- adapter->flags |= DEV_ENABLED;
6722
+ adapter->flags |= CXGB4_DEV_ENABLED;
55186723 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
55196724
55206725 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
....@@ -5532,7 +6737,7 @@
55326737 * using Relaxed Ordering.
55336738 */
55346739 if (!pcie_relaxed_ordering_enabled(pdev))
5535
- adapter->flags |= ROOT_NO_RELAXED_ORDERING;
6740
+ adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
55366741
55376742 spin_lock_init(&adapter->stats_lock);
55386743 spin_lock_init(&adapter->tid_release_lock);
....@@ -5587,13 +6792,11 @@
55876792 }
55886793
55896794 setup_memwin(adapter);
5590
- err = adap_init0(adapter);
5591
-#ifdef CONFIG_DEBUG_FS
5592
- bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
5593
-#endif
5594
- setup_memwin_rdma(adapter);
6795
+ err = adap_init0(adapter, 0);
55956796 if (err)
55966797 goto out_unmap_bar;
6798
+
6799
+ setup_memwin_rdma(adapter);
55976800
55986801 /* configure SGE_STAT_CFG_A to read WC stats */
55996802 if (!is_t4(adapter->params.chip))
....@@ -5605,8 +6808,14 @@
56056808 INIT_LIST_HEAD(&adapter->mac_hlist);
56066809
56076810 for_each_port(adapter, i) {
6811
+ /* For supporting MQPRIO Offload, need some extra
6812
+ * queues for each ETHOFLD TIDs. Keep it equal to
6813
+ * MAX_ATIDs for now. Once we connect to firmware
6814
+ * later and query the EOTID params, we'll come to
6815
+ * know the actual # of EOTIDs supported.
6816
+ */
56086817 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5609
- MAX_ETH_QSETS);
6818
+ MAX_ETH_QSETS + MAX_ATIDS);
56106819 if (!netdev) {
56116820 err = -ENOMEM;
56126821 goto out_free_dev;
....@@ -5623,24 +6832,45 @@
56236832
56246833 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
56256834 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5626
- NETIF_F_RXCSUM | NETIF_F_RXHASH |
6835
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
56276836 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
5628
- NETIF_F_HW_TC;
6837
+ NETIF_F_HW_TC | NETIF_F_NTUPLE;
56296838
56306839 if (chip_ver > CHELSIO_T5) {
56316840 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
56326841 NETIF_F_IPV6_CSUM |
56336842 NETIF_F_RXCSUM |
56346843 NETIF_F_GSO_UDP_TUNNEL |
6844
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
56356845 NETIF_F_TSO | NETIF_F_TSO6;
56366846
5637
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
6847
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6848
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
6849
+ NETIF_F_HW_TLS_RECORD;
6850
+
6851
+ if (adapter->rawf_cnt)
6852
+ netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
56386853 }
56396854
56406855 if (highdma)
56416856 netdev->hw_features |= NETIF_F_HIGHDMA;
56426857 netdev->features |= netdev->hw_features;
56436858 netdev->vlan_features = netdev->features & VLAN_FEAT;
6859
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6860
+ if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6861
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
6862
+ netdev->tlsdev_ops = &cxgb4_ktls_ops;
6863
+ /* initialize the refcount */
6864
+ refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6865
+ }
6866
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
6867
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6868
+ if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
6869
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
6870
+ netdev->features |= NETIF_F_HW_ESP;
6871
+ netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
6872
+ }
6873
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
56446874
56456875 netdev->priv_flags |= IFF_UNICAST_FLT;
56466876
....@@ -5661,7 +6891,7 @@
56616891
56626892 pci_set_drvdata(pdev, adapter);
56636893
5664
- if (adapter->flags & FW_OK) {
6894
+ if (adapter->flags & CXGB4_FW_OK) {
56656895 err = t4_port_init(adapter, func, func, 0);
56666896 if (err)
56676897 goto out_free_dev;
....@@ -5683,7 +6913,7 @@
56836913 }
56846914 }
56856915
5686
- if (!(adapter->flags & FW_OK))
6916
+ if (!(adapter->flags & CXGB4_FW_OK))
56876917 goto fw_attach_fail;
56886918
56896919 /* Configure queues and allocate tables now, they can be needed as
....@@ -5705,12 +6935,6 @@
57056935 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
57066936 adapter->params.offload = 0;
57076937 }
5708
-
5709
- adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
5710
- sizeof(struct mps_encap_entry),
5711
- GFP_KERNEL);
5712
- if (!adapter->mps_encap)
5713
- dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
57146938
57156939 #if IS_ENABLED(CONFIG_IPV6)
57166940 if (chip_ver <= CHELSIO_T5 &&
....@@ -5744,6 +6968,24 @@
57446968 i);
57456969 }
57466970
6971
+ if (is_offload(adapter) || is_hashfilter(adapter)) {
6972
+ if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
6973
+ u32 v;
6974
+
6975
+ v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
6976
+ if (chip_ver <= CHELSIO_T5) {
6977
+ adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
6978
+ v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
6979
+ adapter->tids.hash_base = v / 4;
6980
+ } else {
6981
+ adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
6982
+ v = t4_read_reg(adapter,
6983
+ T6_LE_DB_HASH_TID_BASE_A);
6984
+ adapter->tids.hash_base = v;
6985
+ }
6986
+ }
6987
+ }
6988
+
57476989 if (tid_init(&adapter->tids) < 0) {
57486990 dev_warn(&pdev->dev, "could not allocate TID table, "
57496991 "continuing\n");
....@@ -5757,29 +6999,24 @@
57576999 if (cxgb4_init_tc_flower(adapter))
57587000 dev_warn(&pdev->dev,
57597001 "could not offload tc flower, continuing\n");
5760
- }
57617002
5762
- if (is_offload(adapter) || is_hashfilter(adapter)) {
5763
- if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
5764
- u32 hash_base, hash_reg;
7003
+ if (cxgb4_init_tc_mqprio(adapter))
7004
+ dev_warn(&pdev->dev,
7005
+ "could not offload tc mqprio, continuing\n");
57657006
5766
- if (chip_ver <= CHELSIO_T5) {
5767
- hash_reg = LE_DB_TID_HASHBASE_A;
5768
- hash_base = t4_read_reg(adapter, hash_reg);
5769
- adapter->tids.hash_base = hash_base / 4;
5770
- } else {
5771
- hash_reg = T6_LE_DB_HASH_TID_BASE_A;
5772
- hash_base = t4_read_reg(adapter, hash_reg);
5773
- adapter->tids.hash_base = hash_base;
5774
- }
5775
- }
7007
+ if (cxgb4_init_tc_matchall(adapter))
7008
+ dev_warn(&pdev->dev,
7009
+ "could not offload tc matchall, continuing\n");
7010
+ if (cxgb4_init_ethtool_filters(adapter))
7011
+ dev_warn(&pdev->dev,
7012
+ "could not initialize ethtool filters, continuing\n");
57767013 }
57777014
57787015 /* See what interrupts we'll be using */
57797016 if (msi > 1 && enable_msix(adapter) == 0)
5780
- adapter->flags |= USING_MSIX;
7017
+ adapter->flags |= CXGB4_USING_MSIX;
57817018 else if (msi > 0 && pci_enable_msi(pdev) == 0) {
5782
- adapter->flags |= USING_MSI;
7019
+ adapter->flags |= CXGB4_USING_MSI;
57837020 if (msi > 1)
57847021 free_msix_info(adapter);
57857022 }
....@@ -5787,9 +7024,18 @@
57877024	/* check for PCI Express bandwidth capabilities */
57887025 pcie_print_link_status(pdev);
57897026
7027
+ cxgb4_init_mps_ref_entries(adapter);
7028
+
57907029 err = init_rss(adapter);
57917030 if (err)
57927031 goto out_free_dev;
7032
+
7033
+ err = setup_non_data_intr(adapter);
7034
+ if (err) {
7035
+ dev_err(adapter->pdev_dev,
7036
+ "Non Data interrupt allocation failed, err: %d\n", err);
7037
+ goto out_free_dev;
7038
+ }
57937039
57947040 err = setup_fw_sge_queues(adapter);
57957041 if (err) {
....@@ -5837,14 +7083,15 @@
58377083 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
58387084 pdev->needs_freset = 1;
58397085
5840
- if (is_uld(adapter)) {
5841
- mutex_lock(&uld_mutex);
5842
- list_add_tail(&adapter->list_node, &adapter_list);
5843
- mutex_unlock(&uld_mutex);
5844
- }
7086
+ if (is_uld(adapter))
7087
+ cxgb4_uld_enable(adapter);
58457088
58467089 if (!is_t4(adapter->params.chip))
58477090 cxgb4_ptp_init(adapter);
7091
+
7092
+ if (IS_REACHABLE(CONFIG_THERMAL) &&
7093
+ !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
7094
+ cxgb4_thermal_init(adapter);
58487095
58497096 print_adapter_info(adapter);
58507097 return 0;
....@@ -5852,7 +7099,7 @@
58527099 out_free_dev:
58537100 t4_free_sge_resources(adapter);
58547101 free_some_resources(adapter);
5855
- if (adapter->flags & USING_MSIX)
7102
+ if (adapter->flags & CXGB4_USING_MSIX)
58567103 free_msix_info(adapter);
58577104 if (adapter->num_uld || adapter->num_ofld_uld)
58587105 t4_uld_mem_free(adapter);
....@@ -5885,7 +7132,12 @@
58857132 return;
58867133 }
58877134
5888
- adapter->flags |= SHUTTING_DOWN;
7135
+ /* If we allocated filters, free up state associated with any
7136
+ * valid filters ...
7137
+ */
7138
+ clear_all_filters(adapter);
7139
+
7140
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
58897141
58907142 if (adapter->pf == 4) {
58917143 int i;
....@@ -5895,33 +7147,31 @@
58957147 */
58967148 destroy_workqueue(adapter->workq);
58977149
5898
- if (is_uld(adapter)) {
5899
- detach_ulds(adapter);
5900
- t4_uld_clean_up(adapter);
5901
- }
5902
-
5903
- adap_free_hma_mem(adapter);
5904
-
5905
- disable_interrupts(adapter);
7150
+ detach_ulds(adapter);
59067151
59077152 for_each_port(adapter, i)
59087153 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
59097154 unregister_netdev(adapter->port[i]);
59107155
7156
+ t4_uld_clean_up(adapter);
7157
+
7158
+ adap_free_hma_mem(adapter);
7159
+
7160
+ disable_interrupts(adapter);
7161
+
7162
+ cxgb4_free_mps_ref_entries(adapter);
7163
+
59117164 debugfs_remove_recursive(adapter->debugfs_root);
59127165
59137166 if (!is_t4(adapter->params.chip))
59147167 cxgb4_ptp_stop(adapter);
7168
+ if (IS_REACHABLE(CONFIG_THERMAL))
7169
+ cxgb4_thermal_remove(adapter);
59157170
5916
- /* If we allocated filters, free up state associated with any
5917
- * valid filters ...
5918
- */
5919
- clear_all_filters(adapter);
5920
-
5921
- if (adapter->flags & FULL_INIT_DONE)
7171
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
59227172 cxgb_down(adapter);
59237173
5924
- if (adapter->flags & USING_MSIX)
7174
+ if (adapter->flags & CXGB4_USING_MSIX)
59257175 free_msix_info(adapter);
59267176 if (adapter->num_uld || adapter->num_ofld_uld)
59277177 t4_uld_mem_free(adapter);
....@@ -5945,9 +7195,9 @@
59457195 #endif
59467196 iounmap(adapter->regs);
59477197 pci_disable_pcie_error_reporting(pdev);
5948
- if ((adapter->flags & DEV_ENABLED)) {
7198
+ if ((adapter->flags & CXGB4_DEV_ENABLED)) {
59497199 pci_disable_device(pdev);
5950
- adapter->flags &= ~DEV_ENABLED;
7200
+ adapter->flags &= ~CXGB4_DEV_ENABLED;
59517201 }
59527202 pci_release_regions(pdev);
59537203 kfree(adapter->mbox_log);
....@@ -5973,7 +7223,7 @@
59737223 return;
59747224 }
59757225
5976
- adapter->flags |= SHUTTING_DOWN;
7226
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
59777227
59787228 if (adapter->pf == 4) {
59797229 int i;
....@@ -5981,6 +7231,10 @@
59817231 for_each_port(adapter, i)
59827232 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
59837233 cxgb_close(adapter->port[i]);
7234
+
7235
+ rtnl_lock();
7236
+ cxgb4_mqprio_stop_offload(adapter);
7237
+ rtnl_unlock();
59847238
59857239 if (is_uld(adapter)) {
59867240 detach_ulds(adapter);
....@@ -5991,7 +7245,7 @@
59917245 disable_msi(adapter);
59927246
59937247 t4_sge_stop(adapter);
5994
- if (adapter->flags & FW_OK)
7248
+ if (adapter->flags & CXGB4_FW_OK)
59957249 t4_fw_bye(adapter, adapter->mbox);
59967250 }
59977251 }
....@@ -6012,10 +7266,7 @@
60127266 {
60137267 int ret;
60147268
6015
- /* Debugfs support is optional, just warn if this fails */
60167269 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6017
- if (!cxgb4_debugfs_root)
6018
- pr_warn("could not create debugfs entry, continuing\n");
60197270
60207271 ret = pci_register_driver(&cxgb4_driver);
60217272 if (ret < 0)