.. | .. |
---|
52 | 52 | #include <linux/ptp_clock_kernel.h> |
---|
53 | 53 | #include <linux/ptp_classify.h> |
---|
54 | 54 | #include <linux/crash_dump.h> |
---|
| 55 | +#include <linux/thermal.h> |
---|
55 | 56 | #include <asm/io.h> |
---|
56 | 57 | #include "t4_chip_type.h" |
---|
57 | 58 | #include "cxgb4_uld.h" |
---|
| 59 | +#include "t4fw_api.h" |
---|
58 | 60 | |
---|
59 | 61 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) |
---|
60 | 62 | extern struct list_head adapter_list; |
---|
| 63 | +extern struct list_head uld_list; |
---|
61 | 64 | extern struct mutex uld_mutex; |
---|
62 | 65 | |
---|
63 | 66 | /* Suspend an Ethernet Tx queue with fewer available descriptors than this. |
---|
.. | .. |
---|
66 | 69 | */ |
---|
67 | 70 | #define ETHTXQ_STOP_THRES \ |
---|
68 | 71 | (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) |
---|
| 72 | + |
---|
| 73 | +#define FW_PARAM_DEV(param) \ |
---|
| 74 | + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ |
---|
| 75 | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) |
---|
| 76 | + |
---|
| 77 | +#define FW_PARAM_PFVF(param) \ |
---|
| 78 | + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ |
---|
| 79 | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \ |
---|
| 80 | + FW_PARAMS_PARAM_Y_V(0) | \ |
---|
| 81 | + FW_PARAMS_PARAM_Z_V(0)) |
---|
69 | 82 | |
---|
70 | 83 | enum { |
---|
71 | 84 | MAX_NPORTS = 4, /* max # of ports */ |
---|
.. | .. |
---|
124 | 137 | FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ |
---|
125 | 138 | FEC_RS = 1 << 1, /* Reed-Solomon */ |
---|
126 | 139 | FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */ |
---|
| 140 | +}; |
---|
| 141 | + |
---|
| 142 | +enum { |
---|
| 143 | + CXGB4_ETHTOOL_FLASH_FW = 1, |
---|
| 144 | + CXGB4_ETHTOOL_FLASH_PHY = 2, |
---|
| 145 | + CXGB4_ETHTOOL_FLASH_BOOT = 3, |
---|
| 146 | + CXGB4_ETHTOOL_FLASH_BOOTCFG = 4 |
---|
| 147 | +}; |
---|
| 148 | + |
---|
| 149 | +enum cxgb4_netdev_tls_ops { |
---|
| 150 | + CXGB4_TLSDEV_OPS = 1, |
---|
| 151 | + CXGB4_XFRMDEV_OPS |
---|
| 152 | +}; |
---|
| 153 | + |
---|
| 154 | +struct cxgb4_bootcfg_data { |
---|
| 155 | + __le16 signature; |
---|
| 156 | + __u8 reserved[2]; |
---|
| 157 | +}; |
---|
| 158 | + |
---|
| 159 | +struct cxgb4_pcir_data { |
---|
| 160 | + __le32 signature; /* Signature. The string "PCIR" */ |
---|
| 161 | + __le16 vendor_id; /* Vendor Identification */ |
---|
| 162 | + __le16 device_id; /* Device Identification */ |
---|
| 163 | + __u8 vital_product[2]; /* Pointer to Vital Product Data */ |
---|
| 164 | + __u8 length[2]; /* PCIR Data Structure Length */ |
---|
| 165 | + __u8 revision; /* PCIR Data Structure Revision */ |
---|
| 166 | + __u8 class_code[3]; /* Class Code */ |
---|
| 167 | + __u8 image_length[2]; /* Image Length. Multiple of 512B */ |
---|
| 168 | + __u8 code_revision[2]; /* Revision Level of Code/Data */ |
---|
| 169 | + __u8 code_type; |
---|
| 170 | + __u8 indicator; |
---|
| 171 | + __u8 reserved[2]; |
---|
| 172 | +}; |
---|
| 173 | + |
---|
| 174 | +/* BIOS boot headers */ |
---|
| 175 | +struct cxgb4_pci_exp_rom_header { |
---|
| 176 | + __le16 signature; /* ROM Signature. Should be 0xaa55 */ |
---|
| 177 | + __u8 reserved[22]; /* Reserved per processor Architecture data */ |
---|
| 178 | + __le16 pcir_offset; /* Offset to PCI Data Structure */ |
---|
| 179 | +}; |
---|
| 180 | + |
---|
| 181 | +/* Legacy PCI Expansion ROM Header */ |
---|
| 182 | +struct legacy_pci_rom_hdr { |
---|
| 183 | + __u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ |
---|
| 184 | + __u8 size512; /* Current Image Size in units of 512 bytes */ |
---|
| 185 | + __u8 initentry_point[4]; |
---|
| 186 | + __u8 cksum; /* Checksum computed on the entire Image */ |
---|
| 187 | + __u8 reserved[16]; /* Reserved */ |
---|
| 188 | + __le16 pcir_offset; /* Offset to PCI Data Structure */ |
---|
| 189 | +}; |
---|
| 190 | + |
---|
| 191 | +#define CXGB4_HDR_CODE1 0x00 |
---|
| 192 | +#define CXGB4_HDR_CODE2 0x03 |
---|
| 193 | +#define CXGB4_HDR_INDI 0x80 |
---|
| 194 | + |
---|
| 195 | +/* BOOT constants */ |
---|
| 196 | +enum { |
---|
| 197 | + BOOT_CFG_SIG = 0x4243, |
---|
| 198 | + BOOT_SIZE_INC = 512, |
---|
| 199 | + BOOT_SIGNATURE = 0xaa55, |
---|
| 200 | + BOOT_MIN_SIZE = sizeof(struct cxgb4_pci_exp_rom_header), |
---|
| 201 | + BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC, |
---|
| 202 | + PCIR_SIGNATURE = 0x52494350 |
---|
127 | 203 | }; |
---|
128 | 204 | |
---|
129 | 205 | struct port_stats { |
---|
.. | .. |
---|
279 | 355 | unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ |
---|
280 | 356 | |
---|
281 | 357 | u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ |
---|
| 358 | + u32 filter_mask; |
---|
282 | 359 | u32 ingress_config; /* cached TP_INGRESS_CONFIG */ |
---|
283 | 360 | |
---|
284 | 361 | /* cached TP_OUT_CONFIG compressed error vector |
---|
.. | .. |
---|
390 | 467 | struct arch_specific_params arch; /* chip specific params */ |
---|
391 | 468 | unsigned char offload; |
---|
392 | 469 | unsigned char crypto; /* HW capability for crypto */ |
---|
| 470 | + unsigned char ethofld; /* QoS support */ |
---|
393 | 471 | |
---|
394 | 472 | unsigned char bypass; |
---|
395 | 473 | unsigned char hash_filter; |
---|
.. | .. |
---|
403 | 481 | bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ |
---|
404 | 482 | u8 fw_caps_support; /* 32-bit Port Capabilities */ |
---|
405 | 483 | bool filter2_wr_support; /* FW support for FILTER2_WR */ |
---|
| 484 | + unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */ |
---|
406 | 485 | |
---|
407 | 486 | /* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is |
---|
408 | 487 | * used by the Port |
---|
.. | .. |
---|
451 | 530 | return &((struct mbox_cmd *)&(log)[1])[entry_idx]; |
---|
452 | 531 | } |
---|
453 | 532 | |
---|
454 | | -#include "t4fw_api.h" |
---|
455 | | - |
---|
456 | 533 | #define FW_VERSION(chip) ( \ |
---|
457 | 534 | FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \ |
---|
458 | 535 | FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \ |
---|
459 | 536 | FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \ |
---|
460 | 537 | FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) |
---|
461 | 538 | #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) |
---|
| 539 | + |
---|
| 540 | +struct cxgb4_ethtool_lb_test { |
---|
| 541 | + struct completion completion; |
---|
| 542 | + int result; |
---|
| 543 | + int loopback; |
---|
| 544 | +}; |
---|
462 | 545 | |
---|
463 | 546 | struct fw_info { |
---|
464 | 547 | u8 chip; |
---|
.. | .. |
---|
476 | 559 | unsigned char skip_len; |
---|
477 | 560 | unsigned char invert; |
---|
478 | 561 | unsigned char port; |
---|
| 562 | +}; |
---|
| 563 | + |
---|
| 564 | +struct cxgb4_fw_data { |
---|
| 565 | + __be32 signature; |
---|
| 566 | + __u8 reserved[4]; |
---|
479 | 567 | }; |
---|
480 | 568 | |
---|
481 | 569 | /* Firmware Port Capabilities types. */ |
---|
.. | .. |
---|
500 | 588 | |
---|
501 | 589 | enum cc_pause requested_fc; /* flow control user has requested */ |
---|
502 | 590 | enum cc_pause fc; /* actual link flow control */ |
---|
| 591 | + enum cc_pause advertised_fc; /* actual advertised flow control */ |
---|
503 | 592 | |
---|
504 | 593 | enum cc_fec requested_fec; /* Forward Error Correction: */ |
---|
505 | 594 | enum cc_fec fec; /* requested and actual in use */ |
---|
.. | .. |
---|
533 | 622 | }; |
---|
534 | 623 | |
---|
535 | 624 | enum { |
---|
| 625 | + MAX_TXQ_DESC_SIZE = 64, |
---|
| 626 | + MAX_RXQ_DESC_SIZE = 128, |
---|
| 627 | + MAX_FL_DESC_SIZE = 8, |
---|
| 628 | + MAX_CTRL_TXQ_DESC_SIZE = 64, |
---|
| 629 | +}; |
---|
| 630 | + |
---|
| 631 | +enum { |
---|
536 | 632 | INGQ_EXTRAS = 2, /* firmware event queue and */ |
---|
537 | 633 | /* forwarded interrupts */ |
---|
538 | 634 | MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS, |
---|
.. | .. |
---|
559 | 655 | struct port_info { |
---|
560 | 656 | struct adapter *adapter; |
---|
561 | 657 | u16 viid; |
---|
562 | | - s16 xact_addr_filt; /* index of exact MAC address filter */ |
---|
| 658 | + int xact_addr_filt; /* index of exact MAC address filter */ |
---|
563 | 659 | u16 rss_size; /* size of VI's RSS table slice */ |
---|
564 | 660 | s8 mdio_addr; |
---|
565 | 661 | enum fw_port_type port_type; |
---|
.. | .. |
---|
584 | 680 | bool ptp_enable; |
---|
585 | 681 | struct sched_table *sched_tbl; |
---|
586 | 682 | u32 eth_flags; |
---|
| 683 | + |
---|
| 684 | + /* viid and smt fields are either returned by the FW |
---|
| 685 | + * or decoded by the driver by parsing the viid. |
---|
| 686 | + */ |
---|
| 687 | + u8 vin; |
---|
| 688 | + u8 vivld; |
---|
| 689 | + u8 smt_idx; |
---|
| 690 | + u8 rx_cchan; |
---|
| 691 | + |
---|
| 692 | + bool tc_block_shared; |
---|
| 693 | + |
---|
| 694 | + /* Mirror VI information */ |
---|
| 695 | + u16 viid_mirror; |
---|
| 696 | + u16 nmirrorqsets; |
---|
| 697 | + u32 vi_mirror_count; |
---|
| 698 | + struct mutex vi_mirror_mutex; /* Sync access to Mirror VI info */ |
---|
| 699 | + struct cxgb4_ethtool_lb_test ethtool_lb; |
---|
587 | 700 | }; |
---|
588 | 701 | |
---|
589 | 702 | struct dentry; |
---|
590 | 703 | struct work_struct; |
---|
591 | 704 | |
---|
592 | 705 | enum { /* adapter flags */ |
---|
593 | | - FULL_INIT_DONE = (1 << 0), |
---|
594 | | - DEV_ENABLED = (1 << 1), |
---|
595 | | - USING_MSI = (1 << 2), |
---|
596 | | - USING_MSIX = (1 << 3), |
---|
597 | | - FW_OK = (1 << 4), |
---|
598 | | - RSS_TNLALLLOOKUP = (1 << 5), |
---|
599 | | - USING_SOFT_PARAMS = (1 << 6), |
---|
600 | | - MASTER_PF = (1 << 7), |
---|
601 | | - FW_OFLD_CONN = (1 << 9), |
---|
602 | | - ROOT_NO_RELAXED_ORDERING = (1 << 10), |
---|
603 | | - SHUTTING_DOWN = (1 << 11), |
---|
| 706 | + CXGB4_FULL_INIT_DONE = (1 << 0), |
---|
| 707 | + CXGB4_DEV_ENABLED = (1 << 1), |
---|
| 708 | + CXGB4_USING_MSI = (1 << 2), |
---|
| 709 | + CXGB4_USING_MSIX = (1 << 3), |
---|
| 710 | + CXGB4_FW_OK = (1 << 4), |
---|
| 711 | + CXGB4_RSS_TNLALLLOOKUP = (1 << 5), |
---|
| 712 | + CXGB4_USING_SOFT_PARAMS = (1 << 6), |
---|
| 713 | + CXGB4_MASTER_PF = (1 << 7), |
---|
| 714 | + CXGB4_FW_OFLD_CONN = (1 << 9), |
---|
| 715 | + CXGB4_ROOT_NO_RELAXED_ORDERING = (1 << 10), |
---|
| 716 | + CXGB4_SHUTTING_DOWN = (1 << 11), |
---|
| 717 | + CXGB4_SGE_DBQ_TIMER = (1 << 12), |
---|
604 | 718 | }; |
---|
605 | 719 | |
---|
606 | 720 | enum { |
---|
607 | 721 | ULP_CRYPTO_LOOKASIDE = 1 << 0, |
---|
608 | 722 | ULP_CRYPTO_IPSEC_INLINE = 1 << 1, |
---|
| 723 | + ULP_CRYPTO_KTLS_INLINE = 1 << 3, |
---|
609 | 724 | }; |
---|
| 725 | + |
---|
| 726 | +#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM 1024 |
---|
| 727 | +#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE 64 |
---|
| 728 | +#define CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC 5 |
---|
| 729 | +#define CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT 8 |
---|
| 730 | + |
---|
| 731 | +#define CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM 72 |
---|
610 | 732 | |
---|
611 | 733 | struct rx_sw_desc; |
---|
612 | 734 | |
---|
.. | .. |
---|
685 | 807 | unsigned long rx_cso; /* # of Rx checksum offloads */ |
---|
686 | 808 | unsigned long vlan_ex; /* # of Rx VLAN extractions */ |
---|
687 | 809 | unsigned long rx_drops; /* # of packets dropped due to no mem */ |
---|
| 810 | + unsigned long bad_rx_pkts; /* # of packets with err_vec!=0 */ |
---|
688 | 811 | }; |
---|
689 | 812 | |
---|
690 | 813 | struct sge_eth_rxq { /* SW Ethernet Rx queue */ |
---|
691 | 814 | struct sge_rspq rspq; |
---|
692 | 815 | struct sge_fl fl; |
---|
693 | 816 | struct sge_eth_stats stats; |
---|
| 817 | + struct msix_info *msix; |
---|
694 | 818 | } ____cacheline_aligned_in_smp; |
---|
695 | 819 | |
---|
696 | 820 | struct sge_ofld_stats { /* offload queue statistics */ |
---|
.. | .. |
---|
704 | 828 | struct sge_rspq rspq; |
---|
705 | 829 | struct sge_fl fl; |
---|
706 | 830 | struct sge_ofld_stats stats; |
---|
| 831 | + struct msix_info *msix; |
---|
707 | 832 | } ____cacheline_aligned_in_smp; |
---|
708 | 833 | |
---|
709 | 834 | struct tx_desc { |
---|
710 | 835 | __be64 flit[8]; |
---|
711 | 836 | }; |
---|
712 | 837 | |
---|
713 | | -struct tx_sw_desc; |
---|
| 838 | +struct ulptx_sgl; |
---|
| 839 | + |
---|
| 840 | +struct tx_sw_desc { |
---|
| 841 | + struct sk_buff *skb; /* SKB to free after getting completion */ |
---|
| 842 | + dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */ |
---|
| 843 | +}; |
---|
714 | 844 | |
---|
715 | 845 | struct sge_txq { |
---|
716 | 846 | unsigned int in_use; /* # of in-use Tx descriptors */ |
---|
.. | .. |
---|
739 | 869 | #ifdef CONFIG_CHELSIO_T4_DCB |
---|
740 | 870 | u8 dcb_prio; /* DCB Priority bound to queue */ |
---|
741 | 871 | #endif |
---|
| 872 | + u8 dbqt; /* SGE Doorbell Queue Timer in use */ |
---|
| 873 | + unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */ |
---|
742 | 874 | unsigned long tso; /* # of TSO requests */ |
---|
| 875 | + unsigned long uso; /* # of USO requests */ |
---|
743 | 876 | unsigned long tx_cso; /* # of Tx checksum offloads */ |
---|
744 | 877 | unsigned long vlan_ins; /* # of Tx VLAN insertions */ |
---|
745 | 878 | unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ |
---|
.. | .. |
---|
766 | 899 | struct sge_uld_rxq_info { |
---|
767 | 900 | char name[IFNAMSIZ]; /* name of ULD driver */ |
---|
768 | 901 | struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */ |
---|
769 | | - u16 *msix_tbl; /* msix_tbl for uld */ |
---|
770 | 902 | u16 *rspq_id; /* response queue id's of rxq */ |
---|
771 | 903 | u16 nrxq; /* # of ingress uld queues */ |
---|
772 | 904 | u16 nciq; /* # of completion queues */ |
---|
.. | .. |
---|
777 | 909 | struct sge_uld_txq *uldtxq; /* Txq's for ULD */ |
---|
778 | 910 | atomic_t users; /* num users */ |
---|
779 | 911 | u16 ntxq; /* # of egress uld queues */ |
---|
| 912 | +}; |
---|
| 913 | + |
---|
| 914 | +/* struct to maintain ULD list to reallocate ULD resources on hotplug */ |
---|
| 915 | +struct cxgb4_uld_list { |
---|
| 916 | + struct cxgb4_uld_info uld_info; |
---|
| 917 | + struct list_head list_node; |
---|
| 918 | + enum cxgb4_uld uld_type; |
---|
| 919 | +}; |
---|
| 920 | + |
---|
| 921 | +enum sge_eosw_state { |
---|
| 922 | + CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */ |
---|
| 923 | + CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */ |
---|
| 924 | + CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */ |
---|
| 925 | + CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */ |
---|
| 926 | + CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */ |
---|
| 927 | + CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */ |
---|
| 928 | +}; |
---|
| 929 | + |
---|
| 930 | +struct sge_eosw_txq { |
---|
| 931 | + spinlock_t lock; /* Per queue lock to synchronize completions */ |
---|
| 932 | + enum sge_eosw_state state; /* Current ETHOFLD State */ |
---|
| 933 | + struct tx_sw_desc *desc; /* Descriptor ring to hold packets */ |
---|
| 934 | + u32 ndesc; /* Number of descriptors */ |
---|
| 935 | + u32 pidx; /* Current Producer Index */ |
---|
| 936 | + u32 last_pidx; /* Last successfully transmitted Producer Index */ |
---|
| 937 | + u32 cidx; /* Current Consumer Index */ |
---|
| 938 | + u32 last_cidx; /* Last successfully reclaimed Consumer Index */ |
---|
| 939 | + u32 flowc_idx; /* Descriptor containing a FLOWC request */ |
---|
| 940 | + u32 inuse; /* Number of packets held in ring */ |
---|
| 941 | + |
---|
| 942 | + u32 cred; /* Current available credits */ |
---|
| 943 | + u32 ncompl; /* # of completions posted */ |
---|
| 944 | + u32 last_compl; /* # of credits consumed since last completion req */ |
---|
| 945 | + |
---|
| 946 | + u32 eotid; /* Index into EOTID table in software */ |
---|
| 947 | + u32 hwtid; /* Hardware EOTID index */ |
---|
| 948 | + |
---|
| 949 | + u32 hwqid; /* Underlying hardware queue index */ |
---|
| 950 | + struct net_device *netdev; /* Pointer to netdevice */ |
---|
| 951 | + struct tasklet_struct qresume_tsk; /* Restarts the queue */ |
---|
| 952 | + struct completion completion; /* completion for FLOWC rendezvous */ |
---|
| 953 | +}; |
---|
| 954 | + |
---|
| 955 | +struct sge_eohw_txq { |
---|
| 956 | + spinlock_t lock; /* Per queue lock */ |
---|
| 957 | + struct sge_txq q; /* HW Txq */ |
---|
| 958 | + struct adapter *adap; /* Backpointer to adapter */ |
---|
| 959 | + unsigned long tso; /* # of TSO requests */ |
---|
| 960 | + unsigned long uso; /* # of USO requests */ |
---|
| 961 | + unsigned long tx_cso; /* # of Tx checksum offloads */ |
---|
| 962 | + unsigned long vlan_ins; /* # of Tx VLAN insertions */ |
---|
| 963 | + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ |
---|
780 | 964 | }; |
---|
781 | 965 | |
---|
782 | 966 | struct sge { |
---|
.. | .. |
---|
792 | 976 | struct sge_rspq intrq ____cacheline_aligned_in_smp; |
---|
793 | 977 | spinlock_t intrq_lock; |
---|
794 | 978 | |
---|
| 979 | + struct sge_eohw_txq *eohw_txq; |
---|
| 980 | + struct sge_ofld_rxq *eohw_rxq; |
---|
| 981 | + |
---|
| 982 | + struct sge_eth_rxq *mirror_rxq[NCHAN]; |
---|
| 983 | + |
---|
795 | 984 | u16 max_ethqsets; /* # of available Ethernet queue sets */ |
---|
796 | 985 | u16 ethqsets; /* # of active Ethernet queue sets */ |
---|
797 | 986 | u16 ethtxq_rover; /* Tx queue to clean up next */ |
---|
798 | 987 | u16 ofldqsets; /* # of active ofld queue sets */ |
---|
799 | 988 | u16 nqs_per_uld; /* # of Rx queues per ULD */ |
---|
| 989 | + u16 eoqsets; /* # of ETHOFLD queues */ |
---|
| 990 | + u16 mirrorqsets; /* # of Mirror queues */ |
---|
| 991 | + |
---|
800 | 992 | u16 timer_val[SGE_NTIMERS]; |
---|
801 | 993 | u8 counter_val[SGE_NCOUNTERS]; |
---|
| 994 | + u16 dbqtimer_tick; |
---|
| 995 | + u16 dbqtimer_val[SGE_NDBQTIMERS]; |
---|
802 | 996 | u32 fl_pg_order; /* large page allocation size */ |
---|
803 | 997 | u32 stat_len; /* length of status page at ring end */ |
---|
804 | 998 | u32 pktshift; /* padding between CPL & packet data */ |
---|
.. | .. |
---|
817 | 1011 | unsigned long *blocked_fl; |
---|
818 | 1012 | struct timer_list rx_timer; /* refills starving FLs */ |
---|
819 | 1013 | struct timer_list tx_timer; /* checks Tx queues */ |
---|
| 1014 | + |
---|
| 1015 | + int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */ |
---|
| 1016 | + int nd_msix_idx; /* Index to non-data interrupts MSI-X info */ |
---|
820 | 1017 | }; |
---|
821 | 1018 | |
---|
822 | 1019 | #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) |
---|
.. | .. |
---|
843 | 1040 | struct hash_mac_addr { |
---|
844 | 1041 | struct list_head list; |
---|
845 | 1042 | u8 addr[ETH_ALEN]; |
---|
| 1043 | + unsigned int iface_mac; |
---|
846 | 1044 | }; |
---|
847 | 1045 | |
---|
848 | | -struct uld_msix_bmap { |
---|
| 1046 | +struct msix_bmap { |
---|
849 | 1047 | unsigned long *msix_bmap; |
---|
850 | 1048 | unsigned int mapsize; |
---|
851 | 1049 | spinlock_t lock; /* lock for acquiring bitmap */ |
---|
852 | 1050 | }; |
---|
853 | 1051 | |
---|
854 | | -struct uld_msix_info { |
---|
| 1052 | +struct msix_info { |
---|
855 | 1053 | unsigned short vec; |
---|
856 | 1054 | char desc[IFNAMSIZ + 10]; |
---|
857 | 1055 | unsigned int idx; |
---|
| 1056 | + cpumask_var_t aff_mask; |
---|
858 | 1057 | }; |
---|
859 | 1058 | |
---|
860 | 1059 | struct vf_info { |
---|
.. | .. |
---|
862 | 1061 | unsigned int tx_rate; |
---|
863 | 1062 | bool pf_set_mac; |
---|
864 | 1063 | u16 vlan; |
---|
| 1064 | + int link_state; |
---|
865 | 1065 | }; |
---|
866 | 1066 | |
---|
867 | 1067 | enum { |
---|
.. | .. |
---|
878 | 1078 | struct list_head list; |
---|
879 | 1079 | }; |
---|
880 | 1080 | |
---|
881 | | -struct mps_encap_entry { |
---|
882 | | - atomic_t refcnt; |
---|
| 1081 | +#if IS_ENABLED(CONFIG_THERMAL) |
---|
| 1082 | +struct ch_thermal { |
---|
| 1083 | + struct thermal_zone_device *tzdev; |
---|
| 1084 | + int trip_temp; |
---|
| 1085 | + int trip_type; |
---|
| 1086 | +}; |
---|
| 1087 | +#endif |
---|
| 1088 | + |
---|
| 1089 | +struct mps_entries_ref { |
---|
| 1090 | + struct list_head list; |
---|
| 1091 | + u8 addr[ETH_ALEN]; |
---|
| 1092 | + u8 mask[ETH_ALEN]; |
---|
| 1093 | + u16 idx; |
---|
| 1094 | + refcount_t refcnt; |
---|
| 1095 | +}; |
---|
| 1096 | + |
---|
| 1097 | +struct cxgb4_ethtool_filter_info { |
---|
| 1098 | + u32 *loc_array; /* Array holding the actual TIDs set to filters */ |
---|
| 1099 | + unsigned long *bmap; /* Bitmap for managing filters in use */ |
---|
| 1100 | + u32 in_use; /* # of filters in use */ |
---|
| 1101 | +}; |
---|
| 1102 | + |
---|
| 1103 | +struct cxgb4_ethtool_filter { |
---|
| 1104 | + u32 nentries; /* Adapter wide number of supported filters */ |
---|
| 1105 | + struct cxgb4_ethtool_filter_info *port; /* Per port entry */ |
---|
883 | 1106 | }; |
---|
884 | 1107 | |
---|
885 | 1108 | struct adapter { |
---|
.. | .. |
---|
898 | 1121 | |
---|
899 | 1122 | int msg_enable; |
---|
900 | 1123 | __be16 vxlan_port; |
---|
901 | | - u8 vxlan_port_cnt; |
---|
902 | 1124 | __be16 geneve_port; |
---|
903 | | - u8 geneve_port_cnt; |
---|
904 | 1125 | |
---|
905 | 1126 | struct adapter_params params; |
---|
906 | 1127 | struct cxgb4_virt_res vres; |
---|
907 | 1128 | unsigned int swintr; |
---|
908 | 1129 | |
---|
909 | | - struct { |
---|
910 | | - unsigned short vec; |
---|
911 | | - char desc[IFNAMSIZ + 10]; |
---|
912 | | - } msix_info[MAX_INGQ + 1]; |
---|
913 | | - struct uld_msix_info *msix_info_ulds; /* msix info for uld's */ |
---|
914 | | - struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */ |
---|
915 | | - int msi_idx; |
---|
| 1130 | + /* MSI-X Info for NIC and OFLD queues */ |
---|
| 1131 | + struct msix_info *msix_info; |
---|
| 1132 | + struct msix_bmap msix_bmap; |
---|
916 | 1133 | |
---|
917 | 1134 | struct doorbell_stats db_stats; |
---|
918 | 1135 | struct sge sge; |
---|
.. | .. |
---|
933 | 1150 | unsigned int rawf_start; |
---|
934 | 1151 | unsigned int rawf_cnt; |
---|
935 | 1152 | struct smt_data *smt; |
---|
936 | | - struct mps_encap_entry *mps_encap; |
---|
937 | 1153 | struct cxgb4_uld_info *uld; |
---|
938 | 1154 | void *uld_handle[CXGB4_ULD_MAX]; |
---|
939 | 1155 | unsigned int num_uld; |
---|
.. | .. |
---|
941 | 1157 | struct list_head list_node; |
---|
942 | 1158 | struct list_head rcu_node; |
---|
943 | 1159 | struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ |
---|
| 1160 | + struct list_head mps_ref; |
---|
| 1161 | + spinlock_t mps_ref_lock; /* lock for syncing mps ref/def activities */ |
---|
944 | 1162 | |
---|
945 | 1163 | void *iscsi_ppm; |
---|
946 | 1164 | |
---|
.. | .. |
---|
981 | 1199 | |
---|
982 | 1200 | /* TC u32 offload */ |
---|
983 | 1201 | struct cxgb4_tc_u32_table *tc_u32; |
---|
| 1202 | + struct chcr_ktls chcr_ktls; |
---|
984 | 1203 | struct chcr_stats_debug chcr_stats; |
---|
| 1204 | +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) |
---|
| 1205 | + struct ch_ktls_stats_debug ch_ktls_stats; |
---|
| 1206 | +#endif |
---|
| 1207 | +#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) |
---|
| 1208 | + struct ch_ipsec_stats_debug ch_ipsec_stats; |
---|
| 1209 | +#endif |
---|
985 | 1210 | |
---|
986 | 1211 | /* TC flower offload */ |
---|
987 | 1212 | bool tc_flower_initialized; |
---|
.. | .. |
---|
1000 | 1225 | |
---|
1001 | 1226 | /* Dump buffer for collecting logs in kdump kernel */ |
---|
1002 | 1227 | struct vmcoredd_data vmcoredd; |
---|
| 1228 | +#if IS_ENABLED(CONFIG_THERMAL) |
---|
| 1229 | + struct ch_thermal ch_thermal; |
---|
| 1230 | +#endif |
---|
| 1231 | + |
---|
| 1232 | + /* TC MQPRIO offload */ |
---|
| 1233 | + struct cxgb4_tc_mqprio *tc_mqprio; |
---|
| 1234 | + |
---|
| 1235 | + /* TC MATCHALL classifier offload */ |
---|
| 1236 | + struct cxgb4_tc_matchall *tc_matchall; |
---|
| 1237 | + |
---|
| 1238 | + /* Ethtool n-tuple */ |
---|
| 1239 | + struct cxgb4_ethtool_filter *ethtool_filters; |
---|
1003 | 1240 | }; |
---|
1004 | 1241 | |
---|
1005 | 1242 | /* Support for "sched-class" command to allow a TX Scheduling Class to be |
---|
1006 | 1243 | * programmed with various parameters. |
---|
1007 | 1244 | */ |
---|
1008 | 1245 | struct ch_sched_params { |
---|
1009 | | - s8 type; /* packet or flow */ |
---|
| 1246 | + u8 type; /* packet or flow */ |
---|
1010 | 1247 | union { |
---|
1011 | 1248 | struct { |
---|
1012 | | - s8 level; /* scheduler hierarchy level */ |
---|
1013 | | - s8 mode; /* per-class or per-flow */ |
---|
1014 | | - s8 rateunit; /* bit or packet rate */ |
---|
1015 | | - s8 ratemode; /* %port relative or kbps absolute */ |
---|
1016 | | - s8 channel; /* scheduler channel [0..N] */ |
---|
1017 | | - s8 class; /* scheduler class [0..N] */ |
---|
1018 | | - s32 minrate; /* minimum rate */ |
---|
1019 | | - s32 maxrate; /* maximum rate */ |
---|
1020 | | - s16 weight; /* percent weight */ |
---|
1021 | | - s16 pktsize; /* average packet size */ |
---|
| 1249 | + u8 level; /* scheduler hierarchy level */ |
---|
| 1250 | + u8 mode; /* per-class or per-flow */ |
---|
| 1251 | + u8 rateunit; /* bit or packet rate */ |
---|
| 1252 | + u8 ratemode; /* %port relative or kbps absolute */ |
---|
| 1253 | + u8 channel; /* scheduler channel [0..N] */ |
---|
| 1254 | + u8 class; /* scheduler class [0..N] */ |
---|
| 1255 | + u32 minrate; /* minimum rate */ |
---|
| 1256 | + u32 maxrate; /* maximum rate */ |
---|
| 1257 | + u16 weight; /* percent weight */ |
---|
| 1258 | + u16 pktsize; /* average packet size */ |
---|
| 1259 | + u16 burstsize; /* burst buffer size */ |
---|
1022 | 1260 | } params; |
---|
1023 | 1261 | } u; |
---|
1024 | 1262 | }; |
---|
.. | .. |
---|
1029 | 1267 | |
---|
1030 | 1268 | enum { |
---|
1031 | 1269 | SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */ |
---|
| 1270 | + SCHED_CLASS_LEVEL_CH_RL = 2, /* channel rate limiter */ |
---|
1032 | 1271 | }; |
---|
1033 | 1272 | |
---|
1034 | 1273 | enum { |
---|
1035 | 1274 | SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */ |
---|
| 1275 | + SCHED_CLASS_MODE_FLOW, /* per-flow scheduling */ |
---|
1036 | 1276 | }; |
---|
1037 | 1277 | |
---|
1038 | 1278 | enum { |
---|
.. | .. |
---|
1043 | 1283 | SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */ |
---|
1044 | 1284 | }; |
---|
1045 | 1285 | |
---|
1046 | | -struct tx_sw_desc { /* SW state per Tx descriptor */ |
---|
1047 | | - struct sk_buff *skb; |
---|
1048 | | - struct ulptx_sgl *sgl; |
---|
1049 | | -}; |
---|
1050 | | - |
---|
1051 | 1286 | /* Support for "sched_queue" command to allow one or more NIC TX Queues |
---|
1052 | 1287 | * to be bound to a TX Scheduling Class. |
---|
1053 | 1288 | */ |
---|
1054 | 1289 | struct ch_sched_queue { |
---|
1055 | 1290 | s8 queue; /* queue index */ |
---|
1056 | 1291 | s8 class; /* class index */ |
---|
| 1292 | +}; |
---|
| 1293 | + |
---|
| 1294 | +/* Support for "sched_flowc" command to allow one or more FLOWC |
---|
| 1295 | + * to be bound to a TX Scheduling Class. |
---|
| 1296 | + */ |
---|
| 1297 | +struct ch_sched_flowc { |
---|
| 1298 | + s32 tid; /* TID to bind */ |
---|
| 1299 | + s8 class; /* class index */ |
---|
1057 | 1300 | }; |
---|
1058 | 1301 | |
---|
1059 | 1302 | /* Defined bit width of user definable filter tuples |
---|
.. | .. |
---|
1170 | 1413 | u16 nat_lport; /* local port to use after NAT'ing */ |
---|
1171 | 1414 | u16 nat_fport; /* foreign port to use after NAT'ing */ |
---|
1172 | 1415 | |
---|
| 1416 | + u32 tc_prio; /* TC's filter priority index */ |
---|
| 1417 | + u64 tc_cookie; /* Unique cookie identifying TC rules */ |
---|
| 1418 | + |
---|
1173 | 1419 | /* reservation for future additions */ |
---|
1174 | | - u8 rsvd[24]; |
---|
| 1420 | + u8 rsvd[12]; |
---|
1175 | 1421 | |
---|
1176 | 1422 | /* Filter rule value/mask pairs. |
---|
1177 | 1423 | */ |
---|
.. | .. |
---|
1202 | 1448 | NAT_MODE_DIP_SIP_SP, /* NAT on Dst IP, Src IP and Src Port */ |
---|
1203 | 1449 | NAT_MODE_ALL /* NAT on entire 4-tuple */ |
---|
1204 | 1450 | }; |
---|
| 1451 | + |
---|
| 1452 | +#define CXGB4_FILTER_TYPE_MAX 2 |
---|
1205 | 1453 | |
---|
1206 | 1454 | /* Host shadow copy of ingress filter entry. This is in host native format |
---|
1207 | 1455 | * and doesn't match the ordering or bit order, etc. of the hardware of the |
---|
.. | .. |
---|
1247 | 1495 | static inline int is_uld(const struct adapter *adap) |
---|
1248 | 1496 | { |
---|
1249 | 1497 | return (adap->params.offload || adap->params.crypto); |
---|
| 1498 | +} |
---|
| 1499 | + |
---|
| 1500 | +static inline int is_ethofld(const struct adapter *adap) |
---|
| 1501 | +{ |
---|
| 1502 | + return adap->params.ethofld; |
---|
1250 | 1503 | } |
---|
1251 | 1504 | |
---|
1252 | 1505 | static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) |
---|
.. | .. |
---|
1352 | 1605 | return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; |
---|
1353 | 1606 | } |
---|
1354 | 1607 | |
---|
1355 | | -/* driver version & name used for ethtool_drvinfo */ |
---|
| 1608 | +/* driver name used for ethtool_drvinfo */ |
---|
1356 | 1609 | extern char cxgb4_driver_name[]; |
---|
1357 | | -extern const char cxgb4_driver_version[]; |
---|
1358 | 1610 | |
---|
1359 | 1611 | void t4_os_portmod_changed(struct adapter *adap, int port_id); |
---|
1360 | 1612 | void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); |
---|
.. | .. |
---|
1363 | 1615 | void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); |
---|
1364 | 1616 | irq_handler_t t4_intr_handler(struct adapter *adap); |
---|
1365 | 1617 | netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev); |
---|
| 1618 | +int cxgb4_selftest_lb_pkt(struct net_device *netdev); |
---|
1366 | 1619 | int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, |
---|
1367 | 1620 | const struct pkt_gl *gl); |
---|
1368 | 1621 | int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); |
---|
.. | .. |
---|
1373 | 1626 | rspq_flush_handler_t flush_handler, int cong); |
---|
1374 | 1627 | int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, |
---|
1375 | 1628 | struct net_device *dev, struct netdev_queue *netdevq, |
---|
1376 | | - unsigned int iqid); |
---|
| 1629 | + unsigned int iqid, u8 dbqt); |
---|
1377 | 1630 | int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, |
---|
1378 | 1631 | struct net_device *dev, unsigned int iqid, |
---|
1379 | 1632 | unsigned int cmplqid); |
---|
.. | .. |
---|
1382 | 1635 | int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, |
---|
1383 | 1636 | struct net_device *dev, unsigned int iqid, |
---|
1384 | 1637 | unsigned int uld_type); |
---|
| 1638 | +int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, |
---|
| 1639 | + struct net_device *dev, u32 iqid); |
---|
| 1640 | +void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq); |
---|
1385 | 1641 | irqreturn_t t4_sge_intr_msix(int irq, void *cookie); |
---|
1386 | 1642 | int t4_sge_init(struct adapter *adap); |
---|
1387 | 1643 | void t4_sge_start(struct adapter *adap); |
---|
1388 | 1644 | void t4_sge_stop(struct adapter *adap); |
---|
| 1645 | +int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q, |
---|
| 1646 | + int maxreclaim); |
---|
1389 | 1647 | void cxgb4_set_ethtool_ops(struct net_device *netdev); |
---|
1390 | 1648 | int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); |
---|
1391 | 1649 | enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb); |
---|
.. | .. |
---|
1538 | 1796 | |
---|
1539 | 1797 | int t4_wait_dev_ready(void __iomem *regs); |
---|
1540 | 1798 | |
---|
| 1799 | +fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port, |
---|
| 1800 | + struct link_config *lc); |
---|
1541 | 1801 | int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox, |
---|
1542 | 1802 | unsigned int port, struct link_config *lc, |
---|
1543 | | - bool sleep_ok, int timeout); |
---|
| 1803 | + u8 sleep_ok, int timeout); |
---|
1544 | 1804 | |
---|
1545 | 1805 | static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, |
---|
1546 | 1806 | unsigned int port, struct link_config *lc) |
---|
.. | .. |
---|
1588 | 1848 | int t4_read_flash(struct adapter *adapter, unsigned int addr, |
---|
1589 | 1849 | unsigned int nwords, u32 *data, int byte_oriented); |
---|
1590 | 1850 | int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); |
---|
1591 | | -int t4_load_phy_fw(struct adapter *adap, |
---|
1592 | | - int win, spinlock_t *lock, |
---|
| 1851 | +int t4_load_phy_fw(struct adapter *adap, int win, |
---|
1593 | 1852 | int (*phy_fw_version)(const u8 *, size_t), |
---|
1594 | 1853 | const u8 *phy_fw_data, size_t phy_fw_size); |
---|
1595 | 1854 | int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver); |
---|
.. | .. |
---|
1633 | 1892 | int t4_init_portinfo(struct port_info *pi, int mbox, |
---|
1634 | 1893 | int port, int pf, int vf, u8 mac[]); |
---|
1635 | 1894 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); |
---|
| 1895 | +int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf, |
---|
| 1896 | + u16 *mirror_viid); |
---|
1636 | 1897 | void t4_fatal_err(struct adapter *adapter); |
---|
1637 | 1898 | unsigned int t4_chip_rss_size(struct adapter *adapter); |
---|
1638 | 1899 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, |
---|
.. | .. |
---|
1737 | 1998 | unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); |
---|
1738 | 1999 | int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, |
---|
1739 | 2000 | unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, |
---|
1740 | | - unsigned int *rss_size); |
---|
| 2001 | + unsigned int *rss_size, u8 *vivld, u8 *vin); |
---|
1741 | 2002 | int t4_free_vi(struct adapter *adap, unsigned int mbox, |
---|
1742 | 2003 | unsigned int pf, unsigned int vf, |
---|
1743 | 2004 | unsigned int viid); |
---|
1744 | 2005 | int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, |
---|
1745 | | - int mtu, int promisc, int all_multi, int bcast, int vlanex, |
---|
1746 | | - bool sleep_ok); |
---|
| 2006 | + unsigned int viid_mirror, int mtu, int promisc, int all_multi, |
---|
| 2007 | + int bcast, int vlanex, bool sleep_ok); |
---|
1747 | 2008 | int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, |
---|
1748 | 2009 | const u8 *addr, const u8 *mask, unsigned int idx, |
---|
1749 | 2010 | u8 lookup_type, u8 port_id, bool sleep_ok); |
---|
.. | .. |
---|
1763 | 2024 | unsigned int viid, unsigned int naddr, |
---|
1764 | 2025 | const u8 **addr, bool sleep_ok); |
---|
1765 | 2026 | int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, |
---|
1766 | | - int idx, const u8 *addr, bool persist, bool add_smt); |
---|
| 2027 | + int idx, const u8 *addr, bool persist, u8 *smt_idx); |
---|
1767 | 2028 | int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, |
---|
1768 | 2029 | bool ucast, u64 vec, bool sleep_ok); |
---|
1769 | 2030 | int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, |
---|
.. | .. |
---|
1792 | 2053 | int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, |
---|
1793 | 2054 | unsigned int vf, unsigned int eqid); |
---|
1794 | 2055 | int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type); |
---|
| 2056 | +int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers, |
---|
| 2057 | + u16 *dbqtimers); |
---|
1795 | 2058 | void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); |
---|
1796 | 2059 | int t4_update_port_info(struct port_info *pi); |
---|
1797 | 2060 | int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, |
---|
.. | .. |
---|
1812 | 2075 | enum ctxt_type ctype, u32 *data); |
---|
1813 | 2076 | int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, |
---|
1814 | 2077 | enum ctxt_type ctype, u32 *data); |
---|
1815 | | -int t4_sched_params(struct adapter *adapter, int type, int level, int mode, |
---|
1816 | | - int rateunit, int ratemode, int channel, int class, |
---|
1817 | | - int minrate, int maxrate, int weight, int pktsize); |
---|
| 2078 | +int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode, |
---|
| 2079 | + u8 rateunit, u8 ratemode, u8 channel, u8 class, |
---|
| 2080 | + u32 minrate, u32 maxrate, u16 weight, u16 pktsize, |
---|
| 2081 | + u16 burstsize); |
---|
1818 | 2082 | void t4_sge_decode_idma_state(struct adapter *adapter, int state); |
---|
1819 | 2083 | void t4_idma_monitor_init(struct adapter *adapter, |
---|
1820 | 2084 | struct sge_idma_monitor_state *idma); |
---|
.. | .. |
---|
1837 | 2101 | int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, |
---|
1838 | 2102 | unsigned int devid, unsigned int offset, |
---|
1839 | 2103 | unsigned int len, u8 *buf); |
---|
| 2104 | +int t4_load_boot(struct adapter *adap, u8 *boot_data, |
---|
| 2105 | + unsigned int boot_addr, unsigned int size); |
---|
| 2106 | +int t4_load_bootcfg(struct adapter *adap, |
---|
| 2107 | + const u8 *cfg_data, unsigned int size); |
---|
1840 | 2108 | void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); |
---|
1841 | 2109 | void free_tx_desc(struct adapter *adap, struct sge_txq *q, |
---|
1842 | 2110 | unsigned int n, bool unmap); |
---|
| 2111 | +void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq, |
---|
| 2112 | + u32 ndesc); |
---|
| 2113 | +int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc); |
---|
| 2114 | +void cxgb4_ethofld_restart(struct tasklet_struct *t); |
---|
| 2115 | +int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp, |
---|
| 2116 | + const struct pkt_gl *si); |
---|
1843 | 2117 | void free_txq(struct adapter *adap, struct sge_txq *q); |
---|
1844 | 2118 | void cxgb4_reclaim_completed_tx(struct adapter *adap, |
---|
1845 | 2119 | struct sge_txq *q, bool unmap); |
---|
.. | .. |
---|
1850 | 2124 | void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, |
---|
1851 | 2125 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, |
---|
1852 | 2126 | const dma_addr_t *addr); |
---|
| 2127 | +void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q, |
---|
| 2128 | + struct ulptx_sgl *sgl, u64 *end, |
---|
| 2129 | + const dma_addr_t *addr, u32 start, u32 send_len); |
---|
1853 | 2130 | void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); |
---|
1854 | 2131 | int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, |
---|
1855 | 2132 | u16 vlan); |
---|
1856 | 2133 | int cxgb4_dcb_enabled(const struct net_device *dev); |
---|
| 2134 | + |
---|
| 2135 | +int cxgb4_thermal_init(struct adapter *adap); |
---|
| 2136 | +int cxgb4_thermal_remove(struct adapter *adap); |
---|
| 2137 | +int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec, |
---|
| 2138 | + cpumask_var_t *aff_mask, int idx); |
---|
| 2139 | +void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask); |
---|
| 2140 | + |
---|
| 2141 | +int cxgb4_change_mac(struct port_info *pi, unsigned int viid, |
---|
| 2142 | + int *tcam_idx, const u8 *addr, |
---|
| 2143 | + bool persistent, u8 *smt_idx); |
---|
| 2144 | + |
---|
| 2145 | +int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid, |
---|
| 2146 | + bool free, unsigned int naddr, |
---|
| 2147 | + const u8 **addr, u16 *idx, |
---|
| 2148 | + u64 *hash, bool sleep_ok); |
---|
| 2149 | +int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid, |
---|
| 2150 | + unsigned int naddr, const u8 **addr, bool sleep_ok); |
---|
| 2151 | +int cxgb4_init_mps_ref_entries(struct adapter *adap); |
---|
| 2152 | +void cxgb4_free_mps_ref_entries(struct adapter *adap); |
---|
| 2153 | +int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, |
---|
| 2154 | + const u8 *addr, const u8 *mask, |
---|
| 2155 | + unsigned int vni, unsigned int vni_mask, |
---|
| 2156 | + u8 dip_hit, u8 lookup_type, bool sleep_ok); |
---|
| 2157 | +int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, |
---|
| 2158 | + int idx, bool sleep_ok); |
---|
| 2159 | +int cxgb4_free_raw_mac_filt(struct adapter *adap, |
---|
| 2160 | + unsigned int viid, |
---|
| 2161 | + const u8 *addr, |
---|
| 2162 | + const u8 *mask, |
---|
| 2163 | + unsigned int idx, |
---|
| 2164 | + u8 lookup_type, |
---|
| 2165 | + u8 port_id, |
---|
| 2166 | + bool sleep_ok); |
---|
| 2167 | +int cxgb4_alloc_raw_mac_filt(struct adapter *adap, |
---|
| 2168 | + unsigned int viid, |
---|
| 2169 | + const u8 *addr, |
---|
| 2170 | + const u8 *mask, |
---|
| 2171 | + unsigned int idx, |
---|
| 2172 | + u8 lookup_type, |
---|
| 2173 | + u8 port_id, |
---|
| 2174 | + bool sleep_ok); |
---|
| 2175 | +int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid, |
---|
| 2176 | + int *tcam_idx, const u8 *addr, |
---|
| 2177 | + bool persistent, u8 *smt_idx); |
---|
| 2178 | +int cxgb4_get_msix_idx_from_bmap(struct adapter *adap); |
---|
| 2179 | +void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx); |
---|
| 2180 | +void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q); |
---|
| 2181 | +void cxgb4_quiesce_rx(struct sge_rspq *q); |
---|
| 2182 | +int cxgb4_port_mirror_alloc(struct net_device *dev); |
---|
| 2183 | +void cxgb4_port_mirror_free(struct net_device *dev); |
---|
| 2184 | +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) |
---|
| 2185 | +int cxgb4_set_ktls_feature(struct adapter *adap, bool enable); |
---|
| 2186 | +#endif |
---|
1857 | 2187 | #endif /* __CXGB4_H__ */ |
---|