From f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2 Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Wed, 31 Jan 2024 01:04:47 +0000 Subject: [PATCH] add driver 5G --- kernel/drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6362 +++++++++++++++++++++++++++++++++++++++++++++++------------ 1 files changed, 5,061 insertions(+), 1,301 deletions(-) diff --git a/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5e30299..c67a108 100644 --- a/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2019 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -31,6 +31,7 @@ #include <asm/page.h> #include <linux/time.h> #include <linux/mii.h> +#include <linux/mdio.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/if_bridge.h> @@ -53,6 +54,7 @@ #include <net/pkt_cls.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> +#include <net/page_pool.h> #include "bnxt_hsi.h" #include "bnxt.h" @@ -67,13 +69,11 @@ #include "bnxt_debugfs.h" #define BNXT_TX_TIMEOUT (5 * HZ) - -static const char version[] = - "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n"; +#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ + NETIF_MSG_TX_ERR) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); -MODULE_VERSION(DRV_MODULE_VERSION); #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) #define BNXT_RX_DMA_OFFSET NET_SKB_PAD @@ -111,12 +111,22 @@ BCM57452, BCM57454, BCM5745x_NPAR, + BCM57508, + BCM57504, + BCM57502, + BCM57508_NPAR, + BCM57504_NPAR, + BCM57502_NPAR, BCM58802, BCM58804, BCM58808, NETXTREME_E_VF, NETXTREME_C_VF, NETXTREME_S_VF, + NETXTREME_C_VF_HV, + NETXTREME_E_VF_HV, + NETXTREME_E_P5_VF, + NETXTREME_E_P5_VF_HV, }; /* indexed by enum above */ @@ -152,12 +162,22 @@ [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, + [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, + [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, + [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, + [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, + [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, + [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, + [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, + [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, + [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet 
Virtual Function" }, + [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { @@ -196,17 +216,38 @@ { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, + { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, + { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, + { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, + { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, + { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, + { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, #endif { 0 } @@ -223,10 +264,16 @@ static const u16 bnxt_async_events_arr[] = { ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, + ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, + ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, + ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, }; static struct workqueue_struct *bnxt_pf_wq; @@ -234,21 +281,54 @@ static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || - idx == NETXTREME_S_VF); + idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || + idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || + idx == NETXTREME_E_P5_VF_HV); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) #define 
DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) -#define BNXT_CP_DB_REARM(db, raw_cons) \ - writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db) - -#define BNXT_CP_DB(db, raw_cons) \ - writel(DB_CP_FLAGS | RING_CMP(raw_cons), db) - #define BNXT_CP_DB_IRQ_DIS(db) \ writel(DB_CP_IRQ_DIS_FLAGS, db) + +#define BNXT_DB_CQ(db, idx) \ + writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell) + +#define BNXT_DB_NQ_P5(db, idx) \ + writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell) + +#define BNXT_DB_CQ_ARM(db, idx) \ + writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell) + +#define BNXT_DB_NQ_ARM_P5(db, idx) \ + writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell) + +static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + BNXT_DB_NQ_P5(db, idx); + else + BNXT_DB_CQ(db, idx); +} + +static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + BNXT_DB_NQ_ARM_P5(db, idx); + else + BNXT_DB_CQ_ARM(db, idx); +} + +static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx), + db->doorbell); + else + BNXT_DB_CQ(db, idx); +} const u16 bnxt_lhint_arr[] = { TX_BD_FLAGS_LHINT_512_AND_SMALLER, @@ -280,6 +360,13 @@ return 0; return md_dst->u.port_info.port_id; +} + +static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + u16 prod) +{ + bnxt_db_write(bp, &txr->tx_db, prod); + txr->kick_pending = 0; } static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp, @@ -320,6 +407,7 @@ i = skb_get_queue_mapping(skb); if (unlikely(i >= bp->tx_nr_rings)) { dev_kfree_skb_any(skb); + atomic_long_inc(&dev->tx_dropped); return NETDEV_TX_OK; } @@ -329,6 +417,10 @@ free_size = bnxt_tx_avail(bp, txr); if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { + /* We must have raced with NAPI cleanup */ + if (net_ratelimit() && txr->kick_pending) + netif_warn(bp, tx_err, dev, + "bnxt: ring busy w/ flush pending!\n"); if (bnxt_txr_netif_try_stop_queue(bp, txr, txq)) return NETDEV_TX_BUSY; } @@ -361,6 +453,7 @@ struct tx_push_buffer *tx_push_buf = txr->tx_push; struct tx_push_bd *tx_push = &tx_push_buf->push_bd; struct tx_bd_ext *tx_push1 = &tx_push->txbd2; + void __iomem *db = txr->tx_db.doorbell; void *pdata = tx_push_buf->data; u64 *end; int j, push_len; @@ -418,12 +511,11 @@ push_len = (length + sizeof(*tx_push) + 7) / 8; if (push_len > 16) { - __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); - __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1, + __iowrite64_copy(db, tx_push_buf, 16); + __iowrite32_copy(db + 4, tx_push_buf + 1, (push_len - 16) << 1); } else { - __iowrite64_copy(txr->tx_doorbell, tx_push_buf, - push_len); + __iowrite64_copy(db, tx_push_buf, push_len); } goto tx_done; @@ -432,21 +524,16 @@ normal_tx: if (length < BNXT_MIN_PKT_SIZE) { pad = BNXT_MIN_PKT_SIZE - length; - if (skb_pad(skb, pad)) { + if (skb_pad(skb, pad)) /* SKB already freed. 
*/ - tx_buf->skb = NULL; - return NETDEV_TX_OK; - } + goto tx_kick_pending; length = BNXT_MIN_PKT_SIZE; } mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { - dev_kfree_skb_any(skb); - tx_buf->skb = NULL; - return NETDEV_TX_OK; - } + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) + goto tx_free; dma_unmap_addr_set(tx_buf, mapping, mapping); flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | @@ -530,16 +617,16 @@ prod = NEXT_TX(prod); txr->tx_prod = prod; - if (!skb->xmit_more || netif_xmit_stopped(txq)) - bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod); + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) + bnxt_txr_db_kick(bp, txr, prod); + else + txr->kick_pending = 1; tx_done: - mmiowb(); - if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { - if (skb->xmit_more && !tx_buf->is_push) - bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod); + if (netdev_xmit_more() && !tx_buf->is_push) + bnxt_txr_db_kick(bp, txr, prod); bnxt_txr_netif_try_stop_queue(bp, txr, txq); } @@ -551,7 +638,6 @@ /* start back at beginning and unmap skb */ prod = txr->tx_prod; tx_buf = &txr->tx_buf_ring[prod]; - tx_buf->skb = NULL; dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); prod = NEXT_TX(prod); @@ -565,7 +651,13 @@ PCI_DMA_TODEVICE); } +tx_free: dev_kfree_skb_any(skb); +tx_kick_pending: + if (txr->kick_pending) + bnxt_txr_db_kick(bp, txr, txr->tx_prod); + txr->tx_buf_ring[txr->tx_prod].skb = NULL; + atomic_long_inc(&dev->tx_dropped); return NETDEV_TX_OK; } @@ -631,19 +723,20 @@ } static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, gfp_t gfp) { struct device *dev = &bp->pdev->dev; struct page *page; - page = alloc_page(gfp); + page = page_pool_dev_alloc_pages(rxr->page_pool); if (!page) return NULL; *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); if (dma_mapping_error(dev, *mapping)) { - __free_page(page); + page_pool_recycle_direct(rxr->page_pool, page); return NULL; } *mapping += bp->rx_dma_offset; @@ -679,7 +772,8 @@ dma_addr_t mapping; if (BNXT_RX_PAGE_MODE(bp)) { - struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp); + struct page *page = + __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); if (!page) return -ENOMEM; @@ -788,15 +882,40 @@ return 0; } -static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, - u32 agg_bufs) +static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + u16 cp_cons, u16 curr) { + struct rx_agg_cmp *agg; + + cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + return agg; +} + +static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 agg_id, u16 curr) +{ + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; + + return &tpa_info->agg_arr[curr]; +} + +static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, + u16 start, u32 agg_bufs, bool tpa) +{ + struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt *bp = bnapi->bp; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u16 sw_prod = rxr->rx_sw_agg_prod; + bool p5_tpa = false; u32 i; + + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) + p5_tpa = true; for (i = 0; i < agg_bufs; i++) { u16 cons; @@ -805,8 +924,10 
@@ struct rx_bd *prod_bd; struct page *page; - agg = (struct rx_agg_cmp *) - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); + else + agg = bnxt_get_agg(bp, cpr, idx, start + i); cons = agg->rx_agg_cmp_opaque; __clear_bit(cons, rxr->rx_agg_bmap); @@ -834,7 +955,6 @@ prod = NEXT_RX_AGG(prod); sw_prod = NEXT_RX_AGG(sw_prod); - cp_cons = NEXT_CMP(cp_cons); } rxr->rx_agg_prod = prod; rxr->rx_sw_agg_prod = sw_prod; @@ -848,7 +968,7 @@ { unsigned int payload = offset_and_len >> 16; unsigned int len = offset_and_len & 0xffff; - struct skb_frag_struct *frag; + skb_frag_t *frag; struct page *page = data; u16 prod = rxr->rx_prod; struct sk_buff *skb; @@ -862,9 +982,10 @@ dma_addr -= bp->rx_dma_offset; dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + page_pool_release_page(rxr->page_pool, page); if (unlikely(!payload)) - payload = eth_get_headlen(data_ptr, len); + payload = eth_get_headlen(bp->dev, data_ptr, len); skb = napi_alloc_skb(&rxr->bnapi->napi, payload); if (!skb) { @@ -879,7 +1000,7 @@ frag = &skb_shinfo(skb)->frags[0]; skb_frag_size_sub(frag, payload); - frag->page_offset += payload; + skb_frag_off_add(frag, payload); skb->data_len -= payload; skb->tail += payload; @@ -915,15 +1036,20 @@ return skb; } -static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, - struct sk_buff *skb, u16 cp_cons, - u32 agg_bufs) +static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) { + struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; + bool p5_tpa = false; u32 i; + + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) + p5_tpa = true; for (i = 0; i < agg_bufs; i++) { u16 cons, frag_len; @@ -932,8 +1058,10 @@ struct page *page; dma_addr_t mapping; - agg = (struct rx_agg_cmp *) - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); + else + agg = bnxt_get_agg(bp, cpr, idx, i); cons = agg->rx_agg_cmp_opaque; frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; @@ -967,7 +1095,7 @@ * allocated already. 
*/ rxr->rx_agg_prod = prod; - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i); + bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); return NULL; } @@ -980,7 +1108,6 @@ skb->truesize += PAGE_SIZE; prod = NEXT_RX_AGG(prod); - cp_cons = NEXT_CMP(cp_cons); } rxr->rx_agg_prod = prod; return skb; @@ -1024,10 +1151,9 @@ return skb; } -static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, +static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u32 *raw_cons, void *cmp) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct rx_cmp *rxcmp = cmp; u32 tmp_raw_cons = *raw_cons; u8 cmp_type, agg_bufs = 0; @@ -1041,9 +1167,10 @@ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { struct rx_tpa_end_cmp *tpa_end = cmp; - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_AGG_BUFS) >> - RX_TPA_END_CMP_AGG_BUFS_SHIFT; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return 0; + + agg_bufs = TPA_END_AGG_BUFS(tpa_end); } if (agg_bufs) { @@ -1054,6 +1181,17 @@ return 0; } +static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) +{ + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) + return; + + if (BNXT_PF(bp)) + queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); + else + schedule_delayed_work(&bp->fw_reset_task, delay); +} + static void bnxt_queue_sp_work(struct bnxt *bp) { if (BNXT_PF(bp)) @@ -1062,44 +1200,73 @@ schedule_work(&bp->sp_task); } -static void bnxt_cancel_sp_work(struct bnxt *bp) -{ - if (BNXT_PF(bp)) - flush_workqueue(bnxt_pf_wq); - else - cancel_work_sync(&bp->sp_task); -} - static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { if (!rxr->bnapi->in_reset) { rxr->bnapi->in_reset = true; - set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + if (bp->flags & BNXT_FLAG_CHIP_P5) + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + else + set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } rxr->rx_next_cons = 0xffff; +} + +static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + u16 idx = agg_id & MAX_TPA_P5_MASK; + + if (test_bit(idx, map->agg_idx_bmap)) + idx = find_first_zero_bit(map->agg_idx_bmap, + BNXT_AGG_IDX_BMAP_SIZE); + __set_bit(idx, map->agg_idx_bmap); + map->agg_id_tbl[agg_id] = idx; + return idx; +} + +static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + __clear_bit(idx, map->agg_idx_bmap); +} + +static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + return map->agg_id_tbl[agg_id]; } static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, struct rx_tpa_start_cmp *tpa_start, struct rx_tpa_start_cmp_ext *tpa_start1) { - u8 agg_id = TPA_START_AGG_ID(tpa_start); - u16 cons, prod; - struct bnxt_tpa_info *tpa_info; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct bnxt_tpa_info *tpa_info; + u16 cons, prod, agg_id; struct rx_bd *prod_bd; dma_addr_t mapping; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + agg_id = TPA_START_AGG_ID_P5(tpa_start); + agg_id = bnxt_alloc_agg_idx(rxr, agg_id); + } else { + agg_id = TPA_START_AGG_ID(tpa_start); + } cons = tpa_start->rx_tpa_start_cmp_opaque; prod = rxr->rx_prod; cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf = &rxr->rx_buf_ring[prod]; tpa_info = &rxr->rx_tpa[agg_id]; - if (unlikely(cons != rxr->rx_next_cons)) { - netdev_warn(bp->dev, "TPA cons %x != expected 
cons %x\n", - cons, rxr->rx_next_cons); + if (unlikely(cons != rxr->rx_next_cons || + TPA_START_ERROR(tpa_start))) { + netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", + cons, rxr->rx_next_cons, + TPA_START_ERROR_CODE(tpa_start1)); bnxt_sched_reset(bp, rxr); return; } @@ -1138,12 +1305,12 @@ } else { tpa_info->hash_type = PKT_HASH_TYPE_NONE; tpa_info->gso_type = 0; - if (netif_msg_rx_err(bp)) - netdev_warn(bp->dev, "TPA packet without valid hash\n"); + netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); } tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); + tpa_info->agg_count = 0; rxr->rx_prod = NEXT_RX(prod); cons = NEXT_RX(cons); @@ -1155,12 +1322,36 @@ cons_rx_buf->data = NULL; } -static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, - u16 cp_cons, u32 agg_bufs) +static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); } + +#ifdef CONFIG_INET +static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) +{ + struct udphdr *uh = NULL; + + if (ip_proto == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } +} +#endif static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, int payload_off, int tcp_ts, @@ -1219,28 +1410,39 @@ } if (inner_mac_off) { /* tunnel */ - struct udphdr *uh = NULL; __be16 proto = *((__be16 *)(skb->data + outer_ip_off - ETH_HLEN - 2)); - if (proto == htons(ETH_P_IP)) { - struct iphdr *iph = (struct iphdr *)skb->data; + bnxt_gro_tunnel(skb, proto); + } +#endif + return skb; +} - if (iph->protocol == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; +static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, + int payload_off, int tcp_ts, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + u16 outer_ip_off, inner_ip_off, inner_mac_off; + u32 hdr_info = tpa_info->hdr_info; + int iphdr_len, nw_off; - if (iph->nexthdr == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } - if (uh) { - if (uh->check) - skb_shinfo(skb)->gso_type |= - SKB_GSO_UDP_TUNNEL_CSUM; - else - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; - } + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); + + nw_off = inner_ip_off - ETH_HLEN; + skb_set_network_header(skb, nw_off); + iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr); + skb_set_transport_header(skb, nw_off + iphdr_len); + + if (inner_mac_off) { /* tunnel */ + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - + ETH_HLEN - 2)); + + bnxt_gro_tunnel(skb, proto); } #endif return skb; @@ -1287,28 +1489,8 @@ return NULL; } - if (nw_off) { /* tunnel */ - struct udphdr *uh = NULL; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = (struct iphdr *)skb->data; - - if (iph->protocol == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; - - if (iph->nexthdr == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } - if (uh) { - if (uh->check) - skb_shinfo(skb)->gso_type |= - SKB_GSO_UDP_TUNNEL_CSUM; - else - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; - } - } + if (nw_off) /* tunnel */ + bnxt_gro_tunnel(skb, skb->protocol); #endif return skb; } @@ -1331,9 +1513,10 @@ skb_shinfo(skb)->gso_size = le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); skb_shinfo(skb)->gso_type = tpa_info->gso_type; - payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_PAYLOAD_OFFSET) >> - RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; + if (bp->flags & BNXT_FLAG_CHIP_P5) + payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); + else + payload_off = TPA_END_PAYLOAD_OFF(tpa_end); skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); if (likely(skb)) tcp_gro_complete(skb); @@ -1353,51 +1536,68 @@ } static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, - struct bnxt_napi *bnapi, + struct bnxt_cp_ring_info *cpr, u32 *raw_cons, struct rx_tpa_end_cmp *tpa_end, struct rx_tpa_end_cmp_ext *tpa_end1, u8 *event) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - u8 agg_id = TPA_END_AGG_ID(tpa_end); u8 *data_ptr, agg_bufs; - u16 cp_cons = RING_CMP(*raw_cons); unsigned int len; struct bnxt_tpa_info *tpa_info; dma_addr_t mapping; struct sk_buff *skb; + u16 idx = 0, agg_id; void *data; + bool gro; if (unlikely(bnapi->in_reset)) { - int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end); + int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); if (rc < 0) return ERR_PTR(-EBUSY); return NULL; } - tpa_info = &rxr->rx_tpa[agg_id]; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + agg_id = TPA_END_AGG_ID_P5(tpa_end); + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); + tpa_info = &rxr->rx_tpa[agg_id]; + if (unlikely(agg_bufs != tpa_info->agg_count)) { + netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", + agg_bufs, tpa_info->agg_count); + agg_bufs = tpa_info->agg_count; + } + tpa_info->agg_count = 0; + *event |= BNXT_AGG_EVENT; + bnxt_free_agg_idx(rxr, agg_id); + idx = agg_id; + gro = !!(bp->flags & BNXT_FLAG_GRO); + } else { + agg_id = TPA_END_AGG_ID(tpa_end); + agg_bufs = TPA_END_AGG_BUFS(tpa_end); + tpa_info = &rxr->rx_tpa[agg_id]; + idx = RING_CMP(*raw_cons); + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) + return ERR_PTR(-EBUSY); + + *event |= BNXT_AGG_EVENT; + idx = NEXT_CMP(idx); + } + gro = !!TPA_END_GRO(tpa_end); + } data = tpa_info->data; data_ptr = tpa_info->data_ptr; prefetch(data_ptr); len = tpa_info->len; mapping = tpa_info->mapping; - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; - - if (agg_bufs) { - if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) - return ERR_PTR(-EBUSY); - - *event |= 
BNXT_AGG_EVENT; - cp_cons = NEXT_CMP(cp_cons); - } - if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); if (agg_bufs > MAX_SKB_FRAGS) netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", agg_bufs, (int)MAX_SKB_FRAGS); @@ -1407,7 +1607,7 @@ if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } } else { @@ -1416,7 +1616,7 @@ new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } @@ -1431,7 +1631,7 @@ if (!skb) { kfree(data); - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1439,7 +1639,7 @@ } if (agg_bufs) { - skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). */ return NULL; @@ -1453,12 +1653,17 @@ skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { - u16 vlan_proto = tpa_info->metadata >> - RX_CMP_FLAGS2_METADATA_TPID_SFT; + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { + __be16 vlan_proto = htons(tpa_info->metadata >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + return NULL; + } } skb_checksum_none_assert(skb); @@ -1468,10 +1673,22 @@ (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; } - if (TPA_END_GRO(tpa_end)) + if (gro) skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); return skb; +} + +static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + struct rx_agg_cmp *rx_agg) +{ + u16 agg_id = TPA_AGG_AGG_ID(rx_agg); + struct bnxt_tpa_info *tpa_info; + + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + tpa_info = &rxr->rx_tpa[agg_id]; + BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); + tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; } static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, @@ -1493,10 +1710,10 @@ * -ENOMEM - packet aborted due to out of memory * -EIO - packet aborted due to hw error indicated in BD */ -static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, - u8 *event) +static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u32 *raw_cons, u8 *event) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; struct net_device *dev = bp->dev; struct rx_cmp *rxcmp; @@ -1515,6 +1732,13 @@ rxcmp = (struct rx_cmp *) &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cmp_type = RX_CMP_TYPE(rxcmp); + + if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { + bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); + goto next_rx_no_prod_no_len; + } + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); cp_cons = RING_CMP(tmp_raw_cons); rxcmp1 = (struct rx_cmp_ext *) @@ -1523,8 +1747,10 @@ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) return -EBUSY; - cmp_type = RX_CMP_TYPE(rxcmp); - + /* The valid test 
of the entry must be done first before + * reading any further. + */ + dma_rmb(); prod = rxr->rx_prod; if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { @@ -1535,7 +1761,7 @@ goto next_rx_no_prod_no_len; } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { - skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, + skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, (struct rx_tpa_end_cmp *)rxcmp, (struct rx_tpa_end_cmp_ext *)rxcmp1, event); @@ -1553,12 +1779,16 @@ cons = rxcmp->rx_cmp_opaque; if (unlikely(cons != rxr->rx_next_cons)) { - int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); + int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); - netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", - cons, rxr->rx_next_cons); + /* 0xffff is forced error, don't print it */ + if (rxr->rx_next_cons != 0xffff) + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); - return rc1; + if (rc1) + return rc1; + goto next_rx_no_prod_no_len; } rx_buf = &rxr->rx_buf_ring[cons]; data = rx_buf->data; @@ -1583,12 +1813,18 @@ bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, + false); rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { - netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); - bnxt_sched_reset(bp, rxr); + bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; + if (!(bp->flags & BNXT_FLAG_CHIP_P5) && + !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { + netdev_warn_once(bp->dev, "RX buffer error %x\n", + rx_err); + bnxt_sched_reset(bp, rxr); + } } goto next_rx_no_len; } @@ -1606,7 +1842,8 @@ bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, + agg_bufs, false); rc = -ENOMEM; goto next_rx; } @@ -1626,7 +1863,7 @@ } if (agg_bufs) { - skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); if (!skb) { rc = -ENOMEM; goto next_rx; @@ -1648,12 +1885,18 @@ if ((rxcmp1->rx_cmp_flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; - u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(meta_data >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + goto next_rx; + } } skb_checksum_none_assert(skb); @@ -1665,7 +1908,7 @@ } else { if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { if (dev->features & NETIF_F_RXCSUM) - cpr->rx_l4_csum_errors++; + bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; } } @@ -1689,10 +1932,10 @@ /* In netpoll mode, if we are using a combined completion ring, we need to * discard the rx packets and recycle the buffers. 
*/ -static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, +static int bnxt_force_rx_discard(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, u32 *raw_cons, u8 *event) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; u32 tmp_raw_cons = *raw_cons; struct rx_cmp_ext *rxcmp1; struct rx_cmp *rxcmp; @@ -1711,6 +1954,10 @@ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) return -EBUSY; + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); cmp_type = RX_CMP_TYPE(rxcmp); if (cmp_type == CMP_TYPE_RX_L2_CMP) { rxcmp1->rx_cmp_cfa_code_errors_v2 |= @@ -1722,22 +1969,73 @@ tpa_end1->rx_tpa_end_cmp_errors_v2 |= cpu_to_le32(RX_TPA_END_CMP_ERRORS); } - return bnxt_rx_pkt(bp, bnapi, raw_cons, event); + return bnxt_rx_pkt(bp, cpr, raw_cons, event); +} + +u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg = fw_health->regs[reg_idx]; + u32 reg_type, reg_off, val = 0; + + reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); + reg_off = BNXT_FW_HEALTH_REG_OFF(reg); + switch (reg_type) { + case BNXT_FW_HEALTH_REG_TYPE_CFG: + pci_read_config_dword(bp->pdev, reg_off, &val); + break; + case BNXT_FW_HEALTH_REG_TYPE_GRC: + reg_off = fw_health->mapped_regs[reg_idx]; + fallthrough; + case BNXT_FW_HEALTH_REG_TYPE_BAR0: + val = readl(bp->bar0 + reg_off); + break; + case BNXT_FW_HEALTH_REG_TYPE_BAR1: + val = readl(bp->bar1 + reg_off); + break; + } + if (reg_idx == BNXT_FW_RESET_INPROG_REG) + val &= fw_health->fw_reset_inprog_reg_mask; + return val; +} + +static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) +{ + int i; + + for (i = 0; i < bp->rx_nr_rings; i++) { + u16 grp_idx = bp->rx_ring[i].bnapi->index; + struct bnxt_ring_grp_info *grp_info; + + grp_info = &bp->grp_info[grp_idx]; + if (grp_info->agg_fw_ring_id == ring_id) + return grp_idx; + } + return INVALID_HW_RING_ID; } #define BNXT_GET_EVENT_PORT(data) \ ((data) & \ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) +#define BNXT_EVENT_RING_TYPE(data2) \ + ((data2) & \ + ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) + +#define BNXT_EVENT_RING_TYPE_RX(data2) \ + (BNXT_EVENT_RING_TYPE(data2) == \ + ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) + static int bnxt_async_event_process(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) { u16 event_id = le16_to_cpu(cmpl->event_id); + u32 data1 = le32_to_cpu(cmpl->event_data1); + u32 data2 = le32_to_cpu(cmpl->event_data2); /* TODO CHIMP_FW: Define event id's for link change, error etc */ switch (event_id) { case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { - u32 data1 = le32_to_cpu(cmpl->event_data1); struct bnxt_link_info *link_info = &bp->link_info; if (BNXT_VF(bp)) @@ -1755,7 +2053,11 @@ } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); } - /* fall through */ + fallthrough; + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: + case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: + set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); + fallthrough; case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); break; @@ -1763,7 +2065,6 @@ set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); break; case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { - u32 data1 = le32_to_cpu(cmpl->event_data1); u16 port_id = BNXT_GET_EVENT_PORT(data1); if (BNXT_VF(bp)) @@ -1780,6 +2081,93 @@ goto async_event_process_exit; set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); break; + 
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { + char *fatal_str = "non-fatal"; + + if (!bp->fw_health) + goto async_event_process_exit; + + bp->fw_reset_timestamp = jiffies; + bp->fw_reset_min_dsecs = cmpl->timestamp_lo; + if (!bp->fw_reset_min_dsecs) + bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; + bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); + if (!bp->fw_reset_max_dsecs) + bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; + if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { + fatal_str = "fatal"; + set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + } + netif_warn(bp, hw, bp->dev, + "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", + fatal_str, data1, data2, + bp->fw_reset_min_dsecs * 100, + bp->fw_reset_max_dsecs * 100); + set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); + break; + } + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { + struct bnxt_fw_health *fw_health = bp->fw_health; + + if (!fw_health) + goto async_event_process_exit; + + if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { + fw_health->enabled = false; + netif_info(bp, drv, bp->dev, + "Error recovery info: error recovery[0]\n"); + break; + } + fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); + fw_health->tmr_multiplier = + DIV_ROUND_UP(fw_health->polling_dsecs * HZ, + bp->current_interval * 10); + fw_health->tmr_counter = fw_health->tmr_multiplier; + if (!fw_health->enabled) + fw_health->last_fw_heartbeat = + bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + netif_info(bp, drv, bp->dev, + "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n", + fw_health->master, fw_health->last_fw_reset_cnt, + bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG)); + if (!fw_health->enabled) { + /* Make sure tmr_counter is set and visible to + * bnxt_health_check() before setting enabled to true. + */ + smp_wmb(); + fw_health->enabled = true; + } + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: + netif_notice(bp, hw, bp->dev, + "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", + data1, data2); + goto async_event_process_exit; + case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { + struct bnxt_rx_ring_info *rxr; + u16 grp_idx; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + goto async_event_process_exit; + + netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", + BNXT_EVENT_RING_TYPE(data2), data1); + if (!BNXT_EVENT_RING_TYPE_RX(data2)) + goto async_event_process_exit; + + grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); + if (grp_idx == INVALID_HW_RING_ID) { + netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", + data1); + goto async_event_process_exit; + } + rxr = bp->bnapi[grp_idx]->rx_ring; + bnxt_sched_reset(bp, rxr); + goto async_event_process_exit; + } default: goto async_event_process_exit; } @@ -1800,7 +2188,7 @@ case CMPL_BASE_TYPE_HWRM_DONE: seq_id = le16_to_cpu(h_cmpl->sequence_id); if (seq_id == bp->hwrm_intr_seq_id) - bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; + bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; else netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); break; @@ -1873,7 +2261,7 @@ } /* disable ring IRQ */ - BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell); + BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); /* Return here if interrupt is shared and is disabled. 
*/ if (unlikely(atomic_read(&bp->intr_sem) != 0)) @@ -1883,9 +2271,10 @@ return IRQ_HANDLED; } -static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + int budget) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_napi *bnapi = cpr->bnapi; u32 raw_cons = cpr->cp_raw_cons; u32 cons; int tx_pkts = 0; @@ -1893,6 +2282,8 @@ u8 event = 0; struct tx_cmp *txcmp; + cpr->has_more_work = 0; + cpr->had_work_done = 1; while (1) { int rc; @@ -1912,13 +2303,15 @@ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) { rx_pkts = budget; raw_cons = NEXT_RAW_CMP(raw_cons); + if (budget) + cpr->has_more_work = 1; break; } } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { if (likely(budget)) - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); + rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); else - rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, + rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, &event); if (likely(rc >= 0)) rx_pkts += rc; @@ -1941,39 +2334,63 @@ } raw_cons = NEXT_RAW_CMP(raw_cons); - if (rx_pkts && rx_pkts == budget) + if (rx_pkts && rx_pkts == budget) { + cpr->has_more_work = 1; break; + } } + + if (event & BNXT_REDIRECT_EVENT) + xdp_do_flush_map(); if (event & BNXT_TX_EVENT) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; - void __iomem *db = txr->tx_doorbell; u16 prod = txr->tx_prod; /* Sync BD data before updating doorbell */ wmb(); - bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod); + bnxt_db_write_relaxed(bp, &txr->tx_db, prod); } cpr->cp_raw_cons = raw_cons; + bnapi->tx_pkts += tx_pkts; + bnapi->events |= event; + return rx_pkts; +} + +static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) +{ + if (bnapi->tx_pkts) { + bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); + bnapi->tx_pkts = 0; + } + + if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + + if (bnapi->events & BNXT_AGG_EVENT) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + } + bnapi->events = 0; +} + +static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + int budget) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + int rx_pkts; + + rx_pkts = __bnxt_poll_work(bp, cpr, budget); + /* ACK completion ring before freeing tx ring and producing new * buffers in rx/agg rings to prevent overflowing the completion * ring. */ - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); - if (tx_pkts) - bnapi->tx_int(bp, bnapi, tx_pkts); - - if (event & BNXT_RX_EVENT) { - struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - - bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); - if (event & BNXT_AGG_EVENT) - bnxt_db_write(bp, rxr->rx_agg_doorbell, - DB_KEY_RX | rxr->rx_agg_prod); - } + __bnxt_poll_work_done(bp, bnapi); return rx_pkts; } @@ -1987,6 +2404,7 @@ struct rx_cmp_ext *rxcmp1; u32 cp_cons, tmp_raw_cons; u32 raw_cons = cpr->cp_raw_cons; + bool flush_xdp = false; u32 rx_pkts = 0; u8 event = 0; @@ -1999,6 +2417,10 @@ if (!TX_CMP_VALID(txcmp, raw_cons)) break; + /* The valid test of the entry must be done first before + * reading any further. 
+ */ + dma_rmb(); if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { tmp_raw_cons = NEXT_RAW_CMP(raw_cons); cp_cons = RING_CMP(tmp_raw_cons); @@ -2012,11 +2434,13 @@ rxcmp1->rx_cmp_cfa_code_errors_v2 |= cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); + rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); if (likely(rc == -EIO) && budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; + if (event & BNXT_REDIRECT_EVENT) + flush_xdp = true; } else if (unlikely(TX_CMP_TYPE(txcmp) == CMPL_BASE_TYPE_HWRM_DONE)) { bnxt_hwrm_handler(bp, txcmp); @@ -2031,16 +2455,17 @@ } cpr->cp_raw_cons = raw_cons; - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); - bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); + BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); if (event & BNXT_AGG_EVENT) - bnxt_db_write(bp, rxr->rx_agg_doorbell, - DB_KEY_RX | rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + if (flush_xdp) + xdp_do_flush(); if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { napi_complete_done(napi, rx_pkts); - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); } return rx_pkts; } @@ -2053,32 +2478,126 @@ int work_done = 0; while (1) { - work_done += bnxt_poll_work(bp, bnapi, budget - work_done); + work_done += bnxt_poll_work(bp, cpr, budget - work_done); if (work_done >= budget) { if (!budget) - BNXT_CP_DB_REARM(cpr->cp_doorbell, - cpr->cp_raw_cons); + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); break; } if (!bnxt_has_work(bp, cpr)) { if (napi_complete_done(napi, work_done)) - BNXT_CP_DB_REARM(cpr->cp_doorbell, - cpr->cp_raw_cons); + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); break; } } if (bp->flags & BNXT_FLAG_DIM) { - struct net_dim_sample dim_sample; + struct dim_sample dim_sample = {}; - net_dim_sample(cpr->event_ctr, - cpr->rx_packets, - cpr->rx_bytes, - &dim_sample); + dim_update_sample(cpr->event_ctr, + cpr->rx_packets, + cpr->rx_bytes, + &dim_sample); net_dim(&cpr->dim, dim_sample); } - mmiowb(); + return work_done; +} + +static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int i, work_done = 0; + + for (i = 0; i < 2; i++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; + + if (cpr2) { + work_done += __bnxt_poll_work(bp, cpr2, + budget - work_done); + cpr->has_more_work |= cpr2->has_more_work; + } + } + return work_done; +} + +static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, + u64 dbr_type) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int i; + + for (i = 0; i < 2; i++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; + struct bnxt_db_info *db; + + if (cpr2 && cpr2->had_work_done) { + db = &cpr2->cp_db; + writeq(db->db_key64 | dbr_type | + RING_CMP(cpr2->cp_raw_cons), db->doorbell); + cpr2->had_work_done = 0; + } + } + __bnxt_poll_work_done(bp, bnapi); +} + +static int bnxt_poll_p5(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 raw_cons = cpr->cp_raw_cons; + struct bnxt *bp = bnapi->bp; + struct nqe_cn *nqcmp; + int work_done = 0; + u32 cons; + + if (cpr->has_more_work) { + cpr->has_more_work = 0; + work_done = __bnxt_poll_cqs(bp, bnapi, budget); + } + while (1) { + cons = RING_CMP(raw_cons); + nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if 
(!NQ_CMP_VALID(nqcmp, raw_cons)) { + if (cpr->has_more_work) + break; + + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); + cpr->cp_raw_cons = raw_cons; + if (napi_complete_done(napi, work_done)) + BNXT_DB_NQ_ARM_P5(&cpr->cp_db, + cpr->cp_raw_cons); + return work_done; + } + + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); + + if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { + u32 idx = le32_to_cpu(nqcmp->cq_handle_low); + struct bnxt_cp_ring_info *cpr2; + + /* No more budget for RX work */ + if (budget && work_done >= budget && idx == BNXT_RX_HDL) + break; + + cpr2 = cpr->cp_ring_arr[idx]; + work_done += __bnxt_poll_work(bp, cpr2, + budget - work_done); + cpr->has_more_work |= cpr2->has_more_work; + } else { + bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + } + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); + if (raw_cons != cpr->cp_raw_cons) { + cpr->cp_raw_cons = raw_cons; + BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); + } return work_done; } @@ -2095,11 +2614,28 @@ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; int j; + if (!txr->tx_buf_ring) + continue; + for (j = 0; j < max_idx;) { struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; - struct sk_buff *skb = tx_buf->skb; + struct sk_buff *skb; int k, last; + if (i < bp->tx_nr_rings_xdp && + tx_buf->action == XDP_REDIRECT) { + dma_unmap_single(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + dma_unmap_len(tx_buf, len), + PCI_DMA_TODEVICE); + xdp_return_frame(tx_buf->xdpf); + tx_buf->action = 0; + tx_buf->xdpf = NULL; + j++; + continue; + } + + skb = tx_buf->skb; if (!skb) { j++; continue; @@ -2136,89 +2672,101 @@ } } +static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct pci_dev *pdev = bp->pdev; + struct bnxt_tpa_idx_map *map; + int i, max_idx, max_agg_idx; + + max_idx = bp->rx_nr_pages * RX_DESC_CNT; + max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; + if (!rxr->rx_tpa) + goto skip_rx_tpa_free; + + for (i = 0; i < bp->max_tpa; i++) { + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; + u8 *data = tpa_info->data; + + if (!data) + continue; + + dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + + tpa_info->data = NULL; + + kfree(data); + } + +skip_rx_tpa_free: + if (!rxr->rx_buf_ring) + goto skip_rx_buf_free; + + for (i = 0; i < max_idx; i++) { + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; + dma_addr_t mapping = rx_buf->mapping; + void *data = rx_buf->data; + + if (!data) + continue; + + rx_buf->data = NULL; + if (BNXT_RX_PAGE_MODE(bp)) { + mapping -= bp->rx_dma_offset; + dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, + bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + page_pool_recycle_direct(rxr->page_pool, data); + } else { + dma_unmap_single_attrs(&pdev->dev, mapping, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + kfree(data); + } + } + +skip_rx_buf_free: + if (!rxr->rx_agg_ring) + goto skip_rx_agg_free; + + for (i = 0; i < max_agg_idx; i++) { + struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; + struct page *page = rx_agg_buf->page; + + if (!page) + continue; + + dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, + BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, + DMA_ATTR_WEAK_ORDERING); + + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); + + __free_page(page); + } + +skip_rx_agg_free: + if (rxr->rx_page) { + __free_page(rxr->rx_page); 
+ rxr->rx_page = NULL; + } + map = rxr->rx_tpa_idx_map; + if (map) + memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); +} + static void bnxt_free_rx_skbs(struct bnxt *bp) { - int i, max_idx, max_agg_idx; - struct pci_dev *pdev = bp->pdev; + int i; if (!bp->rx_ring) return; - max_idx = bp->rx_nr_pages * RX_DESC_CNT; - max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - int j; - - if (rxr->rx_tpa) { - for (j = 0; j < MAX_TPA; j++) { - struct bnxt_tpa_info *tpa_info = - &rxr->rx_tpa[j]; - u8 *data = tpa_info->data; - - if (!data) - continue; - - dma_unmap_single_attrs(&pdev->dev, - tpa_info->mapping, - bp->rx_buf_use_size, - bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - - tpa_info->data = NULL; - - kfree(data); - } - } - - for (j = 0; j < max_idx; j++) { - struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; - dma_addr_t mapping = rx_buf->mapping; - void *data = rx_buf->data; - - if (!data) - continue; - - rx_buf->data = NULL; - - if (BNXT_RX_PAGE_MODE(bp)) { - mapping -= bp->rx_dma_offset; - dma_unmap_page_attrs(&pdev->dev, mapping, - PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - __free_page(data); - } else { - dma_unmap_single_attrs(&pdev->dev, mapping, - bp->rx_buf_use_size, - bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - kfree(data); - } - } - - for (j = 0; j < max_agg_idx; j++) { - struct bnxt_sw_rx_agg_bd *rx_agg_buf = - &rxr->rx_agg_ring[j]; - struct page *page = rx_agg_buf->page; - - if (!page) - continue; - - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, - PCI_DMA_FROMDEVICE, - DMA_ATTR_WEAK_ORDERING); - - rx_agg_buf->page = NULL; - __clear_bit(j, rxr->rx_agg_bmap); - - __free_page(page); - } - if (rxr->rx_page) { - __free_page(rxr->rx_page); - rxr->rx_page = NULL; - } - } + for (i = 0; i < bp->rx_nr_rings; i++) + bnxt_free_one_rx_ring_skbs(bp, i); } static void bnxt_free_skbs(struct bnxt *bp) @@ -2227,60 +2775,139 @@ bnxt_free_rx_skbs(bp); } -static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) +static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) { struct pci_dev *pdev = bp->pdev; int i; - for (i = 0; i < ring->nr_pages; i++) { - if (!ring->pg_arr[i]) + for (i = 0; i < rmem->nr_pages; i++) { + if (!rmem->pg_arr[i]) continue; - dma_free_coherent(&pdev->dev, ring->page_size, - ring->pg_arr[i], ring->dma_arr[i]); + dma_free_coherent(&pdev->dev, rmem->page_size, + rmem->pg_arr[i], rmem->dma_arr[i]); - ring->pg_arr[i] = NULL; + rmem->pg_arr[i] = NULL; } - if (ring->pg_tbl) { - dma_free_coherent(&pdev->dev, ring->nr_pages * 8, - ring->pg_tbl, ring->pg_tbl_map); - ring->pg_tbl = NULL; + if (rmem->pg_tbl) { + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + dma_free_coherent(&pdev->dev, pg_tbl_size, + rmem->pg_tbl, rmem->pg_tbl_map); + rmem->pg_tbl = NULL; } - if (ring->vmem_size && *ring->vmem) { - vfree(*ring->vmem); - *ring->vmem = NULL; + if (rmem->vmem_size && *rmem->vmem) { + vfree(*rmem->vmem); + *rmem->vmem = NULL; } } -static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) +static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) { - int i; struct pci_dev *pdev = bp->pdev; + u64 valid_bit = 0; + int i; - if (ring->nr_pages > 1) { - ring->pg_tbl = dma_alloc_coherent(&pdev->dev, - ring->nr_pages * 8, - &ring->pg_tbl_map, + if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) + 
valid_bit = PTU_PTE_VALID; + if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, + &rmem->pg_tbl_map, GFP_KERNEL); - if (!ring->pg_tbl) + if (!rmem->pg_tbl) return -ENOMEM; } - for (i = 0; i < ring->nr_pages; i++) { - ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, - ring->page_size, - &ring->dma_arr[i], + for (i = 0; i < rmem->nr_pages; i++) { + u64 extra_bits = valid_bit; + + rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, + rmem->page_size, + &rmem->dma_arr[i], GFP_KERNEL); - if (!ring->pg_arr[i]) + if (!rmem->pg_arr[i]) return -ENOMEM; - if (ring->nr_pages > 1) - ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); + if (rmem->init_val) + memset(rmem->pg_arr[i], rmem->init_val, + rmem->page_size); + if (rmem->nr_pages > 1 || rmem->depth > 0) { + if (i == rmem->nr_pages - 2 && + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) + extra_bits |= PTU_PTE_NEXT_TO_LAST; + else if (i == rmem->nr_pages - 1 && + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) + extra_bits |= PTU_PTE_LAST; + rmem->pg_tbl[i] = + cpu_to_le64(rmem->dma_arr[i] | extra_bits); + } } - if (ring->vmem_size) { - *ring->vmem = vzalloc(ring->vmem_size); - if (!(*ring->vmem)) + if (rmem->vmem_size) { + *rmem->vmem = vzalloc(rmem->vmem_size); + if (!(*rmem->vmem)) + return -ENOMEM; + } + return 0; +} + +static void bnxt_free_tpa_info(struct bnxt *bp) +{ + int i, j; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + kfree(rxr->rx_tpa_idx_map); + rxr->rx_tpa_idx_map = NULL; + if (rxr->rx_tpa) { + for (j = 0; j < bp->max_tpa; j++) { + kfree(rxr->rx_tpa[j].agg_arr); + rxr->rx_tpa[j].agg_arr = NULL; + } + } + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; + } +} + +static int bnxt_alloc_tpa_info(struct bnxt *bp) +{ + int i, j; + + bp->max_tpa = MAX_TPA; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (!bp->max_tpa_v2) + return 0; + bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); + } + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct rx_agg_cmp *agg; + + rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + return -ENOMEM; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + continue; + for (j = 0; j < bp->max_tpa; j++) { + agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); + if (!agg) + return -ENOMEM; + rxr->rx_tpa[j].agg_arr = agg; + } + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), + GFP_KERNEL); + if (!rxr->rx_tpa_idx_map) return -ENOMEM; } return 0; @@ -2293,6 +2920,7 @@ if (!bp->rx_ring) return; + bnxt_free_tpa_info(bp); for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; @@ -2303,23 +2931,43 @@ if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) xdp_rxq_info_unreg(&rxr->xdp_rxq); - kfree(rxr->rx_tpa); - rxr->rx_tpa = NULL; + page_pool_destroy(rxr->page_pool); + rxr->page_pool = NULL; kfree(rxr->rx_agg_bmap); rxr->rx_agg_bmap = NULL; ring = &rxr->rx_ring_struct; - bnxt_free_ring(bp, ring); + bnxt_free_ring(bp, &ring->ring_mem); ring = &rxr->rx_agg_ring_struct; - bnxt_free_ring(bp, ring); + bnxt_free_ring(bp, &ring->ring_mem); } +} + +static int bnxt_alloc_rx_page_pool(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct page_pool_params pp = { 0 }; + + pp.pool_size = bp->rx_ring_size; + pp.nid = dev_to_node(&bp->pdev->dev); + 
pp.dev = &bp->pdev->dev; + pp.dma_dir = DMA_BIDIRECTIONAL; + + rxr->page_pool = page_pool_create(&pp); + if (IS_ERR(rxr->page_pool)) { + int err = PTR_ERR(rxr->page_pool); + + rxr->page_pool = NULL; + return err; + } + return 0; } static int bnxt_alloc_rx_rings(struct bnxt *bp) { - int i, rc, agg_rings = 0, tpa_rings = 0; + int i, rc = 0, agg_rings = 0; if (!bp->rx_ring) return -ENOMEM; @@ -2327,28 +2975,38 @@ if (bp->flags & BNXT_FLAG_AGG_RINGS) agg_rings = 1; - if (bp->flags & BNXT_FLAG_TPA) - tpa_rings = 1; - for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; ring = &rxr->rx_ring_struct; + rc = bnxt_alloc_rx_page_pool(bp, rxr); + if (rc) + return rc; + rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); if (rc < 0) return rc; - rc = bnxt_alloc_ring(bp, ring); + rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, + MEM_TYPE_PAGE_POOL, + rxr->page_pool); + if (rc) { + xdp_rxq_info_unreg(&rxr->xdp_rxq); + return rc; + } + + rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) return rc; + ring->grp_idx = i; if (agg_rings) { u16 mem_size; ring = &rxr->rx_agg_ring_struct; - rc = bnxt_alloc_ring(bp, ring); + rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) return rc; @@ -2358,17 +3016,11 @@ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); if (!rxr->rx_agg_bmap) return -ENOMEM; - - if (tpa_rings) { - rxr->rx_tpa = kcalloc(MAX_TPA, - sizeof(struct bnxt_tpa_info), - GFP_KERNEL); - if (!rxr->rx_tpa) - return -ENOMEM; - } } } - return 0; + if (bp->flags & BNXT_FLAG_TPA) + rc = bnxt_alloc_tpa_info(bp); + return rc; } static void bnxt_free_tx_rings(struct bnxt *bp) @@ -2391,7 +3043,7 @@ ring = &txr->tx_ring_struct; - bnxt_free_ring(bp, ring); + bnxt_free_ring(bp, &ring->ring_mem); } } @@ -2422,7 +3074,7 @@ ring = &txr->tx_ring_struct; - rc = bnxt_alloc_ring(bp, ring); + rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) return rc; @@ -2444,8 +3096,6 @@ mapping = txr->tx_push_mapping + sizeof(struct tx_push_bd); txr->data_mapping = cpu_to_le64(mapping); - - memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); } qidx = bp->tc_to_qidx[j]; ring->queue_id = bp->q_info[qidx].queue_id; @@ -2468,6 +3118,7 @@ struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr; struct bnxt_ring_struct *ring; + int j; if (!bnapi) continue; @@ -2475,12 +3126,51 @@ cpr = &bnapi->cp_ring; ring = &cpr->cp_ring_struct; - bnxt_free_ring(bp, ring); + bnxt_free_ring(bp, &ring->ring_mem); + + for (j = 0; j < 2; j++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; + + if (cpr2) { + ring = &cpr2->cp_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + kfree(cpr2); + cpr->cp_ring_arr[j] = NULL; + } + } } +} + +static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) +{ + struct bnxt_ring_mem_info *rmem; + struct bnxt_ring_struct *ring; + struct bnxt_cp_ring_info *cpr; + int rc; + + cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); + if (!cpr) + return NULL; + + ring = &cpr->cp_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->cp_nr_pages; + rmem->page_size = HW_CMPD_RING_SIZE; + rmem->pg_arr = (void **)cpr->cp_desc_ring; + rmem->dma_arr = cpr->cp_desc_mapping; + rmem->flags = BNXT_RMEM_RING_PTE_FLAG; + rc = bnxt_alloc_ring(bp, rmem); + if (rc) { + bnxt_free_ring(bp, rmem); + kfree(cpr); + cpr = NULL; + } + return cpr; } static int bnxt_alloc_cp_rings(struct bnxt *bp) { + bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); int i, rc, ulp_base_vec, ulp_msix; ulp_msix = bnxt_get_ulp_msix_num(bp); @@ -2494,9 +3184,10 @@ continue; cpr = 
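The RX allocation path above now gives every RX ring its own page_pool and registers that pool as the ring's XDP memory model, so frames released through XDP are recycled into the pool instead of freed. Below is a condensed, hedged sketch of that pattern; rx_ring_setup_pool, dma_dev and q_idx are placeholder names, and only calls that appear in the patch (page_pool_create, xdp_rxq_info_reg, xdp_rxq_info_reg_mem_model) are used, with the 3-argument xdp_rxq_info_reg() form this kernel uses.

	#include <linux/dma-mapping.h>
	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <net/page_pool.h>
	#include <net/xdp.h>

	/* Sketch of the per-ring setup done in bnxt_alloc_rx_page_pool() and
	 * bnxt_alloc_rx_rings(); error handling trimmed to the essentials.
	 */
	static int rx_ring_setup_pool(struct net_device *dev, struct device *dma_dev,
				      struct xdp_rxq_info *xdp_rxq,
				      struct page_pool **pool, int q_idx, int size)
	{
		struct page_pool_params pp = { 0 };
		int rc;

		pp.pool_size = size;		/* one page per RX descriptor */
		pp.nid = dev_to_node(dma_dev);
		pp.dev = dma_dev;
		pp.dma_dir = DMA_BIDIRECTIONAL;

		*pool = page_pool_create(&pp);
		if (IS_ERR(*pool)) {
			rc = PTR_ERR(*pool);
			*pool = NULL;
			return rc;
		}

		rc = xdp_rxq_info_reg(xdp_rxq, dev, q_idx);
		if (rc < 0)
			return rc;

		/* Tell the XDP core that frames returned on this rxq belong
		 * to this page pool.
		 */
		rc = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_POOL, *pool);
		if (rc)
			xdp_rxq_info_unreg(xdp_rxq);
		return rc;
	}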
&bnapi->cp_ring; + cpr->bnapi = bnapi; ring = &cpr->cp_ring_struct; - rc = bnxt_alloc_ring(bp, ring); + rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) return rc; @@ -2504,6 +3195,29 @@ ring->map_idx = i + ulp_msix; else ring->map_idx = i; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + continue; + + if (i < bp->rx_nr_rings) { + struct bnxt_cp_ring_info *cpr2 = + bnxt_alloc_cp_sub_ring(bp); + + cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; + if (!cpr2) + return -ENOMEM; + cpr2->bnapi = bnapi; + } + if ((sh && i < bp->tx_nr_rings) || + (!sh && i >= bp->rx_nr_rings)) { + struct bnxt_cp_ring_info *cpr2 = + bnxt_alloc_cp_sub_ring(bp); + + cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; + if (!cpr2) + return -ENOMEM; + cpr2->bnapi = bnapi; + } } return 0; } @@ -2514,6 +3228,7 @@ for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_ring_mem_info *rmem; struct bnxt_cp_ring_info *cpr; struct bnxt_rx_ring_info *rxr; struct bnxt_tx_ring_info *txr; @@ -2524,31 +3239,34 @@ cpr = &bnapi->cp_ring; ring = &cpr->cp_ring_struct; - ring->nr_pages = bp->cp_nr_pages; - ring->page_size = HW_CMPD_RING_SIZE; - ring->pg_arr = (void **)cpr->cp_desc_ring; - ring->dma_arr = cpr->cp_desc_mapping; - ring->vmem_size = 0; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->cp_nr_pages; + rmem->page_size = HW_CMPD_RING_SIZE; + rmem->pg_arr = (void **)cpr->cp_desc_ring; + rmem->dma_arr = cpr->cp_desc_mapping; + rmem->vmem_size = 0; rxr = bnapi->rx_ring; if (!rxr) goto skip_rx; ring = &rxr->rx_ring_struct; - ring->nr_pages = bp->rx_nr_pages; - ring->page_size = HW_RXBD_RING_SIZE; - ring->pg_arr = (void **)rxr->rx_desc_ring; - ring->dma_arr = rxr->rx_desc_mapping; - ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; - ring->vmem = (void **)&rxr->rx_buf_ring; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_desc_ring; + rmem->dma_arr = rxr->rx_desc_mapping; + rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; + rmem->vmem = (void **)&rxr->rx_buf_ring; ring = &rxr->rx_agg_ring_struct; - ring->nr_pages = bp->rx_agg_nr_pages; - ring->page_size = HW_RXBD_RING_SIZE; - ring->pg_arr = (void **)rxr->rx_agg_desc_ring; - ring->dma_arr = rxr->rx_agg_desc_mapping; - ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; - ring->vmem = (void **)&rxr->rx_agg_ring; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_agg_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; + rmem->dma_arr = rxr->rx_agg_desc_mapping; + rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; + rmem->vmem = (void **)&rxr->rx_agg_ring; skip_rx: txr = bnapi->tx_ring; @@ -2556,12 +3274,13 @@ continue; ring = &txr->tx_ring_struct; - ring->nr_pages = bp->tx_nr_pages; - ring->page_size = HW_RXBD_RING_SIZE; - ring->pg_arr = (void **)txr->tx_desc_ring; - ring->dma_arr = txr->tx_desc_mapping; - ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; - ring->vmem = (void **)&txr->tx_buf_ring; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->tx_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)txr->tx_desc_ring; + rmem->dma_arr = txr->tx_desc_mapping; + rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; + rmem->vmem = (void **)&txr->tx_buf_ring; } } @@ -2571,8 +3290,8 @@ u32 prod; struct rx_bd **rx_buf_ring; - rx_buf_ring = (struct rx_bd **)ring->pg_arr; - for (i = 0, prod = 0; i < ring->nr_pages; i++) { + rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; + for (i = 0, 
prod = 0; i < ring->ring_mem.nr_pages; i++) { int j; struct rx_bd *rxbd; @@ -2587,13 +3306,60 @@ } } +static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct net_device *dev = bp->dev; + u32 prod; + int i; + + prod = rxr->rx_prod; + for (i = 0; i < bp->rx_ring_size; i++) { + if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { + netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", + ring_nr, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX(prod); + } + rxr->rx_prod = prod; + + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) + return 0; + + prod = rxr->rx_agg_prod; + for (i = 0; i < bp->rx_agg_ring_size; i++) { + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { + netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", + ring_nr, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX_AGG(prod); + } + rxr->rx_agg_prod = prod; + + if (rxr->rx_tpa) { + dma_addr_t mapping; + u8 *data; + + for (i = 0; i < bp->max_tpa; i++) { + data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); + if (!data) + return -ENOMEM; + + rxr->rx_tpa[i].data = data; + rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; + rxr->rx_tpa[i].mapping = mapping; + } + } + return 0; +} + static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) { - struct net_device *dev = bp->dev; struct bnxt_rx_ring_info *rxr; struct bnxt_ring_struct *ring; - u32 prod, type; - int i; + u32 type; type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; @@ -2606,75 +3372,27 @@ bnxt_init_rxbd_pages(ring, type); if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { - rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); - if (IS_ERR(rxr->xdp_prog)) { - int rc = PTR_ERR(rxr->xdp_prog); - - rxr->xdp_prog = NULL; - return rc; - } + bpf_prog_add(bp->xdp_prog, 1); + rxr->xdp_prog = bp->xdp_prog; } - prod = rxr->rx_prod; - for (i = 0; i < bp->rx_ring_size; i++) { - if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { - netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", - ring_nr, i, bp->rx_ring_size); - break; - } - prod = NEXT_RX(prod); - } - rxr->rx_prod = prod; ring->fw_ring_id = INVALID_HW_RING_ID; ring = &rxr->rx_agg_ring_struct; ring->fw_ring_id = INVALID_HW_RING_ID; - if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) - return 0; + if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { + type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; - type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | - RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; - - bnxt_init_rxbd_pages(ring, type); - - prod = rxr->rx_agg_prod; - for (i = 0; i < bp->rx_agg_ring_size; i++) { - if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { - netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", - ring_nr, i, bp->rx_ring_size); - break; - } - prod = NEXT_RX_AGG(prod); - } - rxr->rx_agg_prod = prod; - - if (bp->flags & BNXT_FLAG_TPA) { - if (rxr->rx_tpa) { - u8 *data; - dma_addr_t mapping; - - for (i = 0; i < MAX_TPA; i++) { - data = __bnxt_alloc_rx_data(bp, &mapping, - GFP_KERNEL); - if (!data) - return -ENOMEM; - - rxr->rx_tpa[i].data = data; - rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; - rxr->rx_tpa[i].mapping = mapping; - } - } else { - netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); - return -ENOMEM; - } + bnxt_init_rxbd_pages(ring, type); } - return 0; + return bnxt_alloc_one_rx_ring(bp, ring_nr); } static void bnxt_init_cp_rings(struct bnxt *bp) { - int i; + int i, j; for (i = 0; i < bp->cp_nr_rings; i++) { 
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; @@ -2683,6 +3401,17 @@ ring->fw_ring_id = INVALID_HW_RING_ID; cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; + for (j = 0; j < 2; j++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; + + if (!cpr2) + continue; + + ring = &cpr2->cp_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; + cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; + } } } @@ -2764,7 +3493,7 @@ int num_vnics = 1; #ifdef CONFIG_RFS_ACCEL - if (bp->flags & BNXT_FLAG_RFS) + if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) num_vnics += bp->rx_nr_rings; #endif @@ -2786,10 +3515,12 @@ for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + int j; vnic->fw_vnic_id = INVALID_HW_RING_ID; - vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; - vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; + for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) + vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; + vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; if (bp->vnic_info[i].rss_hash_key) { @@ -2837,7 +3568,7 @@ */ void bnxt_set_ring_params(struct bnxt *bp) { - u32 ring_size, rx_size, rx_space; + u32 ring_size, rx_size, rx_space, max_rx_cmpl; u32 agg_factor = 0, agg_ring_size = 0; /* 8 for CRC and VLAN */ @@ -2893,7 +3624,15 @@ bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; - ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; + max_rx_cmpl = bp->rx_ring_size; + /* MAX TPA needs to be added because TPA_START completions are + * immediately recycled, so the TPA completions are not bound by + * the RX ring size. 
+ */ + if (bp->flags & BNXT_FLAG_TPA) + max_rx_cmpl += bp->max_tpa; + /* RX and TPA completions are 32-byte, all others are 16-byte */ + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; bp->cp_ring_size = ring_size; bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); @@ -2957,7 +3696,7 @@ } if (vnic->rss_table) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, + dma_free_coherent(&pdev->dev, vnic->rss_table_size, vnic->rss_table, vnic->rss_table_dma_addr); vnic->rss_table = NULL; @@ -3003,6 +3742,9 @@ } } + if (bp->flags & BNXT_FLAG_CHIP_P5) + goto vnic_skip_grps; + if (vnic->flags & BNXT_VNIC_RSS_FLAG) max_rings = bp->rx_nr_rings; else @@ -3013,21 +3755,25 @@ rc = -ENOMEM; goto out; } - +vnic_skip_grps: if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && !(vnic->flags & BNXT_VNIC_RSS_FLAG)) continue; /* Allocate rss table and hash key */ - vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); + if (bp->flags & BNXT_FLAG_CHIP_P5) + size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(&pdev->dev, + vnic->rss_table_size, &vnic->rss_table_dma_addr, GFP_KERNEL); if (!vnic->rss_table) { rc = -ENOMEM; goto out; } - - size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); vnic->rss_hash_key = ((void *)vnic->rss_table) + size; vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; @@ -3047,6 +3793,30 @@ bp->hwrm_cmd_resp_dma_addr); bp->hwrm_cmd_resp_addr = NULL; } + + if (bp->hwrm_cmd_kong_resp_addr) { + dma_free_coherent(&pdev->dev, PAGE_SIZE, + bp->hwrm_cmd_kong_resp_addr, + bp->hwrm_cmd_kong_resp_dma_addr); + bp->hwrm_cmd_kong_resp_addr = NULL; + } +} + +static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + if (bp->hwrm_cmd_kong_resp_addr) + return 0; + + bp->hwrm_cmd_kong_resp_addr = + dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &bp->hwrm_cmd_kong_resp_dma_addr, + GFP_KERNEL); + if (!bp->hwrm_cmd_kong_resp_addr) + return -ENOMEM; + + return 0; } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@ -3067,7 +3837,7 @@ if (bp->hwrm_short_cmd_req_addr) { struct pci_dev *pdev = bp->pdev; - dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, + dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, bp->hwrm_short_cmd_req_addr, bp->hwrm_short_cmd_req_dma_addr); bp->hwrm_short_cmd_req_addr = NULL; @@ -3078,8 +3848,11 @@ { struct pci_dev *pdev = bp->pdev; + if (bp->hwrm_short_cmd_req_addr) + return 0; + bp->hwrm_short_cmd_req_addr = - dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, + dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, &bp->hwrm_short_cmd_req_dma_addr, GFP_KERNEL); if (!bp->hwrm_short_cmd_req_addr) @@ -3088,97 +3861,246 @@ return 0; } -static void bnxt_free_stats(struct bnxt *bp) +static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) { - u32 size, i; - struct pci_dev *pdev = bp->pdev; + kfree(stats->hw_masks); + stats->hw_masks = NULL; + kfree(stats->sw_stats); + stats->sw_stats = NULL; + if (stats->hw_stats) { + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, + stats->hw_stats_map); + stats->hw_stats = NULL; + } +} +static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, + bool alloc_masks) +{ + stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, + &stats->hw_stats_map, GFP_KERNEL); + if (!stats->hw_stats) + return -ENOMEM; + + stats->sw_stats = kzalloc(stats->len, 
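The completion-ring sizing in bnxt_set_ring_params() above budgets for the worst case: RX and TPA completions are 32 bytes (two slots each), TPA_START completions are recycled immediately and so are not bounded by the RX ring size, and aggregation and TX completions add one slot apiece. A standalone model of that arithmetic, using the same inputs the driver uses:

	#include <stdio.h>

	/* Model of the completion-ring entry count computed in
	 * bnxt_set_ring_params().
	 */
	static unsigned int cp_ring_entries(unsigned int rx_ring_size,
					    unsigned int agg_ring_size,
					    unsigned int tx_ring_size,
					    unsigned int max_tpa, int tpa_enabled)
	{
		unsigned int max_rx_cmpl = rx_ring_size;

		/* TPA_START completions are recycled right away, so up to
		 * max_tpa of them can be outstanding on top of the RX ring.
		 */
		if (tpa_enabled)
			max_rx_cmpl += max_tpa;

		/* RX/TPA completions take two 16-byte slots, all others one. */
		return max_rx_cmpl * 2 + agg_ring_size + tx_ring_size;
	}

	int main(void)
	{
		printf("cp ring entries: %u\n",
		       cp_ring_entries(2048, 1024, 512, 256, 1));
		return 0;
	}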
GFP_KERNEL); + if (!stats->sw_stats) + goto stats_mem_err; + + if (alloc_masks) { + stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); + if (!stats->hw_masks) + goto stats_mem_err; + } + return 0; + +stats_mem_err: + bnxt_free_stats_mem(bp, stats); + return -ENOMEM; +} + +static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = mask; +} + +static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); +} + +static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, + struct bnxt_stats_mem *stats) +{ + struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qstats_ext_input req = {0}; + __le64 *hw_masks; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || + !(bp->flags & BNXT_FLAG_CHIP_P5)) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto qstat_exit; + + hw_masks = &resp->rx_ucast_pkts; + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + +qstat_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); + +static void bnxt_init_stats(struct bnxt *bp) +{ + struct bnxt_napi *bnapi = bp->bnapi[0]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + __le64 *rx_stats, *tx_stats; + int rc, rx_count, tx_count; + u64 *rx_masks, *tx_masks; + u64 mask; + u8 flags; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + rc = bnxt_hwrm_func_qstat_ext(bp, stats); + if (rc) { + if (bp->flags & BNXT_FLAG_CHIP_P5) + mask = (1ULL << 48) - 1; + else + mask = -1ULL; + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + stats = &bp->port_stats; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats) / 8; + tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_count = sizeof(struct tx_port_stats) / 8; + + flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); + bnxt_hwrm_port_qstats(bp, 0); + } + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + stats = &bp->rx_port_stats_ext; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats_ext) / 8; + stats = &bp->tx_port_stats_ext; + tx_stats = stats->hw_stats; + tx_masks = stats->hw_masks; + tx_count = sizeof(struct tx_port_stats_ext) / 8; + + flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats_ext(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + if (tx_stats) + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + if (tx_stats) + bnxt_copy_hw_masks(tx_masks, tx_stats, + tx_count); + bnxt_hwrm_port_qstats_ext(bp, 0); + } + } +} + +static void 
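The reworked statistics code above pairs each DMA'd counter block with a hw_masks array describing the hardware counter width: the masks are read from FUNC_QSTATS_EXT (or the port qstats COUNTER_MASK queries) when the firmware supports it, and otherwise filled with the fixed fallbacks seen above, 48 bits for P5 ring counters and 40 bits for port counters. A standalone sketch of the two helpers and of a mask being applied to a raw counter value; it mirrors bnxt_fill_masks()/bnxt_copy_hw_masks() minus the little-endian conversion.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors bnxt_fill_masks(): every counter in the block has the same width. */
	static void fill_masks(uint64_t *mask_arr, uint64_t mask, int count)
	{
		for (int i = 0; i < count; i++)
			mask_arr[i] = mask;
	}

	/* Mirrors bnxt_copy_hw_masks(), without the le64_to_cpu() step. */
	static void copy_hw_masks(uint64_t *mask_arr, const uint64_t *hw, int count)
	{
		for (int i = 0; i < count; i++)
			mask_arr[i] = hw[i];
	}

	int main(void)
	{
		uint64_t masks[4];

		fill_masks(masks, (1ULL << 48) - 1, 4);	/* P5 ring-counter fallback */
		printf("48-bit mask: 0x%llx\n", (unsigned long long)masks[0]);

		fill_masks(masks, (1ULL << 40) - 1, 4);	/* port-counter fallback */
		printf("raw 0x1ffffffffff masked to 0x%llx\n",
		       (unsigned long long)(0x1ffffffffffULL & masks[0]));
		return 0;
	}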
bnxt_free_port_stats(struct bnxt *bp) +{ bp->flags &= ~BNXT_FLAG_PORT_STATS; bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; - if (bp->hw_rx_port_stats) { - dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, - bp->hw_rx_port_stats, - bp->hw_rx_port_stats_map); - bp->hw_rx_port_stats = NULL; - } + bnxt_free_stats_mem(bp, &bp->port_stats); + bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); + bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); +} - if (bp->hw_rx_port_stats_ext) { - dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), - bp->hw_rx_port_stats_ext, - bp->hw_rx_port_stats_ext_map); - bp->hw_rx_port_stats_ext = NULL; - } +static void bnxt_free_ring_stats(struct bnxt *bp) +{ + int i; if (!bp->bnapi) return; - - size = sizeof(struct ctx_hw_stats); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - if (cpr->hw_stats) { - dma_free_coherent(&pdev->dev, size, cpr->hw_stats, - cpr->hw_stats_map); - cpr->hw_stats = NULL; - } + bnxt_free_stats_mem(bp, &cpr->stats); } } static int bnxt_alloc_stats(struct bnxt *bp) { u32 size, i; - struct pci_dev *pdev = bp->pdev; + int rc; - size = sizeof(struct ctx_hw_stats); + size = bp->hw_ring_stats_size; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, - &cpr->hw_stats_map, - GFP_KERNEL); - if (!cpr->hw_stats) - return -ENOMEM; + cpr->stats.len = size; + rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); + if (rc) + return rc; cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } - if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { - bp->hw_port_stats_size = sizeof(struct rx_port_stats) + - sizeof(struct tx_port_stats) + 1024; + if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) + return 0; - bp->hw_rx_port_stats = - dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, - &bp->hw_rx_port_stats_map, - GFP_KERNEL); - if (!bp->hw_rx_port_stats) - return -ENOMEM; + if (bp->port_stats.hw_stats) + goto alloc_ext_stats; - bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + - 512; - bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + - sizeof(struct rx_port_stats) + 512; - bp->flags |= BNXT_FLAG_PORT_STATS; + bp->port_stats.len = BNXT_PORT_STATS_SIZE; + rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); + if (rc) + return rc; - /* Display extended statistics only if FW supports it */ - if (bp->hwrm_spec_code < 0x10804 || - bp->hwrm_spec_code == 0x10900) + bp->flags |= BNXT_FLAG_PORT_STATS; + +alloc_ext_stats: + /* Display extended statistics only if FW supports it */ + if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) return 0; - bp->hw_rx_port_stats_ext = - dma_zalloc_coherent(&pdev->dev, - sizeof(struct rx_port_stats_ext), - &bp->hw_rx_port_stats_ext_map, - GFP_KERNEL); - if (!bp->hw_rx_port_stats_ext) - return 0; + if (bp->rx_port_stats_ext.hw_stats) + goto alloc_tx_ext_stats; - bp->flags |= BNXT_FLAG_PORT_STATS_EXT; + bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); + /* Extended stats are optional */ + if (rc) + return 0; + +alloc_tx_ext_stats: + if (bp->tx_port_stats_ext.hw_stats) + return 0; + + if (bp->hwrm_spec_code >= 0x10902 || + (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { + bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, 
&bp->tx_port_stats_ext, true); + /* Extended stats are optional */ + if (rc) + return 0; } + bp->flags |= BNXT_FLAG_PORT_STATS_EXT; return 0; } @@ -3277,7 +4199,10 @@ bnxt_free_cp_rings(bp); bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { - bnxt_free_stats(bp); + bnxt_free_ring_stats(bp); + if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) || + test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_free_port_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); kfree(bp->tx_ring_map); @@ -3315,6 +4240,13 @@ bp->bnapi[i] = bnapi; bp->bnapi[i]->index = i; bp->bnapi[i]->bp = bp; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_cp_ring_info *cpr = + &bp->bnapi[i]->cp_ring; + + cpr->cp_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + } } bp->rx_ring = kcalloc(bp->rx_nr_rings, @@ -3324,7 +4256,15 @@ return -ENOMEM; for (i = 0; i < bp->rx_nr_rings; i++) { - bp->rx_ring[i].bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + rxr->rx_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + rxr->rx_agg_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + } + rxr->bnapi = bp->bnapi[i]; bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; } @@ -3346,12 +4286,16 @@ j = bp->rx_nr_rings; for (i = 0; i < bp->tx_nr_rings; i++, j++) { - bp->tx_ring[i].bnapi = bp->bnapi[j]; - bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + txr->tx_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + txr->bnapi = bp->bnapi[j]; + bp->bnapi[j]->tx_ring = txr; bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; if (i >= bp->tx_nr_rings_xdp) { - bp->tx_ring[i].txq_index = i - - bp->tx_nr_rings_xdp; + txr->txq_index = i - bp->tx_nr_rings_xdp; bp->bnapi[j]->tx_int = bnxt_tx_int; } else { bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; @@ -3362,6 +4306,7 @@ rc = bnxt_alloc_stats(bp); if (rc) goto alloc_mem_err; + bnxt_init_stats(bp); rc = bnxt_alloc_ntp_fltrs(bp); if (rc) @@ -3411,7 +4356,7 @@ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; if (ring->fw_ring_id != INVALID_HW_RING_ID) - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); } } @@ -3447,7 +4392,7 @@ struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); } } @@ -3459,7 +4404,39 @@ req->req_type = cpu_to_le16(req_type); req->cmpl_ring = cpu_to_le16(cmpl_ring); req->target_id = cpu_to_le16(target_id); - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); + if (bnxt_kong_hwrm_message(bp, req)) + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); + else + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); +} + +static int bnxt_hwrm_to_stderr(u32 hwrm_err) +{ + switch (hwrm_err) { + case HWRM_ERR_CODE_SUCCESS: + return 0; + case HWRM_ERR_CODE_RESOURCE_LOCKED: + return -EROFS; + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: + return -EACCES; + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: + return -ENOSPC; + case HWRM_ERR_CODE_INVALID_PARAMS: + case HWRM_ERR_CODE_INVALID_FLAGS: + case HWRM_ERR_CODE_INVALID_ENABLES: + case HWRM_ERR_CODE_UNSUPPORTED_TLV: + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: + return -EINVAL; + case HWRM_ERR_CODE_NO_BUFFER: + return -ENOMEM; + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: + return -EAGAIN; + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + 
default: + return -EIO; + } } static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, @@ -3468,24 +4445,56 @@ int i, intr_process, rc, tmo_count; struct input *req = msg; u32 *data = msg; - __le32 *resp_len; u8 *valid; u16 cp_ring_id, len = 0; struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; struct hwrm_short_input short_input = {0}; + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; + u16 dst = BNXT_HWRM_CHNL_CHIMP; - req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); + if (BNXT_NO_FW_ACCESS(bp) && + le16_to_cpu(req->req_type) != HWRM_FUNC_RESET) + return -EBUSY; + + if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { + if (msg_len > bp->hwrm_max_ext_req_len || + !bp->hwrm_short_cmd_req_addr) + return -EINVAL; + } + + if (bnxt_hwrm_kong_chnl(bp, req)) { + dst = BNXT_HWRM_CHNL_KONG; + bar_offset = BNXT_GRCPF_REG_KONG_COMM; + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; + resp = bp->hwrm_cmd_kong_resp_addr; + } + memset(resp, 0, PAGE_SIZE); cp_ring_id = le16_to_cpu(req->cmpl_ring); intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; - if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) { + req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); + /* currently supports only one outstanding message */ + if (intr_process) + bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + msg_len > BNXT_HWRM_MAX_REQ_LEN) { void *short_cmd_req = bp->hwrm_short_cmd_req_addr; + u16 max_msg_len; + + /* Set boundary for maximum extended request length for short + * cmd format. If passed up from device use the max supported + * internal req length. + */ + max_msg_len = bp->hwrm_max_ext_req_len; memcpy(short_cmd_req, req, msg_len); - memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN - - msg_len); + if (msg_len < max_msg_len) + memset(short_cmd_req + msg_len, 0, + max_msg_len - msg_len); short_input.req_type = req->req_type; short_input.signature = @@ -3504,17 +4513,16 @@ } /* Write request msg to hwrm channel */ - __iowrite32_copy(bp->bar0, data, msg_len / 4); + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); for (i = msg_len; i < max_req_len; i += 4) - writel(0, bp->bar0 + i); - - /* currently supports only one outstanding message */ - if (intr_process) - bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); + writel(0, bp->bar0 + bar_offset + i); /* Ring channel doorbell */ - writel(1, bp->bar0 + 0x100); + writel(1, bp->bar0 + doorbell_offset); + + if (!pci_is_enabled(bp->pdev)) + return 0; if (!timeout) timeout = DFLT_HWRM_CMD_TIMEOUT; @@ -3529,11 +4537,18 @@ tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; + if (intr_process) { + u16 seq_id = bp->hwrm_intr_seq_id; + /* Wait until hwrm response cmpl interrupt is processed */ - while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && + while (bp->hwrm_intr_seq_id != (u16)~seq_id && i++ < tmo_count) { + /* Abort the wait for completion if the FW health + * check has failed. 
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return -EBUSY; /* on first few passes, just barely sleep */ if (i < HWRM_SHORT_TIMEOUT_COUNTER) usleep_range(HWRM_SHORT_MIN_TIMEOUT, @@ -3543,21 +4558,25 @@ HWRM_MAX_TIMEOUT); } - if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { - netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - le16_to_cpu(req->req_type)); - return -1; + if (bp->hwrm_intr_seq_id != (u16)~seq_id) { + if (!silent) + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", + le16_to_cpu(req->req_type)); + return -EBUSY; } - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> - HWRM_RESP_LEN_SFT; - valid = bp->hwrm_cmd_resp_addr + len - 1; + len = le16_to_cpu(resp->resp_len); + valid = ((u8 *)resp) + len - 1; } else { int j; /* Check if response len is updated */ for (i = 0; i < tmo_count; i++) { - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> - HWRM_RESP_LEN_SFT; + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return -EBUSY; + len = le16_to_cpu(resp->resp_len); if (len) break; /* on first few passes, just barely sleep */ @@ -3570,15 +4589,16 @@ } if (i >= tmo_count) { - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len); - return -1; + if (!silent) + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", + HWRM_TOTAL_TIMEOUT(i), + le16_to_cpu(req->req_type), + le16_to_cpu(req->seq_id), len); + return -EBUSY; } /* Last byte of resp contains valid bit */ - valid = bp->hwrm_cmd_resp_addr + len - 1; + valid = ((u8 *)resp) + len - 1; for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { /* make sure we read from updated DMA memory */ dma_rmb(); @@ -3588,11 +4608,13 @@ } if (j >= HWRM_VALID_BIT_DELAY_USEC) { - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len, *valid); - return -1; + if (!silent) + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", + HWRM_TOTAL_TIMEOUT(i), + le16_to_cpu(req->req_type), + le16_to_cpu(req->seq_id), len, + *valid); + return -EBUSY; } } @@ -3606,7 +4628,7 @@ netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", le16_to_cpu(resp->req_type), le16_to_cpu(resp->seq_id), rc); - return rc; + return bnxt_hwrm_to_stderr(rc); } int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) @@ -3641,50 +4663,31 @@ return rc; } -int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, - int bmap_size) -{ - struct hwrm_func_drv_rgtr_input req = {0}; - DECLARE_BITMAP(async_events_bmap, 256); - u32 *events = (u32 *)async_events_bmap; - int i; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); - - req.enables = - cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - - memset(async_events_bmap, 0, sizeof(async_events_bmap)); - for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) - __set_bit(bnxt_async_events_arr[i], async_events_bmap); - - if (bmap && bmap_size) { - for (i = 0; i < bmap_size; i++) { - if (test_bit(i, bmap)) - __set_bit(i, async_events_bmap); - } - } - - for (i = 0; i < 8; i++) - req.async_event_fwd[i] |= cpu_to_le32(events[i]); - - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); -} - -static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) +int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int 
bmap_size, + bool async_only) { struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_drv_rgtr_input req = {0}; - int rc; + DECLARE_BITMAP(async_events_bmap, 256); + u32 *events = (u32 *)async_events_bmap; + u32 flags; + int rc, i; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); req.enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | - FUNC_DRV_RGTR_REQ_ENABLES_VER); + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); - req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE); + flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | + FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; + req.flags = cpu_to_le32(flags); req.ver_maj_8b = DRV_VER_MAJ; req.ver_min_8b = DRV_VER_MIN; req.ver_upd_8b = DRV_VER_UPD; @@ -3713,13 +4716,40 @@ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req.flags |= cpu_to_le32( + FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); + + memset(async_events_bmap, 0, sizeof(async_events_bmap)); + for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { + u16 event_id = bnxt_async_events_arr[i]; + + if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + continue; + __set_bit(bnxt_async_events_arr[i], async_events_bmap); + } + if (bmap && bmap_size) { + for (i = 0; i < bmap_size; i++) { + if (test_bit(i, bmap)) + __set_bit(i, async_events_bmap); + } + } + for (i = 0; i < 8; i++) + req.async_event_fwd[i] |= cpu_to_le32(events[i]); + + if (async_only) + req.enables = + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; - else if (resp->flags & - cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) - bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; + if (!rc) { + set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); + if (resp->flags & + cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; + } mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -3727,6 +4757,9 @@ static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) { struct hwrm_func_drv_unrgtr_input req = {0}; + + if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) + return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -3742,10 +4775,12 @@ switch (tunnel_type) { case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: - req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; + req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; break; case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: - req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; + req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; break; default: break; @@ -3780,10 +4815,11 @@ switch (tunnel_type) { case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: - bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->vxlan_fw_dst_port_id = + le16_to_cpu(resp->tunnel_dst_port_id); break; case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: - bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 
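bnxt_hwrm_func_drv_rgtr() above advertises which async events the driver wants forwarded by setting bits in a 256-bit bitmap and copying it into the eight 32-bit async_event_fwd[] words of the request. A small standalone model of that packing follows; it uses plain arrays instead of the kernel bitmap helpers, skips the cpu_to_le32() step, and the event ids are examples only (the real ids come from bnxt_hsi.h).

	#include <stdint.h>
	#include <stdio.h>

	/* Model of filling req.async_event_fwd[8] from a 256-bit event bitmap. */
	static void set_event(uint32_t *events, unsigned int event_id)
	{
		events[event_id / 32] |= 1U << (event_id % 32);
	}

	int main(void)
	{
		uint32_t async_event_fwd[8] = { 0 };
		unsigned int wanted[] = { 0, 7, 32, 255 };	/* example ids */

		for (unsigned int i = 0; i < sizeof(wanted) / sizeof(wanted[0]); i++)
			set_event(async_event_fwd, wanted[i]);

		for (int i = 0; i < 8; i++)
			printf("async_event_fwd[%d] = 0x%08x\n", i,
			       async_event_fwd[i]);
		return 0;
	}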
+ bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); break; default: break; @@ -3841,16 +4877,24 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - int rc = 0; struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; - struct hwrm_cfa_ntuple_filter_alloc_output *resp = - bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct flow_keys *keys = &fltr->fkeys; - struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; + struct bnxt_vnic_info *vnic; + u32 flags = 0; + int rc = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; + req.dst_id = cpu_to_le16(fltr->rxq); + } else { + vnic = &bp->vnic_info[fltr->rxq + 1]; + req.dst_id = cpu_to_le16(vnic->fw_vnic_id); + } + req.flags = cpu_to_le32(flags); req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); req.ethertype = htons(ETH_P_IP); @@ -3889,11 +4933,12 @@ req.dst_port = keys->ports.dst; req.dst_port_mask = cpu_to_be16(0xffff); - req.dst_id = cpu_to_le16(vnic->fw_vnic_id); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) + if (!rc) { + resp = bnxt_get_hwrm_resp_addr(bp, &req); fltr->filter_id = resp->ntuple_filter_id; + } mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -3964,6 +5009,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; struct hwrm_vnic_tpa_cfg_input req = {0}; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) @@ -4003,9 +5049,14 @@ nsegs = (MAX_SKB_FRAGS - n) / n; } - segs = ilog2(nsegs); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + segs = MAX_TPA_SEGS_P5; + max_aggs = bp->max_tpa; + } else { + segs = ilog2(nsegs); + } req.max_agg_segs = cpu_to_le16(segs); - req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); + req.max_aggs = cpu_to_le16(max_aggs); req.min_agg_len = cpu_to_le32(512); } @@ -4014,41 +5065,196 @@ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) +{ + struct bnxt_ring_grp_info *grp_info; + + grp_info = &bp->grp_info[ring->grp_idx]; + return grp_info->cp_fw_ring_id; +} + +static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_napi *bnapi = rxr->bnapi; + struct bnxt_cp_ring_info *cpr; + + cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; + return cpr->cp_ring_struct.fw_ring_id; + } else { + return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); + } +} + +static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_napi *bnapi = txr->bnapi; + struct bnxt_cp_ring_info *cpr; + + cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; + return cpr->cp_ring_struct.fw_ring_id; + } else { + return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); + } +} + +static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) +{ + int entries; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; + else + entries = HW_HASH_INDEX_SIZE; + + bp->rss_indir_tbl_entries = entries; + bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), + GFP_KERNEL); + if (!bp->rss_indir_tbl) + 
return -ENOMEM; + return 0; +} + +static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) +{ + u16 max_rings, max_entries, pad, i; + + if (!bp->rx_nr_rings) + return; + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + max_rings = bp->rx_nr_rings - 1; + else + max_rings = bp->rx_nr_rings; + + max_entries = bnxt_get_rxfh_indir_size(bp->dev); + + for (i = 0; i < max_entries; i++) + bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); + + pad = bp->rss_indir_tbl_entries - max_entries; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); +} + +static u16 bnxt_get_max_rss_ring(struct bnxt *bp) +{ + u16 i, tbl_size, max_ring = 0; + + if (!bp->rss_indir_tbl) + return 0; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + for (i = 0; i < tbl_size; i++) + max_ring = max(max_ring, bp->rss_indir_tbl[i]); + return max_ring; +} + +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 2; + return 1; +} + +static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); + u16 i, j; + + /* Fill the RSS indirection table with ring group ids */ + for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { + if (!no_rss) + j = bp->rss_indir_tbl[i]; + vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); + } +} + +static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + __le16 *ring_tbl = vnic->rss_table; + struct bnxt_rx_ring_info *rxr; + u16 tbl_size, i; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + + for (i = 0; i < tbl_size; i++) { + u16 ring_id, j; + + j = bp->rss_indir_tbl[i]; + rxr = &bp->rx_ring[j]; + + ring_id = rxr->rx_ring_struct.fw_ring_id; + *ring_tbl++ = cpu_to_le16(ring_id); + ring_id = bnxt_cp_ring_for_rx(bp, rxr); + *ring_tbl++ = cpu_to_le16(ring_id); + } +} + +static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + __bnxt_fill_hw_rss_tbl_p5(bp, vnic); + else + __bnxt_fill_hw_rss_tbl(bp, vnic); +} + static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { - u32 i, j, max_rings; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input req = {0}; - if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) + if ((bp->flags & BNXT_FLAG_CHIP_P5) || + vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); if (set_rss) { + bnxt_fill_hw_rss_tbl(bp, vnic); req.hash_type = cpu_to_le32(bp->rss_hash_cfg); req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - if (vnic->flags & BNXT_VNIC_RSS_FLAG) { - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - max_rings = bp->rx_nr_rings - 1; - else - max_rings = bp->rx_nr_rings; - } else { - max_rings = 1; - } - - /* Fill the RSS indirection table with ring group ids */ - for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { - if (j == max_rings) - j = 0; - vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); - } - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct 
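The RSS code above keeps a host copy of the indirection table (bp->rss_indir_tbl), fills it round-robin across the RX rings with ethtool_rxfh_indir_default() (which computes index % n_rings), zero-pads the unused tail, and on P5 later writes it out as RX-ring/completion-ring id pairs in __bnxt_fill_hw_rss_tbl_p5(). A standalone model of the default spread:

	#include <stdio.h>
	#include <string.h>

	/* Model of bnxt_set_dflt_rss_indir_tbl(): entry i points at ring i % rings. */
	static void set_dflt_indir_tbl(unsigned short *tbl, int tbl_entries,
				       int used_entries, int rx_rings)
	{
		int i;

		for (i = 0; i < used_entries; i++)
			tbl[i] = i % rx_rings;
		/* Pad the unused tail, as the driver does. */
		if (used_entries < tbl_entries)
			memset(&tbl[i], 0, (tbl_entries - i) * sizeof(tbl[0]));
	}

	int main(void)
	{
		unsigned short tbl[128];

		set_dflt_indir_tbl(tbl, 128, 128, 6);
		printf("tbl[0..7]: %u %u %u %u %u %u %u %u\n",
		       tbl[0], tbl[1], tbl[2], tbl[3],
		       tbl[4], tbl[5], tbl[6], tbl[7]);
		return 0;
	}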
hwrm_vnic_rss_cfg_input req = {0}; + dma_addr_t ring_tbl_map; + u32 i, nr_ctxs; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + if (!set_rss) { + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return 0; + } + bnxt_fill_hw_rss_tbl(bp, vnic); + req.hash_type = cpu_to_le32(bp->rss_hash_cfg); + req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); + ring_tbl_map = vnic->rss_table_dma_addr; + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { + int rc; + + req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); + req.ring_table_pair_index = i; + req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return rc; + } + return 0; } static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) @@ -4134,6 +5340,18 @@ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; + + req.default_rx_ring_id = + cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); + req.default_cmpl_ring_id = + cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); + req.enables = + cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | + VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); + goto vnic_mru; + } req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); /* Only RSS support for now TBD: COS & LB */ if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { @@ -4166,13 +5384,12 @@ ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); - req.lb_rule = cpu_to_le16(0xffff); - req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN); +vnic_mru: + req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); #ifdef CONFIG_BNXT_SRIOV if (BNXT_VF(bp)) def_vlan = bp->vf.vlan; @@ -4185,10 +5402,8 @@ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) +static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) { - u32 rc = 0; - if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { struct hwrm_vnic_free_input req = {0}; @@ -4196,12 +5411,9 @@ req.vnic_id = cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return rc; + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; } - return rc; } static void bnxt_hwrm_vnic_free(struct bnxt *bp) @@ -4220,6 +5432,10 @@ unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; struct hwrm_vnic_alloc_input req = {0}; struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + goto vnic_no_ring_grps; /* map ring groups to this vnic */ for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { @@ -4229,12 +5445,12 @@ j, nr_rings); break; } - bp->vnic_info[vnic_id].fw_grp_ids[j] = - bp->grp_info[grp_idx].fw_grp_id; + vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; } - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = 
INVALID_HW_RING_ID; +vnic_no_ring_grps: + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) + vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; if (vnic_id == 0) req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); @@ -4243,7 +5459,7 @@ mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) - bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); + vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -4254,6 +5470,8 @@ struct hwrm_vnic_qcaps_input req = {0}; int rc; + bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); + bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); if (bp->hwrm_spec_code < 0x10600) return 0; @@ -4263,11 +5481,27 @@ if (!rc) { u32 flags = le32_to_cpu(resp->flags); - if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP) + if (!(bp->flags & BNXT_FLAG_CHIP_P5) && + (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) bp->flags |= BNXT_FLAG_NEW_RSS_CAP; if (flags & VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; + + /* Older P5 fw before EXT_HW_STATS support did not set + * VLAN_STRIP_CAP properly. + */ + if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || + (BNXT_CHIP_P5_THOR(bp) && + !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) + bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; + bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); + if (bp->max_tpa_v2) { + if (BNXT_CHIP_P5_THOR(bp)) + bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; + else + bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; + } } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -4277,6 +5511,9 @@ { u16 i; u32 rc = 0; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + return 0; mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->rx_nr_rings; i++) { @@ -4304,14 +5541,13 @@ return rc; } -static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) +static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) { u16 i; - u32 rc = 0; struct hwrm_ring_grp_free_input req = {0}; - if (!bp->grp_info) - return 0; + if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) + return; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); @@ -4322,14 +5558,10 @@ req.ring_group_id = cpu_to_le32(bp->grp_info[i].fw_grp_id); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); - if (rc) - break; + _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; } mutex_unlock(&bp->hwrm_cmd_lock); - return rc; } static int hwrm_ring_alloc_send_msg(struct bnxt *bp, @@ -4339,44 +5571,89 @@ int rc = 0, err = 0; struct hwrm_ring_alloc_input req = {0}; struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_ring_mem_info *rmem = &ring->ring_mem; struct bnxt_ring_grp_info *grp_info; u16 ring_id; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); req.enables = 0; - if (ring->nr_pages > 1) { - req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); + if (rmem->nr_pages > 1) { + req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); /* Page size is in log2 units */ req.page_size = BNXT_PAGE_SHIFT; req.page_tbl_depth = 1; } else { - req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); + req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); } req.fbo = 0; /* Association of ring index with doorbell index and MSIX number */ req.logical_id = cpu_to_le16(map_index); switch (ring_type) { - case HWRM_RING_ALLOC_TX: + case HWRM_RING_ALLOC_TX: { + struct bnxt_tx_ring_info *txr; + + txr = container_of(ring, struct 
bnxt_tx_ring_info, + tx_ring_struct); req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; /* Association of transmit ring with completion ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); req.length = cpu_to_le32(bp->tx_ring_mask + 1); req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); req.queue_id = cpu_to_le16(ring->queue_id); break; + } case HWRM_RING_ALLOC_RX: req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; req.length = cpu_to_le32(bp->rx_ring_mask + 1); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + u16 flags = 0; + + /* Association of rx ring with stats context */ + grp_info = &bp->grp_info[ring->grp_idx]; + req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); + req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req.enables |= cpu_to_le32( + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + if (NET_IP_ALIGN == 2) + flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; + req.flags = cpu_to_le16(flags); + } break; case HWRM_RING_ALLOC_AGG: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + /* Association of agg ring with rx ring */ + grp_info = &bp->grp_info[ring->grp_idx]; + req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); + req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req.enables |= cpu_to_le32( + RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + } else { + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + } req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); break; case HWRM_RING_ALLOC_CMPL: req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; + req.length = cpu_to_le32(bp->cp_ring_mask + 1); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + /* Association of cp ring with nq */ + grp_info = &bp->grp_info[map_index]; + req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req.cq_handle = cpu_to_le64(ring->handle); + req.enables |= cpu_to_le32( + RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); + } else if (bp->flags & BNXT_FLAG_USING_MSIX) { + req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + } + break; + case HWRM_RING_ALLOC_NQ: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; req.length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_USING_MSIX) req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; @@ -4426,22 +5703,74 @@ return rc; } +static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, + u32 map_idx, u32 xid) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (BNXT_PF(bp)) + db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; + else + db->doorbell = bp->bar1 + DB_VF_OFFSET_P5; + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; + break; + case HWRM_RING_ALLOC_RX: + case HWRM_RING_ALLOC_AGG: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; + break; + case HWRM_RING_ALLOC_CMPL: + db->db_key64 = DBR_PATH_L2; + break; + case HWRM_RING_ALLOC_NQ: + db->db_key64 = DBR_PATH_L2; + break; + } + db->db_key64 |= (u64)xid << DBR_XID_SFT; + } else { + db->doorbell = bp->bar1 + map_idx * 0x80; + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_key32 = DB_KEY_TX; + break; + case HWRM_RING_ALLOC_RX: + case HWRM_RING_ALLOC_AGG: + db->db_key32 = DB_KEY_RX; + break; + case HWRM_RING_ALLOC_CMPL: + db->db_key32 = DB_KEY_CP; + break; + } + } +} + static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { + bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); int i, rc = 0; + u32 
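bnxt_set_db() above captures the two doorbell schemes: legacy chips keep a 32-bit key (DB_KEY_TX/DB_KEY_RX/DB_KEY_CP) and a per-ring offset of bar1 + map_idx * 0x80, while P5 chips build a 64-bit value carrying the path, doorbell type and ring XID (xid << DBR_XID_SFT), which the bnxt_db_write()-style helpers seen elsewhere in the patch combine with the current ring index on each write. The sketch below only models the 64-bit composition; the field positions are illustrative assumptions, not the real DBR_* constants from bnxt.h.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative field layout only -- the real DBR_* values live in bnxt.h. */
	#define DB_TYPE_SHIFT	60
	#define DB_PATH_L2	(1ULL << 56)
	#define DB_XID_SHIFT	32

	/* Model of the db_key64 built once per ring in bnxt_set_db(). */
	static uint64_t make_db_key64(uint64_t type, uint32_t xid)
	{
		return DB_PATH_L2 | (type << DB_TYPE_SHIFT) |
		       ((uint64_t)xid << DB_XID_SHIFT);
	}

	int main(void)
	{
		uint64_t key = make_db_key64(2 /* e.g. an SRQ-type doorbell */, 0x1234);
		uint32_t prod = 0x10;

		/* A doorbell write ORs the ring index into the saved key. */
		printf("doorbell word: 0x%016llx\n",
		       (unsigned long long)(key | prod));
		return 0;
	}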
type; + if (bp->flags & BNXT_FLAG_CHIP_P5) + type = HWRM_RING_ALLOC_NQ; + else + type = HWRM_RING_ALLOC_CMPL; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; u32 map_idx = ring->map_idx; + unsigned int vector; - cpr->cp_doorbell = bp->bar1 + map_idx * 0x80; - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, - map_idx); - if (rc) + vector = bp->irq_tbl[map_idx].vector; + disable_irq_nosync(vector); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) { + enable_irq(vector); goto err_out; - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + } + bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); + enable_irq(vector); bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; if (!i) { @@ -4451,33 +5780,71 @@ } } + type = HWRM_RING_ALLOC_TX; for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; - struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 map_idx = i; + struct bnxt_ring_struct *ring; + u32 map_idx; - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, - map_idx); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_napi *bnapi = txr->bnapi; + struct bnxt_cp_ring_info *cpr, *cpr2; + u32 type2 = HWRM_RING_ALLOC_CMPL; + + cpr = &bnapi->cp_ring; + cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL]; + ring = &cpr2->cp_ring_struct; + ring->handle = BNXT_TX_HDL; + map_idx = bnapi->index; + rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); + if (rc) + goto err_out; + bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, + ring->fw_ring_id); + bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); + } + ring = &txr->tx_ring_struct; + map_idx = i; + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); if (rc) goto err_out; - txr->tx_doorbell = bp->bar1 + map_idx * 0x80; + bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); } + type = HWRM_RING_ALLOC_RX; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; - u32 map_idx = rxr->bnapi->index; + struct bnxt_napi *bnapi = rxr->bnapi; + u32 map_idx = bnapi->index; - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, - map_idx); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); if (rc) goto err_out; - rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; - writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); + /* If we have agg rings, post agg buffers first. 
*/ + if (!agg_rings) + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 type2 = HWRM_RING_ALLOC_CMPL; + struct bnxt_cp_ring_info *cpr2; + + cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; + ring = &cpr2->cp_ring_struct; + ring->handle = BNXT_RX_HDL; + rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); + if (rc) + goto err_out; + bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, + ring->fw_ring_id); + bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); + } } - if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (agg_rings) { + type = HWRM_RING_ALLOC_AGG; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = @@ -4485,15 +5852,14 @@ u32 grp_idx = ring->grp_idx; u32 map_idx = grp_idx + bp->rx_nr_rings; - rc = hwrm_ring_alloc_send_msg(bp, ring, - HWRM_RING_ALLOC_AGG, - map_idx); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); if (rc) goto err_out; - rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; - writel(DB_KEY_RX | rxr->rx_agg_prod, - rxr->rx_agg_doorbell); + bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, + ring->fw_ring_id); + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } @@ -4509,6 +5875,9 @@ struct hwrm_ring_free_input req = {0}; struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; u16 error_code; + + if (BNXT_NO_FW_ACCESS(bp)) + return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); req.ring_type = ring_type; @@ -4529,6 +5898,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) { + u32 type; int i; if (!bp->bnapi) @@ -4537,10 +5907,10 @@ for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 grp_idx = txr->bnapi->index; - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, close_path ? cmpl_ring_id : @@ -4553,9 +5923,10 @@ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; u32 grp_idx = rxr->bnapi->index; - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_RX, close_path ? cmpl_ring_id : @@ -4566,15 +5937,19 @@ } } + if (bp->flags & BNXT_FLAG_CHIP_P5) + type = RING_FREE_REQ_RING_TYPE_RX_AGG; + else + type = RING_FREE_REQ_RING_TYPE_RX; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; u32 grp_idx = rxr->bnapi->index; - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_RX, + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + + hwrm_ring_free_send_msg(bp, ring, type, close_path ? 
cmpl_ring_id : INVALID_HW_RING_ID); ring->fw_ring_id = INVALID_HW_RING_ID; @@ -4589,20 +5964,41 @@ */ bnxt_disable_int_sync(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + type = RING_FREE_REQ_RING_TYPE_NQ; + else + type = RING_FREE_REQ_RING_TYPE_L2_CMPL; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + struct bnxt_ring_struct *ring; + int j; + for (j = 0; j < 2; j++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; + + if (cpr2) { + ring = &cpr2->cp_ring_struct; + if (ring->fw_ring_id == INVALID_HW_RING_ID) + continue; + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_L2_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + } + } + ring = &cpr->cp_ring_struct; if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_L2_CMPL, + hwrm_ring_free_send_msg(bp, ring, type, INVALID_HW_RING_ID); ring->fw_ring_id = INVALID_HW_RING_ID; bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; } } } + +static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool shared); static int bnxt_hwrm_get_rings(struct bnxt *bp) { @@ -4620,7 +6016,7 @@ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) { mutex_unlock(&bp->hwrm_cmd_lock); - return -EIO; + return rc; } hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); @@ -4633,8 +6029,25 @@ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); cp = le16_to_cpu(resp->alloc_cmpl_rings); stats = le16_to_cpu(resp->alloc_stat_ctx); - cp = min_t(u16, cp, stats); + hw_resc->resv_irqs = cp; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + int rx = hw_resc->resv_rx_rings; + int tx = hw_resc->resv_tx_rings; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx >>= 1; + if (cp < (rx + tx)) { + bnxt_trim_rings(bp, &rx, &tx, cp, false); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx <<= 1; + hw_resc->resv_rx_rings = rx; + hw_resc->resv_tx_rings = tx; + } + hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); + hw_resc->resv_hw_ring_grps = rx; + } hw_resc->resv_cp_rings = cp; + hw_resc->resv_stat_ctxs = stats; } mutex_unlock(&bp->hwrm_cmd_lock); return 0; @@ -4659,10 +6072,12 @@ return rc; } +static bool bnxt_rfs_supported(struct bnxt *bp); + static void __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, int tx_rings, int rx_rings, int ring_grps, - int cp_rings, int vnics) + int cp_rings, int stats, int vnics) { u32 enables = 0; @@ -4672,16 +6087,38 @@ req->num_tx_rings = cpu_to_le16(tx_rings); if (BNXT_NEW_RM(bp)) { enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; - enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; - enables |= ring_grps ? - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; + enables |= tx_rings + ring_grps ? + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + enables |= rx_rings ? + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + } else { + enables |= cp_rings ? + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + enables |= ring_grps ? + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + } enables |= vnics ? 
FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; req->num_rx_rings = cpu_to_le16(rx_rings); - req->num_hw_ring_grps = cpu_to_le16(ring_grps); - req->num_cmpl_rings = cpu_to_le16(cp_rings); - req->num_stat_ctxs = req->num_cmpl_rings; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); + req->num_msix = cpu_to_le16(cp_rings); + req->num_rsscos_ctxs = + cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); + } else { + req->num_cmpl_rings = cpu_to_le16(cp_rings); + req->num_hw_ring_grps = cpu_to_le16(ring_grps); + req->num_rsscos_ctxs = cpu_to_le16(1); + if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && + bnxt_rfs_supported(bp)) + req->num_rsscos_ctxs = + cpu_to_le16(ring_grps + 1); + } + req->num_stat_ctxs = cpu_to_le16(stats); req->num_vnics = cpu_to_le16(vnics); } req->enables = cpu_to_le32(enables); @@ -4691,23 +6128,39 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct hwrm_func_vf_cfg_input *req, int tx_rings, int rx_rings, int ring_grps, int cp_rings, - int vnics) + int stats, int vnics) { u32 enables = 0; bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; - enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; - enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + enables |= tx_rings + ring_grps ? + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + } else { + enables |= cp_rings ? + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + enables |= ring_grps ? + FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + } enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; + req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); req->num_tx_rings = cpu_to_le16(tx_rings); req->num_rx_rings = cpu_to_le16(rx_rings); - req->num_hw_ring_grps = cpu_to_le16(ring_grps); - req->num_cmpl_rings = cpu_to_le16(cp_rings); - req->num_stat_ctxs = req->num_cmpl_rings; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); + req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); + } else { + req->num_cmpl_rings = cpu_to_le16(cp_rings); + req->num_hw_ring_grps = cpu_to_le16(ring_grps); + req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); + } + req->num_stat_ctxs = cpu_to_le16(stats); req->num_vnics = cpu_to_le16(vnics); req->enables = cpu_to_le32(enables); @@ -4715,30 +6168,29 @@ static int bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, int vnics) { struct hwrm_func_cfg_input req = {0}; int rc; __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); if (!req.enables) return 0; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - return -ENOMEM; + return rc; if (bp->hwrm_spec_code < 0x10601) bp->hw_resc.resv_tx_rings = tx_rings; - rc = bnxt_hwrm_get_rings(bp); - return rc; + return bnxt_hwrm_get_rings(bp); } static int bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, int vnics) { struct hwrm_func_vf_cfg_input req = {0}; int rc; @@ -4749,29 +6201,26 @@ } __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); - req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | - FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS); - req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); - req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + cp_rings, stats, vnics); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - return -ENOMEM; + return rc; - rc = bnxt_hwrm_get_rings(bp); - return rc; + return bnxt_hwrm_get_rings(bp); } static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, - int cp, int vnic) + int cp, int stat, int vnic) { if (BNXT_PF(bp)) - return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic); + return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, + vnic); else - return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); + return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, + vnic); } -static int bnxt_cp_rings_in_use(struct bnxt *bp) +int bnxt_nq_rings_in_use(struct bnxt *bp) { int cp = bp->cp_nr_rings; int ulp_msix, ulp_base; @@ -4786,64 +6235,116 @@ return cp; } +static int bnxt_cp_rings_in_use(struct bnxt *bp) +{ + int cp; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + return bnxt_nq_rings_in_use(bp); + + cp = bp->tx_nr_rings + bp->rx_nr_rings; + return cp; +} + +static int bnxt_get_func_stat_ctxs(struct bnxt *bp) +{ + int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); + int cp = bp->cp_nr_rings; + + if (!ulp_stat) + return cp; + + if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) + return bnxt_get_ulp_msix_base(bp) + ulp_stat; + + return cp + ulp_stat; +} + +/* Check if a default RSS map needs to be setup. This function is only + * used on older firmware that does not require reserving RX rings. 
+ */ +static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + /* The RSS map is valid for RX rings set to resv_rx_rings */ + if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { + hw_resc->resv_rx_rings = bp->rx_nr_rings; + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp); + } +} + static bool bnxt_need_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int cp = bnxt_cp_rings_in_use(bp); - int rx = bp->rx_nr_rings; + int nq = bnxt_nq_rings_in_use(bp); + int rx = bp->rx_nr_rings, stat; int vnic = 1, grp = rx; - if (bp->hwrm_spec_code < 0x10601) - return false; - - if (hw_resc->resv_tx_rings != bp->tx_nr_rings) + if (hw_resc->resv_tx_rings != bp->tx_nr_rings && + bp->hwrm_spec_code >= 0x10601) return true; - if (bp->flags & BNXT_FLAG_RFS) + /* Old firmware does not need RX ring reservations but we still + * need to setup a default RSS map when needed. With new firmware + * we go through RX ring reservations first and then set up the + * RSS map for the successfully reserved RX rings when needed. + */ + if (!BNXT_NEW_RM(bp)) { + bnxt_check_rss_tbl_no_rmgr(bp); + return false; + } + if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; - if (BNXT_NEW_RM(bp) && - (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || - hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic)) + stat = bnxt_get_func_stat_ctxs(bp); + if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || + hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || + (hw_resc->resv_hw_ring_grps != grp && + !(bp->flags & BNXT_FLAG_CHIP_P5))) + return true; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && + hw_resc->resv_irqs != nq) return true; return false; } -static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, - bool shared); - static int __bnxt_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - int cp = bnxt_cp_rings_in_use(bp); + int cp = bnxt_nq_rings_in_use(bp); int tx = bp->tx_nr_rings; int rx = bp->rx_nr_rings; int grp, rx_rings, rc; + int vnic = 1, stat; bool sh = false; - int vnic = 1; if (!bnxt_need_reserve_rings(bp)) return 0; if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - if (bp->flags & BNXT_FLAG_RFS) + if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; grp = bp->rx_nr_rings; + stat = bnxt_get_func_stat_ctxs(bp); - rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic); + rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); if (rc) return rc; tx = hw_resc->resv_tx_rings; if (BNXT_NEW_RM(bp)) { rx = hw_resc->resv_rx_rings; - cp = hw_resc->resv_cp_rings; + cp = hw_resc->resv_irqs; grp = hw_resc->resv_hw_ring_grps; vnic = hw_resc->resv_vnics; + stat = hw_resc->resv_stat_ctxs; } rx_rings = rx; @@ -4862,124 +6363,245 @@ } } rx_rings = min_t(int, rx_rings, grp); + cp = min_t(int, cp, bp->cp_nr_rings); + if (stat > bnxt_get_ulp_stat_ctxs(bp)) + stat -= bnxt_get_ulp_stat_ctxs(bp); + cp = min_t(int, cp, stat); rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); if (bp->flags & BNXT_FLAG_AGG_RINGS) rx = rx_rings << 1; cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; bp->tx_nr_rings = tx; + + /* If we cannot reserve all the RX rings, reset the RSS map only + * if absolutely necessary + */ + if (rx_rings != bp->rx_nr_rings) { + netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", + rx_rings, bp->rx_nr_rings); + if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) && + (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != + bnxt_get_nr_rss_ctxs(bp, rx_rings) || + bnxt_get_max_rss_ring(bp) >= rx_rings)) { + netdev_warn(bp->dev, "RSS table entries reverting to default\n"); + bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; + } + } bp->rx_nr_rings = rx_rings; bp->cp_nr_rings = cp; - if (!tx || !rx || !cp || !grp || !vnic) + if (!tx || !rx || !cp || !grp || !vnic || !stat) return -ENOMEM; + + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp); return rc; } static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, + int vnics) { struct hwrm_func_vf_cfg_input req = {0}; u32 flags; - int rc; if (!BNXT_NEW_RM(bp)) return 0; __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | - FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | - FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; + FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; req.flags = cpu_to_le32(flags); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return -ENOMEM; - return 0; + return hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); } static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, + int vnics) { struct hwrm_func_cfg_input req = {0}; u32 flags; - int rc; __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; - if (BNXT_NEW_RM(bp)) + if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | - FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; + if (bp->flags & BNXT_FLAG_CHIP_P5) + flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; + else + flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; + } req.flags = cpu_to_le32(flags); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return -ENOMEM; - return 0; + return hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, + int vnics) { if (bp->hwrm_spec_code < 0x10801) return 0; if (BNXT_PF(bp)) return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, - ring_grps, cp_rings, vnics); + ring_grps, cp_rings, stats, + vnics); return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); } -static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, 
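/*
 * Illustrative sketch, not part of the patch: the coalescing rework that
 * follows replaces the old hard-coded 6-bit limits with limits queried from
 * firmware via HWRM_RING_AGGINT_QCAPS, converting the user's microsecond
 * settings into device timer units before clamping them.  The struct and
 * helpers below are simplified, assumed stand-ins for the driver's
 * bnxt_coal_cap fields, shown only to make the conversion explicit.
 */
#include <linux/kernel.h>	/* clamp_t() */
#include <linux/types.h>	/* u16 */

struct coal_cap_sketch {
	u16 timer_units;		/* ns per device timer unit (80 when not reported) */
	u16 int_lat_tmr_max_max;	/* largest timer value firmware accepts */
};

static u16 sketch_usec_to_coal_tmr(const struct coal_cap_sketch *cap, u16 usec)
{
	/* microseconds -> nanoseconds -> device timer units */
	return usec * 1000 / cap->timer_units;
}

static u16 sketch_clamp_coal_tmr(const struct coal_cap_sketch *cap, u16 usec)
{
	/* clamp to at least 1 and to the firmware-advertised maximum,
	 * mirroring the clamp_t() calls in the rewritten code below
	 */
	return clamp_t(u16, sketch_usec_to_coal_tmr(cap, usec), 1,
		       cap->int_lat_tmr_max_max);
}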
+static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) +{ + struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + struct hwrm_ring_aggint_qcaps_input req = {0}; + int rc; + + coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; + coal_cap->num_cmpl_dma_aggr_max = 63; + coal_cap->num_cmpl_dma_aggr_during_int_max = 63; + coal_cap->cmpl_aggr_dma_tmr_max = 65535; + coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; + coal_cap->int_lat_tmr_min_max = 65535; + coal_cap->int_lat_tmr_max_max = 65535; + coal_cap->num_cmpl_aggr_int_max = 65535; + coal_cap->timer_units = 80; + + if (bp->hwrm_spec_code < 0x10902) + return; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); + coal_cap->nq_params = le32_to_cpu(resp->nq_params); + coal_cap->num_cmpl_dma_aggr_max = + le16_to_cpu(resp->num_cmpl_dma_aggr_max); + coal_cap->num_cmpl_dma_aggr_during_int_max = + le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); + coal_cap->cmpl_aggr_dma_tmr_max = + le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); + coal_cap->cmpl_aggr_dma_tmr_during_int_max = + le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); + coal_cap->int_lat_tmr_min_max = + le16_to_cpu(resp->int_lat_tmr_min_max); + coal_cap->int_lat_tmr_max_max = + le16_to_cpu(resp->int_lat_tmr_max_max); + coal_cap->num_cmpl_aggr_int_max = + le16_to_cpu(resp->num_cmpl_aggr_int_max); + coal_cap->timer_units = le16_to_cpu(resp->timer_units); + } + mutex_unlock(&bp->hwrm_cmd_lock); +} + +static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) +{ + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + + return usec * 1000 / coal_cap->timer_units; +} + +static void bnxt_hwrm_set_coal_params(struct bnxt *bp, + struct bnxt_coal *hw_coal, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) { - u16 val, tmr, max, flags; + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + u32 cmpl_params = coal_cap->cmpl_params; + u16 val, tmr, max, flags = 0; max = hw_coal->bufs_per_record * 128; if (hw_coal->budget) max = hw_coal->bufs_per_record * hw_coal->budget; + max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); val = clamp_t(u16, hw_coal->coal_bufs, 1, max); req->num_cmpl_aggr_int = cpu_to_le16(val); - /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ - val = min_t(u16, val, 63); + val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); req->num_cmpl_dma_aggr = cpu_to_le16(val); - /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ - val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63); + val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, + coal_cap->num_cmpl_dma_aggr_during_int_max); req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); - tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks); - tmr = max_t(u16, tmr, 1); + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); + tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); req->int_lat_tmr_max = cpu_to_le16(tmr); /* min timer set to 1/2 of interrupt timer */ - val = tmr / 2; - req->int_lat_tmr_min = cpu_to_le16(val); + if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { + val = tmr / 2; + val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); + req->int_lat_tmr_min = cpu_to_le16(val); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + } /* buf timer set to 1/4 of interrupt timer */ - 
val = max_t(u16, tmr / 4, 1); + val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); req->cmpl_aggr_dma_tmr = cpu_to_le16(val); - tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq); - tmr = max_t(u16, tmr, 1); - req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); + if (cmpl_params & + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); + val = clamp_t(u16, tmr, 1, + coal_cap->cmpl_aggr_dma_tmr_during_int_max); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); + req->enables |= + cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); + } - flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; - if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) + if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) + flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && + hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; req->flags = cpu_to_le16(flags); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); +} + +/* Caller holds bp->hwrm_cmd_lock */ +static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, + struct bnxt_coal *hw_coal) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + u32 nq_params = coal_cap->nq_params; + u16 tmr; + + if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, + -1, -1); + req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); + req.flags = + cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); + + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; + tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); + req.int_lat_tmr_min = cpu_to_le16(tmr); + req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) @@ -4987,7 +6609,6 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal coal; - unsigned int grp_idx; /* Tick values in micro seconds. * 1 coal_buf x bufs_per_record = 1 completion record. 
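/*
 * Illustrative sketch, not part of the patch: on the 5750X (P5) chips each
 * NAPI instance owns a notification queue (NQ) plus separate RX and TX
 * completion rings, so the loops in the following hunks program the full
 * aggregation parameters on every completion ring and then give the NQ only
 * a minimum interrupt-latency timer, set to half of the ring's latency
 * target and gated on the capability bit returned by RING_AGGINT_QCAPS.
 * The names here (sketch_*, SKETCH_*) are simplified assumptions, not
 * driver or firmware APIs.
 */
#include <linux/kernel.h>	/* clamp_t() */
#include <linux/types.h>	/* u16, u32 */

#define SKETCH_NQ_HAS_INT_LAT_TMR_MIN	0x1U	/* assumed capability bit */

struct nq_coal_sketch {
	u32 nq_params;			/* NQ capability bits from firmware */
	u16 timer_units;		/* ns per device timer unit */
	u16 int_lat_tmr_min_max;	/* largest minimum-latency value accepted */
};

/* Returns the value to program as the NQ's int_lat_tmr_min, or 0 to skip. */
static u16 sketch_nq_min_tmr(const struct nq_coal_sketch *cap,
			     u16 coal_ticks_usec)
{
	u16 tmr;

	if (!(cap->nq_params & SKETCH_NQ_HAS_INT_LAT_TMR_MIN))
		return 0;	/* firmware cannot coalesce NQ notifications */

	/* half of the completion ring's latency target, in timer units */
	tmr = (coal_ticks_usec * 1000 / cap->timer_units) / 2;
	return clamp_t(u16, tmr, 1, cap->int_lat_tmr_min_max);
}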
@@ -5003,10 +6624,9 @@ bnxt_hwrm_cmd_hdr_init(bp, &req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - bnxt_hwrm_set_coal_params(&coal, &req_rx); + bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); - grp_idx = bnapi->index; - req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); return hwrm_send_message(bp, &req_rx, sizeof(req_rx), HWRM_CMD_TIMEOUT); @@ -5023,38 +6643,64 @@ bnxt_hwrm_cmd_hdr_init(bp, &req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx); - bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx); + bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); + bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_coal *hw_coal; + u16 ring_id; req = &req_rx; - if (!bnapi->rx_ring) + if (!bnapi->rx_ring) { + ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); req = &req_tx; - req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + } else { + ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); + } + req->ring_id = cpu_to_le16(ring_id); rc = _hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); if (rc) break; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + continue; + + if (bnapi->rx_ring && bnapi->tx_ring) { + req = &req_tx; + ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); + req->ring_id = cpu_to_le16(ring_id); + rc = _hwrm_send_message(bp, req, sizeof(*req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + } + if (bnapi->rx_ring) + hw_coal = &bp->rx_coal; + else + hw_coal = &bp->tx_coal; + __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); } mutex_unlock(&bp->hwrm_cmd_lock); return rc; } -static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) +static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) { - int rc = 0, i; + struct hwrm_stat_ctx_clr_stats_input req0 = {0}; struct hwrm_stat_ctx_free_input req = {0}; + int i; if (!bp->bnapi) - return 0; + return; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - return 0; + return; + bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); @@ -5064,17 +6710,18 @@ if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); - if (rc) - break; + if (BNXT_FW_MAJ(bp) <= 20) { + req0.stat_ctx_id = req.stat_ctx_id; + _hwrm_send_message(bp, &req0, sizeof(req0), + HWRM_CMD_TIMEOUT); + } + _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } } mutex_unlock(&bp->hwrm_cmd_lock); - return rc; } static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) @@ -5088,6 +6735,7 @@ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); mutex_lock(&bp->hwrm_cmd_lock); @@ -5095,7 +6743,7 @@ struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); + req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -5114,6 +6762,7 @@ { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + u32 min_db_offset = 0; u16 flags; int rc; @@ 
-5129,6 +6778,8 @@ struct bnxt_vf_info *vf = &bp->vf; vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; + } else { + bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); } #endif flags = le16_to_cpu(resp->flags); @@ -5140,6 +6791,8 @@ } if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) bp->flags |= BNXT_FLAG_MULTI_HOST; + if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) + bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: @@ -5160,9 +6813,479 @@ if (!bp->max_mtu) bp->max_mtu = BNXT_MAX_MTU; + if (bp->db_size) + goto func_qcfg_exit; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (BNXT_PF(bp)) + min_db_offset = DB_PF_OFFSET_P5; + else + min_db_offset = DB_VF_OFFSET_P5; + } + bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * + 1024); + if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || + bp->db_size <= min_db_offset) + bp->db_size = pci_resource_len(bp->pdev, 2); + func_qcfg_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; +} + +static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) +{ + struct hwrm_func_backing_store_qcaps_input req = {0}; + struct hwrm_func_backing_store_qcaps_output *resp = + bp->hwrm_cmd_resp_addr; + int rc; + + if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_info *ctx; + int i, tqm_rings; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto ctx_err; + } + ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); + ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); + ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); + ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); + ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); + ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); + ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); + ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); + ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); + ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); + ctx->vnic_max_vnic_entries = + le16_to_cpu(resp->vnic_max_vnic_entries); + ctx->vnic_max_ring_table_entries = + le16_to_cpu(resp->vnic_max_ring_table_entries); + ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); + ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); + ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); + ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); + ctx->tqm_min_entries_per_ring = + le32_to_cpu(resp->tqm_min_entries_per_ring); + ctx->tqm_max_entries_per_ring = + le32_to_cpu(resp->tqm_max_entries_per_ring); + ctx->tqm_entries_multiple = resp->tqm_entries_multiple; + if (!ctx->tqm_entries_multiple) + ctx->tqm_entries_multiple = 1; + ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); + ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); + ctx->mrav_num_entries_units = + le16_to_cpu(resp->mrav_num_entries_units); + ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); + ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); + ctx->ctx_kind_initializer = resp->ctx_kind_initializer; + ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; + if (!ctx->tqm_fp_rings_count) + ctx->tqm_fp_rings_count = 
bp->max_q; + else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) + ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; + + tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS; + ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL); + if (!ctx_pg) { + kfree(ctx); + rc = -ENOMEM; + goto ctx_err; + } + for (i = 0; i < tqm_rings; i++, ctx_pg++) + ctx->tqm_mem[i] = ctx_pg; + bp->ctx = ctx; + } else { + rc = 0; + } +ctx_err: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, + __le64 *pg_dir) +{ + BNXT_SET_CTX_PAGE_ATTR(*pg_attr); + if (rmem->depth >= 1) { + if (rmem->depth == 2) + *pg_attr |= 2; + else + *pg_attr |= 1; + *pg_dir = cpu_to_le64(rmem->pg_tbl_map); + } else { + *pg_dir = cpu_to_le64(rmem->dma_arr[0]); + } +} + +#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ + (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) + +static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) +{ + struct hwrm_func_backing_store_cfg_input req = {0}; + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_pg_info *ctx_pg; + __le32 *num_entries; + __le64 *pg_dir; + u32 flags = 0; + u8 *pg_attr; + u32 ena; + int i; + + if (!ctx) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); + req.enables = cpu_to_le32(enables); + + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { + ctx_pg = &ctx->qp_mem; + req.qp_num_entries = cpu_to_le32(ctx_pg->entries); + req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); + req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); + req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.qpc_pg_size_qpc_lvl, + &req.qpc_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { + ctx_pg = &ctx->srq_mem; + req.srq_num_entries = cpu_to_le32(ctx_pg->entries); + req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); + req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.srq_pg_size_srq_lvl, + &req.srq_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { + ctx_pg = &ctx->cq_mem; + req.cq_num_entries = cpu_to_le32(ctx_pg->entries); + req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); + req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, + &req.cq_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { + ctx_pg = &ctx->vnic_mem; + req.vnic_num_vnic_entries = + cpu_to_le16(ctx->vnic_max_vnic_entries); + req.vnic_num_ring_table_entries = + cpu_to_le16(ctx->vnic_max_ring_table_entries); + req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.vnic_pg_size_vnic_lvl, + &req.vnic_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { + ctx_pg = &ctx->stat_mem; + req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); + req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.stat_pg_size_stat_lvl, + &req.stat_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { + ctx_pg = &ctx->mrav_mem; + req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); + if (ctx->mrav_num_entries_units) + 
flags |= + FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; + req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.mrav_pg_size_mrav_lvl, + &req.mrav_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { + ctx_pg = &ctx->tim_mem; + req.tim_num_entries = cpu_to_le32(ctx_pg->entries); + req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.tim_pg_size_tim_lvl, + &req.tim_page_dir); + } + for (i = 0, num_entries = &req.tqm_sp_num_entries, + pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, + pg_dir = &req.tqm_sp_page_dir, + ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; + i < BNXT_MAX_TQM_RINGS; + i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { + if (!(enables & ena)) + continue; + + req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); + ctx_pg = ctx->tqm_mem[i]; + *num_entries = cpu_to_le32(ctx_pg->entries); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); + } + req.flags = cpu_to_le32(flags); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + + rmem->page_size = BNXT_PAGE_SIZE; + rmem->pg_arr = ctx_pg->ctx_pg_arr; + rmem->dma_arr = ctx_pg->ctx_dma_arr; + rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; + if (rmem->depth >= 1) + rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; + return bnxt_alloc_ring(bp, rmem); +} + +static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, + u8 depth, bool use_init_val) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + int rc; + + if (!mem_size) + return -EINVAL; + + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { + ctx_pg->nr_pages = 0; + return -EINVAL; + } + if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { + int nr_tbls, i; + + rmem->depth = 2; + ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), + GFP_KERNEL); + if (!ctx_pg->ctx_pg_tbl) + return -ENOMEM; + nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); + rmem->nr_pages = nr_tbls; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); + if (rc) + return rc; + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + + pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); + if (!pg_tbl) + return -ENOMEM; + ctx_pg->ctx_pg_tbl[i] = pg_tbl; + rmem = &pg_tbl->ring_mem; + rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; + rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; + rmem->depth = 1; + rmem->nr_pages = MAX_CTX_PAGES; + if (use_init_val) + rmem->init_val = bp->ctx->ctx_kind_initializer; + if (i == (nr_tbls - 1)) { + int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; + + if (rem) + rmem->nr_pages = rem; + } + rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); + if (rc) + break; + } + } else { + rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (rmem->nr_pages > 1 || depth) + rmem->depth = 1; + if (use_init_val) + rmem->init_val = bp->ctx->ctx_kind_initializer; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); + } + return rc; +} + +static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + + if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || + ctx_pg->ctx_pg_tbl) { + int i, nr_tbls = rmem->nr_pages; + + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + struct bnxt_ring_mem_info *rmem2; + + pg_tbl = 
ctx_pg->ctx_pg_tbl[i]; + if (!pg_tbl) + continue; + rmem2 = &pg_tbl->ring_mem; + bnxt_free_ring(bp, rmem2); + ctx_pg->ctx_pg_arr[i] = NULL; + kfree(pg_tbl); + ctx_pg->ctx_pg_tbl[i] = NULL; + } + kfree(ctx_pg->ctx_pg_tbl); + ctx_pg->ctx_pg_tbl = NULL; + } + bnxt_free_ring(bp, rmem); + ctx_pg->nr_pages = 0; +} + +static void bnxt_free_ctx_mem(struct bnxt *bp) +{ + struct bnxt_ctx_mem_info *ctx = bp->ctx; + int i; + + if (!ctx) + return; + + if (ctx->tqm_mem[0]) { + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) + bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); + kfree(ctx->tqm_mem[0]); + ctx->tqm_mem[0] = NULL; + } + + bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); + ctx->flags &= ~BNXT_CTX_FLAG_INITED; +} + +static int bnxt_alloc_ctx_mem(struct bnxt *bp) +{ + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_info *ctx; + u32 mem_size, ena, entries; + u32 entries_sp, min; + u32 num_mr, num_ah; + u32 extra_srqs = 0; + u32 extra_qps = 0; + u8 pg_lvl = 1; + int i, rc; + + rc = bnxt_hwrm_func_backing_store_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", + rc); + return rc; + } + ctx = bp->ctx; + if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) + return 0; + + if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { + pg_lvl = 2; + extra_qps = 65536; + extra_srqs = 8192; + } + + ctx_pg = &ctx->qp_mem; + ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + + extra_qps; + mem_size = ctx->qp_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); + if (rc) + return rc; + + ctx_pg = &ctx->srq_mem; + ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; + mem_size = ctx->srq_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); + if (rc) + return rc; + + ctx_pg = &ctx->cq_mem; + ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; + mem_size = ctx->cq_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); + if (rc) + return rc; + + ctx_pg = &ctx->vnic_mem; + ctx_pg->entries = ctx->vnic_max_vnic_entries + + ctx->vnic_max_ring_table_entries; + mem_size = ctx->vnic_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); + if (rc) + return rc; + + ctx_pg = &ctx->stat_mem; + ctx_pg->entries = ctx->stat_max_entries; + mem_size = ctx->stat_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); + if (rc) + return rc; + + ena = 0; + if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) + goto skip_rdma; + + ctx_pg = &ctx->mrav_mem; + /* 128K extra is needed to accommodate static AH context + * allocation by f/w. 
+ */ + num_mr = 1024 * 256; + num_ah = 1024 * 128; + ctx_pg->entries = num_mr + num_ah; + mem_size = ctx->mrav_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true); + if (rc) + return rc; + ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; + if (ctx->mrav_num_entries_units) + ctx_pg->entries = + ((num_mr / ctx->mrav_num_entries_units) << 16) | + (num_ah / ctx->mrav_num_entries_units); + + ctx_pg = &ctx->tim_mem; + ctx_pg->entries = ctx->qp_mem.entries; + mem_size = ctx->tim_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; + +skip_rdma: + min = ctx->tqm_min_entries_per_ring; + entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries + + 2 * (extra_qps + ctx->qp_min_qp1_entries) + min; + entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple); + entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries); + entries = roundup(entries, ctx->tqm_entries_multiple); + entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring); + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) { + ctx_pg = ctx->tqm_mem[i]; + ctx_pg->entries = i ? entries : entries_sp; + mem_size = ctx->tqm_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; + } + ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; + rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); + if (rc) { + netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", + rc); + return rc; + } + ctx->flags |= BNXT_CTX_FLAG_INITED; + return 0; } int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) @@ -5176,11 +7299,10 @@ req.fid = cpu_to_le16(0xffff); mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) { - rc = -EIO; + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) goto hwrm_func_resc_qcaps_exit; - } hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); if (!all) @@ -5203,6 +7325,13 @@ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + u16 max_msix = le16_to_cpu(resp->max_msix); + + hw_resc->max_nqs = max_msix; + hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; + } + if (BNXT_PF(bp)) { struct bnxt_pf_info *pf = &bp->pf; @@ -5222,7 +7351,7 @@ struct hwrm_func_qcaps_input req = {0}; struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - u32 flags; + u32 flags, flags_ext; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); req.fid = cpu_to_le16(0xffff); @@ -5237,9 +7366,26 @@ bp->flags |= BNXT_FLAG_ROCEV1_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) bp->flags |= BNXT_FLAG_ROCEV2_CAP; + if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; + if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; + if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; + if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; + if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) + bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; + if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) + 
bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; + + flags_ext = le32_to_cpu(resp->flags_ext); + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; bp->tx_push_thresh = 0; - if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) + if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && + BNXT_FW_MAJ(bp) > 217) bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); @@ -5267,6 +7413,7 @@ pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); + bp->flags &= ~BNXT_FLAG_WOL_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) bp->flags |= BNXT_FLAG_WOL_CAP; } else { @@ -5283,6 +7430,8 @@ return rc; } +static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); + static int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc; @@ -5290,12 +7439,206 @@ rc = __bnxt_hwrm_func_qcaps(bp); if (rc) return rc; + rc = bnxt_hwrm_queue_qportcfg(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); + return rc; + } if (bp->hwrm_spec_code >= 0x10803) { + rc = bnxt_alloc_ctx_mem(bp); + if (rc) + return rc; rc = bnxt_hwrm_func_resc_qcaps(bp, true); if (!rc) bp->fw_cap |= BNXT_FW_CAP_NEW_RM; } return 0; +} + +static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) +{ + struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; + struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; + int rc = 0; + u32 flags; + + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) + return 0; + + resp = bp->hwrm_cmd_resp_addr; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto hwrm_cfa_adv_qcaps_exit; + + flags = le32_to_cpu(resp->flags); + if (flags & + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; + +hwrm_cfa_adv_qcaps_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int __bnxt_alloc_fw_health(struct bnxt *bp) +{ + if (bp->fw_health) + return 0; + + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); + if (!bp->fw_health) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_fw_health(struct bnxt *bp) +{ + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return 0; + + rc = __bnxt_alloc_fw_health(bp); + if (rc) { + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return rc; + } + + return 0; +} + +static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) +{ + writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + + BNXT_FW_HEALTH_WIN_MAP_OFF); +} + +static void bnxt_try_map_fw_health_reg(struct bnxt *bp) +{ + void __iomem *hs; + u32 status_loc; + u32 reg_type; + u32 sig; + + __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); + hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); + + sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); + if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { + if (bp->fw_health) + bp->fw_health->status_reliable = false; + return; + } + + if (__bnxt_alloc_fw_health(bp)) { + netdev_warn(bp->dev, "no memory for firmware status checks\n"); + return; + } + + status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc)); + 
bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; + reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { + __bnxt_map_fw_health_reg(bp, status_loc); + bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = + BNXT_FW_HEALTH_WIN_OFF(status_loc); + } + + bp->fw_health->status_reliable = true; +} + +static int bnxt_map_fw_health_regs(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg_base = 0xffffffff; + int i; + + /* Only pre-map the monitoring GRC registers using window 3 */ + for (i = 0; i < 4; i++) { + u32 reg = fw_health->regs[i]; + + if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) + continue; + if (reg_base == 0xffffffff) + reg_base = reg & BNXT_GRC_BASE_MASK; + if ((reg & BNXT_GRC_BASE_MASK) != reg_base) + return -ERANGE; + fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); + } + if (reg_base == 0xffffffff) + return 0; + + __bnxt_map_fw_health_reg(bp, reg_base); + return 0; +} + +static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) +{ + struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_fw_health *fw_health = bp->fw_health; + struct hwrm_error_recovery_qcfg_input req = {0}; + int rc, i; + + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto err_recovery_out; + fw_health->flags = le32_to_cpu(resp->flags); + if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && + !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { + rc = -EINVAL; + goto err_recovery_out; + } + fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); + fw_health->master_func_wait_dsecs = + le32_to_cpu(resp->master_func_wait_period); + fw_health->normal_func_wait_dsecs = + le32_to_cpu(resp->normal_func_wait_period); + fw_health->post_reset_wait_dsecs = + le32_to_cpu(resp->master_func_wait_period_after_reset); + fw_health->post_reset_max_wait_dsecs = + le32_to_cpu(resp->max_bailout_time_after_reset); + fw_health->regs[BNXT_FW_HEALTH_REG] = + le32_to_cpu(resp->fw_health_status_reg); + fw_health->regs[BNXT_FW_HEARTBEAT_REG] = + le32_to_cpu(resp->fw_heartbeat_reg); + fw_health->regs[BNXT_FW_RESET_CNT_REG] = + le32_to_cpu(resp->fw_reset_cnt_reg); + fw_health->regs[BNXT_FW_RESET_INPROG_REG] = + le32_to_cpu(resp->reset_inprogress_reg); + fw_health->fw_reset_inprog_reg_mask = + le32_to_cpu(resp->reset_inprogress_reg_mask); + fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; + if (fw_health->fw_reset_seq_cnt >= 16) { + rc = -EINVAL; + goto err_recovery_out; + } + for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { + fw_health->fw_reset_seq_regs[i] = + le32_to_cpu(resp->reset_reg[i]); + fw_health->fw_reset_seq_vals[i] = + le32_to_cpu(resp->reset_reg_val[i]); + fw_health->fw_reset_seq_delay_msec[i] = + resp->delay_after_reset[i]; + } +err_recovery_out: + mutex_unlock(&bp->hwrm_cmd_lock); + if (!rc) + rc = bnxt_map_fw_health_regs(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return rc; } static int bnxt_hwrm_func_reset(struct bnxt *bp) @@ -5306,6 +7649,16 @@ req.enables = 0; return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); +} + +static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) +{ + struct hwrm_nvm_get_dev_info_output nvm_info; + + if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) + snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", + 
nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, + nvm_info.nvm_cfg_ver_upd); } static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) @@ -5335,13 +7688,15 @@ no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); qptr = &resp->queue_id0; for (i = 0, j = 0; i < bp->max_tc; i++) { - bp->q_info[j].queue_id = *qptr++; + bp->q_info[j].queue_id = *qptr; + bp->q_ids[i] = *qptr++; bp->q_info[j].queue_profile = *qptr++; bp->tc_to_qidx[j] = j; if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || (no_rdma && BNXT_PF(bp))) j++; } + bp->max_q = bp->max_tc; bp->max_tc = max_t(u8, j, 1); if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) @@ -5355,20 +7710,31 @@ return rc; } -static int bnxt_hwrm_ver_get(struct bnxt *bp) +static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) { - int rc; struct hwrm_ver_get_input req = {0}; - struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; - u32 dev_caps_cfg; + int rc; - bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); req.hwrm_intf_maj = HWRM_VERSION_MAJOR; req.hwrm_intf_min = HWRM_VERSION_MINOR; req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + + rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, + silent); + return rc; +} + +static int bnxt_hwrm_ver_get(struct bnxt *bp) +{ + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + u16 fw_maj, fw_min, fw_bld, fw_rsv; + u32 dev_caps_cfg, hwrm_ver; + int rc, len; + + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = __bnxt_hwrm_ver_get(bp, false); if (rc) goto hwrm_ver_get_exit; @@ -5383,18 +7749,58 @@ resp->hwrm_intf_upd_8b); netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); } - snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", - resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, - resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); + + hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | + HWRM_VERSION_UPDATE; + + if (bp->hwrm_spec_code > hwrm_ver) + snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", + HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, + HWRM_VERSION_UPDATE); + else + snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b); + + fw_maj = le16_to_cpu(resp->hwrm_fw_major); + if (bp->hwrm_spec_code > 0x10803 && fw_maj) { + fw_min = le16_to_cpu(resp->hwrm_fw_minor); + fw_bld = le16_to_cpu(resp->hwrm_fw_build); + fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); + len = FW_VER_STR_LEN; + } else { + fw_maj = resp->hwrm_fw_maj_8b; + fw_min = resp->hwrm_fw_min_8b; + fw_bld = resp->hwrm_fw_bld_8b; + fw_rsv = resp->hwrm_fw_rsvd_8b; + len = BC_HWRM_STR_LEN; + } + bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); + snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, + fw_rsv); + + if (strlen(resp->active_pkg_name)) { + int fw_ver_len = strlen(bp->fw_ver_str); + + snprintf(bp->fw_ver_str + fw_ver_len, + FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", + resp->active_pkg_name); + bp->fw_cap |= BNXT_FW_CAP_PKG_VER; + } bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); if (!bp->hwrm_cmd_timeout) bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; - if (resp->hwrm_intf_maj_8b >= 1) + if (resp->hwrm_intf_maj_8b >= 1) { bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); + bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); + } + if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) + 
bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; bp->chip_num = le16_to_cpu(resp->chip_num); + bp->chip_rev = resp->chip_rev; if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && !resp->chip_metal) bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; @@ -5403,6 +7809,21 @@ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; + + if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -5430,50 +7851,194 @@ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_hwrm_port_qstats(struct bnxt *bp) +static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) { - int rc; + u64 sw_tmp; + + hw &= mask; + sw_tmp = (*sw & ~mask) | hw; + if (hw < (*sw & mask)) + sw_tmp += mask + 1; + WRITE_ONCE(*sw, sw_tmp); +} + +static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, + int count, bool ignore_zero) +{ + int i; + + for (i = 0; i < count; i++) { + u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); + + if (ignore_zero && !hw) + continue; + + if (masks[i] == -1ULL) + sw_stats[i] = hw; + else + bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); + } +} + +static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) +{ + if (!stats->hw_stats) + return; + + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + stats->hw_masks, stats->len / 8, false); +} + +static void bnxt_accumulate_all_stats(struct bnxt *bp) +{ + struct bnxt_stats_mem *ring0_stats; + bool ignore_zero = false; + int i; + + /* Chip bug. Counter intermittently becomes 0. 
*/ + if (bp->flags & BNXT_FLAG_CHIP_P5) + ignore_zero = true; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + if (!i) + ring0_stats = stats; + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + ring0_stats->hw_masks, + ring0_stats->len / 8, ignore_zero); + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct bnxt_stats_mem *stats = &bp->port_stats; + __le64 *hw_stats = stats->hw_stats; + u64 *sw_stats = stats->sw_stats; + u64 *masks = stats->hw_masks; + int cnt; + + cnt = sizeof(struct rx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + + hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + cnt = sizeof(struct tx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + bnxt_accumulate_stats(&bp->rx_port_stats_ext); + bnxt_accumulate_stats(&bp->tx_port_stats_ext); + } +} + +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) +{ struct bnxt_pf_info *pf = &bp->pf; struct hwrm_port_qstats_input req = {0}; if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + + req.flags = flags; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); req.port_id = cpu_to_le16(pf->port_id); - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + + BNXT_TX_PORT_STATS_BYTE_OFFSET); + req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) { + struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; struct hwrm_port_qstats_ext_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + u32 tx_stat_size; + int rc; if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) return 0; + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); + req.flags = flags; req.port_id = cpu_to_le16(pf->port_id); req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); + tx_stat_size = bp->tx_port_stats_ext.hw_stats ? + sizeof(struct tx_port_stats_ext) : 0; + req.tx_stat_size = cpu_to_le16(tx_stat_size); + req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; + bp->fw_tx_stats_ext_size = tx_stat_size ? 
+ le16_to_cpu(resp->tx_stat_size) / 8 : 0; + } else { + bp->fw_rx_stats_ext_size = 0; + bp->fw_tx_stats_ext_size = 0; + } + if (flags) + goto qstats_done; + + if (bp->fw_tx_stats_ext_size <= + offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { + mutex_unlock(&bp->hwrm_cmd_lock); + bp->pri2cos_valid = 0; + return rc; + } + + bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); + req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + + rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); + if (!rc) { + struct hwrm_queue_pri2cos_qcfg_output *resp2; + u8 *pri2cos; + int i, j; + + resp2 = bp->hwrm_cmd_resp_addr; + pri2cos = &resp2->pri0_cos_queue_id; + for (i = 0; i < 8; i++) { + u8 queue_id = pri2cos[i]; + u8 queue_idx; + + /* Per port queue IDs start from 0, 10, 20, etc */ + queue_idx = queue_id % 10; + if (queue_idx > BNXT_MAX_QUEUE) { + bp->pri2cos_valid = false; + goto qstats_done; + } + for (j = 0; j < bp->max_q; j++) { + if (bp->q_ids[j] == queue_id) + bp->pri2cos_idx[i] = queue_idx; + } + } + bp->pri2cos_valid = 1; + } +qstats_done: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; } static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { - if (bp->vxlan_port_cnt) { + if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - bp->vxlan_port_cnt = 0; - if (bp->nge_port_cnt) { + if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } - bp->nge_port_cnt = 0; } static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) @@ -5483,6 +8048,8 @@ if (set_tpa) tpa_flags = bp->flags & BNXT_FLAG_TPA; + else if (BNXT_NO_FW_ACCESS(bp)) + return 0; for (i = 0; i < bp->nr_vnics; i++) { rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); if (rc) { @@ -5502,19 +8069,29 @@ bnxt_hwrm_vnic_set_rss(bp, i, false); } -static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, - bool irq_re_init) +static void bnxt_clear_vnic(struct bnxt *bp) { - if (bp->vnic_info) { - bnxt_hwrm_clear_vnic_filter(bp); + if (!bp->vnic_info) + return; + + bnxt_hwrm_clear_vnic_filter(bp); + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { /* clear all RSS setting before free vnic ctx */ bnxt_hwrm_clear_vnic_rss(bp); bnxt_hwrm_vnic_ctx_free(bp); - /* before free the vnic, undo the vnic tpa settings */ - if (bp->flags & BNXT_FLAG_TPA) - bnxt_set_tpa(bp, false); - bnxt_hwrm_vnic_free(bp); } + /* before free the vnic, undo the vnic tpa settings */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + bnxt_hwrm_vnic_free(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + bnxt_hwrm_vnic_ctx_free(bp); +} + +static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, + bool irq_re_init) +{ + bnxt_clear_vnic(bp); bnxt_hwrm_ring_free(bp, close_path); bnxt_hwrm_ring_grp_free(bp); if (irq_re_init) { @@ -5526,7 +8103,6 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) { struct hwrm_func_cfg_input req = {0}; - int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(0xffff); @@ -5537,16 +8113,12 @@ req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; else return -EINVAL; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) { struct hwrm_func_cfg_input req = {0}; - int rc; if (BNXT_VF(bp) || bp->hwrm_spec_code 
< 0x10803) return 0; @@ -5558,13 +8130,10 @@ if (size == 128) req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; int rc; @@ -5620,10 +8189,60 @@ return rc; } +static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) +{ + int rc, i, nr_ctxs; + + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); + for (i = 0; i < nr_ctxs; i++) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", + vnic_id, i, rc); + break; + } + bp->rsscos_nr_ctxs++; + } + if (i < nr_ctxs) + return -ENOMEM; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic_id, rc); + return rc; + } + rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", + vnic_id, rc); + return rc; + } + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", + vnic_id, rc); + } + } + return rc; +} + +static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + return __bnxt_setup_vnic_p5(bp, vnic_id); + else + return __bnxt_setup_vnic(bp, vnic_id); +} + static int bnxt_alloc_rfs_vnics(struct bnxt *bp) { #ifdef CONFIG_RFS_ACCEL int i, rc = 0; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + return 0; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_vnic_info *vnic; @@ -5722,6 +8341,9 @@ netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); goto err_out; } + + if (BNXT_VF(bp)) + bnxt_hwrm_func_qcfg(bp); rc = bnxt_setup_vnic(bp, 0); if (rc) @@ -5905,9 +8527,17 @@ bp->irq_tbl[0].handler = bnxt_inta; } +static int bnxt_init_int_mode(struct bnxt *bp); + static int bnxt_setup_int_mode(struct bnxt *bp) { int rc; + + if (!bp->irq_tbl) { + rc = bnxt_init_int_mode(bp); + if (rc || !bp->irq_tbl) + return rc ?: -ENODEV; + } if (bp->flags & BNXT_FLAG_USING_MSIX) bnxt_setup_msix(bp); @@ -5935,24 +8565,27 @@ return bp->hw_resc.max_stat_ctxs; } -void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) -{ - bp->hw_resc.max_stat_ctxs = max; -} - unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) { return bp->hw_resc.max_cp_rings; } -unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) +static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) { - return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); + unsigned int cp = bp->hw_resc.max_cp_rings; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + cp -= bnxt_get_ulp_msix_num(bp); + + return cp; } static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); } @@ -5962,6 +8595,22 @@ bp->hw_resc.max_irqs = max_irqs; } +unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) +{ + unsigned int cp; + + cp = bnxt_get_max_func_cp_rings_for_en(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + return cp - bp->rx_nr_rings - bp->tx_nr_rings; + else + return cp - 
bp->cp_nr_rings; +} + +unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) +{ + return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); +} + int bnxt_get_avail_msix(struct bnxt *bp, int num) { int max_cp = bnxt_get_max_func_cp_rings(bp); @@ -5969,7 +8618,9 @@ int total_req = bp->cp_nr_rings + num; int max_idx, avail_msix; - max_idx = min_t(int, bp->total_irqs, max_cp); + max_idx = bp->total_irqs; + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + max_idx = min_t(int, bp->total_irqs, max_cp); avail_msix = max_idx - bp->cp_nr_rings; if (!BNXT_NEW_RM(bp) || avail_msix >= num) return avail_msix; @@ -5987,7 +8638,7 @@ if (!BNXT_NEW_RM(bp)) return bnxt_get_max_func_irqs(bp); - return bnxt_cp_rings_in_use(bp); + return bnxt_nq_rings_in_use(bp); } static int bnxt_init_msix(struct bnxt *bp) @@ -6072,7 +8723,7 @@ static int bnxt_init_int_mode(struct bnxt *bp) { - int rc = 0; + int rc = -ENODEV; if (bp->flags & BNXT_FLAG_MSIX_CAP) rc = bnxt_init_msix(bp); @@ -6094,22 +8745,23 @@ bp->flags &= ~BNXT_FLAG_USING_MSIX; } -int bnxt_reserve_rings(struct bnxt *bp) +int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) { int tcs = netdev_get_num_tc(bp->dev); - bool reinit_irq = false; + bool irq_cleared = false; int rc; if (!bnxt_need_reserve_rings(bp)) return 0; - if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) { + if (irq_re_init && BNXT_NEW_RM(bp) && + bnxt_get_num_msix(bp) != bp->total_irqs) { bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); - reinit_irq = true; + irq_cleared = true; } rc = __bnxt_reserve_rings(bp); - if (reinit_irq) { + if (irq_cleared) { if (!rc) rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); @@ -6118,13 +8770,16 @@ netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); return rc; } - if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { + if (tcs && (bp->tx_nr_rings_per_tc * tcs != + bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { netdev_err(bp->dev, "tx ring reservation failure\n"); netdev_reset_tc(bp->dev); - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + if (bp->tx_nr_rings_xdp) + bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; + else + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; return -ENOMEM; } - bp->num_stat_ctxs = bp->cp_nr_rings; return 0; } @@ -6225,10 +8880,9 @@ for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; - napi_hash_del(&bnapi->napi); - netif_napi_del(&bnapi->napi); + __netif_napi_del(&bnapi->napi); } - /* We called napi_hash_del() before netif_napi_del(), we need + /* We called __netif_napi_del(), we need * to respect an RCU grace period before freeing napi structures. 
*/ synchronize_net(); @@ -6241,12 +8895,15 @@ struct bnxt_napi *bnapi; if (bp->flags & BNXT_FLAG_USING_MSIX) { - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + poll_fn = bnxt_poll_p5; + else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) cp_nr_rings--; for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; - netif_napi_add(bp->dev, &bnapi->napi, - bnxt_poll, 64); + netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); } if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bnapi = bp->bnapi[cp_nr_rings]; @@ -6280,14 +8937,19 @@ int i; for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; - bp->bnapi[i]->in_reset = false; + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; - if (bp->bnapi[i]->rx_ring) { + cpr = &bnapi->cp_ring; + if (bnapi->in_reset) + cpr->sw_stats.rx.rx_resets++; + bnapi->in_reset = false; + + if (bnapi->rx_ring) { INIT_WORK(&cpr->dim.work, bnxt_dim_work); - cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; } - napi_enable(&bp->bnapi[i]->napi); + napi_enable(&bnapi->napi); } } @@ -6326,6 +8988,30 @@ netif_carrier_on(bp->dev); } +static char *bnxt_report_fec(struct bnxt_link_info *link_info) +{ + u8 active_fec = link_info->active_fec_sig_mode & + PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; + + switch (active_fec) { + default: + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + return "None"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: + return "Clause 74 BaseR"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: + return "Clause 91 RS(528,514)"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: + return "Clause 91 RS544_1XN"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: + return "Clause 91 RS(544,514)"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: + return "Clause 91 RS272_1XN"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: + return "Clause 91 RS(272,257)"; + } +} + static void bnxt_report_link(struct bnxt *bp) { if (bp->link_info.link_up) { @@ -6360,14 +9046,23 @@ "not active"); fec = bp->link_info.fec_cfg; if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) - netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", + netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", (fec & BNXT_FEC_AUTONEG) ? "on" : "off", - (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : - (fec & BNXT_FEC_ENC_RS) ? 
"RS" : "None"); + bnxt_report_fec(&bp->link_info)); } else { netif_carrier_off(bp->dev); netdev_err(bp->dev, "NIC Link is Down\n"); } +} + +static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) +{ + if (!resp->supported_speeds_auto_mode && + !resp->supported_speeds_force_mode && + !resp->supported_pam4_speeds_auto_mode && + !resp->supported_pam4_speeds_force_mode) + return true; + return false; } static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) @@ -6377,6 +9072,10 @@ struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_link_info *link_info = &bp->link_info; + bp->flags &= ~BNXT_FLAG_EEE_CAP; + if (bp->test_info) + bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | + BNXT_TEST_FL_AN_PHY_LPBK); if (bp->hwrm_spec_code < 0x10201) return 0; @@ -6402,9 +9101,35 @@ if (bp->test_info) bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; } + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { + if (bp->test_info) + bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; + } + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { + if (BNXT_PF(bp)) + bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; + } + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET) + bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET; + + if (bp->hwrm_spec_code >= 0x10a01) { + if (bnxt_phy_qcaps_no_speed(resp)) { + link_info->phy_state = BNXT_PHY_STATE_DISABLED; + netdev_warn(bp->dev, "Ethernet link disabled\n"); + } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { + link_info->phy_state = BNXT_PHY_STATE_ENABLED; + netdev_info(bp->dev, "Ethernet link enabled\n"); + /* Phy re-enabled, reprobe the speeds */ + link_info->support_auto_speeds = 0; + link_info->support_pam4_auto_speeds = 0; + } + } if (resp->supported_speeds_auto_mode) link_info->support_auto_speeds = le16_to_cpu(resp->supported_speeds_auto_mode); + if (resp->supported_pam4_speeds_auto_mode) + link_info->support_pam4_auto_speeds = + le16_to_cpu(resp->supported_pam4_speeds_auto_mode); bp->port_count = resp->port_cnt; @@ -6413,14 +9138,21 @@ return rc; } -static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) +static bool bnxt_support_dropped(u16 advertising, u16 supported) +{ + u16 diff = advertising ^ supported; + + return ((supported | diff) != supported); +} + +int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; struct hwrm_port_phy_qcfg_input req = {0}; struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; u8 link_up = link_info->link_up; - u16 diff; + bool support_changed = false; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); @@ -6447,10 +9179,17 @@ else link_info->link_speed = 0; link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); + link_info->force_pam4_link_speed = + le16_to_cpu(resp->force_pam4_link_speed); link_info->support_speeds = le16_to_cpu(resp->support_speeds); + link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); + link_info->auto_pam4_link_speeds = + le16_to_cpu(resp->auto_pam4_link_speed_mask); link_info->lp_auto_link_speeds = le16_to_cpu(resp->link_partner_adv_speeds); + link_info->lp_auto_pam4_link_speeds = + resp->link_partner_pam4_adv_speeds; link_info->preemphasis = le32_to_cpu(resp->preemphasis); link_info->phy_ver[0] = resp->phy_maj; link_info->phy_ver[1] = resp->phy_min; @@ -6499,9 +9238,10 @@ } link_info->fec_cfg = 
PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; - if (bp->hwrm_spec_code >= 0x10504) + if (bp->hwrm_spec_code >= 0x10504) { link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); - + link_info->active_fec_sig_mode = resp->active_fec_signal_mode; + } /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == BNXT_LINK_LINK) @@ -6516,20 +9256,24 @@ } mutex_unlock(&bp->hwrm_cmd_lock); - if (!BNXT_SINGLE_PF(bp)) + if (!BNXT_PHY_CFG_ABLE(bp)) return 0; - diff = link_info->support_auto_speeds ^ link_info->advertising; - if ((link_info->support_auto_speeds | diff) != - link_info->support_auto_speeds) { - /* An advertised speed is no longer supported, so we need to - * update the advertisement settings. Caller holds RTNL - * so we can modify link settings. - */ + /* Check if any advertised speeds are no longer supported. The caller + * holds the link_lock mutex, so we can modify link_info settings. + */ + if (bnxt_support_dropped(link_info->advertising, + link_info->support_auto_speeds)) { link_info->advertising = link_info->support_auto_speeds; - if (link_info->autoneg & BNXT_AUTONEG_SPEED) - bnxt_hwrm_set_link_setting(bp, true, false); + support_changed = true; } + if (bnxt_support_dropped(link_info->advertising_pam4, + link_info->support_pam4_auto_speeds)) { + link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; + support_changed = true; + } + if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) + bnxt_hwrm_set_link_setting(bp, true, false); return 0; } @@ -6588,27 +9332,30 @@ } } -static void bnxt_hwrm_set_link_common(struct bnxt *bp, - struct hwrm_port_phy_cfg_input *req) +static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) { - u8 autoneg = bp->link_info.autoneg; - u16 fw_link_speed = bp->link_info.req_link_speed; - u16 advertising = bp->link_info.advertising; - - if (autoneg & BNXT_AUTONEG_SPEED) { - req->auto_mode |= - PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; - - req->enables |= cpu_to_le32( - PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); - req->auto_link_speed_mask = cpu_to_le16(advertising); - + if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { + req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; + if (bp->link_info.advertising) { + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); + req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); + } + if (bp->link_info.advertising_pam4) { + req->enables |= + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); + req->auto_link_pam4_speed_mask = + cpu_to_le16(bp->link_info.advertising_pam4); + } req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); - req->flags |= - cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); } else { - req->force_link_speed = cpu_to_le16(fw_link_speed); req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); + if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { + req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); + } else { + req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); + } } /* tell chimp that the setting takes effect immediately */ @@ -6698,11 +9445,15 @@ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_fw_init_one(struct bnxt *bp); + static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { struct 
hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_drv_if_change_input req = {0}; + bool fw_reset = !bp->irq_tbl; bool resc_reinit = false; + u32 flags = 0; int rc; if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) @@ -6713,24 +9464,64 @@ req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc && (resp->flags & - cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE))) - resc_reinit = true; + if (!rc) + flags = le32_to_cpu(resp->flags); mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + return rc; - if (up && resc_reinit && BNXT_NEW_RM(bp)) { - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + if (!up) + return 0; - rc = bnxt_hwrm_func_resc_qcaps(bp, true); - hw_resc->resv_cp_rings = 0; - hw_resc->resv_tx_rings = 0; - hw_resc->resv_rx_rings = 0; - hw_resc->resv_hw_ring_grps = 0; - hw_resc->resv_vnics = 0; - bp->tx_nr_rings = 0; - bp->rx_nr_rings = 0; + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) + resc_reinit = true; + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) + fw_reset = true; + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { + netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + return -ENODEV; } - return rc; + if (resc_reinit || fw_reset) { + if (fw_reset) { + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_ulp_stop(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; + bnxt_dcb_free(bp); + rc = bnxt_fw_init_one(bp); + if (rc) { + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + return rc; + } + bnxt_clear_int_mode(bp); + rc = bnxt_init_int_mode(bp); + if (rc) { + netdev_err(bp->dev, "init int mode failed\n"); + return rc; + } + set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); + } + if (BNXT_NEW_RM(bp)) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + rc = bnxt_hwrm_func_resc_qcaps(bp, true); + hw_resc->resv_cp_rings = 0; + hw_resc->resv_stat_ctxs = 0; + hw_resc->resv_irqs = 0; + hw_resc->resv_tx_rings = 0; + hw_resc->resv_rx_rings = 0; + hw_resc->resv_hw_ring_grps = 0; + hw_resc->resv_vnics = 0; + if (!fw_reset) { + bp->tx_nr_rings = 0; + bp->rx_nr_rings = 0; + } + } + } + return 0; } static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) @@ -6740,6 +9531,7 @@ struct bnxt_pf_info *pf = &bp->pf; int rc; + bp->num_leds = 0; if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) return 0; @@ -6794,14 +9586,12 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) { struct hwrm_wol_filter_free_input req = {0}; - int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); req.port_id = cpu_to_le16(bp->pf.port_id); req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); req.wol_filter_id = bp->wol_filter_id; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) @@ -6834,6 +9624,7 @@ { u16 handle = 0; + bp->wol = 0; if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) return; @@ -6891,6 +9682,9 @@ bnxt_hwmon_close(bp); return; } + + if (bp->hwmon_dev) + return; bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, DRV_MODULE_NAME, bp, @@ -6961,21 +9755,26 @@ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { if (BNXT_AUTO_MODE(link_info->auto_mode)) update_link = true; - if (link_info->req_link_speed != link_info->force_link_speed) + if (link_info->req_signal_mode == 
BNXT_SIG_MODE_NRZ && + link_info->req_link_speed != link_info->force_link_speed) + update_link = true; + else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && + link_info->req_link_speed != link_info->force_pam4_link_speed) update_link = true; if (link_info->req_duplex != link_info->duplex_setting) update_link = true; } else { if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) update_link = true; - if (link_info->advertising != link_info->auto_link_speeds) + if (link_info->advertising != link_info->auto_link_speeds || + link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) update_link = true; } /* The last close may have shutdown the link, so need to call * PHY_CFG to bring it back up. */ - if (!netif_carrier_ok(bp->dev)) + if (!bp->link_info.link_up) update_link = true; if (!bnxt_eee_config_ok(bp)) @@ -7023,10 +9822,10 @@ netdev_err(bp->dev, "Failed to reserve default rings at open\n"); return rc; } - rc = bnxt_reserve_rings(bp); - if (rc) - return rc; } + rc = bnxt_reserve_rings(bp, irq_re_init); + if (rc) + return rc; if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_USING_MSIX)) { /* disable RFS if falling back to INTA */ @@ -7073,7 +9872,7 @@ } if (irq_re_init) - udp_tunnel_get_rx_info(bp->dev); + udp_tunnel_nic_reset_ntf(bp->dev); set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); @@ -7103,7 +9902,10 @@ { int rc = 0; - rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) + rc = -EIO; + if (!rc) + rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); if (rc) { netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); dev_close(bp->dev); @@ -7118,6 +9920,12 @@ int bnxt_half_open_nic(struct bnxt *bp) { int rc = 0; + + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); + rc = -ENODEV; + goto half_open_err; + } rc = bnxt_alloc_mem(bp, false); if (rc) { @@ -7148,17 +9956,42 @@ bnxt_free_mem(bp, false); } +static void bnxt_reenable_sriov(struct bnxt *bp) +{ + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + int n = pf->active_vfs; + + if (n) + bnxt_cfg_hw_sriov(bp, &n, true); + } +} + static int bnxt_open(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); int rc; - bnxt_hwrm_if_change(bp, true); - rc = __bnxt_open_nic(bp, true, true); - if (rc) - bnxt_hwrm_if_change(bp, false); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n"); + return -ENODEV; + } - bnxt_hwmon_open(bp); + rc = bnxt_hwrm_if_change(bp, true); + if (rc) + return rc; + rc = __bnxt_open_nic(bp, true, true); + if (rc) { + bnxt_hwrm_if_change(bp, false); + } else { + if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + bnxt_ulp_start(bp, 0); + bnxt_reenable_sriov(bp); + } + } + bnxt_hwmon_open(bp); + } return rc; } @@ -7211,6 +10044,18 @@ { int rc = 0; + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + /* If we get here, it means firmware reset is in progress + * while we are trying to close. We can safely proceed with + * the close because we are holding rtnl_lock(). Some firmware + * messages may fail as we proceed to close. We set the + * ABORT_ERR flag here so that the FW reset thread will later + * abort when it gets the rtnl_lock() and sees the flag. 
+ */ + netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + } + #ifdef CONFIG_BNXT_SRIOV if (bp->sriov_cfg) { rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, @@ -7235,24 +10080,88 @@ return 0; } +static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, + u16 *val) +{ + struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_mdio_read_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10a00) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.phy_addr = phy_addr; + req.reg_addr = cpu_to_le16(reg & 0x1f); + if (mdio_phy_id_is_c45(phy_addr)) { + req.cl45_mdio = 1; + req.phy_addr = mdio_phy_id_prtad(phy_addr); + req.dev_addr = mdio_phy_id_devad(phy_addr); + req.reg_addr = cpu_to_le16(reg); + } + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *val = le16_to_cpu(resp->reg_data); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, + u16 val) +{ + struct hwrm_port_phy_mdio_write_input req = {0}; + + if (bp->hwrm_spec_code < 0x10a00) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.phy_addr = phy_addr; + req.reg_addr = cpu_to_le16(reg & 0x1f); + if (mdio_phy_id_is_c45(phy_addr)) { + req.cl45_mdio = 1; + req.phy_addr = mdio_phy_id_prtad(phy_addr); + req.dev_addr = mdio_phy_id_devad(phy_addr); + req.reg_addr = cpu_to_le16(reg); + } + req.reg_data = cpu_to_le16(val); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + /* rtnl_lock held */ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct mii_ioctl_data *mdio = if_mii(ifr); + struct bnxt *bp = netdev_priv(dev); + int rc; + switch (cmd) { case SIOCGMIIPHY: - /* fallthru */ + mdio->phy_id = bp->link_info.phy_addr; + + fallthrough; case SIOCGMIIREG: { + u16 mii_regval = 0; + if (!netif_running(dev)) return -EAGAIN; - return 0; + rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, + &mii_regval); + mdio->val_out = mii_regval; + return rc; } case SIOCSMIIREG: if (!netif_running(dev)) return -EAGAIN; - return 0; + return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, + mdio->val_in); default: /* do nothing */ @@ -7266,34 +10175,33 @@ { int i; - for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct ctx_hw_stats *hw_stats = cpr->hw_stats; + u64 *sw = cpr->stats.sw_stats; - stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); - stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); - stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); - 
stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); stats->rx_missed_errors += - le64_to_cpu(hw_stats->rx_discard_pkts); + BNXT_GET_RING_STATS64(sw, rx_discard_pkts); - stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); + stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); + stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); } } @@ -7331,19 +10239,26 @@ bnxt_add_prev_stats(bp, stats); if (bp->flags & BNXT_FLAG_PORT_STATS) { - struct rx_port_stats *rx = bp->hw_rx_port_stats; - struct tx_port_stats *tx = bp->hw_tx_port_stats; + u64 *rx = bp->port_stats.sw_stats; + u64 *tx = bp->port_stats.sw_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; - stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); - stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); - stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + - le64_to_cpu(rx->rx_ovrsz_frames) + - le64_to_cpu(rx->rx_runt_frames); - stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + - le64_to_cpu(rx->rx_jbr_frames); - stats->collisions = le64_to_cpu(tx->tx_total_collisions); - stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); - stats->tx_errors = le64_to_cpu(tx->tx_err); + stats->rx_crc_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); + stats->rx_frame_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); + stats->rx_length_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); + stats->rx_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); + stats->collisions = + BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); + stats->tx_fifo_errors = + BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); + stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); } clear_bit(BNXT_STATE_READ_STATS, &bp->state); } @@ -7404,14 +10319,16 @@ static void bnxt_set_rx_mode(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; - u32 mask = vnic->rx_mask; + struct bnxt_vnic_info *vnic; bool mc_update = false; bool uc_update; + u32 mask; - if (!netif_running(dev)) + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) return; + vnic = &bp->vnic_info[0]; + mask = vnic->rx_mask; mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | @@ -7530,6 +10447,14 @@ /* If the chip and firmware supports RFS */ static bool bnxt_rfs_supported(struct bnxt *bp) { + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) + return true; + return false; + } + /* 212 firmware is broken for aRFS */ + if (BNXT_FW_MAJ(bp) == 212) + return false; if 
(BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) return true; if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) @@ -7543,7 +10468,9 @@ #ifdef CONFIG_RFS_ACCEL int vnics, max_vnics, max_rss_ctxs; - if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) + if (bp->flags & BNXT_FLAG_CHIP_P5) + return bnxt_rfs_supported(bp); + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false; vnics = 1 + bp->rx_nr_rings; @@ -7567,12 +10494,12 @@ if (vnics == bp->hw_resc.resv_vnics) return true; - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics); + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); if (vnics <= bp->hw_resc.resv_vnics) return true; netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1); + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); return false; #else return false; @@ -7600,24 +10527,16 @@ /* Both CTAG and STAG VLAN accelaration on the RX side have to be * turned on or off together. */ - vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); - if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX)) { - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); + vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; + if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { + if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; else if (vlan_features) - features |= NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX; + features |= BNXT_HW_FEATURE_VLAN_ALL_RX; } #ifdef CONFIG_BNXT_SRIOV - if (BNXT_VF(bp)) { - if (bp->vf.vlan) { - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); - } - } + if (BNXT_VF(bp) && bp->vf.vlan) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; #endif return features; } @@ -7640,7 +10559,7 @@ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) flags &= ~BNXT_FLAG_TPA; - if (features & NETIF_F_HW_VLAN_CTAG_RX) + if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) flags |= BNXT_FLAG_STRIP_VLAN; if (features & NETIF_F_NTUPLE) @@ -7650,7 +10569,8 @@ if (changes & BNXT_FLAG_TPA) { update_tpa = true; if ((bp->flags & BNXT_FLAG_TPA) == 0 || - (flags & BNXT_FLAG_TPA) == 0) + (flags & BNXT_FLAG_TPA) == 0 || + (bp->flags & BNXT_FLAG_CHIP_P5)) re_init = true; } @@ -7660,9 +10580,8 @@ if (flags != bp->flags) { u32 old_flags = bp->flags; - bp->flags = flags; - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + bp->flags = flags; if (update_tpa) bnxt_set_ring_params(bp); return rc; @@ -7670,12 +10589,14 @@ if (re_init) { bnxt_close_nic(bp, false, false); + bp->flags = flags; if (update_tpa) bnxt_set_ring_params(bp); return bnxt_open_nic(bp, false, false); } if (update_tpa) { + bp->flags = flags; rc = bnxt_set_tpa(bp, (flags & BNXT_FLAG_TPA) ? 
true : false); @@ -7683,6 +10604,58 @@ bp->flags = old_flags; } } + return rc; +} + +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, + u32 *reg_buf) +{ + struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_dbg_read_direct_input req = {0}; + __le32 *dbg_reg_buf; + dma_addr_t mapping; + int rc, i; + + dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, + &mapping, GFP_KERNEL); + if (!dbg_reg_buf) + return -ENOMEM; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); + req.host_dest_addr = cpu_to_le64(mapping); + req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + req.read_len32 = cpu_to_le32(num_words); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc || resp->error_code) { + rc = -EIO; + goto dbg_rd_reg_exit; + } + for (i = 0; i < num_words; i++) + reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); + +dbg_rd_reg_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); + return rc; +} + +static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, + u32 ring_id, u32 *prod, u32 *cons) +{ + struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_dbg_ring_info_get_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); + req.ring_type = ring_type; + req.fw_ring_id = cpu_to_le32(ring_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + *prod = le32_to_cpu(resp->producer_index); + *cons = le32_to_cpu(resp->consumer_index); + } + mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -7737,6 +10710,23 @@ } } +static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct hwrm_ring_reset_input req = {0}; + struct bnxt_napi *bnapi = rxr->bnapi; + struct bnxt_cp_ring_info *cpr; + u16 cp_ring_id; + + cpr = &bnapi->cp_ring; + cp_ring_id = cpr->cp_ring_struct.fw_ring_id; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1); + req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; + req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); + return hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); +} + static void bnxt_reset_task(struct bnxt *bp, bool silent) { if (!silent) @@ -7744,16 +10734,19 @@ if (netif_running(bp->dev)) { int rc; - if (!silent) + if (silent) { + bnxt_close_nic(bp, false, false); + bnxt_open_nic(bp, false, false); + } else { bnxt_ulp_stop(bp); - bnxt_close_nic(bp, false, false); - rc = bnxt_open_nic(bp, false, false); - if (!silent && !rc) - bnxt_ulp_start(bp); + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); + bnxt_ulp_start(bp, rc); + } } } -static void bnxt_tx_timeout(struct net_device *dev) +static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct bnxt *bp = netdev_priv(dev); @@ -7762,19 +10755,54 @@ bnxt_queue_sp_work(bp); } +static void bnxt_fw_health_check(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 val; + + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return; + + /* Make sure it is enabled before checking the tmr_counter. 
*/ + smp_rmb(); + if (fw_health->tmr_counter) { + fw_health->tmr_counter--; + return; + } + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + if (val == fw_health->last_fw_heartbeat) + goto fw_reset; + + fw_health->last_fw_heartbeat = val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + if (val != fw_health->last_fw_reset_cnt) + goto fw_reset; + + fw_health->tmr_counter = fw_health->tmr_multiplier; + return; + +fw_reset: + set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); +} + static void bnxt_timer(struct timer_list *t) { struct bnxt *bp = from_timer(bp, t, timer); struct net_device *dev = bp->dev; - if (!netif_running(dev)) + if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) return; if (atomic_read(&bp->intr_sem) != 0) goto bnxt_restart_timer; - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && - bp->stats_coal_ticks) { + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + bnxt_fw_health_check(bp); + + if (bp->link_info.link_up && bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } @@ -7784,14 +10812,27 @@ bnxt_queue_sp_work(bp); } +#ifdef CONFIG_RFS_ACCEL + if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { + set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } +#endif /*CONFIG_RFS_ACCEL*/ + if (bp->link_info.phy_retry) { if (time_after(jiffies, bp->link_info.phy_retry_expires)) { - bp->link_info.phy_retry = 0; + bp->link_info.phy_retry = false; netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); } else { set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } + } + + if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && + netif_carrier_ok(dev)) { + set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); } bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); @@ -7823,7 +10864,266 @@ bnxt_rtnl_unlock_sp(bp); } +/* Only called from bnxt_sp_task() */ +static void bnxt_rx_ring_reset(struct bnxt *bp) +{ + int i; + + bnxt_rtnl_lock_sp(bp); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + bnxt_rtnl_unlock_sp(bp); + return; + } + /* Disable and flush TPA before resetting the RX ring */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_cp_ring_info *cpr; + int rc; + + if (!rxr->bnapi->in_reset) + continue; + + rc = bnxt_hwrm_rx_ring_reset(bp, i); + if (rc) { + if (rc == -EINVAL || rc == -EOPNOTSUPP) + netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); + else + netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", + rc); + bnxt_reset_task(bp, true); + break; + } + bnxt_free_one_rx_ring_skbs(bp, i); + rxr->rx_prod = 0; + rxr->rx_agg_prod = 0; + rxr->rx_sw_agg_prod = 0; + rxr->rx_next_cons = 0; + rxr->bnapi->in_reset = false; + bnxt_alloc_one_rx_ring(bp, i); + cpr = &rxr->bnapi->cp_ring; + cpr->sw_stats.rx.rx_resets++; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + } + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, true); + bnxt_rtnl_unlock_sp(bp); +} + +static void bnxt_fw_reset_close(struct bnxt *bp) +{ + bnxt_ulp_stop(bp); + /* When firmware is fatal state, disable PCI device to prevent + * any potential bad DMAs before freeing kernel memory. 
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + pci_disable_device(bp->pdev); + __bnxt_close_nic(bp, true, false); + bnxt_clear_int_mode(bp); + bnxt_hwrm_func_drv_unrgtr(bp); + if (pci_is_enabled(bp->pdev)) + pci_disable_device(bp->pdev); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; +} + +static bool is_bnxt_fw_ok(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + bool no_heartbeat = false, has_reset = false; + u32 val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + if (val == fw_health->last_fw_heartbeat) + no_heartbeat = true; + + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + if (val != fw_health->last_fw_reset_cnt) + has_reset = true; + + if (!no_heartbeat && has_reset) + return true; + + return false; +} + +/* rtnl_lock is acquired before calling this function */ +static void bnxt_force_fw_reset(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 wait_dsecs; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state) || + test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return; + + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bnxt_fw_reset_close(bp); + wait_dsecs = fw_health->master_func_wait_dsecs; + if (fw_health->master) { + if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) + wait_dsecs = 0; + bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; + } else { + bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; + wait_dsecs = fw_health->normal_func_wait_dsecs; + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + } + + bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; + bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); +} + +void bnxt_fw_exception(struct bnxt *bp) +{ + netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); + set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + bnxt_rtnl_lock_sp(bp); + bnxt_force_fw_reset(bp); + bnxt_rtnl_unlock_sp(bp); +} + +/* Returns the number of registered VFs, or 1 if VF configuration is pending, or + * < 0 on error. 
+ */ +static int bnxt_get_registered_vfs(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + int rc; + + if (!BNXT_PF(bp)) + return 0; + + rc = bnxt_hwrm_func_qcfg(bp); + if (rc) { + netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); + return rc; + } + if (bp->pf.registered_vfs) + return bp->pf.registered_vfs; + if (bp->sriov_cfg) + return 1; +#endif + return 0; +} + +void bnxt_fw_reset(struct bnxt *bp) +{ + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state) && + !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + int n = 0, tmo; + + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->pf.active_vfs && + !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + n = bnxt_get_registered_vfs(bp); + if (n < 0) { + netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", + n); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + dev_close(bp->dev); + goto fw_reset_exit; + } else if (n > 0) { + u16 vf_tmo_dsecs = n * 10; + + if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) + bp->fw_reset_max_dsecs = vf_tmo_dsecs; + bp->fw_reset_state = + BNXT_FW_RESET_STATE_POLL_VF; + bnxt_queue_fw_reset_work(bp, HZ / 10); + goto fw_reset_exit; + } + bnxt_fw_reset_close(bp); + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; + tmo = HZ / 10; + } else { + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + tmo = bp->fw_reset_min_dsecs * HZ / 10; + } + bnxt_queue_fw_reset_work(bp, tmo); + } +fw_reset_exit: + bnxt_rtnl_unlock_sp(bp); +} + +static void bnxt_chk_missed_irq(struct bnxt *bp) +{ + int i; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + u32 fw_ring_id; + int j; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + for (j = 0; j < 2; j++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; + u32 val[2]; + + if (!cpr2 || cpr2->has_more_work || + !bnxt_has_work(bp, cpr2)) + continue; + + if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { + cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; + continue; + } + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[0], &val[1]); + cpr->sw_stats.cmn.missed_irqs++; + } + } +} + static void bnxt_cfg_ntp_filters(struct bnxt *); + +static void bnxt_init_ethtool_link_settings(struct bnxt *bp) +{ + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + link_info->autoneg = BNXT_AUTONEG_SPEED; + if (bp->hwrm_spec_code >= 0x10201) { + if (link_info->auto_pause_setting & + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } else { + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } + link_info->advertising = link_info->auto_link_speeds; + link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; + } else { + link_info->req_link_speed = link_info->force_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; + if (link_info->force_pam4_link_speed) { + link_info->req_link_speed = + link_info->force_pam4_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; + } + link_info->req_duplex = link_info->duplex_setting; + } + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + link_info->req_flow_ctrl = + link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; + else + link_info->req_flow_ctrl = link_info->force_pause_setting; +} static void bnxt_sp_task(struct work_struct *work) { @@ -7843,27 
+11143,10 @@ bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); - if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_alloc( - bp, bp->vxlan_port, - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_free( - bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_alloc( - bp, bp->nge_port, - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } - if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_free( - bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_port_qstats(bp); - bnxt_hwrm_port_qstats_ext(bp); + bnxt_hwrm_port_qstats(bp, 0); + bnxt_hwrm_port_qstats_ext(bp, 0); + bnxt_accumulate_all_stats(bp); } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { @@ -7875,10 +11158,14 @@ bnxt_hwrm_phy_qcaps(bp); rc = bnxt_update_link(bp, true); - mutex_unlock(&bp->link_lock); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); + + if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, + &bp->sp_event)) + bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); } if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { int rc; @@ -7902,6 +11189,9 @@ if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) bnxt_tc_flow_stats_work(bp); + if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) + bnxt_chk_missed_irq(bp); + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They * must be the last functions to be called before exiting. */ @@ -7910,6 +11200,18 @@ if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, true); + + if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) + bnxt_rx_ring_reset(bp); + + if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) + bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); + + if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { + if (!is_bnxt_fw_ok(bp)) + bnxt_devlink_health_report(bp, + BNXT_FW_EXCEPTION_SP_EVENT); + } smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); @@ -7920,7 +11222,7 @@ int tx_xdp) { int max_rx, max_tx, tx_sets = 1; - int tx_rings_needed; + int tx_rings_needed, stats; int rx_rings = rx; int cp, vnics, rc; @@ -7939,16 +11241,19 @@ return -ENOMEM; vnics = 1; - if (bp->flags & BNXT_FLAG_RFS) + if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) vnics += rx_rings; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; - if (BNXT_NEW_RM(bp)) + stats = cp; + if (BNXT_NEW_RM(bp)) { cp += bnxt_get_ulp_msix_num(bp); + stats += bnxt_get_ulp_stat_ctxs(bp); + } return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, - vnics); + stats, vnics); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -7973,7 +11278,8 @@ { bnxt_unmap_bars(bp, bp->pdev); pci_release_regions(bp->pdev); - pci_disable_device(bp->pdev); + if (pci_is_enabled(bp->pdev)) + pci_disable_device(bp->pdev); } static void bnxt_init_dflt_coal(struct bnxt *bp) @@ -7984,7 +11290,7 @@ * 1 coal_buf x bufs_per_record = 1 completion record. 
*/ coal = &bp->rx_coal; - coal->coal_ticks = 14; + coal->coal_ticks = 10; coal->coal_bufs = 30; coal->coal_ticks_irq = 1; coal->coal_bufs_irq = 2; @@ -8000,6 +11306,403 @@ coal->bufs_per_record = 1; bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; +} + +static int bnxt_fw_reset_via_optee(struct bnxt *bp) +{ +#ifdef CONFIG_TEE_BNXT_FW + int rc = tee_bnxt_fw_load(); + + if (rc) + netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); + + return rc; +#else + netdev_err(bp->dev, "OP-TEE not supported\n"); + return -ENODEV; +#endif +} + +static int bnxt_fw_init_one_p1(struct bnxt *bp) +{ + int rc; + + bp->fw_cap = 0; + rc = bnxt_hwrm_ver_get(bp); + bnxt_try_map_fw_health_reg(bp); + if (rc) { + if (bp->fw_health && bp->fw_health->status_reliable) { + u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + + netdev_err(bp->dev, + "Firmware not responding, status: 0x%x\n", + sts); + if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { + netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); + rc = bnxt_fw_reset_via_optee(bp); + if (!rc) + rc = bnxt_hwrm_ver_get(bp); + } + } + if (rc) + return rc; + } + + if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { + rc = bnxt_alloc_kong_hwrm_resources(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; + } + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { + rc = bnxt_alloc_hwrm_short_cmd_req(bp); + if (rc) + return rc; + } + bnxt_nvm_cfg_ver_get(bp); + + rc = bnxt_hwrm_func_reset(bp); + if (rc) + return -ENODEV; + + bnxt_hwrm_fw_set_time(bp); + return 0; +} + +static int bnxt_fw_init_one_p2(struct bnxt *bp) +{ + int rc; + + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", + rc); + return -ENODEV; + } + + rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", + rc); + + if (bnxt_alloc_fw_health(bp)) { + netdev_warn(bp->dev, "no memory for firmware error recovery\n"); + } else { + rc = bnxt_hwrm_error_recovery_qcfg(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", + rc); + } + + rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); + if (rc) + return -ENODEV; + + bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_vnic_qcaps(bp); + bnxt_hwrm_port_led_qcaps(bp); + bnxt_ethtool_init(bp); + bnxt_dcb_init(bp); + return 0; +} + +static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; + bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { + bp->flags |= BNXT_FLAG_UDP_RSS_CAP; + bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + } +} + +static void bnxt_set_dflt_rfs(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + + dev->hw_features &= ~NETIF_F_NTUPLE; + dev->features &= ~NETIF_F_NTUPLE; + bp->flags &= ~BNXT_FLAG_RFS; + if (bnxt_rfs_supported(bp)) { + dev->hw_features |= NETIF_F_NTUPLE; + if (bnxt_rfs_capable(bp)) { + bp->flags |= BNXT_FLAG_RFS; + dev->features |= NETIF_F_NTUPLE; + } + } +} + +static void bnxt_fw_init_one_p3(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + bnxt_set_dflt_rss_hash_type(bp); + bnxt_set_dflt_rfs(bp); + + bnxt_get_wol_settings(bp); + if (bp->flags & BNXT_FLAG_WOL_CAP) + 
device_set_wakeup_enable(&pdev->dev, bp->wol); + else + device_set_wakeup_capable(&pdev->dev, false); + + bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); + bnxt_hwrm_coal_params_qcaps(bp); +} + +static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); + +static int bnxt_fw_init_one(struct bnxt *bp) +{ + int rc; + + rc = bnxt_fw_init_one_p1(bp); + if (rc) { + netdev_err(bp->dev, "Firmware init phase 1 failed\n"); + return rc; + } + rc = bnxt_fw_init_one_p2(bp); + if (rc) { + netdev_err(bp->dev, "Firmware init phase 2 failed\n"); + return rc; + } + rc = bnxt_probe_phy(bp, false); + if (rc) + return rc; + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); + if (rc) + return rc; + + /* In case fw capabilities have changed, destroy the unneeded + * reporters and create newly capable ones. + */ + bnxt_dl_fw_reporters_destroy(bp, false); + bnxt_dl_fw_reporters_create(bp); + bnxt_fw_init_one_p3(bp); + return 0; +} + +static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; + u32 val = fw_health->fw_reset_seq_vals[reg_idx]; + u32 reg_type, reg_off, delay_msecs; + + delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; + reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); + reg_off = BNXT_FW_HEALTH_REG_OFF(reg); + switch (reg_type) { + case BNXT_FW_HEALTH_REG_TYPE_CFG: + pci_write_config_dword(bp->pdev, reg_off, val); + break; + case BNXT_FW_HEALTH_REG_TYPE_GRC: + writel(reg_off & BNXT_GRC_BASE_MASK, + bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; + fallthrough; + case BNXT_FW_HEALTH_REG_TYPE_BAR0: + writel(val, bp->bar0 + reg_off); + break; + case BNXT_FW_HEALTH_REG_TYPE_BAR1: + writel(val, bp->bar1 + reg_off); + break; + } + if (delay_msecs) { + pci_read_config_dword(bp->pdev, 0, &val); + msleep(delay_msecs); + } +} + +static void bnxt_reset_all(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + int i, rc; + + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { + bnxt_fw_reset_via_optee(bp); + bp->fw_reset_timestamp = jiffies; + return; + } + + if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { + for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) + bnxt_fw_reset_writel(bp, i); + } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { + struct hwrm_fw_reset_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); + } + bp->fw_reset_timestamp = jiffies; +} + +static void bnxt_fw_reset_task(struct work_struct *work) +{ + struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); + int rc; + + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); + return; + } + + switch (bp->fw_reset_state) { + case BNXT_FW_RESET_STATE_POLL_VF: { + int n = bnxt_get_registered_vfs(bp); + int tmo; + + if (n < 0) { + netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", + n, jiffies_to_msecs(jiffies - + bp->fw_reset_timestamp)); + goto fw_reset_abort; + } 
else if (n > 0) { + if (time_after(jiffies, bp->fw_reset_timestamp + + (bp->fw_reset_max_dsecs * HZ / 10))) { + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bp->fw_reset_state = 0; + netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", + n); + return; + } + bnxt_queue_fw_reset_work(bp, HZ / 10); + return; + } + bp->fw_reset_timestamp = jiffies; + rtnl_lock(); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + rtnl_unlock(); + goto fw_reset_abort; + } + bnxt_fw_reset_close(bp); + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; + tmo = HZ / 10; + } else { + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + tmo = bp->fw_reset_min_dsecs * HZ / 10; + } + rtnl_unlock(); + bnxt_queue_fw_reset_work(bp, tmo); + return; + } + case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { + u32 val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (!(val & BNXT_FW_STATUS_SHUTDOWN) && + !time_after(jiffies, bp->fw_reset_timestamp + + (bp->fw_reset_max_dsecs * HZ / 10))) { + bnxt_queue_fw_reset_work(bp, HZ / 5); + return; + } + + if (!bp->fw_health->master) { + u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; + + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); + return; + } + bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; + } + fallthrough; + case BNXT_FW_RESET_STATE_RESET_FW: + bnxt_reset_all(bp); + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); + return; + case BNXT_FW_RESET_STATE_ENABLE_DEV: + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { + u32 val; + + val = bnxt_fw_health_readl(bp, + BNXT_FW_RESET_INPROG_REG); + if (val) + netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", + val); + } + clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + if (pci_enable_device(bp->pdev)) { + netdev_err(bp->dev, "Cannot re-enable PCI device\n"); + goto fw_reset_abort; + } + pci_set_master(bp->pdev); + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; + fallthrough; + case BNXT_FW_RESET_STATE_POLL_FW: + bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; + rc = __bnxt_hwrm_ver_get(bp, true); + if (rc) { + if (time_after(jiffies, bp->fw_reset_timestamp + + (bp->fw_reset_max_dsecs * HZ / 10))) { + netdev_err(bp->dev, "Firmware reset aborted\n"); + goto fw_reset_abort_status; + } + bnxt_queue_fw_reset_work(bp, HZ / 5); + return; + } + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; + bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; + fallthrough; + case BNXT_FW_RESET_STATE_OPENING: + while (!rtnl_trylock()) { + bnxt_queue_fw_reset_work(bp, HZ / 10); + return; + } + rc = bnxt_open(bp->dev); + if (rc) { + netdev_err(bp->dev, "bnxt_open_nic() failed\n"); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + dev_close(bp->dev); + } + + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && + bp->fw_health->enabled) { + bp->fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + } + bp->fw_reset_state = 0; + /* Make sure fw_reset_state is 0 before clearing the flag */ + smp_mb__before_atomic(); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bnxt_ulp_start(bp, rc); + if (!rc) + bnxt_reenable_sriov(bp); + bnxt_dl_health_recovery_done(bp); + bnxt_dl_health_status_update(bp, true); + rtnl_unlock(); + break; + } + return; + +fw_reset_abort_status: + if (bp->fw_health->status_reliable || + (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { + u32 sts = 
bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + + netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); + } +fw_reset_abort: + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) + bnxt_dl_health_status_update(bp, false); + bp->fw_reset_state = 0; + rtnl_lock(); + dev_close(bp->dev); + rtnl_unlock(); } static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) @@ -8041,16 +11744,12 @@ bp->dev = dev; bp->pdev = pdev; + /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() + * determines the BAR size. + */ bp->bar0 = pci_ioremap_bar(pdev, 0); if (!bp->bar0) { dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); - rc = -ENOMEM; - goto init_err_release; - } - - bp->bar1 = pci_ioremap_bar(pdev, 2); - if (!bp->bar1) { - dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); rc = -ENOMEM; goto init_err_release; } @@ -8065,8 +11764,12 @@ pci_enable_pcie_error_reporting(pdev); INIT_WORK(&bp->sp_task, bnxt_sp_task); + INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); spin_lock_init(&bp->ntp_fltr_lock); +#if BITS_PER_LONG == 32 + spin_lock_init(&bp->db_lock); +#endif bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; @@ -8075,6 +11778,9 @@ timer_setup(&bp->timer, bnxt_timer, 0); bp->current_interval = BNXT_TIMER_INTERVAL; + + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; clear_bit(BNXT_STATE_OPEN, &bp->state); return 0; @@ -8170,7 +11876,6 @@ bp->tx_nr_rings += bp->tx_nr_rings_xdp; bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; - bp->num_stat_ctxs = bp->cp_nr_rings; if (netif_running(bp->dev)) return bnxt_open_nic(bp, true, false); @@ -8195,32 +11900,19 @@ } } -static int bnxt_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct bnxt *bp = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, - bp, bp, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); - return 0; - default: - return -EOPNOTSUPP; - } -} +LIST_HEAD(bnxt_block_cb_list); static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct bnxt *bp = netdev_priv(dev); + switch (type) { case TC_SETUP_BLOCK: - return bnxt_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, + &bnxt_block_cb_list, + bnxt_setup_tc_block_cb, + bp, bp, true); case TC_SETUP_QDISC_MQPRIO: { struct tc_mqprio_qopt *mqprio = type_data; @@ -8274,6 +11966,7 @@ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); int rc = 0, idx, bit_id, l2_idx = 0; struct hlist_head *head; + u32 flags; if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; @@ -8313,8 +12006,9 @@ rc = -EPROTONOSUPPORT; goto err_free; } - if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && - bp->hwrm_spec_code < 0x10601) { + flags = fkeys->control.flags; + if (((flags & FLOW_DIS_ENCAPSULATION) && + bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { rc = -EPROTONOSUPPORT; goto err_free; } @@ -8327,8 +12021,8 @@ rcu_read_lock(); hlist_for_each_entry_rcu(fltr, head, hash) { if (bnxt_fltr_match(fltr, new_fltr)) { + rc = fltr->sw_id; rcu_read_unlock(); - rc = 0; goto err_free; } } @@ -8404,7 
+12098,7 @@ } } if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) - netdev_info(bp->dev, "Receive PF driver unload event!"); + netdev_info(bp->dev, "Receive PF driver unload event!\n"); } #else @@ -8415,84 +12109,44 @@ #endif /* CONFIG_RFS_ACCEL */ -static void bnxt_udp_tunnel_add(struct net_device *dev, - struct udp_tunnel_info *ti) +static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) { - struct bnxt *bp = netdev_priv(dev); + struct bnxt *bp = netdev_priv(netdev); + unsigned int cmd; - if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) - return; + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; + else + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; - if (!netif_running(dev)) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) - return; - - bp->vxlan_port_cnt++; - if (bp->vxlan_port_cnt == 1) { - bp->vxlan_port = ti->port; - set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (bp->nge_port_cnt && bp->nge_port != ti->port) - return; - - bp->nge_port_cnt++; - if (bp->nge_port_cnt == 1) { - bp->nge_port = ti->port; - set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); - } - break; - default: - return; - } - - bnxt_queue_sp_work(bp); + return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); } -static void bnxt_udp_tunnel_del(struct net_device *dev, - struct udp_tunnel_info *ti) +static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) { - struct bnxt *bp = netdev_priv(dev); + struct bnxt *bp = netdev_priv(netdev); + unsigned int cmd; - if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) - return; + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; + else + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; - if (!netif_running(dev)) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) - return; - bp->vxlan_port_cnt--; - - if (bp->vxlan_port_cnt != 0) - return; - - set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!bp->nge_port_cnt || bp->nge_port != ti->port) - return; - bp->nge_port_cnt--; - - if (bp->nge_port_cnt != 0) - return; - - set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); - break; - default: - return; - } - - bnxt_queue_sp_work(bp); + return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); } + +static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { + .set_port = bnxt_udp_tunnel_set_port, + .unset_port = bnxt_udp_tunnel_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, @@ -8505,7 +12159,7 @@ } static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, - u16 flags) + u16 flags, struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); struct nlattr *attr, *br_spec; @@ -8539,52 +12193,30 @@ return rc; } -static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, - size_t len) +int 
bnxt_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct bnxt *bp = netdev_priv(dev); - int rc; - /* The PF and it's VF-reps only support the switchdev framework */ - if (!BNXT_PF(bp)) - return -EOPNOTSUPP; - - rc = snprintf(buf, len, "p%d", bp->pf.port_id); - - if (rc >= len) - return -EOPNOTSUPP; - return 0; -} - -int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) -{ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) return -EOPNOTSUPP; /* The PF and it's VF-reps only support the switchdev framework */ - if (!BNXT_PF(bp)) + if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) return -EOPNOTSUPP; - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(bp->switch_id); - memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len); - break; - default: - return -EOPNOTSUPP; - } + ppid->id_len = sizeof(bp->dsn); + memcpy(ppid->id, bp->dsn, ppid->id_len); + return 0; } -static int bnxt_swdev_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) +static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) { - return bnxt_port_attr_get(netdev_priv(dev), attr); -} + struct bnxt *bp = netdev_priv(dev); -static const struct switchdev_ops bnxt_switchdev_ops = { - .switchdev_port_attr_get = bnxt_swdev_port_attr_get -}; + return &bp->dl_port; +} static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, @@ -8612,12 +12244,13 @@ #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = bnxt_rx_flow_steer, #endif - .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, - .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_bpf = bnxt_xdp, + .ndo_xdp_xmit = bnxt_xdp_xmit, .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, - .ndo_get_phys_port_name = bnxt_get_phys_port_name + .ndo_get_devlink_port = bnxt_get_devlink_port, }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -8625,16 +12258,22 @@ struct net_device *dev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(dev); - if (BNXT_PF(bp)) { + if (BNXT_PF(bp)) bnxt_sriov_disable(bp); - bnxt_dl_unregister(bp); - } + if (BNXT_PF(bp)) + devlink_port_type_clear(&bp->dl_port); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); - bnxt_shutdown_tc(bp); - bnxt_cancel_sp_work(bp); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + /* Flush any pending tasks */ + cancel_work_sync(&bp->sp_task); + cancel_delayed_work_sync(&bp->fw_reset_task); bp->sp_event = 0; + + bnxt_dl_fw_reporters_destroy(bp, true); + bnxt_dl_unregister(bp); + bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); @@ -8644,11 +12283,19 @@ bnxt_dcb_free(bp); kfree(bp->edev); bp->edev = NULL; + kfree(bp->fw_health); + bp->fw_health = NULL; bnxt_cleanup_pci(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; + bnxt_free_port_stats(bp); free_netdev(dev); } -static int bnxt_probe_phy(struct bnxt *bp) +static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) { int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; @@ -8659,7 +12306,8 @@ rc); return rc; } - mutex_init(&bp->link_lock); + if (!fw_dflt) + return 0; rc = bnxt_update_link(bp, false); if (rc) { @@ -8674,27 +12322,8 @@ if (link_info->auto_link_speeds && !link_info->support_auto_speeds) link_info->support_auto_speeds = link_info->support_speeds; - /*initialize the ethool 
setting copy with NVM settings */ - if (BNXT_AUTO_MODE(link_info->auto_mode)) { - link_info->autoneg = BNXT_AUTONEG_SPEED; - if (bp->hwrm_spec_code >= 0x10201) { - if (link_info->auto_pause_setting & - PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) - link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - } else { - link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - } - link_info->advertising = link_info->auto_link_speeds; - } else { - link_info->req_link_speed = link_info->force_link_speed; - link_info->req_duplex = link_info->duplex_setting; - } - if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) - link_info->req_flow_ctrl = - link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; - else - link_info->req_flow_ctrl = link_info->force_pause_setting; - return rc; + bnxt_init_ethtool_link_settings(bp); + return 0; } static int bnxt_get_max_irq(struct pci_dev *pdev) @@ -8712,13 +12341,16 @@ int *max_cp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - int max_ring_grps = 0; + int max_ring_grps = 0, max_irq; *max_tx = hw_resc->max_tx_rings; *max_rx = hw_resc->max_rx_rings; - *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), - hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp)); - *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); + *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); + max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - + bnxt_get_ulp_msix_num(bp), + hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + *max_cp = min_t(int, *max_cp, max_irq); max_ring_grps = hw_resc->max_hw_ring_grps; if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { *max_cp -= 1; @@ -8726,6 +12358,11 @@ } if (bp->flags & BNXT_FLAG_AGG_RINGS) *max_rx >>= 1; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); + /* On P5 chips, max_cp output param should be available NQs */ + *max_cp = max_irq; + } *max_rx = min_t(int, *max_rx, max_ring_grps); } @@ -8807,7 +12444,7 @@ if (sh) bp->flags |= BNXT_FLAG_SHARED_RINGS; - dflt_rings = netif_get_num_default_rss_queues(); + dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); /* Reduce default rings on multi-port cards so that total default * rings do not exceed CPU count. 
*/ @@ -8842,7 +12479,6 @@ netdev_warn(bp->dev, "2nd rings reservation failed.\n"); bp->tx_nr_rings_per_tc = bp->tx_nr_rings; } - bp->num_stat_ctxs = bp->cp_nr_rings; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bp->rx_nr_rings++; bp->cp_nr_rings++; @@ -8873,10 +12509,9 @@ goto init_dflt_ring_err; bp->tx_nr_rings_per_tc = bp->tx_nr_rings; - if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { - bp->flags |= BNXT_FLAG_RFS; - bp->dev->features |= NETIF_F_NTUPLE; - } + + bnxt_set_dflt_rfs(bp); + init_dflt_ring_err: bnxt_ulp_irq_restart(bp, rc); return rc; @@ -8934,9 +12569,94 @@ return rc; } +#define BNXT_VPD_LEN 512 +static void bnxt_vpd_read_info(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + int i, len, pos, ro_size, size; + ssize_t vpd_size; + u8 *vpd_data; + + vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL); + if (!vpd_data) + return; + + vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data); + if (vpd_size <= 0) { + netdev_err(bp->dev, "Unable to read VPD\n"); + goto exit; + } + + i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + if (i < 0) { + netdev_err(bp->dev, "VPD READ-Only not found\n"); + goto exit; + } + + ro_size = pci_vpd_lrdt_size(&vpd_data[i]); + i += PCI_VPD_LRDT_TAG_SIZE; + if (i + ro_size > vpd_size) + goto exit; + + pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, + PCI_VPD_RO_KEYWORD_PARTNO); + if (pos < 0) + goto read_sn; + + len = pci_vpd_info_field_size(&vpd_data[pos]); + pos += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len + pos > vpd_size) + goto read_sn; + + size = min(len, BNXT_VPD_FLD_LEN - 1); + memcpy(bp->board_partno, &vpd_data[pos], size); + +read_sn: + pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, + PCI_VPD_RO_KEYWORD_SERIALNO); + if (pos < 0) + goto exit; + + len = pci_vpd_info_field_size(&vpd_data[pos]); + pos += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len + pos > vpd_size) + goto exit; + + size = min(len, BNXT_VPD_FLD_LEN - 1); + memcpy(bp->board_serialno, &vpd_data[pos], size); +exit: + kfree(vpd_data); +} + +static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) +{ + struct pci_dev *pdev = bp->pdev; + u64 qword; + + qword = pci_get_dsn(pdev); + if (!qword) { + netdev_info(bp->dev, "Unable to read adapter's DSN\n"); + return -EOPNOTSUPP; + } + + put_unaligned_le64(qword, dsn); + + bp->flags |= BNXT_FLAG_DSN_VALID; + return 0; +} + +static int bnxt_map_db_bar(struct bnxt *bp) +{ + if (!bp->db_size) + return -ENODEV; + bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); + if (!bp->bar1) + return -ENOMEM; + return 0; +} + static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int version_printed; struct net_device *dev; struct bnxt *bp; int rc, max_irqs; @@ -8944,8 +12664,13 @@ if (pci_is_bridge(pdev)) return -ENODEV; - if (version_printed++ == 0) - pr_info("%s", version); + /* Clear any pending DMA transactions from crash kernel + * while loading driver in capture kernel. 
+ */ + if (is_kdump_kernel()) { + pci_clear_master(pdev); + pcie_flr(pdev); + } max_irqs = bnxt_get_max_irq(pdev); dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); @@ -8953,6 +12678,8 @@ return -ENOMEM; bp = netdev_priv(dev); + bp->msg_enable = BNXT_DEF_MSG_ENABLE; + bnxt_set_max_func_irqs(bp, max_irqs); if (bnxt_vf_pciid(ent->driver_data)) bp->flags |= BNXT_FLAG_VF; @@ -8967,29 +12694,42 @@ dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; - SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); pci_set_drvdata(pdev, dev); + + if (BNXT_PF(bp)) + bnxt_vpd_read_info(bp); rc = bnxt_alloc_hwrm_resources(bp); if (rc) goto init_err_pci_clean; mutex_init(&bp->hwrm_cmd_lock); - rc = bnxt_hwrm_ver_get(bp); + mutex_init(&bp->link_lock); + + rc = bnxt_fw_init_one_p1(bp); if (rc) goto init_err_pci_clean; - if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) { - rc = bnxt_alloc_hwrm_short_cmd_req(bp); - if (rc) - goto init_err_pci_clean; + if (BNXT_CHIP_P5(bp)) { + bp->flags |= BNXT_FLAG_CHIP_P5; + if (BNXT_CHIP_SR2(bp)) + bp->flags |= BNXT_FLAG_CHIP_SR2; } - rc = bnxt_hwrm_func_reset(bp); + rc = bnxt_alloc_rss_indir_tbl(bp); if (rc) goto init_err_pci_clean; - bnxt_hwrm_fw_set_time(bp); + rc = bnxt_fw_init_one_p2(bp); + if (rc) + goto init_err_pci_clean; + + rc = bnxt_map_db_bar(bp); + if (rc) { + dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", + rc); + goto init_err_pci_clean; + } dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | @@ -8999,7 +12739,7 @@ NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_GRO; - if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_LRO; dev->hw_enc_features = @@ -9008,12 +12748,16 @@ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; + dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; - if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; + if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_GRO_HW; dev->features |= dev->hw_features | NETIF_F_HIGHDMA; if (dev->features & NETIF_F_GRO_HW) @@ -9024,61 +12768,41 @@ init_waitqueue_head(&bp->sriov_cfg_wait); mutex_init(&bp->sriov_lock); #endif - bp->gro_func = bnxt_gro_func_5730x; - if (BNXT_CHIP_P4_PLUS(bp)) - bp->gro_func = bnxt_gro_func_5731x; - else + if (BNXT_SUPPORTS_TPA(bp)) { + bp->gro_func = bnxt_gro_func_5730x; + if (BNXT_CHIP_P4(bp)) + bp->gro_func = bnxt_gro_func_5731x; + else if (BNXT_CHIP_P5(bp)) + bp->gro_func = bnxt_gro_func_5750x; + } + if (!BNXT_CHIP_P4_PLUS(bp)) bp->flags |= BNXT_FLAG_DOUBLE_DB; - - rc = bnxt_hwrm_func_drv_rgtr(bp); - if (rc) - goto init_err_pci_clean; - - rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); - if (rc) - goto init_err_pci_clean; bp->ulp_probe = bnxt_ulp_probe; - /* Get the MAX capabilities for this function */ - rc = bnxt_hwrm_func_qcaps(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", - rc); - rc = -1; - goto init_err_pci_clean; - } rc = bnxt_init_mac_addr(bp); if (rc) { 
dev_err(&pdev->dev, "Unable to initialize mac address.\n"); rc = -EADDRNOTAVAIL; goto init_err_pci_clean; } - rc = bnxt_hwrm_queue_qportcfg(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", - rc); - rc = -1; - goto init_err_pci_clean; - } - bnxt_hwrm_func_qcfg(bp); - bnxt_hwrm_port_led_qcaps(bp); - bnxt_ethtool_init(bp); - bnxt_dcb_init(bp); + if (BNXT_PF(bp)) { + /* Read the adapter's DSN to use as the eswitch switch_id */ + rc = bnxt_pcie_dsn_get(bp, bp->dsn); + } /* MTU range: 60 - FW defined max */ dev->min_mtu = ETH_ZLEN; dev->max_mtu = bp->max_mtu; - rc = bnxt_probe_phy(bp); + rc = bnxt_probe_phy(bp, true); if (rc) goto init_err_pci_clean; bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); - bnxt_set_max_func_irqs(bp, max_irqs); rc = bnxt_set_dflt_rings(bp, true); if (rc) { netdev_err(bp->dev, "Not enough rings available.\n"); @@ -9086,27 +12810,9 @@ goto init_err_pci_clean; } - /* Default RSS hash cfg. */ - bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; - if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { - bp->flags |= BNXT_FLAG_UDP_RSS_CAP; - bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; - } + bnxt_fw_init_one_p3(bp); - bnxt_hwrm_vnic_qcaps(bp); - if (bnxt_rfs_supported(bp)) { - dev->hw_features |= NETIF_F_NTUPLE; - if (bnxt_rfs_capable(bp)) { - bp->flags |= BNXT_FLAG_RFS; - dev->features |= NETIF_F_NTUPLE; - } - } - - if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) + if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; rc = bnxt_init_int_mode(bp); @@ -9118,14 +12824,6 @@ */ bp->tx_nr_rings_per_tc = bp->tx_nr_rings; - bnxt_get_wol_settings(bp); - if (bp->flags & BNXT_FLAG_WOL_CAP) - device_set_wakeup_enable(&pdev->dev, bp->wol); - else - device_set_wakeup_capable(&pdev->dev, false); - - bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); - if (BNXT_PF(bp)) { if (!bnxt_pf_wq) { bnxt_pf_wq = @@ -9136,15 +12834,21 @@ goto init_err_pci_clean; } } - bnxt_init_tc(bp); + rc = bnxt_init_tc(bp); + if (rc) + netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", + rc); } + + bnxt_dl_register(bp); rc = register_netdev(dev); if (rc) - goto init_err_cleanup_tc; + goto init_err_cleanup; if (BNXT_PF(bp)) - bnxt_dl_register(bp); + devlink_port_type_eth_set(&bp->dl_port, bp->dev); + bnxt_dl_fw_reporters_create(bp); netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, @@ -9154,14 +12858,24 @@ pci_save_state(pdev); return 0; -init_err_cleanup_tc: +init_err_cleanup: + bnxt_dl_unregister(bp); bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); init_err_pci_clean: + bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); + bnxt_ethtool_free(bp); + kfree(bp->fw_health); + bp->fw_health = NULL; bnxt_cleanup_pci(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; init_err_free: free_netdev(dev); @@ -9185,9 +12899,10 @@ dev_close(dev); bnxt_ulp_shutdown(bp); + bnxt_clear_int_mode(bp); + pci_disable_device(pdev); if (system_state == SYSTEM_POWER_OFF) { - bnxt_clear_int_mode(bp); pci_wake_from_d3(pdev, bp->wol); pci_set_power_state(pdev, PCI_D3hot); } @@ -9199,30 +12914,40 @@ #ifdef CONFIG_PM_SLEEP static int bnxt_suspend(struct device *device) { - struct pci_dev *pdev = 
to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnxt *bp = netdev_priv(dev); int rc = 0; rtnl_lock(); + bnxt_ulp_stop(bp); if (netif_running(dev)) { netif_device_detach(dev); rc = bnxt_close(dev); } bnxt_hwrm_func_drv_unrgtr(bp); + pci_disable_device(bp->pdev); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; rtnl_unlock(); return rc; } static int bnxt_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnxt *bp = netdev_priv(dev); int rc = 0; rtnl_lock(); - if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) { + rc = pci_enable_device(bp->pdev); + if (rc) { + netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", + rc); + goto resume_exit; + } + pci_set_master(bp->pdev); + if (bnxt_hwrm_ver_get(bp)) { rc = -ENODEV; goto resume_exit; } @@ -9231,6 +12956,16 @@ rc = -EBUSY; goto resume_exit; } + + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) + goto resume_exit; + + if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { + rc = -ENODEV; + goto resume_exit; + } + bnxt_get_wol_settings(bp); if (netif_running(dev)) { rc = bnxt_open(dev); @@ -9239,6 +12974,9 @@ } resume_exit: + bnxt_ulp_start(bp, rc); + if (!rc) + bnxt_reenable_sriov(bp); rtnl_unlock(); return rc; } @@ -9278,10 +13016,17 @@ return PCI_ERS_RESULT_DISCONNECT; } + if (state == pci_channel_io_frozen) + set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); + if (netif_running(netdev)) bnxt_close(netdev); - pci_disable_device(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; rtnl_unlock(); /* Request a slot slot reset. */ @@ -9299,10 +13044,10 @@ */ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) { + pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; struct net_device *netdev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(netdev); - int err = 0; - pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; + int err = 0, off; netdev_info(bp->dev, "PCI Slot Reset\n"); @@ -9313,33 +13058,29 @@ "Cannot re-enable PCI device after reset.\n"); } else { pci_set_master(pdev); + /* Upon fatal error, our device internal logic that latches to + * BAR value is getting reset and will restore only upon + * rewritting the BARs. + * + * As pci_restore_state() does not re-write the BARs if the + * value is same as saved value earlier, driver needs to + * write the BARs to 0 to force restore, in case of fatal error. 
+ */ + if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, + &bp->state)) { + for (off = PCI_BASE_ADDRESS_0; + off <= PCI_BASE_ADDRESS_5; off += 4) + pci_write_config_dword(bp->pdev, off, 0); + } pci_restore_state(pdev); pci_save_state(pdev); err = bnxt_hwrm_func_reset(bp); - if (!err && netif_running(netdev)) - err = bnxt_open(netdev); - - if (!err) { + if (!err) result = PCI_ERS_RESULT_RECOVERED; - bnxt_ulp_start(bp); - } - } - - if (result != PCI_ERS_RESULT_RECOVERED) { - if (netif_running(netdev)) - dev_close(netdev); - pci_disable_device(pdev); } rtnl_unlock(); - - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - dev_err(&pdev->dev, - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", - err); /* non-fatal, continue */ - } return result; } @@ -9354,10 +13095,21 @@ static void bnxt_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(netdev); + int err; + netdev_info(bp->dev, "PCI Slot Resume\n"); rtnl_lock(); - netif_device_attach(netdev); + err = bnxt_hwrm_func_qcaps(bp); + if (!err && netif_running(netdev)) + err = bnxt_open(netdev); + + bnxt_ulp_start(bp, err); + if (!err) { + bnxt_reenable_sriov(bp); + netif_device_attach(netdev); + } rtnl_unlock(); } @@ -9383,8 +13135,16 @@ static int __init bnxt_init(void) { + int err; + bnxt_debug_init(); - return pci_register_driver(&bnxt_pci_driver); + err = pci_register_driver(&bnxt_pci_driver); + if (err) { + bnxt_debug_exit(); + return err; + } + + return 0; } static void __exit bnxt_exit(void) -- Gitblit v1.6.2