.. | ..
1 | 1 | /* Broadcom NetXtreme-C/E network driver.
2 | 2 | *
3 | 3 | * Copyright (c) 2014-2016 Broadcom Corporation
4 | | - * Copyright (c) 2016-2018 Broadcom Limited
| 4 | + * Copyright (c) 2016-2019 Broadcom Limited
5 | 5 | *
6 | 6 | * This program is free software; you can redistribute it and/or modify
7 | 7 | * it under the terms of the GNU General Public License as published by
.. | ..
31 | 31 | #include <asm/page.h>
32 | 32 | #include <linux/time.h>
33 | 33 | #include <linux/mii.h>
| 34 | +#include <linux/mdio.h>
34 | 35 | #include <linux/if.h>
35 | 36 | #include <linux/if_vlan.h>
36 | 37 | #include <linux/if_bridge.h>
.. | ..
53 | 54 | #include <net/pkt_cls.h>
54 | 55 | #include <linux/hwmon.h>
55 | 56 | #include <linux/hwmon-sysfs.h>
| 57 | +#include <net/page_pool.h>
56 | 58 |
57 | 59 | #include "bnxt_hsi.h"
58 | 60 | #include "bnxt.h"
.. | ..
67 | 69 | #include "bnxt_debugfs.h"
68 | 70 |
69 | 71 | #define BNXT_TX_TIMEOUT (5 * HZ)
70 | | -
71 | | -static const char version[] =
72 | | - "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
| 72 | +#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
| 73 | + NETIF_MSG_TX_ERR)
73 | 74 |
74 | 75 | MODULE_LICENSE("GPL");
75 | 76 | MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
76 | | -MODULE_VERSION(DRV_MODULE_VERSION);
77 | 77 |
78 | 78 | #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
79 | 79 | #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
.. | ..
111 | 111 | BCM57452,
112 | 112 | BCM57454,
113 | 113 | BCM5745x_NPAR,
| 114 | + BCM57508,
| 115 | + BCM57504,
| 116 | + BCM57502,
| 117 | + BCM57508_NPAR,
| 118 | + BCM57504_NPAR,
| 119 | + BCM57502_NPAR,
114 | 120 | BCM58802,
115 | 121 | BCM58804,
116 | 122 | BCM58808,
117 | 123 | NETXTREME_E_VF,
118 | 124 | NETXTREME_C_VF,
119 | 125 | NETXTREME_S_VF,
| 126 | + NETXTREME_C_VF_HV,
| 127 | + NETXTREME_E_VF_HV,
| 128 | + NETXTREME_E_P5_VF,
| 129 | + NETXTREME_E_P5_VF_HV,
120 | 130 | };
121 | 131 |
122 | 132 | /* indexed by enum above */
.. | ..
152 | 162 | [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
153 | 163 | [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
154 | 164 | [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
| 165 | + [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
| 166 | + [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
| 167 | + [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
| 168 | + [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
| 169 | + [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
| 170 | + [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
155 | 171 | [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
156 | 172 | [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
157 | 173 | [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
158 | 174 | [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
159 | 175 | [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
160 | 176 | [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
| 177 | + [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
| 178 | + [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
| 179 | + [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
| 180 | + [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
161 | 181 | };
162 | 182 |
163 | 183 | static const struct pci_device_id bnxt_pci_tbl[] = {
.. | ..
196 | 216 | { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
197 | 217 | { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
198 | 218 | { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
| 219 | + { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
| 220 | + { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
| 221 | + { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
| 222 | + { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
| 223 | + { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
| 224 | + { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
| 225 | + { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
| 226 | + { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
| 227 | + { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
199 | 228 | { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
200 | 229 | { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
201 | 230 | #ifdef CONFIG_BNXT_SRIOV
202 | 231 | { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
| 232 | + { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
| 233 | + { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
203 | 234 | { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
| 235 | + { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
204 | 236 | { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
| 237 | + { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
| 238 | + { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
| 239 | + { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
| 240 | + { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
205 | 241 | { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
206 | 242 | { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
207 | 243 | { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
208 | 244 | { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
209 | 245 | { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
| 246 | + { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
| 247 | + { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
| 248 | + { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
| 249 | + { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
| 250 | + { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
210 | 251 | { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
211 | 252 | #endif
212 | 253 | { 0 }
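
Note: each PCI table entry's .driver_data carries a board_idx enum value, which the probe path uses both to index the board_info[] name table and to classify VFs via bnxt_vf_pciid(). A minimal user-space sketch of this table-driven lookup pattern (the structures here are illustrative, not the driver's):

    #include <stdio.h>

    enum board_idx { BCM57508, NETXTREME_E_P5_VF };

    struct pci_id { unsigned short device; enum board_idx idx; };

    static const char *board_name[] = {
            [BCM57508]          = "Broadcom BCM57508 NetXtreme-E",
            [NETXTREME_E_P5_VF] = "Broadcom BCM5750X NetXtreme-E VF",
    };

    static const struct pci_id tbl[] = {
            { 0x1750, BCM57508 },
            { 0x1806, NETXTREME_E_P5_VF },
    };

    int main(void)
    {
            for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                    printf("%04x -> %s\n", tbl[i].device,
                           board_name[tbl[i].idx]);
            return 0;
    }
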
.. | ..
223 | 264 |
224 | 265 | static const u16 bnxt_async_events_arr[] = {
225 | 266 | ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
| 267 | + ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
226 | 268 | ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
227 | 269 | ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
228 | 270 | ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
229 | 271 | ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
| 272 | + ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
| 273 | + ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
| 274 | + ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
| 275 | + ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
| 276 | + ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
230 | 277 | };
231 | 278 |
232 | 279 | static struct workqueue_struct *bnxt_pf_wq;
.. | ..
234 | 281 | static bool bnxt_vf_pciid(enum board_idx idx)
235 | 282 | {
236 | 283 | return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
237 | | - idx == NETXTREME_S_VF);
| 284 | + idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
| 285 | + idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
| 286 | + idx == NETXTREME_E_P5_VF_HV);
238 | 287 | }
239 | 288 |
240 | 289 | #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
241 | 290 | #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
242 | 291 | #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
243 | 292 |
244 | | -#define BNXT_CP_DB_REARM(db, raw_cons) \
245 | | - writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
246 | | -
247 | | -#define BNXT_CP_DB(db, raw_cons) \
248 | | - writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
249 | | -
250 | 293 | #define BNXT_CP_DB_IRQ_DIS(db) \
251 | 294 | writel(DB_CP_IRQ_DIS_FLAGS, db)
| 295 | +
| 296 | +#define BNXT_DB_CQ(db, idx) \
| 297 | + writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
| 298 | +
| 299 | +#define BNXT_DB_NQ_P5(db, idx) \
| 300 | + writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
| 301 | +
| 302 | +#define BNXT_DB_CQ_ARM(db, idx) \
| 303 | + writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
| 304 | +
| 305 | +#define BNXT_DB_NQ_ARM_P5(db, idx) \
| 306 | + writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
| 307 | +
| 308 | +static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
| 309 | +{
| 310 | + if (bp->flags & BNXT_FLAG_CHIP_P5)
| 311 | + BNXT_DB_NQ_P5(db, idx);
| 312 | + else
| 313 | + BNXT_DB_CQ(db, idx);
| 314 | +}
| 315 | +
| 316 | +static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
| 317 | +{
| 318 | + if (bp->flags & BNXT_FLAG_CHIP_P5)
| 319 | + BNXT_DB_NQ_ARM_P5(db, idx);
| 320 | + else
| 321 | + BNXT_DB_CQ_ARM(db, idx);
| 322 | +}
| 323 | +
| 324 | +static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
| 325 | +{
| 326 | + if (bp->flags & BNXT_FLAG_CHIP_P5)
| 327 | + writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
| 328 | + db->doorbell);
| 329 | + else
| 330 | + BNXT_DB_CQ(db, idx);
| 331 | +}
252 | 332 |
253 | 333 | const u16 bnxt_lhint_arr[] = {
254 | 334 | TX_BD_FLAGS_LHINT_512_AND_SMALLER,
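
Note: on the P5 (57500-series) chips the completion path gains a separate notification queue (NQ), and its doorbells become single 64-bit writeq()s that combine a per-ring key, a DBR_TYPE_* operation code, and the masked consumer index, while legacy chips keep the 32-bit writel() doorbells. A standalone sketch of composing such a value; the bit positions are assumptions for illustration, the real layout is the db_key64/DBR_TYPE_*/RING_CMP() definitions in bnxt.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative 64-bit doorbell: ring key | operation type | index.
     * Field positions are assumed for this example only.
     */
    #define DB_TYPE_SHIFT 60
    #define DB_IDX_MASK   0xffffffu         /* stand-in for RING_CMP() */

    static uint64_t db_compose(uint64_t key64, uint64_t type, uint32_t idx)
    {
            return key64 | (type << DB_TYPE_SHIFT) | (idx & DB_IDX_MASK);
    }

    int main(void)
    {
            uint64_t key = 0x1234ULL << 32; /* ring id folded into the key */

            printf("NQ doorbell: %#llx\n",
                   (unsigned long long)db_compose(key, 0xa, 511));
            return 0;
    }
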
.. | ..
280 | 360 | return 0;
281 | 361 |
282 | 362 | return md_dst->u.port_info.port_id;
| 363 | +}
| 364 | +
| 365 | +static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
| 366 | + u16 prod)
| 367 | +{
| 368 | + bnxt_db_write(bp, &txr->tx_db, prod);
| 369 | + txr->kick_pending = 0;
283 | 370 | }
284 | 371 |
285 | 372 | static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
.. | ..
320 | 407 | i = skb_get_queue_mapping(skb);
321 | 408 | if (unlikely(i >= bp->tx_nr_rings)) {
322 | 409 | dev_kfree_skb_any(skb);
| 410 | + atomic_long_inc(&dev->tx_dropped);
323 | 411 | return NETDEV_TX_OK;
324 | 412 | }
325 | 413 |
.. | ..
329 | 417 |
330 | 418 | free_size = bnxt_tx_avail(bp, txr);
331 | 419 | if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
| 420 | + /* We must have raced with NAPI cleanup */
| 421 | + if (net_ratelimit() && txr->kick_pending)
| 422 | + netif_warn(bp, tx_err, dev,
| 423 | + "bnxt: ring busy w/ flush pending!\n");
332 | 424 | if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
333 | 425 | return NETDEV_TX_BUSY;
334 | 426 | }
.. | ..
361 | 453 | struct tx_push_buffer *tx_push_buf = txr->tx_push;
362 | 454 | struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
363 | 455 | struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
| 456 | + void __iomem *db = txr->tx_db.doorbell;
364 | 457 | void *pdata = tx_push_buf->data;
365 | 458 | u64 *end;
366 | 459 | int j, push_len;
.. | ..
418 | 511 |
419 | 512 | push_len = (length + sizeof(*tx_push) + 7) / 8;
420 | 513 | if (push_len > 16) {
421 | | - __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
422 | | - __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
| 514 | + __iowrite64_copy(db, tx_push_buf, 16);
| 515 | + __iowrite32_copy(db + 4, tx_push_buf + 1,
423 | 516 | (push_len - 16) << 1);
424 | 517 | } else {
425 | | - __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
426 | | - push_len);
| 518 | + __iowrite64_copy(db, tx_push_buf, push_len);
427 | 519 | }
428 | 520 |
429 | 521 | goto tx_done;
.. | ..
432 | 524 | normal_tx:
433 | 525 | if (length < BNXT_MIN_PKT_SIZE) {
434 | 526 | pad = BNXT_MIN_PKT_SIZE - length;
435 | | - if (skb_pad(skb, pad)) {
| 527 | + if (skb_pad(skb, pad))
436 | 528 | /* SKB already freed. */
437 | | - tx_buf->skb = NULL;
438 | | - return NETDEV_TX_OK;
439 | | - }
| 529 | + goto tx_kick_pending;
440 | 530 | length = BNXT_MIN_PKT_SIZE;
441 | 531 | }
442 | 532 |
443 | 533 | mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
444 | 534 |
445 | | - if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
446 | | - dev_kfree_skb_any(skb);
447 | | - tx_buf->skb = NULL;
448 | | - return NETDEV_TX_OK;
449 | | - }
| 535 | + if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
| 536 | + goto tx_free;
450 | 537 |
451 | 538 | dma_unmap_addr_set(tx_buf, mapping, mapping);
452 | 539 | flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
.. | ..
530 | 617 | prod = NEXT_TX(prod);
531 | 618 | txr->tx_prod = prod;
532 | 619 |
533 | | - if (!skb->xmit_more || netif_xmit_stopped(txq))
534 | | - bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
| 620 | + if (!netdev_xmit_more() || netif_xmit_stopped(txq))
| 621 | + bnxt_txr_db_kick(bp, txr, prod);
| 622 | + else
| 623 | + txr->kick_pending = 1;
535 | 624 |
536 | 625 | tx_done:
537 | 626 |
538 | | - mmiowb();
539 | | -
540 | 627 | if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
541 | | - if (skb->xmit_more && !tx_buf->is_push)
542 | | - bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
| 628 | + if (netdev_xmit_more() && !tx_buf->is_push)
| 629 | + bnxt_txr_db_kick(bp, txr, prod);
543 | 630 |
544 | 631 | bnxt_txr_netif_try_stop_queue(bp, txr, txq);
545 | 632 | }
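
Note: the kick_pending flag implements deferred doorbell batching — while netdev_xmit_more() reports that the stack has more packets queued for this ring, the expensive MMIO doorbell write is postponed and kick_pending remembers that one is owed. A runnable user-space model of the pattern (all names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct tx_ring {
            unsigned int prod;
            bool kick_pending;
    };

    static void db_kick(struct tx_ring *txr)
    {
            printf("doorbell: prod=%u\n", txr->prod);  /* stands in for MMIO */
            txr->kick_pending = false;
    }

    static void xmit_one(struct tx_ring *txr, bool xmit_more)
    {
            txr->prod++;                    /* queue one descriptor */
            if (!xmit_more)
                    db_kick(txr);           /* flush the whole batch */
            else
                    txr->kick_pending = true;
    }

    int main(void)
    {
            struct tx_ring txr = { 0, false };

            xmit_one(&txr, true);
            xmit_one(&txr, true);
            xmit_one(&txr, false);          /* one doorbell, three packets */
            return 0;
    }

The error paths below must honor the same contract: if a doorbell is still pending when a packet is dropped, it has to be rung anyway so the hardware sees the earlier descriptors.
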
.. | ..
551 | 638 | /* start back at beginning and unmap skb */
552 | 639 | prod = txr->tx_prod;
553 | 640 | tx_buf = &txr->tx_buf_ring[prod];
554 | | - tx_buf->skb = NULL;
555 | 641 | dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
556 | 642 | skb_headlen(skb), PCI_DMA_TODEVICE);
557 | 643 | prod = NEXT_TX(prod);
.. | ..
565 | 651 | PCI_DMA_TODEVICE);
566 | 652 | }
567 | 653 |
| 654 | +tx_free:
568 | 655 | dev_kfree_skb_any(skb);
| 656 | +tx_kick_pending:
| 657 | + if (txr->kick_pending)
| 658 | + bnxt_txr_db_kick(bp, txr, txr->tx_prod);
| 659 | + txr->tx_buf_ring[txr->tx_prod].skb = NULL;
| 660 | + atomic_long_inc(&dev->tx_dropped);
569 | 661 | return NETDEV_TX_OK;
570 | 662 | }
571 | 663 |
.. | ..
631 | 723 | }
632 | 724 |
633 | 725 | static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
| 726 | + struct bnxt_rx_ring_info *rxr,
634 | 727 | gfp_t gfp)
635 | 728 | {
636 | 729 | struct device *dev = &bp->pdev->dev;
637 | 730 | struct page *page;
638 | 731 |
639 | | - page = alloc_page(gfp);
| 732 | + page = page_pool_dev_alloc_pages(rxr->page_pool);
640 | 733 | if (!page)
641 | 734 | return NULL;
642 | 735 |
643 | 736 | *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
644 | 737 | DMA_ATTR_WEAK_ORDERING);
645 | 738 | if (dma_mapping_error(dev, *mapping)) {
646 | | - __free_page(page);
| 739 | + page_pool_recycle_direct(rxr->page_pool, page);
647 | 740 | return NULL;
648 | 741 | }
649 | 742 | *mapping += bp->rx_dma_offset;
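
Note: RX pages now come from a per-ring page_pool instead of raw alloc_page(), so a page freed on the error path goes back to the pool's cache via page_pool_recycle_direct() rather than to the page allocator. A minimal sketch of the pool's setup, assuming kernel context and illustrative sizing:

    #include <linux/dma-mapping.h>
    #include <net/page_pool.h>

    /* One pool per RX ring; pool_size here is illustrative. */
    static struct page_pool *rx_page_pool_create(struct device *dev, int nid)
    {
            struct page_pool_params pp = {
                    .order     = 0,             /* single pages */
                    .pool_size = 1024,          /* roughly ring-sized */
                    .nid       = nid,
                    .dev       = dev,
                    .dma_dir   = DMA_FROM_DEVICE,
            };

            return page_pool_create(&pp);       /* ERR_PTR() on failure */
    }

Allocation then uses page_pool_dev_alloc_pages(pool), and pages that never reach the stack are handed back with page_pool_recycle_direct(pool, page), exactly as the hunk above does.
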
.. | ..
679 | 772 | dma_addr_t mapping;
680 | 773 |
681 | 774 | if (BNXT_RX_PAGE_MODE(bp)) {
682 | | - struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
| 775 | + struct page *page =
| 776 | + __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
683 | 777 |
684 | 778 | if (!page)
685 | 779 | return -ENOMEM;
.. | ..
788 | 882 | return 0;
789 | 883 | }
790 | 884 |
791 | | -static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
792 | | - u32 agg_bufs)
| 885 | +static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
| 886 | + struct bnxt_cp_ring_info *cpr,
| 887 | + u16 cp_cons, u16 curr)
793 | 888 | {
| 889 | + struct rx_agg_cmp *agg;
| 890 | +
| 891 | + cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
| 892 | + agg = (struct rx_agg_cmp *)
| 893 | + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
| 894 | + return agg;
| 895 | +}
| 896 | +
| 897 | +static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
| 898 | + struct bnxt_rx_ring_info *rxr,
| 899 | + u16 agg_id, u16 curr)
| 900 | +{
| 901 | + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
| 902 | +
| 903 | + return &tpa_info->agg_arr[curr];
| 904 | +}
| 905 | +
| 906 | +static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
| 907 | + u16 start, u32 agg_bufs, bool tpa)
| 908 | +{
| 909 | + struct bnxt_napi *bnapi = cpr->bnapi;
794 | 910 | struct bnxt *bp = bnapi->bp;
795 | | - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
796 | 911 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
797 | 912 | u16 prod = rxr->rx_agg_prod;
798 | 913 | u16 sw_prod = rxr->rx_sw_agg_prod;
| 914 | + bool p5_tpa = false;
799 | 915 | u32 i;
| 916 | +
| 917 | + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
| 918 | + p5_tpa = true;
800 | 919 |
801 | 920 | for (i = 0; i < agg_bufs; i++) {
802 | 921 | u16 cons;
.. | ..
805 | 924 | struct rx_bd *prod_bd;
806 | 925 | struct page *page;
807 | 926 |
808 | | - agg = (struct rx_agg_cmp *)
809 | | - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
| 927 | + if (p5_tpa)
| 928 | + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
| 929 | + else
| 930 | + agg = bnxt_get_agg(bp, cpr, idx, start + i);
810 | 931 | cons = agg->rx_agg_cmp_opaque;
811 | 932 | __clear_bit(cons, rxr->rx_agg_bmap);
.. | ..
834 | 955 |
835 | 956 | prod = NEXT_RX_AGG(prod);
836 | 957 | sw_prod = NEXT_RX_AGG(sw_prod);
837 | | - cp_cons = NEXT_CMP(cp_cons);
838 | 958 | }
839 | 959 | rxr->rx_agg_prod = prod;
840 | 960 | rxr->rx_sw_agg_prod = sw_prod;
.. | ..
848 | 968 | {
849 | 969 | unsigned int payload = offset_and_len >> 16;
850 | 970 | unsigned int len = offset_and_len & 0xffff;
851 | | - struct skb_frag_struct *frag;
| 971 | + skb_frag_t *frag;
852 | 972 | struct page *page = data;
853 | 973 | u16 prod = rxr->rx_prod;
854 | 974 | struct sk_buff *skb;
.. | ..
862 | 982 | dma_addr -= bp->rx_dma_offset;
863 | 983 | dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
864 | 984 | DMA_ATTR_WEAK_ORDERING);
| 985 | + page_pool_release_page(rxr->page_pool, page);
865 | 986 |
866 | 987 | if (unlikely(!payload))
867 | | - payload = eth_get_headlen(data_ptr, len);
| 988 | + payload = eth_get_headlen(bp->dev, data_ptr, len);
868 | 989 |
869 | 990 | skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
870 | 991 | if (!skb) {
.. | ..
879 | 1000 |
880 | 1001 | frag = &skb_shinfo(skb)->frags[0];
881 | 1002 | skb_frag_size_sub(frag, payload);
882 | | - frag->page_offset += payload;
| 1003 | + skb_frag_off_add(frag, payload);
883 | 1004 | skb->data_len -= payload;
884 | 1005 | skb->tail += payload;
885 | 1006 |
.. | ..
915 | 1036 | return skb;
916 | 1037 | }
917 | 1038 |
918 | | -static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
919 | | - struct sk_buff *skb, u16 cp_cons,
920 | | - u32 agg_bufs)
| 1039 | +static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
| 1040 | + struct bnxt_cp_ring_info *cpr,
| 1041 | + struct sk_buff *skb, u16 idx,
| 1042 | + u32 agg_bufs, bool tpa)
921 | 1043 | {
| 1044 | + struct bnxt_napi *bnapi = cpr->bnapi;
922 | 1045 | struct pci_dev *pdev = bp->pdev;
923 | | - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
924 | 1046 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
925 | 1047 | u16 prod = rxr->rx_agg_prod;
| 1048 | + bool p5_tpa = false;
926 | 1049 | u32 i;
| 1050 | +
| 1051 | + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
| 1052 | + p5_tpa = true;
927 | 1053 |
928 | 1054 | for (i = 0; i < agg_bufs; i++) {
929 | 1055 | u16 cons, frag_len;
.. | ..
932 | 1058 | struct page *page;
933 | 1059 | dma_addr_t mapping;
934 | 1060 |
935 | | - agg = (struct rx_agg_cmp *)
936 | | - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
| 1061 | + if (p5_tpa)
| 1062 | + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
| 1063 | + else
| 1064 | + agg = bnxt_get_agg(bp, cpr, idx, i);
937 | 1065 | cons = agg->rx_agg_cmp_opaque;
938 | 1066 | frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
939 | 1067 | RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
.. | ..
967 | 1095 | * allocated already.
968 | 1096 | */
969 | 1097 | rxr->rx_agg_prod = prod;
970 | | - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
| 1098 | + bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
971 | 1099 | return NULL;
972 | 1100 | }
.. | ..
980 | 1108 | skb->truesize += PAGE_SIZE;
981 | 1109 |
982 | 1110 | prod = NEXT_RX_AGG(prod);
983 | | - cp_cons = NEXT_CMP(cp_cons);
984 | 1111 | }
985 | 1112 | rxr->rx_agg_prod = prod;
986 | 1113 | return skb;
.. | ..
1024 | 1151 | return skb;
1025 | 1152 | }
1026 | 1153 |
1027 | | -static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
| 1154 | +static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1028 | 1155 | u32 *raw_cons, void *cmp)
1029 | 1156 | {
1030 | | - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1031 | 1157 | struct rx_cmp *rxcmp = cmp;
1032 | 1158 | u32 tmp_raw_cons = *raw_cons;
1033 | 1159 | u8 cmp_type, agg_bufs = 0;
1032 | 1158 | u32 tmp_raw_cons = *raw_cons; |
---|
1033 | 1159 | u8 cmp_type, agg_bufs = 0; |
---|
.. | .. |
---|
1041 | 1167 | } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { |
---|
1042 | 1168 | struct rx_tpa_end_cmp *tpa_end = cmp; |
---|
1043 | 1169 | |
---|
1044 | | - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & |
---|
1045 | | - RX_TPA_END_CMP_AGG_BUFS) >> |
---|
1046 | | - RX_TPA_END_CMP_AGG_BUFS_SHIFT; |
---|
| 1170 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 1171 | + return 0; |
---|
| 1172 | + |
---|
| 1173 | + agg_bufs = TPA_END_AGG_BUFS(tpa_end); |
---|
1047 | 1174 | } |
---|
1048 | 1175 | |
---|
1049 | 1176 | if (agg_bufs) { |
---|
.. | .. |
---|
1054 | 1181 | return 0; |
---|
1055 | 1182 | } |
---|
1056 | 1183 | |
---|
| 1184 | +static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) |
---|
| 1185 | +{ |
---|
| 1186 | + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) |
---|
| 1187 | + return; |
---|
| 1188 | + |
---|
| 1189 | + if (BNXT_PF(bp)) |
---|
| 1190 | + queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); |
---|
| 1191 | + else |
---|
| 1192 | + schedule_delayed_work(&bp->fw_reset_task, delay); |
---|
| 1193 | +} |
---|
| 1194 | + |
---|
1057 | 1195 | static void bnxt_queue_sp_work(struct bnxt *bp) |
---|
1058 | 1196 | { |
---|
1059 | 1197 | if (BNXT_PF(bp)) |
---|
.. | .. |
---|
1062 | 1200 | schedule_work(&bp->sp_task); |
---|
1063 | 1201 | } |
---|
1064 | 1202 | |
---|
1065 | | -static void bnxt_cancel_sp_work(struct bnxt *bp) |
---|
1066 | | -{ |
---|
1067 | | - if (BNXT_PF(bp)) |
---|
1068 | | - flush_workqueue(bnxt_pf_wq); |
---|
1069 | | - else |
---|
1070 | | - cancel_work_sync(&bp->sp_task); |
---|
1071 | | -} |
---|
1072 | | - |
---|
1073 | 1203 | static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) |
---|
1074 | 1204 | { |
---|
1075 | 1205 | if (!rxr->bnapi->in_reset) { |
---|
1076 | 1206 | rxr->bnapi->in_reset = true; |
---|
1077 | | - set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); |
---|
| 1207 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 1208 | + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); |
---|
| 1209 | + else |
---|
| 1210 | + set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); |
---|
1078 | 1211 | bnxt_queue_sp_work(bp); |
---|
1079 | 1212 | } |
---|
1080 | 1213 | rxr->rx_next_cons = 0xffff; |
---|
| 1214 | +} |
---|
| 1215 | + |
---|
| 1216 | +static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) |
---|
| 1217 | +{ |
---|
| 1218 | + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; |
---|
| 1219 | + u16 idx = agg_id & MAX_TPA_P5_MASK; |
---|
| 1220 | + |
---|
| 1221 | + if (test_bit(idx, map->agg_idx_bmap)) |
---|
| 1222 | + idx = find_first_zero_bit(map->agg_idx_bmap, |
---|
| 1223 | + BNXT_AGG_IDX_BMAP_SIZE); |
---|
| 1224 | + __set_bit(idx, map->agg_idx_bmap); |
---|
| 1225 | + map->agg_id_tbl[agg_id] = idx; |
---|
| 1226 | + return idx; |
---|
| 1227 | +} |
---|
| 1228 | + |
---|
| 1229 | +static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) |
---|
| 1230 | +{ |
---|
| 1231 | + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; |
---|
| 1232 | + |
---|
| 1233 | + __clear_bit(idx, map->agg_idx_bmap); |
---|
| 1234 | +} |
---|
| 1235 | + |
---|
| 1236 | +static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) |
---|
| 1237 | +{ |
---|
| 1238 | + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; |
---|
| 1239 | + |
---|
| 1240 | + return map->agg_id_tbl[agg_id]; |
---|
1081 | 1241 | } |
---|
1082 | 1242 | |
---|
1083 | 1243 | static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, |
---|
1084 | 1244 | struct rx_tpa_start_cmp *tpa_start, |
---|
1085 | 1245 | struct rx_tpa_start_cmp_ext *tpa_start1) |
---|
1086 | 1246 | { |
---|
1087 | | - u8 agg_id = TPA_START_AGG_ID(tpa_start); |
---|
1088 | | - u16 cons, prod; |
---|
1089 | | - struct bnxt_tpa_info *tpa_info; |
---|
1090 | 1247 | struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; |
---|
| 1248 | + struct bnxt_tpa_info *tpa_info; |
---|
| 1249 | + u16 cons, prod, agg_id; |
---|
1091 | 1250 | struct rx_bd *prod_bd; |
---|
1092 | 1251 | dma_addr_t mapping; |
---|
1093 | 1252 | |
---|
| 1253 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 1254 | + agg_id = TPA_START_AGG_ID_P5(tpa_start); |
---|
| 1255 | + agg_id = bnxt_alloc_agg_idx(rxr, agg_id); |
---|
| 1256 | + } else { |
---|
| 1257 | + agg_id = TPA_START_AGG_ID(tpa_start); |
---|
| 1258 | + } |
---|
1094 | 1259 | cons = tpa_start->rx_tpa_start_cmp_opaque; |
---|
1095 | 1260 | prod = rxr->rx_prod; |
---|
1096 | 1261 | cons_rx_buf = &rxr->rx_buf_ring[cons]; |
---|
1097 | 1262 | prod_rx_buf = &rxr->rx_buf_ring[prod]; |
---|
1098 | 1263 | tpa_info = &rxr->rx_tpa[agg_id]; |
---|
1099 | 1264 | |
---|
1100 | | - if (unlikely(cons != rxr->rx_next_cons)) { |
---|
1101 | | - netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", |
---|
1102 | | - cons, rxr->rx_next_cons); |
---|
| 1265 | + if (unlikely(cons != rxr->rx_next_cons || |
---|
| 1266 | + TPA_START_ERROR(tpa_start))) { |
---|
| 1267 | + netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", |
---|
| 1268 | + cons, rxr->rx_next_cons, |
---|
| 1269 | + TPA_START_ERROR_CODE(tpa_start1)); |
---|
1103 | 1270 | bnxt_sched_reset(bp, rxr); |
---|
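
Note: bnxt_alloc_agg_idx()/bnxt_free_agg_idx() above implement a small ID-remapping table — the hardware's 16-bit TPA aggregation ID is masked down to a preferred software slot, and a bitmap resolves collisions by picking the first free slot instead. A runnable user-space model of the same technique (sizes are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_SLOTS  64
    #define SLOT_MASK (NR_SLOTS - 1)

    static uint64_t bmap;                 /* one bit per software slot */
    static uint16_t id_tbl[1024];         /* hw agg_id -> software slot */

    static int alloc_idx(uint16_t agg_id)
    {
            int idx = agg_id & SLOT_MASK;

            if (bmap & (1ULL << idx)) {           /* preferred slot taken */
                    for (idx = 0; idx < NR_SLOTS; idx++)
                            if (!(bmap & (1ULL << idx)))
                                    break;        /* first free slot */
            }
            bmap |= 1ULL << idx;
            id_tbl[agg_id] = idx;
            return idx;
    }

    static void free_idx(int idx)
    {
            bmap &= ~(1ULL << idx);
    }

    int main(void)
    {
            printf("id 5  -> slot %d\n", alloc_idx(5));
            printf("id 69 -> slot %d\n", alloc_idx(69)); /* 69 & 63 == 5 */
            free_idx(id_tbl[5]);
            return 0;
    }
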
1104 | 1271 | return; |
---|
1105 | 1272 | } |
---|
.. | .. |
---|
1138 | 1305 | } else { |
---|
1139 | 1306 | tpa_info->hash_type = PKT_HASH_TYPE_NONE; |
---|
1140 | 1307 | tpa_info->gso_type = 0; |
---|
1141 | | - if (netif_msg_rx_err(bp)) |
---|
1142 | | - netdev_warn(bp->dev, "TPA packet without valid hash\n"); |
---|
| 1308 | + netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); |
---|
1143 | 1309 | } |
---|
1144 | 1310 | tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); |
---|
1145 | 1311 | tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); |
---|
1146 | 1312 | tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); |
---|
| 1313 | + tpa_info->agg_count = 0; |
---|
1147 | 1314 | |
---|
1148 | 1315 | rxr->rx_prod = NEXT_RX(prod); |
---|
1149 | 1316 | cons = NEXT_RX(cons); |
---|
.. | .. |
---|
1155 | 1322 | cons_rx_buf->data = NULL; |
---|
1156 | 1323 | } |
---|
1157 | 1324 | |
---|
1158 | | -static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, |
---|
1159 | | - u16 cp_cons, u32 agg_bufs) |
---|
| 1325 | +static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) |
---|
1160 | 1326 | { |
---|
1161 | 1327 | if (agg_bufs) |
---|
1162 | | - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); |
---|
| 1328 | + bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); |
---|
1163 | 1329 | } |
---|
| 1330 | + |
---|
| 1331 | +#ifdef CONFIG_INET |
---|
| 1332 | +static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) |
---|
| 1333 | +{ |
---|
| 1334 | + struct udphdr *uh = NULL; |
---|
| 1335 | + |
---|
| 1336 | + if (ip_proto == htons(ETH_P_IP)) { |
---|
| 1337 | + struct iphdr *iph = (struct iphdr *)skb->data; |
---|
| 1338 | + |
---|
| 1339 | + if (iph->protocol == IPPROTO_UDP) |
---|
| 1340 | + uh = (struct udphdr *)(iph + 1); |
---|
| 1341 | + } else { |
---|
| 1342 | + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; |
---|
| 1343 | + |
---|
| 1344 | + if (iph->nexthdr == IPPROTO_UDP) |
---|
| 1345 | + uh = (struct udphdr *)(iph + 1); |
---|
| 1346 | + } |
---|
| 1347 | + if (uh) { |
---|
| 1348 | + if (uh->check) |
---|
| 1349 | + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; |
---|
| 1350 | + else |
---|
| 1351 | + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; |
---|
| 1352 | + } |
---|
| 1353 | +} |
---|
| 1354 | +#endif |
---|
1164 | 1355 | |
---|
1165 | 1356 | static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, |
---|
1166 | 1357 | int payload_off, int tcp_ts, |
---|
.. | .. |
---|
1219 | 1410 | } |
---|
1220 | 1411 | |
---|
1221 | 1412 | if (inner_mac_off) { /* tunnel */ |
---|
1222 | | - struct udphdr *uh = NULL; |
---|
1223 | 1413 | __be16 proto = *((__be16 *)(skb->data + outer_ip_off - |
---|
1224 | 1414 | ETH_HLEN - 2)); |
---|
1225 | 1415 | |
---|
1226 | | - if (proto == htons(ETH_P_IP)) { |
---|
1227 | | - struct iphdr *iph = (struct iphdr *)skb->data; |
---|
| 1416 | + bnxt_gro_tunnel(skb, proto); |
---|
| 1417 | + } |
---|
| 1418 | +#endif |
---|
| 1419 | + return skb; |
---|
| 1420 | +} |
---|
1228 | 1421 | |
---|
1229 | | - if (iph->protocol == IPPROTO_UDP) |
---|
1230 | | - uh = (struct udphdr *)(iph + 1); |
---|
1231 | | - } else { |
---|
1232 | | - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; |
---|
| 1422 | +static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, |
---|
| 1423 | + int payload_off, int tcp_ts, |
---|
| 1424 | + struct sk_buff *skb) |
---|
| 1425 | +{ |
---|
| 1426 | +#ifdef CONFIG_INET |
---|
| 1427 | + u16 outer_ip_off, inner_ip_off, inner_mac_off; |
---|
| 1428 | + u32 hdr_info = tpa_info->hdr_info; |
---|
| 1429 | + int iphdr_len, nw_off; |
---|
1233 | 1430 | |
---|
1234 | | - if (iph->nexthdr == IPPROTO_UDP) |
---|
1235 | | - uh = (struct udphdr *)(iph + 1); |
---|
1236 | | - } |
---|
1237 | | - if (uh) { |
---|
1238 | | - if (uh->check) |
---|
1239 | | - skb_shinfo(skb)->gso_type |= |
---|
1240 | | - SKB_GSO_UDP_TUNNEL_CSUM; |
---|
1241 | | - else |
---|
1242 | | - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; |
---|
1243 | | - } |
---|
| 1431 | + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); |
---|
| 1432 | + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); |
---|
| 1433 | + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); |
---|
| 1434 | + |
---|
| 1435 | + nw_off = inner_ip_off - ETH_HLEN; |
---|
| 1436 | + skb_set_network_header(skb, nw_off); |
---|
| 1437 | + iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? |
---|
| 1438 | + sizeof(struct ipv6hdr) : sizeof(struct iphdr); |
---|
| 1439 | + skb_set_transport_header(skb, nw_off + iphdr_len); |
---|
| 1440 | + |
---|
| 1441 | + if (inner_mac_off) { /* tunnel */ |
---|
| 1442 | + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - |
---|
| 1443 | + ETH_HLEN - 2)); |
---|
| 1444 | + |
---|
| 1445 | + bnxt_gro_tunnel(skb, proto); |
---|
1244 | 1446 | } |
---|
1245 | 1447 | #endif |
---|
1246 | 1448 | return skb; |
---|
.. | .. |
---|
1287 | 1489 | return NULL; |
---|
1288 | 1490 | } |
---|
1289 | 1491 | |
---|
1290 | | - if (nw_off) { /* tunnel */ |
---|
1291 | | - struct udphdr *uh = NULL; |
---|
1292 | | - |
---|
1293 | | - if (skb->protocol == htons(ETH_P_IP)) { |
---|
1294 | | - struct iphdr *iph = (struct iphdr *)skb->data; |
---|
1295 | | - |
---|
1296 | | - if (iph->protocol == IPPROTO_UDP) |
---|
1297 | | - uh = (struct udphdr *)(iph + 1); |
---|
1298 | | - } else { |
---|
1299 | | - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; |
---|
1300 | | - |
---|
1301 | | - if (iph->nexthdr == IPPROTO_UDP) |
---|
1302 | | - uh = (struct udphdr *)(iph + 1); |
---|
1303 | | - } |
---|
1304 | | - if (uh) { |
---|
1305 | | - if (uh->check) |
---|
1306 | | - skb_shinfo(skb)->gso_type |= |
---|
1307 | | - SKB_GSO_UDP_TUNNEL_CSUM; |
---|
1308 | | - else |
---|
1309 | | - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; |
---|
1310 | | - } |
---|
1311 | | - } |
---|
| 1492 | + if (nw_off) /* tunnel */ |
---|
| 1493 | + bnxt_gro_tunnel(skb, skb->protocol); |
---|
1312 | 1494 | #endif |
---|
1313 | 1495 | return skb; |
---|
1314 | 1496 | } |
---|
.. | .. |
---|
1331 | 1513 | skb_shinfo(skb)->gso_size = |
---|
1332 | 1514 | le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); |
---|
1333 | 1515 | skb_shinfo(skb)->gso_type = tpa_info->gso_type; |
---|
1334 | | - payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & |
---|
1335 | | - RX_TPA_END_CMP_PAYLOAD_OFFSET) >> |
---|
1336 | | - RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; |
---|
| 1516 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 1517 | + payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); |
---|
| 1518 | + else |
---|
| 1519 | + payload_off = TPA_END_PAYLOAD_OFF(tpa_end); |
---|
1337 | 1520 | skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); |
---|
1338 | 1521 | if (likely(skb)) |
---|
1339 | 1522 | tcp_gro_complete(skb); |
---|
.. | .. |
---|
1353 | 1536 | } |
---|
1354 | 1537 | |
---|
1355 | 1538 | static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, |
---|
1356 | | - struct bnxt_napi *bnapi, |
---|
| 1539 | + struct bnxt_cp_ring_info *cpr, |
---|
1357 | 1540 | u32 *raw_cons, |
---|
1358 | 1541 | struct rx_tpa_end_cmp *tpa_end, |
---|
1359 | 1542 | struct rx_tpa_end_cmp_ext *tpa_end1, |
---|
1360 | 1543 | u8 *event) |
---|
1361 | 1544 | { |
---|
1362 | | - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
| 1545 | + struct bnxt_napi *bnapi = cpr->bnapi; |
---|
1363 | 1546 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
---|
1364 | | - u8 agg_id = TPA_END_AGG_ID(tpa_end); |
---|
1365 | 1547 | u8 *data_ptr, agg_bufs; |
---|
1366 | | - u16 cp_cons = RING_CMP(*raw_cons); |
---|
1367 | 1548 | unsigned int len; |
---|
1368 | 1549 | struct bnxt_tpa_info *tpa_info; |
---|
1369 | 1550 | dma_addr_t mapping; |
---|
1370 | 1551 | struct sk_buff *skb; |
---|
| 1552 | + u16 idx = 0, agg_id; |
---|
1371 | 1553 | void *data; |
---|
| 1554 | + bool gro; |
---|
1372 | 1555 | |
---|
1373 | 1556 | if (unlikely(bnapi->in_reset)) { |
---|
1374 | | - int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end); |
---|
| 1557 | + int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); |
---|
1375 | 1558 | |
---|
1376 | 1559 | if (rc < 0) |
---|
1377 | 1560 | return ERR_PTR(-EBUSY); |
---|
1378 | 1561 | return NULL; |
---|
1379 | 1562 | } |
---|
1380 | 1563 | |
---|
1381 | | - tpa_info = &rxr->rx_tpa[agg_id]; |
---|
| 1564 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 1565 | + agg_id = TPA_END_AGG_ID_P5(tpa_end); |
---|
| 1566 | + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); |
---|
| 1567 | + agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); |
---|
| 1568 | + tpa_info = &rxr->rx_tpa[agg_id]; |
---|
| 1569 | + if (unlikely(agg_bufs != tpa_info->agg_count)) { |
---|
| 1570 | + netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", |
---|
| 1571 | + agg_bufs, tpa_info->agg_count); |
---|
| 1572 | + agg_bufs = tpa_info->agg_count; |
---|
| 1573 | + } |
---|
| 1574 | + tpa_info->agg_count = 0; |
---|
| 1575 | + *event |= BNXT_AGG_EVENT; |
---|
| 1576 | + bnxt_free_agg_idx(rxr, agg_id); |
---|
| 1577 | + idx = agg_id; |
---|
| 1578 | + gro = !!(bp->flags & BNXT_FLAG_GRO); |
---|
| 1579 | + } else { |
---|
| 1580 | + agg_id = TPA_END_AGG_ID(tpa_end); |
---|
| 1581 | + agg_bufs = TPA_END_AGG_BUFS(tpa_end); |
---|
| 1582 | + tpa_info = &rxr->rx_tpa[agg_id]; |
---|
| 1583 | + idx = RING_CMP(*raw_cons); |
---|
| 1584 | + if (agg_bufs) { |
---|
| 1585 | + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) |
---|
| 1586 | + return ERR_PTR(-EBUSY); |
---|
| 1587 | + |
---|
| 1588 | + *event |= BNXT_AGG_EVENT; |
---|
| 1589 | + idx = NEXT_CMP(idx); |
---|
| 1590 | + } |
---|
| 1591 | + gro = !!TPA_END_GRO(tpa_end); |
---|
| 1592 | + } |
---|
1382 | 1593 | data = tpa_info->data; |
---|
1383 | 1594 | data_ptr = tpa_info->data_ptr; |
---|
1384 | 1595 | prefetch(data_ptr); |
---|
1385 | 1596 | len = tpa_info->len; |
---|
1386 | 1597 | mapping = tpa_info->mapping; |
---|
1387 | 1598 | |
---|
1388 | | - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & |
---|
1389 | | - RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; |
---|
1390 | | - |
---|
1391 | | - if (agg_bufs) { |
---|
1392 | | - if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) |
---|
1393 | | - return ERR_PTR(-EBUSY); |
---|
1394 | | - |
---|
1395 | | - *event |= BNXT_AGG_EVENT; |
---|
1396 | | - cp_cons = NEXT_CMP(cp_cons); |
---|
1397 | | - } |
---|
1398 | | - |
---|
1399 | 1599 | if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { |
---|
1400 | | - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); |
---|
| 1600 | + bnxt_abort_tpa(cpr, idx, agg_bufs); |
---|
1401 | 1601 | if (agg_bufs > MAX_SKB_FRAGS) |
---|
1402 | 1602 | netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", |
---|
1403 | 1603 | agg_bufs, (int)MAX_SKB_FRAGS); |
---|
.. | .. |
---|
1407 | 1607 | if (len <= bp->rx_copy_thresh) { |
---|
1408 | 1608 | skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); |
---|
1409 | 1609 | if (!skb) { |
---|
1410 | | - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); |
---|
| 1610 | + bnxt_abort_tpa(cpr, idx, agg_bufs); |
---|
1411 | 1611 | return NULL; |
---|
1412 | 1612 | } |
---|
1413 | 1613 | } else { |
---|
.. | .. |
---|
1416 | 1616 | |
---|
1417 | 1617 | new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); |
---|
1418 | 1618 | if (!new_data) { |
---|
1419 | | - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); |
---|
| 1619 | + bnxt_abort_tpa(cpr, idx, agg_bufs); |
---|
1420 | 1620 | return NULL; |
---|
1421 | 1621 | } |
---|
1422 | 1622 | |
---|
.. | .. |
---|
1431 | 1631 | |
---|
1432 | 1632 | if (!skb) { |
---|
1433 | 1633 | kfree(data); |
---|
1434 | | - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); |
---|
| 1634 | + bnxt_abort_tpa(cpr, idx, agg_bufs); |
---|
1435 | 1635 | return NULL; |
---|
1436 | 1636 | } |
---|
1437 | 1637 | skb_reserve(skb, bp->rx_offset); |
---|
.. | .. |
---|
1439 | 1639 | } |
---|
1440 | 1640 | |
---|
1441 | 1641 | if (agg_bufs) { |
---|
1442 | | - skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); |
---|
| 1642 | + skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); |
---|
1443 | 1643 | if (!skb) { |
---|
1444 | 1644 | /* Page reuse already handled by bnxt_rx_pages(). */ |
---|
1445 | 1645 | return NULL; |
---|
.. | .. |
---|
1453 | 1653 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); |
---|
1454 | 1654 | |
---|
1455 | 1655 | if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && |
---|
1456 | | - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
---|
1457 | | - u16 vlan_proto = tpa_info->metadata >> |
---|
1458 | | - RX_CMP_FLAGS2_METADATA_TPID_SFT; |
---|
| 1656 | + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { |
---|
| 1657 | + __be16 vlan_proto = htons(tpa_info->metadata >> |
---|
| 1658 | + RX_CMP_FLAGS2_METADATA_TPID_SFT); |
---|
1459 | 1659 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; |
---|
1460 | 1660 | |
---|
1461 | | - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
---|
| 1661 | + if (eth_type_vlan(vlan_proto)) { |
---|
| 1662 | + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); |
---|
| 1663 | + } else { |
---|
| 1664 | + dev_kfree_skb(skb); |
---|
| 1665 | + return NULL; |
---|
| 1666 | + } |
---|
1462 | 1667 | } |
---|
1463 | 1668 | |
---|
1464 | 1669 | skb_checksum_none_assert(skb); |
---|
.. | .. |
---|
1468 | 1673 | (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; |
---|
1469 | 1674 | } |
---|
1470 | 1675 | |
---|
1471 | | - if (TPA_END_GRO(tpa_end)) |
---|
| 1676 | + if (gro) |
---|
1472 | 1677 | skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); |
---|
1473 | 1678 | |
---|
1474 | 1679 | return skb; |
---|
| 1680 | +} |
---|
| 1681 | + |
---|
| 1682 | +static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, |
---|
| 1683 | + struct rx_agg_cmp *rx_agg) |
---|
| 1684 | +{ |
---|
| 1685 | + u16 agg_id = TPA_AGG_AGG_ID(rx_agg); |
---|
| 1686 | + struct bnxt_tpa_info *tpa_info; |
---|
| 1687 | + |
---|
| 1688 | + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); |
---|
| 1689 | + tpa_info = &rxr->rx_tpa[agg_id]; |
---|
| 1690 | + BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); |
---|
| 1691 | + tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; |
---|
1475 | 1692 | } |
---|
1476 | 1693 | |
---|
1477 | 1694 | static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, |
---|
.. | .. |
---|
1493 | 1710 | * -ENOMEM - packet aborted due to out of memory |
---|
1494 | 1711 | * -EIO - packet aborted due to hw error indicated in BD |
---|
1495 | 1712 | */ |
---|
1496 | | -static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, |
---|
1497 | | - u8 *event) |
---|
| 1713 | +static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
---|
| 1714 | + u32 *raw_cons, u8 *event) |
---|
1498 | 1715 | { |
---|
1499 | | - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
| 1716 | + struct bnxt_napi *bnapi = cpr->bnapi; |
---|
1500 | 1717 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
---|
1501 | 1718 | struct net_device *dev = bp->dev; |
---|
1502 | 1719 | struct rx_cmp *rxcmp; |
---|
.. | .. |
---|
1515 | 1732 | rxcmp = (struct rx_cmp *) |
---|
1516 | 1733 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
---|
1517 | 1734 | |
---|
| 1735 | + cmp_type = RX_CMP_TYPE(rxcmp); |
---|
| 1736 | + |
---|
| 1737 | + if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { |
---|
| 1738 | + bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); |
---|
| 1739 | + goto next_rx_no_prod_no_len; |
---|
| 1740 | + } |
---|
| 1741 | + |
---|
1518 | 1742 | tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); |
---|
1519 | 1743 | cp_cons = RING_CMP(tmp_raw_cons); |
---|
1520 | 1744 | rxcmp1 = (struct rx_cmp_ext *) |
---|
.. | .. |
---|
1523 | 1747 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) |
---|
1524 | 1748 | return -EBUSY; |
---|
1525 | 1749 | |
---|
1526 | | - cmp_type = RX_CMP_TYPE(rxcmp); |
---|
1527 | | - |
---|
| 1750 | + /* The valid test of the entry must be done first before |
---|
| 1751 | + * reading any further. |
---|
| 1752 | + */ |
---|
| 1753 | + dma_rmb(); |
---|
1528 | 1754 | prod = rxr->rx_prod; |
---|
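
Note: the dma_rmb() added here enforces the standard descriptor-ring ordering rule — the NIC writes a completion's payload first and flips its valid bit last, so the CPU must order the valid-bit check before any other load from that entry. A condensed form of the pattern, assuming kernel context and an illustrative entry layout:

    struct cmpl {                       /* illustrative completion entry */
            __le16 len;
            u8 type;
            u8 valid;
    };

    static int poll_one(const struct cmpl *desc, u16 *len)
    {
            if (!READ_ONCE(desc->valid))
                    return -EBUSY;      /* device hasn't finished writing */

            dma_rmb();                  /* valid check before payload loads */

            *len = le16_to_cpu(desc->len);  /* now safe to read the rest */
            return 0;
    }
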
1529 | 1755 | |
---|
1530 | 1756 | if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { |
---|
.. | .. |
---|
1535 | 1761 | goto next_rx_no_prod_no_len; |
---|
1536 | 1762 | |
---|
1537 | 1763 | } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { |
---|
1538 | | - skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, |
---|
| 1764 | + skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, |
---|
1539 | 1765 | (struct rx_tpa_end_cmp *)rxcmp, |
---|
1540 | 1766 | (struct rx_tpa_end_cmp_ext *)rxcmp1, event); |
---|
1541 | 1767 | |
---|
.. | .. |
---|
1553 | 1779 | |
---|
1554 | 1780 | cons = rxcmp->rx_cmp_opaque; |
---|
1555 | 1781 | if (unlikely(cons != rxr->rx_next_cons)) { |
---|
1556 | | - int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); |
---|
| 1782 | + int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); |
---|
1557 | 1783 | |
---|
1558 | | - netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", |
---|
1559 | | - cons, rxr->rx_next_cons); |
---|
| 1784 | + /* 0xffff is forced error, don't print it */ |
---|
| 1785 | + if (rxr->rx_next_cons != 0xffff) |
---|
| 1786 | + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", |
---|
| 1787 | + cons, rxr->rx_next_cons); |
---|
1560 | 1788 | bnxt_sched_reset(bp, rxr); |
---|
1561 | | - return rc1; |
---|
| 1789 | + if (rc1) |
---|
| 1790 | + return rc1; |
---|
| 1791 | + goto next_rx_no_prod_no_len; |
---|
1562 | 1792 | } |
---|
1563 | 1793 | rx_buf = &rxr->rx_buf_ring[cons]; |
---|
1564 | 1794 | data = rx_buf->data; |
---|
.. | .. |
---|
1583 | 1813 | |
---|
1584 | 1814 | bnxt_reuse_rx_data(rxr, cons, data); |
---|
1585 | 1815 | if (agg_bufs) |
---|
1586 | | - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); |
---|
| 1816 | + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, |
---|
| 1817 | + false); |
---|
1587 | 1818 | |
---|
1588 | 1819 | rc = -EIO; |
---|
1589 | 1820 | if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { |
---|
1590 | | - netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); |
---|
1591 | | - bnxt_sched_reset(bp, rxr); |
---|
| 1821 | + bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; |
---|
| 1822 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5) && |
---|
| 1823 | + !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { |
---|
| 1824 | + netdev_warn_once(bp->dev, "RX buffer error %x\n", |
---|
| 1825 | + rx_err); |
---|
| 1826 | + bnxt_sched_reset(bp, rxr); |
---|
| 1827 | + } |
---|
1592 | 1828 | } |
---|
1593 | 1829 | goto next_rx_no_len; |
---|
1594 | 1830 | } |
---|
.. | .. |
---|
1606 | 1842 | bnxt_reuse_rx_data(rxr, cons, data); |
---|
1607 | 1843 | if (!skb) { |
---|
1608 | 1844 | if (agg_bufs) |
---|
1609 | | - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); |
---|
| 1845 | + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, |
---|
| 1846 | + agg_bufs, false); |
---|
1610 | 1847 | rc = -ENOMEM; |
---|
1611 | 1848 | goto next_rx; |
---|
1612 | 1849 | } |
---|
.. | .. |
---|
1626 | 1863 | } |
---|
1627 | 1864 | |
---|
1628 | 1865 | if (agg_bufs) { |
---|
1629 | | - skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); |
---|
| 1866 | + skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); |
---|
1630 | 1867 | if (!skb) { |
---|
1631 | 1868 | rc = -ENOMEM; |
---|
1632 | 1869 | goto next_rx; |
---|
.. | .. |
---|
1648 | 1885 | |
---|
1649 | 1886 | if ((rxcmp1->rx_cmp_flags2 & |
---|
1650 | 1887 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && |
---|
1651 | | - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
---|
| 1888 | + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { |
---|
1652 | 1889 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); |
---|
1653 | 1890 | u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; |
---|
1654 | | - u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; |
---|
| 1891 | + __be16 vlan_proto = htons(meta_data >> |
---|
| 1892 | + RX_CMP_FLAGS2_METADATA_TPID_SFT); |
---|
1655 | 1893 | |
---|
1656 | | - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
---|
| 1894 | + if (eth_type_vlan(vlan_proto)) { |
---|
| 1895 | + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); |
---|
| 1896 | + } else { |
---|
| 1897 | + dev_kfree_skb(skb); |
---|
| 1898 | + goto next_rx; |
---|
| 1899 | + } |
---|
1657 | 1900 | } |
---|
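
Note: the TPID pulled out of the completion metadata is hardware-supplied, so it is now validated with eth_type_vlan() — only 802.1Q and 802.1ad TPIDs are accepted before the tag is offloaded, and anything else drops the packet. A runnable user-space rendition of the check:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_8021Q  0x8100
    #define ETH_P_8021AD 0x88A8

    /* accept only the two standard VLAN TPIDs before trusting
     * hardware-parsed tag metadata
     */
    static bool tpid_is_vlan(uint16_t proto_be)
    {
            uint16_t proto = ntohs(proto_be);

            return proto == ETH_P_8021Q || proto == ETH_P_8021AD;
    }

    int main(void)
    {
            printf("%d %d\n", tpid_is_vlan(htons(0x8100)),
                   tpid_is_vlan(htons(0x1234)));   /* prints: 1 0 */
            return 0;
    }
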
1658 | 1901 | |
---|
1659 | 1902 | skb_checksum_none_assert(skb); |
---|
.. | .. |
---|
1665 | 1908 | } else { |
---|
1666 | 1909 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { |
---|
1667 | 1910 | if (dev->features & NETIF_F_RXCSUM) |
---|
1668 | | - cpr->rx_l4_csum_errors++; |
---|
| 1911 | + bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; |
---|
1669 | 1912 | } |
---|
1670 | 1913 | } |
---|
1671 | 1914 | |
---|
.. | .. |
---|
1689 | 1932 | /* In netpoll mode, if we are using a combined completion ring, we need to |
---|
1690 | 1933 | * discard the rx packets and recycle the buffers. |
---|
1691 | 1934 | */ |
---|
1692 | | -static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, |
---|
| 1935 | +static int bnxt_force_rx_discard(struct bnxt *bp, |
---|
| 1936 | + struct bnxt_cp_ring_info *cpr, |
---|
1693 | 1937 | u32 *raw_cons, u8 *event) |
---|
1694 | 1938 | { |
---|
1695 | | - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
1696 | 1939 | u32 tmp_raw_cons = *raw_cons; |
---|
1697 | 1940 | struct rx_cmp_ext *rxcmp1; |
---|
1698 | 1941 | struct rx_cmp *rxcmp; |
---|
.. | .. |
---|
1711 | 1954 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) |
---|
1712 | 1955 | return -EBUSY; |
---|
1713 | 1956 | |
---|
| 1957 | + /* The valid test of the entry must be done first before |
---|
| 1958 | + * reading any further. |
---|
| 1959 | + */ |
---|
| 1960 | + dma_rmb(); |
---|
1714 | 1961 | cmp_type = RX_CMP_TYPE(rxcmp); |
---|
1715 | 1962 | if (cmp_type == CMP_TYPE_RX_L2_CMP) { |
---|
1716 | 1963 | rxcmp1->rx_cmp_cfa_code_errors_v2 |= |
---|
.. | .. |
---|
1722 | 1969 | tpa_end1->rx_tpa_end_cmp_errors_v2 |= |
---|
1723 | 1970 | cpu_to_le32(RX_TPA_END_CMP_ERRORS); |
---|
1724 | 1971 | } |
---|
1725 | | - return bnxt_rx_pkt(bp, bnapi, raw_cons, event); |
---|
| 1972 | + return bnxt_rx_pkt(bp, cpr, raw_cons, event); |
---|
| 1973 | +} |
---|
| 1974 | + |
---|
| 1975 | +u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) |
---|
| 1976 | +{ |
---|
| 1977 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 1978 | + u32 reg = fw_health->regs[reg_idx]; |
---|
| 1979 | + u32 reg_type, reg_off, val = 0; |
---|
| 1980 | + |
---|
| 1981 | + reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); |
---|
| 1982 | + reg_off = BNXT_FW_HEALTH_REG_OFF(reg); |
---|
| 1983 | + switch (reg_type) { |
---|
| 1984 | + case BNXT_FW_HEALTH_REG_TYPE_CFG: |
---|
| 1985 | + pci_read_config_dword(bp->pdev, reg_off, &val); |
---|
| 1986 | + break; |
---|
| 1987 | + case BNXT_FW_HEALTH_REG_TYPE_GRC: |
---|
| 1988 | + reg_off = fw_health->mapped_regs[reg_idx]; |
---|
| 1989 | + fallthrough; |
---|
| 1990 | + case BNXT_FW_HEALTH_REG_TYPE_BAR0: |
---|
| 1991 | + val = readl(bp->bar0 + reg_off); |
---|
| 1992 | + break; |
---|
| 1993 | + case BNXT_FW_HEALTH_REG_TYPE_BAR1: |
---|
| 1994 | + val = readl(bp->bar1 + reg_off); |
---|
| 1995 | + break; |
---|
| 1996 | + } |
---|
| 1997 | + if (reg_idx == BNXT_FW_RESET_INPROG_REG) |
---|
| 1998 | + val &= fw_health->fw_reset_inprog_reg_mask; |
---|
| 1999 | + return val; |
---|
| 2000 | +} |
---|
| 2001 | + |
---|
| 2002 | +static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) |
---|
| 2003 | +{ |
---|
| 2004 | + int i; |
---|
| 2005 | + |
---|
| 2006 | + for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
| 2007 | + u16 grp_idx = bp->rx_ring[i].bnapi->index; |
---|
| 2008 | + struct bnxt_ring_grp_info *grp_info; |
---|
| 2009 | + |
---|
| 2010 | + grp_info = &bp->grp_info[grp_idx]; |
---|
| 2011 | + if (grp_info->agg_fw_ring_id == ring_id) |
---|
| 2012 | + return grp_idx; |
---|
| 2013 | + } |
---|
| 2014 | + return INVALID_HW_RING_ID; |
---|
1726 | 2015 | } |
---|
1727 | 2016 | |
---|
1728 | 2017 | #define BNXT_GET_EVENT_PORT(data) \ |
---|
1729 | 2018 | ((data) & \ |
---|
1730 | 2019 | ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) |
---|
1731 | 2020 | |
---|
| 2021 | +#define BNXT_EVENT_RING_TYPE(data2) \ |
---|
| 2022 | + ((data2) & \ |
---|
| 2023 | + ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) |
---|
| 2024 | + |
---|
| 2025 | +#define BNXT_EVENT_RING_TYPE_RX(data2) \ |
---|
| 2026 | + (BNXT_EVENT_RING_TYPE(data2) == \ |
---|
| 2027 | + ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) |
---|
| 2028 | + |
---|
1732 | 2029 | static int bnxt_async_event_process(struct bnxt *bp, |
---|
1733 | 2030 | struct hwrm_async_event_cmpl *cmpl) |
---|
1734 | 2031 | { |
---|
1735 | 2032 | u16 event_id = le16_to_cpu(cmpl->event_id); |
---|
| 2033 | + u32 data1 = le32_to_cpu(cmpl->event_data1); |
---|
| 2034 | + u32 data2 = le32_to_cpu(cmpl->event_data2); |
---|
1736 | 2035 | |
---|
1737 | 2036 | /* TODO CHIMP_FW: Define event id's for link change, error etc */ |
---|
1738 | 2037 | switch (event_id) { |
---|
1739 | 2038 | case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { |
---|
1740 | | - u32 data1 = le32_to_cpu(cmpl->event_data1); |
---|
1741 | 2039 | struct bnxt_link_info *link_info = &bp->link_info; |
---|
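
Note: bnxt_fw_health_readl() treats each firmware health register as a type-tagged handle — a single u32 encodes both the address space (PCI config, GRC window, BAR0, BAR1) and the offset, and the switch dispatches the access accordingly. A runnable sketch of that encoding; the low-2-bit split is an assumption for illustration, the authoritative macros being BNXT_FW_HEALTH_REG_TYPE()/_OFF() in bnxt.h:

    #include <stdint.h>
    #include <stdio.h>

    enum reg_space { REG_PCI_CFG, REG_GRC, REG_BAR0, REG_BAR1 };

    #define REG_TYPE(reg) ((reg) & 3u)  /* assumed: type in low 2 bits */
    #define REG_OFF(reg)  ((reg) & ~3u) /* offset is 4-byte aligned */

    static void decode(uint32_t reg)
    {
            static const char * const space[] = { "cfg", "grc", "bar0", "bar1" };

            printf("space=%s off=%#x\n", space[REG_TYPE(reg)], REG_OFF(reg));
    }

    int main(void)
    {
            decode(0x300u | REG_BAR0);   /* BAR0 + 0x300 */
            decode(0x48u | REG_PCI_CFG); /* config-space dword at 0x48 */
            return 0;
    }
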
1742 | 2040 | |
---|
1743 | 2041 | if (BNXT_VF(bp)) |
---|
.. | .. |
---|
1755 | 2053 | } |
---|
1756 | 2054 | set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); |
---|
1757 | 2055 | } |
---|
1758 | | - /* fall through */ |
---|
| 2056 | + fallthrough; |
---|
| 2057 | + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: |
---|
| 2058 | + case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: |
---|
| 2059 | + set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); |
---|
| 2060 | + fallthrough; |
---|
1759 | 2061 | case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: |
---|
1760 | 2062 | set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); |
---|
1761 | 2063 | break; |
---|
.. | .. |
---|
1763 | 2065 | set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); |
---|
1764 | 2066 | break; |
---|
1765 | 2067 | case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { |
---|
1766 | | - u32 data1 = le32_to_cpu(cmpl->event_data1); |
---|
1767 | 2068 | u16 port_id = BNXT_GET_EVENT_PORT(data1); |
---|
1768 | 2069 | |
---|
1769 | 2070 | if (BNXT_VF(bp)) |
---|
.. | .. |
---|
1780 | 2081 | goto async_event_process_exit; |
---|
1781 | 2082 | set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); |
---|
1782 | 2083 | break; |
---|
| 2084 | + case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { |
---|
| 2085 | + char *fatal_str = "non-fatal"; |
---|
| 2086 | + |
---|
| 2087 | + if (!bp->fw_health) |
---|
| 2088 | + goto async_event_process_exit; |
---|
| 2089 | + |
---|
| 2090 | + bp->fw_reset_timestamp = jiffies; |
---|
| 2091 | + bp->fw_reset_min_dsecs = cmpl->timestamp_lo; |
---|
| 2092 | + if (!bp->fw_reset_min_dsecs) |
---|
| 2093 | + bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; |
---|
| 2094 | + bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); |
---|
| 2095 | + if (!bp->fw_reset_max_dsecs) |
---|
| 2096 | + bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; |
---|
| 2097 | + if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { |
---|
| 2098 | + fatal_str = "fatal"; |
---|
| 2099 | + set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); |
---|
| 2100 | + } |
---|
| 2101 | + netif_warn(bp, hw, bp->dev, |
---|
| 2102 | + "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", |
---|
| 2103 | + fatal_str, data1, data2, |
---|
| 2104 | + bp->fw_reset_min_dsecs * 100, |
---|
| 2105 | + bp->fw_reset_max_dsecs * 100); |
---|
| 2106 | + set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); |
---|
| 2107 | + break; |
---|
| 2108 | + } |
---|
| 2109 | + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { |
---|
| 2110 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 2111 | + |
---|
| 2112 | + if (!fw_health) |
---|
| 2113 | + goto async_event_process_exit; |
---|
| 2114 | + |
---|
| 2115 | + if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { |
---|
| 2116 | + fw_health->enabled = false; |
---|
| 2117 | + netif_info(bp, drv, bp->dev, |
---|
| 2118 | + "Error recovery info: error recovery[0]\n"); |
---|
| 2119 | + break; |
---|
| 2120 | + } |
---|
| 2121 | + fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); |
---|
| 2122 | + fw_health->tmr_multiplier = |
---|
| 2123 | + DIV_ROUND_UP(fw_health->polling_dsecs * HZ, |
---|
| 2124 | + bp->current_interval * 10); |
---|
| 2125 | + fw_health->tmr_counter = fw_health->tmr_multiplier; |
---|
| 2126 | + if (!fw_health->enabled) |
---|
| 2127 | + fw_health->last_fw_heartbeat = |
---|
| 2128 | + bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
---|
| 2129 | + fw_health->last_fw_reset_cnt = |
---|
| 2130 | + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
---|
| 2131 | + netif_info(bp, drv, bp->dev, |
---|
| 2132 | + "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n", |
---|
| 2133 | + fw_health->master, fw_health->last_fw_reset_cnt, |
---|
| 2134 | + bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG)); |
---|
| 2135 | + if (!fw_health->enabled) { |
---|
| 2136 | + /* Make sure tmr_counter is set and visible to |
---|
| 2137 | + * bnxt_health_check() before setting enabled to true. |
---|
| 2138 | + */ |
---|
| 2139 | + smp_wmb(); |
---|
| 2140 | + fw_health->enabled = true; |
---|
| 2141 | + } |
---|
| 2142 | + goto async_event_process_exit; |
---|
| 2143 | + } |
---|
| 2144 | + case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: |
---|
| 2145 | + netif_notice(bp, hw, bp->dev, |
---|
| 2146 | + "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", |
---|
| 2147 | + data1, data2); |
---|
| 2148 | + goto async_event_process_exit; |
---|
| 2149 | + case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { |
---|
| 2150 | + struct bnxt_rx_ring_info *rxr; |
---|
| 2151 | + u16 grp_idx; |
---|
| 2152 | + |
---|
| 2153 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 2154 | + goto async_event_process_exit; |
---|
| 2155 | + |
---|
| 2156 | + netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", |
---|
| 2157 | + BNXT_EVENT_RING_TYPE(data2), data1); |
---|
| 2158 | + if (!BNXT_EVENT_RING_TYPE_RX(data2)) |
---|
| 2159 | + goto async_event_process_exit; |
---|
| 2160 | + |
---|
| 2161 | + grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); |
---|
| 2162 | + if (grp_idx == INVALID_HW_RING_ID) { |
---|
| 2163 | + netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", |
---|
| 2164 | + data1); |
---|
| 2165 | + goto async_event_process_exit; |
---|
| 2166 | + } |
---|
| 2167 | + rxr = bp->bnapi[grp_idx]->rx_ring; |
---|
| 2168 | + bnxt_sched_reset(bp, rxr); |
---|
| 2169 | + goto async_event_process_exit; |
---|
| 2170 | + } |
---|
1783 | 2171 | default: |
---|
1784 | 2172 | goto async_event_process_exit; |
---|
1785 | 2173 | } |
---|
.. | .. |
---|
1800 | 2188 | case CMPL_BASE_TYPE_HWRM_DONE: |
---|
1801 | 2189 | seq_id = le16_to_cpu(h_cmpl->sequence_id); |
---|
1802 | 2190 | if (seq_id == bp->hwrm_intr_seq_id) |
---|
1803 | | - bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; |
---|
| 2191 | + bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; |
---|
1804 | 2192 | else |
---|
1805 | 2193 | netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); |
---|
1806 | 2194 | break; |
---|
.. | .. |
---|
1873 | 2261 | } |
---|
1874 | 2262 | |
---|
1875 | 2263 | /* disable ring IRQ */ |
---|
1876 | | - BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell); |
---|
| 2264 | + BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); |
---|
1877 | 2265 | |
---|
1878 | 2266 | /* Return here if interrupt is shared and is disabled. */ |
---|
1879 | 2267 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
---|
.. | .. |
---|
1883 | 2271 | return IRQ_HANDLED; |
---|
1884 | 2272 | } |
---|
1885 | 2273 | |
---|
1886 | | -static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) |
---|
| 2274 | +static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
---|
| 2275 | + int budget) |
---|
1887 | 2276 | { |
---|
1888 | | - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
| 2277 | + struct bnxt_napi *bnapi = cpr->bnapi; |
---|
1889 | 2278 | u32 raw_cons = cpr->cp_raw_cons; |
---|
1890 | 2279 | u32 cons; |
---|
1891 | 2280 | int tx_pkts = 0; |
---|
.. | .. |
---|
1893 | 2282 | u8 event = 0; |
---|
1894 | 2283 | struct tx_cmp *txcmp; |
---|
1895 | 2284 | |
---|
| 2285 | + cpr->has_more_work = 0; |
---|
| 2286 | + cpr->had_work_done = 1; |
---|
1896 | 2287 | while (1) { |
---|
1897 | 2288 | int rc; |
---|
1898 | 2289 | |
---|
.. | .. |
---|
1912 | 2303 | if (unlikely(tx_pkts > bp->tx_wake_thresh)) { |
---|
1913 | 2304 | rx_pkts = budget; |
---|
1914 | 2305 | raw_cons = NEXT_RAW_CMP(raw_cons); |
---|
| 2306 | + if (budget) |
---|
| 2307 | + cpr->has_more_work = 1; |
---|
1915 | 2308 | break; |
---|
1916 | 2309 | } |
---|
1917 | 2310 | } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { |
---|
1918 | 2311 | if (likely(budget)) |
---|
1919 | | - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); |
---|
| 2312 | + rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); |
---|
1920 | 2313 | else |
---|
1921 | | - rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, |
---|
| 2314 | + rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, |
---|
1922 | 2315 | &event); |
---|
1923 | 2316 | if (likely(rc >= 0)) |
---|
1924 | 2317 | rx_pkts += rc; |
---|
.. | .. |
---|
1941 | 2334 | } |
---|
1942 | 2335 | raw_cons = NEXT_RAW_CMP(raw_cons); |
---|
1943 | 2336 | |
---|
1944 | | - if (rx_pkts && rx_pkts == budget) |
---|
| 2337 | + if (rx_pkts && rx_pkts == budget) { |
---|
| 2338 | + cpr->has_more_work = 1; |
---|
1945 | 2339 | break; |
---|
| 2340 | + } |
---|
1946 | 2341 | } |
---|
| 2342 | + |
---|
| 2343 | + if (event & BNXT_REDIRECT_EVENT) |
---|
| 2344 | + xdp_do_flush_map(); |
---|
1947 | 2345 | |
---|
1948 | 2346 | if (event & BNXT_TX_EVENT) { |
---|
1949 | 2347 | struct bnxt_tx_ring_info *txr = bnapi->tx_ring; |
---|
1950 | | - void __iomem *db = txr->tx_doorbell; |
---|
1951 | 2348 | u16 prod = txr->tx_prod; |
---|
1952 | 2349 | |
---|
1953 | 2350 | /* Sync BD data before updating doorbell */ |
---|
1954 | 2351 | wmb(); |
---|
1955 | 2352 | |
---|
1956 | | - bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod); |
---|
| 2353 | + bnxt_db_write_relaxed(bp, &txr->tx_db, prod); |
---|
1957 | 2354 | } |
---|
1958 | 2355 | |
---|
1959 | 2356 | cpr->cp_raw_cons = raw_cons; |
---|
| 2357 | + bnapi->tx_pkts += tx_pkts; |
---|
| 2358 | + bnapi->events |= event; |
---|
| 2359 | + return rx_pkts; |
---|
| 2360 | +} |
---|
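__bnxt_poll_work() stops as soon as the NAPI budget is consumed and records has_more_work so the caller keeps the ring scheduled instead of re-arming the interrupt. A toy model of that accounting, with made-up names standing in for the completion-ring state:

```c
#include <stdio.h>
#include <stdbool.h>

struct cq_state {
	int pending;        /* completions still waiting on the ring */
	bool has_more_work; /* set when the loop stops early */
};

/* Consume up to `budget` completions; report leftover work. */
static int poll_work(struct cq_state *cq, int budget)
{
	int done = 0;

	cq->has_more_work = false;
	while (cq->pending > 0 && done < budget) {
		cq->pending--;
		done++;
	}
	if (cq->pending > 0)
		cq->has_more_work = true;
	return done;
}

int main(void)
{
	struct cq_state cq = { .pending = 100 };

	int done = poll_work(&cq, 64);
	printf("done=%d more=%d\n", done, cq.has_more_work); /* 64 1 */
	return 0;
}
```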
| 2361 | + |
---|
| 2362 | +static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) |
---|
| 2363 | +{ |
---|
| 2364 | + if (bnapi->tx_pkts) { |
---|
| 2365 | + bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); |
---|
| 2366 | + bnapi->tx_pkts = 0; |
---|
| 2367 | + } |
---|
| 2368 | + |
---|
| 2369 | + if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { |
---|
| 2370 | + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
---|
| 2371 | + |
---|
| 2372 | + if (bnapi->events & BNXT_AGG_EVENT) |
---|
| 2373 | + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); |
---|
| 2374 | + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); |
---|
| 2375 | + } |
---|
| 2376 | + bnapi->events = 0; |
---|
| 2377 | +} |
---|
| 2378 | + |
---|
| 2379 | +static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
---|
| 2380 | + int budget) |
---|
| 2381 | +{ |
---|
| 2382 | + struct bnxt_napi *bnapi = cpr->bnapi; |
---|
| 2383 | + int rx_pkts; |
---|
| 2384 | + |
---|
| 2385 | + rx_pkts = __bnxt_poll_work(bp, cpr, budget); |
---|
| 2386 | + |
---|
1960 | 2387 | /* ACK completion ring before freeing tx ring and producing new |
---|
1961 | 2388 | * buffers in rx/agg rings to prevent overflowing the completion |
---|
1962 | 2389 | * ring. |
---|
1963 | 2390 | */ |
---|
1964 | | - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); |
---|
| 2391 | + bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); |
---|
1965 | 2392 | |
---|
1966 | | - if (tx_pkts) |
---|
1967 | | - bnapi->tx_int(bp, bnapi, tx_pkts); |
---|
1968 | | - |
---|
1969 | | - if (event & BNXT_RX_EVENT) { |
---|
1970 | | - struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
---|
1971 | | - |
---|
1972 | | - bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); |
---|
1973 | | - if (event & BNXT_AGG_EVENT) |
---|
1974 | | - bnxt_db_write(bp, rxr->rx_agg_doorbell, |
---|
1975 | | - DB_KEY_RX | rxr->rx_agg_prod); |
---|
1976 | | - } |
---|
| 2393 | + __bnxt_poll_work_done(bp, bnapi); |
---|
1977 | 2394 | return rx_pkts; |
---|
1978 | 2395 | } |
---|
1979 | 2396 | |
---|
.. | .. |
---|
1987 | 2404 | struct rx_cmp_ext *rxcmp1; |
---|
1988 | 2405 | u32 cp_cons, tmp_raw_cons; |
---|
1989 | 2406 | u32 raw_cons = cpr->cp_raw_cons; |
---|
| 2407 | + bool flush_xdp = false; |
---|
1990 | 2408 | u32 rx_pkts = 0; |
---|
1991 | 2409 | u8 event = 0; |
---|
1992 | 2410 | |
---|
.. | .. |
---|
1999 | 2417 | if (!TX_CMP_VALID(txcmp, raw_cons)) |
---|
2000 | 2418 | break; |
---|
2001 | 2419 | |
---|
| 2420 | + /* The valid test of the entry must be done first before |
---|
| 2421 | + * reading any further. |
---|
| 2422 | + */ |
---|
| 2423 | + dma_rmb(); |
---|
2002 | 2424 | if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { |
---|
2003 | 2425 | tmp_raw_cons = NEXT_RAW_CMP(raw_cons); |
---|
2004 | 2426 | cp_cons = RING_CMP(tmp_raw_cons); |
---|
.. | .. |
---|
2012 | 2434 | rxcmp1->rx_cmp_cfa_code_errors_v2 |= |
---|
2013 | 2435 | cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); |
---|
2014 | 2436 | |
---|
2015 | | - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); |
---|
| 2437 | + rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); |
---|
2016 | 2438 | if (likely(rc == -EIO) && budget) |
---|
2017 | 2439 | rx_pkts++; |
---|
2018 | 2440 | else if (rc == -EBUSY) /* partial completion */ |
---|
2019 | 2441 | break; |
---|
| 2442 | + if (event & BNXT_REDIRECT_EVENT) |
---|
| 2443 | + flush_xdp = true; |
---|
2020 | 2444 | } else if (unlikely(TX_CMP_TYPE(txcmp) == |
---|
2021 | 2445 | CMPL_BASE_TYPE_HWRM_DONE)) { |
---|
2022 | 2446 | bnxt_hwrm_handler(bp, txcmp); |
---|
.. | .. |
---|
2031 | 2455 | } |
---|
2032 | 2456 | |
---|
2033 | 2457 | cpr->cp_raw_cons = raw_cons; |
---|
2034 | | - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); |
---|
2035 | | - bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); |
---|
| 2458 | + BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); |
---|
| 2459 | + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); |
---|
2036 | 2460 | |
---|
2037 | 2461 | if (event & BNXT_AGG_EVENT) |
---|
2038 | | - bnxt_db_write(bp, rxr->rx_agg_doorbell, |
---|
2039 | | - DB_KEY_RX | rxr->rx_agg_prod); |
---|
| 2462 | + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); |
---|
| 2463 | + if (flush_xdp) |
---|
| 2464 | + xdp_do_flush(); |
---|
2040 | 2465 | |
---|
2041 | 2466 | if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { |
---|
2042 | 2467 | napi_complete_done(napi, rx_pkts); |
---|
2043 | | - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); |
---|
| 2468 | + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
---|
2044 | 2469 | } |
---|
2045 | 2470 | return rx_pkts; |
---|
2046 | 2471 | } |
---|
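Both polling loops follow the discipline the comment above describes: test the valid bit first, then issue dma_rmb() before touching the rest of the entry, so the CPU cannot speculatively read stale descriptor fields. A userspace analogue using a C11 acquire fence in place of dma_rmb() (illustrative only; the real barrier orders CPU reads against device DMA writes):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cmpl {
	_Atomic unsigned int flags; /* producer sets the valid bit last */
	unsigned int payload;
};

/* Check the valid bit, fence, then read the payload. */
static bool read_cmpl(struct cmpl *c, unsigned int *out)
{
	if (!(atomic_load_explicit(&c->flags, memory_order_relaxed) & 1))
		return false;
	atomic_thread_fence(memory_order_acquire); /* plays dma_rmb() */
	*out = c->payload;
	return true;
}

int main(void)
{
	struct cmpl c = { .payload = 42 };
	unsigned int v;

	atomic_store_explicit(&c.flags, 1, memory_order_release);
	if (read_cmpl(&c, &v))
		printf("payload %u\n", v);
	return 0;
}
```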
.. | .. |
---|
2053 | 2478 | int work_done = 0; |
---|
2054 | 2479 | |
---|
2055 | 2480 | while (1) { |
---|
2056 | | - work_done += bnxt_poll_work(bp, bnapi, budget - work_done); |
---|
| 2481 | + work_done += bnxt_poll_work(bp, cpr, budget - work_done); |
---|
2057 | 2482 | |
---|
2058 | 2483 | if (work_done >= budget) { |
---|
2059 | 2484 | if (!budget) |
---|
2060 | | - BNXT_CP_DB_REARM(cpr->cp_doorbell, |
---|
2061 | | - cpr->cp_raw_cons); |
---|
| 2485 | + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
---|
2062 | 2486 | break; |
---|
2063 | 2487 | } |
---|
2064 | 2488 | |
---|
2065 | 2489 | if (!bnxt_has_work(bp, cpr)) { |
---|
2066 | 2490 | if (napi_complete_done(napi, work_done)) |
---|
2067 | | - BNXT_CP_DB_REARM(cpr->cp_doorbell, |
---|
2068 | | - cpr->cp_raw_cons); |
---|
| 2491 | + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
---|
2069 | 2492 | break; |
---|
2070 | 2493 | } |
---|
2071 | 2494 | } |
---|
2072 | 2495 | if (bp->flags & BNXT_FLAG_DIM) { |
---|
2073 | | - struct net_dim_sample dim_sample; |
---|
| 2496 | + struct dim_sample dim_sample = {}; |
---|
2074 | 2497 | |
---|
2075 | | - net_dim_sample(cpr->event_ctr, |
---|
2076 | | - cpr->rx_packets, |
---|
2077 | | - cpr->rx_bytes, |
---|
2078 | | - &dim_sample); |
---|
| 2498 | + dim_update_sample(cpr->event_ctr, |
---|
| 2499 | + cpr->rx_packets, |
---|
| 2500 | + cpr->rx_bytes, |
---|
| 2501 | + &dim_sample); |
---|
2079 | 2502 | net_dim(&cpr->dim, dim_sample); |
---|
2080 | 2503 | } |
---|
2081 | | - mmiowb(); |
---|
| 2504 | + return work_done; |
---|
| 2505 | +} |
---|
| 2506 | + |
---|
| 2507 | +static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) |
---|
| 2508 | +{ |
---|
| 2509 | + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
| 2510 | + int i, work_done = 0; |
---|
| 2511 | + |
---|
| 2512 | + for (i = 0; i < 2; i++) { |
---|
| 2513 | + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; |
---|
| 2514 | + |
---|
| 2515 | + if (cpr2) { |
---|
| 2516 | + work_done += __bnxt_poll_work(bp, cpr2, |
---|
| 2517 | + budget - work_done); |
---|
| 2518 | + cpr->has_more_work |= cpr2->has_more_work; |
---|
| 2519 | + } |
---|
| 2520 | + } |
---|
| 2521 | + return work_done; |
---|
| 2522 | +} |
---|
| 2523 | + |
---|
| 2524 | +static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, |
---|
| 2525 | + u64 dbr_type) |
---|
| 2526 | +{ |
---|
| 2527 | + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
| 2528 | + int i; |
---|
| 2529 | + |
---|
| 2530 | + for (i = 0; i < 2; i++) { |
---|
| 2531 | + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; |
---|
| 2532 | + struct bnxt_db_info *db; |
---|
| 2533 | + |
---|
| 2534 | + if (cpr2 && cpr2->had_work_done) { |
---|
| 2535 | + db = &cpr2->cp_db; |
---|
| 2536 | + writeq(db->db_key64 | dbr_type | |
---|
| 2537 | + RING_CMP(cpr2->cp_raw_cons), db->doorbell); |
---|
| 2538 | + cpr2->had_work_done = 0; |
---|
| 2539 | + } |
---|
| 2540 | + } |
---|
| 2541 | + __bnxt_poll_work_done(bp, bnapi); |
---|
| 2542 | +} |
---|
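__bnxt_poll_cqs_done() builds the 64-bit doorbell value by OR-ing the per-ring key, the doorbell type (DBR_TYPE_CQ vs DBR_TYPE_CQ_ARMALL), and the wrapped consumer index. A sketch of that composition with placeholder bit positions (the real DBR_* layout is not reproduced here):

```c
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 1024u                         /* entries, power of two */
#define RING_CMP(raw) ((raw) & (RING_SIZE - 1)) /* wrapped index */

int main(void)
{
	uint64_t db_key64 = 0x1234ULL << 32; /* hypothetical ring key */
	uint64_t dbr_type = 1ULL << 28;      /* hypothetical type bits */
	uint32_t raw_cons = 1500;            /* unwrapped consumer */

	uint64_t db = db_key64 | dbr_type | RING_CMP(raw_cons);

	printf("doorbell=0x%llx cons=%u\n",
	       (unsigned long long)db, RING_CMP(raw_cons));
	return 0;
}
```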
| 2543 | + |
---|
| 2544 | +static int bnxt_poll_p5(struct napi_struct *napi, int budget) |
---|
| 2545 | +{ |
---|
| 2546 | + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); |
---|
| 2547 | + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
| 2548 | + u32 raw_cons = cpr->cp_raw_cons; |
---|
| 2549 | + struct bnxt *bp = bnapi->bp; |
---|
| 2550 | + struct nqe_cn *nqcmp; |
---|
| 2551 | + int work_done = 0; |
---|
| 2552 | + u32 cons; |
---|
| 2553 | + |
---|
| 2554 | + if (cpr->has_more_work) { |
---|
| 2555 | + cpr->has_more_work = 0; |
---|
| 2556 | + work_done = __bnxt_poll_cqs(bp, bnapi, budget); |
---|
| 2557 | + } |
---|
| 2558 | + while (1) { |
---|
| 2559 | + cons = RING_CMP(raw_cons); |
---|
| 2560 | + nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; |
---|
| 2561 | + |
---|
| 2562 | + if (!NQ_CMP_VALID(nqcmp, raw_cons)) { |
---|
| 2563 | + if (cpr->has_more_work) |
---|
| 2564 | + break; |
---|
| 2565 | + |
---|
| 2566 | + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); |
---|
| 2567 | + cpr->cp_raw_cons = raw_cons; |
---|
| 2568 | + if (napi_complete_done(napi, work_done)) |
---|
| 2569 | + BNXT_DB_NQ_ARM_P5(&cpr->cp_db, |
---|
| 2570 | + cpr->cp_raw_cons); |
---|
| 2571 | + return work_done; |
---|
| 2572 | + } |
---|
| 2573 | + |
---|
| 2574 | + /* The valid test of the entry must be done first before |
---|
| 2575 | + * reading any further. |
---|
| 2576 | + */ |
---|
| 2577 | + dma_rmb(); |
---|
| 2578 | + |
---|
| 2579 | + if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { |
---|
| 2580 | + u32 idx = le32_to_cpu(nqcmp->cq_handle_low); |
---|
| 2581 | + struct bnxt_cp_ring_info *cpr2; |
---|
| 2582 | + |
---|
| 2583 | + /* No more budget for RX work */ |
---|
| 2584 | + if (budget && work_done >= budget && idx == BNXT_RX_HDL) |
---|
| 2585 | + break; |
---|
| 2586 | + |
---|
| 2587 | + cpr2 = cpr->cp_ring_arr[idx]; |
---|
| 2588 | + work_done += __bnxt_poll_work(bp, cpr2, |
---|
| 2589 | + budget - work_done); |
---|
| 2590 | + cpr->has_more_work |= cpr2->has_more_work; |
---|
| 2591 | + } else { |
---|
| 2592 | + bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); |
---|
| 2593 | + } |
---|
| 2594 | + raw_cons = NEXT_RAW_CMP(raw_cons); |
---|
| 2595 | + } |
---|
| 2596 | + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); |
---|
| 2597 | + if (raw_cons != cpr->cp_raw_cons) { |
---|
| 2598 | + cpr->cp_raw_cons = raw_cons; |
---|
| 2599 | + BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); |
---|
| 2600 | + } |
---|
2082 | 2601 | return work_done; |
---|
2083 | 2602 | } |
---|
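The NQ_CMP_VALID()-style checks used by bnxt_poll_p5() and the other loops compare an entry's V bit against the phase implied by the raw, unwrapped consumer index; the producer flips the bit it writes on every trip around the ring, so stale entries from the previous pass fail the test. A userspace model of that check (RING_SIZE doubles as the phase bit, playing the role of bp->cp_bit):

```c
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8u /* entries; doubles as the phase bit (cp_bit) */

/* Entry is fresh when its V bit matches the phase of raw_cons. */
static bool cmp_valid(unsigned int v_bit, unsigned int raw_cons)
{
	return !!v_bit == !(raw_cons & RING_SIZE);
}

int main(void)
{
	/* pass 1 (raw_cons 0..7): producer writes V=1 */
	printf("%d\n", cmp_valid(1, 3));  /* 1: valid */
	/* pass 2 (raw_cons 8..15): producer now writes V=0 */
	printf("%d\n", cmp_valid(1, 11)); /* 0: stale pass-1 entry */
	return 0;
}
```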
2084 | 2603 | |
---|
.. | .. |
---|
2095 | 2614 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
---|
2096 | 2615 | int j; |
---|
2097 | 2616 | |
---|
| 2617 | + if (!txr->tx_buf_ring) |
---|
| 2618 | + continue; |
---|
| 2619 | + |
---|
2098 | 2620 | for (j = 0; j < max_idx;) { |
---|
2099 | 2621 | struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; |
---|
2100 | | - struct sk_buff *skb = tx_buf->skb; |
---|
| 2622 | + struct sk_buff *skb; |
---|
2101 | 2623 | int k, last; |
---|
2102 | 2624 | |
---|
| 2625 | + if (i < bp->tx_nr_rings_xdp && |
---|
| 2626 | + tx_buf->action == XDP_REDIRECT) { |
---|
| 2627 | + dma_unmap_single(&pdev->dev, |
---|
| 2628 | + dma_unmap_addr(tx_buf, mapping), |
---|
| 2629 | + dma_unmap_len(tx_buf, len), |
---|
| 2630 | + PCI_DMA_TODEVICE); |
---|
| 2631 | + xdp_return_frame(tx_buf->xdpf); |
---|
| 2632 | + tx_buf->action = 0; |
---|
| 2633 | + tx_buf->xdpf = NULL; |
---|
| 2634 | + j++; |
---|
| 2635 | + continue; |
---|
| 2636 | + } |
---|
| 2637 | + |
---|
| 2638 | + skb = tx_buf->skb; |
---|
2103 | 2639 | if (!skb) { |
---|
2104 | 2640 | j++; |
---|
2105 | 2641 | continue; |
---|
.. | .. |
---|
2136 | 2672 | } |
---|
2137 | 2673 | } |
---|
2138 | 2674 | |
---|
| 2675 | +static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) |
---|
| 2676 | +{ |
---|
| 2677 | + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; |
---|
| 2678 | + struct pci_dev *pdev = bp->pdev; |
---|
| 2679 | + struct bnxt_tpa_idx_map *map; |
---|
| 2680 | + int i, max_idx, max_agg_idx; |
---|
| 2681 | + |
---|
| 2682 | + max_idx = bp->rx_nr_pages * RX_DESC_CNT; |
---|
| 2683 | + max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; |
---|
| 2684 | + if (!rxr->rx_tpa) |
---|
| 2685 | + goto skip_rx_tpa_free; |
---|
| 2686 | + |
---|
| 2687 | + for (i = 0; i < bp->max_tpa; i++) { |
---|
| 2688 | + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; |
---|
| 2689 | + u8 *data = tpa_info->data; |
---|
| 2690 | + |
---|
| 2691 | + if (!data) |
---|
| 2692 | + continue; |
---|
| 2693 | + |
---|
| 2694 | + dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, |
---|
| 2695 | + bp->rx_buf_use_size, bp->rx_dir, |
---|
| 2696 | + DMA_ATTR_WEAK_ORDERING); |
---|
| 2697 | + |
---|
| 2698 | + tpa_info->data = NULL; |
---|
| 2699 | + |
---|
| 2700 | + kfree(data); |
---|
| 2701 | + } |
---|
| 2702 | + |
---|
| 2703 | +skip_rx_tpa_free: |
---|
| 2704 | + if (!rxr->rx_buf_ring) |
---|
| 2705 | + goto skip_rx_buf_free; |
---|
| 2706 | + |
---|
| 2707 | + for (i = 0; i < max_idx; i++) { |
---|
| 2708 | + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; |
---|
| 2709 | + dma_addr_t mapping = rx_buf->mapping; |
---|
| 2710 | + void *data = rx_buf->data; |
---|
| 2711 | + |
---|
| 2712 | + if (!data) |
---|
| 2713 | + continue; |
---|
| 2714 | + |
---|
| 2715 | + rx_buf->data = NULL; |
---|
| 2716 | + if (BNXT_RX_PAGE_MODE(bp)) { |
---|
| 2717 | + mapping -= bp->rx_dma_offset; |
---|
| 2718 | + dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, |
---|
| 2719 | + bp->rx_dir, |
---|
| 2720 | + DMA_ATTR_WEAK_ORDERING); |
---|
| 2721 | + page_pool_recycle_direct(rxr->page_pool, data); |
---|
| 2722 | + } else { |
---|
| 2723 | + dma_unmap_single_attrs(&pdev->dev, mapping, |
---|
| 2724 | + bp->rx_buf_use_size, bp->rx_dir, |
---|
| 2725 | + DMA_ATTR_WEAK_ORDERING); |
---|
| 2726 | + kfree(data); |
---|
| 2727 | + } |
---|
| 2728 | + } |
---|
| 2729 | + |
---|
| 2730 | +skip_rx_buf_free: |
---|
| 2731 | + if (!rxr->rx_agg_ring) |
---|
| 2732 | + goto skip_rx_agg_free; |
---|
| 2733 | + |
---|
| 2734 | + for (i = 0; i < max_agg_idx; i++) { |
---|
| 2735 | + struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; |
---|
| 2736 | + struct page *page = rx_agg_buf->page; |
---|
| 2737 | + |
---|
| 2738 | + if (!page) |
---|
| 2739 | + continue; |
---|
| 2740 | + |
---|
| 2741 | + dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, |
---|
| 2742 | + BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, |
---|
| 2743 | + DMA_ATTR_WEAK_ORDERING); |
---|
| 2744 | + |
---|
| 2745 | + rx_agg_buf->page = NULL; |
---|
| 2746 | + __clear_bit(i, rxr->rx_agg_bmap); |
---|
| 2747 | + |
---|
| 2748 | + __free_page(page); |
---|
| 2749 | + } |
---|
| 2750 | + |
---|
| 2751 | +skip_rx_agg_free: |
---|
| 2752 | + if (rxr->rx_page) { |
---|
| 2753 | + __free_page(rxr->rx_page); |
---|
| 2754 | + rxr->rx_page = NULL; |
---|
| 2755 | + } |
---|
| 2756 | + map = rxr->rx_tpa_idx_map; |
---|
| 2757 | + if (map) |
---|
| 2758 | + memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); |
---|
| 2759 | +} |
---|
| 2760 | + |
---|
2139 | 2761 | static void bnxt_free_rx_skbs(struct bnxt *bp) |
---|
2140 | 2762 | { |
---|
2141 | | - int i, max_idx, max_agg_idx; |
---|
2142 | | - struct pci_dev *pdev = bp->pdev; |
---|
| 2763 | + int i; |
---|
2143 | 2764 | |
---|
2144 | 2765 | if (!bp->rx_ring) |
---|
2145 | 2766 | return; |
---|
2146 | 2767 | |
---|
2147 | | - max_idx = bp->rx_nr_pages * RX_DESC_CNT; |
---|
2148 | | - max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; |
---|
2149 | | - for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
2150 | | - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
2151 | | - int j; |
---|
2152 | | - |
---|
2153 | | - if (rxr->rx_tpa) { |
---|
2154 | | - for (j = 0; j < MAX_TPA; j++) { |
---|
2155 | | - struct bnxt_tpa_info *tpa_info = |
---|
2156 | | - &rxr->rx_tpa[j]; |
---|
2157 | | - u8 *data = tpa_info->data; |
---|
2158 | | - |
---|
2159 | | - if (!data) |
---|
2160 | | - continue; |
---|
2161 | | - |
---|
2162 | | - dma_unmap_single_attrs(&pdev->dev, |
---|
2163 | | - tpa_info->mapping, |
---|
2164 | | - bp->rx_buf_use_size, |
---|
2165 | | - bp->rx_dir, |
---|
2166 | | - DMA_ATTR_WEAK_ORDERING); |
---|
2167 | | - |
---|
2168 | | - tpa_info->data = NULL; |
---|
2169 | | - |
---|
2170 | | - kfree(data); |
---|
2171 | | - } |
---|
2172 | | - } |
---|
2173 | | - |
---|
2174 | | - for (j = 0; j < max_idx; j++) { |
---|
2175 | | - struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; |
---|
2176 | | - dma_addr_t mapping = rx_buf->mapping; |
---|
2177 | | - void *data = rx_buf->data; |
---|
2178 | | - |
---|
2179 | | - if (!data) |
---|
2180 | | - continue; |
---|
2181 | | - |
---|
2182 | | - rx_buf->data = NULL; |
---|
2183 | | - |
---|
2184 | | - if (BNXT_RX_PAGE_MODE(bp)) { |
---|
2185 | | - mapping -= bp->rx_dma_offset; |
---|
2186 | | - dma_unmap_page_attrs(&pdev->dev, mapping, |
---|
2187 | | - PAGE_SIZE, bp->rx_dir, |
---|
2188 | | - DMA_ATTR_WEAK_ORDERING); |
---|
2189 | | - __free_page(data); |
---|
2190 | | - } else { |
---|
2191 | | - dma_unmap_single_attrs(&pdev->dev, mapping, |
---|
2192 | | - bp->rx_buf_use_size, |
---|
2193 | | - bp->rx_dir, |
---|
2194 | | - DMA_ATTR_WEAK_ORDERING); |
---|
2195 | | - kfree(data); |
---|
2196 | | - } |
---|
2197 | | - } |
---|
2198 | | - |
---|
2199 | | - for (j = 0; j < max_agg_idx; j++) { |
---|
2200 | | - struct bnxt_sw_rx_agg_bd *rx_agg_buf = |
---|
2201 | | - &rxr->rx_agg_ring[j]; |
---|
2202 | | - struct page *page = rx_agg_buf->page; |
---|
2203 | | - |
---|
2204 | | - if (!page) |
---|
2205 | | - continue; |
---|
2206 | | - |
---|
2207 | | - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, |
---|
2208 | | - BNXT_RX_PAGE_SIZE, |
---|
2209 | | - PCI_DMA_FROMDEVICE, |
---|
2210 | | - DMA_ATTR_WEAK_ORDERING); |
---|
2211 | | - |
---|
2212 | | - rx_agg_buf->page = NULL; |
---|
2213 | | - __clear_bit(j, rxr->rx_agg_bmap); |
---|
2214 | | - |
---|
2215 | | - __free_page(page); |
---|
2216 | | - } |
---|
2217 | | - if (rxr->rx_page) { |
---|
2218 | | - __free_page(rxr->rx_page); |
---|
2219 | | - rxr->rx_page = NULL; |
---|
2220 | | - } |
---|
2221 | | - } |
---|
| 2768 | + for (i = 0; i < bp->rx_nr_rings; i++) |
---|
| 2769 | + bnxt_free_one_rx_ring_skbs(bp, i); |
---|
2222 | 2770 | } |
---|
2223 | 2771 | |
---|
2224 | 2772 | static void bnxt_free_skbs(struct bnxt *bp) |
---|
.. | .. |
---|
2227 | 2775 | bnxt_free_rx_skbs(bp); |
---|
2228 | 2776 | } |
---|
2229 | 2777 | |
---|
2230 | | -static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) |
---|
| 2778 | +static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) |
---|
2231 | 2779 | { |
---|
2232 | 2780 | struct pci_dev *pdev = bp->pdev; |
---|
2233 | 2781 | int i; |
---|
2234 | 2782 | |
---|
2235 | | - for (i = 0; i < ring->nr_pages; i++) { |
---|
2236 | | - if (!ring->pg_arr[i]) |
---|
| 2783 | + for (i = 0; i < rmem->nr_pages; i++) { |
---|
| 2784 | + if (!rmem->pg_arr[i]) |
---|
2237 | 2785 | continue; |
---|
2238 | 2786 | |
---|
2239 | | - dma_free_coherent(&pdev->dev, ring->page_size, |
---|
2240 | | - ring->pg_arr[i], ring->dma_arr[i]); |
---|
| 2787 | + dma_free_coherent(&pdev->dev, rmem->page_size, |
---|
| 2788 | + rmem->pg_arr[i], rmem->dma_arr[i]); |
---|
2241 | 2789 | |
---|
2242 | | - ring->pg_arr[i] = NULL; |
---|
| 2790 | + rmem->pg_arr[i] = NULL; |
---|
2243 | 2791 | } |
---|
2244 | | - if (ring->pg_tbl) { |
---|
2245 | | - dma_free_coherent(&pdev->dev, ring->nr_pages * 8, |
---|
2246 | | - ring->pg_tbl, ring->pg_tbl_map); |
---|
2247 | | - ring->pg_tbl = NULL; |
---|
| 2792 | + if (rmem->pg_tbl) { |
---|
| 2793 | + size_t pg_tbl_size = rmem->nr_pages * 8; |
---|
| 2794 | + |
---|
| 2795 | + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) |
---|
| 2796 | + pg_tbl_size = rmem->page_size; |
---|
| 2797 | + dma_free_coherent(&pdev->dev, pg_tbl_size, |
---|
| 2798 | + rmem->pg_tbl, rmem->pg_tbl_map); |
---|
| 2799 | + rmem->pg_tbl = NULL; |
---|
2248 | 2800 | } |
---|
2249 | | - if (ring->vmem_size && *ring->vmem) { |
---|
2250 | | - vfree(*ring->vmem); |
---|
2251 | | - *ring->vmem = NULL; |
---|
| 2801 | + if (rmem->vmem_size && *rmem->vmem) { |
---|
| 2802 | + vfree(*rmem->vmem); |
---|
| 2803 | + *rmem->vmem = NULL; |
---|
2252 | 2804 | } |
---|
2253 | 2805 | } |
---|
2254 | 2806 | |
---|
2255 | | -static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) |
---|
| 2807 | +static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) |
---|
2256 | 2808 | { |
---|
2257 | | - int i; |
---|
2258 | 2809 | struct pci_dev *pdev = bp->pdev; |
---|
| 2810 | + u64 valid_bit = 0; |
---|
| 2811 | + int i; |
---|
2259 | 2812 | |
---|
2260 | | - if (ring->nr_pages > 1) { |
---|
2261 | | - ring->pg_tbl = dma_alloc_coherent(&pdev->dev, |
---|
2262 | | - ring->nr_pages * 8, |
---|
2263 | | - &ring->pg_tbl_map, |
---|
| 2813 | + if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) |
---|
| 2814 | + valid_bit = PTU_PTE_VALID; |
---|
| 2815 | + if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { |
---|
| 2816 | + size_t pg_tbl_size = rmem->nr_pages * 8; |
---|
| 2817 | + |
---|
| 2818 | + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) |
---|
| 2819 | + pg_tbl_size = rmem->page_size; |
---|
| 2820 | + rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, |
---|
| 2821 | + &rmem->pg_tbl_map, |
---|
2264 | 2822 | GFP_KERNEL); |
---|
2265 | | - if (!ring->pg_tbl) |
---|
| 2823 | + if (!rmem->pg_tbl) |
---|
2266 | 2824 | return -ENOMEM; |
---|
2267 | 2825 | } |
---|
2268 | 2826 | |
---|
2269 | | - for (i = 0; i < ring->nr_pages; i++) { |
---|
2270 | | - ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, |
---|
2271 | | - ring->page_size, |
---|
2272 | | - &ring->dma_arr[i], |
---|
| 2827 | + for (i = 0; i < rmem->nr_pages; i++) { |
---|
| 2828 | + u64 extra_bits = valid_bit; |
---|
| 2829 | + |
---|
| 2830 | + rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, |
---|
| 2831 | + rmem->page_size, |
---|
| 2832 | + &rmem->dma_arr[i], |
---|
2273 | 2833 | GFP_KERNEL); |
---|
2274 | | - if (!ring->pg_arr[i]) |
---|
| 2834 | + if (!rmem->pg_arr[i]) |
---|
2275 | 2835 | return -ENOMEM; |
---|
2276 | 2836 | |
---|
2277 | | - if (ring->nr_pages > 1) |
---|
2278 | | - ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); |
---|
| 2837 | + if (rmem->init_val) |
---|
| 2838 | + memset(rmem->pg_arr[i], rmem->init_val, |
---|
| 2839 | + rmem->page_size); |
---|
| 2840 | + if (rmem->nr_pages > 1 || rmem->depth > 0) { |
---|
| 2841 | + if (i == rmem->nr_pages - 2 && |
---|
| 2842 | + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) |
---|
| 2843 | + extra_bits |= PTU_PTE_NEXT_TO_LAST; |
---|
| 2844 | + else if (i == rmem->nr_pages - 1 && |
---|
| 2845 | + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) |
---|
| 2846 | + extra_bits |= PTU_PTE_LAST; |
---|
| 2847 | + rmem->pg_tbl[i] = |
---|
| 2848 | + cpu_to_le64(rmem->dma_arr[i] | extra_bits); |
---|
| 2849 | + } |
---|
2279 | 2850 | } |
---|
2280 | 2851 | |
---|
2281 | | - if (ring->vmem_size) { |
---|
2282 | | - *ring->vmem = vzalloc(ring->vmem_size); |
---|
2283 | | - if (!(*ring->vmem)) |
---|
| 2852 | + if (rmem->vmem_size) { |
---|
| 2853 | + *rmem->vmem = vzalloc(rmem->vmem_size); |
---|
| 2854 | + if (!(*rmem->vmem)) |
---|
| 2855 | + return -ENOMEM; |
---|
| 2856 | + } |
---|
| 2857 | + return 0; |
---|
| 2858 | +} |
---|
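When a ring spans multiple pages, bnxt_alloc_ring() publishes the page DMA addresses through a page table and, for rings flagged BNXT_RMEM_RING_PTE_FLAG, tags the next-to-last and last entries so the chip knows where the chain ends. A sketch of that tagging with placeholder bit values (the real PTU_PTE_* encodings are not reproduced here):

```c
#include <stdio.h>
#include <stdint.h>

#define PTE_VALID        (1ULL << 0) /* placeholder for PTU_PTE_VALID */
#define PTE_NEXT_TO_LAST (1ULL << 1) /* placeholder bit values */
#define PTE_LAST         (1ULL << 2)

int main(void)
{
	uint64_t dma[4] = { 0x10000, 0x20000, 0x30000, 0x40000 };
	uint64_t pte[4];
	int n = 4;

	for (int i = 0; i < n; i++) {
		uint64_t extra = PTE_VALID;

		if (i == n - 2)
			extra |= PTE_NEXT_TO_LAST;
		else if (i == n - 1)
			extra |= PTE_LAST;
		pte[i] = dma[i] | extra; /* what cpu_to_le64() would see */
		printf("pte[%d]=0x%llx\n", i, (unsigned long long)pte[i]);
	}
	return 0;
}
```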
| 2859 | + |
---|
| 2860 | +static void bnxt_free_tpa_info(struct bnxt *bp) |
---|
| 2861 | +{ |
---|
| 2862 | + int i, j; |
---|
| 2863 | + |
---|
| 2864 | + for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
| 2865 | + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
| 2866 | + |
---|
| 2867 | + kfree(rxr->rx_tpa_idx_map); |
---|
| 2868 | + rxr->rx_tpa_idx_map = NULL; |
---|
| 2869 | + if (rxr->rx_tpa) { |
---|
| 2870 | + for (j = 0; j < bp->max_tpa; j++) { |
---|
| 2871 | + kfree(rxr->rx_tpa[j].agg_arr); |
---|
| 2872 | + rxr->rx_tpa[j].agg_arr = NULL; |
---|
| 2873 | + } |
---|
| 2874 | + } |
---|
| 2875 | + kfree(rxr->rx_tpa); |
---|
| 2876 | + rxr->rx_tpa = NULL; |
---|
| 2877 | + } |
---|
| 2878 | +} |
---|
| 2879 | + |
---|
| 2880 | +static int bnxt_alloc_tpa_info(struct bnxt *bp) |
---|
| 2881 | +{ |
---|
| 2882 | + int i, j; |
---|
| 2883 | + |
---|
| 2884 | + bp->max_tpa = MAX_TPA; |
---|
| 2885 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 2886 | + if (!bp->max_tpa_v2) |
---|
| 2887 | + return 0; |
---|
| 2888 | + bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); |
---|
| 2889 | + } |
---|
| 2890 | + |
---|
| 2891 | + for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
| 2892 | + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
| 2893 | + struct rx_agg_cmp *agg; |
---|
| 2894 | + |
---|
| 2895 | + rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), |
---|
| 2896 | + GFP_KERNEL); |
---|
| 2897 | + if (!rxr->rx_tpa) |
---|
| 2898 | + return -ENOMEM; |
---|
| 2899 | + |
---|
| 2900 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 2901 | + continue; |
---|
| 2902 | + for (j = 0; j < bp->max_tpa; j++) { |
---|
| 2903 | + agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); |
---|
| 2904 | + if (!agg) |
---|
| 2905 | + return -ENOMEM; |
---|
| 2906 | + rxr->rx_tpa[j].agg_arr = agg; |
---|
| 2907 | + } |
---|
| 2908 | + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), |
---|
| 2909 | + GFP_KERNEL); |
---|
| 2910 | + if (!rxr->rx_tpa_idx_map) |
---|
2284 | 2911 | return -ENOMEM; |
---|
2285 | 2912 | } |
---|
2286 | 2913 | return 0; |
---|
.. | .. |
---|
2293 | 2920 | if (!bp->rx_ring) |
---|
2294 | 2921 | return; |
---|
2295 | 2922 | |
---|
| 2923 | + bnxt_free_tpa_info(bp); |
---|
2296 | 2924 | for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
2297 | 2925 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
2298 | 2926 | struct bnxt_ring_struct *ring; |
---|
.. | .. |
---|
2303 | 2931 | if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) |
---|
2304 | 2932 | xdp_rxq_info_unreg(&rxr->xdp_rxq); |
---|
2305 | 2933 | |
---|
2306 | | - kfree(rxr->rx_tpa); |
---|
2307 | | - rxr->rx_tpa = NULL; |
---|
| 2934 | + page_pool_destroy(rxr->page_pool); |
---|
| 2935 | + rxr->page_pool = NULL; |
---|
2308 | 2936 | |
---|
2309 | 2937 | kfree(rxr->rx_agg_bmap); |
---|
2310 | 2938 | rxr->rx_agg_bmap = NULL; |
---|
2311 | 2939 | |
---|
2312 | 2940 | ring = &rxr->rx_ring_struct; |
---|
2313 | | - bnxt_free_ring(bp, ring); |
---|
| 2941 | + bnxt_free_ring(bp, &ring->ring_mem); |
---|
2314 | 2942 | |
---|
2315 | 2943 | ring = &rxr->rx_agg_ring_struct; |
---|
2316 | | - bnxt_free_ring(bp, ring); |
---|
| 2944 | + bnxt_free_ring(bp, &ring->ring_mem); |
---|
2317 | 2945 | } |
---|
| 2946 | +} |
---|
| 2947 | + |
---|
| 2948 | +static int bnxt_alloc_rx_page_pool(struct bnxt *bp, |
---|
| 2949 | + struct bnxt_rx_ring_info *rxr) |
---|
| 2950 | +{ |
---|
| 2951 | + struct page_pool_params pp = { 0 }; |
---|
| 2952 | + |
---|
| 2953 | + pp.pool_size = bp->rx_ring_size; |
---|
| 2954 | + pp.nid = dev_to_node(&bp->pdev->dev); |
---|
| 2955 | + pp.dev = &bp->pdev->dev; |
---|
| 2956 | + pp.dma_dir = DMA_BIDIRECTIONAL; |
---|
| 2957 | + |
---|
| 2958 | + rxr->page_pool = page_pool_create(&pp); |
---|
| 2959 | + if (IS_ERR(rxr->page_pool)) { |
---|
| 2960 | + int err = PTR_ERR(rxr->page_pool); |
---|
| 2961 | + |
---|
| 2962 | + rxr->page_pool = NULL; |
---|
| 2963 | + return err; |
---|
| 2964 | + } |
---|
| 2965 | + return 0; |
---|
2318 | 2966 | } |
---|
2319 | 2967 | |
---|
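page_pool_create() returns an ERR_PTR-encoded errno rather than NULL on failure, which is why the caller above tests IS_ERR(), extracts PTR_ERR(), and resets the field before returning. A self-contained userspace model of that convention (simplified macros; the kernel's versions live in <linux/err.h>):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(intptr_t)(err))
#define PTR_ERR(p)   ((long)(intptr_t)(p))
#define IS_ERR(p)    ((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

static void *pool_create(size_t size)
{
	if (!size)
		return ERR_PTR(-EINVAL); /* errno rides in the pointer */
	return malloc(size);
}

int main(void)
{
	void *pool = pool_create(0);

	if (IS_ERR(pool)) {
		long err = PTR_ERR(pool);

		pool = NULL; /* same reset-on-failure as the driver */
		printf("create failed: %ld\n", err);
		return 1;
	}
	free(pool);
	return 0;
}
```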
2320 | 2968 | static int bnxt_alloc_rx_rings(struct bnxt *bp) |
---|
2321 | 2969 | { |
---|
2322 | | - int i, rc, agg_rings = 0, tpa_rings = 0; |
---|
| 2970 | + int i, rc = 0, agg_rings = 0; |
---|
2323 | 2971 | |
---|
2324 | 2972 | if (!bp->rx_ring) |
---|
2325 | 2973 | return -ENOMEM; |
---|
.. | .. |
---|
2327 | 2975 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
2328 | 2976 | agg_rings = 1; |
---|
2329 | 2977 | |
---|
2330 | | - if (bp->flags & BNXT_FLAG_TPA) |
---|
2331 | | - tpa_rings = 1; |
---|
2332 | | - |
---|
2333 | 2978 | for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
2334 | 2979 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
2335 | 2980 | struct bnxt_ring_struct *ring; |
---|
2336 | 2981 | |
---|
2337 | 2982 | ring = &rxr->rx_ring_struct; |
---|
2338 | 2983 | |
---|
| 2984 | + rc = bnxt_alloc_rx_page_pool(bp, rxr); |
---|
| 2985 | + if (rc) |
---|
| 2986 | + return rc; |
---|
| 2987 | + |
---|
2339 | 2988 | rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); |
---|
2340 | 2989 | if (rc < 0) |
---|
2341 | 2990 | return rc; |
---|
2342 | 2991 | |
---|
2343 | | - rc = bnxt_alloc_ring(bp, ring); |
---|
| 2992 | + rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, |
---|
| 2993 | + MEM_TYPE_PAGE_POOL, |
---|
| 2994 | + rxr->page_pool); |
---|
| 2995 | + if (rc) { |
---|
| 2996 | + xdp_rxq_info_unreg(&rxr->xdp_rxq); |
---|
| 2997 | + return rc; |
---|
| 2998 | + } |
---|
| 2999 | + |
---|
| 3000 | + rc = bnxt_alloc_ring(bp, &ring->ring_mem); |
---|
2344 | 3001 | if (rc) |
---|
2345 | 3002 | return rc; |
---|
2346 | 3003 | |
---|
| 3004 | + ring->grp_idx = i; |
---|
2347 | 3005 | if (agg_rings) { |
---|
2348 | 3006 | u16 mem_size; |
---|
2349 | 3007 | |
---|
2350 | 3008 | ring = &rxr->rx_agg_ring_struct; |
---|
2351 | | - rc = bnxt_alloc_ring(bp, ring); |
---|
| 3009 | + rc = bnxt_alloc_ring(bp, &ring->ring_mem); |
---|
2352 | 3010 | if (rc) |
---|
2353 | 3011 | return rc; |
---|
2354 | 3012 | |
---|
.. | .. |
---|
2358 | 3016 | rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); |
---|
2359 | 3017 | if (!rxr->rx_agg_bmap) |
---|
2360 | 3018 | return -ENOMEM; |
---|
2361 | | - |
---|
2362 | | - if (tpa_rings) { |
---|
2363 | | - rxr->rx_tpa = kcalloc(MAX_TPA, |
---|
2364 | | - sizeof(struct bnxt_tpa_info), |
---|
2365 | | - GFP_KERNEL); |
---|
2366 | | - if (!rxr->rx_tpa) |
---|
2367 | | - return -ENOMEM; |
---|
2368 | | - } |
---|
2369 | 3019 | } |
---|
2370 | 3020 | } |
---|
2371 | | - return 0; |
---|
| 3021 | + if (bp->flags & BNXT_FLAG_TPA) |
---|
| 3022 | + rc = bnxt_alloc_tpa_info(bp); |
---|
| 3023 | + return rc; |
---|
2372 | 3024 | } |
---|
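Note the unwind ordering in bnxt_alloc_rx_rings(): xdp_rxq_info_reg() is undone with xdp_rxq_info_unreg() if attaching the page-pool memory model fails, so no queue is left half-registered. A generic sketch of that register-then-attach-or-unwind shape (stand-in functions, not the XDP API):

```c
#include <stdio.h>

static int reg_queue(int i)        { (void)i; return 0; }
static void unreg_queue(int i)     { (void)i; }
static int attach_mem_model(int i) { return i == 2 ? -1 : 0; } /* fail q2 */

static int setup_queue(int i)
{
	int rc = reg_queue(i);

	if (rc)
		return rc;
	rc = attach_mem_model(i);
	if (rc) {
		unreg_queue(i); /* unwind the successful first step */
		return rc;
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("queue %d: rc=%d\n", i, setup_queue(i));
	return 0;
}
```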
2373 | 3025 | |
---|
2374 | 3026 | static void bnxt_free_tx_rings(struct bnxt *bp) |
---|
.. | .. |
---|
2391 | 3043 | |
---|
2392 | 3044 | ring = &txr->tx_ring_struct; |
---|
2393 | 3045 | |
---|
2394 | | - bnxt_free_ring(bp, ring); |
---|
| 3046 | + bnxt_free_ring(bp, &ring->ring_mem); |
---|
2395 | 3047 | } |
---|
2396 | 3048 | } |
---|
2397 | 3049 | |
---|
.. | .. |
---|
2422 | 3074 | |
---|
2423 | 3075 | ring = &txr->tx_ring_struct; |
---|
2424 | 3076 | |
---|
2425 | | - rc = bnxt_alloc_ring(bp, ring); |
---|
| 3077 | + rc = bnxt_alloc_ring(bp, &ring->ring_mem); |
---|
2426 | 3078 | if (rc) |
---|
2427 | 3079 | return rc; |
---|
2428 | 3080 | |
---|
.. | .. |
---|
2444 | 3096 | mapping = txr->tx_push_mapping + |
---|
2445 | 3097 | sizeof(struct tx_push_bd); |
---|
2446 | 3098 | txr->data_mapping = cpu_to_le64(mapping); |
---|
2447 | | - |
---|
2448 | | - memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); |
---|
2449 | 3099 | } |
---|
2450 | 3100 | qidx = bp->tc_to_qidx[j]; |
---|
2451 | 3101 | ring->queue_id = bp->q_info[qidx].queue_id; |
---|
.. | .. |
---|
2468 | 3118 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
2469 | 3119 | struct bnxt_cp_ring_info *cpr; |
---|
2470 | 3120 | struct bnxt_ring_struct *ring; |
---|
| 3121 | + int j; |
---|
2471 | 3122 | |
---|
2472 | 3123 | if (!bnapi) |
---|
2473 | 3124 | continue; |
---|
.. | .. |
---|
2475 | 3126 | cpr = &bnapi->cp_ring; |
---|
2476 | 3127 | ring = &cpr->cp_ring_struct; |
---|
2477 | 3128 | |
---|
2478 | | - bnxt_free_ring(bp, ring); |
---|
| 3129 | + bnxt_free_ring(bp, &ring->ring_mem); |
---|
| 3130 | + |
---|
| 3131 | + for (j = 0; j < 2; j++) { |
---|
| 3132 | + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; |
---|
| 3133 | + |
---|
| 3134 | + if (cpr2) { |
---|
| 3135 | + ring = &cpr2->cp_ring_struct; |
---|
| 3136 | + bnxt_free_ring(bp, &ring->ring_mem); |
---|
| 3137 | + kfree(cpr2); |
---|
| 3138 | + cpr->cp_ring_arr[j] = NULL; |
---|
| 3139 | + } |
---|
| 3140 | + } |
---|
2479 | 3141 | } |
---|
| 3142 | +} |
---|
| 3143 | + |
---|
| 3144 | +static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) |
---|
| 3145 | +{ |
---|
| 3146 | + struct bnxt_ring_mem_info *rmem; |
---|
| 3147 | + struct bnxt_ring_struct *ring; |
---|
| 3148 | + struct bnxt_cp_ring_info *cpr; |
---|
| 3149 | + int rc; |
---|
| 3150 | + |
---|
| 3151 | + cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); |
---|
| 3152 | + if (!cpr) |
---|
| 3153 | + return NULL; |
---|
| 3154 | + |
---|
| 3155 | + ring = &cpr->cp_ring_struct; |
---|
| 3156 | + rmem = &ring->ring_mem; |
---|
| 3157 | + rmem->nr_pages = bp->cp_nr_pages; |
---|
| 3158 | + rmem->page_size = HW_CMPD_RING_SIZE; |
---|
| 3159 | + rmem->pg_arr = (void **)cpr->cp_desc_ring; |
---|
| 3160 | + rmem->dma_arr = cpr->cp_desc_mapping; |
---|
| 3161 | + rmem->flags = BNXT_RMEM_RING_PTE_FLAG; |
---|
| 3162 | + rc = bnxt_alloc_ring(bp, rmem); |
---|
| 3163 | + if (rc) { |
---|
| 3164 | + bnxt_free_ring(bp, rmem); |
---|
| 3165 | + kfree(cpr); |
---|
| 3166 | + cpr = NULL; |
---|
| 3167 | + } |
---|
| 3168 | + return cpr; |
---|
2480 | 3169 | } |
---|
2481 | 3170 | |
---|
2482 | 3171 | static int bnxt_alloc_cp_rings(struct bnxt *bp) |
---|
2483 | 3172 | { |
---|
| 3173 | + bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); |
---|
2484 | 3174 | int i, rc, ulp_base_vec, ulp_msix; |
---|
2485 | 3175 | |
---|
2486 | 3176 | ulp_msix = bnxt_get_ulp_msix_num(bp); |
---|
.. | .. |
---|
2494 | 3184 | continue; |
---|
2495 | 3185 | |
---|
2496 | 3186 | cpr = &bnapi->cp_ring; |
---|
| 3187 | + cpr->bnapi = bnapi; |
---|
2497 | 3188 | ring = &cpr->cp_ring_struct; |
---|
2498 | 3189 | |
---|
2499 | | - rc = bnxt_alloc_ring(bp, ring); |
---|
| 3190 | + rc = bnxt_alloc_ring(bp, &ring->ring_mem); |
---|
2500 | 3191 | if (rc) |
---|
2501 | 3192 | return rc; |
---|
2502 | 3193 | |
---|
.. | .. |
---|
2504 | 3195 | ring->map_idx = i + ulp_msix; |
---|
2505 | 3196 | else |
---|
2506 | 3197 | ring->map_idx = i; |
---|
| 3198 | + |
---|
| 3199 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 3200 | + continue; |
---|
| 3201 | + |
---|
| 3202 | + if (i < bp->rx_nr_rings) { |
---|
| 3203 | + struct bnxt_cp_ring_info *cpr2 = |
---|
| 3204 | + bnxt_alloc_cp_sub_ring(bp); |
---|
| 3205 | + |
---|
| 3206 | + cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; |
---|
| 3207 | + if (!cpr2) |
---|
| 3208 | + return -ENOMEM; |
---|
| 3209 | + cpr2->bnapi = bnapi; |
---|
| 3210 | + } |
---|
| 3211 | + if ((sh && i < bp->tx_nr_rings) || |
---|
| 3212 | + (!sh && i >= bp->rx_nr_rings)) { |
---|
| 3213 | + struct bnxt_cp_ring_info *cpr2 = |
---|
| 3214 | + bnxt_alloc_cp_sub_ring(bp); |
---|
| 3215 | + |
---|
| 3216 | + cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; |
---|
| 3217 | + if (!cpr2) |
---|
| 3218 | + return -ENOMEM; |
---|
| 3219 | + cpr2->bnapi = bnapi; |
---|
| 3220 | + } |
---|
2507 | 3221 | } |
---|
2508 | 3222 | return 0; |
---|
2509 | 3223 | } |
---|
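The two conditions in bnxt_alloc_cp_rings() encode which NAPI instance carries a TX completion sub-ring: with shared rings the first tx_nr_rings NAPIs do, otherwise TX starts after the RX NAPIs. A worked example of that predicate (assuming 4 RX and 2 TX rings; indices beyond the actual cp_nr_rings are printed only to illustrate the test):

```c
#include <stdio.h>
#include <stdbool.h>

/* true if NAPI instance i owns a TX completion sub-ring */
static bool napi_has_tx(bool shared, int i, int rx_nr, int tx_nr)
{
	return (shared && i < tx_nr) || (!shared && i >= rx_nr);
}

int main(void)
{
	int rx_nr = 4, tx_nr = 2;

	for (int i = 0; i < rx_nr + tx_nr; i++)
		printf("napi %d: shared=%d separate=%d\n", i,
		       napi_has_tx(true, i, rx_nr, tx_nr),
		       napi_has_tx(false, i, rx_nr, tx_nr));
	return 0;
}
```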
.. | .. |
---|
2514 | 3228 | |
---|
2515 | 3229 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
2516 | 3230 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
| 3231 | + struct bnxt_ring_mem_info *rmem; |
---|
2517 | 3232 | struct bnxt_cp_ring_info *cpr; |
---|
2518 | 3233 | struct bnxt_rx_ring_info *rxr; |
---|
2519 | 3234 | struct bnxt_tx_ring_info *txr; |
---|
.. | .. |
---|
2524 | 3239 | |
---|
2525 | 3240 | cpr = &bnapi->cp_ring; |
---|
2526 | 3241 | ring = &cpr->cp_ring_struct; |
---|
2527 | | - ring->nr_pages = bp->cp_nr_pages; |
---|
2528 | | - ring->page_size = HW_CMPD_RING_SIZE; |
---|
2529 | | - ring->pg_arr = (void **)cpr->cp_desc_ring; |
---|
2530 | | - ring->dma_arr = cpr->cp_desc_mapping; |
---|
2531 | | - ring->vmem_size = 0; |
---|
| 3242 | + rmem = &ring->ring_mem; |
---|
| 3243 | + rmem->nr_pages = bp->cp_nr_pages; |
---|
| 3244 | + rmem->page_size = HW_CMPD_RING_SIZE; |
---|
| 3245 | + rmem->pg_arr = (void **)cpr->cp_desc_ring; |
---|
| 3246 | + rmem->dma_arr = cpr->cp_desc_mapping; |
---|
| 3247 | + rmem->vmem_size = 0; |
---|
2532 | 3248 | |
---|
2533 | 3249 | rxr = bnapi->rx_ring; |
---|
2534 | 3250 | if (!rxr) |
---|
2535 | 3251 | goto skip_rx; |
---|
2536 | 3252 | |
---|
2537 | 3253 | ring = &rxr->rx_ring_struct; |
---|
2538 | | - ring->nr_pages = bp->rx_nr_pages; |
---|
2539 | | - ring->page_size = HW_RXBD_RING_SIZE; |
---|
2540 | | - ring->pg_arr = (void **)rxr->rx_desc_ring; |
---|
2541 | | - ring->dma_arr = rxr->rx_desc_mapping; |
---|
2542 | | - ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; |
---|
2543 | | - ring->vmem = (void **)&rxr->rx_buf_ring; |
---|
| 3254 | + rmem = &ring->ring_mem; |
---|
| 3255 | + rmem->nr_pages = bp->rx_nr_pages; |
---|
| 3256 | + rmem->page_size = HW_RXBD_RING_SIZE; |
---|
| 3257 | + rmem->pg_arr = (void **)rxr->rx_desc_ring; |
---|
| 3258 | + rmem->dma_arr = rxr->rx_desc_mapping; |
---|
| 3259 | + rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; |
---|
| 3260 | + rmem->vmem = (void **)&rxr->rx_buf_ring; |
---|
2544 | 3261 | |
---|
2545 | 3262 | ring = &rxr->rx_agg_ring_struct; |
---|
2546 | | - ring->nr_pages = bp->rx_agg_nr_pages; |
---|
2547 | | - ring->page_size = HW_RXBD_RING_SIZE; |
---|
2548 | | - ring->pg_arr = (void **)rxr->rx_agg_desc_ring; |
---|
2549 | | - ring->dma_arr = rxr->rx_agg_desc_mapping; |
---|
2550 | | - ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; |
---|
2551 | | - ring->vmem = (void **)&rxr->rx_agg_ring; |
---|
| 3263 | + rmem = &ring->ring_mem; |
---|
| 3264 | + rmem->nr_pages = bp->rx_agg_nr_pages; |
---|
| 3265 | + rmem->page_size = HW_RXBD_RING_SIZE; |
---|
| 3266 | + rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; |
---|
| 3267 | + rmem->dma_arr = rxr->rx_agg_desc_mapping; |
---|
| 3268 | + rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; |
---|
| 3269 | + rmem->vmem = (void **)&rxr->rx_agg_ring; |
---|
2552 | 3270 | |
---|
2553 | 3271 | skip_rx: |
---|
2554 | 3272 | txr = bnapi->tx_ring; |
---|
.. | .. |
---|
2556 | 3274 | continue; |
---|
2557 | 3275 | |
---|
2558 | 3276 | ring = &txr->tx_ring_struct; |
---|
2559 | | - ring->nr_pages = bp->tx_nr_pages; |
---|
2560 | | - ring->page_size = HW_RXBD_RING_SIZE; |
---|
2561 | | - ring->pg_arr = (void **)txr->tx_desc_ring; |
---|
2562 | | - ring->dma_arr = txr->tx_desc_mapping; |
---|
2563 | | - ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; |
---|
2564 | | - ring->vmem = (void **)&txr->tx_buf_ring; |
---|
| 3277 | + rmem = &ring->ring_mem; |
---|
| 3278 | + rmem->nr_pages = bp->tx_nr_pages; |
---|
| 3279 | + rmem->page_size = HW_RXBD_RING_SIZE; |
---|
| 3280 | + rmem->pg_arr = (void **)txr->tx_desc_ring; |
---|
| 3281 | + rmem->dma_arr = txr->tx_desc_mapping; |
---|
| 3282 | + rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; |
---|
| 3283 | + rmem->vmem = (void **)&txr->tx_buf_ring; |
---|
2565 | 3284 | } |
---|
2566 | 3285 | } |
---|
2567 | 3286 | |
---|
.. | .. |
---|
2571 | 3290 | u32 prod; |
---|
2572 | 3291 | struct rx_bd **rx_buf_ring; |
---|
2573 | 3292 | |
---|
2574 | | - rx_buf_ring = (struct rx_bd **)ring->pg_arr; |
---|
2575 | | - for (i = 0, prod = 0; i < ring->nr_pages; i++) { |
---|
| 3293 | + rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; |
---|
| 3294 | + for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { |
---|
2576 | 3295 | int j; |
---|
2577 | 3296 | struct rx_bd *rxbd; |
---|
2578 | 3297 | |
---|
.. | .. |
---|
2587 | 3306 | } |
---|
2588 | 3307 | } |
---|
2589 | 3308 | |
---|
| 3309 | +static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) |
---|
| 3310 | +{ |
---|
| 3311 | + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; |
---|
| 3312 | + struct net_device *dev = bp->dev; |
---|
| 3313 | + u32 prod; |
---|
| 3314 | + int i; |
---|
| 3315 | + |
---|
| 3316 | + prod = rxr->rx_prod; |
---|
| 3317 | + for (i = 0; i < bp->rx_ring_size; i++) { |
---|
| 3318 | + if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { |
---|
| 3319 | + netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", |
---|
| 3320 | + ring_nr, i, bp->rx_ring_size); |
---|
| 3321 | + break; |
---|
| 3322 | + } |
---|
| 3323 | + prod = NEXT_RX(prod); |
---|
| 3324 | + } |
---|
| 3325 | + rxr->rx_prod = prod; |
---|
| 3326 | + |
---|
| 3327 | + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) |
---|
| 3328 | + return 0; |
---|
| 3329 | + |
---|
| 3330 | + prod = rxr->rx_agg_prod; |
---|
| 3331 | + for (i = 0; i < bp->rx_agg_ring_size; i++) { |
---|
| 3332 | + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { |
---|
| 3333 | + netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", |
---|
| 3334 | + ring_nr, i, bp->rx_ring_size); |
---|
| 3335 | + break; |
---|
| 3336 | + } |
---|
| 3337 | + prod = NEXT_RX_AGG(prod); |
---|
| 3338 | + } |
---|
| 3339 | + rxr->rx_agg_prod = prod; |
---|
| 3340 | + |
---|
| 3341 | + if (rxr->rx_tpa) { |
---|
| 3342 | + dma_addr_t mapping; |
---|
| 3343 | + u8 *data; |
---|
| 3344 | + |
---|
| 3345 | + for (i = 0; i < bp->max_tpa; i++) { |
---|
| 3346 | + data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); |
---|
| 3347 | + if (!data) |
---|
| 3348 | + return -ENOMEM; |
---|
| 3349 | + |
---|
| 3350 | + rxr->rx_tpa[i].data = data; |
---|
| 3351 | + rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; |
---|
| 3352 | + rxr->rx_tpa[i].mapping = mapping; |
---|
| 3353 | + } |
---|
| 3354 | + } |
---|
| 3355 | + return 0; |
---|
| 3356 | +} |
---|
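bnxt_alloc_one_rx_ring() advances the producer with NEXT_RX()/NEXT_RX_AGG(), which in the driver are masked increments, so the index wraps silently at the end of the ring. A minimal sketch of that pattern (fixed power-of-two size standing in for bp->rx_ring_mask):

```c
#include <stdio.h>

#define RX_RING_SIZE 8u /* power of two, like the real ring sizes */
#define RX_RING_MASK (RX_RING_SIZE - 1)
#define NEXT_RX(p)   (((p) + 1) & RX_RING_MASK)

int main(void)
{
	unsigned int prod = 6;

	for (int i = 0; i < 4; i++) {
		printf("fill slot %u\n", prod); /* 6, 7, 0, 1 */
		prod = NEXT_RX(prod);
	}
	return 0;
}
```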
| 3357 | + |
---|
2590 | 3358 | static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) |
---|
2591 | 3359 | { |
---|
2592 | | - struct net_device *dev = bp->dev; |
---|
2593 | 3360 | struct bnxt_rx_ring_info *rxr; |
---|
2594 | 3361 | struct bnxt_ring_struct *ring; |
---|
2595 | | - u32 prod, type; |
---|
2596 | | - int i; |
---|
| 3362 | + u32 type; |
---|
2597 | 3363 | |
---|
2598 | 3364 | type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | |
---|
2599 | 3365 | RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; |
---|
.. | .. |
---|
2606 | 3372 | bnxt_init_rxbd_pages(ring, type); |
---|
2607 | 3373 | |
---|
2608 | 3374 | if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { |
---|
2609 | | - rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); |
---|
2610 | | - if (IS_ERR(rxr->xdp_prog)) { |
---|
2611 | | - int rc = PTR_ERR(rxr->xdp_prog); |
---|
2612 | | - |
---|
2613 | | - rxr->xdp_prog = NULL; |
---|
2614 | | - return rc; |
---|
2615 | | - } |
---|
| 3375 | + bpf_prog_add(bp->xdp_prog, 1); |
---|
| 3376 | + rxr->xdp_prog = bp->xdp_prog; |
---|
2616 | 3377 | } |
---|
2617 | | - prod = rxr->rx_prod; |
---|
2618 | | - for (i = 0; i < bp->rx_ring_size; i++) { |
---|
2619 | | - if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { |
---|
2620 | | - netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", |
---|
2621 | | - ring_nr, i, bp->rx_ring_size); |
---|
2622 | | - break; |
---|
2623 | | - } |
---|
2624 | | - prod = NEXT_RX(prod); |
---|
2625 | | - } |
---|
2626 | | - rxr->rx_prod = prod; |
---|
2627 | 3378 | ring->fw_ring_id = INVALID_HW_RING_ID; |
---|
2628 | 3379 | |
---|
2629 | 3380 | ring = &rxr->rx_agg_ring_struct; |
---|
2630 | 3381 | ring->fw_ring_id = INVALID_HW_RING_ID; |
---|
2631 | 3382 | |
---|
2632 | | - if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) |
---|
2633 | | - return 0; |
---|
| 3383 | + if (bp->flags & BNXT_FLAG_AGG_RINGS) { |
---|
| 3384 | + type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | |
---|
| 3385 | + RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; |
---|
2634 | 3386 | |
---|
2635 | | - type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | |
---|
2636 | | - RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; |
---|
2637 | | - |
---|
2638 | | - bnxt_init_rxbd_pages(ring, type); |
---|
2639 | | - |
---|
2640 | | - prod = rxr->rx_agg_prod; |
---|
2641 | | - for (i = 0; i < bp->rx_agg_ring_size; i++) { |
---|
2642 | | - if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { |
---|
2643 | | - netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", |
---|
2644 | | - ring_nr, i, bp->rx_ring_size); |
---|
2645 | | - break; |
---|
2646 | | - } |
---|
2647 | | - prod = NEXT_RX_AGG(prod); |
---|
2648 | | - } |
---|
2649 | | - rxr->rx_agg_prod = prod; |
---|
2650 | | - |
---|
2651 | | - if (bp->flags & BNXT_FLAG_TPA) { |
---|
2652 | | - if (rxr->rx_tpa) { |
---|
2653 | | - u8 *data; |
---|
2654 | | - dma_addr_t mapping; |
---|
2655 | | - |
---|
2656 | | - for (i = 0; i < MAX_TPA; i++) { |
---|
2657 | | - data = __bnxt_alloc_rx_data(bp, &mapping, |
---|
2658 | | - GFP_KERNEL); |
---|
2659 | | - if (!data) |
---|
2660 | | - return -ENOMEM; |
---|
2661 | | - |
---|
2662 | | - rxr->rx_tpa[i].data = data; |
---|
2663 | | - rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; |
---|
2664 | | - rxr->rx_tpa[i].mapping = mapping; |
---|
2665 | | - } |
---|
2666 | | - } else { |
---|
2667 | | - netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); |
---|
2668 | | - return -ENOMEM; |
---|
2669 | | - } |
---|
| 3387 | + bnxt_init_rxbd_pages(ring, type); |
---|
2670 | 3388 | } |
---|
2671 | 3389 | |
---|
2672 | | - return 0; |
---|
| 3390 | + return bnxt_alloc_one_rx_ring(bp, ring_nr); |
---|
2673 | 3391 | } |
---|
2674 | 3392 | |
---|
2675 | 3393 | static void bnxt_init_cp_rings(struct bnxt *bp) |
---|
2676 | 3394 | { |
---|
2677 | | - int i; |
---|
| 3395 | + int i, j; |
---|
2678 | 3396 | |
---|
2679 | 3397 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
2680 | 3398 | struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; |
---|
.. | .. |
---|
2683 | 3401 | ring->fw_ring_id = INVALID_HW_RING_ID; |
---|
2684 | 3402 | cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; |
---|
2685 | 3403 | cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; |
---|
| 3404 | + for (j = 0; j < 2; j++) { |
---|
| 3405 | + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; |
---|
| 3406 | + |
---|
| 3407 | + if (!cpr2) |
---|
| 3408 | + continue; |
---|
| 3409 | + |
---|
| 3410 | + ring = &cpr2->cp_ring_struct; |
---|
| 3411 | + ring->fw_ring_id = INVALID_HW_RING_ID; |
---|
| 3412 | + cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; |
---|
| 3413 | + cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; |
---|
| 3414 | + } |
---|
2686 | 3415 | } |
---|
2687 | 3416 | } |
---|
2688 | 3417 | |
---|
.. | .. |
---|
2764 | 3493 | int num_vnics = 1; |
---|
2765 | 3494 | |
---|
2766 | 3495 | #ifdef CONFIG_RFS_ACCEL |
---|
2767 | | - if (bp->flags & BNXT_FLAG_RFS) |
---|
| 3496 | + if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) |
---|
2768 | 3497 | num_vnics += bp->rx_nr_rings; |
---|
2769 | 3498 | #endif |
---|
2770 | 3499 | |
---|
.. | .. |
---|
2786 | 3515 | |
---|
2787 | 3516 | for (i = 0; i < bp->nr_vnics; i++) { |
---|
2788 | 3517 | struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; |
---|
| 3518 | + int j; |
---|
2789 | 3519 | |
---|
2790 | 3520 | vnic->fw_vnic_id = INVALID_HW_RING_ID; |
---|
2791 | | - vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; |
---|
2792 | | - vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; |
---|
| 3521 | + for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) |
---|
| 3522 | + vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; |
---|
| 3523 | + |
---|
2793 | 3524 | vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; |
---|
2794 | 3525 | |
---|
2795 | 3526 | if (bp->vnic_info[i].rss_hash_key) { |
---|
.. | .. |
---|
2837 | 3568 | */ |
---|
2838 | 3569 | void bnxt_set_ring_params(struct bnxt *bp) |
---|
2839 | 3570 | { |
---|
2840 | | - u32 ring_size, rx_size, rx_space; |
---|
| 3571 | + u32 ring_size, rx_size, rx_space, max_rx_cmpl; |
---|
2841 | 3572 | u32 agg_factor = 0, agg_ring_size = 0; |
---|
2842 | 3573 | |
---|
2843 | 3574 | /* 8 for CRC and VLAN */ |
---|
.. | .. |
---|
2893 | 3624 | bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); |
---|
2894 | 3625 | bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; |
---|
2895 | 3626 | |
---|
2896 | | - ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; |
---|
| 3627 | + max_rx_cmpl = bp->rx_ring_size; |
---|
| 3628 | + /* MAX TPA needs to be added because TPA_START completions are |
---|
| 3629 | + * immediately recycled, so the TPA completions are not bound by |
---|
| 3630 | + * the RX ring size. |
---|
| 3631 | + */ |
---|
| 3632 | + if (bp->flags & BNXT_FLAG_TPA) |
---|
| 3633 | + max_rx_cmpl += bp->max_tpa; |
---|
| 3634 | + /* RX and TPA completions are 32-byte, all others are 16-byte */ |
---|
| 3635 | + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; |
---|
2897 | 3636 | bp->cp_ring_size = ring_size; |
---|
2898 | 3637 | |
---|
2899 | 3638 | bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); |
---|
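A worked instance of the completion-ring sizing above: RX and TPA completions occupy two 16-byte slots each (hence the factor of two), while AGG and TX completions take one. The numbers below are illustrative defaults, not values read from hardware:

```c
#include <stdio.h>

int main(void)
{
	unsigned int rx_ring_size = 511, tx_ring_size = 511;
	unsigned int agg_ring_size = 1023, max_tpa = 64;
	int tpa_enabled = 1;
	unsigned int max_rx_cmpl = rx_ring_size;

	if (tpa_enabled)
		max_rx_cmpl += max_tpa; /* TPA_STARTs recycle immediately */

	/* 32-byte RX/TPA completions count twice; AGG/TX count once */
	unsigned int cp_ring_size =
		max_rx_cmpl * 2 + agg_ring_size + tx_ring_size;

	printf("cp_ring_size = %u\n", cp_ring_size); /* 2684 */
	return 0;
}
```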
.. | .. |
---|
2957 | 3696 | } |
---|
2958 | 3697 | |
---|
2959 | 3698 | if (vnic->rss_table) { |
---|
2960 | | - dma_free_coherent(&pdev->dev, PAGE_SIZE, |
---|
| 3699 | + dma_free_coherent(&pdev->dev, vnic->rss_table_size, |
---|
2961 | 3700 | vnic->rss_table, |
---|
2962 | 3701 | vnic->rss_table_dma_addr); |
---|
2963 | 3702 | vnic->rss_table = NULL; |
---|
.. | .. |
---|
3003 | 3742 | } |
---|
3004 | 3743 | } |
---|
3005 | 3744 | |
---|
| 3745 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 3746 | + goto vnic_skip_grps; |
---|
| 3747 | + |
---|
3006 | 3748 | if (vnic->flags & BNXT_VNIC_RSS_FLAG) |
---|
3007 | 3749 | max_rings = bp->rx_nr_rings; |
---|
3008 | 3750 | else |
---|
.. | .. |
---|
3013 | 3755 | rc = -ENOMEM; |
---|
3014 | 3756 | goto out; |
---|
3015 | 3757 | } |
---|
3016 | | - |
---|
| 3758 | +vnic_skip_grps: |
---|
3017 | 3759 | if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && |
---|
3018 | 3760 | !(vnic->flags & BNXT_VNIC_RSS_FLAG)) |
---|
3019 | 3761 | continue; |
---|
3020 | 3762 | |
---|
3021 | 3763 | /* Allocate rss table and hash key */ |
---|
3022 | | - vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, |
---|
| 3764 | + size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); |
---|
| 3765 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 3766 | + size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); |
---|
| 3767 | + |
---|
| 3768 | + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; |
---|
| 3769 | + vnic->rss_table = dma_alloc_coherent(&pdev->dev, |
---|
| 3770 | + vnic->rss_table_size, |
---|
3023 | 3771 | &vnic->rss_table_dma_addr, |
---|
3024 | 3772 | GFP_KERNEL); |
---|
3025 | 3773 | if (!vnic->rss_table) { |
---|
3026 | 3774 | rc = -ENOMEM; |
---|
3027 | 3775 | goto out; |
---|
3028 | 3776 | } |
---|
3029 | | - |
---|
3030 | | - size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); |
---|
3031 | 3777 | |
---|
3032 | 3778 | vnic->rss_hash_key = ((void *)vnic->rss_table) + size; |
---|
3033 | 3779 | vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; |
---|
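
The allocation above packs the RSS indirection table and the hash key into one DMA-coherent buffer: the table is cache-line aligned and the key lives immediately after it at that aligned offset, which is why the removed standalone `size` computation moves up before the alloc. A layout sketch; the 128-entry table and 40-byte key mirror the usual HW_HASH_INDEX_SIZE / HW_HASH_KEY_SIZE values but are assumptions here:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE	64
#define HASH_KEY_SIZE	40	/* assumed HW_HASH_KEY_SIZE */

static size_t align_up(size_t v, size_t a)
{
	return (v + a - 1) & ~(a - 1);
}

int main(void)
{
	size_t tbl = align_up(128 * sizeof(uint16_t), CACHE_LINE);

	/* hash key follows the aligned table in the same DMA buffer */
	printf("table %zu B, key at offset %zu, total %zu B\n",
	       tbl, tbl, tbl + HASH_KEY_SIZE);
	return 0;
}
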
.. | .. |
---|
3047 | 3793 | bp->hwrm_cmd_resp_dma_addr); |
---|
3048 | 3794 | bp->hwrm_cmd_resp_addr = NULL; |
---|
3049 | 3795 | } |
---|
| 3796 | + |
---|
| 3797 | + if (bp->hwrm_cmd_kong_resp_addr) { |
---|
| 3798 | + dma_free_coherent(&pdev->dev, PAGE_SIZE, |
---|
| 3799 | + bp->hwrm_cmd_kong_resp_addr, |
---|
| 3800 | + bp->hwrm_cmd_kong_resp_dma_addr); |
---|
| 3801 | + bp->hwrm_cmd_kong_resp_addr = NULL; |
---|
| 3802 | + } |
---|
| 3803 | +} |
---|
| 3804 | + |
---|
| 3805 | +static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) |
---|
| 3806 | +{ |
---|
| 3807 | + struct pci_dev *pdev = bp->pdev; |
---|
| 3808 | + |
---|
| 3809 | + if (bp->hwrm_cmd_kong_resp_addr) |
---|
| 3810 | + return 0; |
---|
| 3811 | + |
---|
| 3812 | + bp->hwrm_cmd_kong_resp_addr = |
---|
| 3813 | + dma_alloc_coherent(&pdev->dev, PAGE_SIZE, |
---|
| 3814 | + &bp->hwrm_cmd_kong_resp_dma_addr, |
---|
| 3815 | + GFP_KERNEL); |
---|
| 3816 | + if (!bp->hwrm_cmd_kong_resp_addr) |
---|
| 3817 | + return -ENOMEM; |
---|
| 3818 | + |
---|
| 3819 | + return 0; |
---|
3050 | 3820 | } |
---|
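
Note the early return at the top of bnxt_alloc_kong_hwrm_resources(): the helper is idempotent, so it can be called unconditionally whenever the Kong channel is (re)discovered and only the first call allocates. A userspace sketch of the same lazy-allocation pattern, with invented names:

#include <stdlib.h>

/* Allocate *buf at most once; later calls are cheap no-ops. */
static int ensure_buf(void **buf, size_t len)
{
	if (*buf)
		return 0;	/* already allocated */
	*buf = calloc(1, len);
	return *buf ? 0 : -1;
}

int main(void)
{
	void *resp = NULL;

	if (ensure_buf(&resp, 4096) || ensure_buf(&resp, 4096))
		return 1;	/* second call reuses the first buffer */
	free(resp);
	return 0;
}
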
3051 | 3821 | |
---|
3052 | 3822 | static int bnxt_alloc_hwrm_resources(struct bnxt *bp) |
---|
.. | .. |
---|
3067 | 3837 | if (bp->hwrm_short_cmd_req_addr) { |
---|
3068 | 3838 | struct pci_dev *pdev = bp->pdev; |
---|
3069 | 3839 | |
---|
3070 | | - dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, |
---|
| 3840 | + dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, |
---|
3071 | 3841 | bp->hwrm_short_cmd_req_addr, |
---|
3072 | 3842 | bp->hwrm_short_cmd_req_dma_addr); |
---|
3073 | 3843 | bp->hwrm_short_cmd_req_addr = NULL; |
---|
.. | .. |
---|
3078 | 3848 | { |
---|
3079 | 3849 | struct pci_dev *pdev = bp->pdev; |
---|
3080 | 3850 | |
---|
| 3851 | + if (bp->hwrm_short_cmd_req_addr) |
---|
| 3852 | + return 0; |
---|
| 3853 | + |
---|
3081 | 3854 | bp->hwrm_short_cmd_req_addr = |
---|
3082 | | - dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, |
---|
| 3855 | + dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, |
---|
3083 | 3856 | &bp->hwrm_short_cmd_req_dma_addr, |
---|
3084 | 3857 | GFP_KERNEL); |
---|
3085 | 3858 | if (!bp->hwrm_short_cmd_req_addr) |
---|
.. | .. |
---|
3088 | 3861 | return 0; |
---|
3089 | 3862 | } |
---|
3090 | 3863 | |
---|
3091 | | -static void bnxt_free_stats(struct bnxt *bp) |
---|
| 3864 | +static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) |
---|
3092 | 3865 | { |
---|
3093 | | - u32 size, i; |
---|
3094 | | - struct pci_dev *pdev = bp->pdev; |
---|
| 3866 | + kfree(stats->hw_masks); |
---|
| 3867 | + stats->hw_masks = NULL; |
---|
| 3868 | + kfree(stats->sw_stats); |
---|
| 3869 | + stats->sw_stats = NULL; |
---|
| 3870 | + if (stats->hw_stats) { |
---|
| 3871 | + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, |
---|
| 3872 | + stats->hw_stats_map); |
---|
| 3873 | + stats->hw_stats = NULL; |
---|
| 3874 | + } |
---|
| 3875 | +} |
---|
3095 | 3876 | |
---|
| 3877 | +static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, |
---|
| 3878 | + bool alloc_masks) |
---|
| 3879 | +{ |
---|
| 3880 | + stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, |
---|
| 3881 | + &stats->hw_stats_map, GFP_KERNEL); |
---|
| 3882 | + if (!stats->hw_stats) |
---|
| 3883 | + return -ENOMEM; |
---|
| 3884 | + |
---|
| 3885 | + stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); |
---|
| 3886 | + if (!stats->sw_stats) |
---|
| 3887 | + goto stats_mem_err; |
---|
| 3888 | + |
---|
| 3889 | + if (alloc_masks) { |
---|
| 3890 | + stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); |
---|
| 3891 | + if (!stats->hw_masks) |
---|
| 3892 | + goto stats_mem_err; |
---|
| 3893 | + } |
---|
| 3894 | + return 0; |
---|
| 3895 | + |
---|
| 3896 | +stats_mem_err: |
---|
| 3897 | + bnxt_free_stats_mem(bp, stats); |
---|
| 3898 | + return -ENOMEM; |
---|
| 3899 | +} |
---|
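
bnxt_alloc_stats_mem()/bnxt_free_stats_mem() follow the usual partial-failure pattern: any allocation failure unwinds through the single free helper, which also serves the normal teardown path. A userspace analogue of that pairing, with plain heap memory standing in for the DMA-coherent block:

#include <stdint.h>
#include <stdlib.h>

struct stats_mem {
	void *hw_stats;		/* stands in for the DMA-coherent block */
	uint64_t *sw_stats;
	uint64_t *hw_masks;
	size_t len;
};

static void stats_mem_free(struct stats_mem *s)
{
	free(s->hw_masks);
	s->hw_masks = NULL;
	free(s->sw_stats);
	s->sw_stats = NULL;
	free(s->hw_stats);
	s->hw_stats = NULL;
}

static int stats_mem_alloc(struct stats_mem *s, int want_masks)
{
	s->hw_stats = calloc(1, s->len);
	if (!s->hw_stats)
		return -1;
	s->sw_stats = calloc(1, s->len);
	if (!s->sw_stats)
		goto err;
	if (want_masks) {
		s->hw_masks = calloc(1, s->len);
		if (!s->hw_masks)
			goto err;
	}
	return 0;
err:
	stats_mem_free(s);	/* one unwind path for every failure */
	return -1;
}

int main(void)
{
	struct stats_mem s = { .len = 1024 };
	int rc = stats_mem_alloc(&s, 1);

	stats_mem_free(&s);
	return rc;
}
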
| 3900 | + |
---|
| 3901 | +static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) |
---|
| 3902 | +{ |
---|
| 3903 | + int i; |
---|
| 3904 | + |
---|
| 3905 | + for (i = 0; i < count; i++) |
---|
| 3906 | + mask_arr[i] = mask; |
---|
| 3907 | +} |
---|
| 3908 | + |
---|
| 3909 | +static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) |
---|
| 3910 | +{ |
---|
| 3911 | + int i; |
---|
| 3912 | + |
---|
| 3913 | + for (i = 0; i < count; i++) |
---|
| 3914 | + mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); |
---|
| 3915 | +} |
---|
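
bnxt_fill_masks()/bnxt_copy_hw_masks() exist because the hardware counters are narrower than 64 bits and wrap: the fallback widths used further down are 48 bits for P5 ring counters and 40 bits for port counters. Keeping a mask per counter lets the driver accumulate deltas modulo the counter width. A minimal sketch of that idea, not the driver's own accumulation code:

#include <stdint.h>
#include <stdio.h>

static void accumulate(uint64_t *sw, uint64_t *prev, uint64_t cur,
		       uint64_t mask)
{
	*sw += (cur - *prev) & mask;	/* correct across one wrap */
	*prev = cur;
}

int main(void)
{
	uint64_t mask = (1ULL << 40) - 1, sw = 0, prev = mask - 5;

	accumulate(&sw, &prev, 10, mask);	/* counter wrapped past 0 */
	printf("%llu\n", (unsigned long long)sw);	/* prints 16 */
	return 0;
}
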
| 3916 | + |
---|
| 3917 | +static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, |
---|
| 3918 | + struct bnxt_stats_mem *stats) |
---|
| 3919 | +{ |
---|
| 3920 | + struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 3921 | + struct hwrm_func_qstats_ext_input req = {0}; |
---|
| 3922 | + __le64 *hw_masks; |
---|
| 3923 | + int rc; |
---|
| 3924 | + |
---|
| 3925 | + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || |
---|
| 3926 | + !(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 3927 | + return -EOPNOTSUPP; |
---|
| 3928 | + |
---|
| 3929 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); |
---|
| 3930 | + req.fid = cpu_to_le16(0xffff); |
---|
| 3931 | + req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; |
---|
| 3932 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 3933 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 3934 | + if (rc) |
---|
| 3935 | + goto qstat_exit; |
---|
| 3936 | + |
---|
| 3937 | + hw_masks = &resp->rx_ucast_pkts; |
---|
| 3938 | + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); |
---|
| 3939 | + |
---|
| 3940 | +qstat_exit: |
---|
| 3941 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 3942 | + return rc; |
---|
| 3943 | +} |
---|
| 3944 | + |
---|
| 3945 | +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); |
---|
| 3946 | +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); |
---|
| 3947 | + |
---|
| 3948 | +static void bnxt_init_stats(struct bnxt *bp) |
---|
| 3949 | +{ |
---|
| 3950 | + struct bnxt_napi *bnapi = bp->bnapi[0]; |
---|
| 3951 | + struct bnxt_cp_ring_info *cpr; |
---|
| 3952 | + struct bnxt_stats_mem *stats; |
---|
| 3953 | + __le64 *rx_stats, *tx_stats; |
---|
| 3954 | + int rc, rx_count, tx_count; |
---|
| 3955 | + u64 *rx_masks, *tx_masks; |
---|
| 3956 | + u64 mask; |
---|
| 3957 | + u8 flags; |
---|
| 3958 | + |
---|
| 3959 | + cpr = &bnapi->cp_ring; |
---|
| 3960 | + stats = &cpr->stats; |
---|
| 3961 | + rc = bnxt_hwrm_func_qstat_ext(bp, stats); |
---|
| 3962 | + if (rc) { |
---|
| 3963 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 3964 | + mask = (1ULL << 48) - 1; |
---|
| 3965 | + else |
---|
| 3966 | + mask = -1ULL; |
---|
| 3967 | + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); |
---|
| 3968 | + } |
---|
| 3969 | + if (bp->flags & BNXT_FLAG_PORT_STATS) { |
---|
| 3970 | + stats = &bp->port_stats; |
---|
| 3971 | + rx_stats = stats->hw_stats; |
---|
| 3972 | + rx_masks = stats->hw_masks; |
---|
| 3973 | + rx_count = sizeof(struct rx_port_stats) / 8; |
---|
| 3974 | + tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
---|
| 3975 | + tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
---|
| 3976 | + tx_count = sizeof(struct tx_port_stats) / 8; |
---|
| 3977 | + |
---|
| 3978 | + flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; |
---|
| 3979 | + rc = bnxt_hwrm_port_qstats(bp, flags); |
---|
| 3980 | + if (rc) { |
---|
| 3981 | + mask = (1ULL << 40) - 1; |
---|
| 3982 | + |
---|
| 3983 | + bnxt_fill_masks(rx_masks, mask, rx_count); |
---|
| 3984 | + bnxt_fill_masks(tx_masks, mask, tx_count); |
---|
| 3985 | + } else { |
---|
| 3986 | + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); |
---|
| 3987 | + bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); |
---|
| 3988 | + bnxt_hwrm_port_qstats(bp, 0); |
---|
| 3989 | + } |
---|
| 3990 | + } |
---|
| 3991 | + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { |
---|
| 3992 | + stats = &bp->rx_port_stats_ext; |
---|
| 3993 | + rx_stats = stats->hw_stats; |
---|
| 3994 | + rx_masks = stats->hw_masks; |
---|
| 3995 | + rx_count = sizeof(struct rx_port_stats_ext) / 8; |
---|
| 3996 | + stats = &bp->tx_port_stats_ext; |
---|
| 3997 | + tx_stats = stats->hw_stats; |
---|
| 3998 | + tx_masks = stats->hw_masks; |
---|
| 3999 | + tx_count = sizeof(struct tx_port_stats_ext) / 8; |
---|
| 4000 | + |
---|
| 4001 | + flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; |
---|
| 4002 | + rc = bnxt_hwrm_port_qstats_ext(bp, flags); |
---|
| 4003 | + if (rc) { |
---|
| 4004 | + mask = (1ULL << 40) - 1; |
---|
| 4005 | + |
---|
| 4006 | + bnxt_fill_masks(rx_masks, mask, rx_count); |
---|
| 4007 | + if (tx_stats) |
---|
| 4008 | + bnxt_fill_masks(tx_masks, mask, tx_count); |
---|
| 4009 | + } else { |
---|
| 4010 | + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); |
---|
| 4011 | + if (tx_stats) |
---|
| 4012 | + bnxt_copy_hw_masks(tx_masks, tx_stats, |
---|
| 4013 | + tx_count); |
---|
| 4014 | + bnxt_hwrm_port_qstats_ext(bp, 0); |
---|
| 4015 | + } |
---|
| 4016 | + } |
---|
| 4017 | +} |
---|
| 4018 | + |
---|
| 4019 | +static void bnxt_free_port_stats(struct bnxt *bp) |
---|
| 4020 | +{ |
---|
3096 | 4021 | bp->flags &= ~BNXT_FLAG_PORT_STATS; |
---|
3097 | 4022 | bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; |
---|
3098 | 4023 | |
---|
3099 | | - if (bp->hw_rx_port_stats) { |
---|
3100 | | - dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, |
---|
3101 | | - bp->hw_rx_port_stats, |
---|
3102 | | - bp->hw_rx_port_stats_map); |
---|
3103 | | - bp->hw_rx_port_stats = NULL; |
---|
3104 | | - } |
---|
| 4024 | + bnxt_free_stats_mem(bp, &bp->port_stats); |
---|
| 4025 | + bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); |
---|
| 4026 | + bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); |
---|
| 4027 | +} |
---|
3105 | 4028 | |
---|
3106 | | - if (bp->hw_rx_port_stats_ext) { |
---|
3107 | | - dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), |
---|
3108 | | - bp->hw_rx_port_stats_ext, |
---|
3109 | | - bp->hw_rx_port_stats_ext_map); |
---|
3110 | | - bp->hw_rx_port_stats_ext = NULL; |
---|
3111 | | - } |
---|
| 4029 | +static void bnxt_free_ring_stats(struct bnxt *bp) |
---|
| 4030 | +{ |
---|
| 4031 | + int i; |
---|
3112 | 4032 | |
---|
3113 | 4033 | if (!bp->bnapi) |
---|
3114 | 4034 | return; |
---|
3115 | | - |
---|
3116 | | - size = sizeof(struct ctx_hw_stats); |
---|
3117 | 4035 | |
---|
3118 | 4036 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
3119 | 4037 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
3120 | 4038 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
3121 | 4039 | |
---|
3122 | | - if (cpr->hw_stats) { |
---|
3123 | | - dma_free_coherent(&pdev->dev, size, cpr->hw_stats, |
---|
3124 | | - cpr->hw_stats_map); |
---|
3125 | | - cpr->hw_stats = NULL; |
---|
3126 | | - } |
---|
| 4040 | + bnxt_free_stats_mem(bp, &cpr->stats); |
---|
3127 | 4041 | } |
---|
3128 | 4042 | } |
---|
3129 | 4043 | |
---|
3130 | 4044 | static int bnxt_alloc_stats(struct bnxt *bp) |
---|
3131 | 4045 | { |
---|
3132 | 4046 | u32 size, i; |
---|
3133 | | - struct pci_dev *pdev = bp->pdev; |
---|
| 4047 | + int rc; |
---|
3134 | 4048 | |
---|
3135 | | - size = sizeof(struct ctx_hw_stats); |
---|
| 4049 | + size = bp->hw_ring_stats_size; |
---|
3136 | 4050 | |
---|
3137 | 4051 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
3138 | 4052 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
3139 | 4053 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
3140 | 4054 | |
---|
3141 | | - cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, |
---|
3142 | | - &cpr->hw_stats_map, |
---|
3143 | | - GFP_KERNEL); |
---|
3144 | | - if (!cpr->hw_stats) |
---|
3145 | | - return -ENOMEM; |
---|
| 4055 | + cpr->stats.len = size; |
---|
| 4056 | + rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); |
---|
| 4057 | + if (rc) |
---|
| 4058 | + return rc; |
---|
3146 | 4059 | |
---|
3147 | 4060 | cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; |
---|
3148 | 4061 | } |
---|
3149 | 4062 | |
---|
3150 | | - if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { |
---|
3151 | | - bp->hw_port_stats_size = sizeof(struct rx_port_stats) + |
---|
3152 | | - sizeof(struct tx_port_stats) + 1024; |
---|
| 4063 | + if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) |
---|
| 4064 | + return 0; |
---|
3153 | 4065 | |
---|
3154 | | - bp->hw_rx_port_stats = |
---|
3155 | | - dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, |
---|
3156 | | - &bp->hw_rx_port_stats_map, |
---|
3157 | | - GFP_KERNEL); |
---|
3158 | | - if (!bp->hw_rx_port_stats) |
---|
3159 | | - return -ENOMEM; |
---|
| 4066 | + if (bp->port_stats.hw_stats) |
---|
| 4067 | + goto alloc_ext_stats; |
---|
3160 | 4068 | |
---|
3161 | | - bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + |
---|
3162 | | - 512; |
---|
3163 | | - bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + |
---|
3164 | | - sizeof(struct rx_port_stats) + 512; |
---|
3165 | | - bp->flags |= BNXT_FLAG_PORT_STATS; |
---|
| 4069 | + bp->port_stats.len = BNXT_PORT_STATS_SIZE; |
---|
| 4070 | + rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); |
---|
| 4071 | + if (rc) |
---|
| 4072 | + return rc; |
---|
3166 | 4073 | |
---|
3167 | | - /* Display extended statistics only if FW supports it */ |
---|
3168 | | - if (bp->hwrm_spec_code < 0x10804 || |
---|
3169 | | - bp->hwrm_spec_code == 0x10900) |
---|
| 4074 | + bp->flags |= BNXT_FLAG_PORT_STATS; |
---|
| 4075 | + |
---|
| 4076 | +alloc_ext_stats: |
---|
| 4077 | + /* Display extended statistics only if FW supports it */ |
---|
| 4078 | + if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) |
---|
| 4079 | + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) |
---|
3170 | 4080 | return 0; |
---|
3171 | 4081 | |
---|
3172 | | - bp->hw_rx_port_stats_ext = |
---|
3173 | | - dma_zalloc_coherent(&pdev->dev, |
---|
3174 | | - sizeof(struct rx_port_stats_ext), |
---|
3175 | | - &bp->hw_rx_port_stats_ext_map, |
---|
3176 | | - GFP_KERNEL); |
---|
3177 | | - if (!bp->hw_rx_port_stats_ext) |
---|
3178 | | - return 0; |
---|
| 4082 | + if (bp->rx_port_stats_ext.hw_stats) |
---|
| 4083 | + goto alloc_tx_ext_stats; |
---|
3179 | 4084 | |
---|
3180 | | - bp->flags |= BNXT_FLAG_PORT_STATS_EXT; |
---|
| 4085 | + bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); |
---|
| 4086 | + rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); |
---|
| 4087 | + /* Extended stats are optional */ |
---|
| 4088 | + if (rc) |
---|
| 4089 | + return 0; |
---|
| 4090 | + |
---|
| 4091 | +alloc_tx_ext_stats: |
---|
| 4092 | + if (bp->tx_port_stats_ext.hw_stats) |
---|
| 4093 | + return 0; |
---|
| 4094 | + |
---|
| 4095 | + if (bp->hwrm_spec_code >= 0x10902 || |
---|
| 4096 | + (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { |
---|
| 4097 | + bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); |
---|
| 4098 | + rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); |
---|
| 4099 | + /* Extended stats are optional */ |
---|
| 4100 | + if (rc) |
---|
| 4101 | + return 0; |
---|
3181 | 4102 | } |
---|
| 4103 | + bp->flags |= BNXT_FLAG_PORT_STATS_EXT; |
---|
3182 | 4104 | return 0; |
---|
3183 | 4105 | } |
---|
3184 | 4106 | |
---|
.. | .. |
---|
3277 | 4199 | bnxt_free_cp_rings(bp); |
---|
3278 | 4200 | bnxt_free_ntp_fltrs(bp, irq_re_init); |
---|
3279 | 4201 | if (irq_re_init) { |
---|
3280 | | - bnxt_free_stats(bp); |
---|
| 4202 | + bnxt_free_ring_stats(bp); |
---|
| 4203 | + if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) || |
---|
| 4204 | + test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
---|
| 4205 | + bnxt_free_port_stats(bp); |
---|
3281 | 4206 | bnxt_free_ring_grps(bp); |
---|
3282 | 4207 | bnxt_free_vnics(bp); |
---|
3283 | 4208 | kfree(bp->tx_ring_map); |
---|
.. | .. |
---|
3315 | 4240 | bp->bnapi[i] = bnapi; |
---|
3316 | 4241 | bp->bnapi[i]->index = i; |
---|
3317 | 4242 | bp->bnapi[i]->bp = bp; |
---|
| 4243 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 4244 | + struct bnxt_cp_ring_info *cpr = |
---|
| 4245 | + &bp->bnapi[i]->cp_ring; |
---|
| 4246 | + |
---|
| 4247 | + cpr->cp_ring_struct.ring_mem.flags = |
---|
| 4248 | + BNXT_RMEM_RING_PTE_FLAG; |
---|
| 4249 | + } |
---|
3318 | 4250 | } |
---|
3319 | 4251 | |
---|
3320 | 4252 | bp->rx_ring = kcalloc(bp->rx_nr_rings, |
---|
.. | .. |
---|
3324 | 4256 | return -ENOMEM; |
---|
3325 | 4257 | |
---|
3326 | 4258 | for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
3327 | | - bp->rx_ring[i].bnapi = bp->bnapi[i]; |
---|
| 4259 | + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
| 4260 | + |
---|
| 4261 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 4262 | + rxr->rx_ring_struct.ring_mem.flags = |
---|
| 4263 | + BNXT_RMEM_RING_PTE_FLAG; |
---|
| 4264 | + rxr->rx_agg_ring_struct.ring_mem.flags = |
---|
| 4265 | + BNXT_RMEM_RING_PTE_FLAG; |
---|
| 4266 | + } |
---|
| 4267 | + rxr->bnapi = bp->bnapi[i]; |
---|
3328 | 4268 | bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; |
---|
3329 | 4269 | } |
---|
3330 | 4270 | |
---|
.. | .. |
---|
3346 | 4286 | j = bp->rx_nr_rings; |
---|
3347 | 4287 | |
---|
3348 | 4288 | for (i = 0; i < bp->tx_nr_rings; i++, j++) { |
---|
3349 | | - bp->tx_ring[i].bnapi = bp->bnapi[j]; |
---|
3350 | | - bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; |
---|
| 4289 | + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
---|
| 4290 | + |
---|
| 4291 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 4292 | + txr->tx_ring_struct.ring_mem.flags = |
---|
| 4293 | + BNXT_RMEM_RING_PTE_FLAG; |
---|
| 4294 | + txr->bnapi = bp->bnapi[j]; |
---|
| 4295 | + bp->bnapi[j]->tx_ring = txr; |
---|
3351 | 4296 | bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; |
---|
3352 | 4297 | if (i >= bp->tx_nr_rings_xdp) { |
---|
3353 | | - bp->tx_ring[i].txq_index = i - |
---|
3354 | | - bp->tx_nr_rings_xdp; |
---|
| 4298 | + txr->txq_index = i - bp->tx_nr_rings_xdp; |
---|
3355 | 4299 | bp->bnapi[j]->tx_int = bnxt_tx_int; |
---|
3356 | 4300 | } else { |
---|
3357 | 4301 | bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; |
---|
.. | .. |
---|
3362 | 4306 | rc = bnxt_alloc_stats(bp); |
---|
3363 | 4307 | if (rc) |
---|
3364 | 4308 | goto alloc_mem_err; |
---|
| 4309 | + bnxt_init_stats(bp); |
---|
3365 | 4310 | |
---|
3366 | 4311 | rc = bnxt_alloc_ntp_fltrs(bp); |
---|
3367 | 4312 | if (rc) |
---|
.. | .. |
---|
3411 | 4356 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
---|
3412 | 4357 | |
---|
3413 | 4358 | if (ring->fw_ring_id != INVALID_HW_RING_ID) |
---|
3414 | | - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); |
---|
| 4359 | + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); |
---|
3415 | 4360 | } |
---|
3416 | 4361 | } |
---|
3417 | 4362 | |
---|
.. | .. |
---|
3447 | 4392 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
3448 | 4393 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
3449 | 4394 | |
---|
3450 | | - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); |
---|
| 4395 | + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); |
---|
3451 | 4396 | } |
---|
3452 | 4397 | } |
---|
3453 | 4398 | |
---|
.. | .. |
---|
3459 | 4404 | req->req_type = cpu_to_le16(req_type); |
---|
3460 | 4405 | req->cmpl_ring = cpu_to_le16(cmpl_ring); |
---|
3461 | 4406 | req->target_id = cpu_to_le16(target_id); |
---|
3462 | | - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); |
---|
| 4407 | + if (bnxt_kong_hwrm_message(bp, req)) |
---|
| 4408 | + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); |
---|
| 4409 | + else |
---|
| 4410 | + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); |
---|
| 4411 | +} |
---|
| 4412 | + |
---|
| 4413 | +static int bnxt_hwrm_to_stderr(u32 hwrm_err) |
---|
| 4414 | +{ |
---|
| 4415 | + switch (hwrm_err) { |
---|
| 4416 | + case HWRM_ERR_CODE_SUCCESS: |
---|
| 4417 | + return 0; |
---|
| 4418 | + case HWRM_ERR_CODE_RESOURCE_LOCKED: |
---|
| 4419 | + return -EROFS; |
---|
| 4420 | + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: |
---|
| 4421 | + return -EACCES; |
---|
| 4422 | + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: |
---|
| 4423 | + return -ENOSPC; |
---|
| 4424 | + case HWRM_ERR_CODE_INVALID_PARAMS: |
---|
| 4425 | + case HWRM_ERR_CODE_INVALID_FLAGS: |
---|
| 4426 | + case HWRM_ERR_CODE_INVALID_ENABLES: |
---|
| 4427 | + case HWRM_ERR_CODE_UNSUPPORTED_TLV: |
---|
| 4428 | + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: |
---|
| 4429 | + return -EINVAL; |
---|
| 4430 | + case HWRM_ERR_CODE_NO_BUFFER: |
---|
| 4431 | + return -ENOMEM; |
---|
| 4432 | + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: |
---|
| 4433 | + case HWRM_ERR_CODE_BUSY: |
---|
| 4434 | + return -EAGAIN; |
---|
| 4435 | + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: |
---|
| 4436 | + return -EOPNOTSUPP; |
---|
| 4437 | + default: |
---|
| 4438 | + return -EIO; |
---|
| 4439 | + } |
---|
3463 | 4440 | } |
---|
3464 | 4441 | |
---|
3465 | 4442 | static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, |
---|
.. | .. |
---|
3468 | 4445 | int i, intr_process, rc, tmo_count; |
---|
3469 | 4446 | struct input *req = msg; |
---|
3470 | 4447 | u32 *data = msg; |
---|
3471 | | - __le32 *resp_len; |
---|
3472 | 4448 | u8 *valid; |
---|
3473 | 4449 | u16 cp_ring_id, len = 0; |
---|
3474 | 4450 | struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; |
---|
3475 | 4451 | u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; |
---|
3476 | 4452 | struct hwrm_short_input short_input = {0}; |
---|
| 4453 | + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; |
---|
| 4454 | + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; |
---|
| 4455 | + u16 dst = BNXT_HWRM_CHNL_CHIMP; |
---|
3477 | 4456 | |
---|
3478 | | - req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); |
---|
| 4457 | + if (BNXT_NO_FW_ACCESS(bp) && |
---|
| 4458 | + le16_to_cpu(req->req_type) != HWRM_FUNC_RESET) |
---|
| 4459 | + return -EBUSY; |
---|
| 4460 | + |
---|
| 4461 | + if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { |
---|
| 4462 | + if (msg_len > bp->hwrm_max_ext_req_len || |
---|
| 4463 | + !bp->hwrm_short_cmd_req_addr) |
---|
| 4464 | + return -EINVAL; |
---|
| 4465 | + } |
---|
| 4466 | + |
---|
| 4467 | + if (bnxt_hwrm_kong_chnl(bp, req)) { |
---|
| 4468 | + dst = BNXT_HWRM_CHNL_KONG; |
---|
| 4469 | + bar_offset = BNXT_GRCPF_REG_KONG_COMM; |
---|
| 4470 | + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; |
---|
| 4471 | + resp = bp->hwrm_cmd_kong_resp_addr; |
---|
| 4472 | + } |
---|
| 4473 | + |
---|
3479 | 4474 | memset(resp, 0, PAGE_SIZE); |
---|
3480 | 4475 | cp_ring_id = le16_to_cpu(req->cmpl_ring); |
---|
3481 | 4476 | intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; |
---|
3482 | 4477 | |
---|
3483 | | - if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) { |
---|
| 4478 | + req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); |
---|
| 4479 | + /* currently supports only one outstanding message */ |
---|
| 4480 | + if (intr_process) |
---|
| 4481 | + bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); |
---|
| 4482 | + |
---|
| 4483 | + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || |
---|
| 4484 | + msg_len > BNXT_HWRM_MAX_REQ_LEN) { |
---|
3484 | 4485 | void *short_cmd_req = bp->hwrm_short_cmd_req_addr; |
---|
| 4486 | + u16 max_msg_len; |
---|
| 4487 | + |
---|
| 4488 | + /* Set boundary for maximum extended request length for short |
---|
| 4489 | + * cmd format. If passed up from device use the max supported |
---|
| 4490 | + * internal req length. |
---|
| 4491 | + */ |
---|
| 4492 | + max_msg_len = bp->hwrm_max_ext_req_len; |
---|
3485 | 4493 | |
---|
3486 | 4494 | memcpy(short_cmd_req, req, msg_len); |
---|
3487 | | - memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN - |
---|
3488 | | - msg_len); |
---|
| 4495 | + if (msg_len < max_msg_len) |
---|
| 4496 | + memset(short_cmd_req + msg_len, 0, |
---|
| 4497 | + max_msg_len - msg_len); |
---|
3489 | 4498 | |
---|
3490 | 4499 | short_input.req_type = req->req_type; |
---|
3491 | 4500 | short_input.signature = |
---|
.. | .. |
---|
3504 | 4513 | } |
---|
3505 | 4514 | |
---|
3506 | 4515 | /* Write request msg to hwrm channel */ |
---|
3507 | | - __iowrite32_copy(bp->bar0, data, msg_len / 4); |
---|
| 4516 | + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); |
---|
3508 | 4517 | |
---|
3509 | 4518 | for (i = msg_len; i < max_req_len; i += 4) |
---|
3510 | | - writel(0, bp->bar0 + i); |
---|
3511 | | - |
---|
3512 | | - /* currently supports only one outstanding message */ |
---|
3513 | | - if (intr_process) |
---|
3514 | | - bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); |
---|
| 4519 | + writel(0, bp->bar0 + bar_offset + i); |
---|
3515 | 4520 | |
---|
3516 | 4521 | /* Ring channel doorbell */ |
---|
3517 | | - writel(1, bp->bar0 + 0x100); |
---|
| 4522 | + writel(1, bp->bar0 + doorbell_offset); |
---|
| 4523 | + |
---|
| 4524 | + if (!pci_is_enabled(bp->pdev)) |
---|
| 4525 | + return 0; |
---|
3518 | 4526 | |
---|
3519 | 4527 | if (!timeout) |
---|
3520 | 4528 | timeout = DFLT_HWRM_CMD_TIMEOUT; |
---|
.. | .. |
---|
3529 | 4537 | tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; |
---|
3530 | 4538 | timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; |
---|
3531 | 4539 | tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); |
---|
3532 | | - resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; |
---|
| 4540 | + |
---|
3533 | 4541 | if (intr_process) { |
---|
| 4542 | + u16 seq_id = bp->hwrm_intr_seq_id; |
---|
| 4543 | + |
---|
3534 | 4544 | /* Wait until hwrm response cmpl interrupt is processed */ |
---|
3535 | | - while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && |
---|
| 4545 | + while (bp->hwrm_intr_seq_id != (u16)~seq_id && |
---|
3536 | 4546 | i++ < tmo_count) { |
---|
| 4547 | + /* Abort the wait for completion if the FW health |
---|
| 4548 | + * check has failed. |
---|
| 4549 | + */ |
---|
| 4550 | + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
---|
| 4551 | + return -EBUSY; |
---|
3537 | 4552 | /* on first few passes, just barely sleep */ |
---|
3538 | 4553 | if (i < HWRM_SHORT_TIMEOUT_COUNTER) |
---|
3539 | 4554 | usleep_range(HWRM_SHORT_MIN_TIMEOUT, |
---|
.. | .. |
---|
3543 | 4558 | HWRM_MAX_TIMEOUT); |
---|
3544 | 4559 | } |
---|
3545 | 4560 | |
---|
3546 | | - if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { |
---|
3547 | | - netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", |
---|
3548 | | - le16_to_cpu(req->req_type)); |
---|
3549 | | - return -1; |
---|
| 4561 | + if (bp->hwrm_intr_seq_id != (u16)~seq_id) { |
---|
| 4562 | + if (!silent) |
---|
| 4563 | + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", |
---|
| 4564 | + le16_to_cpu(req->req_type)); |
---|
| 4565 | + return -EBUSY; |
---|
3550 | 4566 | } |
---|
3551 | | - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> |
---|
3552 | | - HWRM_RESP_LEN_SFT; |
---|
3553 | | - valid = bp->hwrm_cmd_resp_addr + len - 1; |
---|
| 4567 | + len = le16_to_cpu(resp->resp_len); |
---|
| 4568 | + valid = ((u8 *)resp) + len - 1; |
---|
3554 | 4569 | } else { |
---|
3555 | 4570 | int j; |
---|
3556 | 4571 | |
---|
3557 | 4572 | /* Check if response len is updated */ |
---|
3558 | 4573 | for (i = 0; i < tmo_count; i++) { |
---|
3559 | | - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> |
---|
3560 | | - HWRM_RESP_LEN_SFT; |
---|
| 4574 | + /* Abort the wait for completion if the FW health |
---|
| 4575 | + * check has failed. |
---|
| 4576 | + */ |
---|
| 4577 | + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
---|
| 4578 | + return -EBUSY; |
---|
| 4579 | + len = le16_to_cpu(resp->resp_len); |
---|
3561 | 4580 | if (len) |
---|
3562 | 4581 | break; |
---|
3563 | 4582 | /* on first few passes, just barely sleep */ |
---|
.. | .. |
---|
3570 | 4589 | } |
---|
3571 | 4590 | |
---|
3572 | 4591 | if (i >= tmo_count) { |
---|
3573 | | - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", |
---|
3574 | | - HWRM_TOTAL_TIMEOUT(i), |
---|
3575 | | - le16_to_cpu(req->req_type), |
---|
3576 | | - le16_to_cpu(req->seq_id), len); |
---|
3577 | | - return -1; |
---|
| 4592 | + if (!silent) |
---|
| 4593 | + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", |
---|
| 4594 | + HWRM_TOTAL_TIMEOUT(i), |
---|
| 4595 | + le16_to_cpu(req->req_type), |
---|
| 4596 | + le16_to_cpu(req->seq_id), len); |
---|
| 4597 | + return -EBUSY; |
---|
3578 | 4598 | } |
---|
3579 | 4599 | |
---|
3580 | 4600 | /* Last byte of resp contains valid bit */ |
---|
3581 | | - valid = bp->hwrm_cmd_resp_addr + len - 1; |
---|
| 4601 | + valid = ((u8 *)resp) + len - 1; |
---|
3582 | 4602 | for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { |
---|
3583 | 4603 | /* make sure we read from updated DMA memory */ |
---|
3584 | 4604 | dma_rmb(); |
---|
.. | .. |
---|
3588 | 4608 | } |
---|
3589 | 4609 | |
---|
3590 | 4610 | if (j >= HWRM_VALID_BIT_DELAY_USEC) { |
---|
3591 | | - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", |
---|
3592 | | - HWRM_TOTAL_TIMEOUT(i), |
---|
3593 | | - le16_to_cpu(req->req_type), |
---|
3594 | | - le16_to_cpu(req->seq_id), len, *valid); |
---|
3595 | | - return -1; |
---|
| 4611 | + if (!silent) |
---|
| 4612 | + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", |
---|
| 4613 | + HWRM_TOTAL_TIMEOUT(i), |
---|
| 4614 | + le16_to_cpu(req->req_type), |
---|
| 4615 | + le16_to_cpu(req->seq_id), len, |
---|
| 4616 | + *valid); |
---|
| 4617 | + return -EBUSY; |
---|
3596 | 4618 | } |
---|
3597 | 4619 | } |
---|
3598 | 4620 | |
---|
.. | .. |
---|
3606 | 4628 | netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", |
---|
3607 | 4629 | le16_to_cpu(resp->req_type), |
---|
3608 | 4630 | le16_to_cpu(resp->seq_id), rc); |
---|
3609 | | - return rc; |
---|
| 4631 | + return bnxt_hwrm_to_stderr(rc); |
---|
3610 | 4632 | } |
---|
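
One detail worth calling out in the rewritten wait loop above: the completion path signals a finished command by replacing the stored sequence id with its bitwise complement, so the sender polls for (u16)~seq_id instead of a separate done flag. A minimal single-threaded model of that handshake; the real driver adds sleeping, timeouts, locking, and the FW-fatal bailout shown in the diff:

#include <stdint.h>
#include <stdio.h>

static volatile uint16_t hwrm_intr_seq_id;

static void completion_handler(uint16_t seq)
{
	hwrm_intr_seq_id = (uint16_t)~seq;	/* mark seq as done */
}

static int wait_done(uint16_t seq, int tries)
{
	while (hwrm_intr_seq_id != (uint16_t)~seq && tries--)
		;			/* driver sleeps/backs off here */
	return hwrm_intr_seq_id == (uint16_t)~seq ? 0 : -1;
}

int main(void)
{
	hwrm_intr_seq_id = 100;		/* outstanding seq id */
	completion_handler(100);
	printf("%d\n", wait_done(100, 1));	/* prints 0 */
	return 0;
}
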
3611 | 4633 | |
---|
3612 | 4634 | int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) |
---|
.. | .. |
---|
3641 | 4663 | return rc; |
---|
3642 | 4664 | } |
---|
3643 | 4665 | |
---|
3644 | | -int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, |
---|
3645 | | - int bmap_size) |
---|
3646 | | -{ |
---|
3647 | | - struct hwrm_func_drv_rgtr_input req = {0}; |
---|
3648 | | - DECLARE_BITMAP(async_events_bmap, 256); |
---|
3649 | | - u32 *events = (u32 *)async_events_bmap; |
---|
3650 | | - int i; |
---|
3651 | | - |
---|
3652 | | - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); |
---|
3653 | | - |
---|
3654 | | - req.enables = |
---|
3655 | | - cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); |
---|
3656 | | - |
---|
3657 | | - memset(async_events_bmap, 0, sizeof(async_events_bmap)); |
---|
3658 | | - for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) |
---|
3659 | | - __set_bit(bnxt_async_events_arr[i], async_events_bmap); |
---|
3660 | | - |
---|
3661 | | - if (bmap && bmap_size) { |
---|
3662 | | - for (i = 0; i < bmap_size; i++) { |
---|
3663 | | - if (test_bit(i, bmap)) |
---|
3664 | | - __set_bit(i, async_events_bmap); |
---|
3665 | | - } |
---|
3666 | | - } |
---|
3667 | | - |
---|
3668 | | - for (i = 0; i < 8; i++) |
---|
3669 | | - req.async_event_fwd[i] |= cpu_to_le32(events[i]); |
---|
3670 | | - |
---|
3671 | | - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
3672 | | -} |
---|
3673 | | - |
---|
3674 | | -static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) |
---|
| 4666 | +int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, |
---|
| 4667 | + bool async_only) |
---|
3675 | 4668 | { |
---|
3676 | 4669 | struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; |
---|
3677 | 4670 | struct hwrm_func_drv_rgtr_input req = {0}; |
---|
3678 | | - int rc; |
---|
| 4671 | + DECLARE_BITMAP(async_events_bmap, 256); |
---|
| 4672 | + u32 *events = (u32 *)async_events_bmap; |
---|
| 4673 | + u32 flags; |
---|
| 4674 | + int rc, i; |
---|
3679 | 4675 | |
---|
3680 | 4676 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); |
---|
3681 | 4677 | |
---|
3682 | 4678 | req.enables = |
---|
3683 | 4679 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | |
---|
3684 | | - FUNC_DRV_RGTR_REQ_ENABLES_VER); |
---|
| 4680 | + FUNC_DRV_RGTR_REQ_ENABLES_VER | |
---|
| 4681 | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); |
---|
3685 | 4682 | |
---|
3686 | 4683 | req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); |
---|
3687 | | - req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE); |
---|
| 4684 | + flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; |
---|
| 4685 | + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) |
---|
| 4686 | + flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; |
---|
| 4687 | + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) |
---|
| 4688 | + flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | |
---|
| 4689 | + FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; |
---|
| 4690 | + req.flags = cpu_to_le32(flags); |
---|
3688 | 4691 | req.ver_maj_8b = DRV_VER_MAJ; |
---|
3689 | 4692 | req.ver_min_8b = DRV_VER_MIN; |
---|
3690 | 4693 | req.ver_upd_8b = DRV_VER_UPD; |
---|
.. | .. |
---|
3713 | 4716 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); |
---|
3714 | 4717 | } |
---|
3715 | 4718 | |
---|
| 4719 | + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) |
---|
| 4720 | + req.flags |= cpu_to_le32( |
---|
| 4721 | + FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); |
---|
| 4722 | + |
---|
| 4723 | + memset(async_events_bmap, 0, sizeof(async_events_bmap)); |
---|
| 4724 | + for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { |
---|
| 4725 | + u16 event_id = bnxt_async_events_arr[i]; |
---|
| 4726 | + |
---|
| 4727 | + if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && |
---|
| 4728 | + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) |
---|
| 4729 | + continue; |
---|
| 4730 | + __set_bit(bnxt_async_events_arr[i], async_events_bmap); |
---|
| 4731 | + } |
---|
| 4732 | + if (bmap && bmap_size) { |
---|
| 4733 | + for (i = 0; i < bmap_size; i++) { |
---|
| 4734 | + if (test_bit(i, bmap)) |
---|
| 4735 | + __set_bit(i, async_events_bmap); |
---|
| 4736 | + } |
---|
| 4737 | + } |
---|
| 4738 | + for (i = 0; i < 8; i++) |
---|
| 4739 | + req.async_event_fwd[i] |= cpu_to_le32(events[i]); |
---|
| 4740 | + |
---|
| 4741 | + if (async_only) |
---|
| 4742 | + req.enables = |
---|
| 4743 | + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); |
---|
| 4744 | + |
---|
3716 | 4745 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
3717 | 4746 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
3718 | | - if (rc) |
---|
3719 | | - rc = -EIO; |
---|
3720 | | - else if (resp->flags & |
---|
3721 | | - cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) |
---|
3722 | | - bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; |
---|
| 4747 | + if (!rc) { |
---|
| 4748 | + set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); |
---|
| 4749 | + if (resp->flags & |
---|
| 4750 | + cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) |
---|
| 4751 | + bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; |
---|
| 4752 | + } |
---|
3723 | 4753 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
3724 | 4754 | return rc; |
---|
3725 | 4755 | } |
---|
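
The reworked bnxt_hwrm_func_drv_rgtr() builds a 256-bit async-event bitmap and then copies it into the request as eight little-endian 32-bit words. A sketch of that bit-to-word packing; event id 33 is just an example value:

#include <stdint.h>
#include <stdio.h>

#define NBITS	256
#define NWORDS	(NBITS / 32)

static void set_event(uint32_t *words, unsigned int id)
{
	words[id / 32] |= 1U << (id % 32);
}

int main(void)
{
	uint32_t events[NWORDS] = { 0 };

	set_event(events, 33);		/* lands in word 1, bit 1 */
	printf("word1 = 0x%x\n", events[1]);	/* prints 0x2 */
	return 0;
}
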
.. | .. |
---|
3727 | 4757 | static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) |
---|
3728 | 4758 | { |
---|
3729 | 4759 | struct hwrm_func_drv_unrgtr_input req = {0}; |
---|
| 4760 | + |
---|
| 4761 | + if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) |
---|
| 4762 | + return 0; |
---|
3730 | 4763 | |
---|
3731 | 4764 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); |
---|
3732 | 4765 | return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
.. | .. |
---|
3742 | 4775 | |
---|
3743 | 4776 | switch (tunnel_type) { |
---|
3744 | 4777 | case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: |
---|
3745 | | - req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; |
---|
| 4778 | + req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); |
---|
| 4779 | + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; |
---|
3746 | 4780 | break; |
---|
3747 | 4781 | case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: |
---|
3748 | | - req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; |
---|
| 4782 | + req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); |
---|
| 4783 | + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; |
---|
3749 | 4784 | break; |
---|
3750 | 4785 | default: |
---|
3751 | 4786 | break; |
---|
.. | .. |
---|
3780 | 4815 | |
---|
3781 | 4816 | switch (tunnel_type) { |
---|
3782 | 4817 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: |
---|
3783 | | - bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; |
---|
| 4818 | + bp->vxlan_fw_dst_port_id = |
---|
| 4819 | + le16_to_cpu(resp->tunnel_dst_port_id); |
---|
3784 | 4820 | break; |
---|
3785 | 4821 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: |
---|
3786 | | - bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; |
---|
| 4822 | + bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); |
---|
3787 | 4823 | break; |
---|
3788 | 4824 | default: |
---|
3789 | 4825 | break; |
---|
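
The two tunnel hunks are endianness fixes: tunnel_dst_port_id is a little-endian firmware field, so it must pass through cpu_to_le16()/le16_to_cpu() rather than being stored raw (harmless on x86, byte-swapped on big-endian hosts); the free path also now invalidates the cached id. A portable userspace stand-in showing what the conversion produces:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the little-endian byte image of v, whatever the host order. */
static uint16_t to_le16(uint16_t v)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };
	uint16_t out;

	memcpy(&out, b, sizeof(out));
	return out;
}

int main(void)
{
	/* prints 0x1234 on little-endian hosts, 0x3412 on big-endian */
	printf("0x%04x stored as 0x%04x\n", 0x1234, to_le16(0x1234));
	return 0;
}
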
.. | .. |
---|
3841 | 4877 | static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, |
---|
3842 | 4878 | struct bnxt_ntuple_filter *fltr) |
---|
3843 | 4879 | { |
---|
3844 | | - int rc = 0; |
---|
3845 | 4880 | struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; |
---|
3846 | | - struct hwrm_cfa_ntuple_filter_alloc_output *resp = |
---|
3847 | | - bp->hwrm_cmd_resp_addr; |
---|
| 4881 | + struct hwrm_cfa_ntuple_filter_alloc_output *resp; |
---|
3848 | 4882 | struct flow_keys *keys = &fltr->fkeys; |
---|
3849 | | - struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; |
---|
| 4883 | + struct bnxt_vnic_info *vnic; |
---|
| 4884 | + u32 flags = 0; |
---|
| 4885 | + int rc = 0; |
---|
3850 | 4886 | |
---|
3851 | 4887 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); |
---|
3852 | 4888 | req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; |
---|
3853 | 4889 | |
---|
| 4890 | + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { |
---|
| 4891 | + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; |
---|
| 4892 | + req.dst_id = cpu_to_le16(fltr->rxq); |
---|
| 4893 | + } else { |
---|
| 4894 | + vnic = &bp->vnic_info[fltr->rxq + 1]; |
---|
| 4895 | + req.dst_id = cpu_to_le16(vnic->fw_vnic_id); |
---|
| 4896 | + } |
---|
| 4897 | + req.flags = cpu_to_le32(flags); |
---|
3854 | 4898 | req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); |
---|
3855 | 4899 | |
---|
3856 | 4900 | req.ethertype = htons(ETH_P_IP); |
---|
.. | .. |
---|
3889 | 4933 | req.dst_port = keys->ports.dst; |
---|
3890 | 4934 | req.dst_port_mask = cpu_to_be16(0xffff); |
---|
3891 | 4935 | |
---|
3892 | | - req.dst_id = cpu_to_le16(vnic->fw_vnic_id); |
---|
3893 | 4936 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
3894 | 4937 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
3895 | | - if (!rc) |
---|
| 4938 | + if (!rc) { |
---|
| 4939 | + resp = bnxt_get_hwrm_resp_addr(bp, &req); |
---|
3896 | 4940 | fltr->filter_id = resp->ntuple_filter_id; |
---|
| 4941 | + } |
---|
3897 | 4942 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
3898 | 4943 | return rc; |
---|
3899 | 4944 | } |
---|
.. | .. |
---|
3964 | 5009 | static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) |
---|
3965 | 5010 | { |
---|
3966 | 5011 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
---|
| 5012 | + u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; |
---|
3967 | 5013 | struct hwrm_vnic_tpa_cfg_input req = {0}; |
---|
3968 | 5014 | |
---|
3969 | 5015 | if (vnic->fw_vnic_id == INVALID_HW_RING_ID) |
---|
.. | .. |
---|
4003 | 5049 | nsegs = (MAX_SKB_FRAGS - n) / n; |
---|
4004 | 5050 | } |
---|
4005 | 5051 | |
---|
4006 | | - segs = ilog2(nsegs); |
---|
| 5052 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5053 | + segs = MAX_TPA_SEGS_P5; |
---|
| 5054 | + max_aggs = bp->max_tpa; |
---|
| 5055 | + } else { |
---|
| 5056 | + segs = ilog2(nsegs); |
---|
| 5057 | + } |
---|
4007 | 5058 | req.max_agg_segs = cpu_to_le16(segs); |
---|
4008 | | - req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); |
---|
| 5059 | + req.max_aggs = cpu_to_le16(max_aggs); |
---|
4009 | 5060 | |
---|
4010 | 5061 | req.min_agg_len = cpu_to_le32(512); |
---|
4011 | 5062 | } |
---|
.. | .. |
---|
4014 | 5065 | return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4015 | 5066 | } |
---|
4016 | 5067 | |
---|
| 5068 | +static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) |
---|
| 5069 | +{ |
---|
| 5070 | + struct bnxt_ring_grp_info *grp_info; |
---|
| 5071 | + |
---|
| 5072 | + grp_info = &bp->grp_info[ring->grp_idx]; |
---|
| 5073 | + return grp_info->cp_fw_ring_id; |
---|
| 5074 | +} |
---|
| 5075 | + |
---|
| 5076 | +static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) |
---|
| 5077 | +{ |
---|
| 5078 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5079 | + struct bnxt_napi *bnapi = rxr->bnapi; |
---|
| 5080 | + struct bnxt_cp_ring_info *cpr; |
---|
| 5081 | + |
---|
| 5082 | + cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; |
---|
| 5083 | + return cpr->cp_ring_struct.fw_ring_id; |
---|
| 5084 | + } else { |
---|
| 5085 | + return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); |
---|
| 5086 | + } |
---|
| 5087 | +} |
---|
| 5088 | + |
---|
| 5089 | +static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) |
---|
| 5090 | +{ |
---|
| 5091 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5092 | + struct bnxt_napi *bnapi = txr->bnapi; |
---|
| 5093 | + struct bnxt_cp_ring_info *cpr; |
---|
| 5094 | + |
---|
| 5095 | + cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; |
---|
| 5096 | + return cpr->cp_ring_struct.fw_ring_id; |
---|
| 5097 | + } else { |
---|
| 5098 | + return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); |
---|
| 5099 | + } |
---|
| 5100 | +} |
---|
| 5101 | + |
---|
| 5102 | +static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) |
---|
| 5103 | +{ |
---|
| 5104 | + int entries; |
---|
| 5105 | + |
---|
| 5106 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 5107 | + entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; |
---|
| 5108 | + else |
---|
| 5109 | + entries = HW_HASH_INDEX_SIZE; |
---|
| 5110 | + |
---|
| 5111 | + bp->rss_indir_tbl_entries = entries; |
---|
| 5112 | + bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), |
---|
| 5113 | + GFP_KERNEL); |
---|
| 5114 | + if (!bp->rss_indir_tbl) |
---|
| 5115 | + return -ENOMEM; |
---|
| 5116 | + return 0; |
---|
| 5117 | +} |
---|
| 5118 | + |
---|
| 5119 | +static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) |
---|
| 5120 | +{ |
---|
| 5121 | + u16 max_rings, max_entries, pad, i; |
---|
| 5122 | + |
---|
| 5123 | + if (!bp->rx_nr_rings) |
---|
| 5124 | + return; |
---|
| 5125 | + |
---|
| 5126 | + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
| 5127 | + max_rings = bp->rx_nr_rings - 1; |
---|
| 5128 | + else |
---|
| 5129 | + max_rings = bp->rx_nr_rings; |
---|
| 5130 | + |
---|
| 5131 | + max_entries = bnxt_get_rxfh_indir_size(bp->dev); |
---|
| 5132 | + |
---|
| 5133 | + for (i = 0; i < max_entries; i++) |
---|
| 5134 | + bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); |
---|
| 5135 | + |
---|
| 5136 | + pad = bp->rss_indir_tbl_entries - max_entries; |
---|
| 5137 | + if (pad) |
---|
| 5138 | + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); |
---|
| 5139 | +} |
---|
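
bnxt_set_dflt_rss_indir_tbl() leans on ethtool_rxfh_indir_default(), which simply spreads entries round-robin across the RX rings (index % n_rings). Sketch:

#include <stdint.h>
#include <stdio.h>

static uint16_t indir_default(uint32_t index, uint32_t n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	for (uint32_t i = 0; i < 8; i++)
		printf("%u ", indir_default(i, 3));	/* 0 1 2 0 1 2 0 1 */
	printf("\n");
	return 0;
}
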
| 5140 | + |
---|
| 5141 | +static u16 bnxt_get_max_rss_ring(struct bnxt *bp) |
---|
| 5142 | +{ |
---|
| 5143 | + u16 i, tbl_size, max_ring = 0; |
---|
| 5144 | + |
---|
| 5145 | + if (!bp->rss_indir_tbl) |
---|
| 5146 | + return 0; |
---|
| 5147 | + |
---|
| 5148 | + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); |
---|
| 5149 | + for (i = 0; i < tbl_size; i++) |
---|
| 5150 | + max_ring = max(max_ring, bp->rss_indir_tbl[i]); |
---|
| 5151 | + return max_ring; |
---|
| 5152 | +} |
---|
| 5153 | + |
---|
| 5154 | +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) |
---|
| 5155 | +{ |
---|
| 5156 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 5157 | + return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); |
---|
| 5158 | + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
| 5159 | + return 2; |
---|
| 5160 | + return 1; |
---|
| 5161 | +} |
---|
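
On P5 each RSS context covers a fixed slice of the indirection table (BNXT_RSS_TABLE_ENTRIES_P5 — 64 entries upstream, treated as an assumption here), hence the DIV_ROUND_UP() in bnxt_get_nr_rss_ctxs(). A quick check of the rounding:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	printf("%d %d %d\n",
	       DIV_ROUND_UP(1, 64),	/* 1 context */
	       DIV_ROUND_UP(64, 64),	/* 1 context */
	       DIV_ROUND_UP(65, 64));	/* 2 contexts */
	return 0;
}
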
| 5162 | + |
---|
| 5163 | +static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) |
---|
| 5164 | +{ |
---|
| 5165 | + bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); |
---|
| 5166 | + u16 i, j; |
---|
| 5167 | + |
---|
| 5168 | + /* Fill the RSS indirection table with ring group ids */ |
---|
| 5169 | + for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { |
---|
| 5170 | + if (!no_rss) |
---|
| 5171 | + j = bp->rss_indir_tbl[i]; |
---|
| 5172 | + vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); |
---|
| 5173 | + } |
---|
| 5174 | +} |
---|
| 5175 | + |
---|
| 5176 | +static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, |
---|
| 5177 | + struct bnxt_vnic_info *vnic) |
---|
| 5178 | +{ |
---|
| 5179 | + __le16 *ring_tbl = vnic->rss_table; |
---|
| 5180 | + struct bnxt_rx_ring_info *rxr; |
---|
| 5181 | + u16 tbl_size, i; |
---|
| 5182 | + |
---|
| 5183 | + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); |
---|
| 5184 | + |
---|
| 5185 | + for (i = 0; i < tbl_size; i++) { |
---|
| 5186 | + u16 ring_id, j; |
---|
| 5187 | + |
---|
| 5188 | + j = bp->rss_indir_tbl[i]; |
---|
| 5189 | + rxr = &bp->rx_ring[j]; |
---|
| 5190 | + |
---|
| 5191 | + ring_id = rxr->rx_ring_struct.fw_ring_id; |
---|
| 5192 | + *ring_tbl++ = cpu_to_le16(ring_id); |
---|
| 5193 | + ring_id = bnxt_cp_ring_for_rx(bp, rxr); |
---|
| 5194 | + *ring_tbl++ = cpu_to_le16(ring_id); |
---|
| 5195 | + } |
---|
| 5196 | +} |
---|
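
Unlike the legacy table of ring-group ids, the P5 variant above writes two __le16 slots per logical entry: the RX ring's firmware id followed by its companion completion ring's id. A toy view of that two-slots-per-entry layout, with invented ids:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tbl[2 * 4];	/* 4 logical entries, 2 slots each */

	for (int i = 0; i < 4; i++) {
		tbl[2 * i] = 100 + i;		/* rx fw ring id */
		tbl[2 * i + 1] = 200 + i;	/* its cp fw ring id */
	}
	printf("entry 2 -> rx %u, cp %u\n", tbl[4], tbl[5]);
	return 0;
}
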
| 5197 | + |
---|
| 5198 | +static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) |
---|
| 5199 | +{ |
---|
| 5200 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 5201 | + __bnxt_fill_hw_rss_tbl_p5(bp, vnic); |
---|
| 5202 | + else |
---|
| 5203 | + __bnxt_fill_hw_rss_tbl(bp, vnic); |
---|
| 5204 | +} |
---|
| 5205 | + |
---|
4017 | 5206 | static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) |
---|
4018 | 5207 | { |
---|
4019 | | - u32 i, j, max_rings; |
---|
4020 | 5208 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
---|
4021 | 5209 | struct hwrm_vnic_rss_cfg_input req = {0}; |
---|
4022 | 5210 | |
---|
4023 | | - if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) |
---|
| 5211 | + if ((bp->flags & BNXT_FLAG_CHIP_P5) || |
---|
| 5212 | + vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) |
---|
4024 | 5213 | return 0; |
---|
4025 | 5214 | |
---|
4026 | 5215 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); |
---|
4027 | 5216 | if (set_rss) { |
---|
| 5217 | + bnxt_fill_hw_rss_tbl(bp, vnic); |
---|
4028 | 5218 | req.hash_type = cpu_to_le32(bp->rss_hash_cfg); |
---|
4029 | 5219 | req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; |
---|
4030 | | - if (vnic->flags & BNXT_VNIC_RSS_FLAG) { |
---|
4031 | | - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
4032 | | - max_rings = bp->rx_nr_rings - 1; |
---|
4033 | | - else |
---|
4034 | | - max_rings = bp->rx_nr_rings; |
---|
4035 | | - } else { |
---|
4036 | | - max_rings = 1; |
---|
4037 | | - } |
---|
4038 | | - |
---|
4039 | | - /* Fill the RSS indirection table with ring group ids */ |
---|
4040 | | - for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { |
---|
4041 | | - if (j == max_rings) |
---|
4042 | | - j = 0; |
---|
4043 | | - vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); |
---|
4044 | | - } |
---|
4045 | | - |
---|
4046 | 5220 | req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); |
---|
4047 | 5221 | req.hash_key_tbl_addr = |
---|
4048 | 5222 | cpu_to_le64(vnic->rss_hash_key_dma_addr); |
---|
4049 | 5223 | } |
---|
4050 | 5224 | req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); |
---|
4051 | 5225 | return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 5226 | +} |
---|
| 5227 | + |
---|
| 5228 | +static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) |
---|
| 5229 | +{ |
---|
| 5230 | + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
---|
| 5231 | + struct hwrm_vnic_rss_cfg_input req = {0}; |
---|
| 5232 | + dma_addr_t ring_tbl_map; |
---|
| 5233 | + u32 i, nr_ctxs; |
---|
| 5234 | + |
---|
| 5235 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); |
---|
| 5236 | + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
---|
| 5237 | + if (!set_rss) { |
---|
| 5238 | + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 5239 | + return 0; |
---|
| 5240 | + } |
---|
| 5241 | + bnxt_fill_hw_rss_tbl(bp, vnic); |
---|
| 5242 | + req.hash_type = cpu_to_le32(bp->rss_hash_cfg); |
---|
| 5243 | + req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; |
---|
| 5244 | + req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); |
---|
| 5245 | + ring_tbl_map = vnic->rss_table_dma_addr; |
---|
| 5246 | + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); |
---|
| 5247 | + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { |
---|
| 5248 | + int rc; |
---|
| 5249 | + |
---|
| 5250 | + req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); |
---|
| 5251 | + req.ring_table_pair_index = i; |
---|
| 5252 | + req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); |
---|
| 5253 | + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 5254 | + if (rc) |
---|
| 5255 | + return rc; |
---|
| 5256 | + } |
---|
| 5257 | + return 0; |
---|
4052 | 5258 | } |
---|
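
bnxt_hwrm_vnic_set_rss_p5() then programs those contexts one HWRM command at a time, advancing the table's DMA address by one chunk (BNXT_RSS_TABLE_SIZE_P5) per context. A sketch of the chunk walk; the chunk size assumes 64 ring pairs of two 16-bit slots, and the address is made up:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_BYTES	(64 * 2 * sizeof(uint16_t))	/* 64 ring pairs */

int main(void)
{
	uint64_t map = 0x100000;	/* pretend DMA address */
	int nr_ctxs = 2;

	for (int i = 0; i < nr_ctxs; i++, map += CHUNK_BYTES)
		printf("ctx %d: table @ 0x%llx\n", i,
		       (unsigned long long)map);
	return 0;
}
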
4053 | 5259 | |
---|
4054 | 5260 | static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) |
---|
.. | .. |
---|
4134 | 5340 | |
---|
4135 | 5341 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); |
---|
4136 | 5342 | |
---|
| 5343 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5344 | + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; |
---|
| 5345 | + |
---|
| 5346 | + req.default_rx_ring_id = |
---|
| 5347 | + cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); |
---|
| 5348 | + req.default_cmpl_ring_id = |
---|
| 5349 | + cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); |
---|
| 5350 | + req.enables = |
---|
| 5351 | + cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | |
---|
| 5352 | + VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); |
---|
| 5353 | + goto vnic_mru; |
---|
| 5354 | + } |
---|
4137 | 5355 | req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); |
---|
4138 | 5356 | /* Only RSS support for now TBD: COS & LB */ |
---|
4139 | 5357 | if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { |
---|
.. | .. |
---|
4166 | 5384 | ring = bp->rx_nr_rings - 1; |
---|
4167 | 5385 | |
---|
4168 | 5386 | grp_idx = bp->rx_ring[ring].bnapi->index; |
---|
4169 | | - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
---|
4170 | 5387 | req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); |
---|
4171 | | - |
---|
4172 | 5388 | req.lb_rule = cpu_to_le16(0xffff); |
---|
4173 | | - req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + |
---|
4174 | | - VLAN_HLEN); |
---|
| 5389 | +vnic_mru: |
---|
| 5390 | + req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); |
---|
4175 | 5391 | |
---|
| 5392 | + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
---|
4176 | 5393 | #ifdef CONFIG_BNXT_SRIOV |
---|
4177 | 5394 | if (BNXT_VF(bp)) |
---|
4178 | 5395 | def_vlan = bp->vf.vlan; |
---|
.. | .. |
---|
4185 | 5402 | return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4186 | 5403 | } |
---|
4187 | 5404 | |
---|
4188 | | -static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) |
---|
| 5405 | +static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) |
---|
4189 | 5406 | { |
---|
4190 | | - u32 rc = 0; |
---|
4191 | | - |
---|
4192 | 5407 | if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { |
---|
4193 | 5408 | struct hwrm_vnic_free_input req = {0}; |
---|
4194 | 5409 | |
---|
.. | .. |
---|
4196 | 5411 | req.vnic_id = |
---|
4197 | 5412 | cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); |
---|
4198 | 5413 | |
---|
4199 | | - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4200 | | - if (rc) |
---|
4201 | | - return rc; |
---|
| 5414 | + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4202 | 5415 | bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; |
---|
4203 | 5416 | } |
---|
4204 | | - return rc; |
---|
4205 | 5417 | } |
---|
4206 | 5418 | |
---|
4207 | 5419 | static void bnxt_hwrm_vnic_free(struct bnxt *bp) |
---|
.. | .. |
---|
4220 | 5432 | unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; |
---|
4221 | 5433 | struct hwrm_vnic_alloc_input req = {0}; |
---|
4222 | 5434 | struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 5435 | + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
---|
| 5436 | + |
---|
| 5437 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 5438 | + goto vnic_no_ring_grps; |
---|
4223 | 5439 | |
---|
4224 | 5440 | /* map ring groups to this vnic */ |
---|
4225 | 5441 | for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { |
---|
.. | .. |
---|
4229 | 5445 | j, nr_rings); |
---|
4230 | 5446 | break; |
---|
4231 | 5447 | } |
---|
4232 | | - bp->vnic_info[vnic_id].fw_grp_ids[j] = |
---|
4233 | | - bp->grp_info[grp_idx].fw_grp_id; |
---|
| 5448 | + vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; |
---|
4234 | 5449 | } |
---|
4235 | 5450 | |
---|
4236 | | - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; |
---|
4237 | | - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; |
---|
| 5451 | +vnic_no_ring_grps: |
---|
| 5452 | + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) |
---|
| 5453 | + vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; |
---|
4238 | 5454 | if (vnic_id == 0) |
---|
4239 | 5455 | req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); |
---|
4240 | 5456 | |
---|
.. | .. |
---|
4243 | 5459 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
4244 | 5460 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4245 | 5461 | if (!rc) |
---|
4246 | | - bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); |
---|
| 5462 | + vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); |
---|
4247 | 5463 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
4248 | 5464 | return rc; |
---|
4249 | 5465 | } |
---|
.. | .. |
---|
4254 | 5470 | struct hwrm_vnic_qcaps_input req = {0}; |
---|
4255 | 5471 | int rc; |
---|
4256 | 5472 | |
---|
| 5473 | + bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); |
---|
| 5474 | + bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); |
---|
4257 | 5475 | if (bp->hwrm_spec_code < 0x10600) |
---|
4258 | 5476 | return 0; |
---|
4259 | 5477 | |
---|
.. | .. |
---|
4263 | 5481 | if (!rc) { |
---|
4264 | 5482 | u32 flags = le32_to_cpu(resp->flags); |
---|
4265 | 5483 | |
---|
4266 | | - if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP) |
---|
| 5484 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5) && |
---|
| 5485 | + (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) |
---|
4267 | 5486 | bp->flags |= BNXT_FLAG_NEW_RSS_CAP; |
---|
4268 | 5487 | if (flags & |
---|
4269 | 5488 | VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) |
---|
4270 | 5489 | bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; |
---|
| 5490 | + |
---|
| 5491 | + /* Older P5 fw before EXT_HW_STATS support did not set |
---|
| 5492 | + * VLAN_STRIP_CAP properly. |
---|
| 5493 | + */ |
---|
| 5494 | + if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || |
---|
| 5495 | + (BNXT_CHIP_P5_THOR(bp) && |
---|
| 5496 | + !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) |
---|
| 5497 | + bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; |
---|
| 5498 | + bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); |
---|
| 5499 | + if (bp->max_tpa_v2) { |
---|
| 5500 | + if (BNXT_CHIP_P5_THOR(bp)) |
---|
| 5501 | + bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; |
---|
| 5502 | + else |
---|
| 5503 | + bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; |
---|
| 5504 | + } |
---|
4271 | 5505 | } |
---|
4272 | 5506 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
4273 | 5507 | return rc; |
---|
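A note on the VLAN-strip workaround in the qcaps parsing above: older P5 (Thor) firmware that predates EXT_HW_STATS support also failed to set VLAN_STRIP_CAP, so the driver treats a Thor chip whose firmware lacks extended stats as implicitly VLAN-strip capable. A host-side sketch of that predicate follows; the two flag values are invented for illustration and are not the real HSI constants.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real HSI/driver constants. */
#define VLAN_STRIP_CAP      0x0400u  /* qcaps response flag (illustrative) */
#define FW_CAP_EXT_HW_STATS 0x0001u  /* driver fw_cap bit (illustrative) */

/* Mirror of the driver's decision: the capability is assumed either when
 * firmware reports it, or on a P5/Thor chip whose firmware is too old to
 * report extended stats (and therefore too old to set VLAN_STRIP_CAP).
 */
static bool vlan_rx_strip_supported(uint32_t qcaps_flags, bool is_p5_thor,
				    uint32_t fw_cap)
{
	if (qcaps_flags & VLAN_STRIP_CAP)
		return true;
	return is_p5_thor && !(fw_cap & FW_CAP_EXT_HW_STATS);
}

int main(void)
{
	/* Old Thor firmware: no flag, no ext stats -> still assumed capable. */
	printf("%d\n", vlan_rx_strip_supported(0, true, 0));
	/* Non-P5 chip without the flag -> not capable. */
	printf("%d\n", vlan_rx_strip_supported(0, false, 0));
	return 0;
}
```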
.. | .. |
---|
4277 | 5511 | { |
---|
4278 | 5512 | u16 i; |
---|
4279 | 5513 | u32 rc = 0; |
---|
| 5514 | + |
---|
| 5515 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 5516 | + return 0; |
---|
4280 | 5517 | |
---|
4281 | 5518 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
4282 | 5519 | for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
.. | .. |
---|
4304 | 5541 | return rc; |
---|
4305 | 5542 | } |
---|
4306 | 5543 | |
---|
4307 | | -static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) |
---|
| 5544 | +static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) |
---|
4308 | 5545 | { |
---|
4309 | 5546 | u16 i; |
---|
4310 | | - u32 rc = 0; |
---|
4311 | 5547 | struct hwrm_ring_grp_free_input req = {0}; |
---|
4312 | 5548 | |
---|
4313 | | - if (!bp->grp_info) |
---|
4314 | | - return 0; |
---|
| 5549 | + if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 5550 | + return; |
---|
4315 | 5551 | |
---|
4316 | 5552 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); |
---|
4317 | 5553 | |
---|
.. | .. |
---|
4322 | 5558 | req.ring_group_id = |
---|
4323 | 5559 | cpu_to_le32(bp->grp_info[i].fw_grp_id); |
---|
4324 | 5560 | |
---|
4325 | | - rc = _hwrm_send_message(bp, &req, sizeof(req), |
---|
4326 | | - HWRM_CMD_TIMEOUT); |
---|
4327 | | - if (rc) |
---|
4328 | | - break; |
---|
| 5561 | + _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4329 | 5562 | bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; |
---|
4330 | 5563 | } |
---|
4331 | 5564 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
4332 | | - return rc; |
---|
4333 | 5565 | } |
---|
4334 | 5566 | |
---|
4335 | 5567 | static int hwrm_ring_alloc_send_msg(struct bnxt *bp, |
---|
.. | .. |
---|
4339 | 5571 | int rc = 0, err = 0; |
---|
4340 | 5572 | struct hwrm_ring_alloc_input req = {0}; |
---|
4341 | 5573 | struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 5574 | + struct bnxt_ring_mem_info *rmem = &ring->ring_mem; |
---|
4342 | 5575 | struct bnxt_ring_grp_info *grp_info; |
---|
4343 | 5576 | u16 ring_id; |
---|
4344 | 5577 | |
---|
4345 | 5578 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); |
---|
4346 | 5579 | |
---|
4347 | 5580 | req.enables = 0; |
---|
4348 | | - if (ring->nr_pages > 1) { |
---|
4349 | | - req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); |
---|
| 5581 | + if (rmem->nr_pages > 1) { |
---|
| 5582 | + req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); |
---|
4350 | 5583 | /* Page size is in log2 units */ |
---|
4351 | 5584 | req.page_size = BNXT_PAGE_SHIFT; |
---|
4352 | 5585 | req.page_tbl_depth = 1; |
---|
4353 | 5586 | } else { |
---|
4354 | | - req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); |
---|
| 5587 | + req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); |
---|
4355 | 5588 | } |
---|
4356 | 5589 | req.fbo = 0; |
---|
4357 | 5590 | /* Association of ring index with doorbell index and MSIX number */ |
---|
4358 | 5591 | req.logical_id = cpu_to_le16(map_index); |
---|
4359 | 5592 | |
---|
4360 | 5593 | switch (ring_type) { |
---|
4361 | | - case HWRM_RING_ALLOC_TX: |
---|
| 5594 | + case HWRM_RING_ALLOC_TX: { |
---|
| 5595 | + struct bnxt_tx_ring_info *txr; |
---|
| 5596 | + |
---|
| 5597 | + txr = container_of(ring, struct bnxt_tx_ring_info, |
---|
| 5598 | + tx_ring_struct); |
---|
4362 | 5599 | req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; |
---|
4363 | 5600 | /* Association of transmit ring with completion ring */ |
---|
4364 | 5601 | grp_info = &bp->grp_info[ring->grp_idx]; |
---|
4365 | | - req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); |
---|
| 5602 | + req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); |
---|
4366 | 5603 | req.length = cpu_to_le32(bp->tx_ring_mask + 1); |
---|
4367 | 5604 | req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); |
---|
4368 | 5605 | req.queue_id = cpu_to_le16(ring->queue_id); |
---|
4369 | 5606 | break; |
---|
| 5607 | + } |
---|
4370 | 5608 | case HWRM_RING_ALLOC_RX: |
---|
4371 | 5609 | req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; |
---|
4372 | 5610 | req.length = cpu_to_le32(bp->rx_ring_mask + 1); |
---|
| 5611 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5612 | + u16 flags = 0; |
---|
| 5613 | + |
---|
| 5614 | + /* Association of rx ring with stats context */ |
---|
| 5615 | + grp_info = &bp->grp_info[ring->grp_idx]; |
---|
| 5616 | + req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); |
---|
| 5617 | + req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); |
---|
| 5618 | + req.enables |= cpu_to_le32( |
---|
| 5619 | + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); |
---|
| 5620 | + if (NET_IP_ALIGN == 2) |
---|
| 5621 | + flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; |
---|
| 5622 | + req.flags = cpu_to_le16(flags); |
---|
| 5623 | + } |
---|
4373 | 5624 | break; |
---|
4374 | 5625 | case HWRM_RING_ALLOC_AGG: |
---|
4375 | | - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; |
---|
| 5626 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5627 | + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; |
---|
| 5628 | + /* Association of agg ring with rx ring */ |
---|
| 5629 | + grp_info = &bp->grp_info[ring->grp_idx]; |
---|
| 5630 | + req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); |
---|
| 5631 | + req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); |
---|
| 5632 | + req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); |
---|
| 5633 | + req.enables |= cpu_to_le32( |
---|
| 5634 | + RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | |
---|
| 5635 | + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); |
---|
| 5636 | + } else { |
---|
| 5637 | + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; |
---|
| 5638 | + } |
---|
4376 | 5639 | req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); |
---|
4377 | 5640 | break; |
---|
4378 | 5641 | case HWRM_RING_ALLOC_CMPL: |
---|
4379 | 5642 | req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; |
---|
| 5643 | + req.length = cpu_to_le32(bp->cp_ring_mask + 1); |
---|
| 5644 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5645 | + /* Association of cp ring with nq */ |
---|
| 5646 | + grp_info = &bp->grp_info[map_index]; |
---|
| 5647 | + req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); |
---|
| 5648 | + req.cq_handle = cpu_to_le64(ring->handle); |
---|
| 5649 | + req.enables |= cpu_to_le32( |
---|
| 5650 | + RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); |
---|
| 5651 | + } else if (bp->flags & BNXT_FLAG_USING_MSIX) { |
---|
| 5652 | + req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; |
---|
| 5653 | + } |
---|
| 5654 | + break; |
---|
| 5655 | + case HWRM_RING_ALLOC_NQ: |
---|
| 5656 | + req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; |
---|
4380 | 5657 | req.length = cpu_to_le32(bp->cp_ring_mask + 1); |
---|
4381 | 5658 | if (bp->flags & BNXT_FLAG_USING_MSIX) |
---|
4382 | 5659 | req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; |
---|
.. | .. |
---|
4426 | 5703 | return rc; |
---|
4427 | 5704 | } |
---|
4428 | 5705 | |
---|
| 5706 | +static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, |
---|
| 5707 | + u32 map_idx, u32 xid) |
---|
| 5708 | +{ |
---|
| 5709 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5710 | + if (BNXT_PF(bp)) |
---|
| 5711 | + db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; |
---|
| 5712 | + else |
---|
| 5713 | + db->doorbell = bp->bar1 + DB_VF_OFFSET_P5; |
---|
| 5714 | + switch (ring_type) { |
---|
| 5715 | + case HWRM_RING_ALLOC_TX: |
---|
| 5716 | + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; |
---|
| 5717 | + break; |
---|
| 5718 | + case HWRM_RING_ALLOC_RX: |
---|
| 5719 | + case HWRM_RING_ALLOC_AGG: |
---|
| 5720 | + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; |
---|
| 5721 | + break; |
---|
| 5722 | + case HWRM_RING_ALLOC_CMPL: |
---|
| 5723 | + db->db_key64 = DBR_PATH_L2; |
---|
| 5724 | + break; |
---|
| 5725 | + case HWRM_RING_ALLOC_NQ: |
---|
| 5726 | + db->db_key64 = DBR_PATH_L2; |
---|
| 5727 | + break; |
---|
| 5728 | + } |
---|
| 5729 | + db->db_key64 |= (u64)xid << DBR_XID_SFT; |
---|
| 5730 | + } else { |
---|
| 5731 | + db->doorbell = bp->bar1 + map_idx * 0x80; |
---|
| 5732 | + switch (ring_type) { |
---|
| 5733 | + case HWRM_RING_ALLOC_TX: |
---|
| 5734 | + db->db_key32 = DB_KEY_TX; |
---|
| 5735 | + break; |
---|
| 5736 | + case HWRM_RING_ALLOC_RX: |
---|
| 5737 | + case HWRM_RING_ALLOC_AGG: |
---|
| 5738 | + db->db_key32 = DB_KEY_RX; |
---|
| 5739 | + break; |
---|
| 5740 | + case HWRM_RING_ALLOC_CMPL: |
---|
| 5741 | + db->db_key32 = DB_KEY_CP; |
---|
| 5742 | + break; |
---|
| 5743 | + } |
---|
| 5744 | + } |
---|
| 5745 | +} |
---|
| 5746 | + |
---|
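bnxt_set_db() precomputes everything that is invariant per ring so the fast path only has to OR in the current producer/consumer index. On P5 the 64-bit doorbell value is path | ring type | (ring XID shifted into its field), written at a fixed PF or VF offset in BAR1; on older chips each ring instead gets a 32-bit key at a 0x80 stride. A standalone sketch of the P5-style composition, where the DBR_* values are illustrative placeholders rather than the driver's real constants:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the DBR_* constants; the real values live in
 * the driver headers and may differ.
 */
#define DBR_PATH_L2	(0x1ULL << 56)
#define DBR_TYPE_SQ	(0x0ULL << 60)
#define DBR_TYPE_SRQ	(0x2ULL << 60)
#define DBR_XID_SFT	32

/* Precomputed once per ring, as bnxt_set_db() does. */
static uint64_t mk_db_key64(uint64_t path, uint64_t type, uint32_t xid)
{
	return path | type | ((uint64_t)xid << DBR_XID_SFT);
}

/* Fast-path doorbell value: the key ORed with the current index. */
static uint64_t mk_db_write(uint64_t key64, uint32_t prod_idx)
{
	return key64 | prod_idx;
}

int main(void)
{
	uint64_t key = mk_db_key64(DBR_PATH_L2, DBR_TYPE_SQ, 0x1234);

	printf("tx ring key:   0x%016llx\n", (unsigned long long)key);
	printf("tx db, prod 5: 0x%016llx\n",
	       (unsigned long long)mk_db_write(key, 5));
	return 0;
}
```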
4429 | 5747 | static int bnxt_hwrm_ring_alloc(struct bnxt *bp) |
---|
4430 | 5748 | { |
---|
| 5749 | + bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); |
---|
4431 | 5750 | int i, rc = 0; |
---|
| 5751 | + u32 type; |
---|
4432 | 5752 | |
---|
| 5753 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 5754 | + type = HWRM_RING_ALLOC_NQ; |
---|
| 5755 | + else |
---|
| 5756 | + type = HWRM_RING_ALLOC_CMPL; |
---|
4433 | 5757 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
4434 | 5758 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
4435 | 5759 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
4436 | 5760 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
---|
4437 | 5761 | u32 map_idx = ring->map_idx; |
---|
| 5762 | + unsigned int vector; |
---|
4438 | 5763 | |
---|
4439 | | - cpr->cp_doorbell = bp->bar1 + map_idx * 0x80; |
---|
4440 | | - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, |
---|
4441 | | - map_idx); |
---|
4442 | | - if (rc) |
---|
| 5764 | + vector = bp->irq_tbl[map_idx].vector; |
---|
| 5765 | + disable_irq_nosync(vector); |
---|
| 5766 | + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); |
---|
| 5767 | + if (rc) { |
---|
| 5768 | + enable_irq(vector); |
---|
4443 | 5769 | goto err_out; |
---|
4444 | | - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); |
---|
| 5770 | + } |
---|
| 5771 | + bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); |
---|
| 5772 | + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); |
---|
| 5773 | + enable_irq(vector); |
---|
4445 | 5774 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; |
---|
4446 | 5775 | |
---|
4447 | 5776 | if (!i) { |
---|
.. | .. |
---|
4451 | 5780 | } |
---|
4452 | 5781 | } |
---|
4453 | 5782 | |
---|
| 5783 | + type = HWRM_RING_ALLOC_TX; |
---|
4454 | 5784 | for (i = 0; i < bp->tx_nr_rings; i++) { |
---|
4455 | 5785 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
---|
4456 | | - struct bnxt_ring_struct *ring = &txr->tx_ring_struct; |
---|
4457 | | - u32 map_idx = i; |
---|
| 5786 | + struct bnxt_ring_struct *ring; |
---|
| 5787 | + u32 map_idx; |
---|
4458 | 5788 | |
---|
4459 | | - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, |
---|
4460 | | - map_idx); |
---|
| 5789 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5790 | + struct bnxt_napi *bnapi = txr->bnapi; |
---|
| 5791 | + struct bnxt_cp_ring_info *cpr, *cpr2; |
---|
| 5792 | + u32 type2 = HWRM_RING_ALLOC_CMPL; |
---|
| 5793 | + |
---|
| 5794 | + cpr = &bnapi->cp_ring; |
---|
| 5795 | + cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL]; |
---|
| 5796 | + ring = &cpr2->cp_ring_struct; |
---|
| 5797 | + ring->handle = BNXT_TX_HDL; |
---|
| 5798 | + map_idx = bnapi->index; |
---|
| 5799 | + rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); |
---|
| 5800 | + if (rc) |
---|
| 5801 | + goto err_out; |
---|
| 5802 | + bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, |
---|
| 5803 | + ring->fw_ring_id); |
---|
| 5804 | + bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); |
---|
| 5805 | + } |
---|
| 5806 | + ring = &txr->tx_ring_struct; |
---|
| 5807 | + map_idx = i; |
---|
| 5808 | + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); |
---|
4461 | 5809 | if (rc) |
---|
4462 | 5810 | goto err_out; |
---|
4463 | | - txr->tx_doorbell = bp->bar1 + map_idx * 0x80; |
---|
| 5811 | + bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); |
---|
4464 | 5812 | } |
---|
4465 | 5813 | |
---|
| 5814 | + type = HWRM_RING_ALLOC_RX; |
---|
4466 | 5815 | for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
4467 | 5816 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
4468 | 5817 | struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; |
---|
4469 | | - u32 map_idx = rxr->bnapi->index; |
---|
| 5818 | + struct bnxt_napi *bnapi = rxr->bnapi; |
---|
| 5819 | + u32 map_idx = bnapi->index; |
---|
4470 | 5820 | |
---|
4471 | | - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, |
---|
4472 | | - map_idx); |
---|
| 5821 | + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); |
---|
4473 | 5822 | if (rc) |
---|
4474 | 5823 | goto err_out; |
---|
4475 | | - rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; |
---|
4476 | | - writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); |
---|
| 5824 | + bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); |
---|
| 5825 | + /* If we have agg rings, post agg buffers first. */ |
---|
| 5826 | + if (!agg_rings) |
---|
| 5827 | + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); |
---|
4477 | 5828 | bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; |
---|
| 5829 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 5830 | + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
| 5831 | + u32 type2 = HWRM_RING_ALLOC_CMPL; |
---|
| 5832 | + struct bnxt_cp_ring_info *cpr2; |
---|
| 5833 | + |
---|
| 5834 | + cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; |
---|
| 5835 | + ring = &cpr2->cp_ring_struct; |
---|
| 5836 | + ring->handle = BNXT_RX_HDL; |
---|
| 5837 | + rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); |
---|
| 5838 | + if (rc) |
---|
| 5839 | + goto err_out; |
---|
| 5840 | + bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, |
---|
| 5841 | + ring->fw_ring_id); |
---|
| 5842 | + bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); |
---|
| 5843 | + } |
---|
4478 | 5844 | } |
---|
4479 | 5845 | |
---|
4480 | | - if (bp->flags & BNXT_FLAG_AGG_RINGS) { |
---|
| 5846 | + if (agg_rings) { |
---|
| 5847 | + type = HWRM_RING_ALLOC_AGG; |
---|
4481 | 5848 | for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
4482 | 5849 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
4483 | 5850 | struct bnxt_ring_struct *ring = |
---|
.. | .. |
---|
4485 | 5852 | u32 grp_idx = ring->grp_idx; |
---|
4486 | 5853 | u32 map_idx = grp_idx + bp->rx_nr_rings; |
---|
4487 | 5854 | |
---|
4488 | | - rc = hwrm_ring_alloc_send_msg(bp, ring, |
---|
4489 | | - HWRM_RING_ALLOC_AGG, |
---|
4490 | | - map_idx); |
---|
| 5855 | + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); |
---|
4491 | 5856 | if (rc) |
---|
4492 | 5857 | goto err_out; |
---|
4493 | 5858 | |
---|
4494 | | - rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; |
---|
4495 | | - writel(DB_KEY_RX | rxr->rx_agg_prod, |
---|
4496 | | - rxr->rx_agg_doorbell); |
---|
| 5859 | + bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, |
---|
| 5860 | + ring->fw_ring_id); |
---|
| 5861 | + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); |
---|
| 5862 | + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); |
---|
4497 | 5863 | bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; |
---|
4498 | 5864 | } |
---|
4499 | 5865 | } |
---|
.. | .. |
---|
4509 | 5875 | struct hwrm_ring_free_input req = {0}; |
---|
4510 | 5876 | struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; |
---|
4511 | 5877 | u16 error_code; |
---|
| 5878 | + |
---|
| 5879 | + if (BNXT_NO_FW_ACCESS(bp)) |
---|
| 5880 | + return 0; |
---|
4512 | 5881 | |
---|
4513 | 5882 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); |
---|
4514 | 5883 | req.ring_type = ring_type; |
---|
.. | .. |
---|
4529 | 5898 | |
---|
4530 | 5899 | static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) |
---|
4531 | 5900 | { |
---|
| 5901 | + u32 type; |
---|
4532 | 5902 | int i; |
---|
4533 | 5903 | |
---|
4534 | 5904 | if (!bp->bnapi) |
---|
.. | .. |
---|
4537 | 5907 | for (i = 0; i < bp->tx_nr_rings; i++) { |
---|
4538 | 5908 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
---|
4539 | 5909 | struct bnxt_ring_struct *ring = &txr->tx_ring_struct; |
---|
4540 | | - u32 grp_idx = txr->bnapi->index; |
---|
4541 | | - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; |
---|
4542 | 5910 | |
---|
4543 | 5911 | if (ring->fw_ring_id != INVALID_HW_RING_ID) { |
---|
| 5912 | + u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); |
---|
| 5913 | + |
---|
4544 | 5914 | hwrm_ring_free_send_msg(bp, ring, |
---|
4545 | 5915 | RING_FREE_REQ_RING_TYPE_TX, |
---|
4546 | 5916 | close_path ? cmpl_ring_id : |
---|
.. | .. |
---|
4553 | 5923 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
4554 | 5924 | struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; |
---|
4555 | 5925 | u32 grp_idx = rxr->bnapi->index; |
---|
4556 | | - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; |
---|
4557 | 5926 | |
---|
4558 | 5927 | if (ring->fw_ring_id != INVALID_HW_RING_ID) { |
---|
| 5928 | + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); |
---|
| 5929 | + |
---|
4559 | 5930 | hwrm_ring_free_send_msg(bp, ring, |
---|
4560 | 5931 | RING_FREE_REQ_RING_TYPE_RX, |
---|
4561 | 5932 | close_path ? cmpl_ring_id : |
---|
.. | .. |
---|
4566 | 5937 | } |
---|
4567 | 5938 | } |
---|
4568 | 5939 | |
---|
| 5940 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 5941 | + type = RING_FREE_REQ_RING_TYPE_RX_AGG; |
---|
| 5942 | + else |
---|
| 5943 | + type = RING_FREE_REQ_RING_TYPE_RX; |
---|
4569 | 5944 | for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
4570 | 5945 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
4571 | 5946 | struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; |
---|
4572 | 5947 | u32 grp_idx = rxr->bnapi->index; |
---|
4573 | | - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; |
---|
4574 | 5948 | |
---|
4575 | 5949 | if (ring->fw_ring_id != INVALID_HW_RING_ID) { |
---|
4576 | | - hwrm_ring_free_send_msg(bp, ring, |
---|
4577 | | - RING_FREE_REQ_RING_TYPE_RX, |
---|
| 5950 | + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); |
---|
| 5951 | + |
---|
| 5952 | + hwrm_ring_free_send_msg(bp, ring, type, |
---|
4578 | 5953 | close_path ? cmpl_ring_id : |
---|
4579 | 5954 | INVALID_HW_RING_ID); |
---|
4580 | 5955 | ring->fw_ring_id = INVALID_HW_RING_ID; |
---|
.. | .. |
---|
4589 | 5964 | */ |
---|
4590 | 5965 | bnxt_disable_int_sync(bp); |
---|
4591 | 5966 | |
---|
| 5967 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 5968 | + type = RING_FREE_REQ_RING_TYPE_NQ; |
---|
| 5969 | + else |
---|
| 5970 | + type = RING_FREE_REQ_RING_TYPE_L2_CMPL; |
---|
4592 | 5971 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
4593 | 5972 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
4594 | 5973 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
4595 | | - struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
---|
| 5974 | + struct bnxt_ring_struct *ring; |
---|
| 5975 | + int j; |
---|
4596 | 5976 | |
---|
| 5977 | + for (j = 0; j < 2; j++) { |
---|
| 5978 | + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; |
---|
| 5979 | + |
---|
| 5980 | + if (cpr2) { |
---|
| 5981 | + ring = &cpr2->cp_ring_struct; |
---|
| 5982 | + if (ring->fw_ring_id == INVALID_HW_RING_ID) |
---|
| 5983 | + continue; |
---|
| 5984 | + hwrm_ring_free_send_msg(bp, ring, |
---|
| 5985 | + RING_FREE_REQ_RING_TYPE_L2_CMPL, |
---|
| 5986 | + INVALID_HW_RING_ID); |
---|
| 5987 | + ring->fw_ring_id = INVALID_HW_RING_ID; |
---|
| 5988 | + } |
---|
| 5989 | + } |
---|
| 5990 | + ring = &cpr->cp_ring_struct; |
---|
4597 | 5991 | if (ring->fw_ring_id != INVALID_HW_RING_ID) { |
---|
4598 | | - hwrm_ring_free_send_msg(bp, ring, |
---|
4599 | | - RING_FREE_REQ_RING_TYPE_L2_CMPL, |
---|
| 5992 | + hwrm_ring_free_send_msg(bp, ring, type, |
---|
4600 | 5993 | INVALID_HW_RING_ID); |
---|
4601 | 5994 | ring->fw_ring_id = INVALID_HW_RING_ID; |
---|
4602 | 5995 | bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; |
---|
4603 | 5996 | } |
---|
4604 | 5997 | } |
---|
4605 | 5998 | } |
---|
| 5999 | + |
---|
| 6000 | +static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, |
---|
| 6001 | + bool shared); |
---|
4606 | 6002 | |
---|
4607 | 6003 | static int bnxt_hwrm_get_rings(struct bnxt *bp) |
---|
4608 | 6004 | { |
---|
.. | .. |
---|
4620 | 6016 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4621 | 6017 | if (rc) { |
---|
4622 | 6018 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
4623 | | - return -EIO; |
---|
| 6019 | + return rc; |
---|
4624 | 6020 | } |
---|
4625 | 6021 | |
---|
4626 | 6022 | hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); |
---|
.. | .. |
---|
4633 | 6029 | hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); |
---|
4634 | 6030 | cp = le16_to_cpu(resp->alloc_cmpl_rings); |
---|
4635 | 6031 | stats = le16_to_cpu(resp->alloc_stat_ctx); |
---|
4636 | | - cp = min_t(u16, cp, stats); |
---|
| 6032 | + hw_resc->resv_irqs = cp; |
---|
| 6033 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 6034 | + int rx = hw_resc->resv_rx_rings; |
---|
| 6035 | + int tx = hw_resc->resv_tx_rings; |
---|
| 6036 | + |
---|
| 6037 | + if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
| 6038 | + rx >>= 1; |
---|
| 6039 | + if (cp < (rx + tx)) { |
---|
| 6040 | + bnxt_trim_rings(bp, &rx, &tx, cp, false); |
---|
| 6041 | + if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
| 6042 | + rx <<= 1; |
---|
| 6043 | + hw_resc->resv_rx_rings = rx; |
---|
| 6044 | + hw_resc->resv_tx_rings = tx; |
---|
| 6045 | + } |
---|
| 6046 | + hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); |
---|
| 6047 | + hw_resc->resv_hw_ring_grps = rx; |
---|
| 6048 | + } |
---|
4637 | 6049 | hw_resc->resv_cp_rings = cp; |
---|
| 6050 | + hw_resc->resv_stat_ctxs = stats; |
---|
4638 | 6051 | } |
---|
4639 | 6052 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
4640 | 6053 | return 0; |
---|
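On P5 every RX and TX ring needs its own completion ring, so the new branch in bnxt_hwrm_get_rings() checks the granted completion rings against rx + tx, halving rx first when aggregation rings double the reserved RX count, and trims both sides to fit before doubling rx back. A rough host-side sketch of that accounting; the proportional trim below only approximates what the driver's bnxt_trim_rings() does.

```c
#include <stdio.h>

/* Shrink rx and tx so rx + tx <= max, roughly preserving their ratio.
 * This is an illustrative stand-in for the driver's bnxt_trim_rings().
 */
static void trim_rings(int *rx, int *tx, int max)
{
	while (*rx + *tx > max) {
		if (*rx >= *tx && *rx > 1)
			(*rx)--;
		else if (*tx > 1)
			(*tx)--;
		else
			break;
	}
}

int main(void)
{
	int resv_rx = 16;	/* firmware-reserved RX rings (doubled: agg on) */
	int resv_tx = 8;
	int cp = 12;		/* completion rings firmware actually granted */
	int rx = resv_rx / 2;	/* agg rings on: two HW rings per user ring */
	int tx = resv_tx;

	if (cp < rx + tx)
		trim_rings(&rx, &tx, cp);

	printf("usable rx=%d (reserved as %d), tx=%d\n", rx, rx * 2, tx);
	return 0;
}
```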
.. | .. |
---|
4659 | 6072 | return rc; |
---|
4660 | 6073 | } |
---|
4661 | 6074 | |
---|
| 6075 | +static bool bnxt_rfs_supported(struct bnxt *bp); |
---|
| 6076 | + |
---|
4662 | 6077 | static void |
---|
4663 | 6078 | __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, |
---|
4664 | 6079 | int tx_rings, int rx_rings, int ring_grps, |
---|
4665 | | - int cp_rings, int vnics) |
---|
| 6080 | + int cp_rings, int stats, int vnics) |
---|
4666 | 6081 | { |
---|
4667 | 6082 | u32 enables = 0; |
---|
4668 | 6083 | |
---|
.. | .. |
---|
4672 | 6087 | req->num_tx_rings = cpu_to_le16(tx_rings); |
---|
4673 | 6088 | if (BNXT_NEW_RM(bp)) { |
---|
4674 | 6089 | enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; |
---|
4675 | | - enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | |
---|
4676 | | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; |
---|
4677 | | - enables |= ring_grps ? |
---|
4678 | | - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; |
---|
| 6090 | + enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; |
---|
| 6091 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 6092 | + enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; |
---|
| 6093 | + enables |= tx_rings + ring_grps ? |
---|
| 6094 | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; |
---|
| 6095 | + enables |= rx_rings ? |
---|
| 6096 | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; |
---|
| 6097 | + } else { |
---|
| 6098 | + enables |= cp_rings ? |
---|
| 6099 | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; |
---|
| 6100 | + enables |= ring_grps ? |
---|
| 6101 | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | |
---|
| 6102 | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; |
---|
| 6103 | + } |
---|
4679 | 6104 | enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; |
---|
4680 | 6105 | |
---|
4681 | 6106 | req->num_rx_rings = cpu_to_le16(rx_rings); |
---|
4682 | | - req->num_hw_ring_grps = cpu_to_le16(ring_grps); |
---|
4683 | | - req->num_cmpl_rings = cpu_to_le16(cp_rings); |
---|
4684 | | - req->num_stat_ctxs = req->num_cmpl_rings; |
---|
| 6107 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 6108 | + req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); |
---|
| 6109 | + req->num_msix = cpu_to_le16(cp_rings); |
---|
| 6110 | + req->num_rsscos_ctxs = |
---|
| 6111 | + cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); |
---|
| 6112 | + } else { |
---|
| 6113 | + req->num_cmpl_rings = cpu_to_le16(cp_rings); |
---|
| 6114 | + req->num_hw_ring_grps = cpu_to_le16(ring_grps); |
---|
| 6115 | + req->num_rsscos_ctxs = cpu_to_le16(1); |
---|
| 6116 | + if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && |
---|
| 6117 | + bnxt_rfs_supported(bp)) |
---|
| 6118 | + req->num_rsscos_ctxs = |
---|
| 6119 | + cpu_to_le16(ring_grps + 1); |
---|
| 6120 | + } |
---|
| 6121 | + req->num_stat_ctxs = cpu_to_le16(stats); |
---|
4685 | 6122 | req->num_vnics = cpu_to_le16(vnics); |
---|
4686 | 6123 | } |
---|
4687 | 6124 | req->enables = cpu_to_le32(enables); |
---|
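The P5 branch above also sizes RSS contexts differently: the DIV_ROUND_UP(ring_grps, 64) suggests each P5 RSS context covers up to 64 ring groups (a 64-entry indirection table per context), while older chips get a single context, plus one per RX ring when RFS needs it. A quick arithmetic check of that rounding, assuming the 64-groups-per-context reading is right:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Assuming each P5 RSS context maps up to 64 ring groups, as the
 * DIV_ROUND_UP(ring_grps, 64) in the driver suggests.
 */
int main(void)
{
	int grps[] = { 1, 64, 65, 128, 200 };

	for (unsigned i = 0; i < sizeof(grps) / sizeof(grps[0]); i++)
		printf("%3d ring groups -> %d RSS context(s)\n",
		       grps[i], DIV_ROUND_UP(grps[i], 64));
	return 0;
}
```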
.. | .. |
---|
4691 | 6128 | __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, |
---|
4692 | 6129 | struct hwrm_func_vf_cfg_input *req, int tx_rings, |
---|
4693 | 6130 | int rx_rings, int ring_grps, int cp_rings, |
---|
4694 | | - int vnics) |
---|
| 6131 | + int stats, int vnics) |
---|
4695 | 6132 | { |
---|
4696 | 6133 | u32 enables = 0; |
---|
4697 | 6134 | |
---|
4698 | 6135 | bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); |
---|
4699 | 6136 | enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; |
---|
4700 | | - enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; |
---|
4701 | | - enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | |
---|
4702 | | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; |
---|
4703 | | - enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; |
---|
| 6137 | + enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | |
---|
| 6138 | + FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; |
---|
| 6139 | + enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; |
---|
| 6140 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 6141 | + enables |= tx_rings + ring_grps ? |
---|
| 6142 | + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; |
---|
| 6143 | + } else { |
---|
| 6144 | + enables |= cp_rings ? |
---|
| 6145 | + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; |
---|
| 6146 | + enables |= ring_grps ? |
---|
| 6147 | + FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; |
---|
| 6148 | + } |
---|
4704 | 6149 | enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; |
---|
| 6150 | + enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; |
---|
4705 | 6151 | |
---|
| 6152 | + req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); |
---|
4706 | 6153 | req->num_tx_rings = cpu_to_le16(tx_rings); |
---|
4707 | 6154 | req->num_rx_rings = cpu_to_le16(rx_rings); |
---|
4708 | | - req->num_hw_ring_grps = cpu_to_le16(ring_grps); |
---|
4709 | | - req->num_cmpl_rings = cpu_to_le16(cp_rings); |
---|
4710 | | - req->num_stat_ctxs = req->num_cmpl_rings; |
---|
| 6155 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 6156 | + req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); |
---|
| 6157 | + req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); |
---|
| 6158 | + } else { |
---|
| 6159 | + req->num_cmpl_rings = cpu_to_le16(cp_rings); |
---|
| 6160 | + req->num_hw_ring_grps = cpu_to_le16(ring_grps); |
---|
| 6161 | + req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); |
---|
| 6162 | + } |
---|
| 6163 | + req->num_stat_ctxs = cpu_to_le16(stats); |
---|
4711 | 6164 | req->num_vnics = cpu_to_le16(vnics); |
---|
4712 | 6165 | |
---|
4713 | 6166 | req->enables = cpu_to_le32(enables); |
---|
.. | .. |
---|
4715 | 6168 | |
---|
4716 | 6169 | static int |
---|
4717 | 6170 | bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
---|
4718 | | - int ring_grps, int cp_rings, int vnics) |
---|
| 6171 | + int ring_grps, int cp_rings, int stats, int vnics) |
---|
4719 | 6172 | { |
---|
4720 | 6173 | struct hwrm_func_cfg_input req = {0}; |
---|
4721 | 6174 | int rc; |
---|
4722 | 6175 | |
---|
4723 | 6176 | __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, |
---|
4724 | | - cp_rings, vnics); |
---|
| 6177 | + cp_rings, stats, vnics); |
---|
4725 | 6178 | if (!req.enables) |
---|
4726 | 6179 | return 0; |
---|
4727 | 6180 | |
---|
4728 | 6181 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4729 | 6182 | if (rc) |
---|
4730 | | - return -ENOMEM; |
---|
| 6183 | + return rc; |
---|
4731 | 6184 | |
---|
4732 | 6185 | if (bp->hwrm_spec_code < 0x10601) |
---|
4733 | 6186 | bp->hw_resc.resv_tx_rings = tx_rings; |
---|
4734 | 6187 | |
---|
4735 | | - rc = bnxt_hwrm_get_rings(bp); |
---|
4736 | | - return rc; |
---|
| 6188 | + return bnxt_hwrm_get_rings(bp); |
---|
4737 | 6189 | } |
---|
4738 | 6190 | |
---|
4739 | 6191 | static int |
---|
4740 | 6192 | bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
---|
4741 | | - int ring_grps, int cp_rings, int vnics) |
---|
| 6193 | + int ring_grps, int cp_rings, int stats, int vnics) |
---|
4742 | 6194 | { |
---|
4743 | 6195 | struct hwrm_func_vf_cfg_input req = {0}; |
---|
4744 | 6196 | int rc; |
---|
.. | .. |
---|
4749 | 6201 | } |
---|
4750 | 6202 | |
---|
4751 | 6203 | __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, |
---|
4752 | | - cp_rings, vnics); |
---|
4753 | | - req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | |
---|
4754 | | - FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS); |
---|
4755 | | - req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); |
---|
4756 | | - req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); |
---|
| 6204 | + cp_rings, stats, vnics); |
---|
4757 | 6205 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4758 | 6206 | if (rc) |
---|
4759 | | - return -ENOMEM; |
---|
| 6207 | + return rc; |
---|
4760 | 6208 | |
---|
4761 | | - rc = bnxt_hwrm_get_rings(bp); |
---|
4762 | | - return rc; |
---|
| 6209 | + return bnxt_hwrm_get_rings(bp); |
---|
4763 | 6210 | } |
---|
4764 | 6211 | |
---|
4765 | 6212 | static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, |
---|
4766 | | - int cp, int vnic) |
---|
| 6213 | + int cp, int stat, int vnic) |
---|
4767 | 6214 | { |
---|
4768 | 6215 | if (BNXT_PF(bp)) |
---|
4769 | | - return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic); |
---|
| 6216 | + return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, |
---|
| 6217 | + vnic); |
---|
4770 | 6218 | else |
---|
4771 | | - return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); |
---|
| 6219 | + return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, |
---|
| 6220 | + vnic); |
---|
4772 | 6221 | } |
---|
4773 | 6222 | |
---|
4774 | | -static int bnxt_cp_rings_in_use(struct bnxt *bp) |
---|
| 6223 | +int bnxt_nq_rings_in_use(struct bnxt *bp) |
---|
4775 | 6224 | { |
---|
4776 | 6225 | int cp = bp->cp_nr_rings; |
---|
4777 | 6226 | int ulp_msix, ulp_base; |
---|
.. | .. |
---|
4786 | 6235 | return cp; |
---|
4787 | 6236 | } |
---|
4788 | 6237 | |
---|
| 6238 | +static int bnxt_cp_rings_in_use(struct bnxt *bp) |
---|
| 6239 | +{ |
---|
| 6240 | + int cp; |
---|
| 6241 | + |
---|
| 6242 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 6243 | + return bnxt_nq_rings_in_use(bp); |
---|
| 6244 | + |
---|
| 6245 | + cp = bp->tx_nr_rings + bp->rx_nr_rings; |
---|
| 6246 | + return cp; |
---|
| 6247 | +} |
---|
| 6248 | + |
---|
| 6249 | +static int bnxt_get_func_stat_ctxs(struct bnxt *bp) |
---|
| 6250 | +{ |
---|
| 6251 | + int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); |
---|
| 6252 | + int cp = bp->cp_nr_rings; |
---|
| 6253 | + |
---|
| 6254 | + if (!ulp_stat) |
---|
| 6255 | + return cp; |
---|
| 6256 | + |
---|
| 6257 | + if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) |
---|
| 6258 | + return bnxt_get_ulp_msix_base(bp) + ulp_stat; |
---|
| 6259 | + |
---|
| 6260 | + return cp + ulp_stat; |
---|
| 6261 | +} |
---|
| 6262 | + |
---|
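bnxt_get_func_stat_ctxs() budgets one statistics context per completion ring for L2, plus whatever the RDMA ULP claims; when the ULP's MSI-X block sits beyond the L2 range, the total is measured from the ULP base instead. A small worked example of the common case, with the helper return values hard-coded where the real driver queries the ULP bookkeeping:

```c
#include <stdio.h>

/* Hard-coded stand-in for bnxt_get_ulp_stat_ctxs(); in the driver this
 * comes from the RDMA ULP layer.
 */
static int stat_ctxs_needed(int cp_nr_rings, int ulp_stat)
{
	if (!ulp_stat)
		return cp_nr_rings;	/* L2 only: one per completion ring */
	return cp_nr_rings + ulp_stat;	/* L2 plus the ULP's own contexts */
}

int main(void)
{
	printf("8 rings, no RDMA : %d stat ctxs\n", stat_ctxs_needed(8, 0));
	printf("8 rings, RDMA(2) : %d stat ctxs\n", stat_ctxs_needed(8, 2));
	return 0;
}
```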
| 6263 | +/* Check if a default RSS map needs to be set up. This function is only
---|
| 6264 | + * used on older firmware that does not require reserving RX rings. |
---|
| 6265 | + */ |
---|
| 6266 | +static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) |
---|
| 6267 | +{ |
---|
| 6268 | + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
---|
| 6269 | + |
---|
| 6270 | + /* The RSS map is valid only while bp->rx_nr_rings matches resv_rx_rings */
---|
| 6271 | + if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { |
---|
| 6272 | + hw_resc->resv_rx_rings = bp->rx_nr_rings; |
---|
| 6273 | + if (!netif_is_rxfh_configured(bp->dev)) |
---|
| 6274 | + bnxt_set_dflt_rss_indir_tbl(bp); |
---|
| 6275 | + } |
---|
| 6276 | +} |
---|
| 6277 | + |
---|
4789 | 6278 | static bool bnxt_need_reserve_rings(struct bnxt *bp) |
---|
4790 | 6279 | { |
---|
4791 | 6280 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
---|
4792 | 6281 | int cp = bnxt_cp_rings_in_use(bp); |
---|
4793 | | - int rx = bp->rx_nr_rings; |
---|
| 6282 | + int nq = bnxt_nq_rings_in_use(bp); |
---|
| 6283 | + int rx = bp->rx_nr_rings, stat; |
---|
4794 | 6284 | int vnic = 1, grp = rx; |
---|
4795 | 6285 | |
---|
4796 | | - if (bp->hwrm_spec_code < 0x10601) |
---|
4797 | | - return false; |
---|
4798 | | - |
---|
4799 | | - if (hw_resc->resv_tx_rings != bp->tx_nr_rings) |
---|
| 6286 | + if (hw_resc->resv_tx_rings != bp->tx_nr_rings && |
---|
| 6287 | + bp->hwrm_spec_code >= 0x10601) |
---|
4800 | 6288 | return true; |
---|
4801 | 6289 | |
---|
4802 | | - if (bp->flags & BNXT_FLAG_RFS) |
---|
| 6290 | + /* Old firmware does not need RX ring reservations but we still |
---|
| 6291 | + * need to set up a default RSS map when needed. With new firmware
---|
| 6292 | + * we go through RX ring reservations first and then set up the |
---|
| 6293 | + * RSS map for the successfully reserved RX rings when needed. |
---|
| 6294 | + */ |
---|
| 6295 | + if (!BNXT_NEW_RM(bp)) { |
---|
| 6296 | + bnxt_check_rss_tbl_no_rmgr(bp); |
---|
| 6297 | + return false; |
---|
| 6298 | + } |
---|
| 6299 | + if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
4803 | 6300 | vnic = rx + 1; |
---|
4804 | 6301 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
4805 | 6302 | rx <<= 1; |
---|
4806 | | - if (BNXT_NEW_RM(bp) && |
---|
4807 | | - (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || |
---|
4808 | | - hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic)) |
---|
| 6303 | + stat = bnxt_get_func_stat_ctxs(bp); |
---|
| 6304 | + if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || |
---|
| 6305 | + hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || |
---|
| 6306 | + (hw_resc->resv_hw_ring_grps != grp && |
---|
| 6307 | + !(bp->flags & BNXT_FLAG_CHIP_P5))) |
---|
| 6308 | + return true; |
---|
| 6309 | + if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && |
---|
| 6310 | + hw_resc->resv_irqs != nq) |
---|
4809 | 6311 | return true; |
---|
4810 | 6312 | return false; |
---|
4811 | 6313 | } |
---|
4812 | 6314 | |
---|
4813 | | -static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, |
---|
4814 | | - bool shared); |
---|
4815 | | - |
---|
4816 | 6315 | static int __bnxt_reserve_rings(struct bnxt *bp) |
---|
4817 | 6316 | { |
---|
4818 | 6317 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
---|
4819 | | - int cp = bnxt_cp_rings_in_use(bp); |
---|
| 6318 | + int cp = bnxt_nq_rings_in_use(bp); |
---|
4820 | 6319 | int tx = bp->tx_nr_rings; |
---|
4821 | 6320 | int rx = bp->rx_nr_rings; |
---|
4822 | 6321 | int grp, rx_rings, rc; |
---|
| 6322 | + int vnic = 1, stat; |
---|
4823 | 6323 | bool sh = false; |
---|
4824 | | - int vnic = 1; |
---|
4825 | 6324 | |
---|
4826 | 6325 | if (!bnxt_need_reserve_rings(bp)) |
---|
4827 | 6326 | return 0; |
---|
4828 | 6327 | |
---|
4829 | 6328 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) |
---|
4830 | 6329 | sh = true; |
---|
4831 | | - if (bp->flags & BNXT_FLAG_RFS) |
---|
| 6330 | + if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
4832 | 6331 | vnic = rx + 1; |
---|
4833 | 6332 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
4834 | 6333 | rx <<= 1; |
---|
4835 | 6334 | grp = bp->rx_nr_rings; |
---|
| 6335 | + stat = bnxt_get_func_stat_ctxs(bp); |
---|
4836 | 6336 | |
---|
4837 | | - rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic); |
---|
| 6337 | + rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); |
---|
4838 | 6338 | if (rc) |
---|
4839 | 6339 | return rc; |
---|
4840 | 6340 | |
---|
4841 | 6341 | tx = hw_resc->resv_tx_rings; |
---|
4842 | 6342 | if (BNXT_NEW_RM(bp)) { |
---|
4843 | 6343 | rx = hw_resc->resv_rx_rings; |
---|
4844 | | - cp = hw_resc->resv_cp_rings; |
---|
| 6344 | + cp = hw_resc->resv_irqs; |
---|
4845 | 6345 | grp = hw_resc->resv_hw_ring_grps; |
---|
4846 | 6346 | vnic = hw_resc->resv_vnics; |
---|
| 6347 | + stat = hw_resc->resv_stat_ctxs; |
---|
4847 | 6348 | } |
---|
4848 | 6349 | |
---|
4849 | 6350 | rx_rings = rx; |
---|
.. | .. |
---|
4862 | 6363 | } |
---|
4863 | 6364 | } |
---|
4864 | 6365 | rx_rings = min_t(int, rx_rings, grp); |
---|
| 6366 | + cp = min_t(int, cp, bp->cp_nr_rings); |
---|
| 6367 | + if (stat > bnxt_get_ulp_stat_ctxs(bp)) |
---|
| 6368 | + stat -= bnxt_get_ulp_stat_ctxs(bp); |
---|
| 6369 | + cp = min_t(int, cp, stat); |
---|
4865 | 6370 | rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); |
---|
4866 | 6371 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
4867 | 6372 | rx = rx_rings << 1; |
---|
4868 | 6373 | cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings; |
---|
4869 | 6374 | bp->tx_nr_rings = tx; |
---|
| 6375 | + |
---|
| 6376 | + /* If we cannot reserve all the RX rings, reset the RSS map only |
---|
| 6377 | + * if absolutely necessary |
---|
| 6378 | + */ |
---|
| 6379 | + if (rx_rings != bp->rx_nr_rings) { |
---|
| 6380 | + netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", |
---|
| 6381 | + rx_rings, bp->rx_nr_rings); |
---|
| 6382 | + if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) && |
---|
| 6383 | + (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != |
---|
| 6384 | + bnxt_get_nr_rss_ctxs(bp, rx_rings) || |
---|
| 6385 | + bnxt_get_max_rss_ring(bp) >= rx_rings)) { |
---|
| 6386 | + netdev_warn(bp->dev, "RSS table entries reverting to default\n"); |
---|
| 6387 | + bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; |
---|
| 6388 | + } |
---|
| 6389 | + } |
---|
4870 | 6390 | bp->rx_nr_rings = rx_rings; |
---|
4871 | 6391 | bp->cp_nr_rings = cp; |
---|
4872 | 6392 | |
---|
4873 | | - if (!tx || !rx || !cp || !grp || !vnic) |
---|
| 6393 | + if (!tx || !rx || !cp || !grp || !vnic || !stat) |
---|
4874 | 6394 | return -ENOMEM; |
---|
| 6395 | + |
---|
| 6396 | + if (!netif_is_rxfh_configured(bp->dev)) |
---|
| 6397 | + bnxt_set_dflt_rss_indir_tbl(bp); |
---|
4875 | 6398 | |
---|
4876 | 6399 | return rc; |
---|
4877 | 6400 | } |
---|
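When the user has not pinned an RSS map (netif_is_rxfh_configured()), the driver regenerates a default indirection table sized for whatever RX ring count was actually reserved. The usual default is a simple round-robin spread of table entries across rings; the sketch below illustrates that idea and is not the driver's exact bnxt_set_dflt_rss_indir_tbl(), whose table size is also a placeholder here.

```c
#include <stdint.h>
#include <stdio.h>

#define RSS_TBL_SIZE	128	/* illustrative table size */

/* Round-robin default: entry i steers to ring (i % nr_rx).
 * This mirrors ethtool's notion of a default indirection table.
 */
static void set_dflt_rss_indir_tbl(uint16_t *tbl, int tbl_size, int nr_rx)
{
	for (int i = 0; i < tbl_size; i++)
		tbl[i] = i % nr_rx;
}

int main(void)
{
	uint16_t tbl[RSS_TBL_SIZE];

	set_dflt_rss_indir_tbl(tbl, RSS_TBL_SIZE, 6);
	printf("entry 0 -> ring %u, entry 7 -> ring %u\n", tbl[0], tbl[7]);
	return 0;
}
```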
4878 | 6401 | |
---|
4879 | 6402 | static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
---|
4880 | | - int ring_grps, int cp_rings, int vnics) |
---|
| 6403 | + int ring_grps, int cp_rings, int stats, |
---|
| 6404 | + int vnics) |
---|
4881 | 6405 | { |
---|
4882 | 6406 | struct hwrm_func_vf_cfg_input req = {0}; |
---|
4883 | 6407 | u32 flags; |
---|
4884 | | - int rc; |
---|
4885 | 6408 | |
---|
4886 | 6409 | if (!BNXT_NEW_RM(bp)) |
---|
4887 | 6410 | return 0; |
---|
4888 | 6411 | |
---|
4889 | 6412 | __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, |
---|
4890 | | - cp_rings, vnics); |
---|
| 6413 | + cp_rings, stats, vnics); |
---|
4891 | 6414 | flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | |
---|
4892 | 6415 | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | |
---|
4893 | 6416 | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | |
---|
4894 | | - FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | |
---|
4895 | 6417 | FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | |
---|
4896 | | - FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; |
---|
| 6418 | + FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | |
---|
| 6419 | + FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; |
---|
| 6420 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 6421 | + flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; |
---|
4897 | 6422 | |
---|
4898 | 6423 | req.flags = cpu_to_le32(flags); |
---|
4899 | | - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4900 | | - if (rc) |
---|
4901 | | - return -ENOMEM; |
---|
4902 | | - return 0; |
---|
| 6424 | + return hwrm_send_message_silent(bp, &req, sizeof(req), |
---|
| 6425 | + HWRM_CMD_TIMEOUT); |
---|
4903 | 6426 | } |
---|
4904 | 6427 | |
---|
4905 | 6428 | static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
---|
4906 | | - int ring_grps, int cp_rings, int vnics) |
---|
| 6429 | + int ring_grps, int cp_rings, int stats, |
---|
| 6430 | + int vnics) |
---|
4907 | 6431 | { |
---|
4908 | 6432 | struct hwrm_func_cfg_input req = {0}; |
---|
4909 | 6433 | u32 flags; |
---|
4910 | | - int rc; |
---|
4911 | 6434 | |
---|
4912 | 6435 | __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, |
---|
4913 | | - cp_rings, vnics); |
---|
| 6436 | + cp_rings, stats, vnics); |
---|
4914 | 6437 | flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; |
---|
4915 | | - if (BNXT_NEW_RM(bp)) |
---|
| 6438 | + if (BNXT_NEW_RM(bp)) { |
---|
4916 | 6439 | flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | |
---|
4917 | 6440 | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | |
---|
4918 | | - FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | |
---|
4919 | 6441 | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | |
---|
4920 | 6442 | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; |
---|
| 6443 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 6444 | + flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | |
---|
| 6445 | + FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; |
---|
| 6446 | + else |
---|
| 6447 | + flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; |
---|
| 6448 | + } |
---|
4921 | 6449 | |
---|
4922 | 6450 | req.flags = cpu_to_le32(flags); |
---|
4923 | | - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4924 | | - if (rc) |
---|
4925 | | - return -ENOMEM; |
---|
4926 | | - return 0; |
---|
| 6451 | + return hwrm_send_message_silent(bp, &req, sizeof(req), |
---|
| 6452 | + HWRM_CMD_TIMEOUT); |
---|
4927 | 6453 | } |
---|
4928 | 6454 | |
---|
4929 | 6455 | static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
---|
4930 | | - int ring_grps, int cp_rings, int vnics) |
---|
| 6456 | + int ring_grps, int cp_rings, int stats, |
---|
| 6457 | + int vnics) |
---|
4931 | 6458 | { |
---|
4932 | 6459 | if (bp->hwrm_spec_code < 0x10801) |
---|
4933 | 6460 | return 0; |
---|
4934 | 6461 | |
---|
4935 | 6462 | if (BNXT_PF(bp)) |
---|
4936 | 6463 | return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, |
---|
4937 | | - ring_grps, cp_rings, vnics); |
---|
| 6464 | + ring_grps, cp_rings, stats, |
---|
| 6465 | + vnics); |
---|
4938 | 6466 | |
---|
4939 | 6467 | return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, |
---|
4940 | | - cp_rings, vnics); |
---|
| 6468 | + cp_rings, stats, vnics); |
---|
4941 | 6469 | } |
---|
4942 | 6470 | |
---|
4943 | | -static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, |
---|
| 6471 | +static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) |
---|
| 6472 | +{ |
---|
| 6473 | + struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 6474 | + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; |
---|
| 6475 | + struct hwrm_ring_aggint_qcaps_input req = {0}; |
---|
| 6476 | + int rc; |
---|
| 6477 | + |
---|
| 6478 | + coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; |
---|
| 6479 | + coal_cap->num_cmpl_dma_aggr_max = 63; |
---|
| 6480 | + coal_cap->num_cmpl_dma_aggr_during_int_max = 63; |
---|
| 6481 | + coal_cap->cmpl_aggr_dma_tmr_max = 65535; |
---|
| 6482 | + coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; |
---|
| 6483 | + coal_cap->int_lat_tmr_min_max = 65535; |
---|
| 6484 | + coal_cap->int_lat_tmr_max_max = 65535; |
---|
| 6485 | + coal_cap->num_cmpl_aggr_int_max = 65535; |
---|
| 6486 | + coal_cap->timer_units = 80; |
---|
| 6487 | + |
---|
| 6488 | + if (bp->hwrm_spec_code < 0x10902) |
---|
| 6489 | + return; |
---|
| 6490 | + |
---|
| 6491 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); |
---|
| 6492 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 6493 | + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 6494 | + if (!rc) { |
---|
| 6495 | + coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); |
---|
| 6496 | + coal_cap->nq_params = le32_to_cpu(resp->nq_params); |
---|
| 6497 | + coal_cap->num_cmpl_dma_aggr_max = |
---|
| 6498 | + le16_to_cpu(resp->num_cmpl_dma_aggr_max); |
---|
| 6499 | + coal_cap->num_cmpl_dma_aggr_during_int_max = |
---|
| 6500 | + le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); |
---|
| 6501 | + coal_cap->cmpl_aggr_dma_tmr_max = |
---|
| 6502 | + le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); |
---|
| 6503 | + coal_cap->cmpl_aggr_dma_tmr_during_int_max = |
---|
| 6504 | + le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); |
---|
| 6505 | + coal_cap->int_lat_tmr_min_max = |
---|
| 6506 | + le16_to_cpu(resp->int_lat_tmr_min_max); |
---|
| 6507 | + coal_cap->int_lat_tmr_max_max = |
---|
| 6508 | + le16_to_cpu(resp->int_lat_tmr_max_max); |
---|
| 6509 | + coal_cap->num_cmpl_aggr_int_max = |
---|
| 6510 | + le16_to_cpu(resp->num_cmpl_aggr_int_max); |
---|
| 6511 | + coal_cap->timer_units = le16_to_cpu(resp->timer_units); |
---|
| 6512 | + } |
---|
| 6513 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 6514 | +} |
---|
| 6515 | + |
---|
| 6516 | +static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) |
---|
| 6517 | +{ |
---|
| 6518 | + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; |
---|
| 6519 | + |
---|
| 6520 | + return usec * 1000 / coal_cap->timer_units; |
---|
| 6521 | +} |
---|
| 6522 | + |
---|
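bnxt_usec_to_coal_tmr() converts the user's microsecond settings into device timer ticks: usec * 1000 gives nanoseconds, divided by timer_units (nanoseconds per tick, 80 by default or whatever RING_AGGINT_QCAPS reports). The derived timers set later clamp to the advertised maxima, with the min timer at half and the buffer timer at a quarter of the interrupt timer. Worked numbers, using the default 80 ns unit:

```c
#include <stdint.h>
#include <stdio.h>

#define CLAMP(v, lo, hi)	((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

static uint16_t usec_to_coal_tmr(uint16_t usec, uint16_t timer_units)
{
	return usec * 1000 / timer_units;	/* usec -> ns -> ticks */
}

int main(void)
{
	uint16_t timer_units = 80;	/* default: 80 ns per tick */
	uint16_t tmr = usec_to_coal_tmr(12, timer_units);	/* 12 usec */

	printf("int_lat_tmr_max   = %u ticks\n", tmr);			/* 150 */
	printf("int_lat_tmr_min   = %u ticks\n",
	       CLAMP(tmr / 2, 1, 65535));				/* 75 */
	printf("cmpl_aggr_dma_tmr = %u ticks\n",
	       CLAMP(tmr / 4, 1, 65535));				/* 37 */
	return 0;
}
```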
| 6523 | +static void bnxt_hwrm_set_coal_params(struct bnxt *bp, |
---|
| 6524 | + struct bnxt_coal *hw_coal, |
---|
4944 | 6525 | struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) |
---|
4945 | 6526 | { |
---|
4946 | | - u16 val, tmr, max, flags; |
---|
| 6527 | + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; |
---|
| 6528 | + u32 cmpl_params = coal_cap->cmpl_params; |
---|
| 6529 | + u16 val, tmr, max, flags = 0; |
---|
4947 | 6530 | |
---|
4948 | 6531 | max = hw_coal->bufs_per_record * 128; |
---|
4949 | 6532 | if (hw_coal->budget) |
---|
4950 | 6533 | max = hw_coal->bufs_per_record * hw_coal->budget; |
---|
| 6534 | + max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); |
---|
4951 | 6535 | |
---|
4952 | 6536 | val = clamp_t(u16, hw_coal->coal_bufs, 1, max); |
---|
4953 | 6537 | req->num_cmpl_aggr_int = cpu_to_le16(val); |
---|
4954 | 6538 | |
---|
4955 | | - /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ |
---|
4956 | | - val = min_t(u16, val, 63); |
---|
| 6539 | + val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); |
---|
4957 | 6540 | req->num_cmpl_dma_aggr = cpu_to_le16(val); |
---|
4958 | 6541 | |
---|
4959 | | - /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ |
---|
4960 | | - val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63); |
---|
| 6542 | + val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, |
---|
| 6543 | + coal_cap->num_cmpl_dma_aggr_during_int_max); |
---|
4961 | 6544 | req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); |
---|
4962 | 6545 | |
---|
4963 | | - tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks); |
---|
4964 | | - tmr = max_t(u16, tmr, 1); |
---|
| 6546 | + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); |
---|
| 6547 | + tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); |
---|
4965 | 6548 | req->int_lat_tmr_max = cpu_to_le16(tmr); |
---|
4966 | 6549 | |
---|
4967 | 6550 | /* min timer set to 1/2 of interrupt timer */ |
---|
4968 | | - val = tmr / 2; |
---|
4969 | | - req->int_lat_tmr_min = cpu_to_le16(val); |
---|
| 6551 | + if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { |
---|
| 6552 | + val = tmr / 2; |
---|
| 6553 | + val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); |
---|
| 6554 | + req->int_lat_tmr_min = cpu_to_le16(val); |
---|
| 6555 | + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); |
---|
| 6556 | + } |
---|
4970 | 6557 | |
---|
4971 | 6558 | /* buf timer set to 1/4 of interrupt timer */ |
---|
4972 | | - val = max_t(u16, tmr / 4, 1); |
---|
| 6559 | + val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); |
---|
4973 | 6560 | req->cmpl_aggr_dma_tmr = cpu_to_le16(val); |
---|
4974 | 6561 | |
---|
4975 | | - tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq); |
---|
4976 | | - tmr = max_t(u16, tmr, 1); |
---|
4977 | | - req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); |
---|
| 6562 | + if (cmpl_params & |
---|
| 6563 | + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { |
---|
| 6564 | + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); |
---|
| 6565 | + val = clamp_t(u16, tmr, 1, |
---|
| 6566 | + coal_cap->cmpl_aggr_dma_tmr_during_int_max); |
---|
| 6567 | + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); |
---|
| 6568 | + req->enables |= |
---|
| 6569 | + cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); |
---|
| 6570 | + } |
---|
4978 | 6571 | |
---|
4979 | | - flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; |
---|
4980 | | - if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) |
---|
| 6572 | + if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) |
---|
| 6573 | + flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; |
---|
| 6574 | + if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && |
---|
| 6575 | + hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) |
---|
4981 | 6576 | flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; |
---|
4982 | 6577 | req->flags = cpu_to_le16(flags); |
---|
| 6578 | + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); |
---|
| 6579 | +} |
---|
| 6580 | + |
---|
| 6581 | +/* Caller holds bp->hwrm_cmd_lock */ |
---|
| 6582 | +static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, |
---|
| 6583 | + struct bnxt_coal *hw_coal) |
---|
| 6584 | +{ |
---|
| 6585 | + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; |
---|
| 6586 | + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
| 6587 | + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; |
---|
| 6588 | + u32 nq_params = coal_cap->nq_params; |
---|
| 6589 | + u16 tmr; |
---|
| 6590 | + |
---|
| 6591 | + if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) |
---|
| 6592 | + return 0; |
---|
| 6593 | + |
---|
| 6594 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, |
---|
| 6595 | + -1, -1); |
---|
| 6596 | + req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); |
---|
| 6597 | + req.flags = |
---|
| 6598 | + cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); |
---|
| 6599 | + |
---|
| 6600 | + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; |
---|
| 6601 | + tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); |
---|
| 6602 | + req.int_lat_tmr_min = cpu_to_le16(tmr); |
---|
| 6603 | + req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); |
---|
| 6604 | + return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
4983 | 6605 | } |
---|
4984 | 6606 | |
---|
4985 | 6607 | int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) |
---|
.. | .. |
---|
4987 | 6609 | struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; |
---|
4988 | 6610 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
4989 | 6611 | struct bnxt_coal coal; |
---|
4990 | | - unsigned int grp_idx; |
---|
4991 | 6612 | |
---|
4992 | 6613 | /* Tick values in microseconds.
---|
4993 | 6614 | * 1 coal_buf x bufs_per_record = 1 completion record. |
---|
.. | .. |
---|
5003 | 6624 | bnxt_hwrm_cmd_hdr_init(bp, &req_rx, |
---|
5004 | 6625 | HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); |
---|
5005 | 6626 | |
---|
5006 | | - bnxt_hwrm_set_coal_params(&coal, &req_rx); |
---|
| 6627 | + bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); |
---|
5007 | 6628 | |
---|
5008 | | - grp_idx = bnapi->index; |
---|
5009 | | - req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); |
---|
| 6629 | + req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); |
---|
5010 | 6630 | |
---|
5011 | 6631 | return hwrm_send_message(bp, &req_rx, sizeof(req_rx), |
---|
5012 | 6632 | HWRM_CMD_TIMEOUT); |
---|
.. | .. |
---|
5023 | 6643 | bnxt_hwrm_cmd_hdr_init(bp, &req_tx, |
---|
5024 | 6644 | HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); |
---|
5025 | 6645 | |
---|
5026 | | - bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx); |
---|
5027 | | - bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx); |
---|
| 6646 | + bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); |
---|
| 6647 | + bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); |
---|
5028 | 6648 | |
---|
5029 | 6649 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
5030 | 6650 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
5031 | 6651 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
| 6652 | + struct bnxt_coal *hw_coal; |
---|
| 6653 | + u16 ring_id; |
---|
5032 | 6654 | |
---|
5033 | 6655 | req = &req_rx; |
---|
5034 | | - if (!bnapi->rx_ring) |
---|
| 6656 | + if (!bnapi->rx_ring) { |
---|
| 6657 | + ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); |
---|
5035 | 6658 | req = &req_tx; |
---|
5036 | | - req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); |
---|
| 6659 | + } else { |
---|
| 6660 | + ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); |
---|
| 6661 | + } |
---|
| 6662 | + req->ring_id = cpu_to_le16(ring_id); |
---|
5037 | 6663 | |
---|
5038 | 6664 | rc = _hwrm_send_message(bp, req, sizeof(*req), |
---|
5039 | 6665 | HWRM_CMD_TIMEOUT); |
---|
5040 | 6666 | if (rc) |
---|
5041 | 6667 | break; |
---|
| 6668 | + |
---|
| 6669 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 6670 | + continue; |
---|
| 6671 | + |
---|
| 6672 | + if (bnapi->rx_ring && bnapi->tx_ring) { |
---|
| 6673 | + req = &req_tx; |
---|
| 6674 | + ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); |
---|
| 6675 | + req->ring_id = cpu_to_le16(ring_id); |
---|
| 6676 | + rc = _hwrm_send_message(bp, req, sizeof(*req), |
---|
| 6677 | + HWRM_CMD_TIMEOUT); |
---|
| 6678 | + if (rc) |
---|
| 6679 | + break; |
---|
| 6680 | + } |
---|
| 6681 | + if (bnapi->rx_ring) |
---|
| 6682 | + hw_coal = &bp->rx_coal; |
---|
| 6683 | + else |
---|
| 6684 | + hw_coal = &bp->tx_coal; |
---|
| 6685 | + __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); |
---|
5042 | 6686 | } |
---|
5043 | 6687 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
5044 | 6688 | return rc; |
---|
5045 | 6689 | } |
---|
5046 | 6690 | |
---|
5047 | | -static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) |
---|
| 6691 | +static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) |
---|
5048 | 6692 | { |
---|
5049 | | - int rc = 0, i; |
---|
| 6693 | + struct hwrm_stat_ctx_clr_stats_input req0 = {0}; |
---|
5050 | 6694 | struct hwrm_stat_ctx_free_input req = {0}; |
---|
| 6695 | + int i; |
---|
5051 | 6696 | |
---|
5052 | 6697 | if (!bp->bnapi) |
---|
5053 | | - return 0; |
---|
| 6698 | + return; |
---|
5054 | 6699 | |
---|
5055 | 6700 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
5056 | | - return 0; |
---|
| 6701 | + return; |
---|
5057 | 6702 | |
---|
| 6703 | + bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); |
---|
5058 | 6704 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); |
---|
5059 | 6705 | |
---|
5060 | 6706 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
.. | .. |
---|
5064 | 6710 | |
---|
5065 | 6711 | if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { |
---|
5066 | 6712 | req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); |
---|
5067 | | - |
---|
5068 | | - rc = _hwrm_send_message(bp, &req, sizeof(req), |
---|
5069 | | - HWRM_CMD_TIMEOUT); |
---|
5070 | | - if (rc) |
---|
5071 | | - break; |
---|
| 6713 | + if (BNXT_FW_MAJ(bp) <= 20) { |
---|
| 6714 | + req0.stat_ctx_id = req.stat_ctx_id; |
---|
| 6715 | + _hwrm_send_message(bp, &req0, sizeof(req0), |
---|
| 6716 | + HWRM_CMD_TIMEOUT); |
---|
| 6717 | + } |
---|
| 6718 | + _hwrm_send_message(bp, &req, sizeof(req), |
---|
| 6719 | + HWRM_CMD_TIMEOUT); |
---|
5072 | 6720 | |
---|
5073 | 6721 | cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; |
---|
5074 | 6722 | } |
---|
5075 | 6723 | } |
---|
5076 | 6724 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
5077 | | - return rc; |
---|
5078 | 6725 | } |
---|
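The rewritten bnxt_hwrm_stat_ctx_free() works around old firmware (major version <= 20) that, apparently, does not zero a statistics context on free: it issues HWRM_STAT_CTX_CLR_STATS before HWRM_STAT_CTX_FREE on those versions so counters do not come back stale if the context is re-used, and it no longer aborts the loop on an error since every context must be released regardless. A sketch of that ordering with stubbed-out message calls standing in for _hwrm_send_message():

```c
#include <stdio.h>

/* Stubs standing in for _hwrm_send_message(); real calls go to firmware. */
static void send_clr_stats(int ctx_id) { printf("CLR_STATS ctx %d\n", ctx_id); }
static void send_ctx_free(int ctx_id)  { printf("FREE      ctx %d\n", ctx_id); }

/* Free every valid stats context; on old firmware, clear each one first so
 * the counters do not come back stale when the context is re-used later.
 */
static void stat_ctx_free_all(const int *ctx_ids, int n, int fw_major)
{
	for (int i = 0; i < n; i++) {
		if (ctx_ids[i] < 0)	/* stand-in for INVALID_STATS_CTX_ID */
			continue;
		if (fw_major <= 20)
			send_clr_stats(ctx_ids[i]);
		send_ctx_free(ctx_ids[i]);
	}
}

int main(void)
{
	int ids[] = { 3, -1, 7 };

	stat_ctx_free_all(ids, 3, 20);	/* old firmware: clear, then free */
	return 0;
}
```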
5079 | 6726 | |
---|
5080 | 6727 | static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) |
---|
.. | .. |
---|
5088 | 6735 | |
---|
5089 | 6736 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); |
---|
5090 | 6737 | |
---|
| 6738 | + req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); |
---|
5091 | 6739 | req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); |
---|
5092 | 6740 | |
---|
5093 | 6741 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
.. | .. |
---|
5095 | 6743 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
5096 | 6744 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
5097 | 6745 | |
---|
5098 | | - req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); |
---|
| 6746 | + req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); |
---|
5099 | 6747 | |
---|
5100 | 6748 | rc = _hwrm_send_message(bp, &req, sizeof(req), |
---|
5101 | 6749 | HWRM_CMD_TIMEOUT); |
---|
.. | .. |
---|
5114 | 6762 | { |
---|
5115 | 6763 | struct hwrm_func_qcfg_input req = {0}; |
---|
5116 | 6764 | struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 6765 | + u32 min_db_offset = 0; |
---|
5117 | 6766 | u16 flags; |
---|
5118 | 6767 | int rc; |
---|
5119 | 6768 | |
---|
.. | .. |
---|
5129 | 6778 | struct bnxt_vf_info *vf = &bp->vf; |
---|
5130 | 6779 | |
---|
5131 | 6780 | vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; |
---|
| 6781 | + } else { |
---|
| 6782 | + bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); |
---|
5132 | 6783 | } |
---|
5133 | 6784 | #endif |
---|
5134 | 6785 | flags = le16_to_cpu(resp->flags); |
---|
.. | .. |
---|
5140 | 6791 | } |
---|
5141 | 6792 | if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) |
---|
5142 | 6793 | bp->flags |= BNXT_FLAG_MULTI_HOST; |
---|
| 6794 | + if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) |
---|
| 6795 | + bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; |
---|
5143 | 6796 | |
---|
5144 | 6797 | switch (resp->port_partition_type) { |
---|
5145 | 6798 | case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: |
---|
.. | .. |
---|
5160 | 6813 | if (!bp->max_mtu) |
---|
5161 | 6814 | bp->max_mtu = BNXT_MAX_MTU; |
---|
5162 | 6815 | |
---|
| 6816 | + if (bp->db_size) |
---|
| 6817 | + goto func_qcfg_exit; |
---|
| 6818 | + |
---|
| 6819 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 6820 | + if (BNXT_PF(bp)) |
---|
| 6821 | + min_db_offset = DB_PF_OFFSET_P5; |
---|
| 6822 | + else |
---|
| 6823 | + min_db_offset = DB_VF_OFFSET_P5; |
---|
| 6824 | + } |
---|
| 6825 | + bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * |
---|
| 6826 | + 1024); |
---|
| 6827 | + if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || |
---|
| 6828 | + bp->db_size <= min_db_offset) |
---|
| 6829 | + bp->db_size = pci_resource_len(bp->pdev, 2); |
---|
| 6830 | + |
---|
5163 | 6831 | func_qcfg_exit: |
---|
5164 | 6832 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
5165 | 6833 | return rc; |
---|
| 6834 | +} |
---|
| 6835 | + |
---|
| 6836 | +static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) |
---|
| 6837 | +{ |
---|
| 6838 | + struct hwrm_func_backing_store_qcaps_input req = {0}; |
---|
| 6839 | + struct hwrm_func_backing_store_qcaps_output *resp = |
---|
| 6840 | + bp->hwrm_cmd_resp_addr; |
---|
| 6841 | + int rc; |
---|
| 6842 | + |
---|
| 6843 | + if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) |
---|
| 6844 | + return 0; |
---|
| 6845 | + |
---|
| 6846 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); |
---|
| 6847 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 6848 | + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 6849 | + if (!rc) { |
---|
| 6850 | + struct bnxt_ctx_pg_info *ctx_pg; |
---|
| 6851 | + struct bnxt_ctx_mem_info *ctx; |
---|
| 6852 | + int i, tqm_rings; |
---|
| 6853 | + |
---|
| 6854 | + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
---|
| 6855 | + if (!ctx) { |
---|
| 6856 | + rc = -ENOMEM; |
---|
| 6857 | + goto ctx_err; |
---|
| 6858 | + } |
---|
| 6859 | + ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); |
---|
| 6860 | + ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); |
---|
| 6861 | + ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); |
---|
| 6862 | + ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); |
---|
| 6863 | + ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); |
---|
| 6864 | + ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); |
---|
| 6865 | + ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); |
---|
| 6866 | + ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); |
---|
| 6867 | + ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); |
---|
| 6868 | + ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); |
---|
| 6869 | + ctx->vnic_max_vnic_entries = |
---|
| 6870 | + le16_to_cpu(resp->vnic_max_vnic_entries); |
---|
| 6871 | + ctx->vnic_max_ring_table_entries = |
---|
| 6872 | + le16_to_cpu(resp->vnic_max_ring_table_entries); |
---|
| 6873 | + ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); |
---|
| 6874 | + ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); |
---|
| 6875 | + ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); |
---|
| 6876 | + ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); |
---|
| 6877 | + ctx->tqm_min_entries_per_ring = |
---|
| 6878 | + le32_to_cpu(resp->tqm_min_entries_per_ring); |
---|
| 6879 | + ctx->tqm_max_entries_per_ring = |
---|
| 6880 | + le32_to_cpu(resp->tqm_max_entries_per_ring); |
---|
| 6881 | + ctx->tqm_entries_multiple = resp->tqm_entries_multiple; |
---|
| 6882 | + if (!ctx->tqm_entries_multiple) |
---|
| 6883 | + ctx->tqm_entries_multiple = 1; |
---|
| 6884 | + ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); |
---|
| 6885 | + ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); |
---|
| 6886 | + ctx->mrav_num_entries_units = |
---|
| 6887 | + le16_to_cpu(resp->mrav_num_entries_units); |
---|
| 6888 | + ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); |
---|
| 6889 | + ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); |
---|
| 6890 | + ctx->ctx_kind_initializer = resp->ctx_kind_initializer; |
---|
| 6891 | + ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; |
---|
| 6892 | + if (!ctx->tqm_fp_rings_count) |
---|
| 6893 | + ctx->tqm_fp_rings_count = bp->max_q; |
---|
| 6894 | + else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) |
---|
| 6895 | + ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; |
---|
| 6896 | + |
---|
| 6897 | + tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS; |
---|
| 6898 | + ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL); |
---|
| 6899 | + if (!ctx_pg) { |
---|
| 6900 | + kfree(ctx); |
---|
| 6901 | + rc = -ENOMEM; |
---|
| 6902 | + goto ctx_err; |
---|
| 6903 | + } |
---|
| 6904 | + for (i = 0; i < tqm_rings; i++, ctx_pg++) |
---|
| 6905 | + ctx->tqm_mem[i] = ctx_pg; |
---|
| 6906 | + bp->ctx = ctx; |
---|
| 6907 | + } else { |
---|
| 6908 | + rc = 0; |
---|
| 6909 | + } |
---|
| 6910 | +ctx_err: |
---|
| 6911 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 6912 | + return rc; |
---|
| 6913 | +} |
---|
| 6914 | + |
---|
| 6915 | +static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, |
---|
| 6916 | + __le64 *pg_dir) |
---|
| 6917 | +{ |
---|
| 6918 | + BNXT_SET_CTX_PAGE_ATTR(*pg_attr); |
---|
| 6919 | + if (rmem->depth >= 1) { |
---|
| 6920 | + if (rmem->depth == 2) |
---|
| 6921 | + *pg_attr |= 2; |
---|
| 6922 | + else |
---|
| 6923 | + *pg_attr |= 1; |
---|
| 6924 | + *pg_dir = cpu_to_le64(rmem->pg_tbl_map); |
---|
| 6925 | + } else { |
---|
| 6926 | + *pg_dir = cpu_to_le64(rmem->dma_arr[0]); |
---|
| 6927 | + } |
---|
| 6928 | +} |
---|
| 6929 | + |
---|
| 6930 | +#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ |
---|
| 6931 | + (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ |
---|
| 6932 | + FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ |
---|
| 6933 | + FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ |
---|
| 6934 | + FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ |
---|
| 6935 | + FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) |
---|
| 6936 | + |
---|
| 6937 | +static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) |
---|
| 6938 | +{ |
---|
| 6939 | + struct hwrm_func_backing_store_cfg_input req = {0}; |
---|
| 6940 | + struct bnxt_ctx_mem_info *ctx = bp->ctx; |
---|
| 6941 | + struct bnxt_ctx_pg_info *ctx_pg; |
---|
| 6942 | + __le32 *num_entries; |
---|
| 6943 | + __le64 *pg_dir; |
---|
| 6944 | + u32 flags = 0; |
---|
| 6945 | + u8 *pg_attr; |
---|
| 6946 | + u32 ena; |
---|
| 6947 | + int i; |
---|
| 6948 | + |
---|
| 6949 | + if (!ctx) |
---|
| 6950 | + return 0; |
---|
| 6951 | + |
---|
| 6952 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); |
---|
| 6953 | + req.enables = cpu_to_le32(enables); |
---|
| 6954 | + |
---|
| 6955 | + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { |
---|
| 6956 | + ctx_pg = &ctx->qp_mem; |
---|
| 6957 | + req.qp_num_entries = cpu_to_le32(ctx_pg->entries); |
---|
| 6958 | + req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); |
---|
| 6959 | + req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); |
---|
| 6960 | + req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); |
---|
| 6961 | + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, |
---|
| 6962 | + &req.qpc_pg_size_qpc_lvl, |
---|
| 6963 | + &req.qpc_page_dir); |
---|
| 6964 | + } |
---|
| 6965 | + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { |
---|
| 6966 | + ctx_pg = &ctx->srq_mem; |
---|
| 6967 | + req.srq_num_entries = cpu_to_le32(ctx_pg->entries); |
---|
| 6968 | + req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); |
---|
| 6969 | + req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); |
---|
| 6970 | + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, |
---|
| 6971 | + &req.srq_pg_size_srq_lvl, |
---|
| 6972 | + &req.srq_page_dir); |
---|
| 6973 | + } |
---|
| 6974 | + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { |
---|
| 6975 | + ctx_pg = &ctx->cq_mem; |
---|
| 6976 | + req.cq_num_entries = cpu_to_le32(ctx_pg->entries); |
---|
| 6977 | + req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); |
---|
| 6978 | + req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); |
---|
| 6979 | + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, |
---|
| 6980 | + &req.cq_page_dir); |
---|
| 6981 | + } |
---|
| 6982 | + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { |
---|
| 6983 | + ctx_pg = &ctx->vnic_mem; |
---|
| 6984 | + req.vnic_num_vnic_entries = |
---|
| 6985 | + cpu_to_le16(ctx->vnic_max_vnic_entries); |
---|
| 6986 | + req.vnic_num_ring_table_entries = |
---|
| 6987 | + cpu_to_le16(ctx->vnic_max_ring_table_entries); |
---|
| 6988 | + req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); |
---|
| 6989 | + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, |
---|
| 6990 | + &req.vnic_pg_size_vnic_lvl, |
---|
| 6991 | + &req.vnic_page_dir); |
---|
| 6992 | + } |
---|
| 6993 | + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { |
---|
| 6994 | + ctx_pg = &ctx->stat_mem; |
---|
| 6995 | + req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); |
---|
| 6996 | + req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); |
---|
| 6997 | + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, |
---|
| 6998 | + &req.stat_pg_size_stat_lvl, |
---|
| 6999 | + &req.stat_page_dir); |
---|
| 7000 | + } |
---|
| 7001 | + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { |
---|
| 7002 | + ctx_pg = &ctx->mrav_mem; |
---|
| 7003 | + req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); |
---|
| 7004 | + if (ctx->mrav_num_entries_units) |
---|
| 7005 | + flags |= |
---|
| 7006 | + FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; |
---|
| 7007 | + req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); |
---|
| 7008 | + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, |
---|
| 7009 | + &req.mrav_pg_size_mrav_lvl, |
---|
| 7010 | + &req.mrav_page_dir); |
---|
| 7011 | + } |
---|
| 7012 | + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { |
---|
| 7013 | + ctx_pg = &ctx->tim_mem; |
---|
| 7014 | + req.tim_num_entries = cpu_to_le32(ctx_pg->entries); |
---|
| 7015 | + req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); |
---|
| 7016 | + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, |
---|
| 7017 | + &req.tim_pg_size_tim_lvl, |
---|
| 7018 | + &req.tim_page_dir); |
---|
| 7019 | + } |
---|
| 7020 | + for (i = 0, num_entries = &req.tqm_sp_num_entries, |
---|
| 7021 | + pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, |
---|
| 7022 | + pg_dir = &req.tqm_sp_page_dir, |
---|
| 7023 | + ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; |
---|
| 7024 | + i < BNXT_MAX_TQM_RINGS; |
---|
| 7025 | + i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { |
---|
| 7026 | + if (!(enables & ena)) |
---|
| 7027 | + continue; |
---|
| 7028 | + |
---|
| 7029 | + req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); |
---|
| 7030 | + ctx_pg = ctx->tqm_mem[i]; |
---|
| 7031 | + *num_entries = cpu_to_le32(ctx_pg->entries); |
---|
| 7032 | + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); |
---|
| 7033 | + } |
---|
| 7034 | + req.flags = cpu_to_le32(flags); |
---|
| 7035 | + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 7036 | +} |
---|
| 7037 | + |
---|
| 7038 | +static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, |
---|
| 7039 | + struct bnxt_ctx_pg_info *ctx_pg) |
---|
| 7040 | +{ |
---|
| 7041 | + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; |
---|
| 7042 | + |
---|
| 7043 | + rmem->page_size = BNXT_PAGE_SIZE; |
---|
| 7044 | + rmem->pg_arr = ctx_pg->ctx_pg_arr; |
---|
| 7045 | + rmem->dma_arr = ctx_pg->ctx_dma_arr; |
---|
| 7046 | + rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; |
---|
| 7047 | + if (rmem->depth >= 1) |
---|
| 7048 | + rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; |
---|
| 7049 | + return bnxt_alloc_ring(bp, rmem); |
---|
| 7050 | +} |
---|
| 7051 | + |
---|
| 7052 | +static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, |
---|
| 7053 | + struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, |
---|
| 7054 | + u8 depth, bool use_init_val) |
---|
| 7055 | +{ |
---|
| 7056 | + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; |
---|
| 7057 | + int rc; |
---|
| 7058 | + |
---|
| 7059 | + if (!mem_size) |
---|
| 7060 | + return -EINVAL; |
---|
| 7061 | + |
---|
| 7062 | + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); |
---|
| 7063 | + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { |
---|
| 7064 | + ctx_pg->nr_pages = 0; |
---|
| 7065 | + return -EINVAL; |
---|
| 7066 | + } |
---|
| 7067 | + if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { |
---|
| 7068 | + int nr_tbls, i; |
---|
| 7069 | + |
---|
| 7070 | + rmem->depth = 2; |
---|
| 7071 | + ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), |
---|
| 7072 | + GFP_KERNEL); |
---|
| 7073 | + if (!ctx_pg->ctx_pg_tbl) |
---|
| 7074 | + return -ENOMEM; |
---|
| 7075 | + nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); |
---|
| 7076 | + rmem->nr_pages = nr_tbls; |
---|
| 7077 | + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); |
---|
| 7078 | + if (rc) |
---|
| 7079 | + return rc; |
---|
| 7080 | + for (i = 0; i < nr_tbls; i++) { |
---|
| 7081 | + struct bnxt_ctx_pg_info *pg_tbl; |
---|
| 7082 | + |
---|
| 7083 | + pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); |
---|
| 7084 | + if (!pg_tbl) |
---|
| 7085 | + return -ENOMEM; |
---|
| 7086 | + ctx_pg->ctx_pg_tbl[i] = pg_tbl; |
---|
| 7087 | + rmem = &pg_tbl->ring_mem; |
---|
| 7088 | + rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; |
---|
| 7089 | + rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; |
---|
| 7090 | + rmem->depth = 1; |
---|
| 7091 | + rmem->nr_pages = MAX_CTX_PAGES; |
---|
| 7092 | + if (use_init_val) |
---|
| 7093 | + rmem->init_val = bp->ctx->ctx_kind_initializer; |
---|
| 7094 | + if (i == (nr_tbls - 1)) { |
---|
| 7095 | + int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; |
---|
| 7096 | + |
---|
| 7097 | + if (rem) |
---|
| 7098 | + rmem->nr_pages = rem; |
---|
| 7099 | + } |
---|
| 7100 | + rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); |
---|
| 7101 | + if (rc) |
---|
| 7102 | + break; |
---|
| 7103 | + } |
---|
| 7104 | + } else { |
---|
| 7105 | + rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); |
---|
| 7106 | + if (rmem->nr_pages > 1 || depth) |
---|
| 7107 | + rmem->depth = 1; |
---|
| 7108 | + if (use_init_val) |
---|
| 7109 | + rmem->init_val = bp->ctx->ctx_kind_initializer; |
---|
| 7110 | + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); |
---|
| 7111 | + } |
---|
| 7112 | + return rc; |
---|
| 7113 | +} |
---|
| 7114 | + |
---|
| 7115 | +static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, |
---|
| 7116 | + struct bnxt_ctx_pg_info *ctx_pg) |
---|
| 7117 | +{ |
---|
| 7118 | + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; |
---|
| 7119 | + |
---|
| 7120 | + if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || |
---|
| 7121 | + ctx_pg->ctx_pg_tbl) { |
---|
| 7122 | + int i, nr_tbls = rmem->nr_pages; |
---|
| 7123 | + |
---|
| 7124 | + for (i = 0; i < nr_tbls; i++) { |
---|
| 7125 | + struct bnxt_ctx_pg_info *pg_tbl; |
---|
| 7126 | + struct bnxt_ring_mem_info *rmem2; |
---|
| 7127 | + |
---|
| 7128 | + pg_tbl = ctx_pg->ctx_pg_tbl[i]; |
---|
| 7129 | + if (!pg_tbl) |
---|
| 7130 | + continue; |
---|
| 7131 | + rmem2 = &pg_tbl->ring_mem; |
---|
| 7132 | + bnxt_free_ring(bp, rmem2); |
---|
| 7133 | + ctx_pg->ctx_pg_arr[i] = NULL; |
---|
| 7134 | + kfree(pg_tbl); |
---|
| 7135 | + ctx_pg->ctx_pg_tbl[i] = NULL; |
---|
| 7136 | + } |
---|
| 7137 | + kfree(ctx_pg->ctx_pg_tbl); |
---|
| 7138 | + ctx_pg->ctx_pg_tbl = NULL; |
---|
| 7139 | + } |
---|
| 7140 | + bnxt_free_ring(bp, rmem); |
---|
| 7141 | + ctx_pg->nr_pages = 0; |
---|
| 7142 | +} |
---|
| 7143 | + |
---|
| 7144 | +static void bnxt_free_ctx_mem(struct bnxt *bp) |
---|
| 7145 | +{ |
---|
| 7146 | + struct bnxt_ctx_mem_info *ctx = bp->ctx; |
---|
| 7147 | + int i; |
---|
| 7148 | + |
---|
| 7149 | + if (!ctx) |
---|
| 7150 | + return; |
---|
| 7151 | + |
---|
| 7152 | + if (ctx->tqm_mem[0]) { |
---|
| 7153 | + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) |
---|
| 7154 | + bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); |
---|
| 7155 | + kfree(ctx->tqm_mem[0]); |
---|
| 7156 | + ctx->tqm_mem[0] = NULL; |
---|
| 7157 | + } |
---|
| 7158 | + |
---|
| 7159 | + bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); |
---|
| 7160 | + bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); |
---|
| 7161 | + bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); |
---|
| 7162 | + bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); |
---|
| 7163 | + bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); |
---|
| 7164 | + bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); |
---|
| 7165 | + bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); |
---|
| 7166 | + ctx->flags &= ~BNXT_CTX_FLAG_INITED; |
---|
| 7167 | +} |
---|
| 7168 | + |
---|
| 7169 | +static int bnxt_alloc_ctx_mem(struct bnxt *bp) |
---|
| 7170 | +{ |
---|
| 7171 | + struct bnxt_ctx_pg_info *ctx_pg; |
---|
| 7172 | + struct bnxt_ctx_mem_info *ctx; |
---|
| 7173 | + u32 mem_size, ena, entries; |
---|
| 7174 | + u32 entries_sp, min; |
---|
| 7175 | + u32 num_mr, num_ah; |
---|
| 7176 | + u32 extra_srqs = 0; |
---|
| 7177 | + u32 extra_qps = 0; |
---|
| 7178 | + u8 pg_lvl = 1; |
---|
| 7179 | + int i, rc; |
---|
| 7180 | + |
---|
| 7181 | + rc = bnxt_hwrm_func_backing_store_qcaps(bp); |
---|
| 7182 | + if (rc) { |
---|
| 7183 | + netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", |
---|
| 7184 | + rc); |
---|
| 7185 | + return rc; |
---|
| 7186 | + } |
---|
| 7187 | + ctx = bp->ctx; |
---|
| 7188 | + if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) |
---|
| 7189 | + return 0; |
---|
| 7190 | + |
---|
| 7191 | + if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { |
---|
| 7192 | + pg_lvl = 2; |
---|
| 7193 | + extra_qps = 65536; |
---|
| 7194 | + extra_srqs = 8192; |
---|
| 7195 | + } |
---|
| 7196 | + |
---|
| 7197 | + ctx_pg = &ctx->qp_mem; |
---|
| 7198 | + ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + |
---|
| 7199 | + extra_qps; |
---|
| 7200 | + mem_size = ctx->qp_entry_size * ctx_pg->entries; |
---|
| 7201 | + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); |
---|
| 7202 | + if (rc) |
---|
| 7203 | + return rc; |
---|
| 7204 | + |
---|
| 7205 | + ctx_pg = &ctx->srq_mem; |
---|
| 7206 | + ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; |
---|
| 7207 | + mem_size = ctx->srq_entry_size * ctx_pg->entries; |
---|
| 7208 | + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); |
---|
| 7209 | + if (rc) |
---|
| 7210 | + return rc; |
---|
| 7211 | + |
---|
| 7212 | + ctx_pg = &ctx->cq_mem; |
---|
| 7213 | + ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; |
---|
| 7214 | + mem_size = ctx->cq_entry_size * ctx_pg->entries; |
---|
| 7215 | + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); |
---|
| 7216 | + if (rc) |
---|
| 7217 | + return rc; |
---|
| 7218 | + |
---|
| 7219 | + ctx_pg = &ctx->vnic_mem; |
---|
| 7220 | + ctx_pg->entries = ctx->vnic_max_vnic_entries + |
---|
| 7221 | + ctx->vnic_max_ring_table_entries; |
---|
| 7222 | + mem_size = ctx->vnic_entry_size * ctx_pg->entries; |
---|
| 7223 | + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); |
---|
| 7224 | + if (rc) |
---|
| 7225 | + return rc; |
---|
| 7226 | + |
---|
| 7227 | + ctx_pg = &ctx->stat_mem; |
---|
| 7228 | + ctx_pg->entries = ctx->stat_max_entries; |
---|
| 7229 | + mem_size = ctx->stat_entry_size * ctx_pg->entries; |
---|
| 7230 | + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); |
---|
| 7231 | + if (rc) |
---|
| 7232 | + return rc; |
---|
| 7233 | + |
---|
| 7234 | + ena = 0; |
---|
| 7235 | + if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) |
---|
| 7236 | + goto skip_rdma; |
---|
| 7237 | + |
---|
| 7238 | + ctx_pg = &ctx->mrav_mem; |
---|
| 7239 | + /* 128K extra is needed to accommodate static AH context |
---|
| 7240 | + * allocation by f/w. |
---|
| 7241 | + */ |
---|
| 7242 | + num_mr = 1024 * 256; |
---|
| 7243 | + num_ah = 1024 * 128; |
---|
| 7244 | + ctx_pg->entries = num_mr + num_ah; |
---|
| 7245 | + mem_size = ctx->mrav_entry_size * ctx_pg->entries; |
---|
| 7246 | + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true); |
---|
| 7247 | + if (rc) |
---|
| 7248 | + return rc; |
---|
| 7249 | + ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; |
---|
| 7250 | + if (ctx->mrav_num_entries_units) |
---|
| 7251 | + ctx_pg->entries = |
---|
| 7252 | + ((num_mr / ctx->mrav_num_entries_units) << 16) | |
---|
| 7253 | + (num_ah / ctx->mrav_num_entries_units); |
---|
| 7254 | + |
---|
| 7255 | + ctx_pg = &ctx->tim_mem; |
---|
| 7256 | + ctx_pg->entries = ctx->qp_mem.entries; |
---|
| 7257 | + mem_size = ctx->tim_entry_size * ctx_pg->entries; |
---|
| 7258 | + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); |
---|
| 7259 | + if (rc) |
---|
| 7260 | + return rc; |
---|
| 7261 | + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; |
---|
| 7262 | + |
---|
| 7263 | +skip_rdma: |
---|
| 7264 | + min = ctx->tqm_min_entries_per_ring; |
---|
| 7265 | + entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries + |
---|
| 7266 | + 2 * (extra_qps + ctx->qp_min_qp1_entries) + min; |
---|
| 7267 | + entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple); |
---|
| 7268 | + entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries); |
---|
| 7269 | + entries = roundup(entries, ctx->tqm_entries_multiple); |
---|
| 7270 | + entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring); |
---|
| 7271 | + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) { |
---|
| 7272 | + ctx_pg = ctx->tqm_mem[i]; |
---|
| 7273 | + ctx_pg->entries = i ? entries : entries_sp; |
---|
| 7274 | + mem_size = ctx->tqm_entry_size * ctx_pg->entries; |
---|
| 7275 | + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); |
---|
| 7276 | + if (rc) |
---|
| 7277 | + return rc; |
---|
| 7278 | + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; |
---|
| 7279 | + } |
---|
| 7280 | + ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; |
---|
| 7281 | + rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); |
---|
| 7282 | + if (rc) { |
---|
| 7283 | + netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", |
---|
| 7284 | + rc); |
---|
| 7285 | + return rc; |
---|
| 7286 | + } |
---|
| 7287 | + ctx->flags |= BNXT_CTX_FLAG_INITED; |
---|
| 7288 | + return 0; |
---|
5166 | 7289 | } |
---|
5167 | 7290 | |
---|
5168 | 7291 | int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) |
---|
.. | .. |
---|
5176 | 7299 | req.fid = cpu_to_le16(0xffff); |
---|
5177 | 7300 | |
---|
5178 | 7301 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
5179 | | - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
5180 | | - if (rc) { |
---|
5181 | | - rc = -EIO; |
---|
| 7302 | + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), |
---|
| 7303 | + HWRM_CMD_TIMEOUT); |
---|
| 7304 | + if (rc) |
---|
5182 | 7305 | goto hwrm_func_resc_qcaps_exit; |
---|
5183 | | - } |
---|
5184 | 7306 | |
---|
5185 | 7307 | hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); |
---|
5186 | 7308 | if (!all) |
---|
.. | .. |
---|
5203 | 7325 | hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); |
---|
5204 | 7326 | hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); |
---|
5205 | 7327 | |
---|
| 7328 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 7329 | + u16 max_msix = le16_to_cpu(resp->max_msix); |
---|
| 7330 | + |
---|
| 7331 | + hw_resc->max_nqs = max_msix; |
---|
| 7332 | + hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; |
---|
| 7333 | + } |
---|
| 7334 | + |
---|
5206 | 7335 | if (BNXT_PF(bp)) { |
---|
5207 | 7336 | struct bnxt_pf_info *pf = &bp->pf; |
---|
5208 | 7337 | |
---|
.. | .. |
---|
5222 | 7351 | struct hwrm_func_qcaps_input req = {0}; |
---|
5223 | 7352 | struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; |
---|
5224 | 7353 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
---|
5225 | | - u32 flags; |
---|
| 7354 | + u32 flags, flags_ext; |
---|
5226 | 7355 | |
---|
5227 | 7356 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); |
---|
5228 | 7357 | req.fid = cpu_to_le16(0xffff); |
---|
.. | .. |
---|
5237 | 7366 | bp->flags |= BNXT_FLAG_ROCEV1_CAP; |
---|
5238 | 7367 | if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) |
---|
5239 | 7368 | bp->flags |= BNXT_FLAG_ROCEV2_CAP; |
---|
| 7369 | + if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) |
---|
| 7370 | + bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; |
---|
| 7371 | + if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) |
---|
| 7372 | + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; |
---|
| 7373 | + if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) |
---|
| 7374 | + bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; |
---|
| 7375 | + if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) |
---|
| 7376 | + bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; |
---|
| 7377 | + if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) |
---|
| 7378 | + bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; |
---|
| 7379 | + if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) |
---|
| 7380 | + bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; |
---|
| 7381 | + |
---|
| 7382 | + flags_ext = le32_to_cpu(resp->flags_ext); |
---|
| 7383 | + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) |
---|
| 7384 | + bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; |
---|
5240 | 7385 | |
---|
5241 | 7386 | bp->tx_push_thresh = 0; |
---|
5242 | | - if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) |
---|
| 7387 | + if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && |
---|
| 7388 | + BNXT_FW_MAJ(bp) > 217) |
---|
5243 | 7389 | bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; |
---|
5244 | 7390 | |
---|
5245 | 7391 | hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); |
---|
.. | .. |
---|
5267 | 7413 | pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); |
---|
5268 | 7414 | pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); |
---|
5269 | 7415 | pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); |
---|
| 7416 | + bp->flags &= ~BNXT_FLAG_WOL_CAP; |
---|
5270 | 7417 | if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) |
---|
5271 | 7418 | bp->flags |= BNXT_FLAG_WOL_CAP; |
---|
5272 | 7419 | } else { |
---|
.. | .. |
---|
5283 | 7430 | return rc; |
---|
5284 | 7431 | } |
---|
5285 | 7432 | |
---|
| 7433 | +static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); |
---|
| 7434 | + |
---|
5286 | 7435 | static int bnxt_hwrm_func_qcaps(struct bnxt *bp) |
---|
5287 | 7436 | { |
---|
5288 | 7437 | int rc; |
---|
.. | .. |
---|
5290 | 7439 | rc = __bnxt_hwrm_func_qcaps(bp); |
---|
5291 | 7440 | if (rc) |
---|
5292 | 7441 | return rc; |
---|
| 7442 | + rc = bnxt_hwrm_queue_qportcfg(bp); |
---|
| 7443 | + if (rc) { |
---|
| 7444 | + netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); |
---|
| 7445 | + return rc; |
---|
| 7446 | + } |
---|
5293 | 7447 | if (bp->hwrm_spec_code >= 0x10803) { |
---|
| 7448 | + rc = bnxt_alloc_ctx_mem(bp); |
---|
| 7449 | + if (rc) |
---|
| 7450 | + return rc; |
---|
5294 | 7451 | rc = bnxt_hwrm_func_resc_qcaps(bp, true); |
---|
5295 | 7452 | if (!rc) |
---|
5296 | 7453 | bp->fw_cap |= BNXT_FW_CAP_NEW_RM; |
---|
5297 | 7454 | } |
---|
5298 | 7455 | return 0; |
---|
| 7456 | +} |
---|
| 7457 | + |
---|
| 7458 | +static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) |
---|
| 7459 | +{ |
---|
| 7460 | + struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; |
---|
| 7461 | + struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; |
---|
| 7462 | + int rc = 0; |
---|
| 7463 | + u32 flags; |
---|
| 7464 | + |
---|
| 7465 | + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) |
---|
| 7466 | + return 0; |
---|
| 7467 | + |
---|
| 7468 | + resp = bp->hwrm_cmd_resp_addr; |
---|
| 7469 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); |
---|
| 7470 | + |
---|
| 7471 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 7472 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 7473 | + if (rc) |
---|
| 7474 | + goto hwrm_cfa_adv_qcaps_exit; |
---|
| 7475 | + |
---|
| 7476 | + flags = le32_to_cpu(resp->flags); |
---|
| 7477 | + if (flags & |
---|
| 7478 | + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) |
---|
| 7479 | + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; |
---|
| 7480 | + |
---|
| 7481 | +hwrm_cfa_adv_qcaps_exit: |
---|
| 7482 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 7483 | + return rc; |
---|
| 7484 | +} |
---|
| 7485 | + |
---|
| 7486 | +static int __bnxt_alloc_fw_health(struct bnxt *bp) |
---|
| 7487 | +{ |
---|
| 7488 | + if (bp->fw_health) |
---|
| 7489 | + return 0; |
---|
| 7490 | + |
---|
| 7491 | + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); |
---|
| 7492 | + if (!bp->fw_health) |
---|
| 7493 | + return -ENOMEM; |
---|
| 7494 | + |
---|
| 7495 | + return 0; |
---|
| 7496 | +} |
---|
| 7497 | + |
---|
| 7498 | +static int bnxt_alloc_fw_health(struct bnxt *bp) |
---|
| 7499 | +{ |
---|
| 7500 | + int rc; |
---|
| 7501 | + |
---|
| 7502 | + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && |
---|
| 7503 | + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) |
---|
| 7504 | + return 0; |
---|
| 7505 | + |
---|
| 7506 | + rc = __bnxt_alloc_fw_health(bp); |
---|
| 7507 | + if (rc) { |
---|
| 7508 | + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; |
---|
| 7509 | + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; |
---|
| 7510 | + return rc; |
---|
| 7511 | + } |
---|
| 7512 | + |
---|
| 7513 | + return 0; |
---|
| 7514 | +} |
---|
| 7515 | + |
---|
| 7516 | +static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) |
---|
| 7517 | +{ |
---|
| 7518 | + writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + |
---|
| 7519 | + BNXT_GRCPF_REG_WINDOW_BASE_OUT + |
---|
| 7520 | + BNXT_FW_HEALTH_WIN_MAP_OFF); |
---|
| 7521 | +} |
---|
| 7522 | + |
---|
| 7523 | +static void bnxt_try_map_fw_health_reg(struct bnxt *bp) |
---|
| 7524 | +{ |
---|
| 7525 | + void __iomem *hs; |
---|
| 7526 | + u32 status_loc; |
---|
| 7527 | + u32 reg_type; |
---|
| 7528 | + u32 sig; |
---|
| 7529 | + |
---|
| 7530 | + __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); |
---|
| 7531 | + hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); |
---|
| 7532 | + |
---|
| 7533 | + sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); |
---|
| 7534 | + if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { |
---|
| 7535 | + if (bp->fw_health) |
---|
| 7536 | + bp->fw_health->status_reliable = false; |
---|
| 7537 | + return; |
---|
| 7538 | + } |
---|
| 7539 | + |
---|
| 7540 | + if (__bnxt_alloc_fw_health(bp)) { |
---|
| 7541 | + netdev_warn(bp->dev, "no memory for firmware status checks\n"); |
---|
| 7542 | + return; |
---|
| 7543 | + } |
---|
| 7544 | + |
---|
| 7545 | + status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc)); |
---|
| 7546 | + bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; |
---|
| 7547 | + reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); |
---|
| 7548 | + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { |
---|
| 7549 | + __bnxt_map_fw_health_reg(bp, status_loc); |
---|
| 7550 | + bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = |
---|
| 7551 | + BNXT_FW_HEALTH_WIN_OFF(status_loc); |
---|
| 7552 | + } |
---|
| 7553 | + |
---|
| 7554 | + bp->fw_health->status_reliable = true; |
---|
| 7555 | +} |
---|
| 7556 | + |
---|
| 7557 | +static int bnxt_map_fw_health_regs(struct bnxt *bp) |
---|
| 7558 | +{ |
---|
| 7559 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 7560 | + u32 reg_base = 0xffffffff; |
---|
| 7561 | + int i; |
---|
| 7562 | + |
---|
| 7563 | + /* Only pre-map the monitoring GRC registers using window 3 */ |
---|
| 7564 | + for (i = 0; i < 4; i++) { |
---|
| 7565 | + u32 reg = fw_health->regs[i]; |
---|
| 7566 | + |
---|
| 7567 | + if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) |
---|
| 7568 | + continue; |
---|
| 7569 | + if (reg_base == 0xffffffff) |
---|
| 7570 | + reg_base = reg & BNXT_GRC_BASE_MASK; |
---|
| 7571 | + if ((reg & BNXT_GRC_BASE_MASK) != reg_base) |
---|
| 7572 | + return -ERANGE; |
---|
| 7573 | + fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); |
---|
| 7574 | + } |
---|
| 7575 | + if (reg_base == 0xffffffff) |
---|
| 7576 | + return 0; |
---|
| 7577 | + |
---|
| 7578 | + __bnxt_map_fw_health_reg(bp, reg_base); |
---|
| 7579 | + return 0; |
---|
| 7580 | +} |
---|
| 7581 | + |
---|
| 7582 | +static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) |
---|
| 7583 | +{ |
---|
| 7584 | + struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 7585 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 7586 | + struct hwrm_error_recovery_qcfg_input req = {0}; |
---|
| 7587 | + int rc, i; |
---|
| 7588 | + |
---|
| 7589 | + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) |
---|
| 7590 | + return 0; |
---|
| 7591 | + |
---|
| 7592 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); |
---|
| 7593 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 7594 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 7595 | + if (rc) |
---|
| 7596 | + goto err_recovery_out; |
---|
| 7597 | + fw_health->flags = le32_to_cpu(resp->flags); |
---|
| 7598 | + if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && |
---|
| 7599 | + !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { |
---|
| 7600 | + rc = -EINVAL; |
---|
| 7601 | + goto err_recovery_out; |
---|
| 7602 | + } |
---|
| 7603 | + fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); |
---|
| 7604 | + fw_health->master_func_wait_dsecs = |
---|
| 7605 | + le32_to_cpu(resp->master_func_wait_period); |
---|
| 7606 | + fw_health->normal_func_wait_dsecs = |
---|
| 7607 | + le32_to_cpu(resp->normal_func_wait_period); |
---|
| 7608 | + fw_health->post_reset_wait_dsecs = |
---|
| 7609 | + le32_to_cpu(resp->master_func_wait_period_after_reset); |
---|
| 7610 | + fw_health->post_reset_max_wait_dsecs = |
---|
| 7611 | + le32_to_cpu(resp->max_bailout_time_after_reset); |
---|
| 7612 | + fw_health->regs[BNXT_FW_HEALTH_REG] = |
---|
| 7613 | + le32_to_cpu(resp->fw_health_status_reg); |
---|
| 7614 | + fw_health->regs[BNXT_FW_HEARTBEAT_REG] = |
---|
| 7615 | + le32_to_cpu(resp->fw_heartbeat_reg); |
---|
| 7616 | + fw_health->regs[BNXT_FW_RESET_CNT_REG] = |
---|
| 7617 | + le32_to_cpu(resp->fw_reset_cnt_reg); |
---|
| 7618 | + fw_health->regs[BNXT_FW_RESET_INPROG_REG] = |
---|
| 7619 | + le32_to_cpu(resp->reset_inprogress_reg); |
---|
| 7620 | + fw_health->fw_reset_inprog_reg_mask = |
---|
| 7621 | + le32_to_cpu(resp->reset_inprogress_reg_mask); |
---|
| 7622 | + fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; |
---|
| 7623 | + if (fw_health->fw_reset_seq_cnt >= 16) { |
---|
| 7624 | + rc = -EINVAL; |
---|
| 7625 | + goto err_recovery_out; |
---|
| 7626 | + } |
---|
| 7627 | + for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { |
---|
| 7628 | + fw_health->fw_reset_seq_regs[i] = |
---|
| 7629 | + le32_to_cpu(resp->reset_reg[i]); |
---|
| 7630 | + fw_health->fw_reset_seq_vals[i] = |
---|
| 7631 | + le32_to_cpu(resp->reset_reg_val[i]); |
---|
| 7632 | + fw_health->fw_reset_seq_delay_msec[i] = |
---|
| 7633 | + resp->delay_after_reset[i]; |
---|
| 7634 | + } |
---|
| 7635 | +err_recovery_out: |
---|
| 7636 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 7637 | + if (!rc) |
---|
| 7638 | + rc = bnxt_map_fw_health_regs(bp); |
---|
| 7639 | + if (rc) |
---|
| 7640 | + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; |
---|
| 7641 | + return rc; |
---|
5299 | 7642 | } |
---|
5300 | 7643 | |
---|
5301 | 7644 | static int bnxt_hwrm_func_reset(struct bnxt *bp) |
---|
.. | .. |
---|
5306 | 7649 | req.enables = 0; |
---|
5307 | 7650 | |
---|
5308 | 7651 | return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); |
---|
| 7652 | +} |
---|
| 7653 | + |
---|
| 7654 | +static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) |
---|
| 7655 | +{ |
---|
| 7656 | + struct hwrm_nvm_get_dev_info_output nvm_info; |
---|
| 7657 | + |
---|
| 7658 | + if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) |
---|
| 7659 | + snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", |
---|
| 7660 | + nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, |
---|
| 7661 | + nvm_info.nvm_cfg_ver_upd); |
---|
5309 | 7662 | } |
---|
5310 | 7663 | |
---|
5311 | 7664 | static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) |
---|
.. | .. |
---|
5335 | 7688 | no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); |
---|
5336 | 7689 | qptr = &resp->queue_id0; |
---|
5337 | 7690 | for (i = 0, j = 0; i < bp->max_tc; i++) { |
---|
5338 | | - bp->q_info[j].queue_id = *qptr++; |
---|
| 7691 | + bp->q_info[j].queue_id = *qptr; |
---|
| 7692 | + bp->q_ids[i] = *qptr++; |
---|
5339 | 7693 | bp->q_info[j].queue_profile = *qptr++; |
---|
5340 | 7694 | bp->tc_to_qidx[j] = j; |
---|
5341 | 7695 | if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || |
---|
5342 | 7696 | (no_rdma && BNXT_PF(bp))) |
---|
5343 | 7697 | j++; |
---|
5344 | 7698 | } |
---|
| 7699 | + bp->max_q = bp->max_tc; |
---|
5345 | 7700 | bp->max_tc = max_t(u8, j, 1); |
---|
5346 | 7701 | |
---|
5347 | 7702 | if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) |
---|
.. | .. |
---|
5355 | 7710 | return rc; |
---|
5356 | 7711 | } |
---|
5357 | 7712 | |
---|
5358 | | -static int bnxt_hwrm_ver_get(struct bnxt *bp) |
---|
| 7713 | +static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) |
---|
5359 | 7714 | { |
---|
5360 | | - int rc; |
---|
5361 | 7715 | struct hwrm_ver_get_input req = {0}; |
---|
5362 | | - struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; |
---|
5363 | | - u32 dev_caps_cfg; |
---|
| 7716 | + int rc; |
---|
5364 | 7717 | |
---|
5365 | | - bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; |
---|
5366 | 7718 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); |
---|
5367 | 7719 | req.hwrm_intf_maj = HWRM_VERSION_MAJOR; |
---|
5368 | 7720 | req.hwrm_intf_min = HWRM_VERSION_MINOR; |
---|
5369 | 7721 | req.hwrm_intf_upd = HWRM_VERSION_UPDATE; |
---|
| 7722 | + |
---|
| 7723 | + rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, |
---|
| 7724 | + silent); |
---|
| 7725 | + return rc; |
---|
| 7726 | +} |
---|
| 7727 | + |
---|
| 7728 | +static int bnxt_hwrm_ver_get(struct bnxt *bp) |
---|
| 7729 | +{ |
---|
| 7730 | + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 7731 | + u16 fw_maj, fw_min, fw_bld, fw_rsv; |
---|
| 7732 | + u32 dev_caps_cfg, hwrm_ver; |
---|
| 7733 | + int rc, len; |
---|
| 7734 | + |
---|
| 7735 | + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; |
---|
5370 | 7736 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
5371 | | - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 7737 | + rc = __bnxt_hwrm_ver_get(bp, false); |
---|
5372 | 7738 | if (rc) |
---|
5373 | 7739 | goto hwrm_ver_get_exit; |
---|
5374 | 7740 | |
---|
.. | .. |
---|
5383 | 7749 | resp->hwrm_intf_upd_8b); |
---|
5384 | 7750 | netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); |
---|
5385 | 7751 | } |
---|
5386 | | - snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", |
---|
5387 | | - resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, |
---|
5388 | | - resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); |
---|
| 7752 | + |
---|
| 7753 | + hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | |
---|
| 7754 | + HWRM_VERSION_UPDATE; |
---|
| 7755 | + |
---|
| 7756 | + if (bp->hwrm_spec_code > hwrm_ver) |
---|
| 7757 | + snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", |
---|
| 7758 | + HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, |
---|
| 7759 | + HWRM_VERSION_UPDATE); |
---|
| 7760 | + else |
---|
| 7761 | + snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", |
---|
| 7762 | + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, |
---|
| 7763 | + resp->hwrm_intf_upd_8b); |
---|
| 7764 | + |
---|
| 7765 | + fw_maj = le16_to_cpu(resp->hwrm_fw_major); |
---|
| 7766 | + if (bp->hwrm_spec_code > 0x10803 && fw_maj) { |
---|
| 7767 | + fw_min = le16_to_cpu(resp->hwrm_fw_minor); |
---|
| 7768 | + fw_bld = le16_to_cpu(resp->hwrm_fw_build); |
---|
| 7769 | + fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); |
---|
| 7770 | + len = FW_VER_STR_LEN; |
---|
| 7771 | + } else { |
---|
| 7772 | + fw_maj = resp->hwrm_fw_maj_8b; |
---|
| 7773 | + fw_min = resp->hwrm_fw_min_8b; |
---|
| 7774 | + fw_bld = resp->hwrm_fw_bld_8b; |
---|
| 7775 | + fw_rsv = resp->hwrm_fw_rsvd_8b; |
---|
| 7776 | + len = BC_HWRM_STR_LEN; |
---|
| 7777 | + } |
---|
| 7778 | + bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); |
---|
| 7779 | + snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, |
---|
| 7780 | + fw_rsv); |
---|
| 7781 | + |
---|
| 7782 | + if (strlen(resp->active_pkg_name)) { |
---|
| 7783 | + int fw_ver_len = strlen(bp->fw_ver_str); |
---|
| 7784 | + |
---|
| 7785 | + snprintf(bp->fw_ver_str + fw_ver_len, |
---|
| 7786 | + FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", |
---|
| 7787 | + resp->active_pkg_name); |
---|
| 7788 | + bp->fw_cap |= BNXT_FW_CAP_PKG_VER; |
---|
| 7789 | + } |
---|
5389 | 7790 | |
---|
5390 | 7791 | bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); |
---|
5391 | 7792 | if (!bp->hwrm_cmd_timeout) |
---|
5392 | 7793 | bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; |
---|
5393 | 7794 | |
---|
5394 | | - if (resp->hwrm_intf_maj_8b >= 1) |
---|
| 7795 | + if (resp->hwrm_intf_maj_8b >= 1) { |
---|
5395 | 7796 | bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); |
---|
| 7797 | + bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); |
---|
| 7798 | + } |
---|
| 7799 | + if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) |
---|
| 7800 | + bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; |
---|
5396 | 7801 | |
---|
5397 | 7802 | bp->chip_num = le16_to_cpu(resp->chip_num); |
---|
| 7803 | + bp->chip_rev = resp->chip_rev; |
---|
5398 | 7804 | if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && |
---|
5399 | 7805 | !resp->chip_metal) |
---|
5400 | 7806 | bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; |
---|
.. | .. |
---|
5403 | 7809 | if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && |
---|
5404 | 7810 | (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) |
---|
5405 | 7811 | bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; |
---|
| 7812 | + |
---|
| 7813 | + if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) |
---|
| 7814 | + bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; |
---|
| 7815 | + |
---|
| 7816 | + if (dev_caps_cfg & |
---|
| 7817 | + VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) |
---|
| 7818 | + bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; |
---|
| 7819 | + |
---|
| 7820 | + if (dev_caps_cfg & |
---|
| 7821 | + VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) |
---|
| 7822 | + bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; |
---|
| 7823 | + |
---|
| 7824 | + if (dev_caps_cfg & |
---|
| 7825 | + VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) |
---|
| 7826 | + bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; |
---|
5406 | 7827 | |
---|
5407 | 7828 | hwrm_ver_get_exit: |
---|
5408 | 7829 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
.. | .. |
---|
5430 | 7851 | return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
5431 | 7852 | } |
---|
5432 | 7853 | |
---|
5433 | | -static int bnxt_hwrm_port_qstats(struct bnxt *bp) |
---|
| 7854 | +static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) |
---|
5434 | 7855 | { |
---|
5435 | | - int rc; |
---|
| 7856 | + u64 sw_tmp; |
---|
| 7857 | + |
---|
| 7858 | + hw &= mask; |
---|
| 7859 | + sw_tmp = (*sw & ~mask) | hw; |
---|
| 7860 | + if (hw < (*sw & mask)) |
---|
| 7861 | + sw_tmp += mask + 1; |
---|
| 7862 | + WRITE_ONCE(*sw, sw_tmp); |
---|
| 7863 | +} |
---|
| 7864 | + |
---|
| 7865 | +static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, |
---|
| 7866 | + int count, bool ignore_zero) |
---|
| 7867 | +{ |
---|
| 7868 | + int i; |
---|
| 7869 | + |
---|
| 7870 | + for (i = 0; i < count; i++) { |
---|
| 7871 | + u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); |
---|
| 7872 | + |
---|
| 7873 | + if (ignore_zero && !hw) |
---|
| 7874 | + continue; |
---|
| 7875 | + |
---|
| 7876 | + if (masks[i] == -1ULL) |
---|
| 7877 | + sw_stats[i] = hw; |
---|
| 7878 | + else |
---|
| 7879 | + bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); |
---|
| 7880 | + } |
---|
| 7881 | +} |
---|
| 7882 | + |
---|
| 7883 | +static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) |
---|
| 7884 | +{ |
---|
| 7885 | + if (!stats->hw_stats) |
---|
| 7886 | + return; |
---|
| 7887 | + |
---|
| 7888 | + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, |
---|
| 7889 | + stats->hw_masks, stats->len / 8, false); |
---|
| 7890 | +} |
---|
| 7891 | + |
---|
| 7892 | +static void bnxt_accumulate_all_stats(struct bnxt *bp) |
---|
| 7893 | +{ |
---|
| 7894 | + struct bnxt_stats_mem *ring0_stats; |
---|
| 7895 | + bool ignore_zero = false; |
---|
| 7896 | + int i; |
---|
| 7897 | + |
---|
| 7898 | + /* Chip bug. Counter intermittently becomes 0. */ |
---|
| 7899 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 7900 | + ignore_zero = true; |
---|
| 7901 | + |
---|
| 7902 | + for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
| 7903 | + struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
| 7904 | + struct bnxt_cp_ring_info *cpr; |
---|
| 7905 | + struct bnxt_stats_mem *stats; |
---|
| 7906 | + |
---|
| 7907 | + cpr = &bnapi->cp_ring; |
---|
| 7908 | + stats = &cpr->stats; |
---|
| 7909 | + if (!i) |
---|
| 7910 | + ring0_stats = stats; |
---|
| 7911 | + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, |
---|
| 7912 | + ring0_stats->hw_masks, |
---|
| 7913 | + ring0_stats->len / 8, ignore_zero); |
---|
| 7914 | + } |
---|
| 7915 | + if (bp->flags & BNXT_FLAG_PORT_STATS) { |
---|
| 7916 | + struct bnxt_stats_mem *stats = &bp->port_stats; |
---|
| 7917 | + __le64 *hw_stats = stats->hw_stats; |
---|
| 7918 | + u64 *sw_stats = stats->sw_stats; |
---|
| 7919 | + u64 *masks = stats->hw_masks; |
---|
| 7920 | + int cnt; |
---|
| 7921 | + |
---|
| 7922 | + cnt = sizeof(struct rx_port_stats) / 8; |
---|
| 7923 | + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); |
---|
| 7924 | + |
---|
| 7925 | + hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
---|
| 7926 | + sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
---|
| 7927 | + masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
---|
| 7928 | + cnt = sizeof(struct tx_port_stats) / 8; |
---|
| 7929 | + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); |
---|
| 7930 | + } |
---|
| 7931 | + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { |
---|
| 7932 | + bnxt_accumulate_stats(&bp->rx_port_stats_ext); |
---|
| 7933 | + bnxt_accumulate_stats(&bp->tx_port_stats_ext); |
---|
| 7934 | + } |
---|
| 7935 | +} |
---|
| 7936 | + |
---|
| 7937 | +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) |
---|
| 7938 | +{ |
---|
5436 | 7939 | struct bnxt_pf_info *pf = &bp->pf; |
---|
5437 | 7940 | struct hwrm_port_qstats_input req = {0}; |
---|
5438 | 7941 | |
---|
5439 | 7942 | if (!(bp->flags & BNXT_FLAG_PORT_STATS)) |
---|
5440 | 7943 | return 0; |
---|
5441 | 7944 | |
---|
| 7945 | + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) |
---|
| 7946 | + return -EOPNOTSUPP; |
---|
| 7947 | + |
---|
| 7948 | + req.flags = flags; |
---|
5442 | 7949 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); |
---|
5443 | 7950 | req.port_id = cpu_to_le16(pf->port_id); |
---|
5444 | | - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); |
---|
5445 | | - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); |
---|
5446 | | - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
5447 | | - return rc; |
---|
| 7951 | + req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + |
---|
| 7952 | + BNXT_TX_PORT_STATS_BYTE_OFFSET); |
---|
| 7953 | + req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); |
---|
| 7954 | + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
5448 | 7955 | } |
---|
5449 | 7956 | |
---|
5450 | | -static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) |
---|
| 7957 | +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) |
---|
5451 | 7958 | { |
---|
| 7959 | + struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 7960 | + struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; |
---|
5452 | 7961 | struct hwrm_port_qstats_ext_input req = {0}; |
---|
5453 | 7962 | struct bnxt_pf_info *pf = &bp->pf; |
---|
| 7963 | + u32 tx_stat_size; |
---|
| 7964 | + int rc; |
---|
5454 | 7965 | |
---|
5455 | 7966 | if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) |
---|
5456 | 7967 | return 0; |
---|
5457 | 7968 | |
---|
| 7969 | + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) |
---|
| 7970 | + return -EOPNOTSUPP; |
---|
| 7971 | + |
---|
5458 | 7972 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); |
---|
| 7973 | + req.flags = flags; |
---|
5459 | 7974 | req.port_id = cpu_to_le16(pf->port_id); |
---|
5460 | 7975 | req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); |
---|
5461 | | - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); |
---|
5462 | | - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 7976 | + req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); |
---|
| 7977 | + tx_stat_size = bp->tx_port_stats_ext.hw_stats ? |
---|
| 7978 | + sizeof(struct tx_port_stats_ext) : 0; |
---|
| 7979 | + req.tx_stat_size = cpu_to_le16(tx_stat_size); |
---|
| 7980 | + req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); |
---|
| 7981 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 7982 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 7983 | + if (!rc) { |
---|
| 7984 | + bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; |
---|
| 7985 | + bp->fw_tx_stats_ext_size = tx_stat_size ? |
---|
| 7986 | + le16_to_cpu(resp->tx_stat_size) / 8 : 0; |
---|
| 7987 | + } else { |
---|
| 7988 | + bp->fw_rx_stats_ext_size = 0; |
---|
| 7989 | + bp->fw_tx_stats_ext_size = 0; |
---|
| 7990 | + } |
---|
| 7991 | + if (flags) |
---|
| 7992 | + goto qstats_done; |
---|
| 7993 | + |
---|
| 7994 | + if (bp->fw_tx_stats_ext_size <= |
---|
| 7995 | + offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { |
---|
| 7996 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 7997 | + bp->pri2cos_valid = 0; |
---|
| 7998 | + return rc; |
---|
| 7999 | + } |
---|
| 8000 | + |
---|
| 8001 | + bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); |
---|
| 8002 | + req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); |
---|
| 8003 | + |
---|
| 8004 | + rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); |
---|
| 8005 | + if (!rc) { |
---|
| 8006 | + struct hwrm_queue_pri2cos_qcfg_output *resp2; |
---|
| 8007 | + u8 *pri2cos; |
---|
| 8008 | + int i, j; |
---|
| 8009 | + |
---|
| 8010 | + resp2 = bp->hwrm_cmd_resp_addr; |
---|
| 8011 | + pri2cos = &resp2->pri0_cos_queue_id; |
---|
| 8012 | + for (i = 0; i < 8; i++) { |
---|
| 8013 | + u8 queue_id = pri2cos[i]; |
---|
| 8014 | + u8 queue_idx; |
---|
| 8015 | + |
---|
| 8016 | + /* Per port queue IDs start from 0, 10, 20, etc */ |
---|
| 8017 | + queue_idx = queue_id % 10; |
---|
| 8018 | + if (queue_idx > BNXT_MAX_QUEUE) { |
---|
| 8019 | + bp->pri2cos_valid = false; |
---|
| 8020 | + goto qstats_done; |
---|
| 8021 | + } |
---|
| 8022 | + for (j = 0; j < bp->max_q; j++) { |
---|
| 8023 | + if (bp->q_ids[j] == queue_id) |
---|
| 8024 | + bp->pri2cos_idx[i] = queue_idx; |
---|
| 8025 | + } |
---|
| 8026 | + } |
---|
| 8027 | + bp->pri2cos_valid = 1; |
---|
| 8028 | + } |
---|
| 8029 | +qstats_done: |
---|
| 8030 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 8031 | + return rc; |
---|
5463 | 8032 | } |
---|
5464 | 8033 | |
---|
5465 | 8034 | static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) |
---|
5466 | 8035 | { |
---|
5467 | | - if (bp->vxlan_port_cnt) { |
---|
| 8036 | + if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID) |
---|
5468 | 8037 | bnxt_hwrm_tunnel_dst_port_free( |
---|
5469 | 8038 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); |
---|
5470 | | - } |
---|
5471 | | - bp->vxlan_port_cnt = 0; |
---|
5472 | | - if (bp->nge_port_cnt) { |
---|
| 8039 | + if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID) |
---|
5473 | 8040 | bnxt_hwrm_tunnel_dst_port_free( |
---|
5474 | 8041 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); |
---|
5475 | | - } |
---|
5476 | | - bp->nge_port_cnt = 0; |
---|
5477 | 8042 | } |
---|
5478 | 8043 | |
---|
5479 | 8044 | static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) |
---|
.. | .. |
---|
5483 | 8048 | |
---|
5484 | 8049 | if (set_tpa) |
---|
5485 | 8050 | tpa_flags = bp->flags & BNXT_FLAG_TPA; |
---|
| 8051 | + else if (BNXT_NO_FW_ACCESS(bp)) |
---|
| 8052 | + return 0; |
---|
5486 | 8053 | for (i = 0; i < bp->nr_vnics; i++) { |
---|
5487 | 8054 | rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); |
---|
5488 | 8055 | if (rc) { |
---|
.. | .. |
---|
5502 | 8069 | bnxt_hwrm_vnic_set_rss(bp, i, false); |
---|
5503 | 8070 | } |
---|
5504 | 8071 | |
---|
5505 | | -static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, |
---|
5506 | | - bool irq_re_init) |
---|
| 8072 | +static void bnxt_clear_vnic(struct bnxt *bp) |
---|
5507 | 8073 | { |
---|
5508 | | - if (bp->vnic_info) { |
---|
5509 | | - bnxt_hwrm_clear_vnic_filter(bp); |
---|
| 8074 | + if (!bp->vnic_info) |
---|
| 8075 | + return; |
---|
| 8076 | + |
---|
| 8077 | + bnxt_hwrm_clear_vnic_filter(bp); |
---|
| 8078 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { |
---|
5510 | 8079 | /* clear all RSS setting before free vnic ctx */ |
---|
5511 | 8080 | bnxt_hwrm_clear_vnic_rss(bp); |
---|
5512 | 8081 | bnxt_hwrm_vnic_ctx_free(bp); |
---|
5513 | | - /* before free the vnic, undo the vnic tpa settings */ |
---|
5514 | | - if (bp->flags & BNXT_FLAG_TPA) |
---|
5515 | | - bnxt_set_tpa(bp, false); |
---|
5516 | | - bnxt_hwrm_vnic_free(bp); |
---|
5517 | 8082 | } |
---|
| 8083 | + /* before free the vnic, undo the vnic tpa settings */ |
---|
| 8084 | + if (bp->flags & BNXT_FLAG_TPA) |
---|
| 8085 | + bnxt_set_tpa(bp, false); |
---|
| 8086 | + bnxt_hwrm_vnic_free(bp); |
---|
| 8087 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 8088 | + bnxt_hwrm_vnic_ctx_free(bp); |
---|
| 8089 | +} |
---|
| 8090 | + |
---|
| 8091 | +static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, |
---|
| 8092 | + bool irq_re_init) |
---|
| 8093 | +{ |
---|
| 8094 | + bnxt_clear_vnic(bp); |
---|
5518 | 8095 | bnxt_hwrm_ring_free(bp, close_path); |
---|
5519 | 8096 | bnxt_hwrm_ring_grp_free(bp); |
---|
5520 | 8097 | if (irq_re_init) { |
---|
.. | .. |
---|
5526 | 8103 | static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) |
---|
5527 | 8104 | { |
---|
5528 | 8105 | struct hwrm_func_cfg_input req = {0}; |
---|
5529 | | - int rc; |
---|
5530 | 8106 | |
---|
5531 | 8107 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); |
---|
5532 | 8108 | req.fid = cpu_to_le16(0xffff); |
---|
.. | .. |
---|
5537 | 8113 | req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; |
---|
5538 | 8114 | else |
---|
5539 | 8115 | return -EINVAL; |
---|
5540 | | - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
5541 | | - if (rc) |
---|
5542 | | - rc = -EIO; |
---|
5543 | | - return rc; |
---|
| 8116 | + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
5544 | 8117 | } |
---|
5545 | 8118 | |
---|
5546 | 8119 | static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) |
---|
5547 | 8120 | { |
---|
5548 | 8121 | struct hwrm_func_cfg_input req = {0}; |
---|
5549 | | - int rc; |
---|
5550 | 8122 | |
---|
5551 | 8123 | if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) |
---|
5552 | 8124 | return 0; |
---|
.. | .. |
---|
5558 | 8130 | if (size == 128) |
---|
5559 | 8131 | req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; |
---|
5560 | 8132 | |
---|
5561 | | - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
5562 | | - if (rc) |
---|
5563 | | - rc = -EIO; |
---|
5564 | | - return rc; |
---|
| 8133 | + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
5565 | 8134 | } |
---|
5566 | 8135 | |
---|
5567 | | -static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) |
---|
| 8136 | +static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) |
---|
5568 | 8137 | { |
---|
5569 | 8138 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
---|
5570 | 8139 | int rc; |
---|
.. | .. |
---|
5620 | 8189 | return rc; |
---|
5621 | 8190 | } |
---|
5622 | 8191 | |
---|
| 8192 | +static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) |
---|
| 8193 | +{ |
---|
| 8194 | + int rc, i, nr_ctxs; |
---|
| 8195 | + |
---|
| 8196 | + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); |
---|
| 8197 | + for (i = 0; i < nr_ctxs; i++) { |
---|
| 8198 | + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); |
---|
| 8199 | + if (rc) { |
---|
| 8200 | + netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", |
---|
| 8201 | + vnic_id, i, rc); |
---|
| 8202 | + break; |
---|
| 8203 | + } |
---|
| 8204 | + bp->rsscos_nr_ctxs++; |
---|
| 8205 | + } |
---|
| 8206 | + if (i < nr_ctxs) |
---|
| 8207 | + return -ENOMEM; |
---|
| 8208 | + |
---|
| 8209 | + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); |
---|
| 8210 | + if (rc) { |
---|
| 8211 | + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", |
---|
| 8212 | + vnic_id, rc); |
---|
| 8213 | + return rc; |
---|
| 8214 | + } |
---|
| 8215 | + rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); |
---|
| 8216 | + if (rc) { |
---|
| 8217 | + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", |
---|
| 8218 | + vnic_id, rc); |
---|
| 8219 | + return rc; |
---|
| 8220 | + } |
---|
| 8221 | + if (bp->flags & BNXT_FLAG_AGG_RINGS) { |
---|
| 8222 | + rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); |
---|
| 8223 | + if (rc) { |
---|
| 8224 | + netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", |
---|
| 8225 | + vnic_id, rc); |
---|
| 8226 | + } |
---|
| 8227 | + } |
---|
| 8228 | + return rc; |
---|
| 8229 | +} |
---|
| 8230 | + |
---|
| 8231 | +static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) |
---|
| 8232 | +{ |
---|
| 8233 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 8234 | + return __bnxt_setup_vnic_p5(bp, vnic_id); |
---|
| 8235 | + else |
---|
| 8236 | + return __bnxt_setup_vnic(bp, vnic_id); |
---|
| 8237 | +} |
---|
| 8238 | + |
---|
5623 | 8239 | static int bnxt_alloc_rfs_vnics(struct bnxt *bp) |
---|
5624 | 8240 | { |
---|
5625 | 8241 | #ifdef CONFIG_RFS_ACCEL |
---|
5626 | 8242 | int i, rc = 0; |
---|
| 8243 | + |
---|
| 8244 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 8245 | + return 0; |
---|
5627 | 8246 | |
---|
5628 | 8247 | for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
5629 | 8248 | struct bnxt_vnic_info *vnic; |
---|
.. | .. |
---|
5722 | 8341 | netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); |
---|
5723 | 8342 | goto err_out; |
---|
5724 | 8343 | } |
---|
| 8344 | + |
---|
| 8345 | + if (BNXT_VF(bp)) |
---|
| 8346 | + bnxt_hwrm_func_qcfg(bp); |
---|
5725 | 8347 | |
---|
5726 | 8348 | rc = bnxt_setup_vnic(bp, 0); |
---|
5727 | 8349 | if (rc) |
---|
.. | .. |
---|
5905 | 8527 | bp->irq_tbl[0].handler = bnxt_inta; |
---|
5906 | 8528 | } |
---|
5907 | 8529 | |
---|
| 8530 | +static int bnxt_init_int_mode(struct bnxt *bp); |
---|
| 8531 | + |
---|
5908 | 8532 | static int bnxt_setup_int_mode(struct bnxt *bp) |
---|
5909 | 8533 | { |
---|
5910 | 8534 | int rc; |
---|
| 8535 | + |
---|
| 8536 | + if (!bp->irq_tbl) { |
---|
| 8537 | + rc = bnxt_init_int_mode(bp); |
---|
| 8538 | + if (rc || !bp->irq_tbl) |
---|
| 8539 | + return rc ?: -ENODEV; |
---|
| 8540 | + } |
---|
5911 | 8541 | |
---|
5912 | 8542 | if (bp->flags & BNXT_FLAG_USING_MSIX) |
---|
5913 | 8543 | bnxt_setup_msix(bp); |
---|
.. | .. |
---|
5935 | 8565 | return bp->hw_resc.max_stat_ctxs; |
---|
5936 | 8566 | } |
---|
5937 | 8567 | |
---|
5938 | | -void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) |
---|
5939 | | -{ |
---|
5940 | | - bp->hw_resc.max_stat_ctxs = max; |
---|
5941 | | -} |
---|
5942 | | - |
---|
5943 | 8568 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) |
---|
5944 | 8569 | { |
---|
5945 | 8570 | return bp->hw_resc.max_cp_rings; |
---|
5946 | 8571 | } |
---|
5947 | 8572 | |
---|
5948 | | -unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
---|
| 8573 | +static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
---|
5949 | 8574 | { |
---|
5950 | | - return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); |
---|
| 8575 | + unsigned int cp = bp->hw_resc.max_cp_rings; |
---|
| 8576 | + |
---|
| 8577 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 8578 | + cp -= bnxt_get_ulp_msix_num(bp); |
---|
| 8579 | + |
---|
| 8580 | + return cp; |
---|
5951 | 8581 | } |
---|
5952 | 8582 | |
---|
5953 | 8583 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) |
---|
5954 | 8584 | { |
---|
5955 | 8585 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
---|
| 8586 | + |
---|
| 8587 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 8588 | + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); |
---|
5956 | 8589 | |
---|
5957 | 8590 | return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); |
---|
5958 | 8591 | } |
---|
.. | .. |
---|
5962 | 8595 | bp->hw_resc.max_irqs = max_irqs; |
---|
5963 | 8596 | } |
---|
5964 | 8597 | |
---|
| 8598 | +unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) |
---|
| 8599 | +{ |
---|
| 8600 | + unsigned int cp; |
---|
| 8601 | + |
---|
| 8602 | + cp = bnxt_get_max_func_cp_rings_for_en(bp); |
---|
| 8603 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 8604 | + return cp - bp->rx_nr_rings - bp->tx_nr_rings; |
---|
| 8605 | + else |
---|
| 8606 | + return cp - bp->cp_nr_rings; |
---|
| 8607 | +} |
---|
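/* A worked example of the availability math above. On P5 chips every RX and
 * every TX ring consumes its own completion ring, so what is left for other
 * users is max - rx - tx; on older chips RX/TX pairs share one completion
 * ring per NAPI context, so it is max - cp_nr_rings. Numbers are made up
 * for illustration.
 */
#include <assert.h>

static unsigned int avail_cp(unsigned int max_cp, unsigned int rx,
			     unsigned int tx, unsigned int cp, int is_p5)
{
	return is_p5 ? max_cp - rx - tx : max_cp - cp;
}

int main(void)
{
	/* 8 RX + 8 TX rings, 8 shared completion rings, 32 max */
	assert(avail_cp(32, 8, 8, 8, 1) == 16);	/* P5: 32 - 8 - 8 */
	assert(avail_cp(32, 8, 8, 8, 0) == 24);	/* legacy: 32 - 8 */
	return 0;
}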
| 8608 | + |
---|
| 8609 | +unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) |
---|
| 8610 | +{ |
---|
| 8611 | + return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); |
---|
| 8612 | +} |
---|
| 8613 | + |
---|
5965 | 8614 | int bnxt_get_avail_msix(struct bnxt *bp, int num) |
---|
5966 | 8615 | { |
---|
5967 | 8616 | int max_cp = bnxt_get_max_func_cp_rings(bp); |
---|
.. | .. |
---|
5969 | 8618 | int total_req = bp->cp_nr_rings + num; |
---|
5970 | 8619 | int max_idx, avail_msix; |
---|
5971 | 8620 | |
---|
5972 | | - max_idx = min_t(int, bp->total_irqs, max_cp); |
---|
| 8621 | + max_idx = bp->total_irqs; |
---|
| 8622 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 8623 | + max_idx = min_t(int, bp->total_irqs, max_cp); |
---|
5973 | 8624 | avail_msix = max_idx - bp->cp_nr_rings; |
---|
5974 | 8625 | if (!BNXT_NEW_RM(bp) || avail_msix >= num) |
---|
5975 | 8626 | return avail_msix; |
---|
.. | .. |
---|
5987 | 8638 | if (!BNXT_NEW_RM(bp)) |
---|
5988 | 8639 | return bnxt_get_max_func_irqs(bp); |
---|
5989 | 8640 | |
---|
5990 | | - return bnxt_cp_rings_in_use(bp); |
---|
| 8641 | + return bnxt_nq_rings_in_use(bp); |
---|
5991 | 8642 | } |
---|
5992 | 8643 | |
---|
5993 | 8644 | static int bnxt_init_msix(struct bnxt *bp) |
---|
.. | .. |
---|
6072 | 8723 | |
---|
6073 | 8724 | static int bnxt_init_int_mode(struct bnxt *bp) |
---|
6074 | 8725 | { |
---|
6075 | | - int rc = 0; |
---|
| 8726 | + int rc = -ENODEV; |
---|
6076 | 8727 | |
---|
6077 | 8728 | if (bp->flags & BNXT_FLAG_MSIX_CAP) |
---|
6078 | 8729 | rc = bnxt_init_msix(bp); |
---|
.. | .. |
---|
6094 | 8745 | bp->flags &= ~BNXT_FLAG_USING_MSIX; |
---|
6095 | 8746 | } |
---|
6096 | 8747 | |
---|
6097 | | -int bnxt_reserve_rings(struct bnxt *bp) |
---|
| 8748 | +int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) |
---|
6098 | 8749 | { |
---|
6099 | 8750 | int tcs = netdev_get_num_tc(bp->dev); |
---|
6100 | | - bool reinit_irq = false; |
---|
| 8751 | + bool irq_cleared = false; |
---|
6101 | 8752 | int rc; |
---|
6102 | 8753 | |
---|
6103 | 8754 | if (!bnxt_need_reserve_rings(bp)) |
---|
6104 | 8755 | return 0; |
---|
6105 | 8756 | |
---|
6106 | | - if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) { |
---|
| 8757 | + if (irq_re_init && BNXT_NEW_RM(bp) && |
---|
| 8758 | + bnxt_get_num_msix(bp) != bp->total_irqs) { |
---|
6107 | 8759 | bnxt_ulp_irq_stop(bp); |
---|
6108 | 8760 | bnxt_clear_int_mode(bp); |
---|
6109 | | - reinit_irq = true; |
---|
| 8761 | + irq_cleared = true; |
---|
6110 | 8762 | } |
---|
6111 | 8763 | rc = __bnxt_reserve_rings(bp); |
---|
6112 | | - if (reinit_irq) { |
---|
| 8764 | + if (irq_cleared) { |
---|
6113 | 8765 | if (!rc) |
---|
6114 | 8766 | rc = bnxt_init_int_mode(bp); |
---|
6115 | 8767 | bnxt_ulp_irq_restart(bp, rc); |
---|
.. | .. |
---|
6118 | 8770 | netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); |
---|
6119 | 8771 | return rc; |
---|
6120 | 8772 | } |
---|
6121 | | - if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { |
---|
| 8773 | + if (tcs && (bp->tx_nr_rings_per_tc * tcs != |
---|
| 8774 | + bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { |
---|
6122 | 8775 | netdev_err(bp->dev, "tx ring reservation failure\n"); |
---|
6123 | 8776 | netdev_reset_tc(bp->dev); |
---|
6124 | | - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
---|
| 8777 | + if (bp->tx_nr_rings_xdp) |
---|
| 8778 | + bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; |
---|
| 8779 | + else |
---|
| 8780 | + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
---|
6125 | 8781 | return -ENOMEM; |
---|
6126 | 8782 | } |
---|
6127 | | - bp->num_stat_ctxs = bp->cp_nr_rings; |
---|
6128 | 8783 | return 0; |
---|
6129 | 8784 | } |
---|
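/* A worked example of the TC consistency check above. XDP TX rings are
 * allocated on top of the per-traffic-class rings, so after a successful
 * reservation tx_nr_rings should equal per_tc * tcs + xdp. Values are made
 * up for illustration.
 */
#include <assert.h>

int main(void)
{
	int tcs = 2, per_tc = 4, xdp = 4;
	int tx_nr_rings = per_tc * tcs + xdp;	/* 12 TX rings in total */

	assert(per_tc * tcs == tx_nr_rings - xdp);	/* reservation consistent */
	return 0;
}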
6130 | 8785 | |
---|
.. | .. |
---|
6225 | 8880 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
6226 | 8881 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
6227 | 8882 | |
---|
6228 | | - napi_hash_del(&bnapi->napi); |
---|
6229 | | - netif_napi_del(&bnapi->napi); |
---|
| 8883 | + __netif_napi_del(&bnapi->napi); |
---|
6230 | 8884 | } |
---|
6231 | | - /* We called napi_hash_del() before netif_napi_del(), we need |
---|
| 8885 | + /* We called __netif_napi_del(), so we need
---|
6232 | 8886 | * to respect an RCU grace period before freeing napi structures. |
---|
6233 | 8887 | */ |
---|
6234 | 8888 | synchronize_net(); |
---|
.. | .. |
---|
6241 | 8895 | struct bnxt_napi *bnapi; |
---|
6242 | 8896 | |
---|
6243 | 8897 | if (bp->flags & BNXT_FLAG_USING_MSIX) { |
---|
6244 | | - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
| 8898 | + int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; |
---|
| 8899 | + |
---|
| 8900 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 8901 | + poll_fn = bnxt_poll_p5; |
---|
| 8902 | + else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
6245 | 8903 | cp_nr_rings--; |
---|
6246 | 8904 | for (i = 0; i < cp_nr_rings; i++) { |
---|
6247 | 8905 | bnapi = bp->bnapi[i]; |
---|
6248 | | - netif_napi_add(bp->dev, &bnapi->napi, |
---|
6249 | | - bnxt_poll, 64); |
---|
| 8906 | + netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); |
---|
6250 | 8907 | } |
---|
6251 | 8908 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
---|
6252 | 8909 | bnapi = bp->bnapi[cp_nr_rings]; |
---|
.. | .. |
---|
6280 | 8937 | int i; |
---|
6281 | 8938 | |
---|
6282 | 8939 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
6283 | | - struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; |
---|
6284 | | - bp->bnapi[i]->in_reset = false; |
---|
| 8940 | + struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
| 8941 | + struct bnxt_cp_ring_info *cpr; |
---|
6285 | 8942 | |
---|
6286 | | - if (bp->bnapi[i]->rx_ring) { |
---|
| 8943 | + cpr = &bnapi->cp_ring; |
---|
| 8944 | + if (bnapi->in_reset) |
---|
| 8945 | + cpr->sw_stats.rx.rx_resets++; |
---|
| 8946 | + bnapi->in_reset = false; |
---|
| 8947 | + |
---|
| 8948 | + if (bnapi->rx_ring) { |
---|
6287 | 8949 | INIT_WORK(&cpr->dim.work, bnxt_dim_work); |
---|
6288 | | - cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
---|
| 8950 | + cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
---|
6289 | 8951 | } |
---|
6290 | | - napi_enable(&bp->bnapi[i]->napi); |
---|
| 8952 | + napi_enable(&bnapi->napi); |
---|
6291 | 8953 | } |
---|
6292 | 8954 | } |
---|
6293 | 8955 | |
---|
.. | .. |
---|
6326 | 8988 | netif_carrier_on(bp->dev); |
---|
6327 | 8989 | } |
---|
6328 | 8990 | |
---|
| 8991 | +static char *bnxt_report_fec(struct bnxt_link_info *link_info) |
---|
| 8992 | +{ |
---|
| 8993 | + u8 active_fec = link_info->active_fec_sig_mode & |
---|
| 8994 | + PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; |
---|
| 8995 | + |
---|
| 8996 | + switch (active_fec) { |
---|
| 8997 | + default: |
---|
| 8998 | + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: |
---|
| 8999 | + return "None"; |
---|
| 9000 | + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: |
---|
| 9001 | + return "Clause 74 BaseR"; |
---|
| 9002 | + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: |
---|
| 9003 | + return "Clause 91 RS(528,514)"; |
---|
| 9004 | + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: |
---|
| 9005 | + return "Clause 91 RS544_1XN"; |
---|
| 9006 | + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: |
---|
| 9007 | + return "Clause 91 RS(544,514)"; |
---|
| 9008 | + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: |
---|
| 9009 | + return "Clause 91 RS272_1XN"; |
---|
| 9010 | + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: |
---|
| 9011 | + return "Clause 91 RS(272,257)"; |
---|
| 9012 | + } |
---|
| 9013 | +} |
---|
| 9014 | + |
---|
6329 | 9015 | static void bnxt_report_link(struct bnxt *bp) |
---|
6330 | 9016 | { |
---|
6331 | 9017 | if (bp->link_info.link_up) { |
---|
.. | .. |
---|
6360 | 9046 | "not active"); |
---|
6361 | 9047 | fec = bp->link_info.fec_cfg; |
---|
6362 | 9048 | if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) |
---|
6363 | | - netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", |
---|
| 9049 | + netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", |
---|
6364 | 9050 | (fec & BNXT_FEC_AUTONEG) ? "on" : "off", |
---|
6365 | | - (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : |
---|
6366 | | - (fec & BNXT_FEC_ENC_RS) ? "RS" : "None"); |
---|
| 9051 | + bnxt_report_fec(&bp->link_info)); |
---|
6367 | 9052 | } else { |
---|
6368 | 9053 | netif_carrier_off(bp->dev); |
---|
6369 | 9054 | netdev_err(bp->dev, "NIC Link is Down\n"); |
---|
6370 | 9055 | } |
---|
| 9056 | +} |
---|
| 9057 | + |
---|
| 9058 | +static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) |
---|
| 9059 | +{ |
---|
| 9060 | + if (!resp->supported_speeds_auto_mode && |
---|
| 9061 | + !resp->supported_speeds_force_mode && |
---|
| 9062 | + !resp->supported_pam4_speeds_auto_mode && |
---|
| 9063 | + !resp->supported_pam4_speeds_force_mode) |
---|
| 9064 | + return true; |
---|
| 9065 | + return false; |
---|
6371 | 9066 | } |
---|
6372 | 9067 | |
---|
6373 | 9068 | static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) |
---|
.. | .. |
---|
6377 | 9072 | struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; |
---|
6378 | 9073 | struct bnxt_link_info *link_info = &bp->link_info; |
---|
6379 | 9074 | |
---|
| 9075 | + bp->flags &= ~BNXT_FLAG_EEE_CAP; |
---|
| 9076 | + if (bp->test_info) |
---|
| 9077 | + bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | |
---|
| 9078 | + BNXT_TEST_FL_AN_PHY_LPBK); |
---|
6380 | 9079 | if (bp->hwrm_spec_code < 0x10201) |
---|
6381 | 9080 | return 0; |
---|
6382 | 9081 | |
---|
.. | .. |
---|
6402 | 9101 | if (bp->test_info) |
---|
6403 | 9102 | bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; |
---|
6404 | 9103 | } |
---|
| 9104 | + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { |
---|
| 9105 | + if (bp->test_info) |
---|
| 9106 | + bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; |
---|
| 9107 | + } |
---|
| 9108 | + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { |
---|
| 9109 | + if (BNXT_PF(bp)) |
---|
| 9110 | + bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; |
---|
| 9111 | + } |
---|
| 9112 | + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET) |
---|
| 9113 | + bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET; |
---|
| 9114 | + |
---|
| 9115 | + if (bp->hwrm_spec_code >= 0x10a01) { |
---|
| 9116 | + if (bnxt_phy_qcaps_no_speed(resp)) { |
---|
| 9117 | + link_info->phy_state = BNXT_PHY_STATE_DISABLED; |
---|
| 9118 | + netdev_warn(bp->dev, "Ethernet link disabled\n"); |
---|
| 9119 | + } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { |
---|
| 9120 | + link_info->phy_state = BNXT_PHY_STATE_ENABLED; |
---|
| 9121 | + netdev_info(bp->dev, "Ethernet link enabled\n"); |
---|
| 9122 | + /* Phy re-enabled, reprobe the speeds */ |
---|
| 9123 | + link_info->support_auto_speeds = 0; |
---|
| 9124 | + link_info->support_pam4_auto_speeds = 0; |
---|
| 9125 | + } |
---|
| 9126 | + } |
---|
6405 | 9127 | if (resp->supported_speeds_auto_mode) |
---|
6406 | 9128 | link_info->support_auto_speeds = |
---|
6407 | 9129 | le16_to_cpu(resp->supported_speeds_auto_mode); |
---|
| 9130 | + if (resp->supported_pam4_speeds_auto_mode) |
---|
| 9131 | + link_info->support_pam4_auto_speeds = |
---|
| 9132 | + le16_to_cpu(resp->supported_pam4_speeds_auto_mode); |
---|
6408 | 9133 | |
---|
6409 | 9134 | bp->port_count = resp->port_cnt; |
---|
6410 | 9135 | |
---|
.. | .. |
---|
6413 | 9138 | return rc; |
---|
6414 | 9139 | } |
---|
6415 | 9140 | |
---|
6416 | | -static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) |
---|
| 9141 | +static bool bnxt_support_dropped(u16 advertising, u16 supported) |
---|
| 9142 | +{ |
---|
| 9143 | + u16 diff = advertising ^ supported; |
---|
| 9144 | + |
---|
| 9145 | + return ((supported | diff) != supported); |
---|
| 9146 | +} |
---|
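/* The helper above is a bit-set subset test. XOR yields the bits where the
 * two masks differ; OR-ing those back into "supported" changes it only when
 * "advertising" carries a bit that "supported" lacks. It is equivalent to
 * (advertising & ~supported) != 0. A quick self-check:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool support_dropped(uint16_t advertising, uint16_t supported)
{
	uint16_t diff = advertising ^ supported;

	return (supported | diff) != supported;
}

int main(void)
{
	assert(!support_dropped(0x0003, 0x0007));	/* subset: nothing dropped */
	assert(support_dropped(0x0009, 0x0007));	/* bit 3 no longer supported */
	assert(support_dropped(0x0009, 0x0007) ==
	       ((0x0009 & ~0x0007) != 0));		/* equivalent form */
	return 0;
}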
| 9147 | + |
---|
| 9148 | +int bnxt_update_link(struct bnxt *bp, bool chng_link_state) |
---|
6417 | 9149 | { |
---|
6418 | 9150 | int rc = 0; |
---|
6419 | 9151 | struct bnxt_link_info *link_info = &bp->link_info; |
---|
6420 | 9152 | struct hwrm_port_phy_qcfg_input req = {0}; |
---|
6421 | 9153 | struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; |
---|
6422 | 9154 | u8 link_up = link_info->link_up; |
---|
6423 | | - u16 diff; |
---|
| 9155 | + bool support_changed = false; |
---|
6424 | 9156 | |
---|
6425 | 9157 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); |
---|
6426 | 9158 | |
---|
.. | .. |
---|
6447 | 9179 | else |
---|
6448 | 9180 | link_info->link_speed = 0; |
---|
6449 | 9181 | link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); |
---|
| 9182 | + link_info->force_pam4_link_speed = |
---|
| 9183 | + le16_to_cpu(resp->force_pam4_link_speed); |
---|
6450 | 9184 | link_info->support_speeds = le16_to_cpu(resp->support_speeds); |
---|
| 9185 | + link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); |
---|
6451 | 9186 | link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); |
---|
| 9187 | + link_info->auto_pam4_link_speeds = |
---|
| 9188 | + le16_to_cpu(resp->auto_pam4_link_speed_mask); |
---|
6452 | 9189 | link_info->lp_auto_link_speeds = |
---|
6453 | 9190 | le16_to_cpu(resp->link_partner_adv_speeds); |
---|
| 9191 | + link_info->lp_auto_pam4_link_speeds = |
---|
| 9192 | + resp->link_partner_pam4_adv_speeds; |
---|
6454 | 9193 | link_info->preemphasis = le32_to_cpu(resp->preemphasis); |
---|
6455 | 9194 | link_info->phy_ver[0] = resp->phy_maj; |
---|
6456 | 9195 | link_info->phy_ver[1] = resp->phy_min; |
---|
.. | .. |
---|
6499 | 9238 | } |
---|
6500 | 9239 | |
---|
6501 | 9240 | link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; |
---|
6502 | | - if (bp->hwrm_spec_code >= 0x10504) |
---|
| 9241 | + if (bp->hwrm_spec_code >= 0x10504) { |
---|
6503 | 9242 | link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); |
---|
6504 | | - |
---|
| 9243 | + link_info->active_fec_sig_mode = resp->active_fec_signal_mode; |
---|
| 9244 | + } |
---|
6505 | 9245 | /* TODO: need to add more logic to report VF link */ |
---|
6506 | 9246 | if (chng_link_state) { |
---|
6507 | 9247 | if (link_info->phy_link_status == BNXT_LINK_LINK) |
---|
.. | .. |
---|
6516 | 9256 | } |
---|
6517 | 9257 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
6518 | 9258 | |
---|
6519 | | - if (!BNXT_SINGLE_PF(bp)) |
---|
| 9259 | + if (!BNXT_PHY_CFG_ABLE(bp)) |
---|
6520 | 9260 | return 0; |
---|
6521 | 9261 | |
---|
6522 | | - diff = link_info->support_auto_speeds ^ link_info->advertising; |
---|
6523 | | - if ((link_info->support_auto_speeds | diff) != |
---|
6524 | | - link_info->support_auto_speeds) { |
---|
6525 | | - /* An advertised speed is no longer supported, so we need to |
---|
6526 | | - * update the advertisement settings. Caller holds RTNL |
---|
6527 | | - * so we can modify link settings. |
---|
6528 | | - */ |
---|
| 9262 | + /* Check if any advertised speeds are no longer supported. The caller |
---|
| 9263 | + * holds the link_lock mutex, so we can modify link_info settings. |
---|
| 9264 | + */ |
---|
| 9265 | + if (bnxt_support_dropped(link_info->advertising, |
---|
| 9266 | + link_info->support_auto_speeds)) { |
---|
6529 | 9267 | link_info->advertising = link_info->support_auto_speeds; |
---|
6530 | | - if (link_info->autoneg & BNXT_AUTONEG_SPEED) |
---|
6531 | | - bnxt_hwrm_set_link_setting(bp, true, false); |
---|
| 9268 | + support_changed = true; |
---|
6532 | 9269 | } |
---|
| 9270 | + if (bnxt_support_dropped(link_info->advertising_pam4, |
---|
| 9271 | + link_info->support_pam4_auto_speeds)) { |
---|
| 9272 | + link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; |
---|
| 9273 | + support_changed = true; |
---|
| 9274 | + } |
---|
| 9275 | + if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) |
---|
| 9276 | + bnxt_hwrm_set_link_setting(bp, true, false); |
---|
6533 | 9277 | return 0; |
---|
6534 | 9278 | } |
---|
6535 | 9279 | |
---|
.. | .. |
---|
6588 | 9332 | } |
---|
6589 | 9333 | } |
---|
6590 | 9334 | |
---|
6591 | | -static void bnxt_hwrm_set_link_common(struct bnxt *bp, |
---|
6592 | | - struct hwrm_port_phy_cfg_input *req) |
---|
| 9335 | +static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) |
---|
6593 | 9336 | { |
---|
6594 | | - u8 autoneg = bp->link_info.autoneg; |
---|
6595 | | - u16 fw_link_speed = bp->link_info.req_link_speed; |
---|
6596 | | - u16 advertising = bp->link_info.advertising; |
---|
6597 | | - |
---|
6598 | | - if (autoneg & BNXT_AUTONEG_SPEED) { |
---|
6599 | | - req->auto_mode |= |
---|
6600 | | - PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; |
---|
6601 | | - |
---|
6602 | | - req->enables |= cpu_to_le32( |
---|
6603 | | - PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); |
---|
6604 | | - req->auto_link_speed_mask = cpu_to_le16(advertising); |
---|
6605 | | - |
---|
| 9337 | + if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { |
---|
| 9338 | + req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; |
---|
| 9339 | + if (bp->link_info.advertising) { |
---|
| 9340 | + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); |
---|
| 9341 | + req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); |
---|
| 9342 | + } |
---|
| 9343 | + if (bp->link_info.advertising_pam4) { |
---|
| 9344 | + req->enables |= |
---|
| 9345 | + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); |
---|
| 9346 | + req->auto_link_pam4_speed_mask = |
---|
| 9347 | + cpu_to_le16(bp->link_info.advertising_pam4); |
---|
| 9348 | + } |
---|
6606 | 9349 | req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); |
---|
6607 | | - req->flags |= |
---|
6608 | | - cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); |
---|
| 9350 | + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); |
---|
6609 | 9351 | } else { |
---|
6610 | | - req->force_link_speed = cpu_to_le16(fw_link_speed); |
---|
6611 | 9352 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); |
---|
| 9353 | + if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { |
---|
| 9354 | + req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); |
---|
| 9355 | + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); |
---|
| 9356 | + } else { |
---|
| 9357 | + req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); |
---|
| 9358 | + } |
---|
6612 | 9359 | } |
---|
6613 | 9360 | |
---|
6614 | 9361 | /* tell chimp that the setting takes effect immediately */ |
---|
.. | .. |
---|
6698 | 9445 | return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
6699 | 9446 | } |
---|
6700 | 9447 | |
---|
| 9448 | +static int bnxt_fw_init_one(struct bnxt *bp); |
---|
| 9449 | + |
---|
6701 | 9450 | static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) |
---|
6702 | 9451 | { |
---|
6703 | 9452 | struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; |
---|
6704 | 9453 | struct hwrm_func_drv_if_change_input req = {0}; |
---|
| 9454 | + bool fw_reset = !bp->irq_tbl; |
---|
6705 | 9455 | bool resc_reinit = false; |
---|
| 9456 | + u32 flags = 0; |
---|
6706 | 9457 | int rc; |
---|
6707 | 9458 | |
---|
6708 | 9459 | if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) |
---|
.. | .. |
---|
6713 | 9464 | req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); |
---|
6714 | 9465 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
6715 | 9466 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
6716 | | - if (!rc && (resp->flags & |
---|
6717 | | - cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE))) |
---|
6718 | | - resc_reinit = true; |
---|
| 9467 | + if (!rc) |
---|
| 9468 | + flags = le32_to_cpu(resp->flags); |
---|
6719 | 9469 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 9470 | + if (rc) |
---|
| 9471 | + return rc; |
---|
6720 | 9472 | |
---|
6721 | | - if (up && resc_reinit && BNXT_NEW_RM(bp)) { |
---|
6722 | | - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
---|
| 9473 | + if (!up) |
---|
| 9474 | + return 0; |
---|
6723 | 9475 | |
---|
6724 | | - rc = bnxt_hwrm_func_resc_qcaps(bp, true); |
---|
6725 | | - hw_resc->resv_cp_rings = 0; |
---|
6726 | | - hw_resc->resv_tx_rings = 0; |
---|
6727 | | - hw_resc->resv_rx_rings = 0; |
---|
6728 | | - hw_resc->resv_hw_ring_grps = 0; |
---|
6729 | | - hw_resc->resv_vnics = 0; |
---|
6730 | | - bp->tx_nr_rings = 0; |
---|
6731 | | - bp->rx_nr_rings = 0; |
---|
| 9476 | + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) |
---|
| 9477 | + resc_reinit = true; |
---|
| 9478 | + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) |
---|
| 9479 | + fw_reset = true; |
---|
| 9480 | + |
---|
| 9481 | + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { |
---|
| 9482 | + netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); |
---|
| 9483 | + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); |
---|
| 9484 | + return -ENODEV; |
---|
6732 | 9485 | } |
---|
6733 | | - return rc; |
---|
| 9486 | + if (resc_reinit || fw_reset) { |
---|
| 9487 | + if (fw_reset) { |
---|
| 9488 | + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
---|
| 9489 | + bnxt_ulp_stop(bp); |
---|
| 9490 | + bnxt_free_ctx_mem(bp); |
---|
| 9491 | + kfree(bp->ctx); |
---|
| 9492 | + bp->ctx = NULL; |
---|
| 9493 | + bnxt_dcb_free(bp); |
---|
| 9494 | + rc = bnxt_fw_init_one(bp); |
---|
| 9495 | + if (rc) { |
---|
| 9496 | + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); |
---|
| 9497 | + return rc; |
---|
| 9498 | + } |
---|
| 9499 | + bnxt_clear_int_mode(bp); |
---|
| 9500 | + rc = bnxt_init_int_mode(bp); |
---|
| 9501 | + if (rc) { |
---|
| 9502 | + netdev_err(bp->dev, "init int mode failed\n"); |
---|
| 9503 | + return rc; |
---|
| 9504 | + } |
---|
| 9505 | + set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); |
---|
| 9506 | + } |
---|
| 9507 | + if (BNXT_NEW_RM(bp)) { |
---|
| 9508 | + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
---|
| 9509 | + |
---|
| 9510 | + rc = bnxt_hwrm_func_resc_qcaps(bp, true); |
---|
| 9511 | + hw_resc->resv_cp_rings = 0; |
---|
| 9512 | + hw_resc->resv_stat_ctxs = 0; |
---|
| 9513 | + hw_resc->resv_irqs = 0; |
---|
| 9514 | + hw_resc->resv_tx_rings = 0; |
---|
| 9515 | + hw_resc->resv_rx_rings = 0; |
---|
| 9516 | + hw_resc->resv_hw_ring_grps = 0; |
---|
| 9517 | + hw_resc->resv_vnics = 0; |
---|
| 9518 | + if (!fw_reset) { |
---|
| 9519 | + bp->tx_nr_rings = 0; |
---|
| 9520 | + bp->rx_nr_rings = 0; |
---|
| 9521 | + } |
---|
| 9522 | + } |
---|
| 9523 | + } |
---|
| 9524 | + return 0; |
---|
6734 | 9525 | } |
---|
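/* A distilled sketch of the decision flow above, with the driver state
 * reduced to booleans; this is an editorial model, not the driver's code.
 * Reaching "abort" corresponds to setting BNXT_STATE_ABORT_ERR; "reinit"
 * corresponds to re-running bnxt_fw_init_one() and clearing the
 * reserved-resource bookkeeping.
 */
#include <stdbool.h>

enum if_change_action { IF_OK, IF_ABORT, IF_REINIT };

static enum if_change_action if_change(bool up, bool have_irq_tbl,
				       bool resc_change_flag,
				       bool hot_reset_done_flag,
				       bool in_fw_reset)
{
	bool fw_reset = !have_irq_tbl || hot_reset_done_flag;

	if (!up)
		return IF_OK;
	if (in_fw_reset && !fw_reset)
		return IF_ABORT;	/* firmware never acknowledged the reset */
	if (resc_change_flag || fw_reset)
		return IF_REINIT;	/* contexts and reservations are stale */
	return IF_OK;
}

int main(void)
{
	/* reset in progress but RESET_DONE flag missing -> abort */
	return if_change(true, true, false, false, true) == IF_ABORT ? 0 : 1;
}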
6735 | 9526 | |
---|
6736 | 9527 | static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) |
---|
.. | .. |
---|
6740 | 9531 | struct bnxt_pf_info *pf = &bp->pf; |
---|
6741 | 9532 | int rc; |
---|
6742 | 9533 | |
---|
| 9534 | + bp->num_leds = 0; |
---|
6743 | 9535 | if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) |
---|
6744 | 9536 | return 0; |
---|
6745 | 9537 | |
---|
.. | .. |
---|
6794 | 9586 | int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) |
---|
6795 | 9587 | { |
---|
6796 | 9588 | struct hwrm_wol_filter_free_input req = {0}; |
---|
6797 | | - int rc; |
---|
6798 | 9589 | |
---|
6799 | 9590 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); |
---|
6800 | 9591 | req.port_id = cpu_to_le16(bp->pf.port_id); |
---|
6801 | 9592 | req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); |
---|
6802 | 9593 | req.wol_filter_id = bp->wol_filter_id; |
---|
6803 | | - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
6804 | | - return rc; |
---|
| 9594 | + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
6805 | 9595 | } |
---|
6806 | 9596 | |
---|
6807 | 9597 | static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) |
---|
.. | .. |
---|
6834 | 9624 | { |
---|
6835 | 9625 | u16 handle = 0; |
---|
6836 | 9626 | |
---|
| 9627 | + bp->wol = 0; |
---|
6837 | 9628 | if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) |
---|
6838 | 9629 | return; |
---|
6839 | 9630 | |
---|
.. | .. |
---|
6891 | 9682 | bnxt_hwmon_close(bp); |
---|
6892 | 9683 | return; |
---|
6893 | 9684 | } |
---|
| 9685 | + |
---|
| 9686 | + if (bp->hwmon_dev) |
---|
| 9687 | + return; |
---|
6894 | 9688 | |
---|
6895 | 9689 | bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, |
---|
6896 | 9690 | DRV_MODULE_NAME, bp, |
---|
.. | .. |
---|
6961 | 9755 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
---|
6962 | 9756 | if (BNXT_AUTO_MODE(link_info->auto_mode)) |
---|
6963 | 9757 | update_link = true; |
---|
6964 | | - if (link_info->req_link_speed != link_info->force_link_speed) |
---|
| 9758 | + if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && |
---|
| 9759 | + link_info->req_link_speed != link_info->force_link_speed) |
---|
| 9760 | + update_link = true; |
---|
| 9761 | + else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && |
---|
| 9762 | + link_info->req_link_speed != link_info->force_pam4_link_speed) |
---|
6965 | 9763 | update_link = true; |
---|
6966 | 9764 | if (link_info->req_duplex != link_info->duplex_setting) |
---|
6967 | 9765 | update_link = true; |
---|
6968 | 9766 | } else { |
---|
6969 | 9767 | if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) |
---|
6970 | 9768 | update_link = true; |
---|
6971 | | - if (link_info->advertising != link_info->auto_link_speeds) |
---|
| 9769 | + if (link_info->advertising != link_info->auto_link_speeds || |
---|
| 9770 | + link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) |
---|
6972 | 9771 | update_link = true; |
---|
6973 | 9772 | } |
---|
6974 | 9773 | |
---|
6975 | 9774 | /* The last close may have shutdown the link, so need to call |
---|
6976 | 9775 | * PHY_CFG to bring it back up. |
---|
6977 | 9776 | */ |
---|
6978 | | - if (!netif_carrier_ok(bp->dev)) |
---|
| 9777 | + if (!bp->link_info.link_up) |
---|
6979 | 9778 | update_link = true; |
---|
6980 | 9779 | |
---|
6981 | 9780 | if (!bnxt_eee_config_ok(bp)) |
---|
.. | .. |
---|
7023 | 9822 | netdev_err(bp->dev, "Failed to reserve default rings at open\n"); |
---|
7024 | 9823 | return rc; |
---|
7025 | 9824 | } |
---|
7026 | | - rc = bnxt_reserve_rings(bp); |
---|
7027 | | - if (rc) |
---|
7028 | | - return rc; |
---|
7029 | 9825 | } |
---|
| 9826 | + rc = bnxt_reserve_rings(bp, irq_re_init); |
---|
| 9827 | + if (rc) |
---|
| 9828 | + return rc; |
---|
7030 | 9829 | if ((bp->flags & BNXT_FLAG_RFS) && |
---|
7031 | 9830 | !(bp->flags & BNXT_FLAG_USING_MSIX)) { |
---|
7032 | 9831 | /* disable RFS if falling back to INTA */ |
---|
.. | .. |
---|
7073 | 9872 | } |
---|
7074 | 9873 | |
---|
7075 | 9874 | if (irq_re_init) |
---|
7076 | | - udp_tunnel_get_rx_info(bp->dev); |
---|
| 9875 | + udp_tunnel_nic_reset_ntf(bp->dev); |
---|
7077 | 9876 | |
---|
7078 | 9877 | set_bit(BNXT_STATE_OPEN, &bp->state); |
---|
7079 | 9878 | bnxt_enable_int(bp); |
---|
.. | .. |
---|
7103 | 9902 | { |
---|
7104 | 9903 | int rc = 0; |
---|
7105 | 9904 | |
---|
7106 | | - rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); |
---|
| 9905 | + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) |
---|
| 9906 | + rc = -EIO; |
---|
| 9907 | + if (!rc) |
---|
| 9908 | + rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); |
---|
7107 | 9909 | if (rc) { |
---|
7108 | 9910 | netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); |
---|
7109 | 9911 | dev_close(bp->dev); |
---|
.. | .. |
---|
7118 | 9920 | int bnxt_half_open_nic(struct bnxt *bp) |
---|
7119 | 9921 | { |
---|
7120 | 9922 | int rc = 0; |
---|
| 9923 | + |
---|
| 9924 | + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
---|
| 9925 | + netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); |
---|
| 9926 | + rc = -ENODEV; |
---|
| 9927 | + goto half_open_err; |
---|
| 9928 | + } |
---|
7121 | 9929 | |
---|
7122 | 9930 | rc = bnxt_alloc_mem(bp, false); |
---|
7123 | 9931 | if (rc) { |
---|
.. | .. |
---|
7148 | 9956 | bnxt_free_mem(bp, false); |
---|
7149 | 9957 | } |
---|
7150 | 9958 | |
---|
| 9959 | +static void bnxt_reenable_sriov(struct bnxt *bp) |
---|
| 9960 | +{ |
---|
| 9961 | + if (BNXT_PF(bp)) { |
---|
| 9962 | + struct bnxt_pf_info *pf = &bp->pf; |
---|
| 9963 | + int n = pf->active_vfs; |
---|
| 9964 | + |
---|
| 9965 | + if (n) |
---|
| 9966 | + bnxt_cfg_hw_sriov(bp, &n, true); |
---|
| 9967 | + } |
---|
| 9968 | +} |
---|
| 9969 | + |
---|
7151 | 9970 | static int bnxt_open(struct net_device *dev) |
---|
7152 | 9971 | { |
---|
7153 | 9972 | struct bnxt *bp = netdev_priv(dev); |
---|
7154 | 9973 | int rc; |
---|
7155 | 9974 | |
---|
7156 | | - bnxt_hwrm_if_change(bp, true); |
---|
7157 | | - rc = __bnxt_open_nic(bp, true, true); |
---|
7158 | | - if (rc) |
---|
7159 | | - bnxt_hwrm_if_change(bp, false); |
---|
| 9975 | + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
---|
| 9976 | + netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n"); |
---|
| 9977 | + return -ENODEV; |
---|
| 9978 | + } |
---|
7160 | 9979 | |
---|
7161 | | - bnxt_hwmon_open(bp); |
---|
| 9980 | + rc = bnxt_hwrm_if_change(bp, true); |
---|
| 9981 | + if (rc) |
---|
| 9982 | + return rc; |
---|
| 9983 | + rc = __bnxt_open_nic(bp, true, true); |
---|
| 9984 | + if (rc) { |
---|
| 9985 | + bnxt_hwrm_if_change(bp, false); |
---|
| 9986 | + } else { |
---|
| 9987 | + if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { |
---|
| 9988 | + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
---|
| 9989 | + bnxt_ulp_start(bp, 0); |
---|
| 9990 | + bnxt_reenable_sriov(bp); |
---|
| 9991 | + } |
---|
| 9992 | + } |
---|
| 9993 | + bnxt_hwmon_open(bp); |
---|
| 9994 | + } |
---|
7162 | 9995 | |
---|
7163 | 9996 | return rc; |
---|
7164 | 9997 | } |
---|
.. | .. |
---|
7211 | 10044 | { |
---|
7212 | 10045 | int rc = 0; |
---|
7213 | 10046 | |
---|
| 10047 | + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
---|
| 10048 | + /* If we get here, it means firmware reset is in progress |
---|
| 10049 | + * while we are trying to close. We can safely proceed with |
---|
| 10050 | + * the close because we are holding rtnl_lock(). Some firmware |
---|
| 10051 | + * messages may fail as we proceed to close. We set the |
---|
| 10052 | + * ABORT_ERR flag here so that the FW reset thread will later |
---|
| 10053 | + * abort when it gets the rtnl_lock() and sees the flag. |
---|
| 10054 | + */ |
---|
| 10055 | + netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); |
---|
| 10056 | + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); |
---|
| 10057 | + } |
---|
| 10058 | + |
---|
7214 | 10059 | #ifdef CONFIG_BNXT_SRIOV |
---|
7215 | 10060 | if (bp->sriov_cfg) { |
---|
7216 | 10061 | rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, |
---|
.. | .. |
---|
7235 | 10080 | return 0; |
---|
7236 | 10081 | } |
---|
7237 | 10082 | |
---|
| 10083 | +static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, |
---|
| 10084 | + u16 *val) |
---|
| 10085 | +{ |
---|
| 10086 | + struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 10087 | + struct hwrm_port_phy_mdio_read_input req = {0}; |
---|
| 10088 | + int rc; |
---|
| 10089 | + |
---|
| 10090 | + if (bp->hwrm_spec_code < 0x10a00) |
---|
| 10091 | + return -EOPNOTSUPP; |
---|
| 10092 | + |
---|
| 10093 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); |
---|
| 10094 | + req.port_id = cpu_to_le16(bp->pf.port_id); |
---|
| 10095 | + req.phy_addr = phy_addr; |
---|
| 10096 | + req.reg_addr = cpu_to_le16(reg & 0x1f); |
---|
| 10097 | + if (mdio_phy_id_is_c45(phy_addr)) { |
---|
| 10098 | + req.cl45_mdio = 1; |
---|
| 10099 | + req.phy_addr = mdio_phy_id_prtad(phy_addr); |
---|
| 10100 | + req.dev_addr = mdio_phy_id_devad(phy_addr); |
---|
| 10101 | + req.reg_addr = cpu_to_le16(reg); |
---|
| 10102 | + } |
---|
| 10103 | + |
---|
| 10104 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 10105 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 10106 | + if (!rc) |
---|
| 10107 | + *val = le16_to_cpu(resp->reg_data); |
---|
| 10108 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 10109 | + return rc; |
---|
| 10110 | +} |
---|
| 10111 | + |
---|
| 10112 | +static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, |
---|
| 10113 | + u16 val) |
---|
| 10114 | +{ |
---|
| 10115 | + struct hwrm_port_phy_mdio_write_input req = {0}; |
---|
| 10116 | + |
---|
| 10117 | + if (bp->hwrm_spec_code < 0x10a00) |
---|
| 10118 | + return -EOPNOTSUPP; |
---|
| 10119 | + |
---|
| 10120 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); |
---|
| 10121 | + req.port_id = cpu_to_le16(bp->pf.port_id); |
---|
| 10122 | + req.phy_addr = phy_addr; |
---|
| 10123 | + req.reg_addr = cpu_to_le16(reg & 0x1f); |
---|
| 10124 | + if (mdio_phy_id_is_c45(phy_addr)) { |
---|
| 10125 | + req.cl45_mdio = 1; |
---|
| 10126 | + req.phy_addr = mdio_phy_id_prtad(phy_addr); |
---|
| 10127 | + req.dev_addr = mdio_phy_id_devad(phy_addr); |
---|
| 10128 | + req.reg_addr = cpu_to_le16(reg); |
---|
| 10129 | + } |
---|
| 10130 | + req.reg_data = cpu_to_le16(val); |
---|
| 10131 | + |
---|
| 10132 | + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 10133 | +} |
---|
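/* The clause-45 branches above rely on the phy_id packing from
 * <linux/mdio.h>: bit 15 flags a C45 address, bits 9:5 carry the port
 * address (prtad) and bits 4:0 the device address (devad). A round-trip of
 * that encoding, with the uapi mask values inlined:
 */
#include <assert.h>
#include <stdint.h>

#define MDIO_PHY_ID_C45		0x8000
#define MDIO_PHY_ID_PRTAD	0x03e0
#define MDIO_PHY_ID_DEVAD	0x001f

int main(void)
{
	uint16_t phy_id = MDIO_PHY_ID_C45 | (5 << 5) | 1; /* prtad 5, devad 1 */

	assert(phy_id & MDIO_PHY_ID_C45);			/* detected as C45 */
	assert(((phy_id & MDIO_PHY_ID_PRTAD) >> 5) == 5);	/* mdio_phy_id_prtad() */
	assert((phy_id & MDIO_PHY_ID_DEVAD) == 1);		/* mdio_phy_id_devad() */
	return 0;
}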
| 10134 | + |
---|
7238 | 10135 | /* rtnl_lock held */ |
---|
7239 | 10136 | static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
---|
7240 | 10137 | { |
---|
| 10138 | + struct mii_ioctl_data *mdio = if_mii(ifr); |
---|
| 10139 | + struct bnxt *bp = netdev_priv(dev); |
---|
| 10140 | + int rc; |
---|
| 10141 | + |
---|
7241 | 10142 | switch (cmd) { |
---|
7242 | 10143 | case SIOCGMIIPHY: |
---|
7243 | | - /* fallthru */ |
---|
| 10144 | + mdio->phy_id = bp->link_info.phy_addr; |
---|
| 10145 | + |
---|
| 10146 | + fallthrough; |
---|
7244 | 10147 | case SIOCGMIIREG: { |
---|
| 10148 | + u16 mii_regval = 0; |
---|
| 10149 | + |
---|
7245 | 10150 | if (!netif_running(dev)) |
---|
7246 | 10151 | return -EAGAIN; |
---|
7247 | 10152 | |
---|
7248 | | - return 0; |
---|
| 10153 | + rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, |
---|
| 10154 | + &mii_regval); |
---|
| 10155 | + mdio->val_out = mii_regval; |
---|
| 10156 | + return rc; |
---|
7249 | 10157 | } |
---|
7250 | 10158 | |
---|
7251 | 10159 | case SIOCSMIIREG: |
---|
7252 | 10160 | if (!netif_running(dev)) |
---|
7253 | 10161 | return -EAGAIN; |
---|
7254 | 10162 | |
---|
7255 | | - return 0; |
---|
| 10163 | + return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, |
---|
| 10164 | + mdio->val_in); |
---|
7256 | 10165 | |
---|
7257 | 10166 | default: |
---|
7258 | 10167 | /* do nothing */ |
---|
.. | .. |
---|
7266 | 10175 | { |
---|
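/* With the MDIO handlers above wired into bnxt_ioctl(), the standard MII
 * ioctls work from user space. A minimal reader; the interface name "eth0"
 * is an assumption:
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed device name */

	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills mii->phy_id */
		mii->reg_num = MII_BMSR;		/* basic mode status */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR: 0x%04x\n", mii->val_out);
	}
	close(fd);
	return 0;
}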
7267 | 10176 | int i; |
---|
7268 | 10177 | |
---|
7269 | | - |
---|
7270 | 10178 | for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
7271 | 10179 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
7272 | 10180 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
---|
7273 | | - struct ctx_hw_stats *hw_stats = cpr->hw_stats; |
---|
| 10181 | + u64 *sw = cpr->stats.sw_stats; |
---|
7274 | 10182 | |
---|
7275 | | - stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); |
---|
7276 | | - stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); |
---|
7277 | | - stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); |
---|
| 10183 | + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); |
---|
| 10184 | + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); |
---|
| 10185 | + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); |
---|
7278 | 10186 | |
---|
7279 | | - stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); |
---|
7280 | | - stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); |
---|
7281 | | - stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); |
---|
| 10187 | + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); |
---|
| 10188 | + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); |
---|
| 10189 | + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); |
---|
7282 | 10190 | |
---|
7283 | | - stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); |
---|
7284 | | - stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); |
---|
7285 | | - stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); |
---|
| 10191 | + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); |
---|
| 10192 | + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); |
---|
| 10193 | + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); |
---|
7286 | 10194 | |
---|
7287 | | - stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); |
---|
7288 | | - stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); |
---|
7289 | | - stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); |
---|
| 10195 | + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); |
---|
| 10196 | + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); |
---|
| 10197 | + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); |
---|
7290 | 10198 | |
---|
7291 | 10199 | stats->rx_missed_errors += |
---|
7292 | | - le64_to_cpu(hw_stats->rx_discard_pkts); |
---|
| 10200 | + BNXT_GET_RING_STATS64(sw, rx_discard_pkts); |
---|
7293 | 10201 | |
---|
7294 | | - stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); |
---|
| 10202 | + stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); |
---|
7295 | 10203 | |
---|
7296 | | - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); |
---|
| 10204 | + stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); |
---|
7297 | 10205 | } |
---|
7298 | 10206 | } |
---|
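/* BNXT_GET_RING_STATS64() is not shown in this hunk. A plausible shape,
 * assuming the software copy mirrors the hardware stats struct as a flat
 * u64 array, is an offsetof-based lookup like this sketch (field names
 * abbreviated, struct layout hypothetical):
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct ring_stats {
	uint64_t rx_ucast_pkts;
	uint64_t rx_mcast_pkts;
	uint64_t rx_bcast_pkts;
};

#define GET_RING_STATS64(sw, counter) \
	(*((sw) + offsetof(struct ring_stats, counter) / 8))

int main(void)
{
	uint64_t sw[3] = { 100, 20, 3 };	/* flat copy of struct ring_stats */

	assert(GET_RING_STATS64(sw, rx_mcast_pkts) == 20);
	return 0;
}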
7299 | 10207 | |
---|
.. | .. |
---|
7331 | 10239 | bnxt_add_prev_stats(bp, stats); |
---|
7332 | 10240 | |
---|
7333 | 10241 | if (bp->flags & BNXT_FLAG_PORT_STATS) { |
---|
7334 | | - struct rx_port_stats *rx = bp->hw_rx_port_stats; |
---|
7335 | | - struct tx_port_stats *tx = bp->hw_tx_port_stats; |
---|
| 10242 | + u64 *rx = bp->port_stats.sw_stats; |
---|
| 10243 | + u64 *tx = bp->port_stats.sw_stats + |
---|
| 10244 | + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
---|
7336 | 10245 | |
---|
7337 | | - stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); |
---|
7338 | | - stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); |
---|
7339 | | - stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + |
---|
7340 | | - le64_to_cpu(rx->rx_ovrsz_frames) + |
---|
7341 | | - le64_to_cpu(rx->rx_runt_frames); |
---|
7342 | | - stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + |
---|
7343 | | - le64_to_cpu(rx->rx_jbr_frames); |
---|
7344 | | - stats->collisions = le64_to_cpu(tx->tx_total_collisions); |
---|
7345 | | - stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); |
---|
7346 | | - stats->tx_errors = le64_to_cpu(tx->tx_err); |
---|
| 10246 | + stats->rx_crc_errors = |
---|
| 10247 | + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); |
---|
| 10248 | + stats->rx_frame_errors = |
---|
| 10249 | + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); |
---|
| 10250 | + stats->rx_length_errors = |
---|
| 10251 | + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + |
---|
| 10252 | + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + |
---|
| 10253 | + BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); |
---|
| 10254 | + stats->rx_errors = |
---|
| 10255 | + BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + |
---|
| 10256 | + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); |
---|
| 10257 | + stats->collisions = |
---|
| 10258 | + BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); |
---|
| 10259 | + stats->tx_fifo_errors = |
---|
| 10260 | + BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); |
---|
| 10261 | + stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); |
---|
7347 | 10262 | } |
---|
7348 | 10263 | clear_bit(BNXT_STATE_READ_STATS, &bp->state); |
---|
7349 | 10264 | } |
---|
.. | .. |
---|
7404 | 10319 | static void bnxt_set_rx_mode(struct net_device *dev) |
---|
7405 | 10320 | { |
---|
7406 | 10321 | struct bnxt *bp = netdev_priv(dev); |
---|
7407 | | - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
---|
7408 | | - u32 mask = vnic->rx_mask; |
---|
| 10322 | + struct bnxt_vnic_info *vnic; |
---|
7409 | 10323 | bool mc_update = false; |
---|
7410 | 10324 | bool uc_update; |
---|
| 10325 | + u32 mask; |
---|
7411 | 10326 | |
---|
7412 | | - if (!netif_running(dev)) |
---|
| 10327 | + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) |
---|
7413 | 10328 | return; |
---|
7414 | 10329 | |
---|
| 10330 | + vnic = &bp->vnic_info[0]; |
---|
| 10331 | + mask = vnic->rx_mask; |
---|
7415 | 10332 | mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | |
---|
7416 | 10333 | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | |
---|
7417 | 10334 | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | |
---|
.. | .. |
---|
7530 | 10447 | /* If the chip and firmware support RFS */
---|
7531 | 10448 | static bool bnxt_rfs_supported(struct bnxt *bp) |
---|
7532 | 10449 | { |
---|
| 10450 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 10451 | + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) |
---|
| 10452 | + return true; |
---|
| 10453 | + return false; |
---|
| 10454 | + } |
---|
| 10455 | + /* 212 firmware is broken for aRFS */ |
---|
| 10456 | + if (BNXT_FW_MAJ(bp) == 212) |
---|
| 10457 | + return false; |
---|
7533 | 10458 | if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
7534 | 10459 | return true; |
---|
7535 | 10460 | if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) |
---|
.. | .. |
---|
7543 | 10468 | #ifdef CONFIG_RFS_ACCEL |
---|
7544 | 10469 | int vnics, max_vnics, max_rss_ctxs; |
---|
7545 | 10470 | |
---|
7546 | | - if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) |
---|
| 10471 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
---|
| 10472 | + return bnxt_rfs_supported(bp); |
---|
| 10473 | + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) |
---|
7547 | 10474 | return false; |
---|
7548 | 10475 | |
---|
7549 | 10476 | vnics = 1 + bp->rx_nr_rings; |
---|
.. | .. |
---|
7567 | 10494 | if (vnics == bp->hw_resc.resv_vnics) |
---|
7568 | 10495 | return true; |
---|
7569 | 10496 | |
---|
7570 | | - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics); |
---|
| 10497 | + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); |
---|
7571 | 10498 | if (vnics <= bp->hw_resc.resv_vnics) |
---|
7572 | 10499 | return true; |
---|
7573 | 10500 | |
---|
7574 | 10501 | netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); |
---|
7575 | | - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1); |
---|
| 10502 | + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); |
---|
7576 | 10503 | return false; |
---|
7577 | 10504 | #else |
---|
7578 | 10505 | return false; |
---|
.. | .. |
---|
7600 | 10527 | /* Both CTAG and STAG VLAN acceleration on the RX side have to be
---|
7601 | 10528 | * turned on or off together. |
---|
7602 | 10529 | */ |
---|
7603 | | - vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | |
---|
7604 | | - NETIF_F_HW_VLAN_STAG_RX); |
---|
7605 | | - if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | |
---|
7606 | | - NETIF_F_HW_VLAN_STAG_RX)) { |
---|
7607 | | - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) |
---|
7608 | | - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | |
---|
7609 | | - NETIF_F_HW_VLAN_STAG_RX); |
---|
| 10530 | + vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; |
---|
| 10531 | + if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { |
---|
| 10532 | + if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) |
---|
| 10533 | + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; |
---|
7610 | 10534 | else if (vlan_features) |
---|
7611 | | - features |= NETIF_F_HW_VLAN_CTAG_RX | |
---|
7612 | | - NETIF_F_HW_VLAN_STAG_RX; |
---|
| 10535 | + features |= BNXT_HW_FEATURE_VLAN_ALL_RX; |
---|
7613 | 10536 | } |
---|
7614 | 10537 | #ifdef CONFIG_BNXT_SRIOV |
---|
7615 | | - if (BNXT_VF(bp)) { |
---|
7616 | | - if (bp->vf.vlan) { |
---|
7617 | | - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | |
---|
7618 | | - NETIF_F_HW_VLAN_STAG_RX); |
---|
7619 | | - } |
---|
7620 | | - } |
---|
| 10538 | + if (BNXT_VF(bp) && bp->vf.vlan) |
---|
| 10539 | + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; |
---|
7621 | 10540 | #endif |
---|
7622 | 10541 | return features; |
---|
7623 | 10542 | } |
---|
.. | .. |
---|
7640 | 10559 | if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) |
---|
7641 | 10560 | flags &= ~BNXT_FLAG_TPA; |
---|
7642 | 10561 | |
---|
7643 | | - if (features & NETIF_F_HW_VLAN_CTAG_RX) |
---|
| 10562 | + if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) |
---|
7644 | 10563 | flags |= BNXT_FLAG_STRIP_VLAN; |
---|
7645 | 10564 | |
---|
7646 | 10565 | if (features & NETIF_F_NTUPLE) |
---|
.. | .. |
---|
7650 | 10569 | if (changes & BNXT_FLAG_TPA) { |
---|
7651 | 10570 | update_tpa = true; |
---|
7652 | 10571 | if ((bp->flags & BNXT_FLAG_TPA) == 0 || |
---|
7653 | | - (flags & BNXT_FLAG_TPA) == 0) |
---|
| 10572 | + (flags & BNXT_FLAG_TPA) == 0 || |
---|
| 10573 | + (bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
7654 | 10574 | re_init = true; |
---|
7655 | 10575 | } |
---|
7656 | 10576 | |
---|
.. | .. |
---|
7660 | 10580 | if (flags != bp->flags) { |
---|
7661 | 10581 | u32 old_flags = bp->flags; |
---|
7662 | 10582 | |
---|
7663 | | - bp->flags = flags; |
---|
7664 | | - |
---|
7665 | 10583 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
---|
| 10584 | + bp->flags = flags; |
---|
7666 | 10585 | if (update_tpa) |
---|
7667 | 10586 | bnxt_set_ring_params(bp); |
---|
7668 | 10587 | return rc; |
---|
.. | .. |
---|
7670 | 10589 | |
---|
7671 | 10590 | if (re_init) { |
---|
7672 | 10591 | bnxt_close_nic(bp, false, false); |
---|
| 10592 | + bp->flags = flags; |
---|
7673 | 10593 | if (update_tpa) |
---|
7674 | 10594 | bnxt_set_ring_params(bp); |
---|
7675 | 10595 | |
---|
7676 | 10596 | return bnxt_open_nic(bp, false, false); |
---|
7677 | 10597 | } |
---|
7678 | 10598 | if (update_tpa) { |
---|
| 10599 | + bp->flags = flags; |
---|
7679 | 10600 | rc = bnxt_set_tpa(bp, |
---|
7680 | 10601 | (flags & BNXT_FLAG_TPA) ? |
---|
7681 | 10602 | true : false); |
---|
.. | .. |
---|
7683 | 10604 | bp->flags = old_flags; |
---|
7684 | 10605 | } |
---|
7685 | 10606 | } |
---|
| 10607 | + return rc; |
---|
| 10608 | +} |
---|
| 10609 | + |
---|
| 10610 | +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, |
---|
| 10611 | + u32 *reg_buf) |
---|
| 10612 | +{ |
---|
| 10613 | + struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 10614 | + struct hwrm_dbg_read_direct_input req = {0}; |
---|
| 10615 | + __le32 *dbg_reg_buf; |
---|
| 10616 | + dma_addr_t mapping; |
---|
| 10617 | + int rc, i; |
---|
| 10618 | + |
---|
| 10619 | + dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, |
---|
| 10620 | + &mapping, GFP_KERNEL); |
---|
| 10621 | + if (!dbg_reg_buf) |
---|
| 10622 | + return -ENOMEM; |
---|
| 10623 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); |
---|
| 10624 | + req.host_dest_addr = cpu_to_le64(mapping); |
---|
| 10625 | + req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); |
---|
| 10626 | + req.read_len32 = cpu_to_le32(num_words); |
---|
| 10627 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 10628 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 10629 | + if (rc || resp->error_code) { |
---|
| 10630 | + rc = -EIO; |
---|
| 10631 | + goto dbg_rd_reg_exit; |
---|
| 10632 | + } |
---|
| 10633 | + for (i = 0; i < num_words; i++) |
---|
| 10634 | + reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); |
---|
| 10635 | + |
---|
| 10636 | +dbg_rd_reg_exit: |
---|
| 10637 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
| 10638 | + dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); |
---|
| 10639 | + return rc; |
---|
| 10640 | +} |
---|
| 10641 | + |
---|
| 10642 | +static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, |
---|
| 10643 | + u32 ring_id, u32 *prod, u32 *cons) |
---|
| 10644 | +{ |
---|
| 10645 | + struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; |
---|
| 10646 | + struct hwrm_dbg_ring_info_get_input req = {0}; |
---|
| 10647 | + int rc; |
---|
| 10648 | + |
---|
| 10649 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); |
---|
| 10650 | + req.ring_type = ring_type; |
---|
| 10651 | + req.fw_ring_id = cpu_to_le32(ring_id); |
---|
| 10652 | + mutex_lock(&bp->hwrm_cmd_lock); |
---|
| 10653 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 10654 | + if (!rc) { |
---|
| 10655 | + *prod = le32_to_cpu(resp->producer_index); |
---|
| 10656 | + *cons = le32_to_cpu(resp->consumer_index); |
---|
| 10657 | + } |
---|
| 10658 | + mutex_unlock(&bp->hwrm_cmd_lock); |
---|
7686 | 10659 | return rc; |
---|
7687 | 10660 | } |
---|
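/* One plausible use of the producer/consumer snapshot returned above: if the
 * consumer index stops advancing while work is outstanding, the ring is
 * stuck. Purely illustrative; the real TX-timeout path weighs more context
 * than this.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ring_stalled(uint32_t prod, uint32_t cons,
			 uint32_t last_cons, bool work_pending)
{
	return work_pending && prod != cons && cons == last_cons;
}

int main(void)
{
	return ring_stalled(10, 4, 4, true) ? 0 : 1;	/* cons pinned at 4 */
}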
7688 | 10661 | |
---|
.. | .. |
---|
7737 | 10710 | } |
---|
7738 | 10711 | } |
---|
7739 | 10712 | |
---|
| 10713 | +static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) |
---|
| 10714 | +{ |
---|
| 10715 | + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; |
---|
| 10716 | + struct hwrm_ring_reset_input req = {0}; |
---|
| 10717 | + struct bnxt_napi *bnapi = rxr->bnapi; |
---|
| 10718 | + struct bnxt_cp_ring_info *cpr; |
---|
| 10719 | + u16 cp_ring_id; |
---|
| 10720 | + |
---|
| 10721 | + cpr = &bnapi->cp_ring; |
---|
| 10722 | + cp_ring_id = cpr->cp_ring_struct.fw_ring_id; |
---|
| 10723 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1); |
---|
| 10724 | + req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; |
---|
| 10725 | + req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); |
---|
| 10726 | + return hwrm_send_message_silent(bp, &req, sizeof(req), |
---|
| 10727 | + HWRM_CMD_TIMEOUT); |
---|
| 10728 | +} |
---|
| 10729 | + |
---|
7740 | 10730 | static void bnxt_reset_task(struct bnxt *bp, bool silent) |
---|
7741 | 10731 | { |
---|
7742 | 10732 | if (!silent) |
---|
.. | .. |
---|
7744 | 10734 | if (netif_running(bp->dev)) { |
---|
7745 | 10735 | int rc; |
---|
7746 | 10736 | |
---|
7747 | | - if (!silent) |
---|
| 10737 | + if (silent) { |
---|
| 10738 | + bnxt_close_nic(bp, false, false); |
---|
| 10739 | + bnxt_open_nic(bp, false, false); |
---|
| 10740 | + } else { |
---|
7748 | 10741 | bnxt_ulp_stop(bp); |
---|
7749 | | - bnxt_close_nic(bp, false, false); |
---|
7750 | | - rc = bnxt_open_nic(bp, false, false); |
---|
7751 | | - if (!silent && !rc) |
---|
7752 | | - bnxt_ulp_start(bp); |
---|
| 10742 | + bnxt_close_nic(bp, true, false); |
---|
| 10743 | + rc = bnxt_open_nic(bp, true, false); |
---|
| 10744 | + bnxt_ulp_start(bp, rc); |
---|
| 10745 | + } |
---|
7753 | 10746 | } |
---|
7754 | 10747 | } |
---|
7755 | 10748 | |
---|
7756 | | -static void bnxt_tx_timeout(struct net_device *dev) |
---|
| 10749 | +static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) |
---|
7757 | 10750 | { |
---|
7758 | 10751 | struct bnxt *bp = netdev_priv(dev); |
---|
7759 | 10752 | |
---|
.. | .. |
---|
7762 | 10755 | bnxt_queue_sp_work(bp); |
---|
7763 | 10756 | } |
---|
7764 | 10757 | |
---|
| 10758 | +static void bnxt_fw_health_check(struct bnxt *bp) |
---|
| 10759 | +{ |
---|
| 10760 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 10761 | + u32 val; |
---|
| 10762 | + |
---|
| 10763 | + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
---|
| 10764 | + return; |
---|
| 10765 | + |
---|
| 10766 | + /* Make sure it is enabled before checking the tmr_counter. */ |
---|
| 10767 | + smp_rmb(); |
---|
| 10768 | + if (fw_health->tmr_counter) { |
---|
| 10769 | + fw_health->tmr_counter--; |
---|
| 10770 | + return; |
---|
| 10771 | + } |
---|
| 10772 | + |
---|
| 10773 | + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
---|
| 10774 | + if (val == fw_health->last_fw_heartbeat) |
---|
| 10775 | + goto fw_reset; |
---|
| 10776 | + |
---|
| 10777 | + fw_health->last_fw_heartbeat = val; |
---|
| 10778 | + |
---|
| 10779 | + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
---|
| 10780 | + if (val != fw_health->last_fw_reset_cnt) |
---|
| 10781 | + goto fw_reset; |
---|
| 10782 | + |
---|
| 10783 | + fw_health->tmr_counter = fw_health->tmr_multiplier; |
---|
| 10784 | + return; |
---|
| 10785 | + |
---|
| 10786 | +fw_reset: |
---|
| 10787 | + set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); |
---|
| 10788 | + bnxt_queue_sp_work(bp); |
---|
| 10789 | +} |
---|
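/* [editor's note] An illustrative userspace model of the health check
 * above, not driver code: each timer tick first burns down tmr_counter so
 * the firmware is only sampled every tmr_multiplier ticks, then demands
 * that the heartbeat register advanced and the reset counter stayed put.
 * The register reads become parameters here; the field names mirror the
 * driver, the harness is invented.
 */
#include <stdbool.h>
#include <stdint.h>

struct fw_health_model {
	uint32_t tmr_counter;		/* ticks left until the next sample */
	uint32_t tmr_multiplier;	/* sampling period, in timer ticks */
	uint32_t last_heartbeat;
	uint32_t last_reset_cnt;
};

/* returns true when a firmware reset should be initiated */
static bool fw_health_tick(struct fw_health_model *h,
			   uint32_t heartbeat_reg, uint32_t reset_cnt_reg)
{
	if (h->tmr_counter) {		/* not time to sample yet */
		h->tmr_counter--;
		return false;
	}
	if (heartbeat_reg == h->last_heartbeat)
		return true;		/* firmware stopped ticking */
	h->last_heartbeat = heartbeat_reg;
	if (reset_cnt_reg != h->last_reset_cnt)
		return true;		/* firmware reset behind our back */
	h->tmr_counter = h->tmr_multiplier;	/* re-arm the sample period */
	return false;
}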
| 10790 | + |
---|
7765 | 10791 | static void bnxt_timer(struct timer_list *t) |
---|
7766 | 10792 | { |
---|
7767 | 10793 | struct bnxt *bp = from_timer(bp, t, timer); |
---|
7768 | 10794 | struct net_device *dev = bp->dev; |
---|
7769 | 10795 | |
---|
7770 | | - if (!netif_running(dev)) |
---|
| 10796 | + if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) |
---|
7771 | 10797 | return; |
---|
7772 | 10798 | |
---|
7773 | 10799 | if (atomic_read(&bp->intr_sem) != 0) |
---|
7774 | 10800 | goto bnxt_restart_timer; |
---|
7775 | 10801 | |
---|
7776 | | - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && |
---|
7777 | | - bp->stats_coal_ticks) { |
---|
| 10802 | + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) |
---|
| 10803 | + bnxt_fw_health_check(bp); |
---|
| 10804 | + |
---|
| 10805 | + if (bp->link_info.link_up && bp->stats_coal_ticks) { |
---|
7778 | 10806 | set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); |
---|
7779 | 10807 | bnxt_queue_sp_work(bp); |
---|
7780 | 10808 | } |
---|
.. | .. |
---|
7784 | 10812 | bnxt_queue_sp_work(bp); |
---|
7785 | 10813 | } |
---|
7786 | 10814 | |
---|
| 10815 | +#ifdef CONFIG_RFS_ACCEL |
---|
| 10816 | + if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { |
---|
| 10817 | + set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); |
---|
| 10818 | + bnxt_queue_sp_work(bp); |
---|
| 10819 | + } |
---|
| 10820 | +#endif /*CONFIG_RFS_ACCEL*/ |
---|
| 10821 | + |
---|
7787 | 10822 | if (bp->link_info.phy_retry) { |
---|
7788 | 10823 | if (time_after(jiffies, bp->link_info.phy_retry_expires)) { |
---|
7789 | | - bp->link_info.phy_retry = 0; |
---|
| 10824 | + bp->link_info.phy_retry = false; |
---|
7790 | 10825 | netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); |
---|
7791 | 10826 | } else { |
---|
7792 | 10827 | set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); |
---|
7793 | 10828 | bnxt_queue_sp_work(bp); |
---|
7794 | 10829 | } |
---|
| 10830 | + } |
---|
| 10831 | + |
---|
| 10832 | + if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && |
---|
| 10833 | + netif_carrier_ok(dev)) { |
---|
| 10834 | + set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); |
---|
| 10835 | + bnxt_queue_sp_work(bp); |
---|
7795 | 10836 | } |
---|
7796 | 10837 | bnxt_restart_timer: |
---|
7797 | 10838 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
---|
.. | .. |
---|
7823 | 10864 | bnxt_rtnl_unlock_sp(bp); |
---|
7824 | 10865 | } |
---|
7825 | 10866 | |
---|
| 10867 | +/* Only called from bnxt_sp_task() */ |
---|
| 10868 | +static void bnxt_rx_ring_reset(struct bnxt *bp) |
---|
| 10869 | +{ |
---|
| 10870 | + int i; |
---|
| 10871 | + |
---|
| 10872 | + bnxt_rtnl_lock_sp(bp); |
---|
| 10873 | + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
---|
| 10874 | + bnxt_rtnl_unlock_sp(bp); |
---|
| 10875 | + return; |
---|
| 10876 | + } |
---|
| 10877 | + /* Disable and flush TPA before resetting the RX ring */ |
---|
| 10878 | + if (bp->flags & BNXT_FLAG_TPA) |
---|
| 10879 | + bnxt_set_tpa(bp, false); |
---|
| 10880 | + for (i = 0; i < bp->rx_nr_rings; i++) { |
---|
| 10881 | + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
---|
| 10882 | + struct bnxt_cp_ring_info *cpr; |
---|
| 10883 | + int rc; |
---|
| 10884 | + |
---|
| 10885 | + if (!rxr->bnapi->in_reset) |
---|
| 10886 | + continue; |
---|
| 10887 | + |
---|
| 10888 | + rc = bnxt_hwrm_rx_ring_reset(bp, i); |
---|
| 10889 | + if (rc) { |
---|
| 10890 | + if (rc == -EINVAL || rc == -EOPNOTSUPP) |
---|
| 10891 | + netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); |
---|
| 10892 | + else |
---|
| 10893 | + netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", |
---|
| 10894 | + rc); |
---|
| 10895 | + bnxt_reset_task(bp, true); |
---|
| 10896 | + break; |
---|
| 10897 | + } |
---|
| 10898 | + bnxt_free_one_rx_ring_skbs(bp, i); |
---|
| 10899 | + rxr->rx_prod = 0; |
---|
| 10900 | + rxr->rx_agg_prod = 0; |
---|
| 10901 | + rxr->rx_sw_agg_prod = 0; |
---|
| 10902 | + rxr->rx_next_cons = 0; |
---|
| 10903 | + rxr->bnapi->in_reset = false; |
---|
| 10904 | + bnxt_alloc_one_rx_ring(bp, i); |
---|
| 10905 | + cpr = &rxr->bnapi->cp_ring; |
---|
| 10906 | + cpr->sw_stats.rx.rx_resets++; |
---|
| 10907 | + if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
| 10908 | + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); |
---|
| 10909 | + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); |
---|
| 10910 | + } |
---|
| 10911 | + if (bp->flags & BNXT_FLAG_TPA) |
---|
| 10912 | + bnxt_set_tpa(bp, true); |
---|
| 10913 | + bnxt_rtnl_unlock_sp(bp); |
---|
| 10914 | +} |
---|
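/* [editor's note] The loop above is a graduated-recovery pattern: try the
 * cheap per-ring reset first, and escalate to a full silent reset only if
 * firmware rejects it (-EINVAL/-EOPNOTSUPP on older firmware) or fails it.
 * A condensed sketch of that control flow with the hardware operations
 * reduced to invented stubs:
 */
#include <errno.h>

static int reset_one_ring(int ring)	/* stub: firmware says "unsupported" */
{
	(void)ring;
	return -EOPNOTSUPP;
}

static void reset_whole_device(void) { }	/* stub: coarse fallback */

static void recover_rings(int nr_rings)
{
	int i;

	for (i = 0; i < nr_rings; i++) {
		if (reset_one_ring(i) == 0)
			continue;	/* fine-grained recovery worked */
		reset_whole_device();	/* covers every ring, so stop here */
		break;
	}
}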
| 10915 | + |
---|
| 10916 | +static void bnxt_fw_reset_close(struct bnxt *bp) |
---|
| 10917 | +{ |
---|
| 10918 | + bnxt_ulp_stop(bp); |
---|
| 10919 | + /* When the firmware is in a fatal state, disable the PCI device to prevent
---|
| 10920 | + * any potential bad DMAs before freeing kernel memory. |
---|
| 10921 | + */ |
---|
| 10922 | + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
---|
| 10923 | + pci_disable_device(bp->pdev); |
---|
| 10924 | + __bnxt_close_nic(bp, true, false); |
---|
| 10925 | + bnxt_clear_int_mode(bp); |
---|
| 10926 | + bnxt_hwrm_func_drv_unrgtr(bp); |
---|
| 10927 | + if (pci_is_enabled(bp->pdev)) |
---|
| 10928 | + pci_disable_device(bp->pdev); |
---|
| 10929 | + bnxt_free_ctx_mem(bp); |
---|
| 10930 | + kfree(bp->ctx); |
---|
| 10931 | + bp->ctx = NULL; |
---|
| 10932 | +} |
---|
| 10933 | + |
---|
| 10934 | +static bool is_bnxt_fw_ok(struct bnxt *bp) |
---|
| 10935 | +{ |
---|
| 10936 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 10937 | + bool no_heartbeat = false, has_reset = false; |
---|
| 10938 | + u32 val; |
---|
| 10939 | + |
---|
| 10940 | + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
---|
| 10941 | + if (val == fw_health->last_fw_heartbeat) |
---|
| 10942 | + no_heartbeat = true; |
---|
| 10943 | + |
---|
| 10944 | + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
---|
| 10945 | + if (val != fw_health->last_fw_reset_cnt) |
---|
| 10946 | + has_reset = true; |
---|
| 10947 | + |
---|
| 10948 | + if (!no_heartbeat && has_reset) |
---|
| 10949 | + return true; |
---|
| 10950 | + |
---|
| 10951 | + return false; |
---|
| 10952 | +} |
---|
| 10953 | + |
---|
| 10954 | +/* rtnl_lock is acquired before calling this function */ |
---|
| 10955 | +static void bnxt_force_fw_reset(struct bnxt *bp) |
---|
| 10956 | +{ |
---|
| 10957 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 10958 | + u32 wait_dsecs; |
---|
| 10959 | + |
---|
| 10960 | + if (!test_bit(BNXT_STATE_OPEN, &bp->state) || |
---|
| 10961 | + test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
---|
| 10962 | + return; |
---|
| 10963 | + |
---|
| 10964 | + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
---|
| 10965 | + bnxt_fw_reset_close(bp); |
---|
| 10966 | + wait_dsecs = fw_health->master_func_wait_dsecs; |
---|
| 10967 | + if (fw_health->master) { |
---|
| 10968 | + if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) |
---|
| 10969 | + wait_dsecs = 0; |
---|
| 10970 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; |
---|
| 10971 | + } else { |
---|
| 10972 | + bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; |
---|
| 10973 | + wait_dsecs = fw_health->normal_func_wait_dsecs; |
---|
| 10974 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
---|
| 10975 | + } |
---|
| 10976 | + |
---|
| 10977 | + bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; |
---|
| 10978 | + bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; |
---|
| 10979 | + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); |
---|
| 10980 | +} |
---|
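/* [editor's note] The firmware-supplied recovery timeouts used above are
 * in deciseconds ("dsecs", tenths of a second); hence the recurring
 * "* HZ / 10" conversions to jiffies throughout this state machine. The
 * same conversion in plain C, expressed in milliseconds for clarity:
 */
static unsigned long dsecs_to_msecs(unsigned int dsecs)
{
	return (unsigned long)dsecs * 100;	/* 1 dsec == 100 ms */
}
/* e.g. dsecs_to_msecs(master_func_wait_dsecs) gives the wait in ms; the
 * kernel equivalent is wait_dsecs * HZ / 10 jiffies.
 */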
| 10981 | + |
---|
| 10982 | +void bnxt_fw_exception(struct bnxt *bp) |
---|
| 10983 | +{ |
---|
| 10984 | + netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); |
---|
| 10985 | + set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); |
---|
| 10986 | + bnxt_rtnl_lock_sp(bp); |
---|
| 10987 | + bnxt_force_fw_reset(bp); |
---|
| 10988 | + bnxt_rtnl_unlock_sp(bp); |
---|
| 10989 | +} |
---|
| 10990 | + |
---|
| 10991 | +/* Returns the number of registered VFs, or 1 if VF configuration is pending, or |
---|
| 10992 | + * < 0 on error. |
---|
| 10993 | + */ |
---|
| 10994 | +static int bnxt_get_registered_vfs(struct bnxt *bp) |
---|
| 10995 | +{ |
---|
| 10996 | +#ifdef CONFIG_BNXT_SRIOV |
---|
| 10997 | + int rc; |
---|
| 10998 | + |
---|
| 10999 | + if (!BNXT_PF(bp)) |
---|
| 11000 | + return 0; |
---|
| 11001 | + |
---|
| 11002 | + rc = bnxt_hwrm_func_qcfg(bp); |
---|
| 11003 | + if (rc) { |
---|
| 11004 | + netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); |
---|
| 11005 | + return rc; |
---|
| 11006 | + } |
---|
| 11007 | + if (bp->pf.registered_vfs) |
---|
| 11008 | + return bp->pf.registered_vfs; |
---|
| 11009 | + if (bp->sriov_cfg) |
---|
| 11010 | + return 1; |
---|
| 11011 | +#endif |
---|
| 11012 | + return 0; |
---|
| 11013 | +} |
---|
| 11014 | + |
---|
| 11015 | +void bnxt_fw_reset(struct bnxt *bp) |
---|
| 11016 | +{ |
---|
| 11017 | + bnxt_rtnl_lock_sp(bp); |
---|
| 11018 | + if (test_bit(BNXT_STATE_OPEN, &bp->state) && |
---|
| 11019 | + !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
---|
| 11020 | + int n = 0, tmo; |
---|
| 11021 | + |
---|
| 11022 | + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
---|
| 11023 | + if (bp->pf.active_vfs && |
---|
| 11024 | + !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
---|
| 11025 | + n = bnxt_get_registered_vfs(bp); |
---|
| 11026 | + if (n < 0) { |
---|
| 11027 | + netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", |
---|
| 11028 | + n); |
---|
| 11029 | + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
---|
| 11030 | + dev_close(bp->dev); |
---|
| 11031 | + goto fw_reset_exit; |
---|
| 11032 | + } else if (n > 0) { |
---|
| 11033 | + u16 vf_tmo_dsecs = n * 10; |
---|
| 11034 | + |
---|
| 11035 | + if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) |
---|
| 11036 | + bp->fw_reset_max_dsecs = vf_tmo_dsecs; |
---|
| 11037 | + bp->fw_reset_state = |
---|
| 11038 | + BNXT_FW_RESET_STATE_POLL_VF; |
---|
| 11039 | + bnxt_queue_fw_reset_work(bp, HZ / 10); |
---|
| 11040 | + goto fw_reset_exit; |
---|
| 11041 | + } |
---|
| 11042 | + bnxt_fw_reset_close(bp); |
---|
| 11043 | + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
---|
| 11044 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; |
---|
| 11045 | + tmo = HZ / 10; |
---|
| 11046 | + } else { |
---|
| 11047 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
---|
| 11048 | + tmo = bp->fw_reset_min_dsecs * HZ / 10; |
---|
| 11049 | + } |
---|
| 11050 | + bnxt_queue_fw_reset_work(bp, tmo); |
---|
| 11051 | + } |
---|
| 11052 | +fw_reset_exit: |
---|
| 11053 | + bnxt_rtnl_unlock_sp(bp); |
---|
| 11054 | +} |
---|
| 11055 | + |
---|
| 11056 | +static void bnxt_chk_missed_irq(struct bnxt *bp) |
---|
| 11057 | +{ |
---|
| 11058 | + int i; |
---|
| 11059 | + |
---|
| 11060 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 11061 | + return; |
---|
| 11062 | + |
---|
| 11063 | + for (i = 0; i < bp->cp_nr_rings; i++) { |
---|
| 11064 | + struct bnxt_napi *bnapi = bp->bnapi[i]; |
---|
| 11065 | + struct bnxt_cp_ring_info *cpr; |
---|
| 11066 | + u32 fw_ring_id; |
---|
| 11067 | + int j; |
---|
| 11068 | + |
---|
| 11069 | + if (!bnapi) |
---|
| 11070 | + continue; |
---|
| 11071 | + |
---|
| 11072 | + cpr = &bnapi->cp_ring; |
---|
| 11073 | + for (j = 0; j < 2; j++) { |
---|
| 11074 | + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; |
---|
| 11075 | + u32 val[2]; |
---|
| 11076 | + |
---|
| 11077 | + if (!cpr2 || cpr2->has_more_work || |
---|
| 11078 | + !bnxt_has_work(bp, cpr2)) |
---|
| 11079 | + continue; |
---|
| 11080 | + |
---|
| 11081 | + if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { |
---|
| 11082 | + cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; |
---|
| 11083 | + continue; |
---|
| 11084 | + } |
---|
| 11085 | + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; |
---|
| 11086 | + bnxt_dbg_hwrm_ring_info_get(bp, |
---|
| 11087 | + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, |
---|
| 11088 | + fw_ring_id, &val[0], &val[1]); |
---|
| 11089 | + cpr->sw_stats.cmn.missed_irqs++; |
---|
| 11090 | + } |
---|
| 11091 | + } |
---|
| 11092 | +} |
---|
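/* [editor's note] The check above detects a lost interrupt by comparing a
 * progress counter across timer ticks: if a completion ring still has work
 * pending but its raw consumer index has not moved since the previous
 * pass, the IRQ is presumed missed and the event is counted. A generic
 * userspace sketch of that idea (names invented):
 */
#include <stdbool.h>
#include <stdint.h>

struct ring_progress {
	uint32_t cons;		/* current consumer index */
	uint32_t last_cons;	/* value observed on the previous tick */
};

/* returns true if the ring looks stalled: work pending, consumer frozen */
static bool ring_stalled(struct ring_progress *r, bool has_work)
{
	if (!has_work)
		return false;
	if (r->cons != r->last_cons) {
		r->last_cons = r->cons;	/* progress was made, re-baseline */
		return false;
	}
	return true;
}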
| 11093 | + |
---|
7826 | 11094 | static void bnxt_cfg_ntp_filters(struct bnxt *); |
---|
| 11095 | + |
---|
| 11096 | +static void bnxt_init_ethtool_link_settings(struct bnxt *bp) |
---|
| 11097 | +{ |
---|
| 11098 | + struct bnxt_link_info *link_info = &bp->link_info; |
---|
| 11099 | + |
---|
| 11100 | + if (BNXT_AUTO_MODE(link_info->auto_mode)) { |
---|
| 11101 | + link_info->autoneg = BNXT_AUTONEG_SPEED; |
---|
| 11102 | + if (bp->hwrm_spec_code >= 0x10201) { |
---|
| 11103 | + if (link_info->auto_pause_setting & |
---|
| 11104 | + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) |
---|
| 11105 | + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
---|
| 11106 | + } else { |
---|
| 11107 | + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
---|
| 11108 | + } |
---|
| 11109 | + link_info->advertising = link_info->auto_link_speeds; |
---|
| 11110 | + link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; |
---|
| 11111 | + } else { |
---|
| 11112 | + link_info->req_link_speed = link_info->force_link_speed; |
---|
| 11113 | + link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; |
---|
| 11114 | + if (link_info->force_pam4_link_speed) { |
---|
| 11115 | + link_info->req_link_speed = |
---|
| 11116 | + link_info->force_pam4_link_speed; |
---|
| 11117 | + link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; |
---|
| 11118 | + } |
---|
| 11119 | + link_info->req_duplex = link_info->duplex_setting; |
---|
| 11120 | + } |
---|
| 11121 | + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) |
---|
| 11122 | + link_info->req_flow_ctrl = |
---|
| 11123 | + link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; |
---|
| 11124 | + else |
---|
| 11125 | + link_info->req_flow_ctrl = link_info->force_pause_setting; |
---|
| 11126 | +} |
---|
7827 | 11127 | |
---|
7828 | 11128 | static void bnxt_sp_task(struct work_struct *work) |
---|
7829 | 11129 | { |
---|
.. | .. |
---|
7843 | 11143 | bnxt_cfg_ntp_filters(bp); |
---|
7844 | 11144 | if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) |
---|
7845 | 11145 | bnxt_hwrm_exec_fwd_req(bp); |
---|
7846 | | - if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { |
---|
7847 | | - bnxt_hwrm_tunnel_dst_port_alloc( |
---|
7848 | | - bp, bp->vxlan_port, |
---|
7849 | | - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); |
---|
7850 | | - } |
---|
7851 | | - if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { |
---|
7852 | | - bnxt_hwrm_tunnel_dst_port_free( |
---|
7853 | | - bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); |
---|
7854 | | - } |
---|
7855 | | - if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { |
---|
7856 | | - bnxt_hwrm_tunnel_dst_port_alloc( |
---|
7857 | | - bp, bp->nge_port, |
---|
7858 | | - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); |
---|
7859 | | - } |
---|
7860 | | - if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { |
---|
7861 | | - bnxt_hwrm_tunnel_dst_port_free( |
---|
7862 | | - bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); |
---|
7863 | | - } |
---|
7864 | 11146 | if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { |
---|
7865 | | - bnxt_hwrm_port_qstats(bp); |
---|
7866 | | - bnxt_hwrm_port_qstats_ext(bp); |
---|
| 11147 | + bnxt_hwrm_port_qstats(bp, 0); |
---|
| 11148 | + bnxt_hwrm_port_qstats_ext(bp, 0); |
---|
| 11149 | + bnxt_accumulate_all_stats(bp); |
---|
7867 | 11150 | } |
---|
7868 | 11151 | |
---|
7869 | 11152 | if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { |
---|
.. | .. |
---|
7875 | 11158 | bnxt_hwrm_phy_qcaps(bp); |
---|
7876 | 11159 | |
---|
7877 | 11160 | rc = bnxt_update_link(bp, true); |
---|
7878 | | - mutex_unlock(&bp->link_lock); |
---|
7879 | 11161 | if (rc) |
---|
7880 | 11162 | netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", |
---|
7881 | 11163 | rc); |
---|
| 11164 | + |
---|
| 11165 | + if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, |
---|
| 11166 | + &bp->sp_event)) |
---|
| 11167 | + bnxt_init_ethtool_link_settings(bp); |
---|
| 11168 | + mutex_unlock(&bp->link_lock); |
---|
7882 | 11169 | } |
---|
7883 | 11170 | if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { |
---|
7884 | 11171 | int rc; |
---|
.. | .. |
---|
7902 | 11189 | if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) |
---|
7903 | 11190 | bnxt_tc_flow_stats_work(bp); |
---|
7904 | 11191 | |
---|
| 11192 | + if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) |
---|
| 11193 | + bnxt_chk_missed_irq(bp); |
---|
| 11194 | + |
---|
7905 | 11195 | /* These functions below will clear BNXT_STATE_IN_SP_TASK. They |
---|
7906 | 11196 | * must be the last functions to be called before exiting. |
---|
7907 | 11197 | */ |
---|
.. | .. |
---|
7910 | 11200 | |
---|
7911 | 11201 | if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) |
---|
7912 | 11202 | bnxt_reset(bp, true); |
---|
| 11203 | + |
---|
| 11204 | + if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) |
---|
| 11205 | + bnxt_rx_ring_reset(bp); |
---|
| 11206 | + |
---|
| 11207 | + if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) |
---|
| 11208 | + bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); |
---|
| 11209 | + |
---|
| 11210 | + if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { |
---|
| 11211 | + if (!is_bnxt_fw_ok(bp)) |
---|
| 11212 | + bnxt_devlink_health_report(bp, |
---|
| 11213 | + BNXT_FW_EXCEPTION_SP_EVENT); |
---|
| 11214 | + } |
---|
7913 | 11215 | |
---|
7914 | 11216 | smp_mb__before_atomic(); |
---|
7915 | 11217 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); |
---|
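/* [editor's note] bnxt_sp_task() is driven by the pattern used throughout
 * this patch: producers set an event bit and queue the work item, and the
 * worker claims events with test_and_clear_bit() so each fires exactly
 * once even if posted several times. A self-contained C11 sketch of that
 * producer/worker shape (names invented, work queueing elided):
 */
#include <stdatomic.h>
#include <stdbool.h>

#define EV_LINK_CHNG	0
#define EV_RESET	1

static atomic_ulong sp_event;

static void handle_link_change(void) { }	/* stub handlers */
static void handle_reset(void) { }

static void post_event(int bit)		/* producer side */
{
	atomic_fetch_or(&sp_event, 1UL << bit);
	/* the driver also queues the work item at this point */
}

static bool take_event(int bit)		/* worker side */
{
	unsigned long mask = 1UL << bit;

	return atomic_fetch_and(&sp_event, ~mask) & mask;
}

static void sp_task(void)
{
	if (take_event(EV_LINK_CHNG))
		handle_link_change();
	if (take_event(EV_RESET))
		handle_reset();
}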
.. | .. |
---|
7920 | 11222 | int tx_xdp) |
---|
7921 | 11223 | { |
---|
7922 | 11224 | int max_rx, max_tx, tx_sets = 1; |
---|
7923 | | - int tx_rings_needed; |
---|
| 11225 | + int tx_rings_needed, stats; |
---|
7924 | 11226 | int rx_rings = rx; |
---|
7925 | 11227 | int cp, vnics, rc; |
---|
7926 | 11228 | |
---|
.. | .. |
---|
7939 | 11241 | return -ENOMEM; |
---|
7940 | 11242 | |
---|
7941 | 11243 | vnics = 1; |
---|
7942 | | - if (bp->flags & BNXT_FLAG_RFS) |
---|
| 11244 | + if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) |
---|
7943 | 11245 | vnics += rx_rings; |
---|
7944 | 11246 | |
---|
7945 | 11247 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
7946 | 11248 | rx_rings <<= 1; |
---|
7947 | 11249 | cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; |
---|
7948 | | - if (BNXT_NEW_RM(bp)) |
---|
| 11250 | + stats = cp; |
---|
| 11251 | + if (BNXT_NEW_RM(bp)) { |
---|
7949 | 11252 | cp += bnxt_get_ulp_msix_num(bp); |
---|
| 11253 | + stats += bnxt_get_ulp_stat_ctxs(bp); |
---|
| 11254 | + } |
---|
7950 | 11255 | return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, |
---|
7951 | | - vnics); |
---|
| 11256 | + stats, vnics); |
---|
7952 | 11257 | } |
---|
7953 | 11258 | |
---|
7954 | 11259 | static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) |
---|
.. | .. |
---|
7973 | 11278 | { |
---|
7974 | 11279 | bnxt_unmap_bars(bp, bp->pdev); |
---|
7975 | 11280 | pci_release_regions(bp->pdev); |
---|
7976 | | - pci_disable_device(bp->pdev); |
---|
| 11281 | + if (pci_is_enabled(bp->pdev)) |
---|
| 11282 | + pci_disable_device(bp->pdev); |
---|
7977 | 11283 | } |
---|
7978 | 11284 | |
---|
7979 | 11285 | static void bnxt_init_dflt_coal(struct bnxt *bp) |
---|
.. | .. |
---|
7984 | 11290 | * 1 coal_buf x bufs_per_record = 1 completion record. |
---|
7985 | 11291 | */ |
---|
7986 | 11292 | coal = &bp->rx_coal; |
---|
7987 | | - coal->coal_ticks = 14; |
---|
| 11293 | + coal->coal_ticks = 10; |
---|
7988 | 11294 | coal->coal_bufs = 30; |
---|
7989 | 11295 | coal->coal_ticks_irq = 1; |
---|
7990 | 11296 | coal->coal_bufs_irq = 2; |
---|
.. | .. |
---|
8000 | 11306 | coal->bufs_per_record = 1; |
---|
8001 | 11307 | |
---|
8002 | 11308 | bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; |
---|
| 11309 | +} |
---|
| 11310 | + |
---|
| 11311 | +static int bnxt_fw_reset_via_optee(struct bnxt *bp) |
---|
| 11312 | +{ |
---|
| 11313 | +#ifdef CONFIG_TEE_BNXT_FW |
---|
| 11314 | + int rc = tee_bnxt_fw_load(); |
---|
| 11315 | + |
---|
| 11316 | + if (rc) |
---|
| 11317 | + netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); |
---|
| 11318 | + |
---|
| 11319 | + return rc; |
---|
| 11320 | +#else |
---|
| 11321 | + netdev_err(bp->dev, "OP-TEE not supported\n"); |
---|
| 11322 | + return -ENODEV; |
---|
| 11323 | +#endif |
---|
| 11324 | +} |
---|
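/* [editor's note] bnxt_fw_reset_via_optee() uses the usual kernel idiom
 * for an optional dependency: when CONFIG_TEE_BNXT_FW is not built in,
 * the function collapses to an error stub so every caller stays
 * unchanged. The same shape in plain C; FEATURE_X and do_feature_x() are
 * invented names:
 */
#include <errno.h>

#ifdef FEATURE_X
int do_feature_x(void);		/* provided by the optional module */

static int feature_x_reset(void)
{
	return do_feature_x();
}
#else
static int feature_x_reset(void)
{
	return -ENODEV;		/* compiled out: "not supported" */
}
#endif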
| 11325 | + |
---|
| 11326 | +static int bnxt_fw_init_one_p1(struct bnxt *bp) |
---|
| 11327 | +{ |
---|
| 11328 | + int rc; |
---|
| 11329 | + |
---|
| 11330 | + bp->fw_cap = 0; |
---|
| 11331 | + rc = bnxt_hwrm_ver_get(bp); |
---|
| 11332 | + bnxt_try_map_fw_health_reg(bp); |
---|
| 11333 | + if (rc) { |
---|
| 11334 | + if (bp->fw_health && bp->fw_health->status_reliable) { |
---|
| 11335 | + u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
---|
| 11336 | + |
---|
| 11337 | + netdev_err(bp->dev, |
---|
| 11338 | + "Firmware not responding, status: 0x%x\n", |
---|
| 11339 | + sts); |
---|
| 11340 | + if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { |
---|
| 11341 | + netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); |
---|
| 11342 | + rc = bnxt_fw_reset_via_optee(bp); |
---|
| 11343 | + if (!rc) |
---|
| 11344 | + rc = bnxt_hwrm_ver_get(bp); |
---|
| 11345 | + } |
---|
| 11346 | + } |
---|
| 11347 | + if (rc) |
---|
| 11348 | + return rc; |
---|
| 11349 | + } |
---|
| 11350 | + |
---|
| 11351 | + if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { |
---|
| 11352 | + rc = bnxt_alloc_kong_hwrm_resources(bp); |
---|
| 11353 | + if (rc) |
---|
| 11354 | + bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; |
---|
| 11355 | + } |
---|
| 11356 | + |
---|
| 11357 | + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || |
---|
| 11358 | + bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { |
---|
| 11359 | + rc = bnxt_alloc_hwrm_short_cmd_req(bp); |
---|
| 11360 | + if (rc) |
---|
| 11361 | + return rc; |
---|
| 11362 | + } |
---|
| 11363 | + bnxt_nvm_cfg_ver_get(bp); |
---|
| 11364 | + |
---|
| 11365 | + rc = bnxt_hwrm_func_reset(bp); |
---|
| 11366 | + if (rc) |
---|
| 11367 | + return -ENODEV; |
---|
| 11368 | + |
---|
| 11369 | + bnxt_hwrm_fw_set_time(bp); |
---|
| 11370 | + return 0; |
---|
| 11371 | +} |
---|
| 11372 | + |
---|
| 11373 | +static int bnxt_fw_init_one_p2(struct bnxt *bp) |
---|
| 11374 | +{ |
---|
| 11375 | + int rc; |
---|
| 11376 | + |
---|
| 11377 | + /* Get the MAX capabilities for this function */ |
---|
| 11378 | + rc = bnxt_hwrm_func_qcaps(bp); |
---|
| 11379 | + if (rc) { |
---|
| 11380 | + netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", |
---|
| 11381 | + rc); |
---|
| 11382 | + return -ENODEV; |
---|
| 11383 | + } |
---|
| 11384 | + |
---|
| 11385 | + rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); |
---|
| 11386 | + if (rc) |
---|
| 11387 | + netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", |
---|
| 11388 | + rc); |
---|
| 11389 | + |
---|
| 11390 | + if (bnxt_alloc_fw_health(bp)) { |
---|
| 11391 | + netdev_warn(bp->dev, "no memory for firmware error recovery\n"); |
---|
| 11392 | + } else { |
---|
| 11393 | + rc = bnxt_hwrm_error_recovery_qcfg(bp); |
---|
| 11394 | + if (rc) |
---|
| 11395 | + netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", |
---|
| 11396 | + rc); |
---|
| 11397 | + } |
---|
| 11398 | + |
---|
| 11399 | + rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); |
---|
| 11400 | + if (rc) |
---|
| 11401 | + return -ENODEV; |
---|
| 11402 | + |
---|
| 11403 | + bnxt_hwrm_func_qcfg(bp); |
---|
| 11404 | + bnxt_hwrm_vnic_qcaps(bp); |
---|
| 11405 | + bnxt_hwrm_port_led_qcaps(bp); |
---|
| 11406 | + bnxt_ethtool_init(bp); |
---|
| 11407 | + bnxt_dcb_init(bp); |
---|
| 11408 | + return 0; |
---|
| 11409 | +} |
---|
| 11410 | + |
---|
| 11411 | +static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) |
---|
| 11412 | +{ |
---|
| 11413 | + bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; |
---|
| 11414 | + bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | |
---|
| 11415 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | |
---|
| 11416 | + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | |
---|
| 11417 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; |
---|
| 11418 | + if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { |
---|
| 11419 | + bp->flags |= BNXT_FLAG_UDP_RSS_CAP; |
---|
| 11420 | + bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | |
---|
| 11421 | + VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; |
---|
| 11422 | + } |
---|
| 11423 | +} |
---|
| 11424 | + |
---|
| 11425 | +static void bnxt_set_dflt_rfs(struct bnxt *bp) |
---|
| 11426 | +{ |
---|
| 11427 | + struct net_device *dev = bp->dev; |
---|
| 11428 | + |
---|
| 11429 | + dev->hw_features &= ~NETIF_F_NTUPLE; |
---|
| 11430 | + dev->features &= ~NETIF_F_NTUPLE; |
---|
| 11431 | + bp->flags &= ~BNXT_FLAG_RFS; |
---|
| 11432 | + if (bnxt_rfs_supported(bp)) { |
---|
| 11433 | + dev->hw_features |= NETIF_F_NTUPLE; |
---|
| 11434 | + if (bnxt_rfs_capable(bp)) { |
---|
| 11435 | + bp->flags |= BNXT_FLAG_RFS; |
---|
| 11436 | + dev->features |= NETIF_F_NTUPLE; |
---|
| 11437 | + } |
---|
| 11438 | + } |
---|
| 11439 | +} |
---|
| 11440 | + |
---|
| 11441 | +static void bnxt_fw_init_one_p3(struct bnxt *bp) |
---|
| 11442 | +{ |
---|
| 11443 | + struct pci_dev *pdev = bp->pdev; |
---|
| 11444 | + |
---|
| 11445 | + bnxt_set_dflt_rss_hash_type(bp); |
---|
| 11446 | + bnxt_set_dflt_rfs(bp); |
---|
| 11447 | + |
---|
| 11448 | + bnxt_get_wol_settings(bp); |
---|
| 11449 | + if (bp->flags & BNXT_FLAG_WOL_CAP) |
---|
| 11450 | + device_set_wakeup_enable(&pdev->dev, bp->wol); |
---|
| 11451 | + else |
---|
| 11452 | + device_set_wakeup_capable(&pdev->dev, false); |
---|
| 11453 | + |
---|
| 11454 | + bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); |
---|
| 11455 | + bnxt_hwrm_coal_params_qcaps(bp); |
---|
| 11456 | +} |
---|
| 11457 | + |
---|
| 11458 | +static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); |
---|
| 11459 | + |
---|
| 11460 | +static int bnxt_fw_init_one(struct bnxt *bp) |
---|
| 11461 | +{ |
---|
| 11462 | + int rc; |
---|
| 11463 | + |
---|
| 11464 | + rc = bnxt_fw_init_one_p1(bp); |
---|
| 11465 | + if (rc) { |
---|
| 11466 | + netdev_err(bp->dev, "Firmware init phase 1 failed\n"); |
---|
| 11467 | + return rc; |
---|
| 11468 | + } |
---|
| 11469 | + rc = bnxt_fw_init_one_p2(bp); |
---|
| 11470 | + if (rc) { |
---|
| 11471 | + netdev_err(bp->dev, "Firmware init phase 2 failed\n"); |
---|
| 11472 | + return rc; |
---|
| 11473 | + } |
---|
| 11474 | + rc = bnxt_probe_phy(bp, false); |
---|
| 11475 | + if (rc) |
---|
| 11476 | + return rc; |
---|
| 11477 | + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); |
---|
| 11478 | + if (rc) |
---|
| 11479 | + return rc; |
---|
| 11480 | + |
---|
| 11481 | + /* In case fw capabilities have changed, destroy the unneeded |
---|
| 11482 | + * reporters and create newly capable ones. |
---|
| 11483 | + */ |
---|
| 11484 | + bnxt_dl_fw_reporters_destroy(bp, false); |
---|
| 11485 | + bnxt_dl_fw_reporters_create(bp); |
---|
| 11486 | + bnxt_fw_init_one_p3(bp); |
---|
| 11487 | + return 0; |
---|
| 11488 | +} |
---|
| 11489 | + |
---|
| 11490 | +static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) |
---|
| 11491 | +{ |
---|
| 11492 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 11493 | + u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; |
---|
| 11494 | + u32 val = fw_health->fw_reset_seq_vals[reg_idx]; |
---|
| 11495 | + u32 reg_type, reg_off, delay_msecs; |
---|
| 11496 | + |
---|
| 11497 | + delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; |
---|
| 11498 | + reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); |
---|
| 11499 | + reg_off = BNXT_FW_HEALTH_REG_OFF(reg); |
---|
| 11500 | + switch (reg_type) { |
---|
| 11501 | + case BNXT_FW_HEALTH_REG_TYPE_CFG: |
---|
| 11502 | + pci_write_config_dword(bp->pdev, reg_off, val); |
---|
| 11503 | + break; |
---|
| 11504 | + case BNXT_FW_HEALTH_REG_TYPE_GRC: |
---|
| 11505 | + writel(reg_off & BNXT_GRC_BASE_MASK, |
---|
| 11506 | + bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); |
---|
| 11507 | + reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; |
---|
| 11508 | + fallthrough; |
---|
| 11509 | + case BNXT_FW_HEALTH_REG_TYPE_BAR0: |
---|
| 11510 | + writel(val, bp->bar0 + reg_off); |
---|
| 11511 | + break; |
---|
| 11512 | + case BNXT_FW_HEALTH_REG_TYPE_BAR1: |
---|
| 11513 | + writel(val, bp->bar1 + reg_off); |
---|
| 11514 | + break; |
---|
| 11515 | + } |
---|
| 11516 | + if (delay_msecs) { |
---|
| 11517 | + pci_read_config_dword(bp->pdev, 0, &val); |
---|
| 11518 | + msleep(delay_msecs); |
---|
| 11519 | + } |
---|
| 11520 | +} |
---|
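/* [editor's note] Each entry of the firmware-supplied reset sequence packs
 * the target address space (PCI config, GRC window, BAR0, BAR1) and the
 * register offset into one 32-bit word; BNXT_FW_HEALTH_REG_TYPE() and
 * BNXT_FW_HEALTH_REG_OFF() unpack it ahead of the switch above. A generic
 * sketch of such an encoding; using the two low bits for the space is an
 * illustrative choice, not necessarily the real layout:
 */
#include <stdint.h>

enum reg_space { SPACE_CFG, SPACE_GRC, SPACE_BAR0, SPACE_BAR1 };

#define REG_SPACE_MASK	0x3u

static inline enum reg_space reg_type(uint32_t reg)
{
	return (enum reg_space)(reg & REG_SPACE_MASK);
}

static inline uint32_t reg_off(uint32_t reg)
{
	return reg & ~REG_SPACE_MASK;	/* offset with the space bits cleared */
}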
| 11521 | + |
---|
| 11522 | +static void bnxt_reset_all(struct bnxt *bp) |
---|
| 11523 | +{ |
---|
| 11524 | + struct bnxt_fw_health *fw_health = bp->fw_health; |
---|
| 11525 | + int i, rc; |
---|
| 11526 | + |
---|
| 11527 | + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
---|
| 11528 | + bnxt_fw_reset_via_optee(bp); |
---|
| 11529 | + bp->fw_reset_timestamp = jiffies; |
---|
| 11530 | + return; |
---|
| 11531 | + } |
---|
| 11532 | + |
---|
| 11533 | + if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { |
---|
| 11534 | + for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) |
---|
| 11535 | + bnxt_fw_reset_writel(bp, i); |
---|
| 11536 | + } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { |
---|
| 11537 | + struct hwrm_fw_reset_input req = {0}; |
---|
| 11538 | + |
---|
| 11539 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); |
---|
| 11540 | + req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); |
---|
| 11541 | + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; |
---|
| 11542 | + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; |
---|
| 11543 | + req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; |
---|
| 11544 | + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
| 11545 | + if (rc) |
---|
| 11546 | + netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); |
---|
| 11547 | + } |
---|
| 11548 | + bp->fw_reset_timestamp = jiffies; |
---|
| 11549 | +} |
---|
| 11550 | + |
---|
| 11551 | +static void bnxt_fw_reset_task(struct work_struct *work) |
---|
| 11552 | +{ |
---|
| 11553 | + struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); |
---|
| 11554 | + int rc; |
---|
| 11555 | + |
---|
| 11556 | + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
---|
| 11557 | + netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); |
---|
| 11558 | + return; |
---|
| 11559 | + } |
---|
| 11560 | + |
---|
| 11561 | + switch (bp->fw_reset_state) { |
---|
| 11562 | + case BNXT_FW_RESET_STATE_POLL_VF: { |
---|
| 11563 | + int n = bnxt_get_registered_vfs(bp); |
---|
| 11564 | + int tmo; |
---|
| 11565 | + |
---|
| 11566 | + if (n < 0) { |
---|
| 11567 | + netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", |
---|
| 11568 | + n, jiffies_to_msecs(jiffies - |
---|
| 11569 | + bp->fw_reset_timestamp)); |
---|
| 11570 | + goto fw_reset_abort; |
---|
| 11571 | + } else if (n > 0) { |
---|
| 11572 | + if (time_after(jiffies, bp->fw_reset_timestamp + |
---|
| 11573 | + (bp->fw_reset_max_dsecs * HZ / 10))) { |
---|
| 11574 | + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
---|
| 11575 | + bp->fw_reset_state = 0; |
---|
| 11576 | + netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", |
---|
| 11577 | + n); |
---|
| 11578 | + return; |
---|
| 11579 | + } |
---|
| 11580 | + bnxt_queue_fw_reset_work(bp, HZ / 10); |
---|
| 11581 | + return; |
---|
| 11582 | + } |
---|
| 11583 | + bp->fw_reset_timestamp = jiffies; |
---|
| 11584 | + rtnl_lock(); |
---|
| 11585 | + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
---|
| 11586 | + rtnl_unlock(); |
---|
| 11587 | + goto fw_reset_abort; |
---|
| 11588 | + } |
---|
| 11589 | + bnxt_fw_reset_close(bp); |
---|
| 11590 | + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
---|
| 11591 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; |
---|
| 11592 | + tmo = HZ / 10; |
---|
| 11593 | + } else { |
---|
| 11594 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
---|
| 11595 | + tmo = bp->fw_reset_min_dsecs * HZ / 10; |
---|
| 11596 | + } |
---|
| 11597 | + rtnl_unlock(); |
---|
| 11598 | + bnxt_queue_fw_reset_work(bp, tmo); |
---|
| 11599 | + return; |
---|
| 11600 | + } |
---|
| 11601 | + case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { |
---|
| 11602 | + u32 val; |
---|
| 11603 | + |
---|
| 11604 | + val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
---|
| 11605 | + if (!(val & BNXT_FW_STATUS_SHUTDOWN) && |
---|
| 11606 | + !time_after(jiffies, bp->fw_reset_timestamp + |
---|
| 11607 | + (bp->fw_reset_max_dsecs * HZ / 10))) { |
---|
| 11608 | + bnxt_queue_fw_reset_work(bp, HZ / 5); |
---|
| 11609 | + return; |
---|
| 11610 | + } |
---|
| 11611 | + |
---|
| 11612 | + if (!bp->fw_health->master) { |
---|
| 11613 | + u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; |
---|
| 11614 | + |
---|
| 11615 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
---|
| 11616 | + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); |
---|
| 11617 | + return; |
---|
| 11618 | + } |
---|
| 11619 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; |
---|
| 11620 | + } |
---|
| 11621 | + fallthrough; |
---|
| 11622 | + case BNXT_FW_RESET_STATE_RESET_FW: |
---|
| 11623 | + bnxt_reset_all(bp); |
---|
| 11624 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
---|
| 11625 | + bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); |
---|
| 11626 | + return; |
---|
| 11627 | + case BNXT_FW_RESET_STATE_ENABLE_DEV: |
---|
| 11628 | + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { |
---|
| 11629 | + u32 val; |
---|
| 11630 | + |
---|
| 11631 | + val = bnxt_fw_health_readl(bp, |
---|
| 11632 | + BNXT_FW_RESET_INPROG_REG); |
---|
| 11633 | + if (val) |
---|
| 11634 | + netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", |
---|
| 11635 | + val); |
---|
| 11636 | + } |
---|
| 11637 | + clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); |
---|
| 11638 | + if (pci_enable_device(bp->pdev)) { |
---|
| 11639 | + netdev_err(bp->dev, "Cannot re-enable PCI device\n"); |
---|
| 11640 | + goto fw_reset_abort; |
---|
| 11641 | + } |
---|
| 11642 | + pci_set_master(bp->pdev); |
---|
| 11643 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; |
---|
| 11644 | + fallthrough; |
---|
| 11645 | + case BNXT_FW_RESET_STATE_POLL_FW: |
---|
| 11646 | + bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; |
---|
| 11647 | + rc = __bnxt_hwrm_ver_get(bp, true); |
---|
| 11648 | + if (rc) { |
---|
| 11649 | + if (time_after(jiffies, bp->fw_reset_timestamp + |
---|
| 11650 | + (bp->fw_reset_max_dsecs * HZ / 10))) { |
---|
| 11651 | + netdev_err(bp->dev, "Firmware reset aborted\n"); |
---|
| 11652 | + goto fw_reset_abort_status; |
---|
| 11653 | + } |
---|
| 11654 | + bnxt_queue_fw_reset_work(bp, HZ / 5); |
---|
| 11655 | + return; |
---|
| 11656 | + } |
---|
| 11657 | + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; |
---|
| 11658 | + bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; |
---|
| 11659 | + fallthrough; |
---|
| 11660 | + case BNXT_FW_RESET_STATE_OPENING: |
---|
| 11661 | + while (!rtnl_trylock()) { |
---|
| 11662 | + bnxt_queue_fw_reset_work(bp, HZ / 10); |
---|
| 11663 | + return; |
---|
| 11664 | + } |
---|
| 11665 | + rc = bnxt_open(bp->dev); |
---|
| 11666 | + if (rc) { |
---|
| 11667 | + netdev_err(bp->dev, "bnxt_open_nic() failed\n"); |
---|
| 11668 | + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
---|
| 11669 | + dev_close(bp->dev); |
---|
| 11670 | + } |
---|
| 11671 | + |
---|
| 11672 | + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && |
---|
| 11673 | + bp->fw_health->enabled) { |
---|
| 11674 | + bp->fw_health->last_fw_reset_cnt = |
---|
| 11675 | + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
---|
| 11676 | + } |
---|
| 11677 | + bp->fw_reset_state = 0; |
---|
| 11678 | + /* Make sure fw_reset_state is 0 before clearing the flag */ |
---|
| 11679 | + smp_mb__before_atomic(); |
---|
| 11680 | + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
---|
| 11681 | + bnxt_ulp_start(bp, rc); |
---|
| 11682 | + if (!rc) |
---|
| 11683 | + bnxt_reenable_sriov(bp); |
---|
| 11684 | + bnxt_dl_health_recovery_done(bp); |
---|
| 11685 | + bnxt_dl_health_status_update(bp, true); |
---|
| 11686 | + rtnl_unlock(); |
---|
| 11687 | + break; |
---|
| 11688 | + } |
---|
| 11689 | + return; |
---|
| 11690 | + |
---|
| 11691 | +fw_reset_abort_status: |
---|
| 11692 | + if (bp->fw_health->status_reliable || |
---|
| 11693 | + (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { |
---|
| 11694 | + u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
---|
| 11695 | + |
---|
| 11696 | + netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); |
---|
| 11697 | + } |
---|
| 11698 | +fw_reset_abort: |
---|
| 11699 | + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
---|
| 11700 | + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) |
---|
| 11701 | + bnxt_dl_health_status_update(bp, false); |
---|
| 11702 | + bp->fw_reset_state = 0; |
---|
| 11703 | + rtnl_lock(); |
---|
| 11704 | + dev_close(bp->dev); |
---|
| 11705 | + rtnl_unlock(); |
---|
8003 | 11706 | } |
---|
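/* [editor's note] bnxt_fw_reset_task() is one delayed-work function that
 * re-queues itself and advances bp->fw_reset_state, falling through when a
 * state completes immediately. Stripped of the hardware specifics, the
 * skeleton looks roughly like this (requeue() stands in for
 * bnxt_queue_fw_reset_work(); all names invented):
 */
enum reset_state { ST_POLL_FW_DOWN, ST_RESET_FW, ST_ENABLE_DEV, ST_DONE };

struct reset_ctx {
	enum reset_state state;
};

static void requeue(struct reset_ctx *c, unsigned int delay_ms)
{
	(void)c; (void)delay_ms;	/* would re-arm the delayed work */
}

static void reset_work(struct reset_ctx *c)
{
	switch (c->state) {
	case ST_POLL_FW_DOWN:
		if (0 /* firmware not down yet */) {
			requeue(c, 200);	/* poll again shortly */
			return;
		}
		c->state = ST_RESET_FW;
		/* fall through */
	case ST_RESET_FW:
		/* issue the reset */
		c->state = ST_ENABLE_DEV;
		requeue(c, 100);	/* give firmware time to restart */
		return;
	case ST_ENABLE_DEV:
		/* re-enable the PCI device and reopen the netdev */
		c->state = ST_DONE;
		break;
	case ST_DONE:
		break;
	}
}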
8004 | 11707 | |
---|
8005 | 11708 | static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) |
---|
.. | .. |
---|
8041 | 11744 | bp->dev = dev; |
---|
8042 | 11745 | bp->pdev = pdev; |
---|
8043 | 11746 | |
---|
| 11747 | + /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() |
---|
| 11748 | + * determines the BAR size. |
---|
| 11749 | + */ |
---|
8044 | 11750 | bp->bar0 = pci_ioremap_bar(pdev, 0); |
---|
8045 | 11751 | if (!bp->bar0) { |
---|
8046 | 11752 | dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); |
---|
8047 | | - rc = -ENOMEM; |
---|
8048 | | - goto init_err_release; |
---|
8049 | | - } |
---|
8050 | | - |
---|
8051 | | - bp->bar1 = pci_ioremap_bar(pdev, 2); |
---|
8052 | | - if (!bp->bar1) { |
---|
8053 | | - dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); |
---|
8054 | 11753 | rc = -ENOMEM; |
---|
8055 | 11754 | goto init_err_release; |
---|
8056 | 11755 | } |
---|
.. | .. |
---|
8065 | 11764 | pci_enable_pcie_error_reporting(pdev); |
---|
8066 | 11765 | |
---|
8067 | 11766 | INIT_WORK(&bp->sp_task, bnxt_sp_task); |
---|
| 11767 | + INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); |
---|
8068 | 11768 | |
---|
8069 | 11769 | spin_lock_init(&bp->ntp_fltr_lock); |
---|
| 11770 | +#if BITS_PER_LONG == 32 |
---|
| 11771 | + spin_lock_init(&bp->db_lock); |
---|
| 11772 | +#endif |
---|
8070 | 11773 | |
---|
8071 | 11774 | bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; |
---|
8072 | 11775 | bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; |
---|
.. | .. |
---|
8075 | 11778 | |
---|
8076 | 11779 | timer_setup(&bp->timer, bnxt_timer, 0); |
---|
8077 | 11780 | bp->current_interval = BNXT_TIMER_INTERVAL; |
---|
| 11781 | + |
---|
| 11782 | + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; |
---|
| 11783 | + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; |
---|
8078 | 11784 | |
---|
8079 | 11785 | clear_bit(BNXT_STATE_OPEN, &bp->state); |
---|
8080 | 11786 | return 0; |
---|
.. | .. |
---|
8170 | 11876 | bp->tx_nr_rings += bp->tx_nr_rings_xdp; |
---|
8171 | 11877 | bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : |
---|
8172 | 11878 | bp->tx_nr_rings + bp->rx_nr_rings; |
---|
8173 | | - bp->num_stat_ctxs = bp->cp_nr_rings; |
---|
8174 | 11879 | |
---|
8175 | 11880 | if (netif_running(bp->dev)) |
---|
8176 | 11881 | return bnxt_open_nic(bp, true, false); |
---|
.. | .. |
---|
8195 | 11900 | } |
---|
8196 | 11901 | } |
---|
8197 | 11902 | |
---|
8198 | | -static int bnxt_setup_tc_block(struct net_device *dev, |
---|
8199 | | - struct tc_block_offload *f) |
---|
8200 | | -{ |
---|
8201 | | - struct bnxt *bp = netdev_priv(dev); |
---|
8202 | | - |
---|
8203 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
8204 | | - return -EOPNOTSUPP; |
---|
8205 | | - |
---|
8206 | | - switch (f->command) { |
---|
8207 | | - case TC_BLOCK_BIND: |
---|
8208 | | - return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, |
---|
8209 | | - bp, bp, f->extack); |
---|
8210 | | - case TC_BLOCK_UNBIND: |
---|
8211 | | - tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); |
---|
8212 | | - return 0; |
---|
8213 | | - default: |
---|
8214 | | - return -EOPNOTSUPP; |
---|
8215 | | - } |
---|
8216 | | -} |
---|
| 11903 | +LIST_HEAD(bnxt_block_cb_list); |
---|
8217 | 11904 | |
---|
8218 | 11905 | static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, |
---|
8219 | 11906 | void *type_data) |
---|
8220 | 11907 | { |
---|
| 11908 | + struct bnxt *bp = netdev_priv(dev); |
---|
| 11909 | + |
---|
8221 | 11910 | switch (type) { |
---|
8222 | 11911 | case TC_SETUP_BLOCK: |
---|
8223 | | - return bnxt_setup_tc_block(dev, type_data); |
---|
| 11912 | + return flow_block_cb_setup_simple(type_data, |
---|
| 11913 | + &bnxt_block_cb_list, |
---|
| 11914 | + bnxt_setup_tc_block_cb, |
---|
| 11915 | + bp, bp, true); |
---|
8224 | 11916 | case TC_SETUP_QDISC_MQPRIO: { |
---|
8225 | 11917 | struct tc_mqprio_qopt *mqprio = type_data; |
---|
8226 | 11918 | |
---|
.. | .. |
---|
8274 | 11966 | struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); |
---|
8275 | 11967 | int rc = 0, idx, bit_id, l2_idx = 0; |
---|
8276 | 11968 | struct hlist_head *head; |
---|
| 11969 | + u32 flags; |
---|
8277 | 11970 | |
---|
8278 | 11971 | if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { |
---|
8279 | 11972 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
---|
.. | .. |
---|
8313 | 12006 | rc = -EPROTONOSUPPORT; |
---|
8314 | 12007 | goto err_free; |
---|
8315 | 12008 | } |
---|
8316 | | - if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && |
---|
8317 | | - bp->hwrm_spec_code < 0x10601) { |
---|
| 12009 | + flags = fkeys->control.flags; |
---|
| 12010 | + if (((flags & FLOW_DIS_ENCAPSULATION) && |
---|
| 12011 | + bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { |
---|
8318 | 12012 | rc = -EPROTONOSUPPORT; |
---|
8319 | 12013 | goto err_free; |
---|
8320 | 12014 | } |
---|
.. | .. |
---|
8327 | 12021 | rcu_read_lock(); |
---|
8328 | 12022 | hlist_for_each_entry_rcu(fltr, head, hash) { |
---|
8329 | 12023 | if (bnxt_fltr_match(fltr, new_fltr)) { |
---|
| 12024 | + rc = fltr->sw_id; |
---|
8330 | 12025 | rcu_read_unlock(); |
---|
8331 | | - rc = 0; |
---|
8332 | 12026 | goto err_free; |
---|
8333 | 12027 | } |
---|
8334 | 12028 | } |
---|
.. | .. |
---|
8404 | 12098 | } |
---|
8405 | 12099 | } |
---|
8406 | 12100 | if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) |
---|
8407 | | - netdev_info(bp->dev, "Receive PF driver unload event!"); |
---|
| 12101 | + netdev_info(bp->dev, "Receive PF driver unload event!\n"); |
---|
8408 | 12102 | } |
---|
8409 | 12103 | |
---|
8410 | 12104 | #else |
---|
.. | .. |
---|
8415 | 12109 | |
---|
8416 | 12110 | #endif /* CONFIG_RFS_ACCEL */ |
---|
8417 | 12111 | |
---|
8418 | | -static void bnxt_udp_tunnel_add(struct net_device *dev, |
---|
8419 | | - struct udp_tunnel_info *ti) |
---|
| 12112 | +static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, |
---|
| 12113 | + unsigned int entry, struct udp_tunnel_info *ti) |
---|
8420 | 12114 | { |
---|
8421 | | - struct bnxt *bp = netdev_priv(dev); |
---|
| 12115 | + struct bnxt *bp = netdev_priv(netdev); |
---|
| 12116 | + unsigned int cmd; |
---|
8422 | 12117 | |
---|
8423 | | - if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) |
---|
8424 | | - return; |
---|
| 12118 | + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) |
---|
| 12119 | + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; |
---|
| 12120 | + else |
---|
| 12121 | + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; |
---|
8425 | 12122 | |
---|
8426 | | - if (!netif_running(dev)) |
---|
8427 | | - return; |
---|
8428 | | - |
---|
8429 | | - switch (ti->type) { |
---|
8430 | | - case UDP_TUNNEL_TYPE_VXLAN: |
---|
8431 | | - if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) |
---|
8432 | | - return; |
---|
8433 | | - |
---|
8434 | | - bp->vxlan_port_cnt++; |
---|
8435 | | - if (bp->vxlan_port_cnt == 1) { |
---|
8436 | | - bp->vxlan_port = ti->port; |
---|
8437 | | - set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); |
---|
8438 | | - bnxt_queue_sp_work(bp); |
---|
8439 | | - } |
---|
8440 | | - break; |
---|
8441 | | - case UDP_TUNNEL_TYPE_GENEVE: |
---|
8442 | | - if (bp->nge_port_cnt && bp->nge_port != ti->port) |
---|
8443 | | - return; |
---|
8444 | | - |
---|
8445 | | - bp->nge_port_cnt++; |
---|
8446 | | - if (bp->nge_port_cnt == 1) { |
---|
8447 | | - bp->nge_port = ti->port; |
---|
8448 | | - set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); |
---|
8449 | | - } |
---|
8450 | | - break; |
---|
8451 | | - default: |
---|
8452 | | - return; |
---|
8453 | | - } |
---|
8454 | | - |
---|
8455 | | - bnxt_queue_sp_work(bp); |
---|
| 12123 | + return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); |
---|
8456 | 12124 | } |
---|
8457 | 12125 | |
---|
8458 | | -static void bnxt_udp_tunnel_del(struct net_device *dev, |
---|
8459 | | - struct udp_tunnel_info *ti) |
---|
| 12126 | +static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, |
---|
| 12127 | + unsigned int entry, struct udp_tunnel_info *ti) |
---|
8460 | 12128 | { |
---|
8461 | | - struct bnxt *bp = netdev_priv(dev); |
---|
| 12129 | + struct bnxt *bp = netdev_priv(netdev); |
---|
| 12130 | + unsigned int cmd; |
---|
8462 | 12131 | |
---|
8463 | | - if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) |
---|
8464 | | - return; |
---|
| 12132 | + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) |
---|
| 12133 | + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; |
---|
| 12134 | + else |
---|
| 12135 | + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; |
---|
8465 | 12136 | |
---|
8466 | | - if (!netif_running(dev)) |
---|
8467 | | - return; |
---|
8468 | | - |
---|
8469 | | - switch (ti->type) { |
---|
8470 | | - case UDP_TUNNEL_TYPE_VXLAN: |
---|
8471 | | - if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) |
---|
8472 | | - return; |
---|
8473 | | - bp->vxlan_port_cnt--; |
---|
8474 | | - |
---|
8475 | | - if (bp->vxlan_port_cnt != 0) |
---|
8476 | | - return; |
---|
8477 | | - |
---|
8478 | | - set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); |
---|
8479 | | - break; |
---|
8480 | | - case UDP_TUNNEL_TYPE_GENEVE: |
---|
8481 | | - if (!bp->nge_port_cnt || bp->nge_port != ti->port) |
---|
8482 | | - return; |
---|
8483 | | - bp->nge_port_cnt--; |
---|
8484 | | - |
---|
8485 | | - if (bp->nge_port_cnt != 0) |
---|
8486 | | - return; |
---|
8487 | | - |
---|
8488 | | - set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); |
---|
8489 | | - break; |
---|
8490 | | - default: |
---|
8491 | | - return; |
---|
8492 | | - } |
---|
8493 | | - |
---|
8494 | | - bnxt_queue_sp_work(bp); |
---|
| 12137 | + return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); |
---|
8495 | 12138 | } |
---|
| 12139 | + |
---|
| 12140 | +static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { |
---|
| 12141 | + .set_port = bnxt_udp_tunnel_set_port, |
---|
| 12142 | + .unset_port = bnxt_udp_tunnel_unset_port, |
---|
| 12143 | + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | |
---|
| 12144 | + UDP_TUNNEL_NIC_INFO_OPEN_ONLY, |
---|
| 12145 | + .tables = { |
---|
| 12146 | + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, |
---|
| 12147 | + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, |
---|
| 12148 | + }, |
---|
| 12149 | +}; |
---|
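/* [editor's note] This table replaces the hand-rolled port refcounting of
 * the deleted ndo_udp_tunnel_add/del handlers: the udp_tunnel_nic core now
 * tracks offloaded ports and calls .set_port/.unset_port when an entry
 * changes. UDP_TUNNEL_NIC_INFO_OPEN_ONLY makes the core replay and
 * withdraw ports across open/close, and MAY_SLEEP permits the blocking
 * HWRM calls above. The table is registered at probe time elsewhere in
 * this patch; a sketch of that hookup (the helper name is invented):
 */
static void bnxt_hook_udp_tunnels_sketch(struct net_device *dev)
{
	dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
}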
8496 | 12150 | |
---|
8497 | 12151 | static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
---|
8498 | 12152 | struct net_device *dev, u32 filter_mask, |
---|
.. | .. |
---|
8505 | 12159 | } |
---|
8506 | 12160 | |
---|
8507 | 12161 | static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, |
---|
8508 | | - u16 flags) |
---|
| 12162 | + u16 flags, struct netlink_ext_ack *extack) |
---|
8509 | 12163 | { |
---|
8510 | 12164 | struct bnxt *bp = netdev_priv(dev); |
---|
8511 | 12165 | struct nlattr *attr, *br_spec; |
---|
.. | .. |
---|
8539 | 12193 | return rc; |
---|
8540 | 12194 | } |
---|
8541 | 12195 | |
---|
8542 | | -static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, |
---|
8543 | | - size_t len) |
---|
| 12196 | +int bnxt_get_port_parent_id(struct net_device *dev, |
---|
| 12197 | + struct netdev_phys_item_id *ppid) |
---|
8544 | 12198 | { |
---|
8545 | 12199 | struct bnxt *bp = netdev_priv(dev); |
---|
8546 | | - int rc; |
---|
8547 | 12200 | |
---|
8548 | | - /* The PF and it's VF-reps only support the switchdev framework */ |
---|
8549 | | - if (!BNXT_PF(bp)) |
---|
8550 | | - return -EOPNOTSUPP; |
---|
8551 | | - |
---|
8552 | | - rc = snprintf(buf, len, "p%d", bp->pf.port_id); |
---|
8553 | | - |
---|
8554 | | - if (rc >= len) |
---|
8555 | | - return -EOPNOTSUPP; |
---|
8556 | | - return 0; |
---|
8557 | | -} |
---|
8558 | | - |
---|
8559 | | -int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) |
---|
8560 | | -{ |
---|
8561 | 12201 | if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) |
---|
8562 | 12202 | return -EOPNOTSUPP; |
---|
8563 | 12203 | |
---|
8564 | 12204 | /* The PF and its VF-reps only support the switchdev framework */
---|
8565 | | - if (!BNXT_PF(bp)) |
---|
| 12205 | + if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) |
---|
8566 | 12206 | return -EOPNOTSUPP; |
---|
8567 | 12207 | |
---|
8568 | | - switch (attr->id) { |
---|
8569 | | - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: |
---|
8570 | | - attr->u.ppid.id_len = sizeof(bp->switch_id); |
---|
8571 | | - memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len); |
---|
8572 | | - break; |
---|
8573 | | - default: |
---|
8574 | | - return -EOPNOTSUPP; |
---|
8575 | | - } |
---|
| 12208 | + ppid->id_len = sizeof(bp->dsn); |
---|
| 12209 | + memcpy(ppid->id, bp->dsn, ppid->id_len); |
---|
| 12210 | + |
---|
8576 | 12211 | return 0; |
---|
8577 | 12212 | } |
---|
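/* [editor's note] With the switchdev ops removed, the parent ID above is
 * what still groups the PF and its VF representors into one switch: both
 * now report the PCIe Device Serial Number (bp->dsn) as the shared
 * identifier. A minimal sketch of the copy, with an invented struct
 * standing in for netdev_phys_item_id:
 */
#include <stdint.h>
#include <string.h>

struct phys_item_id {
	uint8_t id[32];		/* MAX_PHYS_ITEM_ID_LEN in the kernel */
	uint8_t id_len;
};

static void fill_parent_id(struct phys_item_id *ppid, const uint8_t dsn[8])
{
	ppid->id_len = 8;	/* the DSN is a 64-bit value */
	memcpy(ppid->id, dsn, ppid->id_len);
}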
8578 | 12213 | |
---|
8579 | | -static int bnxt_swdev_port_attr_get(struct net_device *dev, |
---|
8580 | | - struct switchdev_attr *attr) |
---|
| 12214 | +static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) |
---|
8581 | 12215 | { |
---|
8582 | | - return bnxt_port_attr_get(netdev_priv(dev), attr); |
---|
8583 | | -} |
---|
| 12216 | + struct bnxt *bp = netdev_priv(dev); |
---|
8584 | 12217 | |
---|
8585 | | -static const struct switchdev_ops bnxt_switchdev_ops = { |
---|
8586 | | - .switchdev_port_attr_get = bnxt_swdev_port_attr_get |
---|
8587 | | -}; |
---|
| 12218 | + return &bp->dl_port; |
---|
| 12219 | +} |
---|
8588 | 12220 | |
---|
8589 | 12221 | static const struct net_device_ops bnxt_netdev_ops = { |
---|
8590 | 12222 | .ndo_open = bnxt_open, |
---|
.. | .. |
---|
8612 | 12244 | #ifdef CONFIG_RFS_ACCEL |
---|
8613 | 12245 | .ndo_rx_flow_steer = bnxt_rx_flow_steer, |
---|
8614 | 12246 | #endif |
---|
8615 | | - .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, |
---|
8616 | | - .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, |
---|
| 12247 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
---|
| 12248 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
---|
8617 | 12249 | .ndo_bpf = bnxt_xdp, |
---|
| 12250 | + .ndo_xdp_xmit = bnxt_xdp_xmit, |
---|
8618 | 12251 | .ndo_bridge_getlink = bnxt_bridge_getlink, |
---|
8619 | 12252 | .ndo_bridge_setlink = bnxt_bridge_setlink, |
---|
8620 | | - .ndo_get_phys_port_name = bnxt_get_phys_port_name |
---|
| 12253 | + .ndo_get_devlink_port = bnxt_get_devlink_port, |
---|
8621 | 12254 | }; |
---|
8622 | 12255 | |
---|
8623 | 12256 | static void bnxt_remove_one(struct pci_dev *pdev) |
---|
.. | .. |
---|
8625 | 12258 | struct net_device *dev = pci_get_drvdata(pdev); |
---|
8626 | 12259 | struct bnxt *bp = netdev_priv(dev); |
---|
8627 | 12260 | |
---|
8628 | | - if (BNXT_PF(bp)) { |
---|
| 12261 | + if (BNXT_PF(bp)) |
---|
8629 | 12262 | bnxt_sriov_disable(bp); |
---|
8630 | | - bnxt_dl_unregister(bp); |
---|
8631 | | - } |
---|
8632 | 12263 | |
---|
| 12264 | + if (BNXT_PF(bp)) |
---|
| 12265 | + devlink_port_type_clear(&bp->dl_port); |
---|
8633 | 12266 | pci_disable_pcie_error_reporting(pdev); |
---|
8634 | 12267 | unregister_netdev(dev); |
---|
8635 | | - bnxt_shutdown_tc(bp); |
---|
8636 | | - bnxt_cancel_sp_work(bp); |
---|
| 12268 | + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
---|
| 12269 | + /* Flush any pending tasks */ |
---|
| 12270 | + cancel_work_sync(&bp->sp_task); |
---|
| 12271 | + cancel_delayed_work_sync(&bp->fw_reset_task); |
---|
8637 | 12272 | bp->sp_event = 0; |
---|
| 12273 | + |
---|
| 12274 | + bnxt_dl_fw_reporters_destroy(bp, true); |
---|
| 12275 | + bnxt_dl_unregister(bp); |
---|
| 12276 | + bnxt_shutdown_tc(bp); |
---|
8638 | 12277 | |
---|
8639 | 12278 | bnxt_clear_int_mode(bp); |
---|
8640 | 12279 | bnxt_hwrm_func_drv_unrgtr(bp); |
---|
.. | .. |
---|
8644 | 12283 | bnxt_dcb_free(bp); |
---|
8645 | 12284 | kfree(bp->edev); |
---|
8646 | 12285 | bp->edev = NULL; |
---|
| 12286 | + kfree(bp->fw_health); |
---|
| 12287 | + bp->fw_health = NULL; |
---|
8647 | 12288 | bnxt_cleanup_pci(bp); |
---|
| 12289 | + bnxt_free_ctx_mem(bp); |
---|
| 12290 | + kfree(bp->ctx); |
---|
| 12291 | + bp->ctx = NULL; |
---|
| 12292 | + kfree(bp->rss_indir_tbl); |
---|
| 12293 | + bp->rss_indir_tbl = NULL; |
---|
| 12294 | + bnxt_free_port_stats(bp); |
---|
8648 | 12295 | free_netdev(dev); |
---|
8649 | 12296 | } |
---|
8650 | 12297 | |
---|
8651 | | -static int bnxt_probe_phy(struct bnxt *bp) |
---|
| 12298 | +static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) |
---|
8652 | 12299 | { |
---|
8653 | 12300 | int rc = 0; |
---|
8654 | 12301 | struct bnxt_link_info *link_info = &bp->link_info; |
---|
.. | .. |
---|
8659 | 12306 | rc); |
---|
8660 | 12307 | return rc; |
---|
8661 | 12308 | } |
---|
8662 | | - mutex_init(&bp->link_lock); |
---|
| 12309 | + if (!fw_dflt) |
---|
| 12310 | + return 0; |
---|
8663 | 12311 | |
---|
8664 | 12312 | rc = bnxt_update_link(bp, false); |
---|
8665 | 12313 | if (rc) { |
---|
.. | .. |
---|
8674 | 12322 | if (link_info->auto_link_speeds && !link_info->support_auto_speeds) |
---|
8675 | 12323 | link_info->support_auto_speeds = link_info->support_speeds; |
---|
8676 | 12324 | |
---|
8677 | | - /*initialize the ethool setting copy with NVM settings */ |
---|
8678 | | - if (BNXT_AUTO_MODE(link_info->auto_mode)) { |
---|
8679 | | - link_info->autoneg = BNXT_AUTONEG_SPEED; |
---|
8680 | | - if (bp->hwrm_spec_code >= 0x10201) { |
---|
8681 | | - if (link_info->auto_pause_setting & |
---|
8682 | | - PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) |
---|
8683 | | - link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
---|
8684 | | - } else { |
---|
8685 | | - link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
---|
8686 | | - } |
---|
8687 | | - link_info->advertising = link_info->auto_link_speeds; |
---|
8688 | | - } else { |
---|
8689 | | - link_info->req_link_speed = link_info->force_link_speed; |
---|
8690 | | - link_info->req_duplex = link_info->duplex_setting; |
---|
8691 | | - } |
---|
8692 | | - if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) |
---|
8693 | | - link_info->req_flow_ctrl = |
---|
8694 | | - link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; |
---|
8695 | | - else |
---|
8696 | | - link_info->req_flow_ctrl = link_info->force_pause_setting; |
---|
8697 | | - return rc; |
---|
| 12325 | + bnxt_init_ethtool_link_settings(bp); |
---|
| 12326 | + return 0; |
---|
8698 | 12327 | } |
---|
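
The ethtool-settings initialization deleted above was factored into bnxt_init_ethtool_link_settings(). A sketch of that helper, reconstructed from the deleted lines (the in-tree version may differ in detail):

	static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
	{
		struct bnxt_link_info *link_info = &bp->link_info;

		/* Initialize the ethtool settings copy from NVM settings. */
		if (BNXT_AUTO_MODE(link_info->auto_mode)) {
			link_info->autoneg = BNXT_AUTONEG_SPEED;
			if (bp->hwrm_spec_code >= 0x10201) {
				if (link_info->auto_pause_setting &
				    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
					link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
			} else {
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
			}
			link_info->advertising = link_info->auto_link_speeds;
		} else {
			link_info->req_link_speed = link_info->force_link_speed;
			link_info->req_duplex = link_info->duplex_setting;
		}
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->req_flow_ctrl =
				link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
		else
			link_info->req_flow_ctrl = link_info->force_pause_setting;
	}
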
8699 | 12328 | |
---|
8700 | 12329 | static int bnxt_get_max_irq(struct pci_dev *pdev) |
---|
.. | .. |
---|
8712 | 12341 | int *max_cp) |
---|
8713 | 12342 | { |
---|
8714 | 12343 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
---|
8715 | | - int max_ring_grps = 0; |
---|
| 12344 | + int max_ring_grps = 0, max_irq; |
---|
8716 | 12345 | |
---|
8717 | 12346 | *max_tx = hw_resc->max_tx_rings; |
---|
8718 | 12347 | *max_rx = hw_resc->max_rx_rings; |
---|
8719 | | - *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), |
---|
8720 | | - hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp)); |
---|
8721 | | - *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); |
---|
| 12348 | + *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); |
---|
| 12349 | + max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - |
---|
| 12350 | + bnxt_get_ulp_msix_num(bp), |
---|
| 12351 | + hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); |
---|
| 12352 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
---|
| 12353 | + *max_cp = min_t(int, *max_cp, max_irq); |
---|
8722 | 12354 | max_ring_grps = hw_resc->max_hw_ring_grps; |
---|
8723 | 12355 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { |
---|
8724 | 12356 | *max_cp -= 1; |
---|
.. | .. |
---|
8726 | 12358 | } |
---|
8727 | 12359 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
---|
8728 | 12360 | *max_rx >>= 1; |
---|
| 12361 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
---|
| 12362 | + bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); |
---|
| 12363 | + /* On P5 chips, the max_cp output parameter is the number of available NQs */
---|
| 12364 | + *max_cp = max_irq; |
---|
| 12365 | + } |
---|
8729 | 12366 | *max_rx = min_t(int, *max_rx, max_ring_grps); |
---|
8730 | 12367 | } |
---|
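
For illustration, a hypothetical caller of the helper patched above (here assumed to be _bnxt_get_max_rings()); the driver's real default-ring logic is more involved, but the three maxima are consumed in this min-of-three fashion:

	/* Illustrative only: a shared-ring configuration cannot exceed
	 * any of the rx/tx/completion-ring maxima.
	 */
	static int example_pick_dflt_rings(struct bnxt *bp)
	{
		int max_rx, max_tx, max_cp;

		_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
		return min3(max_rx, max_tx, max_cp);
	}
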
8731 | 12368 | |
---|
.. | .. |
---|
8807 | 12444 | |
---|
8808 | 12445 | if (sh) |
---|
8809 | 12446 | bp->flags |= BNXT_FLAG_SHARED_RINGS; |
---|
8810 | | - dflt_rings = netif_get_num_default_rss_queues(); |
---|
| 12447 | + dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); |
---|
8811 | 12448 | /* Reduce default rings on multi-port cards so that total default |
---|
8812 | 12449 | * rings do not exceed CPU count. |
---|
8813 | 12450 | */ |
---|
.. | .. |
---|
8842 | 12479 | netdev_warn(bp->dev, "2nd rings reservation failed.\n"); |
---|
8843 | 12480 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
---|
8844 | 12481 | } |
---|
8845 | | - bp->num_stat_ctxs = bp->cp_nr_rings; |
---|
8846 | 12482 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
---|
8847 | 12483 | bp->rx_nr_rings++; |
---|
8848 | 12484 | bp->cp_nr_rings++; |
---|
.. | .. |
---|
8873 | 12509 | goto init_dflt_ring_err; |
---|
8874 | 12510 | |
---|
8875 | 12511 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
---|
8876 | | - if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { |
---|
8877 | | - bp->flags |= BNXT_FLAG_RFS; |
---|
8878 | | - bp->dev->features |= NETIF_F_NTUPLE; |
---|
8879 | | - } |
---|
| 12512 | + |
---|
| 12513 | + bnxt_set_dflt_rfs(bp); |
---|
| 12514 | + |
---|
8880 | 12515 | init_dflt_ring_err: |
---|
8881 | 12516 | bnxt_ulp_irq_restart(bp, rc); |
---|
8882 | 12517 | return rc; |
---|
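
The RFS defaulting deleted in the hunk above moved into bnxt_set_dflt_rfs(). A sketch reconstructed from the deleted lines (the in-tree helper also handles clearing the flags when RFS is not capable):

	static void bnxt_set_dflt_rfs(struct bnxt *bp)
	{
		struct net_device *dev = bp->dev;

		if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}
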
.. | .. |
---|
8934 | 12569 | return rc; |
---|
8935 | 12570 | } |
---|
8936 | 12571 | |
---|
| 12572 | +#define BNXT_VPD_LEN 512 |
---|
| 12573 | +static void bnxt_vpd_read_info(struct bnxt *bp) |
---|
| 12574 | +{ |
---|
| 12575 | + struct pci_dev *pdev = bp->pdev; |
---|
| 12576 | + int i, len, pos, ro_size, size; |
---|
| 12577 | + ssize_t vpd_size; |
---|
| 12578 | + u8 *vpd_data; |
---|
| 12579 | + |
---|
| 12580 | + vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL); |
---|
| 12581 | + if (!vpd_data) |
---|
| 12582 | + return; |
---|
| 12583 | + |
---|
| 12584 | + vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data); |
---|
| 12585 | + if (vpd_size <= 0) { |
---|
| 12586 | + netdev_err(bp->dev, "Unable to read VPD\n"); |
---|
| 12587 | + goto exit; |
---|
| 12588 | + } |
---|
| 12589 | + |
---|
| 12590 | + i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); |
---|
| 12591 | + if (i < 0) { |
---|
| 12592 | + netdev_err(bp->dev, "VPD READ-Only not found\n"); |
---|
| 12593 | + goto exit; |
---|
| 12594 | + } |
---|
| 12595 | + |
---|
| 12596 | + ro_size = pci_vpd_lrdt_size(&vpd_data[i]); |
---|
| 12597 | + i += PCI_VPD_LRDT_TAG_SIZE; |
---|
| 12598 | + if (i + ro_size > vpd_size) |
---|
| 12599 | + goto exit; |
---|
| 12600 | + |
---|
| 12601 | + pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, |
---|
| 12602 | + PCI_VPD_RO_KEYWORD_PARTNO); |
---|
| 12603 | + if (pos < 0) |
---|
| 12604 | + goto read_sn; |
---|
| 12605 | + |
---|
| 12606 | + len = pci_vpd_info_field_size(&vpd_data[pos]); |
---|
| 12607 | + pos += PCI_VPD_INFO_FLD_HDR_SIZE; |
---|
| 12608 | + if (len + pos > vpd_size) |
---|
| 12609 | + goto read_sn; |
---|
| 12610 | + |
---|
| 12611 | + size = min(len, BNXT_VPD_FLD_LEN - 1); |
---|
| 12612 | + memcpy(bp->board_partno, &vpd_data[pos], size); |
---|
| 12613 | + |
---|
| 12614 | +read_sn: |
---|
| 12615 | + pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, |
---|
| 12616 | + PCI_VPD_RO_KEYWORD_SERIALNO); |
---|
| 12617 | + if (pos < 0) |
---|
| 12618 | + goto exit; |
---|
| 12619 | + |
---|
| 12620 | + len = pci_vpd_info_field_size(&vpd_data[pos]); |
---|
| 12621 | + pos += PCI_VPD_INFO_FLD_HDR_SIZE; |
---|
| 12622 | + if (len + pos > vpd_size) |
---|
| 12623 | + goto exit; |
---|
| 12624 | + |
---|
| 12625 | + size = min(len, BNXT_VPD_FLD_LEN - 1); |
---|
| 12626 | + memcpy(bp->board_serialno, &vpd_data[pos], size); |
---|
| 12627 | +exit: |
---|
| 12628 | + kfree(vpd_data); |
---|
| 12629 | +} |
---|
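
A hypothetical consumer of the two fields bnxt_vpd_read_info() fills in, e.g. from an ethtool or devlink info handler; example_log_vpd() is illustrative, not part of the driver:

	static void example_log_vpd(struct bnxt *bp)
	{
		/* Both buffers were zero-filled at alloc_etherdev() time,
		 * so a leading NUL means the keyword was not found.
		 */
		if (bp->board_partno[0])
			netdev_info(bp->dev, "Part number: %s\n", bp->board_partno);
		if (bp->board_serialno[0])
			netdev_info(bp->dev, "Serial number: %s\n", bp->board_serialno);
	}
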
| 12630 | + |
---|
| 12631 | +static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) |
---|
| 12632 | +{ |
---|
| 12633 | + struct pci_dev *pdev = bp->pdev; |
---|
| 12634 | + u64 qword; |
---|
| 12635 | + |
---|
| 12636 | + qword = pci_get_dsn(pdev); |
---|
| 12637 | + if (!qword) { |
---|
| 12638 | + netdev_info(bp->dev, "Unable to read adapter's DSN\n"); |
---|
| 12639 | + return -EOPNOTSUPP; |
---|
| 12640 | + } |
---|
| 12641 | + |
---|
| 12642 | + put_unaligned_le64(qword, dsn); |
---|
| 12643 | + |
---|
| 12644 | + bp->flags |= BNXT_FLAG_DSN_VALID; |
---|
| 12645 | + return 0; |
---|
| 12646 | +} |
---|
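
Usage sketch for the DSN helper above; pci_get_dsn() returns 0 when the extended capability is absent (VFs, older firmware), so callers can degrade gracefully. example_show_dsn() is illustrative:

	static void example_show_dsn(struct bnxt *bp)
	{
		u8 dsn[8] = {};

		if (!bnxt_pcie_dsn_get(bp, dsn))
			netdev_info(bp->dev, "DSN: %8phN\n", dsn);
	}
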
| 12647 | + |
---|
| 12648 | +static int bnxt_map_db_bar(struct bnxt *bp) |
---|
| 12649 | +{ |
---|
| 12650 | + if (!bp->db_size) |
---|
| 12651 | + return -ENODEV; |
---|
| 12652 | + bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); |
---|
| 12653 | + if (!bp->bar1) |
---|
| 12654 | + return -ENOMEM; |
---|
| 12655 | + return 0; |
---|
| 12656 | +} |
---|
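
Once bnxt_map_db_bar() succeeds, ringing a doorbell is a plain MMIO write into bar1. An illustrative (hypothetical) helper; the driver's real doorbell helpers compute the offset and value from firmware-provided parameters such as db_size:

	static void example_ring_doorbell(struct bnxt *bp, u32 db_offset, u32 val)
	{
		/* db_offset must lie within the bp->db_size bytes mapped above. */
		writel(val, bp->bar1 + db_offset);
	}
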
| 12657 | + |
---|
8937 | 12658 | static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
---|
8938 | 12659 | { |
---|
8939 | | - static int version_printed; |
---|
8940 | 12660 | struct net_device *dev; |
---|
8941 | 12661 | struct bnxt *bp; |
---|
8942 | 12662 | int rc, max_irqs; |
---|
.. | .. |
---|
8944 | 12664 | if (pci_is_bridge(pdev)) |
---|
8945 | 12665 | return -ENODEV; |
---|
8946 | 12666 | |
---|
8947 | | - if (version_printed++ == 0) |
---|
8948 | | - pr_info("%s", version); |
---|
| 12667 | + /* Clear any pending DMA transactions left by the crashed kernel
---|
| 12668 | + * before loading the driver in the capture (kdump) kernel.
---|
| 12669 | + */ |
---|
| 12670 | + if (is_kdump_kernel()) { |
---|
| 12671 | + pci_clear_master(pdev); |
---|
| 12672 | + pcie_flr(pdev); |
---|
| 12673 | + } |
---|
8949 | 12674 | |
---|
8950 | 12675 | max_irqs = bnxt_get_max_irq(pdev); |
---|
8951 | 12676 | dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); |
---|
.. | .. |
---|
8953 | 12678 | return -ENOMEM; |
---|
8954 | 12679 | |
---|
8955 | 12680 | bp = netdev_priv(dev); |
---|
| 12681 | + bp->msg_enable = BNXT_DEF_MSG_ENABLE; |
---|
| 12682 | + bnxt_set_max_func_irqs(bp, max_irqs); |
---|
8956 | 12683 | |
---|
8957 | 12684 | if (bnxt_vf_pciid(ent->driver_data)) |
---|
8958 | 12685 | bp->flags |= BNXT_FLAG_VF; |
---|
.. | .. |
---|
8967 | 12694 | dev->netdev_ops = &bnxt_netdev_ops; |
---|
8968 | 12695 | dev->watchdog_timeo = BNXT_TX_TIMEOUT; |
---|
8969 | 12696 | dev->ethtool_ops = &bnxt_ethtool_ops; |
---|
8970 | | - SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); |
---|
8971 | 12697 | pci_set_drvdata(pdev, dev); |
---|
| 12698 | + |
---|
| 12699 | + if (BNXT_PF(bp)) |
---|
| 12700 | + bnxt_vpd_read_info(bp); |
---|
8972 | 12701 | |
---|
8973 | 12702 | rc = bnxt_alloc_hwrm_resources(bp); |
---|
8974 | 12703 | if (rc) |
---|
8975 | 12704 | goto init_err_pci_clean; |
---|
8976 | 12705 | |
---|
8977 | 12706 | mutex_init(&bp->hwrm_cmd_lock); |
---|
8978 | | - rc = bnxt_hwrm_ver_get(bp); |
---|
| 12707 | + mutex_init(&bp->link_lock); |
---|
| 12708 | + |
---|
| 12709 | + rc = bnxt_fw_init_one_p1(bp); |
---|
8979 | 12710 | if (rc) |
---|
8980 | 12711 | goto init_err_pci_clean; |
---|
8981 | 12712 | |
---|
8982 | | - if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) { |
---|
8983 | | - rc = bnxt_alloc_hwrm_short_cmd_req(bp); |
---|
8984 | | - if (rc) |
---|
8985 | | - goto init_err_pci_clean; |
---|
| 12713 | + if (BNXT_CHIP_P5(bp)) { |
---|
| 12714 | + bp->flags |= BNXT_FLAG_CHIP_P5; |
---|
| 12715 | + if (BNXT_CHIP_SR2(bp)) |
---|
| 12716 | + bp->flags |= BNXT_FLAG_CHIP_SR2; |
---|
8986 | 12717 | } |
---|
8987 | 12718 | |
---|
8988 | | - rc = bnxt_hwrm_func_reset(bp); |
---|
| 12719 | + rc = bnxt_alloc_rss_indir_tbl(bp); |
---|
8989 | 12720 | if (rc) |
---|
8990 | 12721 | goto init_err_pci_clean; |
---|
8991 | 12722 | |
---|
8992 | | - bnxt_hwrm_fw_set_time(bp); |
---|
| 12723 | + rc = bnxt_fw_init_one_p2(bp); |
---|
| 12724 | + if (rc) |
---|
| 12725 | + goto init_err_pci_clean; |
---|
| 12726 | + |
---|
| 12727 | + rc = bnxt_map_db_bar(bp); |
---|
| 12728 | + if (rc) { |
---|
| 12729 | + dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", |
---|
| 12730 | + rc); |
---|
| 12731 | + goto init_err_pci_clean; |
---|
| 12732 | + } |
---|
8993 | 12733 | |
---|
8994 | 12734 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | |
---|
8995 | 12735 | NETIF_F_TSO | NETIF_F_TSO6 | |
---|
.. | .. |
---|
8999 | 12739 | NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | |
---|
9000 | 12740 | NETIF_F_RXCSUM | NETIF_F_GRO; |
---|
9001 | 12741 | |
---|
9002 | | - if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
| 12742 | + if (BNXT_SUPPORTS_TPA(bp)) |
---|
9003 | 12743 | dev->hw_features |= NETIF_F_LRO; |
---|
9004 | 12744 | |
---|
9005 | 12745 | dev->hw_enc_features = |
---|
.. | .. |
---|
9008 | 12748 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | |
---|
9009 | 12749 | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | |
---|
9010 | 12750 | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; |
---|
| 12751 | + dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; |
---|
| 12752 | + |
---|
9011 | 12753 | dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | |
---|
9012 | 12754 | NETIF_F_GSO_GRE_CSUM; |
---|
9013 | 12755 | dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; |
---|
9014 | | - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | |
---|
9015 | | - NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; |
---|
9016 | | - if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) |
---|
| 12756 | + if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) |
---|
| 12757 | + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; |
---|
| 12758 | + if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) |
---|
| 12759 | + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; |
---|
| 12760 | + if (BNXT_SUPPORTS_TPA(bp)) |
---|
9017 | 12761 | dev->hw_features |= NETIF_F_GRO_HW; |
---|
9018 | 12762 | dev->features |= dev->hw_features | NETIF_F_HIGHDMA; |
---|
9019 | 12763 | if (dev->features & NETIF_F_GRO_HW) |
---|
.. | .. |
---|
9024 | 12768 | init_waitqueue_head(&bp->sriov_cfg_wait); |
---|
9025 | 12769 | mutex_init(&bp->sriov_lock); |
---|
9026 | 12770 | #endif |
---|
9027 | | - bp->gro_func = bnxt_gro_func_5730x; |
---|
9028 | | - if (BNXT_CHIP_P4_PLUS(bp)) |
---|
9029 | | - bp->gro_func = bnxt_gro_func_5731x; |
---|
9030 | | - else |
---|
| 12771 | + if (BNXT_SUPPORTS_TPA(bp)) { |
---|
| 12772 | + bp->gro_func = bnxt_gro_func_5730x; |
---|
| 12773 | + if (BNXT_CHIP_P4(bp)) |
---|
| 12774 | + bp->gro_func = bnxt_gro_func_5731x; |
---|
| 12775 | + else if (BNXT_CHIP_P5(bp)) |
---|
| 12776 | + bp->gro_func = bnxt_gro_func_5750x; |
---|
| 12777 | + } |
---|
| 12778 | + if (!BNXT_CHIP_P4_PLUS(bp)) |
---|
9031 | 12779 | bp->flags |= BNXT_FLAG_DOUBLE_DB; |
---|
9032 | | - |
---|
9033 | | - rc = bnxt_hwrm_func_drv_rgtr(bp); |
---|
9034 | | - if (rc) |
---|
9035 | | - goto init_err_pci_clean; |
---|
9036 | | - |
---|
9037 | | - rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); |
---|
9038 | | - if (rc) |
---|
9039 | | - goto init_err_pci_clean; |
---|
9040 | 12780 | |
---|
9041 | 12781 | bp->ulp_probe = bnxt_ulp_probe; |
---|
9042 | 12782 | |
---|
9043 | | - /* Get the MAX capabilities for this function */ |
---|
9044 | | - rc = bnxt_hwrm_func_qcaps(bp); |
---|
9045 | | - if (rc) { |
---|
9046 | | - netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", |
---|
9047 | | - rc); |
---|
9048 | | - rc = -1; |
---|
9049 | | - goto init_err_pci_clean; |
---|
9050 | | - } |
---|
9051 | 12783 | rc = bnxt_init_mac_addr(bp); |
---|
9052 | 12784 | if (rc) { |
---|
9053 | 12785 | dev_err(&pdev->dev, "Unable to initialize mac address.\n"); |
---|
9054 | 12786 | rc = -EADDRNOTAVAIL; |
---|
9055 | 12787 | goto init_err_pci_clean; |
---|
9056 | 12788 | } |
---|
9057 | | - rc = bnxt_hwrm_queue_qportcfg(bp); |
---|
9058 | | - if (rc) { |
---|
9059 | | - netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", |
---|
9060 | | - rc); |
---|
9061 | | - rc = -1; |
---|
9062 | | - goto init_err_pci_clean; |
---|
9063 | | - } |
---|
9064 | 12789 | |
---|
9065 | | - bnxt_hwrm_func_qcfg(bp); |
---|
9066 | | - bnxt_hwrm_port_led_qcaps(bp); |
---|
9067 | | - bnxt_ethtool_init(bp); |
---|
9068 | | - bnxt_dcb_init(bp); |
---|
| 12790 | + if (BNXT_PF(bp)) { |
---|
| 12791 | + /* Read the adapter's DSN to use as the eswitch switch_id */ |
---|
| 12792 | + rc = bnxt_pcie_dsn_get(bp, bp->dsn); |
---|
| 12793 | + } |
---|
9069 | 12794 | |
---|
9070 | 12795 | /* MTU range: 60 - FW defined max */ |
---|
9071 | 12796 | dev->min_mtu = ETH_ZLEN; |
---|
9072 | 12797 | dev->max_mtu = bp->max_mtu; |
---|
9073 | 12798 | |
---|
9074 | | - rc = bnxt_probe_phy(bp); |
---|
| 12799 | + rc = bnxt_probe_phy(bp, true); |
---|
9075 | 12800 | if (rc) |
---|
9076 | 12801 | goto init_err_pci_clean; |
---|
9077 | 12802 | |
---|
9078 | 12803 | bnxt_set_rx_skb_mode(bp, false); |
---|
9079 | 12804 | bnxt_set_tpa_flags(bp); |
---|
9080 | 12805 | bnxt_set_ring_params(bp); |
---|
9081 | | - bnxt_set_max_func_irqs(bp, max_irqs); |
---|
9082 | 12806 | rc = bnxt_set_dflt_rings(bp, true); |
---|
9083 | 12807 | if (rc) { |
---|
9084 | 12808 | netdev_err(bp->dev, "Not enough rings available.\n"); |
---|
.. | .. |
---|
9086 | 12810 | goto init_err_pci_clean; |
---|
9087 | 12811 | } |
---|
9088 | 12812 | |
---|
9089 | | - /* Default RSS hash cfg. */ |
---|
9090 | | - bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | |
---|
9091 | | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | |
---|
9092 | | - VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | |
---|
9093 | | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; |
---|
9094 | | - if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { |
---|
9095 | | - bp->flags |= BNXT_FLAG_UDP_RSS_CAP; |
---|
9096 | | - bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | |
---|
9097 | | - VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; |
---|
9098 | | - } |
---|
| 12813 | + bnxt_fw_init_one_p3(bp); |
---|
9099 | 12814 | |
---|
9100 | | - bnxt_hwrm_vnic_qcaps(bp); |
---|
9101 | | - if (bnxt_rfs_supported(bp)) { |
---|
9102 | | - dev->hw_features |= NETIF_F_NTUPLE; |
---|
9103 | | - if (bnxt_rfs_capable(bp)) { |
---|
9104 | | - bp->flags |= BNXT_FLAG_RFS; |
---|
9105 | | - dev->features |= NETIF_F_NTUPLE; |
---|
9106 | | - } |
---|
9107 | | - } |
---|
9108 | | - |
---|
9109 | | - if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) |
---|
| 12815 | + if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) |
---|
9110 | 12816 | bp->flags |= BNXT_FLAG_STRIP_VLAN; |
---|
9111 | 12817 | |
---|
9112 | 12818 | rc = bnxt_init_int_mode(bp); |
---|
.. | .. |
---|
9118 | 12824 | */ |
---|
9119 | 12825 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
---|
9120 | 12826 | |
---|
9121 | | - bnxt_get_wol_settings(bp); |
---|
9122 | | - if (bp->flags & BNXT_FLAG_WOL_CAP) |
---|
9123 | | - device_set_wakeup_enable(&pdev->dev, bp->wol); |
---|
9124 | | - else |
---|
9125 | | - device_set_wakeup_capable(&pdev->dev, false); |
---|
9126 | | - |
---|
9127 | | - bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); |
---|
9128 | | - |
---|
9129 | 12827 | if (BNXT_PF(bp)) { |
---|
9130 | 12828 | if (!bnxt_pf_wq) { |
---|
9131 | 12829 | bnxt_pf_wq = |
---|
.. | .. |
---|
9136 | 12834 | goto init_err_pci_clean; |
---|
9137 | 12835 | } |
---|
9138 | 12836 | } |
---|
9139 | | - bnxt_init_tc(bp); |
---|
| 12837 | + rc = bnxt_init_tc(bp); |
---|
| 12838 | + if (rc) |
---|
| 12839 | + netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", |
---|
| 12840 | + rc); |
---|
9140 | 12841 | } |
---|
| 12842 | + |
---|
| 12843 | + bnxt_dl_register(bp); |
---|
9141 | 12844 | |
---|
9142 | 12845 | rc = register_netdev(dev); |
---|
9143 | 12846 | if (rc) |
---|
9144 | | - goto init_err_cleanup_tc; |
---|
| 12847 | + goto init_err_cleanup; |
---|
9145 | 12848 | |
---|
9146 | 12849 | if (BNXT_PF(bp)) |
---|
9147 | | - bnxt_dl_register(bp); |
---|
| 12850 | + devlink_port_type_eth_set(&bp->dl_port, bp->dev); |
---|
| 12851 | + bnxt_dl_fw_reporters_create(bp); |
---|
9148 | 12852 | |
---|
9149 | 12853 | netdev_info(dev, "%s found at mem %lx, node addr %pM\n", |
---|
9150 | 12854 | board_info[ent->driver_data].name, |
---|
.. | .. |
---|
9154 | 12858 | pci_save_state(pdev); |
---|
9155 | 12859 | return 0; |
---|
9156 | 12860 | |
---|
9157 | | -init_err_cleanup_tc: |
---|
| 12861 | +init_err_cleanup: |
---|
| 12862 | + bnxt_dl_unregister(bp); |
---|
9158 | 12863 | bnxt_shutdown_tc(bp); |
---|
9159 | 12864 | bnxt_clear_int_mode(bp); |
---|
9160 | 12865 | |
---|
9161 | 12866 | init_err_pci_clean: |
---|
| 12867 | + bnxt_hwrm_func_drv_unrgtr(bp); |
---|
9162 | 12868 | bnxt_free_hwrm_short_cmd_req(bp); |
---|
9163 | 12869 | bnxt_free_hwrm_resources(bp); |
---|
| 12870 | + bnxt_ethtool_free(bp); |
---|
| 12871 | + kfree(bp->fw_health); |
---|
| 12872 | + bp->fw_health = NULL; |
---|
9164 | 12873 | bnxt_cleanup_pci(bp); |
---|
| 12874 | + bnxt_free_ctx_mem(bp); |
---|
| 12875 | + kfree(bp->ctx); |
---|
| 12876 | + bp->ctx = NULL; |
---|
| 12877 | + kfree(bp->rss_indir_tbl); |
---|
| 12878 | + bp->rss_indir_tbl = NULL; |
---|
9165 | 12879 | |
---|
9166 | 12880 | init_err_free: |
---|
9167 | 12881 | free_netdev(dev); |
---|
.. | .. |
---|
9185 | 12899 | dev_close(dev); |
---|
9186 | 12900 | |
---|
9187 | 12901 | bnxt_ulp_shutdown(bp); |
---|
| 12902 | + bnxt_clear_int_mode(bp); |
---|
| 12903 | + pci_disable_device(pdev); |
---|
9188 | 12904 | |
---|
9189 | 12905 | if (system_state == SYSTEM_POWER_OFF) { |
---|
9190 | | - bnxt_clear_int_mode(bp); |
---|
9191 | 12906 | pci_wake_from_d3(pdev, bp->wol); |
---|
9192 | 12907 | pci_set_power_state(pdev, PCI_D3hot); |
---|
9193 | 12908 | } |
---|
.. | .. |
---|
9199 | 12914 | #ifdef CONFIG_PM_SLEEP |
---|
9200 | 12915 | static int bnxt_suspend(struct device *device) |
---|
9201 | 12916 | { |
---|
9202 | | - struct pci_dev *pdev = to_pci_dev(device); |
---|
9203 | | - struct net_device *dev = pci_get_drvdata(pdev); |
---|
| 12917 | + struct net_device *dev = dev_get_drvdata(device); |
---|
9204 | 12918 | struct bnxt *bp = netdev_priv(dev); |
---|
9205 | 12919 | int rc = 0; |
---|
9206 | 12920 | |
---|
9207 | 12921 | rtnl_lock(); |
---|
| 12922 | + bnxt_ulp_stop(bp); |
---|
9208 | 12923 | if (netif_running(dev)) { |
---|
9209 | 12924 | netif_device_detach(dev); |
---|
9210 | 12925 | rc = bnxt_close(dev); |
---|
9211 | 12926 | } |
---|
9212 | 12927 | bnxt_hwrm_func_drv_unrgtr(bp); |
---|
| 12928 | + pci_disable_device(bp->pdev); |
---|
| 12929 | + bnxt_free_ctx_mem(bp); |
---|
| 12930 | + kfree(bp->ctx); |
---|
| 12931 | + bp->ctx = NULL; |
---|
9213 | 12932 | rtnl_unlock(); |
---|
9214 | 12933 | return rc; |
---|
9215 | 12934 | } |
---|
9216 | 12935 | |
---|
9217 | 12936 | static int bnxt_resume(struct device *device) |
---|
9218 | 12937 | { |
---|
9219 | | - struct pci_dev *pdev = to_pci_dev(device); |
---|
9220 | | - struct net_device *dev = pci_get_drvdata(pdev); |
---|
| 12938 | + struct net_device *dev = dev_get_drvdata(device); |
---|
9221 | 12939 | struct bnxt *bp = netdev_priv(dev); |
---|
9222 | 12940 | int rc = 0; |
---|
9223 | 12941 | |
---|
9224 | 12942 | rtnl_lock(); |
---|
9225 | | - if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) { |
---|
| 12943 | + rc = pci_enable_device(bp->pdev); |
---|
| 12944 | + if (rc) { |
---|
| 12945 | + netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", |
---|
| 12946 | + rc); |
---|
| 12947 | + goto resume_exit; |
---|
| 12948 | + } |
---|
| 12949 | + pci_set_master(bp->pdev); |
---|
| 12950 | + if (bnxt_hwrm_ver_get(bp)) { |
---|
9226 | 12951 | rc = -ENODEV; |
---|
9227 | 12952 | goto resume_exit; |
---|
9228 | 12953 | } |
---|
.. | .. |
---|
9231 | 12956 | rc = -EBUSY; |
---|
9232 | 12957 | goto resume_exit; |
---|
9233 | 12958 | } |
---|
| 12959 | + |
---|
| 12960 | + rc = bnxt_hwrm_func_qcaps(bp); |
---|
| 12961 | + if (rc) |
---|
| 12962 | + goto resume_exit; |
---|
| 12963 | + |
---|
| 12964 | + if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { |
---|
| 12965 | + rc = -ENODEV; |
---|
| 12966 | + goto resume_exit; |
---|
| 12967 | + } |
---|
| 12968 | + |
---|
9234 | 12969 | bnxt_get_wol_settings(bp); |
---|
9235 | 12970 | if (netif_running(dev)) { |
---|
9236 | 12971 | rc = bnxt_open(dev); |
---|
.. | .. |
---|
9239 | 12974 | } |
---|
9240 | 12975 | |
---|
9241 | 12976 | resume_exit: |
---|
| 12977 | + bnxt_ulp_start(bp, rc); |
---|
| 12978 | + if (!rc) |
---|
| 12979 | + bnxt_reenable_sriov(bp); |
---|
9242 | 12980 | rtnl_unlock(); |
---|
9243 | 12981 | return rc; |
---|
9244 | 12982 | } |
---|
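
These two callbacks are bound through dev_pm_ops; a sketch of the typical wiring (the driver uses an equivalent definition guarded by CONFIG_PM_SLEEP):

	static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);

	/* Then, in the pci_driver declaration: .driver.pm = &bnxt_pm_ops */
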
.. | .. |
---|
9278 | 13016 | return PCI_ERS_RESULT_DISCONNECT; |
---|
9279 | 13017 | } |
---|
9280 | 13018 | |
---|
| 13019 | + if (state == pci_channel_io_frozen) |
---|
| 13020 | + set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); |
---|
| 13021 | + |
---|
9281 | 13022 | if (netif_running(netdev)) |
---|
9282 | 13023 | bnxt_close(netdev); |
---|
9283 | 13024 | |
---|
9284 | | - pci_disable_device(pdev); |
---|
| 13025 | + if (pci_is_enabled(pdev)) |
---|
| 13026 | + pci_disable_device(pdev); |
---|
| 13027 | + bnxt_free_ctx_mem(bp); |
---|
| 13028 | + kfree(bp->ctx); |
---|
| 13029 | + bp->ctx = NULL; |
---|
9285 | 13030 | rtnl_unlock(); |
---|
9286 | 13031 | |
---|
9287 | 13032 | /* Request a slot reset. */
---|
.. | .. |
---|
9299 | 13044 | */ |
---|
9300 | 13045 | static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) |
---|
9301 | 13046 | { |
---|
| 13047 | + pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; |
---|
9302 | 13048 | struct net_device *netdev = pci_get_drvdata(pdev); |
---|
9303 | 13049 | struct bnxt *bp = netdev_priv(netdev); |
---|
9304 | | - int err = 0; |
---|
9305 | | - pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; |
---|
| 13050 | + int err = 0, off; |
---|
9306 | 13051 | |
---|
9307 | 13052 | netdev_info(bp->dev, "PCI Slot Reset\n"); |
---|
9308 | 13053 | |
---|
.. | .. |
---|
9313 | 13058 | "Cannot re-enable PCI device after reset.\n"); |
---|
9314 | 13059 | } else { |
---|
9315 | 13060 | pci_set_master(pdev); |
---|
| 13061 | + /* Upon fatal error, the device's internal logic that latches the
---|
| 13062 | + * BAR values is reset and is restored only by rewriting the
---|
| 13063 | + * BARs.
---|
| 13064 | + *
---|
| 13065 | + * As pci_restore_state() does not re-write a BAR whose value
---|
| 13066 | + * matches the previously saved value, the driver needs to
---|
| 13067 | + * write the BARs to 0 to force a restore after a fatal error.
---|
| 13068 | + */ |
---|
| 13069 | + if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, |
---|
| 13070 | + &bp->state)) { |
---|
| 13071 | + for (off = PCI_BASE_ADDRESS_0; |
---|
| 13072 | + off <= PCI_BASE_ADDRESS_5; off += 4) |
---|
| 13073 | + pci_write_config_dword(bp->pdev, off, 0); |
---|
| 13074 | + } |
---|
9316 | 13075 | pci_restore_state(pdev); |
---|
9317 | 13076 | pci_save_state(pdev); |
---|
9318 | 13077 | |
---|
9319 | 13078 | err = bnxt_hwrm_func_reset(bp); |
---|
9320 | | - if (!err && netif_running(netdev)) |
---|
9321 | | - err = bnxt_open(netdev); |
---|
9322 | | - |
---|
9323 | | - if (!err) { |
---|
| 13079 | + if (!err) |
---|
9324 | 13080 | result = PCI_ERS_RESULT_RECOVERED; |
---|
9325 | | - bnxt_ulp_start(bp); |
---|
9326 | | - } |
---|
9327 | | - } |
---|
9328 | | - |
---|
9329 | | - if (result != PCI_ERS_RESULT_RECOVERED) { |
---|
9330 | | - if (netif_running(netdev)) |
---|
9331 | | - dev_close(netdev); |
---|
9332 | | - pci_disable_device(pdev); |
---|
9333 | 13081 | } |
---|
9334 | 13082 | |
---|
9335 | 13083 | rtnl_unlock(); |
---|
9336 | | - |
---|
9337 | | - err = pci_cleanup_aer_uncorrect_error_status(pdev); |
---|
9338 | | - if (err) { |
---|
9339 | | - dev_err(&pdev->dev, |
---|
9340 | | - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", |
---|
9341 | | - err); /* non-fatal, continue */ |
---|
9342 | | - } |
---|
9343 | 13084 | |
---|
9344 | 13085 | return result; |
---|
9345 | 13086 | } |
---|
.. | .. |
---|
9354 | 13095 | static void bnxt_io_resume(struct pci_dev *pdev) |
---|
9355 | 13096 | { |
---|
9356 | 13097 | struct net_device *netdev = pci_get_drvdata(pdev); |
---|
| 13098 | + struct bnxt *bp = netdev_priv(netdev); |
---|
| 13099 | + int err; |
---|
9357 | 13100 | |
---|
| 13101 | + netdev_info(bp->dev, "PCI Slot Resume\n"); |
---|
9358 | 13102 | rtnl_lock(); |
---|
9359 | 13103 | |
---|
9360 | | - netif_device_attach(netdev); |
---|
| 13104 | + err = bnxt_hwrm_func_qcaps(bp); |
---|
| 13105 | + if (!err && netif_running(netdev)) |
---|
| 13106 | + err = bnxt_open(netdev); |
---|
| 13107 | + |
---|
| 13108 | + bnxt_ulp_start(bp, err); |
---|
| 13109 | + if (!err) { |
---|
| 13110 | + bnxt_reenable_sriov(bp); |
---|
| 13111 | + netif_device_attach(netdev); |
---|
| 13112 | + } |
---|
9361 | 13113 | |
---|
9362 | 13114 | rtnl_unlock(); |
---|
9363 | 13115 | } |
---|
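
The three AER callbacks are bound through a struct pci_error_handlers referenced from the pci_driver; a sketch of that binding (the error_detected name is assumed from the surrounding code):

	static const struct pci_error_handlers bnxt_err_handler = {
		.error_detected	= bnxt_io_error_detected,
		.slot_reset	= bnxt_io_slot_reset,
		.resume		= bnxt_io_resume,
	};
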
.. | .. |
---|
9383 | 13135 | |
---|
9384 | 13136 | static int __init bnxt_init(void) |
---|
9385 | 13137 | { |
---|
| 13138 | + int err; |
---|
| 13139 | + |
---|
9386 | 13140 | bnxt_debug_init(); |
---|
9387 | | - return pci_register_driver(&bnxt_pci_driver); |
---|
| 13141 | + err = pci_register_driver(&bnxt_pci_driver); |
---|
| 13142 | + if (err) { |
---|
| 13143 | + bnxt_debug_exit(); |
---|
| 13144 | + return err; |
---|
| 13145 | + } |
---|
| 13146 | + |
---|
| 13147 | + return 0; |
---|
9388 | 13148 | } |
---|
9389 | 13149 | |
---|
9390 | 13150 | static void __exit bnxt_exit(void) |
---|