| .. | .. |
|---|
| 25 | 25 | static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, |
|---|
| 26 | 26 | struct bnxt_vf_info *vf, u16 event_id) |
|---|
| 27 | 27 | { |
|---|
| 28 | | - struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr; |
|---|
| 29 | 28 | struct hwrm_fwd_async_event_cmpl_input req = {0}; |
|---|
| 30 | 29 | struct hwrm_async_event_cmpl *async_cmpl; |
|---|
| 31 | 30 | int rc = 0; |
|---|
| .. | .. |
|---|
| 40 | 39 | async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); |
|---|
| 41 | 40 | async_cmpl->event_id = cpu_to_le16(event_id); |
|---|
| 42 | 41 | |
|---|
| 43 | | - mutex_lock(&bp->hwrm_cmd_lock); |
|---|
| 44 | | - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 45 | | - |
|---|
| 46 | | - if (rc) { |
|---|
| 42 | + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 43 | + if (rc) |
|---|
| 47 | 44 | netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", |
|---|
| 48 | 45 | rc); |
|---|
| 49 | | - goto fwd_async_event_cmpl_exit; |
|---|
| 50 | | - } |
|---|
| 51 | | - |
|---|
| 52 | | - if (resp->error_code) { |
|---|
| 53 | | - netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n", |
|---|
| 54 | | - resp->error_code); |
|---|
| 55 | | - rc = -1; |
|---|
| 56 | | - } |
|---|
| 57 | | - |
|---|
| 58 | | -fwd_async_event_cmpl_exit: |
|---|
| 59 | | - mutex_unlock(&bp->hwrm_cmd_lock); |
|---|
| 60 | 46 | return rc; |
|---|
| 61 | 47 | } |
|---|
| 62 | 48 | |
|---|
| .. | .. |
|---|
| 119 | 105 | return rc; |
|---|
| 120 | 106 | } |
|---|
| 121 | 107 | |
|---|
| 108 | +static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) /* Query FW and cache this VF's FUNC_QCFG flags in vf->func_qcfg_flags; returns 0 or HWRM error. */
|---|
| 109 | +{
|---|
| 110 | + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; /* shared response buffer; only valid while hwrm_cmd_lock is held */
|---|
| 111 | + struct hwrm_func_qcfg_input req = {0};
|---|
| 112 | + int rc;
|---|
| 113 | +
|---|
| 114 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
|---|
| 115 | + req.fid = cpu_to_le16(vf->fw_fid); /* address the query to this VF's firmware function id */
|---|
| 116 | + mutex_lock(&bp->hwrm_cmd_lock);
|---|
| 117 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|---|
| 118 | + if (rc) { /* on failure, leave any previously cached flags untouched */
|---|
| 119 | + mutex_unlock(&bp->hwrm_cmd_lock);
|---|
| 120 | + return rc;
|---|
| 121 | + }
|---|
| 122 | + vf->func_qcfg_flags = le16_to_cpu(resp->flags); /* cached copy read by bnxt_is_trusted_vf() */
|---|
| 123 | + mutex_unlock(&bp->hwrm_cmd_lock);
|---|
| 124 | + return 0;
|---|
| 125 | +}
|---|
| 126 | + |
|---|
| 127 | +static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) /* True when this VF is trusted; prefers the FW's view when FW supports trusted-VF tracking. */
|---|
| 128 | +{
|---|
| 129 | + if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) /* older FW: fall back to the driver-tracked flag */
|---|
| 130 | + return !!(vf->flags & BNXT_VF_TRUST);
|---|
| 131 | +
|---|
| 132 | + bnxt_hwrm_func_qcfg_flags(bp, vf); /* rc deliberately ignored: on error the previously cached flags are reported */
|---|
| 133 | + return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
|---|
| 134 | +}
|---|
| 135 | + |
|---|
| 136 | +static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) /* Push the driver's BNXT_VF_TRUST state for this VF down to firmware. */
|---|
| 137 | +{
|---|
| 138 | + struct hwrm_func_cfg_input req = {0};
|---|
| 139 | +
|---|
| 140 | + if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) /* FW cannot track trust: silently succeed */
|---|
| 141 | + return 0;
|---|
| 142 | +
|---|
| 143 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
|---|
| 144 | + req.fid = cpu_to_le16(vf->fw_fid);
|---|
| 145 | + if (vf->flags & BNXT_VF_TRUST)
|---|
| 146 | + req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
|---|
| 147 | + else
|---|
| 148 | + req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
|---|
| 149 | + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); /* 0 on success, HWRM error otherwise */
|---|
| 150 | +}
|---|
| 151 | + |
|---|
| 122 | 152 | int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) |
|---|
| 123 | 153 | { |
|---|
| 124 | 154 | struct bnxt *bp = netdev_priv(dev); |
|---|
| .. | .. |
|---|
| 133 | 163 | else |
|---|
| 134 | 164 | vf->flags &= ~BNXT_VF_TRUST; |
|---|
| 135 | 165 | |
|---|
| 166 | + bnxt_hwrm_set_trusted_vf(bp, vf); |
|---|
| 136 | 167 | return 0; |
|---|
| 137 | 168 | } |
|---|
| 138 | 169 | |
|---|
| .. | .. |
|---|
| 162 | 193 | else |
|---|
| 163 | 194 | ivi->qos = 0; |
|---|
| 164 | 195 | ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); |
|---|
| 165 | | - ivi->trusted = !!(vf->flags & BNXT_VF_TRUST); |
|---|
| 196 | + ivi->trusted = bnxt_is_trusted_vf(bp, vf); |
|---|
| 166 | 197 | if (!(vf->flags & BNXT_VF_LINK_FORCED)) |
|---|
| 167 | 198 | ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; |
|---|
| 168 | 199 | else if (vf->flags & BNXT_VF_LINK_UP) |
|---|
| .. | .. |
|---|
| 433 | 464 | return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 434 | 465 | } |
|---|
| 435 | 466 | |
|---|
| 467 | +/* Caller holds bp->hwrm_cmd_lock mutex lock */
|---|
| 468 | +static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) /* Re-apply PF-administered settings (MAC, VLAN, tx rates, trust) for one VF to firmware. */
|---|
| 469 | +{
|---|
| 470 | + struct hwrm_func_cfg_input req = {0};
|---|
| 471 | + struct bnxt_vf_info *vf;
|---|
| 472 | +
|---|
| 473 | + vf = &bp->pf.vf[vf_id];
|---|
| 474 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
|---|
| 475 | + req.fid = cpu_to_le16(vf->fw_fid);
|---|
| 476 | +
|---|
| 477 | + if (is_valid_ether_addr(vf->mac_addr)) { /* only re-program a MAC the PF actually assigned */
|---|
| 478 | + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
|---|
| 479 | + memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
|---|
| 480 | + }
|---|
| 481 | + if (vf->vlan) {
|---|
| 482 | + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
|---|
| 483 | + req.dflt_vlan = cpu_to_le16(vf->vlan);
|---|
| 484 | + }
|---|
| 485 | + if (vf->max_tx_rate) {
|---|
| 486 | + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
|---|
| 487 | + req.max_bw = cpu_to_le32(vf->max_tx_rate);
|---|
| 488 | +#ifdef HAVE_IFLA_TX_RATE
|---|
| 489 | + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); /* min rate only when the kernel exposes IFLA tx-rate */
|---|
| 490 | + req.min_bw = cpu_to_le32(vf->min_tx_rate);
|---|
| 491 | +#endif
|---|
| 492 | + }
|---|
| 493 | + if (vf->flags & BNXT_VF_TRUST)
|---|
| 494 | + req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
|---|
| 495 | +
|---|
| 496 | + _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); /* rc ignored: best-effort apply, function is void */
|---|
| 497 | +}
|---|
| 498 | + |
|---|
| 436 | 499 | /* Only called by PF to reserve resources for VFs, returns actual number of |
|---|
| 437 | 500 | * VFs configured, or < 0 on error. |
|---|
| 438 | 501 | */ |
|---|
| 439 | | -static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) |
|---|
| 502 | +static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) |
|---|
| 440 | 503 | { |
|---|
| 441 | 504 | struct hwrm_func_vf_resource_cfg_input req = {0}; |
|---|
| 442 | 505 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
|---|
| .. | .. |
|---|
| 444 | 507 | u16 vf_stat_ctx, vf_vnics, vf_ring_grps; |
|---|
| 445 | 508 | struct bnxt_pf_info *pf = &bp->pf; |
|---|
| 446 | 509 | int i, rc = 0, min = 1; |
|---|
| 510 | + u16 vf_msix = 0; |
|---|
| 511 | + u16 vf_rss; |
|---|
| 447 | 512 | |
|---|
| 448 | 513 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); |
|---|
| 449 | 514 | |
|---|
| 450 | | - vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
|---|
| 451 | | - vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
|---|
| 515 | + if (bp->flags & BNXT_FLAG_CHIP_P5) { |
|---|
| 516 | + vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); |
|---|
| 517 | + vf_ring_grps = 0; |
|---|
| 518 | + } else { |
|---|
| 519 | + vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; |
|---|
| 520 | + } |
|---|
| 521 | + vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp); |
|---|
| 522 | + vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp); |
|---|
| 452 | 523 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
|---|
| 453 | 524 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; |
|---|
| 454 | 525 | else |
|---|
| 455 | 526 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; |
|---|
| 456 | | - vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; |
|---|
| 457 | 527 | vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; |
|---|
| 458 | 528 | vf_vnics = hw_resc->max_vnics - bp->nr_vnics; |
|---|
| 459 | 529 | vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); |
|---|
| 530 | + vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; |
|---|
| 460 | 531 | |
|---|
| 461 | 532 | req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); |
|---|
| 462 | | - req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); |
|---|
| 463 | 533 | if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { |
|---|
| 464 | 534 | min = 0; |
|---|
| 465 | 535 | req.min_rsscos_ctx = cpu_to_le16(min); |
|---|
| .. | .. |
|---|
| 472 | 542 | req.min_l2_ctxs = cpu_to_le16(min); |
|---|
| 473 | 543 | req.min_vnics = cpu_to_le16(min); |
|---|
| 474 | 544 | req.min_stat_ctx = cpu_to_le16(min); |
|---|
| 475 | | - req.min_hw_ring_grps = cpu_to_le16(min); |
|---|
| 545 | + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
|---|
| 546 | + req.min_hw_ring_grps = cpu_to_le16(min); |
|---|
| 476 | 547 | } else { |
|---|
| 477 | 548 | vf_cp_rings /= num_vfs; |
|---|
| 478 | 549 | vf_tx_rings /= num_vfs; |
|---|
| .. | .. |
|---|
| 480 | 551 | vf_vnics /= num_vfs; |
|---|
| 481 | 552 | vf_stat_ctx /= num_vfs; |
|---|
| 482 | 553 | vf_ring_grps /= num_vfs; |
|---|
| 554 | + vf_rss /= num_vfs; |
|---|
| 483 | 555 | |
|---|
| 484 | 556 | req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); |
|---|
| 485 | 557 | req.min_tx_rings = cpu_to_le16(vf_tx_rings); |
|---|
| .. | .. |
|---|
| 488 | 560 | req.min_vnics = cpu_to_le16(vf_vnics); |
|---|
| 489 | 561 | req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); |
|---|
| 490 | 562 | req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); |
|---|
| 563 | + req.min_rsscos_ctx = cpu_to_le16(vf_rss); |
|---|
| 491 | 564 | } |
|---|
| 492 | 565 | req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); |
|---|
| 493 | 566 | req.max_tx_rings = cpu_to_le16(vf_tx_rings); |
|---|
| .. | .. |
|---|
| 496 | 569 | req.max_vnics = cpu_to_le16(vf_vnics); |
|---|
| 497 | 570 | req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); |
|---|
| 498 | 571 | req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); |
|---|
| 572 | + req.max_rsscos_ctx = cpu_to_le16(vf_rss); |
|---|
| 573 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
|---|
| 574 | + req.max_msix = cpu_to_le16(vf_msix / num_vfs); |
|---|
| 499 | 575 | |
|---|
| 500 | 576 | mutex_lock(&bp->hwrm_cmd_lock); |
|---|
| 501 | 577 | for (i = 0; i < num_vfs; i++) { |
|---|
| 578 | + if (reset) |
|---|
| 579 | + __bnxt_set_vf_params(bp, i); |
|---|
| 580 | + |
|---|
| 502 | 581 | req.vf_id = cpu_to_le16(pf->first_vf_id + i); |
|---|
| 503 | 582 | rc = _hwrm_send_message(bp, &req, sizeof(req), |
|---|
| 504 | 583 | HWRM_CMD_TIMEOUT); |
|---|
| 505 | | - if (rc) { |
|---|
| 506 | | - rc = -ENOMEM; |
|---|
| 584 | + if (rc) |
|---|
| 507 | 585 | break; |
|---|
| 508 | | - } |
|---|
| 509 | 586 | pf->active_vfs = i + 1; |
|---|
| 510 | 587 | pf->vf[i].fw_fid = pf->first_vf_id + i; |
|---|
| 511 | 588 | } |
|---|
| .. | .. |
|---|
| 518 | 595 | hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * |
|---|
| 519 | 596 | n; |
|---|
| 520 | 597 | hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; |
|---|
| 521 | | - hw_resc->max_rsscos_ctxs -= pf->active_vfs; |
|---|
| 598 | + hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n; |
|---|
| 522 | 599 | hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; |
|---|
| 523 | 600 | hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; |
|---|
| 601 | + if (bp->flags & BNXT_FLAG_CHIP_P5) |
|---|
| 602 | + hw_resc->max_nqs -= vf_msix; |
|---|
| 524 | 603 | |
|---|
| 525 | 604 | rc = pf->active_vfs; |
|---|
| 526 | 605 | } |
|---|
| .. | .. |
|---|
| 535 | 614 | u32 rc = 0, mtu, i; |
|---|
| 536 | 615 | u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; |
|---|
| 537 | 616 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
|---|
| 538 | | - u16 vf_ring_grps, max_stat_ctxs; |
|---|
| 539 | 617 | struct hwrm_func_cfg_input req = {0}; |
|---|
| 540 | 618 | struct bnxt_pf_info *pf = &bp->pf; |
|---|
| 541 | 619 | int total_vf_tx_rings = 0; |
|---|
| 620 | + u16 vf_ring_grps; |
|---|
| 542 | 621 | |
|---|
| 543 | 622 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); |
|---|
| 544 | 623 | |
|---|
| 545 | | - max_stat_ctxs = hw_resc->max_stat_ctxs; |
|---|
| 546 | | - |
|---|
| 547 | 624 | /* Remaining rings are distributed equally amongs VF's for now */ |
|---|
| 548 | | - vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) - |
|---|
| 549 | | - bp->cp_nr_rings) / num_vfs; |
|---|
| 550 | | - vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; |
|---|
| 625 | + vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; |
|---|
| 626 | + vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs; |
|---|
| 551 | 627 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
|---|
| 552 | 628 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / |
|---|
| 553 | 629 | num_vfs; |
|---|
| .. | .. |
|---|
| 570 | 646 | FUNC_CFG_REQ_ENABLES_NUM_VNICS | |
|---|
| 571 | 647 | FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); |
|---|
| 572 | 648 | |
|---|
| 573 | | - mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
|---|
| 649 | + mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; |
|---|
| 574 | 650 | req.mru = cpu_to_le16(mtu); |
|---|
| 575 | 651 | req.mtu = cpu_to_le16(mtu); |
|---|
| 576 | 652 | |
|---|
| .. | .. |
|---|
| 603 | 679 | total_vf_tx_rings += vf_tx_rsvd; |
|---|
| 604 | 680 | } |
|---|
| 605 | 681 | mutex_unlock(&bp->hwrm_cmd_lock); |
|---|
| 606 | | - if (rc) |
|---|
| 607 | | - rc = -ENOMEM; |
|---|
| 608 | 682 | if (pf->active_vfs) { |
|---|
| 609 | 683 | hw_resc->max_tx_rings -= total_vf_tx_rings; |
|---|
| 610 | 684 | hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; |
|---|
| .. | .. |
|---|
| 618 | 692 | return rc; |
|---|
| 619 | 693 | } |
|---|
| 620 | 694 | |
|---|
| 621 | | -static int bnxt_func_cfg(struct bnxt *bp, int num_vfs) |
|---|
| 695 | +static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset) |
|---|
| 622 | 696 | { |
|---|
| 623 | 697 | if (BNXT_NEW_RM(bp)) |
|---|
| 624 | | - return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs); |
|---|
| 698 | + return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset); |
|---|
| 625 | 699 | else |
|---|
| 626 | 700 | return bnxt_hwrm_func_cfg(bp, num_vfs); |
|---|
| 701 | +} |
|---|
| 702 | + |
|---|
| 703 | +int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) /* Configure FW-side SRIOV resources; may clamp *num_vfs down to what was actually reserved. */
|---|
| 704 | +{
|---|
| 705 | + int rc;
|---|
| 706 | +
|---|
| 707 | + /* Register buffers for VFs */
|---|
| 708 | + rc = bnxt_hwrm_func_buf_rgtr(bp);
|---|
| 709 | + if (rc)
|---|
| 710 | + return rc;
|---|
| 711 | +
|---|
| 712 | + /* Reserve resources for VFs */
|---|
| 713 | + rc = bnxt_func_cfg(bp, *num_vfs, reset); /* returns number of VFs configured, or <= 0 on error */
|---|
| 714 | + if (rc != *num_vfs) {
|---|
| 715 | + if (rc <= 0) {
|---|
| 716 | + netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
|---|
| 717 | + *num_vfs = 0; /* tell the caller no VFs are usable */
|---|
| 718 | + return rc;
|---|
| 719 | + }
|---|
| 720 | + netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
|---|
| 721 | + rc);
|---|
| 722 | + *num_vfs = rc; /* partial reservation: proceed with fewer VFs */
|---|
| 723 | + }
|---|
| 724 | +
|---|
| 725 | + bnxt_ulp_sriov_cfg(bp, *num_vfs); /* notify the ULP layer of the final VF count */
|---|
| 726 | + return 0;
|---|
| 627 | 727 | }
|---|
| 628 | 728 | |
|---|
| 629 | 729 | static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) |
|---|
| .. | .. |
|---|
| 640 | 740 | */ |
|---|
| 641 | 741 | vfs_supported = *num_vfs; |
|---|
| 642 | 742 | |
|---|
| 643 | | - avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
|---|
| 644 | | - avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
|---|
| 743 | + avail_cp = bnxt_get_avail_cp_rings_for_en(bp); |
|---|
| 744 | + avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp); |
|---|
| 645 | 745 | avail_cp = min_t(int, avail_cp, avail_stat); |
|---|
| 646 | 746 | |
|---|
| 647 | 747 | while (vfs_supported) { |
|---|
| .. | .. |
|---|
| 691 | 791 | if (rc) |
|---|
| 692 | 792 | goto err_out1; |
|---|
| 693 | 793 | |
|---|
| 694 | | - /* Reserve resources for VFs */ |
|---|
| 695 | | - rc = bnxt_func_cfg(bp, *num_vfs); |
|---|
| 696 | | - if (rc != *num_vfs) { |
|---|
| 697 | | - if (rc <= 0) { |
|---|
| 698 | | - netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n"); |
|---|
| 699 | | - *num_vfs = 0; |
|---|
| 700 | | - goto err_out2; |
|---|
| 701 | | - } |
|---|
| 702 | | - netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc); |
|---|
| 703 | | - *num_vfs = rc; |
|---|
| 704 | | - } |
|---|
| 705 | | - |
|---|
| 706 | | - /* Register buffers for VFs */ |
|---|
| 707 | | - rc = bnxt_hwrm_func_buf_rgtr(bp); |
|---|
| 794 | + rc = bnxt_cfg_hw_sriov(bp, num_vfs, false); |
|---|
| 708 | 795 | if (rc) |
|---|
| 709 | 796 | goto err_out2; |
|---|
| 710 | | - |
|---|
| 711 | | - bnxt_ulp_sriov_cfg(bp, *num_vfs); |
|---|
| 712 | 797 | |
|---|
| 713 | 798 | rc = pci_enable_sriov(bp->pdev, *num_vfs); |
|---|
| 714 | 799 | if (rc) |
|---|
| .. | .. |
|---|
| 775 | 860 | rtnl_unlock(); |
|---|
| 776 | 861 | return 0; |
|---|
| 777 | 862 | } |
|---|
| 863 | + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
|---|
| 864 | + netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n"); |
|---|
| 865 | + rtnl_unlock(); |
|---|
| 866 | + return 0; |
|---|
| 867 | + } |
|---|
| 778 | 868 | bp->sriov_cfg = true; |
|---|
| 779 | 869 | rtnl_unlock(); |
|---|
| 780 | 870 | |
|---|
| .. | .. |
|---|
| 808 | 898 | { |
|---|
| 809 | 899 | int rc = 0; |
|---|
| 810 | 900 | struct hwrm_fwd_resp_input req = {0}; |
|---|
| 811 | | - struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; |
|---|
| 812 | 901 | |
|---|
| 813 | 902 | if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) |
|---|
| 814 | 903 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 823 | 912 | req.encap_resp_cmpl_ring = encap_resp_cpr; |
|---|
| 824 | 913 | memcpy(req.encap_resp, encap_resp, msg_size); |
|---|
| 825 | 914 | |
|---|
| 826 | | - mutex_lock(&bp->hwrm_cmd_lock); |
|---|
| 827 | | - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 828 | | - |
|---|
| 829 | | - if (rc) { |
|---|
| 915 | + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 916 | + if (rc) |
|---|
| 830 | 917 | netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); |
|---|
| 831 | | - goto fwd_resp_exit; |
|---|
| 832 | | - } |
|---|
| 833 | | - |
|---|
| 834 | | - if (resp->error_code) { |
|---|
| 835 | | - netdev_err(bp->dev, "hwrm_fwd_resp error %d\n", |
|---|
| 836 | | - resp->error_code); |
|---|
| 837 | | - rc = -1; |
|---|
| 838 | | - } |
|---|
| 839 | | - |
|---|
| 840 | | -fwd_resp_exit: |
|---|
| 841 | | - mutex_unlock(&bp->hwrm_cmd_lock); |
|---|
| 842 | 918 | return rc; |
|---|
| 843 | 919 | } |
|---|
| 844 | 920 | |
|---|
| .. | .. |
|---|
| 847 | 923 | { |
|---|
| 848 | 924 | int rc = 0; |
|---|
| 849 | 925 | struct hwrm_reject_fwd_resp_input req = {0}; |
|---|
| 850 | | - struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; |
|---|
| 851 | 926 | |
|---|
| 852 | 927 | if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) |
|---|
| 853 | 928 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 858 | 933 | req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); |
|---|
| 859 | 934 | memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); |
|---|
| 860 | 935 | |
|---|
| 861 | | - mutex_lock(&bp->hwrm_cmd_lock); |
|---|
| 862 | | - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 863 | | - |
|---|
| 864 | | - if (rc) { |
|---|
| 936 | + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 937 | + if (rc) |
|---|
| 865 | 938 | netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); |
|---|
| 866 | | - goto fwd_err_resp_exit; |
|---|
| 867 | | - } |
|---|
| 868 | | - |
|---|
| 869 | | - if (resp->error_code) { |
|---|
| 870 | | - netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n", |
|---|
| 871 | | - resp->error_code); |
|---|
| 872 | | - rc = -1; |
|---|
| 873 | | - } |
|---|
| 874 | | - |
|---|
| 875 | | -fwd_err_resp_exit: |
|---|
| 876 | | - mutex_unlock(&bp->hwrm_cmd_lock); |
|---|
| 877 | 939 | return rc; |
|---|
| 878 | 940 | } |
|---|
| 879 | 941 | |
|---|
| .. | .. |
|---|
| 882 | 944 | { |
|---|
| 883 | 945 | int rc = 0; |
|---|
| 884 | 946 | struct hwrm_exec_fwd_resp_input req = {0}; |
|---|
| 885 | | - struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; |
|---|
| 886 | 947 | |
|---|
| 887 | 948 | if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) |
|---|
| 888 | 949 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 893 | 954 | req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); |
|---|
| 894 | 955 | memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); |
|---|
| 895 | 956 | |
|---|
| 896 | | - mutex_lock(&bp->hwrm_cmd_lock); |
|---|
| 897 | | - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 898 | | - |
|---|
| 899 | | - if (rc) { |
|---|
| 957 | + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
|---|
| 958 | + if (rc) |
|---|
| 900 | 959 | netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); |
|---|
| 901 | | - goto exec_fwd_resp_exit; |
|---|
| 902 | | - } |
|---|
| 903 | | - |
|---|
| 904 | | - if (resp->error_code) { |
|---|
| 905 | | - netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n", |
|---|
| 906 | | - resp->error_code); |
|---|
| 907 | | - rc = -1; |
|---|
| 908 | | - } |
|---|
| 909 | | - |
|---|
| 910 | | -exec_fwd_resp_exit: |
|---|
| 911 | | - mutex_unlock(&bp->hwrm_cmd_lock); |
|---|
| 912 | 960 | return rc; |
|---|
| 913 | 961 | } |
|---|
| 914 | 962 | |
|---|
| .. | .. |
|---|
| 922 | 970 | * if the PF assigned MAC address is zero |
|---|
| 923 | 971 | */ |
|---|
| 924 | 972 | if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { |
|---|
| 973 | + bool trust = bnxt_is_trusted_vf(bp, vf); |
|---|
| 974 | + |
|---|
| 925 | 975 | if (is_valid_ether_addr(req->dflt_mac_addr) && |
|---|
| 926 | | - ((vf->flags & BNXT_VF_TRUST) || |
|---|
| 927 | | - !is_valid_ether_addr(vf->mac_addr) || |
|---|
| 976 | + (trust || !is_valid_ether_addr(vf->mac_addr) || |
|---|
| 928 | 977 | ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) { |
|---|
| 929 | 978 | ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); |
|---|
| 930 | 979 | return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); |
|---|
| .. | .. |
|---|
| 949 | 998 | * Otherwise, it must match the VF MAC address if firmware spec >= |
|---|
| 950 | 999 | * 1.2.2 |
|---|
| 951 | 1000 | */ |
|---|
| 952 | | - if (vf->flags & BNXT_VF_TRUST) { |
|---|
| 1001 | + if (bnxt_is_trusted_vf(bp, vf)) { |
|---|
| 953 | 1002 | mac_ok = true; |
|---|
| 954 | 1003 | } else if (is_valid_ether_addr(vf->mac_addr)) { |
|---|
| 955 | 1004 | if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) |
|---|
| .. | .. |
|---|
| 980 | 1029 | rc = bnxt_hwrm_exec_fwd_resp( |
|---|
| 981 | 1030 | bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); |
|---|
| 982 | 1031 | } else { |
|---|
| 983 | | - struct hwrm_port_phy_qcfg_output phy_qcfg_resp; |
|---|
| 1032 | + struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0}; |
|---|
| 984 | 1033 | struct hwrm_port_phy_qcfg_input *phy_qcfg_req; |
|---|
| 985 | 1034 | |
|---|
| 986 | 1035 | phy_qcfg_req = |
|---|
| .. | .. |
|---|
| 1127 | 1176 | } |
|---|
| 1128 | 1177 | #else |
|---|
| 1129 | 1178 | |
|---|
| 1179 | +int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) /* Stub for builds without SRIOV support (#else branch): any nonzero VF request is unsupported. */
|---|
| 1180 | +{
|---|
| 1181 | + if (*num_vfs)
|---|
| 1182 | + return -EOPNOTSUPP;
|---|
| 1183 | + return 0; /* zero VFs requested is trivially satisfied */
|---|
| 1184 | +}
|---|
| 1185 | + |
|---|
| 1130 | 1186 | void bnxt_sriov_disable(struct bnxt *bp) |
|---|
| 1131 | 1187 | { |
|---|
| 1132 | 1188 | } |
|---|