From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] add wifi6 8852be driver

---
 kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 415 ++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 227 insertions(+), 188 deletions(-)

diff --git a/kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 3d834d4..17faac7 100644
--- a/kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -53,35 +53,6 @@
 
 #define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
 
-static int get_msix_idx_from_bmap(struct adapter *adap)
-{
-	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
-	unsigned long flags;
-	unsigned int msix_idx;
-
-	spin_lock_irqsave(&bmap->lock, flags);
-	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
-	if (msix_idx < bmap->mapsize) {
-		__set_bit(msix_idx, bmap->msix_bmap);
-	} else {
-		spin_unlock_irqrestore(&bmap->lock, flags);
-		return -ENOSPC;
-	}
-
-	spin_unlock_irqrestore(&bmap->lock, flags);
-	return msix_idx;
-}
-
-static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
-{
-	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bmap->lock, flags);
-	__clear_bit(msix_idx, bmap->msix_bmap);
-	spin_unlock_irqrestore(&bmap->lock, flags);
-}
-
 /* Flush the aggregated lro sessions */
 static void uldrx_flush_handler(struct sge_rspq *q)
 {
@@ -138,15 +109,15 @@
 	       struct sge_uld_rxq_info *rxq_info, bool lro)
 {
 	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
-	int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
 	struct sge_ofld_rxq *q = rxq_info->uldrxq;
 	unsigned short *ids = rxq_info->rspq_id;
+	int i, err, msi_idx, que_idx = 0;
 	struct sge *s = &adap->sge;
 	unsigned int per_chan;
 
 	per_chan = rxq_info->nrxq / adap->params.nports;
 
-	if (adap->flags & USING_MSIX)
+	if (adap->flags & CXGB4_USING_MSIX)
 		msi_idx = 1;
 	else
 		msi_idx = -((int)s->intrq.abs_id + 1);
@@ -159,12 +130,18 @@
 		}
 
 		if (msi_idx >= 0) {
-			bmap_idx = get_msix_idx_from_bmap(adap);
-			if (bmap_idx < 0) {
+			msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
+			if (msi_idx < 0) {
 				err = -ENOSPC;
 				goto freeout;
 			}
-			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+
+			snprintf(adap->msix_info[msi_idx].desc,
+				 sizeof(adap->msix_info[msi_idx].desc),
+				 "%s-%s%d",
+				 adap->port[0]->name, rxq_info->name, i);
+
+			q->msix = &adap->msix_info[msi_idx];
 		}
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
 				       adap->port[que_idx++ / per_chan],
@@ -175,8 +152,7 @@
 				       0);
 		if (err)
 			goto freeout;
-		if (msi_idx >= 0)
-			rxq_info->msix_tbl[i] = bmap_idx;
+
 		memset(&q->stats, 0, sizeof(q->stats));
 		if (ids)
 			ids[i] = q->rspq.abs_id;
@@ -188,6 +164,8 @@
 		if (q->rspq.desc)
 			free_rspq_fl(adap, &q->rspq,
 				     q->fl.size ? &q->fl : NULL);
+		if (q->msix)
+			cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
 	}
 	return err;
 }
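
The two helpers removed above are a textbook spinlock-guarded bitmap allocator; the patch drops them in favour of the shared cxgb4_get_msix_idx_from_bmap()/cxgb4_free_msix_idx_in_bmap() helpers, so ULD queues now borrow entries from the adapter-wide msix_info[] table. A minimal standalone sketch of the same allocate/free pattern follows; the struct, pool size, and names here are illustrative, not the driver's:

	#include <linux/bitmap.h>
	#include <linux/errno.h>
	#include <linux/spinlock.h>

	#define VEC_MAPSIZE 32			/* illustrative pool size */

	struct vec_bmap {
		spinlock_t lock;
		DECLARE_BITMAP(map, VEC_MAPSIZE);
	};

	static int vec_alloc(struct vec_bmap *b)
	{
		unsigned long flags;
		int idx;

		spin_lock_irqsave(&b->lock, flags);
		idx = find_first_zero_bit(b->map, VEC_MAPSIZE);
		if (idx < VEC_MAPSIZE)
			__set_bit(idx, b->map);	/* claim while holding the lock */
		else
			idx = -ENOSPC;		/* pool exhausted */
		spin_unlock_irqrestore(&b->lock, flags);

		return idx;
	}

	static void vec_free(struct vec_bmap *b, unsigned int idx)
	{
		unsigned long flags;

		spin_lock_irqsave(&b->lock, flags);
		__clear_bit(idx, b->map);
		spin_unlock_irqrestore(&b->lock, flags);
	}

Returning the index directly lets callers treat negative values as errno-style failures, which is exactly how alloc_uld_rxqs() consumes msi_idx in the hunk above.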
@@ -196,21 +174,14 @@
 setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-	int i, ret = 0;
+	int i, ret;
 
-	if (adap->flags & USING_MSIX) {
-		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
-					     sizeof(unsigned short),
-					     GFP_KERNEL);
-		if (!rxq_info->msix_tbl)
-			return -ENOMEM;
-	}
-
-	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
+	ret = alloc_uld_rxqs(adap, rxq_info, lro);
+	if (ret)
+		return ret;
 
 	/* Tell uP to route control queue completions to rdma rspq */
-	if (adap->flags & FULL_INIT_DONE &&
-	    !ret && uld_type == CXGB4_ULD_RDMA) {
+	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
 		struct sge *s = &adap->sge;
 		unsigned int cmplqid;
 		u32 param, cmdop;
@@ -242,7 +213,7 @@
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 
-	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
 		struct sge *s = &adap->sge;
 		u32 param, cmdop, cmplqid = 0;
 		int i;
@@ -261,8 +232,6 @@
 		t4_free_uld_rxqs(adap, rxq_info->nciq,
 				 rxq_info->uldrxq + rxq_info->nrxq);
 	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
-	if (adap->flags & USING_MSIX)
-		kfree(rxq_info->msix_tbl);
 }
 
 static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
@@ -276,7 +245,7 @@
 	if (!rxq_info)
 		return -ENOMEM;
 
-	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
 		i = s->nqs_per_uld;
 		rxq_info->nrxq = roundup(i, adap->params.nports);
 	} else {
@@ -287,7 +256,7 @@
 	if (!uld_info->ciq) {
 		rxq_info->nciq = 0;
 	} else {
-		if (adap->flags & USING_MSIX)
+		if (adap->flags & CXGB4_USING_MSIX)
 			rxq_info->nciq = min_t(int, s->nqs_per_uld,
 					       num_online_cpus());
 		else
@@ -355,25 +324,30 @@
 request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+	struct msix_info *minfo;
+	unsigned int idx;
 	int err = 0;
-	unsigned int idx, bmap_idx;
 
 	for_each_uldrxq(rxq_info, idx) {
-		bmap_idx = rxq_info->msix_tbl[idx];
-		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
+		minfo = rxq_info->uldrxq[idx].msix;
+		err = request_irq(minfo->vec,
 				  t4_sge_intr_msix, 0,
-				  adap->msix_info_ulds[bmap_idx].desc,
+				  minfo->desc,
 				  &rxq_info->uldrxq[idx].rspq);
 		if (err)
 			goto unwind;
+
+		cxgb4_set_msix_aff(adap, minfo->vec,
+				   &minfo->aff_mask, idx);
 	}
 	return 0;
+
 unwind:
 	while (idx-- > 0) {
-		bmap_idx = rxq_info->msix_tbl[idx];
-		free_msix_idx_in_bmap(adap, bmap_idx);
-		free_irq(adap->msix_info_ulds[bmap_idx].vec,
-			 &rxq_info->uldrxq[idx].rspq);
+		minfo = rxq_info->uldrxq[idx].msix;
+		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
+		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
 	}
 	return err;
 }
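
The reworked request_msix_queue_irqs_uld() keeps the usual acquire-all-or-unwind IRQ pattern, now also setting and clearing a CPU affinity hint per vector. The skeleton of that pattern, reduced to its essentials (struct uld_irq, its fields, and dummy_handler are hypothetical stand-ins):

	#include <linux/interrupt.h>

	struct uld_irq {			/* hypothetical per-queue record */
		unsigned int vec;
		char desc[32];
		void *cookie;
	};

	static irqreturn_t dummy_handler(int irq, void *dev)
	{
		return IRQ_HANDLED;
	}

	static int request_queue_irqs(struct uld_irq *qs, int n)
	{
		int idx, err;

		for (idx = 0; idx < n; idx++) {
			err = request_irq(qs[idx].vec, dummy_handler, 0,
					  qs[idx].desc, qs[idx].cookie);
			if (err)
				goto unwind;
			/* the patch additionally sets an affinity hint here */
		}
		return 0;

	unwind:
		while (idx-- > 0)	/* undo only what was acquired */
			free_irq(qs[idx].vec, qs[idx].cookie);
		return err;
	}

The `while (idx-- > 0)` form walks back over only the vectors that were successfully requested, which is why the same idx variable must carry over from the forward loop.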
@@ -382,49 +356,15 @@
 free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-	unsigned int idx, bmap_idx;
+	struct msix_info *minfo;
+	unsigned int idx;
 
 	for_each_uldrxq(rxq_info, idx) {
-		bmap_idx = rxq_info->msix_tbl[idx];
-
-		free_msix_idx_in_bmap(adap, bmap_idx);
-		free_irq(adap->msix_info_ulds[bmap_idx].vec,
-			 &rxq_info->uldrxq[idx].rspq);
+		minfo = rxq_info->uldrxq[idx].msix;
+		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
+		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
 	}
-}
-
-static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
-{
-	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-	int n = sizeof(adap->msix_info_ulds[0].desc);
-	unsigned int idx, bmap_idx;
-
-	for_each_uldrxq(rxq_info, idx) {
-		bmap_idx = rxq_info->msix_tbl[idx];
-
-		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
-			 adap->port[0]->name, rxq_info->name, idx);
-	}
-}
-
-static void enable_rx(struct adapter *adap, struct sge_rspq *q)
-{
-	if (!q)
-		return;
-
-	if (q->handler)
-		napi_enable(&q->napi);
-
-	/* 0-increment GTS to start the timer and enable interrupts */
-	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
-		     SEINTARM_V(q->intr_params) |
-		     INGRESSQID_V(q->cntxt_id));
-}
-
-static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
-{
-	if (q && q->handler)
-		napi_disable(&q->napi);
 }
 
 static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
@@ -432,8 +372,14 @@
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 	int idx;
 
-	for_each_uldrxq(rxq_info, idx)
-		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
+	for_each_uldrxq(rxq_info, idx) {
+		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
+
+		if (!q)
+			continue;
+
+		cxgb4_enable_rx(adap, q);
+	}
 }
 
 static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
@@ -441,8 +387,14 @@
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 	int idx;
 
-	for_each_uldrxq(rxq_info, idx)
-		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
+	for_each_uldrxq(rxq_info, idx) {
+		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
+
+		if (!q)
+			continue;
+
+		cxgb4_quiesce_rx(q);
+	}
 }
 
 static void
@@ -523,10 +475,20 @@
 	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
 	if (!txq_info)
 		return -ENOMEM;
+	if (uld_type == CXGB4_ULD_CRYPTO) {
+		i = min_t(int, adap->vres.ncrypto_fc,
+			  num_online_cpus());
+		txq_info->ntxq = rounddown(i, adap->params.nports);
+		if (txq_info->ntxq <= 0) {
+			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
+			kfree(txq_info);
+			return -EINVAL;
+		}
 
-	i = min_t(int, uld_info->ntxq, num_online_cpus());
-	txq_info->ntxq = roundup(i, adap->params.nports);
-
+	} else {
+		i = min_t(int, uld_info->ntxq, num_online_cpus());
+		txq_info->ntxq = roundup(i, adap->params.nports);
+	}
 	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
 				   GFP_KERNEL);
 	if (!txq_info->uldtxq) {
@@ -549,11 +511,14 @@
 		  struct cxgb4_lld_info *lli)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+	int tx_uld_type = TX_ULD(uld_type);
+	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
 
 	lli->rxq_ids = rxq_info->rspq_id;
 	lli->nrxq = rxq_info->nrxq;
 	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
 	lli->nciq = rxq_info->nciq;
+	lli->ntxq = txq_info->ntxq;
 }
 
 int t4_uld_mem_alloc(struct adapter *adap)
@@ -601,10 +566,10 @@
 	adap->uld[type].add = NULL;
 	release_sge_txq_uld(adap, type);
 
-	if (adap->flags & FULL_INIT_DONE)
+	if (adap->flags & CXGB4_FULL_INIT_DONE)
 		quiesce_rx_uld(adap, type);
 
-	if (adap->flags & USING_MSIX)
+	if (adap->flags & CXGB4_USING_MSIX)
 		free_msix_queue_irqs_uld(adap, type);
 
 	free_sge_queues_uld(adap, type);
@@ -615,6 +580,9 @@
 void t4_uld_clean_up(struct adapter *adap)
 {
 	unsigned int i;
+
+	if (!is_uld(adap))
+		return;
 
 	mutex_lock(&uld_mutex);
 	for (i = 0; i < CXGB4_ULD_MAX; i++) {
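
The new CXGB4_ULD_CRYPTO branch above rounds the queue count down to a whole multiple of the port count and treats zero as an error, while other ULDs keep rounding up. A small illustration of the difference, with made-up values (crypto_txq_count is a hypothetical helper, not driver code):

	#include <linux/kernel.h>

	/* Queue count for a crypto ULD on a hypothetical adapter layout. */
	static int crypto_txq_count(int limit, int ncpus, int nports)
	{
		int i = min_t(int, limit, ncpus);

		/* e.g. limit = 6, ncpus = 8, nports = 4:
		 *   rounddown(6, 4) = 4, whereas roundup(6, 4) = 8;
		 * and with limit = 2, rounddown(2, 4) = 0, which is the
		 * "Crypto Tx Queues can't be zero" -EINVAL case above.
		 */
		return rounddown(i, nports);
	}

Rounding down guarantees every port gets the same whole number of queues; rounding up instead pads the count, which is acceptable for ULDs whose queues are not hard-limited by a firmware resource.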
@@ -637,7 +605,6 @@
 	lld->ports = adap->port;
 	lld->vr = &adap->vres;
 	lld->mtus = adap->params.mtus;
-	lld->ntxq = adap->sge.ofldqsets;
 	lld->nchan = adap->params.nports;
 	lld->nports = adap->params.nports;
 	lld->wr_cred = adap->params.ofldq_wr_cred;
@@ -651,6 +618,7 @@
 	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
 	lld->udb_density = 1 << adap->params.sge.eq_qpp;
 	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
 	lld->filt_mode = adap->params.tp.vlan_pri_map;
 	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
 	for (i = 0; i < NCHAN; i++)
@@ -663,7 +631,7 @@
 	lld->sge_egrstatuspagesize = adap->sge.stat_len;
 	lld->sge_pktshift = adap->sge.pktshift;
 	lld->ulp_crypto = adap->params.crypto;
-	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
 	lld->max_ordird_qp = adap->params.max_ordird_qp;
 	lld->max_ird_adapter = adap->params.max_ird_adapter;
 	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
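
The new sge_host_page_size field above is decoded as 1 << (hps + 10), which suggests hps stores log2 of the host page size in KiB units; that interpretation is an assumption here, only the shift itself comes from the patch. A tiny worked example:

	/* Assumed encoding: hps = log2(host page size) - 10. */
	static unsigned long sge_host_page_bytes(unsigned int hps)
	{
		return 1UL << (hps + 10);	/* hps = 2 -> 4096 (4 KiB pages) */
	}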
@@ -692,10 +660,133 @@
 	adap->uld[uld].handle = handle;
 	t4_register_netevent_notifier();
 
-	if (adap->flags & FULL_INIT_DONE)
+	if (adap->flags & CXGB4_FULL_INIT_DONE)
 		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
 
 	return 0;
+}
+
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+static bool cxgb4_uld_in_use(struct adapter *adap)
+{
+	const struct tid_info *t = &adap->tids;
+
+	return (atomic_read(&t->conns_in_use) || t->stids_in_use);
+}
+
+/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
+ * @adap: adapter info
+ * @enable: 1 to enable / 0 to disable ktls settings.
+ */
+int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
+{
+	int ret = 0;
+	u32 params =
+		FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
+		FW_PARAMS_PARAM_Y_V(enable) |
+		FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
+
+	if (enable) {
+		if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
+			/* At this moment if ULD connection are up means, other
+			 * ULD is/are already active, return failure.
+			 */
+			if (cxgb4_uld_in_use(adap)) {
+				dev_dbg(adap->pdev_dev,
+					"ULD connections (tid/stid) active. Can't enable kTLS\n");
+				return -EINVAL;
+			}
+			ret = t4_set_params(adap, adap->mbox, adap->pf,
+					    0, 1, &params, &params);
+			if (ret)
+				return ret;
+			refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
+			pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
+		} else {
+			/* ktls settings already up, just increment refcount. */
+			refcount_inc(&adap->chcr_ktls.ktls_refcount);
+		}
+	} else {
+		/* return failure if refcount is already 0. */
+		if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
+			return -EINVAL;
+		/* decrement refcount and test, if 0, disable ktls feature,
+		 * else return command success.
+		 */
+		if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
+			ret = t4_set_params(adap, adap->mbox, adap->pf,
+					    0, 1, &params, &params);
+			if (ret)
+				return ret;
+			pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
+		}
+	}
+
+	return ret;
+}
+#endif
+
+static void cxgb4_uld_alloc_resources(struct adapter *adap,
+				      enum cxgb4_uld type,
+				      const struct cxgb4_uld_info *p)
+{
+	int ret = 0;
+
+	if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+	    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+		return;
+	if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+		return;
+	ret = cfg_queues_uld(adap, type, p);
+	if (ret)
+		goto out;
+	ret = setup_sge_queues_uld(adap, type, p->lro);
+	if (ret)
+		goto free_queues;
+	if (adap->flags & CXGB4_USING_MSIX) {
+		ret = request_msix_queue_irqs_uld(adap, type);
+		if (ret)
+			goto free_rxq;
+	}
+	if (adap->flags & CXGB4_FULL_INIT_DONE)
+		enable_rx_uld(adap, type);
+	if (adap->uld[type].add)
+		goto free_irq;
+	ret = setup_sge_txq_uld(adap, type, p);
+	if (ret)
+		goto free_irq;
+	adap->uld[type] = *p;
+	ret = uld_attach(adap, type);
+	if (ret)
+		goto free_txq;
+	return;
+free_txq:
+	release_sge_txq_uld(adap, type);
+free_irq:
+	if (adap->flags & CXGB4_FULL_INIT_DONE)
+		quiesce_rx_uld(adap, type);
+	if (adap->flags & CXGB4_USING_MSIX)
+		free_msix_queue_irqs_uld(adap, type);
+free_rxq:
+	free_sge_queues_uld(adap, type);
+free_queues:
+	free_queues_uld(adap, type);
+out:
+	dev_warn(adap->pdev_dev,
+		 "ULD registration failed for uld type %d\n", type);
+}
+
+void cxgb4_uld_enable(struct adapter *adap)
+{
+	struct cxgb4_uld_list *uld_entry;
+
+	mutex_lock(&uld_mutex);
+	list_add_tail(&adap->list_node, &adapter_list);
+	list_for_each_entry(uld_entry, &uld_list, list_node)
+		cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
+					  &uld_entry->uld_info);
+	mutex_unlock(&uld_mutex);
 }
 
 /* cxgb4_register_uld - register an upper-layer driver
@@ -703,90 +794,30 @@
  * @type: the ULD type
  * @p: the ULD methods
  *
  * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
+ * about any presently available devices that support its type.
  */
-int cxgb4_register_uld(enum cxgb4_uld type,
-		       const struct cxgb4_uld_info *p)
+void cxgb4_register_uld(enum cxgb4_uld type,
+			const struct cxgb4_uld_info *p)
 {
-	unsigned int adap_idx = 0;
+	struct cxgb4_uld_list *uld_entry;
 	struct adapter *adap;
-	int ret = 0;
 
 	if (type >= CXGB4_ULD_MAX)
-		return -EINVAL;
+		return;
 
+	uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
+	if (!uld_entry)
+		return;
+
+	memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
 	mutex_lock(&uld_mutex);
-	list_for_each_entry(adap, &adapter_list, list_node) {
-		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
-		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
-			continue;
-		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
-			continue;
-		ret = cfg_queues_uld(adap, type, p);
-		if (ret)
-			goto out;
-		ret = setup_sge_queues_uld(adap, type, p->lro);
-		if (ret)
-			goto free_queues;
-		if (adap->flags & USING_MSIX) {
-			name_msix_vecs_uld(adap, type);
-			ret = request_msix_queue_irqs_uld(adap, type);
-			if (ret)
-				goto free_rxq;
-		}
-		if (adap->flags & FULL_INIT_DONE)
-			enable_rx_uld(adap, type);
-		if (adap->uld[type].add) {
-			ret = -EBUSY;
-			goto free_irq;
-		}
-		ret = setup_sge_txq_uld(adap, type, p);
-		if (ret)
-			goto free_irq;
-		adap->uld[type] = *p;
-		ret = uld_attach(adap, type);
-		if (ret)
-			goto free_txq;
-		adap_idx++;
-	}
-	mutex_unlock(&uld_mutex);
-	return 0;
+	list_for_each_entry(adap, &adapter_list, list_node)
+		cxgb4_uld_alloc_resources(adap, type, p);
 
-free_txq:
-	release_sge_txq_uld(adap, type);
-free_irq:
-	if (adap->flags & FULL_INIT_DONE)
-		quiesce_rx_uld(adap, type);
-	if (adap->flags & USING_MSIX)
-		free_msix_queue_irqs_uld(adap, type);
-free_rxq:
-	free_sge_queues_uld(adap, type);
-free_queues:
-	free_queues_uld(adap, type);
-out:
-
-	list_for_each_entry(adap, &adapter_list, list_node) {
-		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
-		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
-			continue;
-		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
-			continue;
-		if (!adap_idx)
-			break;
-		adap->uld[type].handle = NULL;
-		adap->uld[type].add = NULL;
-		release_sge_txq_uld(adap, type);
-		if (adap->flags & FULL_INIT_DONE)
-			quiesce_rx_uld(adap, type);
-		if (adap->flags & USING_MSIX)
-			free_msix_queue_irqs_uld(adap, type);
-		free_sge_queues_uld(adap, type);
-		free_queues_uld(adap, type);
-		adap_idx--;
-	}
+	uld_entry->uld_type = type;
+	list_add_tail(&uld_entry->list_node, &uld_list);
 	mutex_unlock(&uld_mutex);
-	return ret;
+	return;
 }
 EXPORT_SYMBOL(cxgb4_register_uld);
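
The registration path above becomes void: instead of binding eagerly and returning -EBUSY per adapter, it records the ULD on a global uld_list so adapters probed later can bind through cxgb4_uld_enable(). A simplified sketch of that record-then-bind-lazily pattern (the my_* names are hypothetical stand-ins for the driver's types):

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct my_uld_entry {		/* hypothetical mirror of cxgb4_uld_list */
		struct list_head list_node;
		int uld_type;
	};

	static LIST_HEAD(my_uld_list);
	static DEFINE_MUTEX(my_uld_mutex);

	static void my_register_uld(int type)
	{
		struct my_uld_entry *e;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return;		/* void API: allocation failure is silent */

		e->uld_type = type;
		mutex_lock(&my_uld_mutex);
		/* binding to every already-probed adapter would happen here */
		list_add_tail(&e->list_node, &my_uld_list);
		mutex_unlock(&my_uld_mutex);
	}

One consequence of the void API, visible in the diff, is that per-adapter setup failures are reported only via dev_warn() in cxgb4_uld_alloc_resources() rather than propagated to the caller.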
@@ -798,6 +829,7 @@
  */
 int cxgb4_unregister_uld(enum cxgb4_uld type)
 {
+	struct cxgb4_uld_list *uld_entry, *tmp;
 	struct adapter *adap;
 
 	if (type >= CXGB4_ULD_MAX)
@@ -813,6 +845,13 @@
 
 		cxgb4_shutdown_uld_adapter(adap, type);
 	}
+
+	list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
+		if (uld_entry->uld_type == type) {
+			list_del(&uld_entry->list_node);
+			kfree(uld_entry);
+		}
+	}
 	mutex_unlock(&uld_mutex);
 
 	return 0;
-- 
Gitblit v1.6.2
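
The unregister hunk iterates with list_for_each_entry_safe() because it frees nodes while walking the list; the _safe variant caches the next element before the loop body runs. Continuing the hypothetical my_* sketch from above:

	static void my_unregister_uld(int type)
	{
		struct my_uld_entry *e, *tmp;

		mutex_lock(&my_uld_mutex);
		list_for_each_entry_safe(e, tmp, &my_uld_list, list_node) {
			if (e->uld_type == type) {
				list_del(&e->list_node);	/* unlink first */
				kfree(e);	/* safe: tmp already points past e */
			}
		}
		mutex_unlock(&my_uld_mutex);
	}

Using plain list_for_each_entry() here would dereference freed memory when advancing the cursor after kfree().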