forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -53,35 +53,6 @@
 
 #define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
 
-static int get_msix_idx_from_bmap(struct adapter *adap)
-{
-	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
-	unsigned long flags;
-	unsigned int msix_idx;
-
-	spin_lock_irqsave(&bmap->lock, flags);
-	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
-	if (msix_idx < bmap->mapsize) {
-		__set_bit(msix_idx, bmap->msix_bmap);
-	} else {
-		spin_unlock_irqrestore(&bmap->lock, flags);
-		return -ENOSPC;
-	}
-
-	spin_unlock_irqrestore(&bmap->lock, flags);
-	return msix_idx;
-}
-
-static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
-{
-	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bmap->lock, flags);
-	__clear_bit(msix_idx, bmap->msix_bmap);
-	spin_unlock_irqrestore(&bmap->lock, flags);
-}
-
 /* Flush the aggregated lro sessions */
 static void uldrx_flush_handler(struct sge_rspq *q)
 {
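
Note on the hunk above: the deleted helpers implement a classic bitmap ID allocator — take a lock, find the first clear bit, claim it, and return its index (or -ENOSPC when the map is full); freeing just clears the bit. The replacements used later in this patch, cxgb4_get_msix_idx_from_bmap()/cxgb4_free_msix_idx_in_bmap(), apply the same scheme to one adapter-wide MSI-X map instead of a ULD-only one. A minimal userspace sketch of the idiom (illustrative only — a pthread mutex and a byte array stand in for the kernel's spinlock and bit ops):

/* Build with: cc -pthread bmap.c */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAPSIZE 32

static pthread_mutex_t bmap_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char bmap[MAPSIZE];	/* 0 = free, 1 = in use */

/* Claim the first free slot, or return -ENOSPC when the map is full. */
static int get_idx_from_bmap(void)
{
	int i, ret = -ENOSPC;

	pthread_mutex_lock(&bmap_lock);
	for (i = 0; i < MAPSIZE; i++) {
		if (!bmap[i]) {
			bmap[i] = 1;
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&bmap_lock);
	return ret;
}

static void free_idx_in_bmap(int idx)
{
	pthread_mutex_lock(&bmap_lock);
	bmap[idx] = 0;
	pthread_mutex_unlock(&bmap_lock);
}

int main(void)
{
	int a = get_idx_from_bmap();	/* 0 */
	int b = get_idx_from_bmap();	/* 1 */

	free_idx_in_bmap(a);
	printf("%d %d %d\n", a, b, get_idx_from_bmap());	/* 0 1 0 */
	return 0;
}
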
@@ -138,15 +109,15 @@
 			  struct sge_uld_rxq_info *rxq_info, bool lro)
 {
 	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
-	int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
 	struct sge_ofld_rxq *q = rxq_info->uldrxq;
 	unsigned short *ids = rxq_info->rspq_id;
+	int i, err, msi_idx, que_idx = 0;
 	struct sge *s = &adap->sge;
 	unsigned int per_chan;
 
 	per_chan = rxq_info->nrxq / adap->params.nports;
 
-	if (adap->flags & USING_MSIX)
+	if (adap->flags & CXGB4_USING_MSIX)
 		msi_idx = 1;
 	else
 		msi_idx = -((int)s->intrq.abs_id + 1);
@@ -159,12 +130,18 @@
 		}
 
 		if (msi_idx >= 0) {
-			bmap_idx = get_msix_idx_from_bmap(adap);
-			if (bmap_idx < 0) {
+			msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
+			if (msi_idx < 0) {
 				err = -ENOSPC;
 				goto freeout;
 			}
-			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+
+			snprintf(adap->msix_info[msi_idx].desc,
+				 sizeof(adap->msix_info[msi_idx].desc),
+				 "%s-%s%d",
+				 adap->port[0]->name, rxq_info->name, i);
+
+			q->msix = &adap->msix_info[msi_idx];
 		}
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
 				       adap->port[que_idx++ / per_chan],
@@ -175,8 +152,7 @@
 				       0);
 		if (err)
 			goto freeout;
-		if (msi_idx >= 0)
-			rxq_info->msix_tbl[i] = bmap_idx;
+
 		memset(&q->stats, 0, sizeof(q->stats));
 		if (ids)
 			ids[i] = q->rspq.abs_id;
@@ -188,6 +164,8 @@
 		if (q->rspq.desc)
 			free_rspq_fl(adap, &q->rspq,
 				     q->fl.size ? &q->fl : NULL);
+		if (q->msix)
+			cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
 	}
 	return err;
 }
@@ -196,21 +174,14 @@
 setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-	int i, ret = 0;
+	int i, ret;
 
-	if (adap->flags & USING_MSIX) {
-		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
-					     sizeof(unsigned short),
-					     GFP_KERNEL);
-		if (!rxq_info->msix_tbl)
-			return -ENOMEM;
-	}
-
-	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
+	ret = alloc_uld_rxqs(adap, rxq_info, lro);
+	if (ret)
+		return ret;
 
 	/* Tell uP to route control queue completions to rdma rspq */
-	if (adap->flags & FULL_INIT_DONE &&
-	    !ret && uld_type == CXGB4_ULD_RDMA) {
+	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
 		struct sge *s = &adap->sge;
 		unsigned int cmplqid;
 		u32 param, cmdop;
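
Note on the hunk above: the old `ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));` collapsed any negative errno from alloc_uld_rxqs() into a bare 1, so callers could never see the real error code. The rewrite returns the real error to the caller and drops the msix_tbl allocation entirely, since MSI-X bookkeeping now lives in each queue's q->msix pointer (see the earlier hunk).
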
@@ -242,7 +213,7 @@
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 
-	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
 		struct sge *s = &adap->sge;
 		u32 param, cmdop, cmplqid = 0;
 		int i;
@@ -261,8 +232,6 @@
 	t4_free_uld_rxqs(adap, rxq_info->nciq,
 			 rxq_info->uldrxq + rxq_info->nrxq);
 	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
-	if (adap->flags & USING_MSIX)
-		kfree(rxq_info->msix_tbl);
 }
 
 static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
@@ -276,7 +245,7 @@
 	if (!rxq_info)
 		return -ENOMEM;
 
-	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
 		i = s->nqs_per_uld;
 		rxq_info->nrxq = roundup(i, adap->params.nports);
 	} else {
@@ -287,7 +256,7 @@
 	if (!uld_info->ciq) {
 		rxq_info->nciq = 0;
 	} else {
-		if (adap->flags & USING_MSIX)
+		if (adap->flags & CXGB4_USING_MSIX)
 			rxq_info->nciq = min_t(int, s->nqs_per_uld,
 					       num_online_cpus());
 		else
@@ -355,25 +324,30 @@
 request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+	struct msix_info *minfo;
+	unsigned int idx;
 	int err = 0;
-	unsigned int idx, bmap_idx;
 
 	for_each_uldrxq(rxq_info, idx) {
-		bmap_idx = rxq_info->msix_tbl[idx];
-		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
+		minfo = rxq_info->uldrxq[idx].msix;
+		err = request_irq(minfo->vec,
 				  t4_sge_intr_msix, 0,
-				  adap->msix_info_ulds[bmap_idx].desc,
+				  minfo->desc,
 				  &rxq_info->uldrxq[idx].rspq);
 		if (err)
 			goto unwind;
+
+		cxgb4_set_msix_aff(adap, minfo->vec,
+				   &minfo->aff_mask, idx);
 	}
 	return 0;
+
 unwind:
 	while (idx-- > 0) {
-		bmap_idx = rxq_info->msix_tbl[idx];
-		free_msix_idx_in_bmap(adap, bmap_idx);
-		free_irq(adap->msix_info_ulds[bmap_idx].vec,
-			 &rxq_info->uldrxq[idx].rspq);
+		minfo = rxq_info->uldrxq[idx].msix;
+		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
+		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
 	}
 	return err;
 }
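
Note on the hunk above: per-queue q->msix pointers replace the msix_tbl lookups, and each vector now gets a CPU affinity hint via cxgb4_set_msix_aff(). The unwind label keeps the usual acquire-or-roll-back shape: on the first request_irq() failure, everything acquired so far is released in reverse order. A compact standalone sketch of that idiom (illustrative, not driver code):

#include <stdio.h>

#define NQ 4

/* Pretend acquisition that fails at slot 2 to exercise the unwind path. */
static int acquire(int idx)
{
	if (idx == 2)
		return -1;
	printf("acquired %d\n", idx);
	return 0;
}

static void release(int idx)
{
	printf("released %d\n", idx);
}

static int acquire_all(void)
{
	int idx, err = 0;

	for (idx = 0; idx < NQ; idx++) {
		err = acquire(idx);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0)	/* idx is the failed slot; roll back 1, then 0 */
		release(idx);
	return err;
}

int main(void)
{
	return acquire_all() ? 1 : 0;
}
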
@@ -382,49 +356,15 @@
 free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-	unsigned int idx, bmap_idx;
+	struct msix_info *minfo;
+	unsigned int idx;
 
 	for_each_uldrxq(rxq_info, idx) {
-		bmap_idx = rxq_info->msix_tbl[idx];
-
-		free_msix_idx_in_bmap(adap, bmap_idx);
-		free_irq(adap->msix_info_ulds[bmap_idx].vec,
-			 &rxq_info->uldrxq[idx].rspq);
+		minfo = rxq_info->uldrxq[idx].msix;
+		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
+		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
 	}
-}
-
-static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
-{
-	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-	int n = sizeof(adap->msix_info_ulds[0].desc);
-	unsigned int idx, bmap_idx;
-
-	for_each_uldrxq(rxq_info, idx) {
-		bmap_idx = rxq_info->msix_tbl[idx];
-
-		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
-			 adap->port[0]->name, rxq_info->name, idx);
-	}
-}
-
-static void enable_rx(struct adapter *adap, struct sge_rspq *q)
-{
-	if (!q)
-		return;
-
-	if (q->handler)
-		napi_enable(&q->napi);
-
-	/* 0-increment GTS to start the timer and enable interrupts */
-	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
-		     SEINTARM_V(q->intr_params) |
-		     INGRESSQID_V(q->cntxt_id));
-}
-
-static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
-{
-	if (q && q->handler)
-		napi_disable(&q->napi);
 }
 
 static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
@@ -432,8 +372,14 @@
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 	int idx;
 
-	for_each_uldrxq(rxq_info, idx)
-		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
+	for_each_uldrxq(rxq_info, idx) {
+		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
+
+		if (!q)
+			continue;
+
+		cxgb4_enable_rx(adap, q);
+	}
 }
 
 static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
@@ -441,8 +387,14 @@
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 	int idx;
 
-	for_each_uldrxq(rxq_info, idx)
-		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
+	for_each_uldrxq(rxq_info, idx) {
+		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
+
+		if (!q)
+			continue;
+
+		cxgb4_quiesce_rx(q);
+	}
 }
 
 static void
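
Review note: in both loops above, q takes the address of a member embedded in the uldrxq array, so it can never be NULL; the `if (!q) continue;` guards look like leftovers from the removed enable_rx()/quiesce_rx() helpers, which accepted an arbitrary (possibly NULL) pointer, and are effectively dead checks here.
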
@@ -523,10 +475,20 @@
 	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
 	if (!txq_info)
 		return -ENOMEM;
+	if (uld_type == CXGB4_ULD_CRYPTO) {
+		i = min_t(int, adap->vres.ncrypto_fc,
+			  num_online_cpus());
+		txq_info->ntxq = rounddown(i, adap->params.nports);
+		if (txq_info->ntxq <= 0) {
+			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
+			kfree(txq_info);
+			return -EINVAL;
+		}
 
-	i = min_t(int, uld_info->ntxq, num_online_cpus());
-	txq_info->ntxq = roundup(i, adap->params.nports);
-
+	} else {
+		i = min_t(int, uld_info->ntxq, num_online_cpus());
+		txq_info->ntxq = roundup(i, adap->params.nports);
+	}
 	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
 				   GFP_KERNEL);
 	if (!txq_info->uldtxq) {
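
Note on the hunk above: Tx queue counts are kept a multiple of the port count, but crypto now rounds down (bounded by adap->vres.ncrypto_fc) and rejects a zero result, while other ULDs keep rounding up. A worked example of the difference (illustrative):

#include <stdio.h>

/* Same arithmetic as the kernel's roundup()/rounddown() macros. */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define ROUNDDOWN(x, y)	(((x) / (y)) * (y))

int main(void)
{
	int nports = 4, budget = 6;	/* e.g. min(resource budget, online CPUs) */

	printf("roundup:   %d\n", ROUNDUP(budget, nports));	/* 8 */
	printf("rounddown: %d\n", ROUNDDOWN(budget, nports));	/* 4 */
	printf("rounddown: %d\n", ROUNDDOWN(3, nports));	/* 0 -> rejected with -EINVAL */
	return 0;
}
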
@@ -549,11 +511,14 @@
 			   struct cxgb4_lld_info *lli)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+	int tx_uld_type = TX_ULD(uld_type);
+	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
 
 	lli->rxq_ids = rxq_info->rspq_id;
 	lli->nrxq = rxq_info->nrxq;
 	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
 	lli->nciq = rxq_info->nciq;
+	lli->ntxq = txq_info->ntxq;
 }
 
 int t4_uld_mem_alloc(struct adapter *adap)
@@ -601,10 +566,10 @@
 	adap->uld[type].add = NULL;
 	release_sge_txq_uld(adap, type);
 
-	if (adap->flags & FULL_INIT_DONE)
+	if (adap->flags & CXGB4_FULL_INIT_DONE)
 		quiesce_rx_uld(adap, type);
 
-	if (adap->flags & USING_MSIX)
+	if (adap->flags & CXGB4_USING_MSIX)
 		free_msix_queue_irqs_uld(adap, type);
 
 	free_sge_queues_uld(adap, type);
@@ -615,6 +580,9 @@
 void t4_uld_clean_up(struct adapter *adap)
 {
 	unsigned int i;
+
+	if (!is_uld(adap))
+		return;
 
 	mutex_lock(&uld_mutex);
 	for (i = 0; i < CXGB4_ULD_MAX; i++) {
@@ -637,7 +605,6 @@
 	lld->ports = adap->port;
 	lld->vr = &adap->vres;
 	lld->mtus = adap->params.mtus;
-	lld->ntxq = adap->sge.ofldqsets;
 	lld->nchan = adap->params.nports;
 	lld->nports = adap->params.nports;
 	lld->wr_cred = adap->params.ofldq_wr_cred;
@@ -651,6 +618,7 @@
 	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
 	lld->udb_density = 1 << adap->params.sge.eq_qpp;
 	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
 	lld->filt_mode = adap->params.tp.vlan_pri_map;
 	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
 	for (i = 0; i < NCHAN; i++)
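
Note: the new lld->sge_host_page_size field decodes the SGE host-page-size exponent, which (judging from the formula) is stored relative to 2^10; e.g. hps = 2 gives 1 << (2 + 10) = 4096 bytes, a standard 4 KiB page.
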
@@ -663,7 +631,7 @@
 	lld->sge_egrstatuspagesize = adap->sge.stat_len;
 	lld->sge_pktshift = adap->sge.pktshift;
 	lld->ulp_crypto = adap->params.crypto;
-	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
 	lld->max_ordird_qp = adap->params.max_ordird_qp;
 	lld->max_ird_adapter = adap->params.max_ird_adapter;
 	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
@@ -692,10 +660,133 @@
 	adap->uld[uld].handle = handle;
 	t4_register_netevent_notifier();
 
-	if (adap->flags & FULL_INIT_DONE)
+	if (adap->flags & CXGB4_FULL_INIT_DONE)
 		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
 
 	return 0;
+}
+
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+static bool cxgb4_uld_in_use(struct adapter *adap)
+{
+	const struct tid_info *t = &adap->tids;
+
+	return (atomic_read(&t->conns_in_use) || t->stids_in_use);
+}
+
+/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
+ * @adap: adapter info
+ * @enable: 1 to enable / 0 to disable ktls settings.
+ */
+int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
+{
+	int ret = 0;
+	u32 params =
+		FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
+		FW_PARAMS_PARAM_Y_V(enable) |
+		FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
+
+	if (enable) {
+		if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
+			/* At this moment if ULD connection are up means, other
+			 * ULD is/are already active, return failure.
+			 */
+			if (cxgb4_uld_in_use(adap)) {
+				dev_dbg(adap->pdev_dev,
+					"ULD connections (tid/stid) active. Can't enable kTLS\n");
+				return -EINVAL;
+			}
+			ret = t4_set_params(adap, adap->mbox, adap->pf,
+					    0, 1, &params, &params);
+			if (ret)
+				return ret;
+			refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
+			pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
+		} else {
+			/* ktls settings already up, just increment refcount. */
+			refcount_inc(&adap->chcr_ktls.ktls_refcount);
+		}
+	} else {
+		/* return failure if refcount is already 0. */
+		if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
+			return -EINVAL;
+		/* decrement refcount and test, if 0, disable ktls feature,
+		 * else return command success.
+		 */
+		if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
+			ret = t4_set_params(adap, adap->mbox, adap->pf,
+					    0, 1, &params, &params);
+			if (ret)
+				return ret;
+			pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
+		}
+	}
+
+	return ret;
+}
+#endif
+
+static void cxgb4_uld_alloc_resources(struct adapter *adap,
+				      enum cxgb4_uld type,
+				      const struct cxgb4_uld_info *p)
+{
+	int ret = 0;
+
+	if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+	    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+		return;
+	if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+		return;
+	ret = cfg_queues_uld(adap, type, p);
+	if (ret)
+		goto out;
+	ret = setup_sge_queues_uld(adap, type, p->lro);
+	if (ret)
+		goto free_queues;
+	if (adap->flags & CXGB4_USING_MSIX) {
+		ret = request_msix_queue_irqs_uld(adap, type);
+		if (ret)
+			goto free_rxq;
+	}
+	if (adap->flags & CXGB4_FULL_INIT_DONE)
+		enable_rx_uld(adap, type);
+	if (adap->uld[type].add)
+		goto free_irq;
+	ret = setup_sge_txq_uld(adap, type, p);
+	if (ret)
+		goto free_irq;
+	adap->uld[type] = *p;
+	ret = uld_attach(adap, type);
+	if (ret)
+		goto free_txq;
+	return;
+free_txq:
+	release_sge_txq_uld(adap, type);
+free_irq:
+	if (adap->flags & CXGB4_FULL_INIT_DONE)
+		quiesce_rx_uld(adap, type);
+	if (adap->flags & CXGB4_USING_MSIX)
+		free_msix_queue_irqs_uld(adap, type);
+free_rxq:
+	free_sge_queues_uld(adap, type);
+free_queues:
+	free_queues_uld(adap, type);
+out:
+	dev_warn(adap->pdev_dev,
+		 "ULD registration failed for uld type %d\n", type);
+}
+
+void cxgb4_uld_enable(struct adapter *adap)
+{
+	struct cxgb4_uld_list *uld_entry;
+
+	mutex_lock(&uld_mutex);
+	list_add_tail(&adap->list_node, &adapter_list);
+	list_for_each_entry(uld_entry, &uld_list, list_node)
+		cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
+					  &uld_entry->uld_info);
+	mutex_unlock(&uld_mutex);
 }
 
 /* cxgb4_register_uld - register an upper-layer driver
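
Note on the kTLS block above: cxgb4_set_ktls_feature() is a first-on/last-off gate — only the 0 -> 1 transition (after checking that no ULD connections are in use) and the 1 -> 0 transition talk to the firmware; every other call just adjusts the refcount. The read-then-set on first enable is needed because refcount_t forbids incrementing from zero. A userspace analogue of the pattern with C11 atomics (illustrative only, and simplified — it ignores the in-use check and the load/sub race the kernel avoids):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcount;	/* file scope: starts at 0 */

static int feature_set(bool enable)
{
	if (enable) {
		/* Only the 0 -> 1 transition does the expensive work. */
		if (atomic_fetch_add(&refcount, 1) == 0)
			printf("firmware: feature enabled\n");
		return 0;
	}
	if (atomic_load(&refcount) == 0)
		return -1;		/* nothing to disable */
	/* Only the 1 -> 0 transition does the expensive work. */
	if (atomic_fetch_sub(&refcount, 1) == 1)
		printf("firmware: feature disabled\n");
	return 0;
}

int main(void)
{
	feature_set(true);	/* enables */
	feature_set(true);	/* counts only */
	feature_set(false);	/* counts only */
	feature_set(false);	/* disables */
	return 0;
}
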
@@ -703,90 +794,30 @@
  * @p: the ULD methods
  *
  * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
+ * about any presently available devices that support its type.
  */
-int cxgb4_register_uld(enum cxgb4_uld type,
-		       const struct cxgb4_uld_info *p)
+void cxgb4_register_uld(enum cxgb4_uld type,
+			const struct cxgb4_uld_info *p)
 {
-	unsigned int adap_idx = 0;
+	struct cxgb4_uld_list *uld_entry;
 	struct adapter *adap;
-	int ret = 0;
 
 	if (type >= CXGB4_ULD_MAX)
-		return -EINVAL;
+		return;
 
+	uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
+	if (!uld_entry)
+		return;
+
+	memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
 	mutex_lock(&uld_mutex);
-	list_for_each_entry(adap, &adapter_list, list_node) {
-		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
-		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
-			continue;
-		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
-			continue;
-		ret = cfg_queues_uld(adap, type, p);
-		if (ret)
-			goto out;
-		ret = setup_sge_queues_uld(adap, type, p->lro);
-		if (ret)
-			goto free_queues;
-		if (adap->flags & USING_MSIX) {
-			name_msix_vecs_uld(adap, type);
-			ret = request_msix_queue_irqs_uld(adap, type);
-			if (ret)
-				goto free_rxq;
-		}
-		if (adap->flags & FULL_INIT_DONE)
-			enable_rx_uld(adap, type);
-		if (adap->uld[type].add) {
-			ret = -EBUSY;
-			goto free_irq;
-		}
-		ret = setup_sge_txq_uld(adap, type, p);
-		if (ret)
-			goto free_irq;
-		adap->uld[type] = *p;
-		ret = uld_attach(adap, type);
-		if (ret)
-			goto free_txq;
-		adap_idx++;
-	}
-	mutex_unlock(&uld_mutex);
-	return 0;
+	list_for_each_entry(adap, &adapter_list, list_node)
+		cxgb4_uld_alloc_resources(adap, type, p);
 
-free_txq:
-	release_sge_txq_uld(adap, type);
-free_irq:
-	if (adap->flags & FULL_INIT_DONE)
-		quiesce_rx_uld(adap, type);
-	if (adap->flags & USING_MSIX)
-		free_msix_queue_irqs_uld(adap, type);
-free_rxq:
-	free_sge_queues_uld(adap, type);
-free_queues:
-	free_queues_uld(adap, type);
-out:
-
-	list_for_each_entry(adap, &adapter_list, list_node) {
-		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
-		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
-			continue;
-		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
-			continue;
-		if (!adap_idx)
-			break;
-		adap->uld[type].handle = NULL;
-		adap->uld[type].add = NULL;
-		release_sge_txq_uld(adap, type);
-		if (adap->flags & FULL_INIT_DONE)
-			quiesce_rx_uld(adap, type);
-		if (adap->flags & USING_MSIX)
-			free_msix_queue_irqs_uld(adap, type);
-		free_sge_queues_uld(adap, type);
-		free_queues_uld(adap, type);
-		adap_idx--;
-	}
+	uld_entry->uld_type = type;
+	list_add_tail(&uld_entry->list_node, &uld_list);
 	mutex_unlock(&uld_mutex);
-	return ret;
+	return;
 }
 EXPORT_SYMBOL(cxgb4_register_uld);
 
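
Note on the hunk above: registration is now symmetric and much shorter. cxgb4_register_uld() records the ULD on uld_list and binds it to every adapter already on adapter_list; cxgb4_uld_enable() (added earlier in this patch) does the mirror image when a new adapter appears, replaying every recorded ULD against it. Both walks are serialized by uld_mutex, so a ULD attaches exactly once per adapter regardless of probe order, and the old per-adapter rollback loop tracked via adap_idx is no longer needed — failures are contained per adapter inside cxgb4_uld_alloc_resources().
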
@@ -798,6 +829,7 @@
  */
 int cxgb4_unregister_uld(enum cxgb4_uld type)
 {
+	struct cxgb4_uld_list *uld_entry, *tmp;
 	struct adapter *adap;
 
 	if (type >= CXGB4_ULD_MAX)
@@ -813,6 +845,13 @@
 
 		cxgb4_shutdown_uld_adapter(adap, type);
 	}
+
+	list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
+		if (uld_entry->uld_type == type) {
+			list_del(&uld_entry->list_node);
+			kfree(uld_entry);
+		}
+	}
 	mutex_unlock(&uld_mutex);
 
 	return 0;