forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/infiniband/hw/cxgb4/qp.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/module.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "iw_cxgb4.h"
 
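The only functional addition in this hunk is the new header: <rdma/uverbs_ioctl.h> is where rdma_udata_to_drv_context() lives, which the reworked create/destroy paths below use to resolve the driver ucontext from udata instead of reaching through pd->uobject.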
@@ -56,18 +57,18 @@
 
 static int max_fr_immd = T4_MAX_FR_IMMD;
 module_param(max_fr_immd, int, 0644);
-MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
 
 static int alloc_ird(struct c4iw_dev *dev, u32 ird)
 {
        int ret = 0;
 
-       spin_lock_irq(&dev->lock);
+       xa_lock_irq(&dev->qps);
        if (ird <= dev->avail_ird)
                dev->avail_ird -= ird;
        else
                ret = -ENOMEM;
-       spin_unlock_irq(&dev->lock);
+       xa_unlock_irq(&dev->qps);
 
        if (ret)
                dev_warn(&dev->rdev.lldi.pdev->dev,
@@ -78,9 +79,9 @@
 
 static void free_ird(struct c4iw_dev *dev, int ird)
 {
-       spin_lock_irq(&dev->lock);
+       xa_lock_irq(&dev->qps);
        dev->avail_ird += ird;
-       spin_unlock_irq(&dev->lock);
+       xa_unlock_irq(&dev->qps);
 }
 
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
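These two hunks retire the driver-private dev->lock in favor of the spinlock built into the qps XArray: since avail_ird must stay consistent with QP insert/remove anyway, one lock can cover both. A minimal sketch of the pattern, with illustrative demo_* names rather than the driver's real types:

    #include <linux/xarray.h>

    struct demo_dev {
            struct xarray qps;      /* qid -> qp; xa_lock also guards avail_ird */
            u32 avail_ird;
    };

    static int demo_reserve_ird(struct demo_dev *dev, u32 ird)
    {
            int ret = 0;

            xa_lock_irq(&dev->qps);         /* same spinlock the XArray itself uses */
            if (ird <= dev->avail_ird)
                    dev->avail_ird -= ird;
            else
                    ret = -ENOMEM;
            xa_unlock_irq(&dev->qps);
            return ret;
    }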
@@ -99,7 +100,7 @@
 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 {
        dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
-                         pci_unmap_addr(sq, mapping));
+                         dma_unmap_addr(sq, mapping));
 }
 
 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
@@ -132,7 +133,7 @@
        if (!sq->queue)
                return -ENOMEM;
        sq->phys_addr = virt_to_phys(sq->queue);
-       pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+       dma_unmap_addr_set(sq, mapping, sq->dma_addr);
        return 0;
 }
 
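pci_unmap_addr()/pci_unmap_addr_set() were the legacy PCI-specific spellings; dma_unmap_addr()/dma_unmap_addr_set() from <linux/dma-mapping.h> are the generic equivalents, so these hunks are a mechanical rename. A sketch of the idiom, mirroring how the driver uses it, with made-up demo_* names:

    #include <linux/dma-mapping.h>

    struct demo_queue {
            void *queue;
            dma_addr_t dma_addr;
            DEFINE_DMA_UNMAP_ADDR(mapping); /* may compile away on some configs */
            size_t memsize;
    };

    static int demo_queue_alloc(struct device *dev, struct demo_queue *q)
    {
            q->queue = dma_alloc_coherent(dev, q->memsize, &q->dma_addr,
                                          GFP_KERNEL);
            if (!q->queue)
                    return -ENOMEM;
            dma_unmap_addr_set(q, mapping, q->dma_addr);    /* stash the handle */
            return 0;
    }

    static void demo_queue_free(struct device *dev, struct demo_queue *q)
    {
            dma_free_coherent(dev, q->memsize, q->queue,
                              dma_unmap_addr(q, mapping));  /* read it back */
    }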
@@ -273,7 +274,6 @@
                 (unsigned long long)virt_to_phys(wq->sq.queue),
                 wq->rq.queue,
                 (unsigned long long)virt_to_phys(wq->rq.queue));
-       memset(wq->rq.queue, 0, wq->rq.memsize);
        dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
 }
 
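The dropped memset() is safe: since the dma-mapping rework around v5.0, dma_alloc_coherent() is specified to return zeroed memory, so clearing the freshly allocated RQ ring again was redundant. The SRQ queue allocation later in this patch loses its memset() for the same reason.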
@@ -303,7 +303,7 @@
        wq->rq.msn = 1;
 
        /* build fw_ri_res_wr */
-       wr_len = sizeof *res_wr + 2 * sizeof *res;
+       wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
        if (need_rq)
                wr_len += sizeof(*res);
        skb = alloc_skb(wr_len, GFP_KERNEL);
@@ -439,7 +439,7 @@
                        rem -= len;
                }
        }
-       len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
+       len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
        if (len)
                memset(dstp, 0, len);
        immdp->op = FW_RI_DATA_IMMD;
@@ -528,7 +528,7 @@
                                         T4_MAX_SEND_INLINE, &plen);
                        if (ret)
                                return ret;
-                       size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
+                       size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
                               plen;
                } else {
                        ret = build_isgl((__be64 *)sq->queue,
@@ -537,7 +537,7 @@
                                         wr->sg_list, wr->num_sge, &plen);
                        if (ret)
                                return ret;
-                       size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
+                       size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
                               wr->num_sge * sizeof(struct fw_ri_sge);
                }
        } else {
@@ -545,7 +545,7 @@
                wqe->send.u.immd_src[0].r1 = 0;
                wqe->send.u.immd_src[0].r2 = 0;
                wqe->send.u.immd_src[0].immdlen = 0;
-               size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+               size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
                plen = 0;
        }
        *len16 = DIV_ROUND_UP(size, 16);
@@ -579,7 +579,7 @@
                                         T4_MAX_WRITE_INLINE, &plen);
                        if (ret)
                                return ret;
-                       size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
+                       size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
                               plen;
                } else {
                        ret = build_isgl((__be64 *)sq->queue,
@@ -588,7 +588,7 @@
                                         wr->sg_list, wr->num_sge, &plen);
                        if (ret)
                                return ret;
-                       size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
+                       size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
                               wr->num_sge * sizeof(struct fw_ri_sge);
                }
        } else {
@@ -596,7 +596,7 @@
                wqe->write.u.immd_src[0].r1 = 0;
                wqe->write.u.immd_src[0].r2 = 0;
                wqe->write.u.immd_src[0].immdlen = 0;
-               size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+               size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
                plen = 0;
        }
        *len16 = DIV_ROUND_UP(size, 16);
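This run of hunks (and the matching ones further down the file) is a pure style sweep: checkpatch.pl flags the operator form `sizeof x`, so every remaining use is rewritten as the function-like `sizeof(x)`. No generated code changes.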
@@ -633,7 +633,10 @@
 
        wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
        wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
-       wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
+       if (wr->next->opcode == IB_WR_SEND)
+               wcwr->stag_inv = 0;
+       else
+               wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
        wcwr->r2 = 0;
        wcwr->r3 = 0;
 
@@ -680,7 +683,7 @@
        }
        wqe->read.r2 = 0;
        wqe->read.r5 = 0;
-       *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+       *len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
        return 0;
 }
 
@@ -727,7 +730,10 @@
 
        /* SEND_WITH_INV swsqe */
        swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
-       swsqe->opcode = FW_RI_SEND_WITH_INV;
+       if (wr->next->opcode == IB_WR_SEND)
+               swsqe->opcode = FW_RI_SEND;
+       else
+               swsqe->opcode = FW_RI_SEND_WITH_INV;
        swsqe->idx = qhp->wq.sq.pidx;
        swsqe->complete = 0;
        swsqe->signaled = send_signaled;
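These two post_write_cmpl() hunks are the core of the WRITE+SEND fastpath extension: when the chained second WR is a plain IB_WR_SEND there is no rkey to invalidate, so stag_inv stays 0 and the software SQ entry records FW_RI_SEND instead of FW_RI_SEND_WITH_INV. The guard that lets such chains reach post_write_cmpl() at all is relaxed in the hunk at old line 1146 below.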
@@ -760,8 +766,8 @@
                         &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
        if (ret)
                return ret;
-       *len16 = DIV_ROUND_UP(sizeof wqe->recv +
-                             wr->num_sge * sizeof(struct fw_ri_sge), 16);
+       *len16 = DIV_ROUND_UP(
+               sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
        return 0;
 }
 
@@ -880,49 +886,21 @@
 {
        wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
        wqe->inv.r2 = 0;
-       *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
+       *len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
        return 0;
-}
-
-static void free_qp_work(struct work_struct *work)
-{
-       struct c4iw_ucontext *ucontext;
-       struct c4iw_qp *qhp;
-       struct c4iw_dev *rhp;
-
-       qhp = container_of(work, struct c4iw_qp, free_work);
-       ucontext = qhp->ucontext;
-       rhp = qhp->rhp;
-
-       pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
-       destroy_qp(&rhp->rdev, &qhp->wq,
-                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
-
-       if (ucontext)
-               c4iw_put_ucontext(ucontext);
-       c4iw_put_wr_wait(qhp->wr_waitp);
-       kfree(qhp);
-}
-
-static void queue_qp_free(struct kref *kref)
-{
-       struct c4iw_qp *qhp;
-
-       qhp = container_of(kref, struct c4iw_qp, kref);
-       pr_debug("qhp %p\n", qhp);
-       queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }
 
 void c4iw_qp_add_ref(struct ib_qp *qp)
 {
        pr_debug("ib_qp %p\n", qp);
-       kref_get(&to_c4iw_qp(qp)->kref);
+       refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
 }
 
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
        pr_debug("ib_qp %p\n", qp);
-       kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
+       if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
+               complete(&to_c4iw_qp(qp)->qp_rel_comp);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
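With QP destroy now running in a context that may sleep, the kref plus free_qp_work/queue_qp_free deferral is overkill: the last c4iw_qp_rem_ref() just fires a completion, and the destroy path (reworked below) waits on it before tearing the QP down inline. A minimal sketch of this refcount-plus-completion teardown, with illustrative demo_* names:

    #include <linux/refcount.h>
    #include <linux/completion.h>
    #include <linux/slab.h>

    struct demo_qp {
            refcount_t refcnt;
            struct completion rel_comp;
    };

    static void demo_qp_init(struct demo_qp *qp)
    {
            init_completion(&qp->rel_comp);
            refcount_set(&qp->refcnt, 1);   /* owner's reference */
    }

    static void demo_qp_get(struct demo_qp *qp)
    {
            refcount_inc(&qp->refcnt);
    }

    static void demo_qp_put(struct demo_qp *qp)
    {
            /* the last put wakes the destroyer instead of freeing directly */
            if (refcount_dec_and_test(&qp->refcnt))
                    complete(&qp->rel_comp);
    }

    static void demo_qp_destroy(struct demo_qp *qp)
    {
            demo_qp_put(qp);                        /* drop owner's reference */
            wait_for_completion(&qp->rel_comp);     /* may sleep */
            kfree(qp);                              /* assuming qp was kmalloc()ed */
    }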
@@ -935,7 +913,7 @@
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&qhp->rhp->lock, flags);
+       xa_lock_irqsave(&qhp->rhp->qps, flags);
        spin_lock(&qhp->lock);
        if (qhp->rhp->db_state == NORMAL)
                t4_ring_sq_db(&qhp->wq, inc, NULL);
@@ -944,7 +922,7 @@
                qhp->wq.sq.wq_pidx_inc += inc;
        }
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+       xa_unlock_irqrestore(&qhp->rhp->qps, flags);
        return 0;
 }
 
@@ -952,7 +930,7 @@
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&qhp->rhp->lock, flags);
+       xa_lock_irqsave(&qhp->rhp->qps, flags);
        spin_lock(&qhp->lock);
        if (qhp->rhp->db_state == NORMAL)
                t4_ring_rq_db(&qhp->wq, inc, NULL);
@@ -961,7 +939,7 @@
                qhp->wq.rq.wq_pidx_inc += inc;
        }
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+       xa_unlock_irqrestore(&qhp->rhp->qps, flags);
        return 0;
 }
 
@@ -1134,9 +1112,9 @@
        /*
         * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
         * the response for small NVMEe-oF READ requests. If the chain is
-        * exactly a WRITE->SEND_WITH_INV and the sgl depths and lengths
-        * meet the requirements of the fw_ri_write_cmpl_wr work request,
-        * then build and post the write_cmpl WR. If any of the tests
+        * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
+        * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
+        * request, then build and post the write_cmpl WR. If any of the tests
         * below are not true, then we continue on with the tradtional WRITE
         * and SEND WRs.
         */
@@ -1146,7 +1124,8 @@
            wr && wr->next && !wr->next->next &&
            wr->opcode == IB_WR_RDMA_WRITE &&
            wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
-           wr->next->opcode == IB_WR_SEND_WITH_INV &&
+           (wr->next->opcode == IB_WR_SEND ||
+            wr->next->opcode == IB_WR_SEND_WITH_INV) &&
            wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
            wr->next->num_sge == 1 && num_wrs >= 2) {
                post_write_cmpl(qhp, wr);
@@ -1187,7 +1166,7 @@
                        break;
                }
                fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
-               /*FALLTHROUGH*/
+               fallthrough;
        case IB_WR_RDMA_WRITE:
                fw_opcode = FW_RI_RDMA_WRITE_WR;
                swsqe->opcode = FW_RI_RDMA_WRITE;
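fallthrough; is the pseudo-keyword from <linux/compiler_attributes.h> (expanding to __attribute__((fallthrough)) where the compiler supports it). Unlike the old /*FALLTHROUGH*/ comment, it lets -Wimplicit-fallthrough verify that dropping into the IB_WR_RDMA_WRITE case is intentional.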
@@ -1601,7 +1580,7 @@
                                         FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
        wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
-       wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
+       wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
        term = (struct terminate_message *)wqe->u.terminate.termmsg;
        if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
                term->layer_etype = qhp->attr.layer_etype;
@@ -1746,16 +1725,15 @@
 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 {
        pr_debug("p2p_type = %d\n", p2p_type);
-       memset(&init->u, 0, sizeof init->u);
+       memset(&init->u, 0, sizeof(init->u));
        switch (p2p_type) {
        case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
                init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
                init->u.write.stag_sink = cpu_to_be32(1);
                init->u.write.to_sink = cpu_to_be64(1);
                init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
-               init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
-                                                  sizeof(struct fw_ri_immd),
-                                                  16);
+               init->u.write.len16 = DIV_ROUND_UP(
+                       sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
                break;
        case FW_RI_INIT_P2PTYPE_READ_REQ:
                init->u.write.opcode = FW_RI_RDMA_READ_WR;
@@ -1763,7 +1741,7 @@
                init->u.read.to_src_lo = cpu_to_be32(1);
                init->u.read.stag_sink = cpu_to_be32(1);
                init->u.read.to_sink_lo = cpu_to_be32(1);
-               init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
+               init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
                break;
        }
 }
@@ -1777,7 +1755,7 @@
        pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
                 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
 
-       skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
+       skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto out;
@@ -1972,7 +1950,7 @@
                        qhp->attr.ecode = attrs->ecode;
                        ep = qhp->ep;
                        if (!internal) {
-                               c4iw_get_ep(&qhp->ep->com);
+                               c4iw_get_ep(&ep->com);
                                terminate = 1;
                                disconnect = 1;
                        } else {
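A small consistency fix: ep was assigned from qhp->ep two lines up, so taking the reference through the local avoids a second read of the shared field and makes it explicit that terminate/disconnect operate on the same endpoint the reference was taken on.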
@@ -2090,14 +2068,16 @@
        return ret;
 }
 
-int c4iw_destroy_qp(struct ib_qp *ib_qp)
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
+       struct c4iw_ucontext *ucontext;
        struct c4iw_qp_attributes attrs;
 
        qhp = to_c4iw_qp(ib_qp);
        rhp = qhp->rhp;
+       ucontext = qhp->ucontext;
 
        attrs.next_state = C4IW_QP_STATE_ERROR;
        if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
@@ -2106,17 +2086,26 @@
                c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
        wait_event(qhp->wait, !qhp->ep);
 
-       remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-
-       spin_lock_irq(&rhp->lock);
+       xa_lock_irq(&rhp->qps);
+       __xa_erase(&rhp->qps, qhp->wq.sq.qid);
        if (!list_empty(&qhp->db_fc_entry))
                list_del_init(&qhp->db_fc_entry);
-       spin_unlock_irq(&rhp->lock);
+       xa_unlock_irq(&rhp->qps);
        free_ird(rhp, qhp->attr.max_ird);
 
        c4iw_qp_rem_ref(ib_qp);
 
+       wait_for_completion(&qhp->qp_rel_comp);
+
        pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
+       pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
+
+       destroy_qp(&rhp->rdev, &qhp->wq,
+                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
+
+       c4iw_put_wr_wait(qhp->wr_waitp);
+
+       kfree(qhp);
        return 0;
 }
 
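c4iw_destroy_qp() now performs inline what free_qp_work() used to do from the workqueue: erase the qid under xa_lock (__xa_erase() is the variant for callers that already hold the XArray lock), drop the reference, sleep on qp_rel_comp until the last holder is gone, then destroy the hardware queue and free the memory. The ucontext reference juggling also disappears; under the current core lifetime rules the ucontext outlives the QPs created on it, so caching the pointer is enough.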
@@ -2130,7 +2119,8 @@
        struct c4iw_cq *rchp;
        struct c4iw_create_qp_resp uresp;
        unsigned int sqsize, rqsize = 0;
-       struct c4iw_ucontext *ucontext;
+       struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
+               udata, struct c4iw_ucontext, ibucontext);
        int ret;
        struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
        struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
@@ -2138,7 +2128,7 @@
        pr_debug("ib_pd %p\n", pd);
 
        if (attrs->qp_type != IB_QPT_RC)
-               return ERR_PTR(-EINVAL);
+               return ERR_PTR(-EOPNOTSUPP);
 
        php = to_c4iw_pd(pd);
        rhp = php->rhp;
@@ -2163,8 +2153,6 @@
        sqsize = attrs->cap.max_send_wr + 1;
        if (sqsize < 8)
                sqsize = 8;
-
-       ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
 
        qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
        if (!qhp)
@@ -2227,10 +2215,10 @@
        spin_lock_init(&qhp->lock);
        mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
-       kref_init(&qhp->kref);
-       INIT_WORK(&qhp->free_work, free_qp_work);
+       init_completion(&qhp->qp_rel_comp);
+       refcount_set(&qhp->qp_refcnt, 1);
 
-       ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+       ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
        if (ret)
                goto err_destroy_qp;
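insert_handle()/remove_handle() were driver wrappers around an idr protected by rhp->lock; the XArray brings its own locking, so registration collapses to one call each way. A sketch of the qid-table pattern (demo_* names are illustrative):

    #include <linux/xarray.h>

    struct demo_qp;

    struct demo_dev {
            struct xarray qps;      /* qid -> struct demo_qp * */
    };

    static void demo_dev_init(struct demo_dev *dev)
    {
            /* documents that xa_lock is taken irq-safe by users of this array */
            xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
    }

    static int demo_register_qp(struct demo_dev *dev, u32 qid, struct demo_qp *qhp)
    {
            /* fails with -EBUSY if qid is already present */
            return xa_insert_irq(&dev->qps, qid, qhp, GFP_KERNEL);
    }

    static void demo_unregister_qp(struct demo_dev *dev, u32 qid)
    {
            xa_erase_irq(&dev->qps, qid);
    }

    static struct demo_qp *demo_lookup_qp(struct demo_dev *dev, u32 qid)
    {
            return xa_load(&dev->qps, qid); /* lockless RCU lookup */
    }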
@@ -2299,7 +2287,7 @@
                        ucontext->key += PAGE_SIZE;
                }
                spin_unlock(&ucontext->mmap_lock);
-               ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+               ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
                if (ret)
                        goto err_free_ma_sync_key;
                sq_key_mm->key = uresp.sq_key;
@@ -2332,7 +2320,6 @@
                        insert_mmap(ucontext, ma_sync_key_mm);
                }
 
-               c4iw_get_ucontext(ucontext);
                qhp->ucontext = ucontext;
        }
        if (!attrs->srq) {
@@ -2367,7 +2354,7 @@
 err_free_sq_key:
        kfree(sq_key_mm);
 err_remove_handle:
-       remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+       xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
 err_destroy_qp:
        destroy_qp(&rhp->rdev, &qhp->wq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
@@ -2384,7 +2371,7 @@
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        enum c4iw_qp_attr_mask mask = 0;
-       struct c4iw_qp_attributes attrs;
+       struct c4iw_qp_attributes attrs = {};
 
        pr_debug("ib_qp %p\n", ibqp);
 
@@ -2396,7 +2383,6 @@
        if (!attr_mask)
                return 0;
 
-       memset(&attrs, 0, sizeof attrs);
        qhp = to_c4iw_qp(ibqp);
        rhp = qhp->rhp;
 
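In c4iw_ib_modify_qp(), the `= {}` initializer zero-fills attrs at its declaration, which is what makes the memset() dropped in the hunk just above redundant: same contents, but the zeroing is visible at the point of definition and cannot be bypassed by code added between the declaration and the old memset().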
@@ -2480,8 +2466,8 @@
 {
        struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
 
-       memset(attr, 0, sizeof *attr);
-       memset(init_attr, 0, sizeof *init_attr);
+       memset(attr, 0, sizeof(*attr));
+       memset(init_attr, 0, sizeof(*init_attr));
        attr->qp_state = to_ib_qp_state(qhp->attr.state);
        attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
        init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
@@ -2524,7 +2510,7 @@
 
        dma_free_coherent(&rdev->lldi.pdev->dev,
                          wq->memsize, wq->queue,
-                         pci_unmap_addr(wq, mapping));
+                         dma_unmap_addr(wq, mapping));
        c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
        kfree(wq->sw_rq);
        c4iw_put_qpid(rdev, wq->qid, uctx);
@@ -2566,14 +2552,12 @@
        wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
                          T4_RQT_ENTRY_SHIFT;
 
-       wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
-                                      wq->memsize, &wq->dma_addr,
-                                      GFP_KERNEL);
+       wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
+                                      &wq->dma_addr, GFP_KERNEL);
        if (!wq->queue)
                goto err_free_rqtpool;
 
-       memset(wq->queue, 0, wq->memsize);
-       pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+       dma_unmap_addr_set(wq, mapping, wq->dma_addr);
 
        wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
                                      &wq->bar2_qid,
@@ -2593,7 +2577,7 @@
        /* build fw_ri_res_wr */
        wr_len = sizeof(*res_wr) + sizeof(*res);
 
-       skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+       skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                goto err_free_queue;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
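Dropping __GFP_NOFAIL is a correctness cleanup twice over: a nofail allocation can never return NULL, so the `if (!skb)` check was dead code, and __GFP_NOFAIL puts an unbounded-retry burden on the allocator that is discouraged for small, easily failable paths like this one. Plain GFP_KERNEL restores a real error path through err_free_queue.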
@@ -2652,7 +2636,7 @@
 err_free_queue:
        dma_free_coherent(&rdev->lldi.pdev->dev,
                          wq->memsize, wq->queue,
-                         pci_unmap_addr(wq, mapping));
+                         dma_unmap_addr(wq, mapping));
 err_free_rqtpool:
        c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
 err_free_pending_wrs:
@@ -2684,11 +2668,12 @@
        }
 }
 
-struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
+int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
                               struct ib_udata *udata)
 {
+       struct ib_pd *pd = ib_srq->pd;
        struct c4iw_dev *rhp;
-       struct c4iw_srq *srq;
+       struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
        struct c4iw_pd *php;
        struct c4iw_create_srq_resp uresp;
        struct c4iw_ucontext *ucontext;
@@ -2703,11 +2688,11 @@
        rhp = php->rhp;
 
        if (!rhp->rdev.lldi.vr->srq.size)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
-               return ERR_PTR(-E2BIG);
+               return -E2BIG;
        if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
-               return ERR_PTR(-E2BIG);
+               return -E2BIG;
 
        /*
         * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
@@ -2715,17 +2700,12 @@
        rqsize = attrs->attr.max_wr + 1;
        rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
 
-       ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
-
-       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
+       ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+                                            ibucontext);
 
        srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
-       if (!srq->wr_waitp) {
-               ret = -ENOMEM;
-               goto err_free_srq;
-       }
+       if (!srq->wr_waitp)
+               return -ENOMEM;
 
        srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
        if (srq->idx < 0) {
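c4iw_create_srq() is converted to the RDMA core's core-allocated object scheme: the core allocates the driver's SRQ container (using the size the driver declares in its ib_device_ops) and passes in an initialized ib_srq, so the callback only fills in driver state and returns an int; the ERR_PTR() returns and the kzalloc()/kfree() pair fall away. The ucontext is likewise resolved via rdma_udata_to_drv_context() from the <rdma/uverbs_ioctl.h> header added at the top of this patch. A sketch of that helper, assuming (as iw_cxgb4.h defines) that struct c4iw_ucontext embeds its ib_ucontext as a member named ibucontext:

    #include <rdma/uverbs_ioctl.h>

    #include "iw_cxgb4.h"   /* struct c4iw_ucontext and its ibucontext member */

    static struct c4iw_ucontext *demo_get_uctx(struct ib_udata *udata)
    {
            /* kernel callers pass udata == NULL and get a NULL ucontext back */
            return rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
    }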
@@ -2759,15 +2739,11 @@
        if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
                srq->flags = T4_SRQ_LIMIT_SUPPORT;
 
-       ret = insert_handle(rhp, &rhp->qpidr, srq, srq->wq.qid);
-       if (ret)
-               goto err_free_queue;
-
        if (udata) {
                srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
                if (!srq_key_mm) {
                        ret = -ENOMEM;
-                       goto err_remove_handle;
+                       goto err_free_queue;
                }
                srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
                if (!srq_db_key_mm) {
@@ -2805,29 +2781,25 @@
                 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
 
        spin_lock_init(&srq->lock);
-       return &srq->ibsrq;
+       return 0;
+
 err_free_srq_db_key_mm:
        kfree(srq_db_key_mm);
 err_free_srq_key_mm:
        kfree(srq_key_mm);
-err_remove_handle:
-       remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
 err_free_queue:
        free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                       srq->wr_waitp);
 err_free_skb:
-       if (srq->destroy_skb)
-               kfree_skb(srq->destroy_skb);
+       kfree_skb(srq->destroy_skb);
 err_free_srq_idx:
        c4iw_free_srq_idx(&rhp->rdev, srq->idx);
 err_free_wr_wait:
        c4iw_put_wr_wait(srq->wr_waitp);
-err_free_srq:
-       kfree(srq);
-       return ERR_PTR(ret);
+       return ret;
 }
 
-int c4iw_destroy_srq(struct ib_srq *ibsrq)
+int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
        struct c4iw_dev *rhp;
        struct c4iw_srq *srq;
@@ -2837,14 +2809,11 @@
        rhp = srq->rhp;
 
        pr_debug("%s id %d\n", __func__, srq->wq.qid);
-
-       remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
-       ucontext = ibsrq->uobject ?
-               to_c4iw_ucontext(ibsrq->uobject->context) : NULL;
+       ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+                                            ibucontext);
        free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                       srq->wr_waitp);
        c4iw_free_srq_idx(&rhp->rdev, srq->idx);
        c4iw_put_wr_wait(srq->wr_waitp);
-       kfree(srq);
        return 0;
 }
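Two last simplifications ride along in the error and destroy paths: kfree_skb() already tolerates a NULL skb, so the guard around freeing destroy_skb was redundant, and the final kfree(srq) disappears for the same reason as in create: the core owns the SRQ allocation and frees it after the driver callback returns.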