2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
--- a/kernel/drivers/infiniband/hw/cxgb4/cq.c
+++ b/kernel/drivers/infiniband/hw/cxgb4/cq.c
@@ -30,18 +30,19 @@
  * SOFTWARE.
  */
 
+#include <rdma/uverbs_ioctl.h>
+
 #include "iw_cxgb4.h"
 
-static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
-		      struct c4iw_wr_wait *wr_waitp)
+static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+		       struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+		       struct c4iw_wr_wait *wr_waitp)
 {
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
-	int ret;
 
-	wr_len = sizeof *res_wr + sizeof *res;
+	wr_len = sizeof(*res_wr) + sizeof(*res);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 	res_wr = __skb_put_zero(skb, wr_len);
@@ -57,14 +58,13 @@
 	res->u.cq.iqid = cpu_to_be32(cq->cqid);
 
 	c4iw_init_wr_wait(wr_waitp);
-	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
 
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  cq->memsize, cq->queue,
 			  dma_unmap_addr(cq, mapping));
 	c4iw_put_cqid(rdev, cq->cqid, uctx);
-	return ret;
 }
 
 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
@@ -102,7 +102,6 @@
 		goto err3;
 	}
 	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
-	memset(cq->queue, 0, cq->memsize);
 
 	if (user && ucontext->is_32b_cqe) {
 		cq->qp_errp = &((struct t4_status_page *)
@@ -115,7 +114,7 @@
 	}
 
 	/* build fw_ri_res_wr */
-	wr_len = sizeof *res_wr + sizeof *res;
+	wr_len = sizeof(*res_wr) + sizeof(*res);
 
 	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
@@ -755,7 +754,7 @@
 static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
 			      struct ib_wc *wc, struct c4iw_srq *srq)
 {
-	struct t4_cqe uninitialized_var(cqe);
+	struct t4_cqe cqe;
 	struct t4_wq *wq = qhp ? &qhp->wq : NULL;
 	u32 credit = 0;
 	u8 cqe_flushed;
@@ -968,7 +967,7 @@
 	return !err || err == -ENODATA ? npolled : err;
 }
 
-int c4iw_destroy_cq(struct ib_cq *ib_cq)
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct c4iw_cq *chp;
 	struct c4iw_ucontext *ucontext;
@@ -976,57 +975,49 @@
 	pr_debug("ib_cq %p\n", ib_cq);
 	chp = to_c4iw_cq(ib_cq);
 
-	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
+	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
 	atomic_dec(&chp->refcnt);
 	wait_event(chp->wait, !atomic_read(&chp->refcnt));
 
-	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
-				  : NULL;
+	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+					     ibucontext);
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
 		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
 		   chp->destroy_skb, chp->wr_waitp);
 	c4iw_put_wr_wait(chp->wr_waitp);
-	kfree(chp);
 	return 0;
 }
 
-struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
-			     const struct ib_cq_init_attr *attr,
-			     struct ib_ucontext *ib_context,
-			     struct ib_udata *udata)
+int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		   struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ibcq->device;
 	int entries = attr->cqe;
 	int vector = attr->comp_vector;
-	struct c4iw_dev *rhp;
-	struct c4iw_cq *chp;
+	struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device);
+	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
 	struct c4iw_create_cq ucmd;
 	struct c4iw_create_cq_resp uresp;
-	struct c4iw_ucontext *ucontext = NULL;
 	int ret, wr_len;
 	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
+	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
+		udata, struct c4iw_ucontext, ibucontext);
 
 	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
 	if (attr->flags)
-		return ERR_PTR(-EINVAL);
-
-	rhp = to_c4iw_dev(ibdev);
+		return -EINVAL;
 
 	if (entries < 1 || entries > ibdev->attrs.max_cqe)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (vector >= rhp->rdev.lldi.nciq)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
-	if (ib_context) {
-		ucontext = to_c4iw_ucontext(ib_context);
+	if (udata) {
 		if (udata->inlen < sizeof(ucmd))
 			ucontext->is_32b_cqe = 1;
 	}
-
-	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
-	if (!chp)
-		return ERR_PTR(-ENOMEM);
 
 	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
 	if (!chp->wr_waitp) {
@@ -1071,7 +1062,7 @@
 	/*
 	 * memsize must be a multiple of the page size if its a user cq.
 	 */
-	if (ucontext)
+	if (udata)
 		memsize = roundup(memsize, PAGE_SIZE);
 
 	chp->cq.size = hwentries;
@@ -1091,16 +1082,16 @@
 	spin_lock_init(&chp->comp_handler_lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
-	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
 	if (ret)
 		goto err_destroy_cq;
 
 	if (ucontext) {
 		ret = -ENOMEM;
-		mm = kmalloc(sizeof *mm, GFP_KERNEL);
+		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
 		if (!mm)
 			goto err_remove_handle;
-		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
 		if (!mm2)
 			goto err_free_mm;
 
@@ -1137,16 +1128,17 @@
 		mm2->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm2);
 	}
-	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
-		 chp->cq.cqid, chp, chp->cq.size,
-		 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
-	return &chp->ibcq;
+
+	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr %pad\n",
+		 chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
+		 &chp->cq.dma_addr);
+	return 0;
 err_free_mm2:
 	kfree(mm2);
 err_free_mm:
 	kfree(mm);
 err_remove_handle:
-	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
+	xa_erase_irq(&rhp->cqs, chp->cq.cqid);
 err_destroy_cq:
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
@@ -1156,8 +1148,7 @@
 err_free_wr_wait:
 	c4iw_put_wr_wait(chp->wr_waitp);
 err_free_chp:
-	kfree(chp);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)