2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -49,6 +49,7 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "pvrdma.h"
 
@@ -91,22 +92,19 @@
 
 /**
  * pvrdma_create_cq - create completion queue
- * @ibdev: the device
+ * @ibcq: Allocated CQ
  * @attr: completion queue attributes
- * @context: user context
  * @udata: user data
  *
- * @return: ib_cq completion queue pointer on success,
- *          otherwise returns negative errno.
+ * @return: 0 on success
  */
-struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
-                               const struct ib_cq_init_attr *attr,
-                               struct ib_ucontext *context,
-                               struct ib_udata *udata)
+int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+                     struct ib_udata *udata)
 {
+        struct ib_device *ibdev = ibcq->device;
         int entries = attr->cqe;
         struct pvrdma_dev *dev = to_vdev(ibdev);
-        struct pvrdma_cq *cq;
+        struct pvrdma_cq *cq = to_vcq(ibcq);
         int ret;
         int npages;
         unsigned long flags;
@@ -114,26 +112,22 @@
         union pvrdma_cmd_resp rsp;
         struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
         struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
-        struct pvrdma_create_cq_resp cq_resp = {0};
+        struct pvrdma_create_cq_resp cq_resp = {};
         struct pvrdma_create_cq ucmd;
+        struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+                udata, struct pvrdma_ucontext, ibucontext);
 
         BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
 
         entries = roundup_pow_of_two(entries);
         if (entries < 1 || entries > dev->dsr->caps.max_cqe)
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
 
         if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
-                return ERR_PTR(-ENOMEM);
-
-        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
-        if (!cq) {
-                atomic_dec(&dev->num_cqs);
-                return ERR_PTR(-ENOMEM);
-        }
+                return -ENOMEM;
 
         cq->ibcq.cqe = entries;
-        cq->is_kernel = !context;
+        cq->is_kernel = !udata;
 
         if (!cq->is_kernel) {
                 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -141,14 +135,14 @@
                         goto err_cq;
                 }
 
-                cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
-                                       IB_ACCESS_LOCAL_WRITE, 1);
+                cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,
+                                       IB_ACCESS_LOCAL_WRITE);
                 if (IS_ERR(cq->umem)) {
                         ret = PTR_ERR(cq->umem);
                         goto err_cq;
                 }
 
-                npages = ib_umem_page_count(cq->umem);
+                npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
         } else {
                 /* One extra page for shared ring state */
                 npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
@@ -185,8 +179,7 @@
         memset(cmd, 0, sizeof(*cmd));
         cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
         cmd->nchunks = npages;
-        cmd->ctx_handle = (context) ?
-                          (u64)to_vucontext(context)->ctx_handle : 0;
+        cmd->ctx_handle = context ? context->ctx_handle : 0;
         cmd->cqe = entries;
         cmd->pdir_dma = cq->pdir.dir_dma;
         ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
@@ -204,29 +197,26 @@
         spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
         if (!cq->is_kernel) {
-                cq->uar = &(to_vucontext(context)->uar);
+                cq->uar = &context->uar;
 
                 /* Copy udata back. */
                 if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
                         dev_warn(&dev->pdev->dev,
                                  "failed to copy back udata\n");
-                        pvrdma_destroy_cq(&cq->ibcq);
-                        return ERR_PTR(-EINVAL);
+                        pvrdma_destroy_cq(&cq->ibcq, udata);
+                        return -EINVAL;
                 }
         }
 
-        return &cq->ibcq;
+        return 0;
 
 err_page_dir:
         pvrdma_page_dir_cleanup(dev, &cq->pdir);
 err_umem:
-        if (!cq->is_kernel)
-                ib_umem_release(cq->umem);
+        ib_umem_release(cq->umem);
 err_cq:
         atomic_dec(&dev->num_cqs);
-        kfree(cq);
-
-        return ERR_PTR(ret);
+        return ret;
 }
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
@@ -235,20 +225,17 @@
         complete(&cq->free);
         wait_for_completion(&cq->free);
 
-        if (!cq->is_kernel)
-                ib_umem_release(cq->umem);
+        ib_umem_release(cq->umem);
 
         pvrdma_page_dir_cleanup(dev, &cq->pdir);
-        kfree(cq);
 }
 
 /**
  * pvrdma_destroy_cq - destroy completion queue
  * @cq: the completion queue to destroy.
- *
- * @return: 0 for success.
+ * @udata: user data or null for kernel object
  */
-int pvrdma_destroy_cq(struct ib_cq *cq)
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
         struct pvrdma_cq *vcq = to_vcq(cq);
         union pvrdma_cmd_req req;
@@ -274,8 +261,7 @@
 
         pvrdma_free_cq(dev, vcq);
         atomic_dec(&dev->num_cqs);
-
-        return ret;
+        return 0;
 }
 
 static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
@@ -378,7 +364,7 @@
         wc->dlid_path_bits = cqe->dlid_path_bits;
         wc->port_num = cqe->port_num;
         wc->vendor_err = cqe->vendor_err;
-        wc->network_hdr_type = cqe->network_hdr_type;
+        wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
 
         /* Update shared ring state */
         pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
@@ -390,7 +376,7 @@
  * pvrdma_poll_cq - poll for work completion queue entries
  * @ibcq: completion queue
  * @num_entries: the maximum number of entries
- * @entry: pointer to work completion array
+ * @wc: pointer to work completion array
  *
  * @return: number of polled completion entries
  */
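
The core of this patch is that pvrdma_create_cq no longer allocates its own struct pvrdma_cq: the RDMA core hands the driver a pre-allocated struct ib_cq embedded in the driver container, and the driver recovers its private structure with to_vcq() (and the ucontext with rdma_udata_to_drv_context()), both of which are container_of-style lookups. Below is a standalone userspace sketch of that pattern, not kernel code; all names other than container_of itself are hypothetical stand-ins, and it only illustrates why the converted create path can return 0 instead of an ib_cq pointer.

/*
 * Standalone sketch of the container_of pattern behind to_vcq() and the
 * new create_cq calling convention. Compile with any C compiler.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_ib_cq {                  /* stand-in for struct ib_cq */
        int cqe;
};

struct fake_pvrdma_cq {              /* stand-in for struct pvrdma_cq */
        struct fake_ib_cq ibcq;       /* core object embedded in the container */
        int is_kernel;
};

/* Mirrors to_vcq(): recover the driver struct from the core pointer. */
static struct fake_pvrdma_cq *to_vcq(struct fake_ib_cq *ibcq)
{
        return container_of(ibcq, struct fake_pvrdma_cq, ibcq);
}

/* Mirrors the converted create flow: the caller passes a pre-allocated
 * core object, and the function returns 0 or a negative errno. */
static int fake_create_cq(struct fake_ib_cq *ibcq, int entries)
{
        struct fake_pvrdma_cq *cq = to_vcq(ibcq);

        cq->ibcq.cqe = entries;
        cq->is_kernel = 1;
        return 0;
}

int main(void)
{
        /* The "core" allocates the whole container up front. */
        struct fake_pvrdma_cq *cq = calloc(1, sizeof(*cq));

        if (!cq)
                return 1;
        if (fake_create_cq(&cq->ibcq, 64) == 0)
                printf("cqe = %d\n", to_vcq(&cq->ibcq)->ibcq.cqe);
        free(cq);
        return 0;
}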