forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -52,6 +52,9 @@
 
 #include "pvrdma.h"
 
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+				struct pvrdma_qp *qp);
+
 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
 			   struct pvrdma_cq **recv_cq)
 {
@@ -195,7 +198,9 @@
 	union pvrdma_cmd_resp rsp;
 	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
 	struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+	struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
 	struct pvrdma_create_qp ucmd;
+	struct pvrdma_create_qp_resp qp_resp = {};
 	unsigned long flags;
 	int ret;
 	bool is_srq = !!init_attr->srq;
@@ -212,7 +217,7 @@
 	    init_attr->qp_type != IB_QPT_GSI) {
 		dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
 			 init_attr->qp_type);
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EOPNOTSUPP);
 	}
 
 	if (is_srq && !dev->dsr->caps.max_srq) {
@@ -227,13 +232,12 @@
 	switch (init_attr->qp_type) {
 	case IB_QPT_GSI:
 		if (init_attr->port_num == 0 ||
-		    init_attr->port_num > pd->device->phys_port_cnt ||
-		    udata) {
+		    init_attr->port_num > pd->device->phys_port_cnt) {
 			dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
 			ret = -EINVAL;
 			goto err_qp;
 		}
-		/* fall through */
+		fallthrough;
 	case IB_QPT_RC:
 	case IB_QPT_UD:
 		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
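For context, the change from the /* fall through */ comment to the fallthrough pseudo-keyword relies on the kernel defining fallthrough in terms of the compiler's fallthrough attribute when available. A rough, hedged sketch of that definition (paraphrased, not copied from compiler_attributes.h):

#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
/* Marks an intentional fall-through for -Wimplicit-fallthrough. */
# define fallthrough	__attribute__((__fallthrough__))
#else
/* Fallback for compilers without the attribute. */
# define fallthrough	do {} while (0)	/* fallthrough */
#endif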
@@ -249,7 +253,7 @@
 		init_completion(&qp->free);
 
 		qp->state = IB_QPS_RESET;
-		qp->is_kernel = !(pd->uobject && udata);
+		qp->is_kernel = !udata;
 
 		if (!qp->is_kernel) {
 			dev_dbg(&dev->pdev->dev,
@@ -260,11 +264,20 @@
 				goto err_qp;
 			}
 
+			/* Userspace supports qpn and qp handles? */
+			if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
+			    udata->outlen < sizeof(qp_resp)) {
+				dev_warn(&dev->pdev->dev,
+					 "create queuepair not supported\n");
+				ret = -EOPNOTSUPP;
+				goto err_qp;
+			}
+
 			if (!is_srq) {
 				/* set qp->sq.wqe_cnt, shift, buf_size.. */
-				qp->rumem = ib_umem_get(pd->uobject->context,
-							ucmd.rbuf_addr,
-							ucmd.rbuf_size, 0, 0);
+				qp->rumem =
+					ib_umem_get(pd->device, ucmd.rbuf_addr,
+						    ucmd.rbuf_size, 0);
 				if (IS_ERR(qp->rumem)) {
 					ret = PTR_ERR(qp->rumem);
 					goto err_qp;
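For reference, the ib_umem_get() conversion in this hunk and the next tracks the reworked core interface: the caller now passes the ib_device instead of the user context, and the old trailing dmasync flag is gone. A hedged sketch of the prototype the new calls appear to target, paraphrased from context rather than copied from the headers:

#include <stddef.h>

struct ib_device;
struct ib_umem;

/*
 * Assumed new-style interface; the replaced calls had the shape
 * ib_umem_get(pd->uobject->context, addr, size, access, dmasync).
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);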
@@ -275,9 +288,8 @@
 				qp->srq = to_vsrq(init_attr->srq);
 			}
 
-			qp->sumem = ib_umem_get(pd->uobject->context,
-						ucmd.sbuf_addr,
-						ucmd.sbuf_size, 0, 0);
+			qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
+						ucmd.sbuf_size, 0);
 			if (IS_ERR(qp->sumem)) {
 				if (!is_srq)
 					ib_umem_release(qp->rumem);
@@ -285,9 +297,11 @@
 				goto err_qp;
 			}
 
-			qp->npages_send = ib_umem_page_count(qp->sumem);
+			qp->npages_send =
+				ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
 			if (!is_srq)
-				qp->npages_recv = ib_umem_page_count(qp->rumem);
+				qp->npages_recv = ib_umem_num_dma_blocks(
+					qp->rumem, PAGE_SIZE);
 			else
 				qp->npages_recv = 0;
 			qp->npages = qp->npages_send + qp->npages_recv;
@@ -381,24 +395,40 @@
 	}
 
 	/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
-	qp->qp_handle = resp->qpn;
 	qp->port = init_attr->port_num;
-	qp->ibqp.qp_num = resp->qpn;
+
+	if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
+		qp->ibqp.qp_num = resp_v2->qpn;
+		qp->qp_handle = resp_v2->qp_handle;
+	} else {
+		qp->ibqp.qp_num = resp->qpn;
+		qp->qp_handle = resp->qpn;
+	}
+
 	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
 	dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
 	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+	if (udata) {
+		qp_resp.qpn = qp->ibqp.qp_num;
+		qp_resp.qp_handle = qp->qp_handle;
+
+		if (ib_copy_to_udata(udata, &qp_resp,
+				     min(udata->outlen, sizeof(qp_resp)))) {
+			dev_warn(&dev->pdev->dev,
+				 "failed to copy back udata\n");
+			__pvrdma_destroy_qp(dev, qp);
+			return ERR_PTR(-EINVAL);
+		}
+	}
 
 	return &qp->ibqp;
 
 err_pdir:
 	pvrdma_page_dir_cleanup(dev, &qp->pdir);
 err_umem:
-	if (!qp->is_kernel) {
-		if (qp->rumem)
-			ib_umem_release(qp->rumem);
-		if (qp->sumem)
-			ib_umem_release(qp->sumem);
-	}
+	ib_umem_release(qp->rumem);
+	ib_umem_release(qp->sumem);
 err_qp:
 	kfree(qp);
 	atomic_dec(&dev->num_qps);
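The udata copy-back above returns both the IB-visible QP number and the device-private qp_handle to userspace. A hedged sketch of the response layout it implies, modelled only on the fields used here rather than on the uapi header:

#include <linux/types.h>

/* Assumed layout of the create-QP response copied back to userspace. */
struct pvrdma_create_qp_resp {
	__u32 qpn;		/* IB-visible QP number */
	__u32 qp_handle;	/* device handle used for later commands */
};

Because the copy length is min(udata->outlen, sizeof(qp_resp)), a consumer that supplied a shorter or empty response buffer is not overrun, and the outlen check earlier in the function already rejects too-small buffers when the device version requires the handle.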
@@ -406,12 +436,34 @@
 	return ERR_PTR(ret);
 }
 
+static void _pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+	unsigned long flags;
+	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
+
+	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+	dev->qp_tbl[qp->qp_handle] = NULL;
+	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+	if (refcount_dec_and_test(&qp->refcnt))
+		complete(&qp->free);
+	wait_for_completion(&qp->free);
+
+	ib_umem_release(qp->rumem);
+	ib_umem_release(qp->sumem);
+
+	pvrdma_page_dir_cleanup(dev, &qp->pdir);
+
+	kfree(qp);
+
+	atomic_dec(&dev->num_qps);
+}
+
 static void pvrdma_free_qp(struct pvrdma_qp *qp)
 {
-	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
 	struct pvrdma_cq *scq;
 	struct pvrdma_cq *rcq;
-	unsigned long flags, scq_flags, rcq_flags;
+	unsigned long scq_flags, rcq_flags;
 
 	/* In case cq is polling */
 	get_cqs(qp, &scq, &rcq);
@@ -421,55 +473,55 @@
 	if (scq != rcq)
 		_pvrdma_flush_cqe(qp, rcq);
 
-	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
-	dev->qp_tbl[qp->qp_handle] = NULL;
-	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
-
+	/*
+	 * We're now unlocking the CQs before clearing out the qp handle this
+	 * should still be safe. We have destroyed the backend QP and flushed
+	 * the CQEs so there should be no other completions for this QP.
+	 */
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-	if (refcount_dec_and_test(&qp->refcnt))
-		complete(&qp->free);
-	wait_for_completion(&qp->free);
-
-	if (!qp->is_kernel) {
-		if (qp->rumem)
-			ib_umem_release(qp->rumem);
-		if (qp->sumem)
-			ib_umem_release(qp->sumem);
-	}
-
-	pvrdma_page_dir_cleanup(dev, &qp->pdir);
-
-	kfree(qp);
-
-	atomic_dec(&dev->num_qps);
+	_pvrdma_free_qp(qp);
 }
 
-/**
- * pvrdma_destroy_qp - destroy a queue pair
- * @qp: the queue pair to destroy
- *
- * @return: 0 on success.
- */
-int pvrdma_destroy_qp(struct ib_qp *qp)
+static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
+					   u32 qp_handle)
 {
-	struct pvrdma_qp *vqp = to_vqp(qp);
 	union pvrdma_cmd_req req;
 	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
 	int ret;
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
-	cmd->qp_handle = vqp->qp_handle;
+	cmd->qp_handle = qp_handle;
 
-	ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
 	if (ret < 0)
-		dev_warn(&to_vdev(qp->device)->pdev->dev,
+		dev_warn(&dev->pdev->dev,
 			 "destroy queuepair failed, error: %d\n", ret);
+}
 
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
+ *
+ * @return: always 0.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+	struct pvrdma_qp *vqp = to_vqp(qp);
+
+	_pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
 	pvrdma_free_qp(vqp);
 
 	return 0;
+}
+
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+				struct pvrdma_qp *qp)
+{
+	_pvrdma_destroy_qp_work(dev, qp->qp_handle);
+	_pvrdma_free_qp(qp);
 }
 
 /**
@@ -499,7 +551,7 @@
 	next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
 
 	if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
-				attr_mask, IB_LINK_LAYER_ETHERNET)) {
+				attr_mask)) {
 		ret = -EINVAL;
 		goto out;
 	}