2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -47,6 +47,7 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "ocrdma.h"
 #include "ocrdma_hw.h"
@@ -98,8 +99,6 @@
 	attr->max_mw = dev->attr.max_mw;
 	attr->max_pd = dev->attr.max_pd;
 	attr->atomic_cap = 0;
-	attr->max_fmr = 0;
-	attr->max_map_per_fmr = 0;
 	attr->max_qp_rd_atom =
 	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
 	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
@@ -112,26 +111,8 @@
 	return 0;
 }
 
-struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
-{
-	struct ocrdma_dev *dev;
-	struct net_device *ndev = NULL;
-
-	rcu_read_lock();
-
-	dev = get_ocrdma_dev(ibdev);
-	if (dev)
-		ndev = dev->nic_info.netdev;
-	if (ndev)
-		dev_hold(ndev);
-
-	rcu_read_unlock();
-
-	return ndev;
-}
-
 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
-					    u8 *ib_speed, u8 *ib_width)
+					    u16 *ib_speed, u8 *ib_width)
 {
 	int status;
 	u8 speed;
@@ -177,18 +158,13 @@
 
 	/* props being zeroed by the caller, avoid zeroing it here */
 	dev = get_ocrdma_dev(ibdev);
-	if (port > 1) {
-		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
-		       dev->id, port);
-		return -EINVAL;
-	}
 	netdev = dev->nic_info.netdev;
 	if (netif_running(netdev) && netif_oper_up(netdev)) {
 		port_state = IB_PORT_ACTIVE;
-		props->phys_state = 5;
+		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	} else {
 		port_state = IB_PORT_DOWN;
-		props->phys_state = 3;
+		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 	}
 	props->max_mtu = IB_MTU_4096;
 	props->active_mtu = iboe_get_mtu(netdev->mtu);
@@ -209,19 +185,6 @@
 				 &props->active_width);
 	props->max_msg_sz = 0x80000000;
 	props->max_vl_num = 4;
-	return 0;
-}
-
-int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
-		       struct ib_port_modify *props)
-{
-	struct ocrdma_dev *dev;
-
-	dev = get_ocrdma_dev(ibdev);
-	if (port > 1) {
-		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
-		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -379,16 +342,21 @@
 	return status;
 }
 
-static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
-					  struct ocrdma_ucontext *uctx,
-					  struct ib_udata *udata)
+/*
+ * NOTE:
+ *
+ * ocrdma_ucontext must be used here because this function is also
+ * called from ocrdma_alloc_ucontext where ib_udata does not have
+ * valid ib_ucontext pointer. ib_uverbs_get_context does not call
+ * uobj_{alloc|get_xxx} helpers which are used to store the
+ * ib_ucontext in uverbs_attr_bundle wrapping the ib_udata. so
+ * ib_udata does NOT imply valid ib_ucontext here!
+ */
+static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
+			    struct ocrdma_ucontext *uctx,
+			    struct ib_udata *udata)
 {
-	struct ocrdma_pd *pd = NULL;
 	int status;
-
-	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		return ERR_PTR(-ENOMEM);
 
 	if (udata && uctx && dev->attr.max_dpp_pds) {
 		pd->dpp_enabled =
@@ -398,15 +366,8 @@
 					   dev->attr.wqe_size) : 0;
 	}
 
-	if (dev->pd_mgr->pd_prealloc_valid) {
-		status = ocrdma_get_pd_num(dev, pd);
-		if (status == 0) {
-			return pd;
-		} else {
-			kfree(pd);
-			return ERR_PTR(status);
-		}
-	}
+	if (dev->pd_mgr->pd_prealloc_valid)
+		return ocrdma_get_pd_num(dev, pd);
 
 retry:
 	status = ocrdma_mbx_alloc_pd(dev, pd);
@@ -415,13 +376,11 @@
 			pd->dpp_enabled = false;
 			pd->num_dpp_qp = 0;
 			goto retry;
-		} else {
-			kfree(pd);
-			return ERR_PTR(status);
 		}
+		return status;
 	}
 
-	return pd;
+	return 0;
 }
 
 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
@@ -430,30 +389,33 @@
 	return (uctx->cntxt_pd == pd);
 }
 
-static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
+static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
 			      struct ocrdma_pd *pd)
 {
-	int status;
-
 	if (dev->pd_mgr->pd_prealloc_valid)
-		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+		ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
 	else
-		status = ocrdma_mbx_dealloc_pd(dev, pd);
-
-	kfree(pd);
-	return status;
+		ocrdma_mbx_dealloc_pd(dev, pd);
 }
 
 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
 				    struct ocrdma_ucontext *uctx,
 				    struct ib_udata *udata)
 {
-	int status = 0;
+	struct ib_device *ibdev = &dev->ibdev;
+	struct ib_pd *pd;
+	int status;
 
-	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
-	if (IS_ERR(uctx->cntxt_pd)) {
-		status = PTR_ERR(uctx->cntxt_pd);
-		uctx->cntxt_pd = NULL;
+	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
+	if (!pd)
+		return -ENOMEM;
+
+	pd->device = ibdev;
+	uctx->cntxt_pd = get_ocrdma_pd(pd);
+
+	status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
+	if (status) {
+		kfree(uctx->cntxt_pd);
 		goto err;
 	}
 
@@ -463,7 +425,7 @@
 	return status;
 }
 
-static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
 	struct ocrdma_pd *pd = uctx->cntxt_pd;
 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
@@ -473,8 +435,8 @@
 		       __func__, dev->id, pd->id);
 	}
 	uctx->cntxt_pd = NULL;
-	(void)_ocrdma_dealloc_pd(dev, pd);
-	return 0;
+	_ocrdma_dealloc_pd(dev, pd);
+	kfree(pd);
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -498,33 +460,28 @@
 	mutex_unlock(&uctx->mm_list_lock);
 }
 
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
-					  struct ib_udata *udata)
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	int status;
-	struct ocrdma_ucontext *ctx;
-	struct ocrdma_alloc_ucontext_resp resp;
+	struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
+	struct ocrdma_alloc_ucontext_resp resp = {};
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
 
 	if (!udata)
-		return ERR_PTR(-EFAULT);
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+		return -EFAULT;
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);
 
-	ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
-					     &ctx->ah_tbl.pa, GFP_KERNEL);
-	if (!ctx->ah_tbl.va) {
-		kfree(ctx);
-		return ERR_PTR(-ENOMEM);
-	}
+	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+					    &ctx->ah_tbl.pa, GFP_KERNEL);
+	if (!ctx->ah_tbl.va)
+		return -ENOMEM;
+
 	ctx->ah_tbl.len = map_len;
 
-	memset(&resp, 0, sizeof(resp));
 	resp.ah_tbl_len = ctx->ah_tbl.len;
 	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
 
@@ -546,27 +503,26 @@
 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
 	if (status)
 		goto cpy_err;
-	return &ctx->ibucontext;
+	return 0;
 
 cpy_err:
+	ocrdma_dealloc_ucontext_pd(ctx);
 pd_err:
 	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
 map_err:
 	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
 			  ctx->ah_tbl.pa);
-	kfree(ctx);
-	return ERR_PTR(status);
+	return status;
 }
 
-int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
 {
-	int status;
 	struct ocrdma_mm *mm, *tmp;
 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 
-	status = ocrdma_dealloc_ucontext_pd(uctx);
+	ocrdma_dealloc_ucontext_pd(uctx);
 
 	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
 	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -576,8 +532,6 @@
 		list_del(&mm->entry);
 		kfree(mm);
 	}
-	kfree(uctx);
-	return status;
 }
 
 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -624,7 +578,6 @@
 }
 
 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
-				struct ib_ucontext *ib_ctx,
 				struct ib_udata *udata)
 {
 	int status;
@@ -632,7 +585,8 @@
 	u64 dpp_page_addr = 0;
 	u32 db_page_size;
 	struct ocrdma_alloc_pd_uresp rsp;
-	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+		udata, struct ocrdma_ucontext, ibucontext);
 
 	memset(&rsp, 0, sizeof(rsp));
 	rsp.id = pd->id;
@@ -670,18 +624,17 @@
 	return status;
 }
 
-struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
-			      struct ib_ucontext *context,
-			      struct ib_udata *udata)
+int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ibpd->device;
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
 	struct ocrdma_pd *pd;
-	struct ocrdma_ucontext *uctx = NULL;
 	int status;
 	u8 is_uctx_pd = false;
+	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+		udata, struct ocrdma_ucontext, ibucontext);
 
-	if (udata && context) {
-		uctx = get_ocrdma_ucontext(context);
+	if (udata) {
 		pd = ocrdma_get_ucontext_pd(uctx);
 		if (pd) {
 			is_uctx_pd = true;
@@ -689,37 +642,33 @@
 		}
 	}
 
-	pd = _ocrdma_alloc_pd(dev, uctx, udata);
-	if (IS_ERR(pd)) {
-		status = PTR_ERR(pd);
+	pd = get_ocrdma_pd(ibpd);
+	status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
+	if (status)
 		goto exit;
-	}
 
 pd_mapping:
-	if (udata && context) {
-		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
+	if (udata) {
+		status = ocrdma_copy_pd_uresp(dev, pd, udata);
 		if (status)
 			goto err;
 	}
-	return &pd->ibpd;
+	return 0;
 
 err:
-	if (is_uctx_pd) {
+	if (is_uctx_pd)
 		ocrdma_release_ucontext_pd(uctx);
-	} else {
-		if (_ocrdma_dealloc_pd(dev, pd))
-			pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
-	}
+	else
+		_ocrdma_dealloc_pd(dev, pd);
exit:
-	return ERR_PTR(status);
+	return status;
 }
 
-int ocrdma_dealloc_pd(struct ib_pd *ibpd)
+int ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
 	struct ocrdma_ucontext *uctx = NULL;
-	int status = 0;
 	u64 usr_db;
 
 	uctx = pd->uctx;
@@ -733,11 +682,11 @@
 
 		if (is_ucontext_pd(uctx, pd)) {
 			ocrdma_release_ucontext_pd(uctx);
-			return status;
+			return 0;
 		}
 	}
-	status = _ocrdma_dealloc_pd(dev, pd);
-	return status;
+	_ocrdma_dealloc_pd(dev, pd);
+	return 0;
 }
 
 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
@@ -850,7 +799,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < mr->num_pbls; i++) {
-		va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
 		if (!va) {
 			ocrdma_free_mr_pbl_tbl(dev, mr);
 			status = -ENOMEM;
@@ -862,14 +811,13 @@
 	return status;
 }
 
-static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
-			    u32 num_pbes)
+static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
 {
 	struct ocrdma_pbe *pbe;
-	struct scatterlist *sg;
+	struct ib_block_iter biter;
 	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
-	struct ib_umem *umem = mr->umem;
-	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
+	int pbe_cnt;
+	u64 pg_addr;
 
 	if (!mr->hwmr.num_pbes)
 		return;
@@ -877,36 +825,21 @@
 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
 	pbe_cnt = 0;
 
-	shift = umem->page_shift;
+	rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
+		/* store the page address in pbe */
+		pg_addr = rdma_block_iter_dma_address(&biter);
+		pbe->pa_lo = cpu_to_le32(pg_addr);
+		pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
+		pbe_cnt += 1;
+		pbe++;
 
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		pages = sg_dma_len(sg) >> shift;
-		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-			/* store the page address in pbe */
-			pbe->pa_lo =
-			    cpu_to_le32(sg_dma_address(sg) +
-					(pg_cnt << shift));
-			pbe->pa_hi =
-			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
-						      (pg_cnt << shift)));
-			pbe_cnt += 1;
-			total_num_pbes += 1;
-			pbe++;
-
-			/* if done building pbes, issue the mbx cmd. */
-			if (total_num_pbes == num_pbes)
-				return;
-
-			/* if the given pbl is full storing the pbes,
-			 * move to next pbl.
-			 */
-			if (pbe_cnt ==
-			    (mr->hwmr.pbl_size / sizeof(u64))) {
-				pbl_tbl++;
-				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
-				pbe_cnt = 0;
-			}
-
+		/* if the given pbl is full storing the pbes,
+		 * move to next pbl.
+		 */
+		if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
+			pbl_tbl++;
+			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+			pbe_cnt = 0;
 		}
 	}
 }
@@ -918,7 +851,6 @@
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
 	struct ocrdma_mr *mr;
 	struct ocrdma_pd *pd;
-	u32 num_pbes;
 
 	pd = get_ocrdma_pd(ibpd);
 
@@ -928,18 +860,17 @@
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(status);
-	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
 	if (IS_ERR(mr->umem)) {
 		status = -EFAULT;
 		goto umem_err;
 	}
-	num_pbes = ib_umem_page_count(mr->umem);
-	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
+	status = ocrdma_get_pbl_info(
+		dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));
 	if (status)
 		goto umem_err;
 
-	mr->hwmr.pbe_size = BIT(mr->umem->page_shift);
-	mr->hwmr.fbo = ib_umem_offset(mr->umem);
+	mr->hwmr.pbe_size = PAGE_SIZE;
 	mr->hwmr.va = usr_addr;
 	mr->hwmr.len = len;
 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
@@ -950,7 +881,7 @@
 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
 	if (status)
 		goto umem_err;
-	build_user_pbes(dev, mr, num_pbes);
+	build_user_pbes(dev, mr);
 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
 	if (status)
 		goto mbx_err;
@@ -967,7 +898,7 @@
 	return ERR_PTR(status);
 }
 
-int ocrdma_dereg_mr(struct ib_mr *ib_mr)
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
@@ -978,8 +909,7 @@
 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
 
 	/* it could be user registered memory. */
-	if (mr->umem)
-		ib_umem_release(mr->umem);
+	ib_umem_release(mr->umem);
 	kfree(mr);
 
 	/* Don't stop cleanup, in case FW is unresponsive */
@@ -991,12 +921,16 @@
 }
 
 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
-				struct ib_udata *udata,
-				struct ib_ucontext *ib_ctx)
+				struct ib_udata *udata)
 {
 	int status;
-	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+		udata, struct ocrdma_ucontext, ibucontext);
 	struct ocrdma_create_cq_uresp uresp;
+
+	/* this must be user flow! */
+	if (!udata)
+		return -EINVAL;
 
 	memset(&uresp, 0, sizeof(uresp));
 	uresp.cq_id = cq->id;
@@ -1026,59 +960,52 @@
 	return status;
 }
 
-struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
-			       const struct ib_cq_init_attr *attr,
-			       struct ib_ucontext *ib_ctx,
-			       struct ib_udata *udata)
+int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		     struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ibcq->device;
 	int entries = attr->cqe;
-	struct ocrdma_cq *cq;
+	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
-	struct ocrdma_ucontext *uctx = NULL;
+	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+		udata, struct ocrdma_ucontext, ibucontext);
 	u16 pd_id = 0;
 	int status;
 	struct ocrdma_create_cq_ureq ureq;
 
 	if (attr->flags)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (udata) {
 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
-			return ERR_PTR(-EFAULT);
+			return -EFAULT;
 	} else
 		ureq.dpp_cq = 0;
-	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
-	if (!cq)
-		return ERR_PTR(-ENOMEM);
 
 	spin_lock_init(&cq->cq_lock);
 	spin_lock_init(&cq->comp_handler_lock);
 	INIT_LIST_HEAD(&cq->sq_head);
 	INIT_LIST_HEAD(&cq->rq_head);
 
-	if (ib_ctx) {
-		uctx = get_ocrdma_ucontext(ib_ctx);
+	if (udata)
 		pd_id = uctx->cntxt_pd->id;
-	}
 
 	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
-	if (status) {
-		kfree(cq);
-		return ERR_PTR(status);
-	}
-	if (ib_ctx) {
-		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
+	if (status)
+		return status;
+
+	if (udata) {
+		status = ocrdma_copy_cq_uresp(dev, cq, udata);
 		if (status)
 			goto ctx_err;
 	}
 	cq->phase = OCRDMA_CQE_VALID;
 	dev->cq_tbl[cq->id] = cq;
-	return &cq->ibcq;
+	return 0;
 
 ctx_err:
 	ocrdma_mbx_destroy_cq(dev, cq);
-	kfree(cq);
-	return ERR_PTR(status);
+	return status;
 }
 
 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
@@ -1121,7 +1048,7 @@
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 }
 
-int ocrdma_destroy_cq(struct ib_cq *ibcq)
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 	struct ocrdma_eq *eq = NULL;
@@ -1131,14 +1058,13 @@
 
 	dev->cq_tbl[cq->id] = NULL;
 	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
-	BUG_ON(indx == -EINVAL);
 
 	eq = &dev->eq_tbl[indx];
 	irq = ocrdma_get_irq(dev, eq);
 	synchronize_irq(irq);
 	ocrdma_flush_cq(cq);
 
-	(void)ocrdma_mbx_destroy_cq(dev, cq);
+	ocrdma_mbx_destroy_cq(dev, cq);
 	if (cq->ucontext) {
 		pdid = cq->ucontext->cntxt_pd->id;
 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1147,8 +1073,6 @@
 				ocrdma_get_db_addr(dev, pdid),
 				dev->nic_info.db_page_size);
 	}
-
-	kfree(cq);
 	return 0;
 }
 
@@ -1169,7 +1093,8 @@
 }
 
 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
-				  struct ib_qp_init_attr *attrs)
+				  struct ib_qp_init_attr *attrs,
+				  struct ib_udata *udata)
 {
 	if ((attrs->qp_type != IB_QPT_GSI) &&
 	    (attrs->qp_type != IB_QPT_RC) &&
@@ -1177,7 +1102,7 @@
 	    (attrs->qp_type != IB_QPT_UD)) {
 		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
 		       __func__, dev->id, attrs->qp_type);
-		return -EINVAL;
+		return -EOPNOTSUPP;
 	}
 	/* Skip the check for QP1 to support CM size of 128 */
 	if ((attrs->qp_type != IB_QPT_GSI) &&
@@ -1217,7 +1142,7 @@
 		return -EINVAL;
 	}
 	/* unprivileged user space cannot create special QP */
-	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+	if (udata && attrs->qp_type == IB_QPT_GSI) {
 		pr_err
 		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
 		     __func__, dev->id, attrs->qp_type);
@@ -1374,7 +1299,7 @@
 	struct ocrdma_create_qp_ureq ureq;
 	u16 dpp_credit_lmt, dpp_offset;
 
-	status = ocrdma_check_qp_params(ibpd, dev, attrs);
+	status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
 	if (status)
 		goto gen_err;
 
@@ -1480,8 +1405,7 @@
 	new_qps = old_qps;
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 
-	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
-				IB_LINK_LAYER_ETHERNET)) {
+	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
 		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
 		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
 		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
@@ -1742,7 +1666,7 @@
 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
-int ocrdma_destroy_qp(struct ib_qp *ibqp)
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct ocrdma_pd *pd;
 	struct ocrdma_qp *qp;
@@ -1838,45 +1762,43 @@
 	return status;
 }
 
-struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
-				 struct ib_srq_init_attr *init_attr,
-				 struct ib_udata *udata)
+int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+		      struct ib_udata *udata)
 {
-	int status = -ENOMEM;
-	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
-	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
-	struct ocrdma_srq *srq;
+	int status;
+	struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
+	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
+	struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
 
 	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	if (init_attr->attr.max_wr > dev->attr.max_rqe)
-		return ERR_PTR(-EINVAL);
-
-	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
-	if (!srq)
-		return ERR_PTR(status);
+		return -EINVAL;
 
 	spin_lock_init(&srq->q_lock);
 	srq->pd = pd;
 	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
 	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
 	if (status)
-		goto err;
+		return status;
 
-	if (udata == NULL) {
-		status = -ENOMEM;
+	if (!udata) {
 		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
 					     GFP_KERNEL);
-		if (srq->rqe_wr_id_tbl == NULL)
+		if (!srq->rqe_wr_id_tbl) {
+			status = -ENOMEM;
 			goto arm_err;
+		}
 
 		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
 		    (srq->rq.max_cnt % 32 ? 1 : 0);
 		srq->idx_bit_fields =
 		    kmalloc_array(srq->bit_fields_len, sizeof(u32),
 				  GFP_KERNEL);
-		if (srq->idx_bit_fields == NULL)
+		if (!srq->idx_bit_fields) {
+			status = -ENOMEM;
 			goto arm_err;
+		}
 		memset(srq->idx_bit_fields, 0xff,
 		       srq->bit_fields_len * sizeof(u32));
 	}
@@ -1893,15 +1815,13 @@
 		goto arm_err;
 	}
 
-	return &srq->ibsrq;
+	return 0;
 
 arm_err:
 	ocrdma_mbx_destroy_srq(dev, srq);
-err:
 	kfree(srq->rqe_wr_id_tbl);
 	kfree(srq->idx_bit_fields);
-	kfree(srq);
-	return ERR_PTR(status);
+	return status;
 }
 
 int ocrdma_modify_srq(struct ib_srq *ibsrq,
@@ -1930,15 +1850,14 @@
 	return status;
 }
 
-int ocrdma_destroy_srq(struct ib_srq *ibsrq)
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
-	int status;
 	struct ocrdma_srq *srq;
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
 
 	srq = get_ocrdma_srq(ibsrq);
 
-	status = ocrdma_mbx_destroy_srq(dev, srq);
+	ocrdma_mbx_destroy_srq(dev, srq);
 
 	if (srq->pd->uctx)
 		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
@@ -1946,8 +1865,7 @@
 
 	kfree(srq->idx_bit_fields);
 	kfree(srq->rqe_wr_id_tbl);
-	kfree(srq);
-	return status;
+	return 0;
 }
 
 /* unprivileged verbs and their support functions. */
@@ -2210,7 +2128,7 @@
 	case IB_WR_SEND_WITH_IMM:
 		hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
 		hdr->immdt = ntohl(wr->ex.imm_data);
-		/* fall through */
+		fallthrough;
 	case IB_WR_SEND:
 		hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
 		ocrdma_build_send(qp, hdr, wr);
@@ -2224,7 +2142,7 @@
 	case IB_WR_RDMA_WRITE_WITH_IMM:
 		hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
 		hdr->immdt = ntohl(wr->ex.imm_data);
-		/* fall through */
+		fallthrough;
 	case IB_WR_RDMA_WRITE:
 		hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
 		status = ocrdma_build_write(qp, hdr, wr);
@@ -2976,8 +2894,7 @@
 	return 0;
 }
 
-struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
-			      enum ib_mr_type mr_type,
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 			      u32 max_num_sg)
 {
 	int status;