2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -48,6 +48,7 @@
4848 #include <rdma/ib_addr.h>
4949 #include <rdma/ib_mad.h>
5050 #include <rdma/ib_cache.h>
51
+#include <rdma/uverbs_ioctl.h>
5152
5253 #include "bnxt_ulp.h"
5354
@@ -118,21 +119,6 @@
118119 }
119120
120121 /* Device */
121
-struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122
-{
123
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124
- struct net_device *netdev = NULL;
125
-
126
- rcu_read_lock();
127
- if (rdev)
128
- netdev = rdev->netdev;
129
- if (netdev)
130
- dev_hold(netdev);
131
-
132
- rcu_read_unlock();
133
- return netdev;
134
-}
135
-
136122 int bnxt_re_query_device(struct ib_device *ibdev,
137123 struct ib_device_attr *ib_attr,
138124 struct ib_udata *udata)
@@ -191,9 +177,6 @@
191177 ib_attr->max_total_mcast_qp_attach = 0;
192178 ib_attr->max_ah = dev_attr->max_ah;
193179
194
- ib_attr->max_fmr = 0;
195
- ib_attr->max_map_per_fmr = 0;
196
-
197180 ib_attr->max_srq = dev_attr->max_srq;
198181 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199182 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
@@ -202,24 +185,6 @@
202185
203186 ib_attr->max_pkeys = 1;
204187 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
205
- return 0;
206
-}
207
-
208
-int bnxt_re_modify_device(struct ib_device *ibdev,
209
- int device_modify_mask,
210
- struct ib_device_modify *device_modify)
211
-{
212
- switch (device_modify_mask) {
213
- case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
214
- /* Modify the GUID requires the modification of the GID table */
215
- /* GUID should be made as READ-ONLY */
216
- break;
217
- case IB_DEVICE_MODIFY_NODE_DESC:
218
- /* Node Desc should be made as READ-ONLY */
219
- break;
220
- default:
221
- break;
222
- }
223188 return 0;
224189 }
225190
@@ -234,10 +199,10 @@
234199
235200 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
236201 port_attr->state = IB_PORT_ACTIVE;
237
- port_attr->phys_state = 5;
202
+ port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
238203 } else {
239204 port_attr->state = IB_PORT_DOWN;
240
- port_attr->phys_state = 3;
205
+ port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
241206 }
242207 port_attr->max_mtu = IB_MTU_4096;
243208 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
@@ -322,6 +287,7 @@
322287 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
323288 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
324289 struct bnxt_qplib_gid *gid_to_del;
290
+ u16 vlan_id = 0xFFFF;
325291
326292 /* Delete the entry from the hardware */
327293 ctx = *context;
@@ -331,7 +297,8 @@
331297 if (sgid_tbl && sgid_tbl->active) {
332298 if (ctx->idx >= sgid_tbl->max)
333299 return -EINVAL;
334
- gid_to_del = &sgid_tbl->tbl[ctx->idx];
300
+ gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
301
+ vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
335302 /* DEL_GID is called in WQ context(netdevice_event_work_handler)
336303 * or via the ib_unregister_device path. In the former case QP1
337304 * may not be destroyed yet, in which case just return as FW
@@ -342,17 +309,18 @@
342309 */
343310 if (ctx->idx == 0 &&
344311 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
345
- ctx->refcnt == 1 && rdev->qp1_sqp) {
346
- dev_dbg(rdev_to_dev(rdev),
347
- "Trying to delete GID0 while QP1 is alive\n");
312
+ ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
313
+ ibdev_dbg(&rdev->ibdev,
314
+ "Trying to delete GID0 while QP1 is alive\n");
348315 return -EFAULT;
349316 }
350317 ctx->refcnt--;
351318 if (!ctx->refcnt) {
352
- rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
319
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
320
+ vlan_id, true);
353321 if (rc) {
354
- dev_err(rdev_to_dev(rdev),
355
- "Failed to remove GID: %#x", rc);
322
+ ibdev_err(&rdev->ibdev,
323
+ "Failed to remove GID: %#x", rc);
356324 } else {
357325 ctx_tbl = sgid_tbl->ctx;
358326 ctx_tbl[ctx->idx] = NULL;
@@ -374,8 +342,9 @@
374342 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
375343 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
376344
377
- if ((attr->ndev) && is_vlan_dev(attr->ndev))
378
- vlan_id = vlan_dev_vlan_id(attr->ndev);
345
+ rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
346
+ if (rc)
347
+ return rc;
379348
380349 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
381350 rdev->qplib_res.netdev->dev_addr,
@@ -388,7 +357,7 @@
388357 }
389358
390359 if (rc < 0) {
391
- dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
360
+ ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
392361 return rc;
393362 }
394363
@@ -451,12 +420,12 @@
451420 wqe.bind.r_key = fence->bind_rkey;
452421 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
453422
454
- dev_dbg(rdev_to_dev(qp->rdev),
455
- "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
423
+ ibdev_dbg(&qp->rdev->ibdev,
424
+ "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
456425 wqe.bind.r_key, qp->qplib_qp.id, pd);
457426 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
458427 if (rc) {
459
- dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
428
+ ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
460429 return rc;
461430 }
462431 bnxt_qplib_post_send_db(&qp->qplib_qp);
@@ -500,14 +469,13 @@
500469 struct bnxt_re_mr *mr = NULL;
501470 dma_addr_t dma_addr = 0;
502471 struct ib_mw *mw;
503
- u64 pbl_tbl;
504472 int rc;
505473
506474 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
507475 DMA_BIDIRECTIONAL);
508476 rc = dma_mapping_error(dev, dma_addr);
509477 if (rc) {
510
- dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
478
+ ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
511479 rc = -EIO;
512480 fence->dma_addr = 0;
513481 goto fail;
@@ -527,7 +495,7 @@
527495 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
528496 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
529497 if (rc) {
530
- dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
498
+ ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
531499 goto fail;
532500 }
533501
@@ -535,11 +503,10 @@
535503 mr->ib_mr.lkey = mr->qplib_mr.lkey;
536504 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
537505 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
538
- pbl_tbl = dma_addr;
539
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
540
- BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
506
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
507
+ BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
541508 if (rc) {
542
- dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
509
+ ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
543510 goto fail;
544511 }
545512 mr->ib_mr.rkey = mr->qplib_mr.rkey;
@@ -547,8 +514,8 @@
547514 /* Create a fence MW only for kernel consumers */
548515 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
549516 if (IS_ERR(mw)) {
550
- dev_err(rdev_to_dev(rdev),
551
- "Failed to create fence-MW for PD: %p\n", pd);
517
+ ibdev_err(&rdev->ibdev,
518
+ "Failed to create fence-MW for PD: %p\n", pd);
552519 rc = PTR_ERR(mw);
553520 goto fail;
554521 }
@@ -563,44 +530,31 @@
563530 }
564531
565532 /* Protection Domains */
566
-int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
533
+int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
567534 {
568535 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
569536 struct bnxt_re_dev *rdev = pd->rdev;
570
- int rc;
571537
572538 bnxt_re_destroy_fence_mr(pd);
573539
574
- if (pd->qplib_pd.id) {
575
- rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
576
- &rdev->qplib_res.pd_tbl,
577
- &pd->qplib_pd);
578
- if (rc)
579
- dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
580
- }
581
-
582
- kfree(pd);
540
+ if (pd->qplib_pd.id)
541
+ bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
542
+ &pd->qplib_pd);
583543 return 0;
584544 }
585545
586
-struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
587
- struct ib_ucontext *ucontext,
588
- struct ib_udata *udata)
546
+int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
589547 {
548
+ struct ib_device *ibdev = ibpd->device;
590549 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
591
- struct bnxt_re_ucontext *ucntx = container_of(ucontext,
592
- struct bnxt_re_ucontext,
593
- ib_uctx);
594
- struct bnxt_re_pd *pd;
550
+ struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
551
+ udata, struct bnxt_re_ucontext, ib_uctx);
552
+ struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
595553 int rc;
596
-
597
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
598
- if (!pd)
599
- return ERR_PTR(-ENOMEM);
600554
601555 pd->rdev = rdev;
602556 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
603
- dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
557
+ ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
604558 rc = -ENOMEM;
605559 goto fail;
606560 }
@@ -627,59 +581,71 @@
627581
628582 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
629583 if (rc) {
630
- dev_err(rdev_to_dev(rdev),
631
- "Failed to copy user response\n");
584
+ ibdev_err(&rdev->ibdev,
585
+ "Failed to copy user response\n");
632586 goto dbfail;
633587 }
634588 }
635589
636590 if (!udata)
637591 if (bnxt_re_create_fence_mr(pd))
638
- dev_warn(rdev_to_dev(rdev),
639
- "Failed to create Fence-MR\n");
640
- return &pd->ib_pd;
592
+ ibdev_warn(&rdev->ibdev,
593
+ "Failed to create Fence-MR\n");
594
+ return 0;
641595 dbfail:
642
- (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
643
- &pd->qplib_pd);
596
+ bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
597
+ &pd->qplib_pd);
644598 fail:
645
- kfree(pd);
646
- return ERR_PTR(rc);
599
+ return rc;
647600 }
648601
649602 /* Address Handles */
650
-int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
603
+int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
651604 {
652605 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
653606 struct bnxt_re_dev *rdev = ah->rdev;
654
- int rc;
655607
656
- rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
657
- if (rc) {
658
- dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
659
- return rc;
660
- }
661
- kfree(ah);
608
+ bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
609
+ !(flags & RDMA_DESTROY_AH_SLEEPABLE));
662610 return 0;
663611 }
664612
665
-struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
666
- struct rdma_ah_attr *ah_attr,
667
- struct ib_udata *udata)
613
+static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
668614 {
669
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
670
- struct bnxt_re_dev *rdev = pd->rdev;
671
- struct bnxt_re_ah *ah;
672
- const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
673
- int rc;
674615 u8 nw_type;
675616
676
- if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
677
- dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
678
- return ERR_PTR(-EINVAL);
617
+ switch (ntype) {
618
+ case RDMA_NETWORK_IPV4:
619
+ nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
620
+ break;
621
+ case RDMA_NETWORK_IPV6:
622
+ nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
623
+ break;
624
+ default:
625
+ nw_type = CMDQ_CREATE_AH_TYPE_V1;
626
+ break;
679627 }
680
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
681
- if (!ah)
682
- return ERR_PTR(-ENOMEM);
628
+ return nw_type;
629
+}
630
+
631
+int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
632
+ struct ib_udata *udata)
633
+{
634
+ struct ib_pd *ib_pd = ib_ah->pd;
635
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
636
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
637
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
638
+ struct bnxt_re_dev *rdev = pd->rdev;
639
+ const struct ib_gid_attr *sgid_attr;
640
+ struct bnxt_re_gid_ctx *ctx;
641
+ struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
642
+ u8 nw_type;
643
+ int rc;
644
+
645
+ if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
646
+ ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
647
+ return -EINVAL;
648
+ }
683649
684650 ah->rdev = rdev;
685651 ah->qplib_ah.pd = &pd->qplib_pd;
@@ -687,55 +653,38 @@
687653 /* Supply the configuration for the HW */
688654 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
689655 sizeof(union ib_gid));
690
- /*
691
- * If RoCE V2 is enabled, stack will have two entries for
692
- * each GID entry. Avoiding this duplicte entry in HW. Dividing
693
- * the GID index by 2 for RoCE V2
656
+ sgid_attr = grh->sgid_attr;
657
+ /* Get the HW context of the GID. The reference
658
+ * of GID table entry is already taken by the caller.
694659 */
695
- ah->qplib_ah.sgid_index = grh->sgid_index / 2;
660
+ ctx = rdma_read_gid_hw_context(sgid_attr);
661
+ ah->qplib_ah.sgid_index = ctx->idx;
696662 ah->qplib_ah.host_sgid_index = grh->sgid_index;
697663 ah->qplib_ah.traffic_class = grh->traffic_class;
698664 ah->qplib_ah.flow_label = grh->flow_label;
699665 ah->qplib_ah.hop_limit = grh->hop_limit;
700666 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
701
- if (ib_pd->uobject &&
702
- !rdma_is_multicast_addr((struct in6_addr *)
703
- grh->dgid.raw) &&
704
- !rdma_link_local_addr((struct in6_addr *)
705
- grh->dgid.raw)) {
706
- const struct ib_gid_attr *sgid_attr;
707667
708
- sgid_attr = grh->sgid_attr;
709
- /* Get network header type for this GID */
710
- nw_type = rdma_gid_attr_network_type(sgid_attr);
711
- switch (nw_type) {
712
- case RDMA_NETWORK_IPV4:
713
- ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
714
- break;
715
- case RDMA_NETWORK_IPV6:
716
- ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
717
- break;
718
- default:
719
- ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
720
- break;
721
- }
722
- }
668
+ /* Get network header type for this GID */
669
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
670
+ ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
723671
724672 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
725
- rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
673
+ rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
674
+ !(init_attr->flags &
675
+ RDMA_CREATE_AH_SLEEPABLE));
726676 if (rc) {
727
- dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
728
- goto fail;
677
+ ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
678
+ return rc;
729679 }
730680
731681 /* Write AVID to shared page. */
732
- if (ib_pd->uobject) {
733
- struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
734
- struct bnxt_re_ucontext *uctx;
682
+ if (udata) {
683
+ struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
684
+ udata, struct bnxt_re_ucontext, ib_uctx);
735685 unsigned long flag;
736686 u32 *wrptr;
737687
738
- uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
739688 spin_lock_irqsave(&uctx->sh_lock, flag);
740689 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
741690 *wrptr = ah->qplib_ah.id;
....@@ -743,11 +692,7 @@
743692 spin_unlock_irqrestore(&uctx->sh_lock, flag);
744693 }
745694
746
- return &ah->ib_ah;
747
-
748
-fail:
749
- kfree(ah);
750
- return ERR_PTR(rc);
695
+ return 0;
751696 }
752697
753698 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
@@ -796,67 +741,91 @@
796741 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
797742 }
798743
744
+static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
745
+{
746
+ struct bnxt_re_qp *gsi_sqp;
747
+ struct bnxt_re_ah *gsi_sah;
748
+ struct bnxt_re_dev *rdev;
749
+ int rc = 0;
750
+
751
+ rdev = qp->rdev;
752
+ gsi_sqp = rdev->gsi_ctx.gsi_sqp;
753
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
754
+
755
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
756
+ bnxt_qplib_destroy_ah(&rdev->qplib_res,
757
+ &gsi_sah->qplib_ah,
758
+ true);
759
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
760
+
761
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
762
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
763
+ if (rc) {
764
+ ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
765
+ goto fail;
766
+ }
767
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
768
+
769
+ /* remove from active qp list */
770
+ mutex_lock(&rdev->qp_lock);
771
+ list_del(&gsi_sqp->list);
772
+ mutex_unlock(&rdev->qp_lock);
773
+ atomic_dec(&rdev->qp_count);
774
+
775
+ kfree(rdev->gsi_ctx.sqp_tbl);
776
+ kfree(gsi_sah);
777
+ kfree(gsi_sqp);
778
+ rdev->gsi_ctx.gsi_sqp = NULL;
779
+ rdev->gsi_ctx.gsi_sah = NULL;
780
+ rdev->gsi_ctx.sqp_tbl = NULL;
781
+
782
+ return 0;
783
+fail:
784
+ return rc;
785
+}
786
+
799787 /* Queue Pairs */
800
-int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
788
+int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
801789 {
802790 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
803791 struct bnxt_re_dev *rdev = qp->rdev;
804
- int rc;
805792 unsigned int flags;
793
+ int rc;
806794
807795 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
796
+
808797 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
809798 if (rc) {
810
- dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
799
+ ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
811800 return rc;
812801 }
813802
814
- flags = bnxt_re_lock_cqs(qp);
815
- bnxt_qplib_clean_qp(&qp->qplib_qp);
816
- bnxt_re_unlock_cqs(qp, flags);
817
- bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
818
-
819
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
820
- rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
821
- &rdev->sqp_ah->qplib_ah);
822
- if (rc) {
823
- dev_err(rdev_to_dev(rdev),
824
- "Failed to destroy HW AH for shadow QP");
825
- return rc;
826
- }
827
-
803
+ if (rdma_is_kernel_res(&qp->ib_qp.res)) {
804
+ flags = bnxt_re_lock_cqs(qp);
828805 bnxt_qplib_clean_qp(&qp->qplib_qp);
829
- rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
830
- &rdev->qp1_sqp->qplib_qp);
831
- if (rc) {
832
- dev_err(rdev_to_dev(rdev),
833
- "Failed to destroy Shadow QP");
834
- return rc;
835
- }
836
- bnxt_qplib_free_qp_res(&rdev->qplib_res,
837
- &rdev->qp1_sqp->qplib_qp);
838
- mutex_lock(&rdev->qp_lock);
839
- list_del(&rdev->qp1_sqp->list);
840
- atomic_dec(&rdev->qp_count);
841
- mutex_unlock(&rdev->qp_lock);
842
-
843
- kfree(rdev->sqp_ah);
844
- kfree(rdev->qp1_sqp);
845
- rdev->qp1_sqp = NULL;
846
- rdev->sqp_ah = NULL;
806
+ bnxt_re_unlock_cqs(qp, flags);
847807 }
848808
849
- if (!IS_ERR_OR_NULL(qp->rumem))
850
- ib_umem_release(qp->rumem);
851
- if (!IS_ERR_OR_NULL(qp->sumem))
852
- ib_umem_release(qp->sumem);
809
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
810
+
811
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
812
+ rc = bnxt_re_destroy_gsi_sqp(qp);
813
+ if (rc)
814
+ goto sh_fail;
815
+ }
853816
854817 mutex_lock(&rdev->qp_lock);
855818 list_del(&qp->list);
856
- atomic_dec(&rdev->qp_count);
857819 mutex_unlock(&rdev->qp_lock);
820
+ atomic_dec(&rdev->qp_count);
821
+
822
+ ib_umem_release(qp->rumem);
823
+ ib_umem_release(qp->sumem);
824
+
858825 kfree(qp);
859826 return 0;
827
+sh_fail:
828
+ return rc;
860829 }
861830
862831 static u8 __from_ib_qp_type(enum ib_qp_type type)
@@ -873,45 +842,118 @@
873842 }
874843 }
875844
845
+static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
846
+ int rsge, int max)
847
+{
848
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
849
+ rsge = max;
850
+ return bnxt_re_get_rwqe_size(rsge);
851
+}
852
+
853
+static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
854
+{
855
+ u16 wqe_size, calc_ils;
856
+
857
+ wqe_size = bnxt_re_get_swqe_size(nsge);
858
+ if (ilsize) {
859
+ calc_ils = sizeof(struct sq_send_hdr) + ilsize;
860
+ wqe_size = max_t(u16, calc_ils, wqe_size);
861
+ wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
862
+ }
863
+ return wqe_size;
864
+}
865
+
866
+static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
867
+ struct ib_qp_init_attr *init_attr)
868
+{
869
+ struct bnxt_qplib_dev_attr *dev_attr;
870
+ struct bnxt_qplib_qp *qplqp;
871
+ struct bnxt_re_dev *rdev;
872
+ struct bnxt_qplib_q *sq;
873
+ int align, ilsize;
874
+
875
+ rdev = qp->rdev;
876
+ qplqp = &qp->qplib_qp;
877
+ sq = &qplqp->sq;
878
+ dev_attr = &rdev->dev_attr;
879
+
880
+ align = sizeof(struct sq_send_hdr);
881
+ ilsize = ALIGN(init_attr->cap.max_inline_data, align);
882
+
883
+ sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
884
+ if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
885
+ return -EINVAL;
886
+ /* For gen p4 and gen p5 backward compatibility mode
887
+ * wqe size is fixed to 128 bytes
888
+ */
889
+ if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
890
+ qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
891
+ sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
892
+
893
+ if (init_attr->cap.max_inline_data) {
894
+ qplqp->max_inline_data = sq->wqe_size -
895
+ sizeof(struct sq_send_hdr);
896
+ init_attr->cap.max_inline_data = qplqp->max_inline_data;
897
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
898
+ sq->max_sge = qplqp->max_inline_data /
899
+ sizeof(struct sq_sge);
900
+ }
901
+
902
+ return 0;
903
+}
904
+
876905 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
877906 struct bnxt_re_qp *qp, struct ib_udata *udata)
878907 {
908
+ struct bnxt_qplib_qp *qplib_qp;
909
+ struct bnxt_re_ucontext *cntx;
879910 struct bnxt_re_qp_req ureq;
880
- struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
911
+ int bytes = 0, psn_sz;
881912 struct ib_umem *umem;
882
- int bytes = 0;
883
- struct ib_ucontext *context = pd->ib_pd.uobject->context;
884
- struct bnxt_re_ucontext *cntx = container_of(context,
885
- struct bnxt_re_ucontext,
886
- ib_uctx);
913
+ int psn_nume;
914
+
915
+ qplib_qp = &qp->qplib_qp;
916
+ cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
917
+ ib_uctx);
887918 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
888919 return -EFAULT;
889920
890
- bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
921
+ bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
891922 /* Consider mapping PSN search memory only for RC QPs. */
892
- if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
893
- bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
923
+ if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
924
+ psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
925
+ sizeof(struct sq_psn_search_ext) :
926
+ sizeof(struct sq_psn_search);
927
+ psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
928
+ qplib_qp->sq.max_wqe :
929
+ ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
930
+ sizeof(struct bnxt_qplib_sge));
931
+ bytes += (psn_nume * psn_sz);
932
+ }
933
+
894934 bytes = PAGE_ALIGN(bytes);
895
- umem = ib_umem_get(context, ureq.qpsva, bytes,
896
- IB_ACCESS_LOCAL_WRITE, 1);
935
+ umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
936
+ IB_ACCESS_LOCAL_WRITE);
897937 if (IS_ERR(umem))
898938 return PTR_ERR(umem);
899939
900940 qp->sumem = umem;
901
- qplib_qp->sq.sglist = umem->sg_head.sgl;
902
- qplib_qp->sq.nmap = umem->nmap;
941
+ qplib_qp->sq.sg_info.umem = umem;
942
+ qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
943
+ qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
903944 qplib_qp->qp_handle = ureq.qp_handle;
904945
905946 if (!qp->qplib_qp.srq) {
906
- bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
947
+ bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
907948 bytes = PAGE_ALIGN(bytes);
908
- umem = ib_umem_get(context, ureq.qprva, bytes,
909
- IB_ACCESS_LOCAL_WRITE, 1);
949
+ umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
950
+ IB_ACCESS_LOCAL_WRITE);
910951 if (IS_ERR(umem))
911952 goto rqfail;
912953 qp->rumem = umem;
913
- qplib_qp->rq.sglist = umem->sg_head.sgl;
914
- qplib_qp->rq.nmap = umem->nmap;
954
+ qplib_qp->rq.sg_info.umem = umem;
955
+ qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
956
+ qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
915957 }
916958
917959 qplib_qp->dpi = &cntx->dpi;
@@ -919,8 +961,7 @@
919961 rqfail:
920962 ib_umem_release(qp->sumem);
921963 qp->sumem = NULL;
922
- qplib_qp->sq.sglist = NULL;
923
- qplib_qp->sq.nmap = 0;
964
+ memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
924965
925966 return PTR_ERR(umem);
926967 }
@@ -958,10 +999,10 @@
958999 /* Have DMAC same as SMAC */
9591000 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
9601001
961
- rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
1002
+ rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
9621003 if (rc) {
963
- dev_err(rdev_to_dev(rdev),
964
- "Failed to allocate HW AH for Shadow QP");
1004
+ ibdev_err(&rdev->ibdev,
1005
+ "Failed to allocate HW AH for Shadow QP");
9651006 goto fail;
9661007 }
9671008
@@ -998,18 +1039,24 @@
9981039 qp->qplib_qp.sig_type = true;
9991040
10001041 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1042
+ qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
10011043 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
10021044 qp->qplib_qp.sq.max_sge = 2;
10031045 /* Q full delta can be 1 since it is internal QP */
10041046 qp->qplib_qp.sq.q_full_delta = 1;
1047
+ qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1048
+ qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
10051049
10061050 qp->qplib_qp.scq = qp1_qp->scq;
10071051 qp->qplib_qp.rcq = qp1_qp->rcq;
10081052
1053
+ qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
10091054 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
10101055 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
10111056 /* Q full delta can be 1 since it is internal QP */
10121057 qp->qplib_qp.rq.q_full_delta = 1;
1058
+ qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1059
+ qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
10131060
10141061 qp->qplib_qp.mtu = qp1_qp->mtu;
10151062
@@ -1020,8 +1067,6 @@
10201067 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
10211068 if (rc)
10221069 goto fail;
1023
-
1024
- rdev->sqp_id = qp->qplib_qp.id;
10251070
10261071 spin_lock_init(&qp->sq_lock);
10271072 INIT_LIST_HEAD(&qp->list);
@@ -1035,6 +1080,336 @@
10351080 return NULL;
10361081 }
10371082
1083
+static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1084
+ struct ib_qp_init_attr *init_attr)
1085
+{
1086
+ struct bnxt_qplib_dev_attr *dev_attr;
1087
+ struct bnxt_qplib_qp *qplqp;
1088
+ struct bnxt_re_dev *rdev;
1089
+ struct bnxt_qplib_q *rq;
1090
+ int entries;
1091
+
1092
+ rdev = qp->rdev;
1093
+ qplqp = &qp->qplib_qp;
1094
+ rq = &qplqp->rq;
1095
+ dev_attr = &rdev->dev_attr;
1096
+
1097
+ if (init_attr->srq) {
1098
+ struct bnxt_re_srq *srq;
1099
+
1100
+ srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1101
+ if (!srq) {
1102
+ ibdev_err(&rdev->ibdev, "SRQ not found");
1103
+ return -EINVAL;
1104
+ }
1105
+ qplqp->srq = &srq->qplib_srq;
1106
+ rq->max_wqe = 0;
1107
+ } else {
1108
+ rq->max_sge = init_attr->cap.max_recv_sge;
1109
+ if (rq->max_sge > dev_attr->max_qp_sges)
1110
+ rq->max_sge = dev_attr->max_qp_sges;
1111
+ init_attr->cap.max_recv_sge = rq->max_sge;
1112
+ rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1113
+ dev_attr->max_qp_sges);
1114
+ /* Allocate 1 more than what's provided so posting max doesn't
1115
+ * mean empty.
1116
+ */
1117
+ entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
1118
+ rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1119
+ rq->q_full_delta = 0;
1120
+ rq->sg_info.pgsize = PAGE_SIZE;
1121
+ rq->sg_info.pgshft = PAGE_SHIFT;
1122
+ }
1123
+
1124
+ return 0;
1125
+}
1126
+
1127
+static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1128
+{
1129
+ struct bnxt_qplib_dev_attr *dev_attr;
1130
+ struct bnxt_qplib_qp *qplqp;
1131
+ struct bnxt_re_dev *rdev;
1132
+
1133
+ rdev = qp->rdev;
1134
+ qplqp = &qp->qplib_qp;
1135
+ dev_attr = &rdev->dev_attr;
1136
+
1137
+ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1138
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
1139
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1140
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
1141
+ qplqp->rq.max_sge = 6;
1142
+ }
1143
+}
1144
+
1145
+static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1146
+ struct ib_qp_init_attr *init_attr,
1147
+ struct ib_udata *udata)
1148
+{
1149
+ struct bnxt_qplib_dev_attr *dev_attr;
1150
+ struct bnxt_qplib_qp *qplqp;
1151
+ struct bnxt_re_dev *rdev;
1152
+ struct bnxt_qplib_q *sq;
1153
+ int entries;
1154
+ int diff;
1155
+ int rc;
1156
+
1157
+ rdev = qp->rdev;
1158
+ qplqp = &qp->qplib_qp;
1159
+ sq = &qplqp->sq;
1160
+ dev_attr = &rdev->dev_attr;
1161
+
1162
+ sq->max_sge = init_attr->cap.max_send_sge;
1163
+ if (sq->max_sge > dev_attr->max_qp_sges) {
1164
+ sq->max_sge = dev_attr->max_qp_sges;
1165
+ init_attr->cap.max_send_sge = sq->max_sge;
1166
+ }
1167
+
1168
+ rc = bnxt_re_setup_swqe_size(qp, init_attr);
1169
+ if (rc)
1170
+ return rc;
1171
+
1172
+ entries = init_attr->cap.max_send_wr;
1173
+ /* Allocate 128 + 1 more than what's provided */
1174
+ diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1175
+ 0 : BNXT_QPLIB_RESERVED_QP_WRS;
1176
+ entries = roundup_pow_of_two(entries + diff + 1);
1177
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1178
+ sq->q_full_delta = diff + 1;
1179
+ /*
1180
+ * Reserving one slot for Phantom WQE. Application can
1181
+ * post one extra entry in this case. But allowing this to avoid
1182
+ * unexpected Queue full condition
1183
+ */
1184
+ qplqp->sq.q_full_delta -= 1;
1185
+ qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1186
+ qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1187
+
1188
+ return 0;
1189
+}
1190
+
1191
+static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1192
+ struct ib_qp_init_attr *init_attr)
1193
+{
1194
+ struct bnxt_qplib_dev_attr *dev_attr;
1195
+ struct bnxt_qplib_qp *qplqp;
1196
+ struct bnxt_re_dev *rdev;
1197
+ int entries;
1198
+
1199
+ rdev = qp->rdev;
1200
+ qplqp = &qp->qplib_qp;
1201
+ dev_attr = &rdev->dev_attr;
1202
+
1203
+ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1204
+ entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
1205
+ qplqp->sq.max_wqe = min_t(u32, entries,
1206
+ dev_attr->max_qp_wqes + 1);
1207
+ qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1208
+ init_attr->cap.max_send_wr;
1209
+ qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1210
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1211
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
1212
+ }
1213
+}
1214
+
1215
+static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1216
+ struct ib_qp_init_attr *init_attr)
1217
+{
1218
+ struct bnxt_qplib_chip_ctx *chip_ctx;
1219
+ int qptype;
1220
+
1221
+ chip_ctx = rdev->chip_ctx;
1222
+
1223
+ qptype = __from_ib_qp_type(init_attr->qp_type);
1224
+ if (qptype == IB_QPT_MAX) {
1225
+ ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1226
+ qptype = -EOPNOTSUPP;
1227
+ goto out;
1228
+ }
1229
+
1230
+ if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
1231
+ init_attr->qp_type == IB_QPT_GSI)
1232
+ qptype = CMDQ_CREATE_QP_TYPE_GSI;
1233
+out:
1234
+ return qptype;
1235
+}
1236
+
1237
+static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1238
+ struct ib_qp_init_attr *init_attr,
1239
+ struct ib_udata *udata)
1240
+{
1241
+ struct bnxt_qplib_dev_attr *dev_attr;
1242
+ struct bnxt_qplib_qp *qplqp;
1243
+ struct bnxt_re_dev *rdev;
1244
+ struct bnxt_re_cq *cq;
1245
+ int rc = 0, qptype;
1246
+
1247
+ rdev = qp->rdev;
1248
+ qplqp = &qp->qplib_qp;
1249
+ dev_attr = &rdev->dev_attr;
1250
+
1251
+ /* Setup misc params */
1252
+ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1253
+ qplqp->pd = &pd->qplib_pd;
1254
+ qplqp->qp_handle = (u64)qplqp;
1255
+ qplqp->max_inline_data = init_attr->cap.max_inline_data;
1256
+ qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
1257
+ true : false);
1258
+ qptype = bnxt_re_init_qp_type(rdev, init_attr);
1259
+ if (qptype < 0) {
1260
+ rc = qptype;
1261
+ goto out;
1262
+ }
1263
+ qplqp->type = (u8)qptype;
1264
+ qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1265
+
1266
+ if (init_attr->qp_type == IB_QPT_RC) {
1267
+ qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1268
+ qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1269
+ }
1270
+ qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1271
+ qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1272
+ if (init_attr->create_flags)
1273
+ ibdev_dbg(&rdev->ibdev,
1274
+ "QP create flags 0x%x not supported",
1275
+ init_attr->create_flags);
1276
+
1277
+ /* Setup CQs */
1278
+ if (init_attr->send_cq) {
1279
+ cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1280
+ if (!cq) {
1281
+ ibdev_err(&rdev->ibdev, "Send CQ not found");
1282
+ rc = -EINVAL;
1283
+ goto out;
1284
+ }
1285
+ qplqp->scq = &cq->qplib_cq;
1286
+ qp->scq = cq;
1287
+ }
1288
+
1289
+ if (init_attr->recv_cq) {
1290
+ cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1291
+ if (!cq) {
1292
+ ibdev_err(&rdev->ibdev, "Receive CQ not found");
1293
+ rc = -EINVAL;
1294
+ goto out;
1295
+ }
1296
+ qplqp->rcq = &cq->qplib_cq;
1297
+ qp->rcq = cq;
1298
+ }
1299
+
1300
+ /* Setup RQ/SRQ */
1301
+ rc = bnxt_re_init_rq_attr(qp, init_attr);
1302
+ if (rc)
1303
+ goto out;
1304
+ if (init_attr->qp_type == IB_QPT_GSI)
1305
+ bnxt_re_adjust_gsi_rq_attr(qp);
1306
+
1307
+ /* Setup SQ */
1308
+ rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
1309
+ if (rc)
1310
+ goto out;
1311
+ if (init_attr->qp_type == IB_QPT_GSI)
1312
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
1313
+
1314
+ if (udata) /* This will update DPI and qp_handle */
1315
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1316
+out:
1317
+ return rc;
1318
+}
1319
+
1320
+static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1321
+ struct bnxt_re_pd *pd)
1322
+{
1323
+ struct bnxt_re_sqp_entries *sqp_tbl = NULL;
1324
+ struct bnxt_re_dev *rdev;
1325
+ struct bnxt_re_qp *sqp;
1326
+ struct bnxt_re_ah *sah;
1327
+ int rc = 0;
1328
+
1329
+ rdev = qp->rdev;
1330
+ /* Create a shadow QP to handle the QP1 traffic */
1331
+ sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
1332
+ GFP_KERNEL);
1333
+ if (!sqp_tbl)
1334
+ return -ENOMEM;
1335
+ rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1336
+
1337
+ sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1338
+ if (!sqp) {
1339
+ rc = -ENODEV;
1340
+ ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1341
+ goto out;
1342
+ }
1343
+ rdev->gsi_ctx.gsi_sqp = sqp;
1344
+
1345
+ sqp->rcq = qp->rcq;
1346
+ sqp->scq = qp->scq;
1347
+ sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1348
+ &qp->qplib_qp);
1349
+ if (!sah) {
1350
+ bnxt_qplib_destroy_qp(&rdev->qplib_res,
1351
+ &sqp->qplib_qp);
1352
+ rc = -ENODEV;
1353
+ ibdev_err(&rdev->ibdev,
1354
+ "Failed to create AH entry for ShadowQP");
1355
+ goto out;
1356
+ }
1357
+ rdev->gsi_ctx.gsi_sah = sah;
1358
+
1359
+ return 0;
1360
+out:
1361
+ kfree(sqp_tbl);
1362
+ return rc;
1363
+}
1364
+
1365
+static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1366
+ struct ib_qp_init_attr *init_attr)
1367
+{
1368
+ struct bnxt_re_dev *rdev;
1369
+ struct bnxt_qplib_qp *qplqp;
1370
+ int rc = 0;
1371
+
1372
+ rdev = qp->rdev;
1373
+ qplqp = &qp->qplib_qp;
1374
+
1375
+ qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1376
+ qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1377
+
1378
+ rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1379
+ if (rc) {
1380
+ ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1381
+ goto out;
1382
+ }
1383
+
1384
+ rc = bnxt_re_create_shadow_gsi(qp, pd);
1385
+out:
1386
+ return rc;
1387
+}
1388
+
1389
+static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1390
+ struct ib_qp_init_attr *init_attr,
1391
+ struct bnxt_qplib_dev_attr *dev_attr)
1392
+{
1393
+ bool rc = true;
1394
+
1395
+ if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1396
+ init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1397
+ init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1398
+ init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1399
+ init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1400
+ ibdev_err(&rdev->ibdev,
1401
+ "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1402
+ init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1403
+ init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1404
+ init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1405
+ init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1406
+ init_attr->cap.max_inline_data,
1407
+ dev_attr->max_inline_data);
1408
+ rc = false;
1409
+ }
1410
+ return rc;
1411
+}
1412
+
10381413 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
10391414 struct ib_qp_init_attr *qp_init_attr,
10401415 struct ib_udata *udata)
@@ -1043,204 +1418,70 @@
10431418 struct bnxt_re_dev *rdev = pd->rdev;
10441419 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
10451420 struct bnxt_re_qp *qp;
1046
- struct bnxt_re_cq *cq;
1047
- struct bnxt_re_srq *srq;
1048
- int rc, entries;
1421
+ int rc;
10491422
1050
- if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1051
- (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1052
- (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1053
- (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1054
- (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1055
- return ERR_PTR(-EINVAL);
1423
+ rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1424
+ if (!rc) {
1425
+ rc = -EINVAL;
1426
+ goto exit;
1427
+ }
10561428
10571429 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1058
- if (!qp)
1059
- return ERR_PTR(-ENOMEM);
1060
-
1430
+ if (!qp) {
1431
+ rc = -ENOMEM;
1432
+ goto exit;
1433
+ }
10611434 qp->rdev = rdev;
1062
- ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1063
- qp->qplib_qp.pd = &pd->qplib_pd;
1064
- qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1065
- qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1066
- if (qp->qplib_qp.type == IB_QPT_MAX) {
1067
- dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1068
- qp->qplib_qp.type);
1069
- rc = -EINVAL;
1435
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1436
+ if (rc)
10701437 goto fail;
1071
- }
1072
- qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1073
- qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1074
- IB_SIGNAL_ALL_WR) ? true : false);
10751438
1076
- qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1077
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1078
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1079
-
1080
- if (qp_init_attr->send_cq) {
1081
- cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1082
- ib_cq);
1083
- if (!cq) {
1084
- dev_err(rdev_to_dev(rdev), "Send CQ not found");
1085
- rc = -EINVAL;
1086
- goto fail;
1087
- }
1088
- qp->qplib_qp.scq = &cq->qplib_cq;
1089
- qp->scq = cq;
1090
- }
1091
-
1092
- if (qp_init_attr->recv_cq) {
1093
- cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1094
- ib_cq);
1095
- if (!cq) {
1096
- dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1097
- rc = -EINVAL;
1098
- goto fail;
1099
- }
1100
- qp->qplib_qp.rcq = &cq->qplib_cq;
1101
- qp->rcq = cq;
1102
- }
1103
-
1104
- if (qp_init_attr->srq) {
1105
- srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1106
- ib_srq);
1107
- if (!srq) {
1108
- dev_err(rdev_to_dev(rdev), "SRQ not found");
1109
- rc = -EINVAL;
1110
- goto fail;
1111
- }
1112
- qp->qplib_qp.srq = &srq->qplib_srq;
1113
- qp->qplib_qp.rq.max_wqe = 0;
1114
- } else {
1115
- /* Allocate 1 more than what's provided so posting max doesn't
1116
- * mean empty
1117
- */
1118
- entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1119
- qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1120
- dev_attr->max_qp_wqes + 1);
1121
-
1122
- qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1123
- qp_init_attr->cap.max_recv_wr;
1124
-
1125
- qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1126
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1127
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1128
- }
1129
-
1130
- qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1131
-
1132
- if (qp_init_attr->qp_type == IB_QPT_GSI) {
1133
- /* Allocate 1 more than what's provided */
1134
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1135
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1136
- dev_attr->max_qp_wqes + 1);
1137
- qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1138
- qp_init_attr->cap.max_send_wr;
1139
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1140
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1141
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1142
- qp->qplib_qp.sq.max_sge++;
1143
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1144
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1145
-
1146
- qp->qplib_qp.rq_hdr_buf_size =
1147
- BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1148
-
1149
- qp->qplib_qp.sq_hdr_buf_size =
1150
- BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1151
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
1152
- rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1153
- if (rc) {
1154
- dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1155
- goto fail;
1156
- }
1157
- /* Create a shadow QP to handle the QP1 traffic */
1158
- rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1159
- &qp->qplib_qp);
1160
- if (!rdev->qp1_sqp) {
1161
- rc = -EINVAL;
1162
- dev_err(rdev_to_dev(rdev),
1163
- "Failed to create Shadow QP for QP1");
1439
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
1440
+ !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
1441
+ rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1442
+ if (rc == -ENODEV)
11641443 goto qp_destroy;
1165
- }
1166
- rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1167
- &qp->qplib_qp);
1168
- if (!rdev->sqp_ah) {
1169
- bnxt_qplib_destroy_qp(&rdev->qplib_res,
1170
- &rdev->qp1_sqp->qplib_qp);
1171
- rc = -EINVAL;
1172
- dev_err(rdev_to_dev(rdev),
1173
- "Failed to create AH entry for ShadowQP");
1174
- goto qp_destroy;
1175
- }
1176
-
1444
+ if (rc)
1445
+ goto fail;
11771446 } else {
1178
- /* Allocate 128 + 1 more than what's provided */
1179
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1180
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
1181
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1182
- dev_attr->max_qp_wqes +
1183
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
1184
- qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1185
-
1186
- /*
1187
- * Reserving one slot for Phantom WQE. Application can
1188
- * post one extra entry in this case. But allowing this to avoid
1189
- * unexpected Queue full condition
1190
- */
1191
-
1192
- qp->qplib_qp.sq.q_full_delta -= 1;
1193
-
1194
- qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1195
- qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1196
- if (udata) {
1197
- rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1198
- if (rc)
1199
- goto fail;
1200
- } else {
1201
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
1202
- }
1203
-
12041447 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
12051448 if (rc) {
1206
- dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1449
+ ibdev_err(&rdev->ibdev, "Failed to create HW QP");
12071450 goto free_umem;
1451
+ }
1452
+ if (udata) {
1453
+ struct bnxt_re_qp_resp resp;
1454
+
1455
+ resp.qpid = qp->qplib_qp.id;
1456
+ resp.rsvd = 0;
1457
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1458
+ if (rc) {
1459
+ ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1460
+ goto qp_destroy;
1461
+ }
12081462 }
12091463 }
12101464
12111465 qp->ib_qp.qp_num = qp->qplib_qp.id;
1466
+ if (qp_init_attr->qp_type == IB_QPT_GSI)
1467
+ rdev->gsi_ctx.gsi_qp = qp;
12121468 spin_lock_init(&qp->sq_lock);
12131469 spin_lock_init(&qp->rq_lock);
1214
-
1215
- if (udata) {
1216
- struct bnxt_re_qp_resp resp;
1217
-
1218
- resp.qpid = qp->ib_qp.qp_num;
1219
- resp.rsvd = 0;
1220
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1221
- if (rc) {
1222
- dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1223
- goto qp_destroy;
1224
- }
1225
- }
12261470 INIT_LIST_HEAD(&qp->list);
12271471 mutex_lock(&rdev->qp_lock);
12281472 list_add_tail(&qp->list, &rdev->qp_list);
1229
- atomic_inc(&rdev->qp_count);
12301473 mutex_unlock(&rdev->qp_lock);
1474
+ atomic_inc(&rdev->qp_count);
12311475
12321476 return &qp->ib_qp;
12331477 qp_destroy:
12341478 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
12351479 free_umem:
1236
- if (udata) {
1237
- if (qp->rumem)
1238
- ib_umem_release(qp->rumem);
1239
- if (qp->sumem)
1240
- ib_umem_release(qp->sumem);
1241
- }
1480
+ ib_umem_release(qp->rumem);
1481
+ ib_umem_release(qp->sumem);
12421482 fail:
12431483 kfree(qp);
1484
+exit:
12441485 return ERR_PTR(rc);
12451486 }
12461487
@@ -1323,26 +1564,18 @@
13231564 }
13241565
13251566 /* Shared Receive Queues */
1326
-int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
1567
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
13271568 {
13281569 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
13291570 ib_srq);
13301571 struct bnxt_re_dev *rdev = srq->rdev;
13311572 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
13321573 struct bnxt_qplib_nq *nq = NULL;
1333
- int rc;
13341574
13351575 if (qplib_srq->cq)
13361576 nq = qplib_srq->cq->nq;
1337
- rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1338
- if (rc) {
1339
- dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
1340
- return rc;
1341
- }
1342
-
1343
- if (srq->umem)
1344
- ib_umem_release(srq->umem);
1345
- kfree(srq);
1577
+ bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1578
+ ib_umem_release(srq->umem);
13461579 atomic_dec(&rdev->srq_count);
13471580 if (nq)
13481581 nq->budget--;
@@ -1358,42 +1591,49 @@
13581591 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
13591592 struct ib_umem *umem;
13601593 int bytes = 0;
1361
- struct ib_ucontext *context = pd->ib_pd.uobject->context;
1362
- struct bnxt_re_ucontext *cntx = container_of(context,
1363
- struct bnxt_re_ucontext,
1364
- ib_uctx);
1594
+ struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1595
+ udata, struct bnxt_re_ucontext, ib_uctx);
1596
+
13651597 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
13661598 return -EFAULT;
13671599
1368
- bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1600
+ bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
13691601 bytes = PAGE_ALIGN(bytes);
1370
- umem = ib_umem_get(context, ureq.srqva, bytes,
1371
- IB_ACCESS_LOCAL_WRITE, 1);
1602
+ umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1603
+ IB_ACCESS_LOCAL_WRITE);
13721604 if (IS_ERR(umem))
13731605 return PTR_ERR(umem);
13741606
13751607 srq->umem = umem;
1376
- qplib_srq->nmap = umem->nmap;
1377
- qplib_srq->sglist = umem->sg_head.sgl;
1608
+ qplib_srq->sg_info.umem = umem;
1609
+ qplib_srq->sg_info.pgsize = PAGE_SIZE;
1610
+ qplib_srq->sg_info.pgshft = PAGE_SHIFT;
13781611 qplib_srq->srq_handle = ureq.srq_handle;
13791612 qplib_srq->dpi = &cntx->dpi;
13801613
13811614 return 0;
13821615 }
13831616
1384
-struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1385
- struct ib_srq_init_attr *srq_init_attr,
1386
- struct ib_udata *udata)
1617
+int bnxt_re_create_srq(struct ib_srq *ib_srq,
1618
+ struct ib_srq_init_attr *srq_init_attr,
1619
+ struct ib_udata *udata)
13871620 {
1388
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1389
- struct bnxt_re_dev *rdev = pd->rdev;
1390
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1391
- struct bnxt_re_srq *srq;
1621
+ struct bnxt_qplib_dev_attr *dev_attr;
13921622 struct bnxt_qplib_nq *nq = NULL;
1623
+ struct bnxt_re_dev *rdev;
1624
+ struct bnxt_re_srq *srq;
1625
+ struct bnxt_re_pd *pd;
1626
+ struct ib_pd *ib_pd;
13931627 int rc, entries;
13941628
1629
+ ib_pd = ib_srq->pd;
1630
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1631
+ rdev = pd->rdev;
1632
+ dev_attr = &rdev->dev_attr;
1633
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1634
+
13951635 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1396
- dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
1636
+ ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
13971637 rc = -EINVAL;
13981638 goto exit;
13991639 }
@@ -1403,11 +1643,6 @@
14031643 goto exit;
14041644 }
14051645
1406
- srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1407
- if (!srq) {
1408
- rc = -ENOMEM;
1409
- goto exit;
1410
- }
14111646 srq->rdev = rdev;
14121647 srq->qplib_srq.pd = &pd->qplib_pd;
14131648 srq->qplib_srq.dpi = &rdev->dpi_privileged;
@@ -1417,9 +1652,11 @@
14171652 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
14181653 if (entries > dev_attr->max_srq_wqes + 1)
14191654 entries = dev_attr->max_srq_wqes + 1;
1420
-
14211655 srq->qplib_srq.max_wqe = entries;
1656
+
14221657 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1658
+ /* 128 byte wqe size for SRQ . So use max sges */
1659
+ srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
14231660 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
14241661 srq->srq_limit = srq_init_attr->attr.srq_limit;
14251662 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
@@ -1433,7 +1670,7 @@
14331670
14341671 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
14351672 if (rc) {
1436
- dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1673
+ ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
14371674 goto fail;
14381675 }
14391676
@@ -1443,7 +1680,7 @@
14431680 resp.srqid = srq->qplib_srq.id;
14441681 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
14451682 if (rc) {
1446
- dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1683
+ ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
14471684 bnxt_qplib_destroy_srq(&rdev->qplib_res,
14481685 &srq->qplib_srq);
14491686 goto fail;
@@ -1452,15 +1689,14 @@
14521689 if (nq)
14531690 nq->budget++;
14541691 atomic_inc(&rdev->srq_count);
1692
+ spin_lock_init(&srq->lock);
14551693
1456
- return &srq->ib_srq;
1694
+ return 0;
14571695
14581696 fail:
1459
- if (srq->umem)
1460
- ib_umem_release(srq->umem);
1461
- kfree(srq);
1697
+ ib_umem_release(srq->umem);
14621698 exit:
1463
- return ERR_PTR(rc);
1699
+ return rc;
14641700 }
14651701
14661702 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
@@ -1484,7 +1720,7 @@
14841720 srq->qplib_srq.threshold = srq_attr->srq_limit;
14851721 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
14861722 if (rc) {
1487
- dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1723
+ ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
14881724 return rc;
14891725 }
14901726 /* On success, update the shadow */
@@ -1492,8 +1728,8 @@
14921728 /* No need to Build and send response back to udata */
14931729 break;
14941730 default:
1495
- dev_err(rdev_to_dev(rdev),
1496
- "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1731
+ ibdev_err(&rdev->ibdev,
1732
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
14971733 return -EINVAL;
14981734 }
14991735 return 0;
@@ -1511,7 +1747,7 @@
15111747 tsrq.qplib_srq.id = srq->qplib_srq.id;
15121748 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
15131749 if (rc) {
1514
- dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1750
+ ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
15151751 return rc;
15161752 }
15171753 srq_attr->max_wr = srq->qplib_srq.max_wqe;
@@ -1553,7 +1789,7 @@
15531789 struct bnxt_re_qp *qp1_qp,
15541790 int qp_attr_mask)
15551791 {
1556
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
1792
+ struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
15571793 int rc = 0;
15581794
15591795 if (qp_attr_mask & IB_QP_STATE) {
@@ -1577,8 +1813,7 @@
15771813
15781814 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
15791815 if (rc)
1580
- dev_err(rdev_to_dev(rdev),
1581
- "Failed to modify Shadow QP for QP1");
1816
+ ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
15821817 return rc;
15831818 }
15841819
@@ -1598,17 +1833,16 @@
15981833 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
15991834 new_qp_state = qp_attr->qp_state;
16001835 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1601
- ib_qp->qp_type, qp_attr_mask,
1602
- IB_LINK_LAYER_ETHERNET)) {
1603
- dev_err(rdev_to_dev(rdev),
1604
- "Invalid attribute mask: %#x specified ",
1605
- qp_attr_mask);
1606
- dev_err(rdev_to_dev(rdev),
1607
- "for qpn: %#x type: %#x",
1608
- ib_qp->qp_num, ib_qp->qp_type);
1609
- dev_err(rdev_to_dev(rdev),
1610
- "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1611
- curr_qp_state, new_qp_state);
1836
+ ib_qp->qp_type, qp_attr_mask)) {
1837
+ ibdev_err(&rdev->ibdev,
1838
+ "Invalid attribute mask: %#x specified ",
1839
+ qp_attr_mask);
1840
+ ibdev_err(&rdev->ibdev,
1841
+ "for qpn: %#x type: %#x",
1842
+ ib_qp->qp_num, ib_qp->qp_type);
1843
+ ibdev_err(&rdev->ibdev,
1844
+ "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1845
+ curr_qp_state, new_qp_state);
16121846 return -EINVAL;
16131847 }
16141848 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
@@ -1616,18 +1850,16 @@
16161850
16171851 if (!qp->sumem &&
16181852 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1619
- dev_dbg(rdev_to_dev(rdev),
1620
- "Move QP = %p to flush list\n",
1621
- qp);
1853
+ ibdev_dbg(&rdev->ibdev,
1854
+ "Move QP = %p to flush list\n", qp);
16221855 flags = bnxt_re_lock_cqs(qp);
16231856 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
16241857 bnxt_re_unlock_cqs(qp, flags);
16251858 }
16261859 if (!qp->sumem &&
16271860 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1628
- dev_dbg(rdev_to_dev(rdev),
1629
- "Move QP = %p out of flush list\n",
1630
- qp);
1861
+ ibdev_dbg(&rdev->ibdev,
1862
+ "Move QP = %p out of flush list\n", qp);
16311863 flags = bnxt_re_lock_cqs(qp);
16321864 bnxt_qplib_clean_qp(&qp->qplib_qp);
16331865 bnxt_re_unlock_cqs(qp, flags);
@@ -1644,6 +1876,9 @@
16441876 __from_ib_access_flags(qp_attr->qp_access_flags);
16451877 /* LOCAL_WRITE access must be set to allow RC receive */
16461878 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1879
+ /* Temp: Set all params on QP as of now */
1880
+ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1881
+ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
16471882 }
16481883 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
16491884 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
@@ -1657,6 +1892,7 @@
16571892 const struct ib_global_route *grh =
16581893 rdma_ah_read_grh(&qp_attr->ah_attr);
16591894 const struct ib_gid_attr *sgid_attr;
1895
+ struct bnxt_re_gid_ctx *ctx;
16601896
16611897 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
16621898 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
@@ -1668,11 +1904,12 @@
16681904 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
16691905 sizeof(qp->qplib_qp.ah.dgid.data));
16701906 qp->qplib_qp.ah.flow_label = grh->flow_label;
1671
- /* If RoCE V2 is enabled, stack will have two entries for
1672
- * each GID entry. Avoiding this duplicte entry in HW. Dividing
1673
- * the GID index by 2 for RoCE V2
1907
+ sgid_attr = grh->sgid_attr;
1908
+ /* Get the HW context of the GID. The reference
1909
+ * of GID table entry is already taken by the caller.
16741910 */
1675
- qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1911
+ ctx = rdma_read_gid_hw_context(sgid_attr);
1912
+ qp->qplib_qp.ah.sgid_index = ctx->idx;
16761913 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
16771914 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
16781915 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
@@ -1680,9 +1917,11 @@
16801917 ether_addr_copy(qp->qplib_qp.ah.dmac,
16811918 qp_attr->ah_attr.roce.dmac);
16821919
1683
- sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
1684
- memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
1685
- ETH_ALEN);
1920
+ rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
1921
+ &qp->qplib_qp.smac[0]);
1922
+ if (rc)
1923
+ return rc;
1924
+
16861925 nw_type = rdma_gid_attr_network_type(sgid_attr);
16871926 switch (nw_type) {
16881927 case RDMA_NETWORK_IPV4:
@@ -1751,10 +1990,10 @@
17511990 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
17521991 if (qp_attr->max_dest_rd_atomic >
17531992 dev_attr->max_qp_init_rd_atom) {
1754
- dev_err(rdev_to_dev(rdev),
1755
- "max_dest_rd_atomic requested%d is > dev_max%d",
1756
- qp_attr->max_dest_rd_atomic,
1757
- dev_attr->max_qp_init_rd_atom);
1993
+ ibdev_err(&rdev->ibdev,
1994
+ "max_dest_rd_atomic requested%d is > dev_max%d",
1995
+ qp_attr->max_dest_rd_atomic,
1996
+ dev_attr->max_qp_init_rd_atom);
17581997 return -EINVAL;
17591998 }
17601999
....@@ -1775,8 +2014,8 @@
17752014 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
17762015 (qp_attr->cap.max_inline_data >=
17772016 dev_attr->max_inline_data)) {
1778
- dev_err(rdev_to_dev(rdev),
1779
- "Create QP failed - max exceeded");
2017
+ ibdev_err(&rdev->ibdev,
2018
+ "Create QP failed - max exceeded");
17802019 return -EINVAL;
17812020 }
17822021 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
....@@ -1809,10 +2048,10 @@
18092048 }
18102049 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
18112050 if (rc) {
1812
- dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
2051
+ ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
18132052 return rc;
18142053 }
1815
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
2054
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
18162055 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
18172056 return rc;
18182057 }
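For context on how this verb is consumed: a kernel ULP typically walks a new RC QP through INIT, RTR and RTS with ib_modify_qp(), which is the path that ends up in bnxt_re_modify_qp() above. A hedged sketch of that caller side; the numeric attribute values are only illustrative defaults, and the peer's QPN and address handle must come from the connection exchange.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Walk a freshly created RC QP to RTS.  Sketch only; real consumers derive
 * dest_qpn, PSNs and ah_attr from the connection setup (e.g. RDMA CM). */
static int bring_qp_to_rts(struct ib_qp *qp, u8 port_num, u32 dest_qpn,
			   const struct rdma_ah_attr *ah_attr)
{
	struct ib_qp_attr attr = {};
	int rc;

	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = 0;
	attr.port_num = port_num;
	attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
	rc = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			  IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	if (rc)
		return rc;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTR;
	attr.ah_attr = *ah_attr;
	attr.path_mtu = IB_MTU_1024;
	attr.dest_qp_num = dest_qpn;
	attr.rq_psn = 0;
	attr.max_dest_rd_atomic = 1;
	attr.min_rnr_timer = 12;
	rc = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
			  IB_QP_DEST_QPN | IB_QP_RQ_PSN |
			  IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	if (rc)
		return rc;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	attr.timeout = 14;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.max_rd_atomic = 1;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN |
			    IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC);
}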
....@@ -1834,7 +2073,7 @@
18342073
18352074 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
18362075 if (rc) {
1837
- dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
2076
+ ibdev_err(&rdev->ibdev, "Failed to query HW QP");
18382077 goto out;
18392078 }
18402079 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
....@@ -1902,8 +2141,10 @@
19022141
19032142 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
19042143
1905
- if (is_vlan_dev(sgid_attr->ndev))
1906
- vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
2144
+ rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2145
+ if (rc)
2146
+ return rc;
2147
+
19072148 /* Get network header type for this GID */
19082149 nw_type = rdma_gid_attr_network_type(sgid_attr);
19092150 switch (nw_type) {
....@@ -2038,7 +2279,7 @@
20382279 wqe->num_sge++;
20392280
20402281 } else {
2041
- dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2282
+ ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
20422283 rc = -ENOMEM;
20432284 }
20442285 return rc;
....@@ -2055,9 +2296,12 @@
20552296 struct bnxt_qplib_swqe *wqe,
20562297 int payload_size)
20572298 {
2058
- struct bnxt_qplib_sge ref, sge;
2059
- u32 rq_prod_index;
20602299 struct bnxt_re_sqp_entries *sqp_entry;
2300
+ struct bnxt_qplib_sge ref, sge;
2301
+ struct bnxt_re_dev *rdev;
2302
+ u32 rq_prod_index;
2303
+
2304
+ rdev = qp->rdev;
20612305
20622306 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
20632307
....@@ -2072,7 +2316,7 @@
20722316 ref.lkey = wqe->sg_list[0].lkey;
20732317 ref.size = wqe->sg_list[0].size;
20742318
2075
- sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2319
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
20762320
20772321 /* SGE 1 */
20782322 wqe->sg_list[0].addr = sge.addr;
....@@ -2092,7 +2336,8 @@
20922336
20932337 static int is_ud_qp(struct bnxt_re_qp *qp)
20942338 {
2095
- return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
2339
+ return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2340
+ qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
20962341 }
20972342
20982343 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
....@@ -2223,7 +2468,7 @@
22232468 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
22242469 wqe->frmr.page_list = mr->pages;
22252470 wqe->frmr.page_list_len = mr->npages;
2226
- wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2471
+ wqe->frmr.levels = qplib_frpl->hwq.level;
22272472 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
22282473
22292474 /* Need unconditional fence for reg_mr
....@@ -2270,8 +2515,8 @@
22702515
22712516 if ((sge_len + wqe->inline_len) >
22722517 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2273
- dev_err(rdev_to_dev(rdev),
2274
- "Inline data size requested > supported value");
2518
+ ibdev_err(&rdev->ibdev,
2519
+ "Inline data size requested > supported value");
22752520 return -EINVAL;
22762521 }
22772522 sge_len = wr->sg_list[i].length;
....@@ -2318,21 +2563,18 @@
23182563 struct bnxt_re_qp *qp,
23192564 const struct ib_send_wr *wr)
23202565 {
2321
- struct bnxt_qplib_swqe wqe;
23222566 int rc = 0, payload_sz = 0;
23232567 unsigned long flags;
23242568
23252569 spin_lock_irqsave(&qp->sq_lock, flags);
2326
- memset(&wqe, 0, sizeof(wqe));
23272570 while (wr) {
2328
- /* House keeping */
2329
- memset(&wqe, 0, sizeof(wqe));
2571
+ struct bnxt_qplib_swqe wqe = {};
23302572
23312573 /* Common */
23322574 wqe.num_sge = wr->num_sge;
23332575 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2334
- dev_err(rdev_to_dev(rdev),
2335
- "Limit exceeded for Send SGEs");
2576
+ ibdev_err(&rdev->ibdev,
2577
+ "Limit exceeded for Send SGEs");
23362578 rc = -EINVAL;
23372579 goto bad;
23382580 }
....@@ -2351,9 +2593,9 @@
23512593 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
23522594 bad:
23532595 if (rc) {
2354
- dev_err(rdev_to_dev(rdev),
2355
- "Post send failed opcode = %#x rc = %d",
2356
- wr->opcode, rc);
2596
+ ibdev_err(&rdev->ibdev,
2597
+ "Post send failed opcode = %#x rc = %d",
2598
+ wr->opcode, rc);
23572599 break;
23582600 }
23592601 wr = wr->next;
....@@ -2380,8 +2622,8 @@
23802622 /* Common */
23812623 wqe.num_sge = wr->num_sge;
23822624 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2383
- dev_err(rdev_to_dev(qp->rdev),
2384
- "Limit exceeded for Send SGEs");
2625
+ ibdev_err(&qp->rdev->ibdev,
2626
+ "Limit exceeded for Send SGEs");
23852627 rc = -EINVAL;
23862628 goto bad;
23872629 }
....@@ -2396,7 +2638,7 @@
23962638 switch (wr->opcode) {
23972639 case IB_WR_SEND:
23982640 case IB_WR_SEND_WITH_IMM:
2399
- if (ib_qp->qp_type == IB_QPT_GSI) {
2641
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
24002642 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
24012643 payload_sz);
24022644 if (rc)
....@@ -2412,7 +2654,7 @@
24122654 default:
24132655 break;
24142656 }
2415
- /* fall through */
2657
+ fallthrough;
24162658 case IB_WR_SEND_WITH_INV:
24172659 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
24182660 break;
....@@ -2426,8 +2668,8 @@
24262668 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
24272669 break;
24282670 case IB_WR_RDMA_READ_WITH_INV:
2429
- dev_err(rdev_to_dev(qp->rdev),
2430
- "RDMA Read with Invalidate is not supported");
2671
+ ibdev_err(&qp->rdev->ibdev,
2672
+ "RDMA Read with Invalidate is not supported");
24312673 rc = -EINVAL;
24322674 goto bad;
24332675 case IB_WR_LOCAL_INV:
....@@ -2438,8 +2680,8 @@
24382680 break;
24392681 default:
24402682 /* Unsupported WRs */
2441
- dev_err(rdev_to_dev(qp->rdev),
2442
- "WR (%#x) is not supported", wr->opcode);
2683
+ ibdev_err(&qp->rdev->ibdev,
2684
+ "WR (%#x) is not supported", wr->opcode);
24432685 rc = -EINVAL;
24442686 goto bad;
24452687 }
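The opcode switch above is what a kernel consumer's ib_post_send() call lands in. A minimal sketch of the caller side for a plain IB_WR_SEND with one SGE; the buffer mapping and lkey wiring are the caller's responsibility and only illustrative here.

#include <rdma/ib_verbs.h>

/* Post one IB_WR_SEND carrying a single registered buffer.  'addr' is
 * expected to be DMA-mapped and covered by 'lkey' (e.g. the PD's local
 * DMA lkey for kernel memory). */
static int post_one_send(struct ib_qp *qp, u64 addr, u32 length, u32 lkey,
			 u64 wr_id)
{
	const struct ib_send_wr *bad_wr;
	struct ib_sge sge = {
		.addr   = addr,
		.length = length,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = wr_id,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,	/* ask for a completion */
	};

	return ib_post_send(qp, &wr, &bad_wr);
}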
....@@ -2447,9 +2689,9 @@
24472689 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
24482690 bad:
24492691 if (rc) {
2450
- dev_err(rdev_to_dev(qp->rdev),
2451
- "post_send failed op:%#x qps = %#x rc = %d\n",
2452
- wr->opcode, qp->qplib_qp.state, rc);
2692
+ ibdev_err(&qp->rdev->ibdev,
2693
+ "post_send failed op:%#x qps = %#x rc = %d\n",
2694
+ wr->opcode, qp->qplib_qp.state, rc);
24532695 *bad_wr = wr;
24542696 break;
24552697 }
....@@ -2477,8 +2719,8 @@
24772719 /* Common */
24782720 wqe.num_sge = wr->num_sge;
24792721 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2480
- dev_err(rdev_to_dev(rdev),
2481
- "Limit exceeded for Receive SGEs");
2722
+ ibdev_err(&rdev->ibdev,
2723
+ "Limit exceeded for Receive SGEs");
24822724 rc = -EINVAL;
24832725 break;
24842726 }
....@@ -2514,8 +2756,8 @@
25142756 /* Common */
25152757 wqe.num_sge = wr->num_sge;
25162758 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2517
- dev_err(rdev_to_dev(qp->rdev),
2518
- "Limit exceeded for Receive SGEs");
2759
+ ibdev_err(&qp->rdev->ibdev,
2760
+ "Limit exceeded for Receive SGEs");
25192761 rc = -EINVAL;
25202762 *bad_wr = wr;
25212763 break;
....@@ -2526,7 +2768,8 @@
25262768 wqe.wr_id = wr->wr_id;
25272769 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
25282770
2529
- if (ib_qp->qp_type == IB_QPT_GSI)
2771
+ if (ib_qp->qp_type == IB_QPT_GSI &&
2772
+ qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
25302773 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
25312774 payload_sz);
25322775 if (!rc)
....@@ -2554,9 +2797,8 @@
25542797 }
25552798
25562799 /* Completion Queues */
2557
-int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2800
+int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
25582801 {
2559
- int rc;
25602802 struct bnxt_re_cq *cq;
25612803 struct bnxt_qplib_nq *nq;
25622804 struct bnxt_re_dev *rdev;
....@@ -2565,30 +2807,21 @@
25652807 rdev = cq->rdev;
25662808 nq = cq->qplib_cq.nq;
25672809
2568
- rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2569
- if (rc) {
2570
- dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2571
- return rc;
2572
- }
2573
- if (!IS_ERR_OR_NULL(cq->umem))
2574
- ib_umem_release(cq->umem);
2810
+ bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2811
+ ib_umem_release(cq->umem);
25752812
25762813 atomic_dec(&rdev->cq_count);
25772814 nq->budget--;
25782815 kfree(cq->cql);
2579
- kfree(cq);
2580
-
25812816 return 0;
25822817 }
25832818
2584
-struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2585
- const struct ib_cq_init_attr *attr,
2586
- struct ib_ucontext *context,
2587
- struct ib_udata *udata)
2819
+int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2820
+ struct ib_udata *udata)
25882821 {
2589
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2822
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
25902823 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2591
- struct bnxt_re_cq *cq = NULL;
2824
+ struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
25922825 int rc, entries;
25932826 int cqe = attr->cqe;
25942827 struct bnxt_qplib_nq *nq = NULL;
....@@ -2596,12 +2829,9 @@
25962829
25972830 /* Validate CQ fields */
25982831 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2599
- dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2600
- return ERR_PTR(-EINVAL);
2832
+ ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
2833
+ return -EINVAL;
26012834 }
2602
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2603
- if (!cq)
2604
- return ERR_PTR(-ENOMEM);
26052835
26062836 cq->rdev = rdev;
26072837 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
....@@ -2610,26 +2840,25 @@
26102840 if (entries > dev_attr->max_cq_wqes + 1)
26112841 entries = dev_attr->max_cq_wqes + 1;
26122842
2613
- if (context) {
2843
+ cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2844
+ cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2845
+ if (udata) {
26142846 struct bnxt_re_cq_req req;
2615
- struct bnxt_re_ucontext *uctx = container_of
2616
- (context,
2617
- struct bnxt_re_ucontext,
2618
- ib_uctx);
2847
+ struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2848
+ udata, struct bnxt_re_ucontext, ib_uctx);
26192849 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
26202850 rc = -EFAULT;
26212851 goto fail;
26222852 }
26232853
2624
- cq->umem = ib_umem_get(context, req.cq_va,
2854
+ cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
26252855 entries * sizeof(struct cq_base),
2626
- IB_ACCESS_LOCAL_WRITE, 1);
2856
+ IB_ACCESS_LOCAL_WRITE);
26272857 if (IS_ERR(cq->umem)) {
26282858 rc = PTR_ERR(cq->umem);
26292859 goto fail;
26302860 }
2631
- cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2632
- cq->qplib_cq.nmap = cq->umem->nmap;
2861
+ cq->qplib_cq.sg_info.umem = cq->umem;
26332862 cq->qplib_cq.dpi = &uctx->dpi;
26342863 } else {
26352864 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
....@@ -2641,8 +2870,6 @@
26412870 }
26422871
26432872 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2644
- cq->qplib_cq.sghead = NULL;
2645
- cq->qplib_cq.nmap = 0;
26462873 }
26472874 /*
26482875 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
....@@ -2656,7 +2883,7 @@
26562883
26572884 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
26582885 if (rc) {
2659
- dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2886
+ ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
26602887 goto fail;
26612888 }
26622889
....@@ -2667,7 +2894,7 @@
26672894 atomic_inc(&rdev->cq_count);
26682895 spin_lock_init(&cq->cq_lock);
26692896
2670
- if (context) {
2897
+ if (udata) {
26712898 struct bnxt_re_cq_resp resp;
26722899
26732900 resp.cqid = cq->qplib_cq.id;
....@@ -2676,21 +2903,19 @@
26762903 resp.rsvd = 0;
26772904 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
26782905 if (rc) {
2679
- dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2906
+ ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
26802907 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
26812908 goto c2fail;
26822909 }
26832910 }
26842911
2685
- return &cq->ib_cq;
2912
+ return 0;
26862913
26872914 c2fail:
2688
- if (context)
2689
- ib_umem_release(cq->umem);
2915
+ ib_umem_release(cq->umem);
26902916 fail:
26912917 kfree(cq->cql);
2692
- kfree(cq);
2693
- return ERR_PTR(rc);
2918
+ return rc;
26942919 }
26952920
26962921 static u8 __req_to_ib_wc_status(u8 qstatus)
....@@ -2909,12 +3134,13 @@
29093134 return rc;
29103135 }
29113136
2912
-static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
3137
+static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
29133138 struct bnxt_qplib_cqe *cqe)
29143139 {
2915
- struct bnxt_re_dev *rdev = qp1_qp->rdev;
3140
+ struct bnxt_re_dev *rdev = gsi_qp->rdev;
29163141 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2917
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
3142
+ struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3143
+ struct bnxt_re_ah *gsi_sah;
29183144 struct ib_send_wr *swr;
29193145 struct ib_ud_wr udwr;
29203146 struct ib_recv_wr rwr;
....@@ -2937,26 +3163,26 @@
29373163 swr = &udwr.wr;
29383164 tbl_idx = cqe->wr_id;
29393165
2940
- rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2941
- (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2942
- rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
3166
+ rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3167
+ (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3168
+ rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
29433169 tbl_idx);
29443170
29453171 /* Shadow QP header buffer */
2946
- shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
3172
+ shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
29473173 tbl_idx);
2948
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
3174
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
29493175
29503176 /* Store this cqe */
29513177 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2952
- sqp_entry->qp1_qp = qp1_qp;
3178
+ sqp_entry->qp1_qp = gsi_qp;
29533179
29543180 /* Find packet type from the cqe */
29553181
29563182 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
29573183 cqe->raweth_qp1_flags2);
29583184 if (pkt_type < 0) {
2959
- dev_err(rdev_to_dev(rdev), "Invalid packet\n");
3185
+ ibdev_err(&rdev->ibdev, "Invalid packet\n");
29603186 return -EINVAL;
29613187 }
29623188
....@@ -3003,10 +3229,10 @@
30033229 rwr.wr_id = tbl_idx;
30043230 rwr.next = NULL;
30053231
3006
- rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
3232
+ rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
30073233 if (rc) {
3008
- dev_err(rdev_to_dev(rdev),
3009
- "Failed to post Rx buffers to shadow QP");
3234
+ ibdev_err(&rdev->ibdev,
3235
+ "Failed to post Rx buffers to shadow QP");
30103236 return -ENOMEM;
30113237 }
30123238
....@@ -3015,15 +3241,13 @@
30153241 swr->wr_id = tbl_idx;
30163242 swr->opcode = IB_WR_SEND;
30173243 swr->next = NULL;
3018
-
3019
- udwr.ah = &rdev->sqp_ah->ib_ah;
3020
- udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3021
- udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3244
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
3245
+ udwr.ah = &gsi_sah->ib_ah;
3246
+ udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3247
+ udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
30223248
30233249 /* post data received in the send queue */
3024
- rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3025
-
3026
- return 0;
3250
+ return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
30273251 }
30283252
30293253 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
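bnxt_re_process_raw_qp_pkt_rx() above parks the original QP1 completion in a per-slot table, reposts a receive on the shadow GSI QP and relays the payload to it with a UD send, so the ULP later sees a normal GSI completion. A rough sketch of that per-slot bookkeeping with hypothetical names; only bnxt_qplib_cqe, bnxt_re_qp and ib_wc are types from the source, and the driver's headers are assumed.

#include <rdma/ib_verbs.h>

/* Hypothetical layout mirroring what the driver keeps in its sqp_tbl[]:
 * one slot per RQ producer index of the real QP1. */
struct gsi_relay_slot {
	u64 consumer_wrid;		/* wr_id the ULP posted on QP1 */
	struct bnxt_qplib_cqe cqe;	/* completion seen on the raw QP1 */
	struct bnxt_re_qp *qp1_qp;	/* QP the packet originally arrived on */
};

/* Step 1: on a raw QP1 RX completion, remember it under its slot index.
 * The payload is then re-posted to the shadow QP with wr_id == idx. */
static void gsi_relay_store(struct gsi_relay_slot *tbl, u32 idx,
			    struct bnxt_re_qp *qp1_qp,
			    const struct bnxt_qplib_cqe *cqe)
{
	tbl[idx].cqe = *cqe;
	tbl[idx].qp1_qp = qp1_qp;
}

/* Step 2: when the shadow QP completes, rebuild the work completion the
 * ULP expects from the stored original. */
static void gsi_relay_complete(const struct gsi_relay_slot *tbl, u32 idx,
			       struct ib_wc *wc)
{
	wc->wr_id = tbl[idx].consumer_wrid;
	wc->qp = &tbl[idx].qp1_qp->ib_qp;
	wc->byte_len = tbl[idx].cqe.length;
	wc->src_qp = tbl[idx].cqe.src_qp;
}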
....@@ -3088,12 +3312,12 @@
30883312 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
30893313 }
30903314
3091
-static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3315
+static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
30923316 struct ib_wc *wc,
30933317 struct bnxt_qplib_cqe *cqe)
30943318 {
3095
- struct bnxt_re_dev *rdev = qp->rdev;
3096
- struct bnxt_re_qp *qp1_qp = NULL;
3319
+ struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3320
+ struct bnxt_re_qp *gsi_qp = NULL;
30973321 struct bnxt_qplib_cqe *orig_cqe = NULL;
30983322 struct bnxt_re_sqp_entries *sqp_entry = NULL;
30993323 int nw_type;
....@@ -3103,13 +3327,13 @@
31033327
31043328 tbl_idx = cqe->wr_id;
31053329
3106
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
3107
- qp1_qp = sqp_entry->qp1_qp;
3330
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3331
+ gsi_qp = sqp_entry->qp1_qp;
31083332 orig_cqe = &sqp_entry->cqe;
31093333
31103334 wc->wr_id = sqp_entry->wrid;
31113335 wc->byte_len = orig_cqe->length;
3112
- wc->qp = &qp1_qp->ib_qp;
3336
+ wc->qp = &gsi_qp->ib_qp;
31133337
31143338 wc->ex.imm_data = orig_cqe->immdata;
31153339 wc->src_qp = orig_cqe->src_qp;
....@@ -3136,19 +3360,39 @@
31363360 }
31373361 }
31383362
3139
-static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
3363
+static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3364
+ struct ib_wc *wc,
31403365 struct bnxt_qplib_cqe *cqe)
31413366 {
3367
+ struct bnxt_re_dev *rdev;
3368
+ u16 vlan_id = 0;
3369
+ u8 nw_type;
3370
+
3371
+ rdev = qp->rdev;
31423372 wc->opcode = IB_WC_RECV;
31433373 wc->status = __rc_to_ib_wc_status(cqe->status);
31443374
3145
- if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3375
+ if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
31463376 wc->wc_flags |= IB_WC_WITH_IMM;
3147
- if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3148
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3149
- if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3150
- (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3151
- wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3377
+ /* report only on GSI QP for Thor */
3378
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3379
+ wc->wc_flags |= IB_WC_GRH;
3380
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
3381
+ wc->wc_flags |= IB_WC_WITH_SMAC;
3382
+ if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3383
+ vlan_id = (cqe->cfa_meta & 0xFFF);
3384
+ }
3385
+ /* Mark only if vlan_id is non zero */
3386
+ if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3387
+ wc->vlan_id = vlan_id;
3388
+ wc->wc_flags |= IB_WC_WITH_VLAN;
3389
+ }
3390
+ nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3391
+ CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3392
+ wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3393
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3394
+ }
3395
+
31523396 }
31533397
31543398 static int send_phantom_wqe(struct bnxt_re_qp *qp)
....@@ -3162,11 +3406,11 @@
31623406 rc = bnxt_re_bind_fence_mw(lib_qp);
31633407 if (!rc) {
31643408 lib_qp->sq.phantom_wqe_cnt++;
3165
- dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3166
- "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3167
- lib_qp->id, lib_qp->sq.hwq.prod,
3168
- HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3169
- lib_qp->sq.phantom_wqe_cnt);
3409
+ ibdev_dbg(&qp->rdev->ibdev,
3410
+ "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3411
+ lib_qp->id, lib_qp->sq.hwq.prod,
3412
+ HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3413
+ lib_qp->sq.phantom_wqe_cnt);
31703414 }
31713415
31723416 spin_unlock_irqrestore(&qp->sq_lock, flags);
....@@ -3176,7 +3420,7 @@
31763420 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
31773421 {
31783422 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3179
- struct bnxt_re_qp *qp;
3423
+ struct bnxt_re_qp *qp, *sh_qp;
31803424 struct bnxt_qplib_cqe *cqe;
31813425 int i, ncqe, budget;
31823426 struct bnxt_qplib_q *sq;
....@@ -3189,7 +3433,7 @@
31893433 budget = min_t(u32, num_entries, cq->max_cql);
31903434 num_entries = budget;
31913435 if (!cq->cql) {
3192
- dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3436
+ ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
31933437 goto exit;
31943438 }
31953439 cqe = &cq->cql[0];
....@@ -3202,8 +3446,8 @@
32023446 qp = container_of(lib_qp,
32033447 struct bnxt_re_qp, qplib_qp);
32043448 if (send_phantom_wqe(qp) == -ENOMEM)
3205
- dev_err(rdev_to_dev(cq->rdev),
3206
- "Phantom failed! Scheduled to send again\n");
3449
+ ibdev_err(&cq->rdev->ibdev,
3450
+ "Phantom failed! Scheduled to send again\n");
32073451 else
32083452 sq->send_phantom = false;
32093453 }
....@@ -3227,8 +3471,7 @@
32273471 (unsigned long)(cqe->qp_handle),
32283472 struct bnxt_re_qp, qplib_qp);
32293473 if (!qp) {
3230
- dev_err(rdev_to_dev(cq->rdev),
3231
- "POLL CQ : bad QP handle");
3474
+ ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle");
32323475 continue;
32333476 }
32343477 wc->qp = &qp->ib_qp;
....@@ -3240,8 +3483,9 @@
32403483
32413484 switch (cqe->opcode) {
32423485 case CQ_BASE_CQE_TYPE_REQ:
3243
- if (qp->qplib_qp.id ==
3244
- qp->rdev->qp1_sqp->qplib_qp.id) {
3486
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3487
+ if (sh_qp &&
3488
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
32453489 /* Handle this completion with
32463490 * the stored completion
32473491 */
....@@ -3267,7 +3511,7 @@
32673511 * stored in the table
32683512 */
32693513 tbl_idx = cqe->wr_id;
3270
- sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3514
+ sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
32713515 wc->wr_id = sqp_entry->wrid;
32723516 bnxt_re_process_res_rawqp1_wc(wc, cqe);
32733517 break;
....@@ -3275,8 +3519,9 @@
32753519 bnxt_re_process_res_rc_wc(wc, cqe);
32763520 break;
32773521 case CQ_BASE_CQE_TYPE_RES_UD:
3278
- if (qp->qplib_qp.id ==
3279
- qp->rdev->qp1_sqp->qplib_qp.id) {
3522
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3523
+ if (sh_qp &&
3524
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
32803525 /* Handle this completion with
32813526 * the stored completion
32823527 */
....@@ -3288,12 +3533,12 @@
32883533 break;
32893534 }
32903535 }
3291
- bnxt_re_process_res_ud_wc(wc, cqe);
3536
+ bnxt_re_process_res_ud_wc(qp, wc, cqe);
32923537 break;
32933538 default:
3294
- dev_err(rdev_to_dev(cq->rdev),
3295
- "POLL CQ : type 0x%x not handled",
3296
- cqe->opcode);
3539
+ ibdev_err(&cq->rdev->ibdev,
3540
+ "POLL CQ : type 0x%x not handled",
3541
+ cqe->opcode);
32973542 continue;
32983543 }
32993544 wc++;
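bnxt_re_poll_cq() above and bnxt_re_req_notify_cq() in the next hunk implement the usual poll/arm contract. A minimal sketch of how a consumer's completion handler is expected to drive them; IB_CQ_REPORT_MISSED_EVENTS closes the race between the final poll and the re-arm.

#include <rdma/ib_verbs.h>

#define CQ_BATCH 16

/* Typical completion-handler loop: drain, re-arm, and drain again if the
 * provider reports completions that raced with the arm. */
static void drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc[CQ_BATCH];
	int n, i;

	do {
		while ((n = ib_poll_cq(cq, CQ_BATCH, wc)) > 0) {
			for (i = 0; i < n; i++) {
				/* dispatch wc[i] to the owner of wc[i].wr_id */
			}
		}
		/* ib_req_notify_cq() returns > 0 here if completions slipped
		 * in before the arm took effect, so poll once more. */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}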
....@@ -3315,10 +3560,10 @@
33153560 spin_lock_irqsave(&cq->cq_lock, flags);
33163561 /* Trigger on the very next completion */
33173562 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3318
- type = DBR_DBR_TYPE_CQ_ARMALL;
3563
+ type = DBC_DBC_TYPE_CQ_ARMALL;
33193564 /* Trigger on the next solicited completion */
33203565 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3321
- type = DBR_DBR_TYPE_CQ_ARMSE;
3566
+ type = DBC_DBC_TYPE_CQ_ARMSE;
33223567
33233568 /* Poll to see if there are missed events */
33243569 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
....@@ -3339,7 +3584,6 @@
33393584 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
33403585 struct bnxt_re_dev *rdev = pd->rdev;
33413586 struct bnxt_re_mr *mr;
3342
- u64 pbl = 0;
33433587 int rc;
33443588
33453589 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
....@@ -3358,7 +3602,7 @@
33583602
33593603 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
33603604 mr->qplib_mr.total_size = -1; /* Infinte length */
3361
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3605
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
33623606 PAGE_SIZE);
33633607 if (rc)
33643608 goto fail_mr;
....@@ -3378,7 +3622,7 @@
33783622 return ERR_PTR(rc);
33793623 }
33803624
3381
-int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3625
+int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
33823626 {
33833627 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
33843628 struct bnxt_re_dev *rdev = mr->rdev;
....@@ -3386,7 +3630,7 @@
33863630
33873631 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
33883632 if (rc) {
3389
- dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3633
+ ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
33903634 return rc;
33913635 }
33923636
....@@ -3397,8 +3641,7 @@
33973641 mr->npages = 0;
33983642 mr->pages = NULL;
33993643 }
3400
- if (!IS_ERR_OR_NULL(mr->ib_umem))
3401
- ib_umem_release(mr->ib_umem);
3644
+ ib_umem_release(mr->ib_umem);
34023645
34033646 kfree(mr);
34043647 atomic_dec(&rdev->mr_count);
....@@ -3434,7 +3677,7 @@
34343677 int rc;
34353678
34363679 if (type != IB_MR_TYPE_MEM_REG) {
3437
- dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3680
+ ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
34383681 return ERR_PTR(-EINVAL);
34393682 }
34403683 if (max_num_sg > MAX_PBL_LVL_1_PGS)
....@@ -3464,8 +3707,8 @@
34643707 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
34653708 &mr->qplib_frpl, max_num_sg);
34663709 if (rc) {
3467
- dev_err(rdev_to_dev(rdev),
3468
- "Failed to allocate HW FR page list");
3710
+ ibdev_err(&rdev->ibdev,
3711
+ "Failed to allocate HW FR page list");
34693712 goto fail_mr;
34703713 }
34713714
....@@ -3500,7 +3743,7 @@
35003743 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
35013744 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
35023745 if (rc) {
3503
- dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3746
+ ibdev_err(&rdev->ibdev, "Allocate MW failed!");
35043747 goto fail;
35053748 }
35063749 mw->ib_mw.rkey = mw->qplib_mw.rkey;
....@@ -3521,53 +3764,13 @@
35213764
35223765 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
35233766 if (rc) {
3524
- dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3767
+ ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
35253768 return rc;
35263769 }
35273770
35283771 kfree(mw);
35293772 atomic_dec(&rdev->mw_count);
35303773 return rc;
3531
-}
3532
-
3533
-static int bnxt_re_page_size_ok(int page_shift)
3534
-{
3535
- switch (page_shift) {
3536
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3537
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3538
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3539
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3540
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3541
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3542
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3543
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3544
- return 1;
3545
- default:
3546
- return 0;
3547
- }
3548
-}
3549
-
3550
-static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3551
- int page_shift)
3552
-{
3553
- u64 *pbl_tbl = pbl_tbl_orig;
3554
- u64 paddr;
3555
- u64 page_mask = (1ULL << page_shift) - 1;
3556
- int i, pages;
3557
- struct scatterlist *sg;
3558
- int entry;
3559
-
3560
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3561
- pages = sg_dma_len(sg) >> PAGE_SHIFT;
3562
- for (i = 0; i < pages; i++) {
3563
- paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3564
- if (pbl_tbl == pbl_tbl_orig)
3565
- *pbl_tbl++ = paddr & ~page_mask;
3566
- else if ((paddr & page_mask) == 0)
3567
- *pbl_tbl++ = paddr;
3568
- }
3569
- }
3570
- return pbl_tbl - pbl_tbl_orig;
35713774 }
35723775
35733776 /* uverbs */
....@@ -3579,12 +3782,12 @@
35793782 struct bnxt_re_dev *rdev = pd->rdev;
35803783 struct bnxt_re_mr *mr;
35813784 struct ib_umem *umem;
3582
- u64 *pbl_tbl = NULL;
3583
- int umem_pgs, page_shift, rc;
3785
+ unsigned long page_size;
3786
+ int umem_pgs, rc;
35843787
35853788 if (length > BNXT_RE_MAX_MR_SIZE) {
3586
- dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
3587
- length, BNXT_RE_MAX_MR_SIZE);
3789
+ ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
3790
+ length, BNXT_RE_MAX_MR_SIZE);
35883791 return ERR_PTR(-ENOMEM);
35893792 }
35903793
....@@ -3599,74 +3802,51 @@
35993802
36003803 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
36013804 if (rc) {
3602
- dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3805
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR");
36033806 goto free_mr;
36043807 }
36053808 /* The fixed portion of the rkey is the same as the lkey */
36063809 mr->ib_mr.rkey = mr->qplib_mr.rkey;
36073810
3608
- umem = ib_umem_get(ib_pd->uobject->context, start, length,
3609
- mr_access_flags, 0);
3811
+ umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
36103812 if (IS_ERR(umem)) {
3611
- dev_err(rdev_to_dev(rdev), "Failed to get umem");
3813
+ ibdev_err(&rdev->ibdev, "Failed to get umem");
36123814 rc = -EFAULT;
36133815 goto free_mrw;
36143816 }
36153817 mr->ib_umem = umem;
36163818
36173819 mr->qplib_mr.va = virt_addr;
3618
- umem_pgs = ib_umem_page_count(umem);
3619
- if (!umem_pgs) {
3620
- dev_err(rdev_to_dev(rdev), "umem is invalid!");
3621
- rc = -EINVAL;
3820
+ page_size = ib_umem_find_best_pgsz(
3821
+ umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr);
3822
+ if (!page_size) {
3823
+ ibdev_err(&rdev->ibdev, "umem page size unsupported!");
3824
+ rc = -EFAULT;
36223825 goto free_umem;
36233826 }
36243827 mr->qplib_mr.total_size = length;
36253828
3626
- pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3627
- if (!pbl_tbl) {
3628
- rc = -ENOMEM;
3829
+ if (page_size == BNXT_RE_PAGE_SIZE_4K &&
3830
+ length > BNXT_RE_MAX_MR_SIZE_LOW) {
3831
+ ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu",
3832
+ length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3833
+ rc = -EINVAL;
36293834 goto free_umem;
36303835 }
36313836
3632
- page_shift = umem->page_shift;
3633
-
3634
- if (!bnxt_re_page_size_ok(page_shift)) {
3635
- dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3636
- rc = -EFAULT;
3637
- goto fail;
3638
- }
3639
-
3640
- if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3641
- dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3642
- length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3643
- rc = -EINVAL;
3644
- goto fail;
3645
- }
3646
- if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3647
- page_shift = BNXT_RE_PAGE_SHIFT_2M;
3648
- dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
3649
- 1 << page_shift);
3650
- }
3651
-
3652
- /* Map umem buf ptrs to the PBL */
3653
- umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3654
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3655
- umem_pgs, false, 1 << page_shift);
3837
+ umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
3838
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
3839
+ umem_pgs, page_size);
36563840 if (rc) {
3657
- dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3658
- goto fail;
3841
+ ibdev_err(&rdev->ibdev, "Failed to register user MR");
3842
+ goto free_umem;
36593843 }
3660
-
3661
- kfree(pbl_tbl);
36623844
36633845 mr->ib_mr.lkey = mr->qplib_mr.lkey;
36643846 mr->ib_mr.rkey = mr->qplib_mr.lkey;
36653847 atomic_inc(&rdev->mr_count);
36663848
36673849 return &mr->ib_mr;
3668
-fail:
3669
- kfree(pbl_tbl);
36703850 free_umem:
36713851 ib_umem_release(umem);
36723852 free_mrw:
....@@ -3676,27 +3856,24 @@
36763856 return ERR_PTR(rc);
36773857 }
36783858
3679
-struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3680
- struct ib_udata *udata)
3859
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
36813860 {
3861
+ struct ib_device *ibdev = ctx->device;
3862
+ struct bnxt_re_ucontext *uctx =
3863
+ container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
36823864 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3683
- struct bnxt_re_uctx_resp resp;
3684
- struct bnxt_re_ucontext *uctx;
36853865 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3866
+ struct bnxt_re_uctx_resp resp;
3867
+ u32 chip_met_rev_num = 0;
36863868 int rc;
36873869
3688
- dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3689
- ibdev->uverbs_abi_ver);
3870
+ ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
36903871
3691
- if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3692
- dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
3693
- BNXT_RE_ABI_VERSION);
3694
- return ERR_PTR(-EPERM);
3872
+ if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3873
+ ibdev_dbg(ibdev, " is different from the device %d ",
3874
+ BNXT_RE_ABI_VERSION);
3875
+ return -EPERM;
36953876 }
3696
-
3697
- uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3698
- if (!uctx)
3699
- return ERR_PTR(-ENOMEM);
37003877
37013878 uctx->rdev = rdev;
37023879
....@@ -3707,37 +3884,45 @@
37073884 }
37083885 spin_lock_init(&uctx->sh_lock);
37093886
3710
- resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
3887
+ resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3888
+ chip_met_rev_num = rdev->chip_ctx->chip_num;
3889
+ chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
3890
+ BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3891
+ chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
3892
+ BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3893
+ resp.chip_id0 = chip_met_rev_num;
3894
+ /* Future extension of chip info */
3895
+ resp.chip_id1 = 0;
3896
+ /*Temp, Use xa_alloc instead */
3897
+ resp.dev_id = rdev->en_dev->pdev->devfn;
37113898 resp.max_qp = rdev->qplib_ctx.qpc_count;
37123899 resp.pg_size = PAGE_SIZE;
37133900 resp.cqe_sz = sizeof(struct cq_base);
37143901 resp.max_cqd = dev_attr->max_cq_wqes;
37153902 resp.rsvd = 0;
37163903
3717
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3904
+ rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
37183905 if (rc) {
3719
- dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3906
+ ibdev_err(ibdev, "Failed to copy user context");
37203907 rc = -EFAULT;
37213908 goto cfail;
37223909 }
37233910
3724
- return &uctx->ib_uctx;
3911
+ return 0;
37253912 cfail:
37263913 free_page((unsigned long)uctx->shpg);
37273914 uctx->shpg = NULL;
37283915 fail:
3729
- kfree(uctx);
3730
- return ERR_PTR(rc);
3916
+ return rc;
37313917 }
37323918
3733
-int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3919
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
37343920 {
37353921 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
37363922 struct bnxt_re_ucontext,
37373923 ib_uctx);
37383924
37393925 struct bnxt_re_dev *rdev = uctx->rdev;
3740
- int rc = 0;
37413926
37423927 if (uctx->shpg)
37433928 free_page((unsigned long)uctx->shpg);
....@@ -3746,17 +3931,10 @@
37463931 /* Free DPI only if this is the first PD allocated by the
37473932 * application and mark the context dpi as NULL
37483933 */
3749
- rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3750
- &rdev->qplib_res.dpi_tbl,
3751
- &uctx->dpi);
3752
- if (rc)
3753
- dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3754
- /* Don't fail, continue*/
3934
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3935
+ &rdev->qplib_res.dpi_tbl, &uctx->dpi);
37553936 uctx->dpi.dbr = NULL;
37563937 }
3757
-
3758
- kfree(uctx);
3759
- return 0;
37603938 }
37613939
37623940 /* Helper function to mmap the virtual memory from user app */
....@@ -3775,15 +3953,14 @@
37753953 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
37763954 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
37773955 PAGE_SIZE, vma->vm_page_prot)) {
3778
- dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3956
+ ibdev_err(&rdev->ibdev, "Failed to map DPI");
37793957 return -EAGAIN;
37803958 }
37813959 } else {
37823960 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
37833961 if (remap_pfn_range(vma, vma->vm_start,
37843962 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3785
- dev_err(rdev_to_dev(rdev),
3786
- "Failed to map shared page");
3963
+ ibdev_err(&rdev->ibdev, "Failed to map shared page");
37873964 return -EAGAIN;
37883965 }
37893966 }