forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/infiniband/hw/cxgb4/mem.c
@@ -130,8 +130,9 @@
 
 	copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
 		   len;
-	wr_len = roundup(sizeof *req + sizeof *sc +
-			 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+	wr_len = roundup(sizeof(*req) + sizeof(*sc) +
+			 roundup(copy_len, T4_ULPTX_MIN_IO),
+			 16);
 
 	if (!skb) {
 		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
@@ -398,10 +399,9 @@
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	mhp->ibmr.length = mhp->attr.len;
-	mhp->ibmr.iova = mhp->attr.va_fbo;
 	mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
 	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
-	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+	return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
 }
 
 static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
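Note: this hunk is part of the wider conversion from the old idr-based insert_handle()/remove_handle() helpers (guarded by rhp->lock) to an XArray keyed by mmid (stag >> 8). A minimal sketch of the insert pattern, using only the <linux/xarray.h> API; demo_dev/demo_insert_mr are hypothetical stand-ins for the c4iw_dev code above:

#include <linux/xarray.h>

struct demo_dev {
	struct xarray mrs;	/* stands in for c4iw_dev->mrs */
};

static void demo_init(struct demo_dev *dev)
{
	/* Arm the XArray's internal lock for use from IRQ context. */
	xa_init_flags(&dev->mrs, XA_FLAGS_LOCK_IRQ);
}

static int demo_insert_mr(struct demo_dev *dev, u32 mmid, void *mhp)
{
	/*
	 * xa_insert_irq() takes the array's own spinlock with IRQs
	 * disabled, so no driver-wide lock is needed. It returns
	 * -EBUSY if mmid is already present, -ENOMEM on allocation
	 * failure, and 0 on success.
	 */
	return xa_insert_irq(&dev->mrs, mmid, mhp, GFP_KERNEL);
}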
@@ -508,10 +508,9 @@
 			       u64 virt, int acc, struct ib_udata *udata)
 {
 	__be64 *pages;
-	int shift, n, len;
-	int i, k, entry;
+	int shift, n, i;
 	int err = -ENOMEM;
-	struct scatterlist *sg;
+	struct ib_block_iter biter;
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
 	struct c4iw_mr *mhp;
@@ -543,13 +542,13 @@
 
 	mhp->rhp = rhp;
 
-	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+	mhp->umem = ib_umem_get(pd->device, start, length, acc);
 	if (IS_ERR(mhp->umem))
 		goto err_free_skb;
 
-	shift = mhp->umem->page_shift;
+	shift = PAGE_SHIFT;
 
-	n = mhp->umem->nmap;
+	n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
 	err = alloc_pbl(mhp, n);
 	if (err)
 		goto err_umem_release;
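Note: the ib_umem_get() change tracks the newer RDMA core API, which takes the ib_device instead of a ucontext and drops the old dmasync flag, while ib_umem_num_dma_blocks() replaces the ambiguous umem->nmap for PBL sizing. A hedged sketch of the pin-and-size step; demo_pin() is a hypothetical helper, not part of the driver:

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

static int demo_pin(struct ib_pd *pd, unsigned long start, size_t length,
		    int acc, struct ib_umem **umemp, int *nblocks)
{
	struct ib_umem *umem = ib_umem_get(pd->device, start, length, acc);

	if (IS_ERR(umem))
		return PTR_ERR(umem);
	*umemp = umem;
	/* One PBL slot per PAGE_SIZE DMA block of the pinned range. */
	*nblocks = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
	return 0;
}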
@@ -562,21 +561,16 @@
 
 	i = n = 0;
 
-	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
-		len = sg_dma_len(sg) >> shift;
-		for (k = 0; k < len; ++k) {
-			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-						 (k << shift));
-			if (i == PAGE_SIZE / sizeof *pages) {
-				err = write_pbl(&mhp->rhp->rdev,
-				      pages,
-				      mhp->attr.pbl_addr + (n << 3), i,
-				      mhp->wr_waitp);
-				if (err)
-					goto pbl_done;
-				n += i;
-				i = 0;
-			}
+	rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
+		pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
+		if (i == PAGE_SIZE / sizeof(*pages)) {
+			err = write_pbl(&mhp->rhp->rdev, pages,
+					mhp->attr.pbl_addr + (n << 3), i,
+					mhp->wr_waitp);
+			if (err)
+				goto pbl_done;
+			n += i;
+			i = 0;
 		}
 	}
 
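Note: rdma_umem_for_each_dma_block() hides the scatterlist layout that the old for_each_sg()/per-page loop walked by hand. A standalone sketch of the same iteration; demo_fill_pbl() is hypothetical and omits the write_pbl() batching shown above:

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

static void demo_fill_pbl(struct ib_umem *umem, __be64 *pages)
{
	struct ib_block_iter biter;
	int i = 0;

	/*
	 * Yields one aligned DMA address per PAGE_SIZE block, however
	 * the pinned pages were coalesced into SG entries.
	 */
	rdma_umem_for_each_dma_block(umem, &biter, PAGE_SIZE)
		pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
}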
@@ -617,30 +611,23 @@
 	return ERR_PTR(err);
 }
 
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			    struct ib_udata *udata)
+int c4iw_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
+	struct c4iw_mw *mhp = to_c4iw_mw(ibmw);
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
-	struct c4iw_mw *mhp;
 	u32 mmid;
 	u32 stag = 0;
 	int ret;
 
-	if (type != IB_MW_TYPE_1)
-		return ERR_PTR(-EINVAL);
+	if (ibmw->type != IB_MW_TYPE_1)
+		return -EINVAL;
 
-	php = to_c4iw_pd(pd);
+	php = to_c4iw_pd(ibmw->pd);
 	rhp = php->rhp;
-	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
-	if (!mhp)
-		return ERR_PTR(-ENOMEM);
-
 	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
-	if (!mhp->wr_waitp) {
-		ret = -ENOMEM;
-		goto free_mhp;
-	}
+	if (!mhp->wr_waitp)
+		return -ENOMEM;
 
 	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
 	if (!mhp->dereg_skb) {
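Note: the int-returning c4iw_alloc_mw() matches the core-side change that moved struct ib_mw allocation out of the drivers, which is why the kzalloc()/kfree() of mhp disappears from this function. The core learns the wrapper size from the ops table; a sketch of that declaration, assuming the iw_cxgb4 type names used above:

static const struct ib_device_ops demo_mw_ops = {
	.alloc_mw = c4iw_alloc_mw,
	.dealloc_mw = c4iw_dealloc_mw,
	/*
	 * Tells the core how large struct c4iw_mw is and where the
	 * embedded struct ib_mw (the ibmw field) lives inside it.
	 */
	INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
};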
@@ -651,18 +638,19 @@
 	ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
 	if (ret)
 		goto free_skb;
+
 	mhp->rhp = rhp;
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.type = FW_RI_STAG_MW;
 	mhp->attr.stag = stag;
 	mmid = (stag) >> 8;
-	mhp->ibmw.rkey = stag;
-	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+	ibmw->rkey = stag;
+	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
 		ret = -ENOMEM;
 		goto dealloc_win;
 	}
 	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
-	return &(mhp->ibmw);
+	return 0;
 
 dealloc_win:
 	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
@@ -671,9 +659,7 @@
 	kfree_skb(mhp->dereg_skb);
 free_wr_wait:
 	c4iw_put_wr_wait(mhp->wr_waitp);
-free_mhp:
-	kfree(mhp);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 int c4iw_dealloc_mw(struct ib_mw *mw)
@@ -685,18 +671,15 @@
 	mhp = to_c4iw_mw(mw);
 	rhp = mhp->rhp;
 	mmid = (mw->rkey) >> 8;
-	remove_handle(rhp, &rhp->mmidr, mmid);
+	xa_erase_irq(&rhp->mrs, mmid);
 	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
 			  mhp->wr_waitp);
 	kfree_skb(mhp->dereg_skb);
 	c4iw_put_wr_wait(mhp->wr_waitp);
-	kfree(mhp);
-	pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
 	return 0;
 }
 
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
-			    enum ib_mr_type mr_type,
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 			    u32 max_num_sg)
 {
 	struct c4iw_dev *rhp;
@@ -752,7 +735,7 @@
 	mhp->attr.state = 0;
 	mmid = (stag) >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
 		ret = -ENOMEM;
 		goto err_dereg;
 	}
@@ -798,7 +781,7 @@
 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
 }
 
-int c4iw_dereg_mr(struct ib_mr *ib_mr)
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
 	struct c4iw_dev *rhp;
 	struct c4iw_mr *mhp;
@@ -809,7 +792,7 @@
 	mhp = to_c4iw_mr(ib_mr);
 	rhp = mhp->rhp;
 	mmid = mhp->attr.stag >> 8;
-	remove_handle(rhp, &rhp->mmidr, mmid);
+	xa_erase_irq(&rhp->mrs, mmid);
 	if (mhp->mpl)
 		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
 				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
@@ -820,8 +803,7 @@
 			  mhp->attr.pbl_size << 3);
 	if (mhp->kva)
 		kfree((void *) (unsigned long) mhp->kva);
-	if (mhp->umem)
-		ib_umem_release(mhp->umem);
+	ib_umem_release(mhp->umem);
 	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
 	c4iw_put_wr_wait(mhp->wr_waitp);
 	kfree(mhp);
@@ -833,9 +815,9 @@
 	struct c4iw_mr *mhp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&rhp->lock, flags);
-	mhp = get_mhp(rhp, rkey >> 8);
+	xa_lock_irqsave(&rhp->mrs, flags);
+	mhp = xa_load(&rhp->mrs, rkey >> 8);
 	if (mhp)
 		mhp->attr.state = 0;
-	spin_unlock_irqrestore(&rhp->lock, flags);
+	xa_unlock_irqrestore(&rhp->mrs, flags);
 }
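Note: the invalidate path now relies on the XArray's own lock instead of rhp->lock plus get_mhp(). xa_load() returning NULL covers an rkey whose MR was already erased, which the if (mhp) test skips. A generic sketch of the pattern; demo_mr/demo_invalidate are hypothetical:

/* Minimal stand-in for the one field touched here (see iw_cxgb4.h). */
struct demo_mr {
	struct { u32 state; } attr;
};

static void demo_invalidate(struct xarray *mrs, u32 rkey)
{
	struct demo_mr *mhp;
	unsigned long flags;

	/*
	 * Hold xa_lock across the store so a concurrent xa_erase_irq()
	 * from the dereg path cannot free the entry underneath us.
	 */
	xa_lock_irqsave(mrs, flags);
	mhp = xa_load(mrs, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	xa_unlock_irqrestore(mrs, flags);
}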