hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/infiniband/sw/rdmavt/mr.c
....@@ -97,7 +97,6 @@
9797 RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
9898
9999 rdi->dparms.props.max_mr = rdi->lkey_table.max;
100 - rdi->dparms.props.max_fmr = rdi->lkey_table.max;
101100 return 0;
102101 }
103102
....@@ -325,8 +324,6 @@
325324 * @acc: access flags
326325 *
327326 * Return: the memory region on success, otherwise returns an errno.
328 - * Note that all DMA addresses should be created via the functions in
329 - * struct dma_virt_ops.
330327 */
331328 struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
332329 {
....@@ -383,19 +380,18 @@
383380 {
384381 struct rvt_mr *mr;
385382 struct ib_umem *umem;
386 - struct scatterlist *sg;
387 - int n, m, entry;
383 + struct sg_page_iter sg_iter;
384 + int n, m;
388385 struct ib_mr *ret;
389386
390387 if (length == 0)
391388 return ERR_PTR(-EINVAL);
392389
393 - umem = ib_umem_get(pd->uobject->context, start, length,
394 - mr_access_flags, 0);
390 + umem = ib_umem_get(pd->device, start, length, mr_access_flags);
395391 if (IS_ERR(umem))
396392 return (void *)umem;
397393
398 - n = umem->nmap;
394 + n = ib_umem_num_pages(umem);
399395
400396 mr = __rvt_alloc_mr(n, pd);
401397 if (IS_ERR(mr)) {
....@@ -410,23 +406,21 @@
410406 mr->mr.access_flags = mr_access_flags;
411407 mr->umem = umem;
412408
413 - mr->mr.page_shift = umem->page_shift;
409 + mr->mr.page_shift = PAGE_SHIFT;
414410 m = 0;
415411 n = 0;
416 - for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
412 + for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
417413 void *vaddr;
418414
419 - vaddr = page_address(sg_page(sg));
415 + vaddr = page_address(sg_page_iter_page(&sg_iter));
420416 if (!vaddr) {
421417 ret = ERR_PTR(-EINVAL);
422418 goto bail_inval;
423419 }
424420 mr->mr.map[m]->segs[n].vaddr = vaddr;
425 - mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
426 - trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
427 - BIT(umem->page_shift));
428 - n++;
429 - if (n == RVT_SEGSZ) {
421 + mr->mr.map[m]->segs[n].length = PAGE_SIZE;
422 + trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
423 + if (++n == RVT_SEGSZ) {
430424 m++;
431425 n = 0;
432426 }
....@@ -503,7 +497,7 @@
503497 rvt_pr_err(rdi,
504498 "%s timeout mr %p pd %p lkey %x refcount %ld\n",
505499 t, mr, mr->pd, mr->lkey,
506 - atomic_long_read(&mr->refcount.count));
500 + atomic_long_read(&mr->refcount.data->count));
507501 rvt_get_mr(mr);
508502 return -EBUSY;
509503 }
....@@ -553,7 +547,7 @@
553547 *
554548 * Returns 0 on success.
555549 */
556 -int rvt_dereg_mr(struct ib_mr *ibmr)
550 +int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
557551 {
558552 struct rvt_mr *mr = to_imr(ibmr);
559553 int ret;
....@@ -565,8 +559,7 @@
565559 if (ret)
566560 goto out;
567561 rvt_deinit_mregion(&mr->mr);
568 - if (mr->umem)
569 - ib_umem_release(mr->umem);
562 + ib_umem_release(mr->umem);
570563 kfree(mr);
571564 out:
572565 return ret;
....@@ -580,8 +573,7 @@
580573 *
581574 * Return: the memory region on success, otherwise return an errno.
582575 */
583 -struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
584 - enum ib_mr_type mr_type,
576 +struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
585577 u32 max_num_sg)
586578 {
587579 struct rvt_mr *mr;
....@@ -617,8 +609,8 @@
617609 n = mapped_segs % RVT_SEGSZ;
618610 mr->mr.map[m]->segs[n].vaddr = (void *)addr;
619611 mr->mr.map[m]->segs[n].length = ps;
620 - trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
621612 mr->mr.length += ps;
613 + trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
622614
623615 return 0;
624616 }
....@@ -647,6 +639,7 @@
647639 mr->mr.iova = ibmr->iova;
648640 mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
649641 mr->mr.length = (size_t)ibmr->length;
642 + trace_rvt_map_mr_sg(ibmr, sg_nents, sg_offset);
650643 return ret;
651644 }
652645
....@@ -718,160 +711,6 @@
718711 EXPORT_SYMBOL(rvt_invalidate_rkey);
719712
720713 /**
721 - * rvt_alloc_fmr - allocate a fast memory region
722 - * @pd: the protection domain for this memory region
723 - * @mr_access_flags: access flags for this memory region
724 - * @fmr_attr: fast memory region attributes
725 - *
726 - * Return: the memory region on success, otherwise returns an errno.
727 - */
728 -struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
729 - struct ib_fmr_attr *fmr_attr)
730 -{
731 - struct rvt_fmr *fmr;
732 - int m;
733 - struct ib_fmr *ret;
734 - int rval = -ENOMEM;
735 -
736 - /* Allocate struct plus pointers to first level page tables. */
737 - m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
738 - fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
739 - if (!fmr)
740 - goto bail;
741 -
742 - rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
743 - PERCPU_REF_INIT_ATOMIC);
744 - if (rval)
745 - goto bail;
746 -
747 - /*
748 - * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
749 - * rkey.
750 - */
751 - rval = rvt_alloc_lkey(&fmr->mr, 0);
752 - if (rval)
753 - goto bail_mregion;
754 - fmr->ibfmr.rkey = fmr->mr.lkey;
755 - fmr->ibfmr.lkey = fmr->mr.lkey;
756 - /*
757 - * Resources are allocated but no valid mapping (RKEY can't be
758 - * used).
759 - */
760 - fmr->mr.access_flags = mr_access_flags;
761 - fmr->mr.max_segs = fmr_attr->max_pages;
762 - fmr->mr.page_shift = fmr_attr->page_shift;
763 -
764 - ret = &fmr->ibfmr;
765 -done:
766 - return ret;
767 -
768 -bail_mregion:
769 - rvt_deinit_mregion(&fmr->mr);
770 -bail:
771 - kfree(fmr);
772 - ret = ERR_PTR(rval);
773 - goto done;
774 -}
775 -
776 -/**
777 - * rvt_map_phys_fmr - set up a fast memory region
778 - * @ibfmr: the fast memory region to set up
779 - * @page_list: the list of pages to associate with the fast memory region
780 - * @list_len: the number of pages to associate with the fast memory region
781 - * @iova: the virtual address of the start of the fast memory region
782 - *
783 - * This may be called from interrupt context.
784 - *
785 - * Return: 0 on success
786 - */
787 -
788 -int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
789 - int list_len, u64 iova)
790 -{
791 - struct rvt_fmr *fmr = to_ifmr(ibfmr);
792 - struct rvt_lkey_table *rkt;
793 - unsigned long flags;
794 - int m, n;
795 - unsigned long i;
796 - u32 ps;
797 - struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
798 -
799 - i = atomic_long_read(&fmr->mr.refcount.count);
800 - if (i > 2)
801 - return -EBUSY;
802 -
803 - if (list_len > fmr->mr.max_segs)
804 - return -EINVAL;
805 -
806 - rkt = &rdi->lkey_table;
807 - spin_lock_irqsave(&rkt->lock, flags);
808 - fmr->mr.user_base = iova;
809 - fmr->mr.iova = iova;
810 - ps = 1 << fmr->mr.page_shift;
811 - fmr->mr.length = list_len * ps;
812 - m = 0;
813 - n = 0;
814 - for (i = 0; i < list_len; i++) {
815 - fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
816 - fmr->mr.map[m]->segs[n].length = ps;
817 - trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
818 - if (++n == RVT_SEGSZ) {
819 - m++;
820 - n = 0;
821 - }
822 - }
823 - spin_unlock_irqrestore(&rkt->lock, flags);
824 - return 0;
825 -}
826 -
827 -/**
828 - * rvt_unmap_fmr - unmap fast memory regions
829 - * @fmr_list: the list of fast memory regions to unmap
830 - *
831 - * Return: 0 on success.
832 - */
833 -int rvt_unmap_fmr(struct list_head *fmr_list)
834 -{
835 - struct rvt_fmr *fmr;
836 - struct rvt_lkey_table *rkt;
837 - unsigned long flags;
838 - struct rvt_dev_info *rdi;
839 -
840 - list_for_each_entry(fmr, fmr_list, ibfmr.list) {
841 - rdi = ib_to_rvt(fmr->ibfmr.device);
842 - rkt = &rdi->lkey_table;
843 - spin_lock_irqsave(&rkt->lock, flags);
844 - fmr->mr.user_base = 0;
845 - fmr->mr.iova = 0;
846 - fmr->mr.length = 0;
847 - spin_unlock_irqrestore(&rkt->lock, flags);
848 - }
849 - return 0;
850 -}
851 -
852 -/**
853 - * rvt_dealloc_fmr - deallocate a fast memory region
854 - * @ibfmr: the fast memory region to deallocate
855 - *
856 - * Return: 0 on success.
857 - */
858 -int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
859 -{
860 - struct rvt_fmr *fmr = to_ifmr(ibfmr);
861 - int ret = 0;
862 -
863 - rvt_free_lkey(&fmr->mr);
864 - rvt_put_mr(&fmr->mr); /* will set completion if last */
865 - ret = rvt_check_refs(&fmr->mr, __func__);
866 - if (ret)
867 - goto out;
868 - rvt_deinit_mregion(&fmr->mr);
869 - kfree(fmr);
870 -out:
871 - return ret;
872 -}
873 -
874 -/**
875714 * rvt_sge_adjacent - is isge compressible
876715 * @last_sge: last outgoing SGE written
877716 * @sge: SGE to check
....@@ -925,7 +764,7 @@
925764
926765 /*
927766 * We use LKEY == zero for kernel virtual addresses
928 - * (see rvt_get_dma_mr() and dma_virt_ops).
767 + * (see rvt_get_dma_mr()).
929768 */
930769 if (sge->lkey == 0) {
931770 struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
....@@ -1036,7 +875,7 @@
1036875
1037876 /*
1038877 * We use RKEY == zero for kernel virtual addresses
1039 - * (see rvt_get_dma_mr() and dma_virt_ops).
878 + * (see rvt_get_dma_mr()).
1040879 */
1041880 rcu_read_lock();
1042881 if (rkey == 0) {