forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/infiniband/hw/mlx4/mr.c
@@ -258,7 +258,7 @@
 				       int *num_of_mtts)
 {
 	u64 block_shift = MLX4_MAX_MTT_SHIFT;
-	u64 min_shift = umem->page_shift;
+	u64 min_shift = PAGE_SHIFT;
 	u64 last_block_aligned_end = 0;
 	u64 current_block_start = 0;
 	u64 first_block_start = 0;
@@ -270,6 +270,8 @@
 	u64 next_block_start;
 	u64 total_len = 0;
 	int i;
+
+	*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
 
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
 		/*
@@ -295,8 +297,8 @@
 		 * in access to the wrong data.
 		 */
 		misalignment_bits =
-			(start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
-			^ current_block_start;
+			(start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
+			current_block_start;
 		block_shift = min(alignment_of(misalignment_bits),
 				  block_shift);
 	}
@@ -367,9 +369,8 @@
 	return block_shift;
 }
 
-static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
-					u64 length, u64 virt_addr,
-					int access_flags)
+static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
+					u64 length, int access_flags)
 {
 	/*
 	 * Force registering the memory as writable if the underlying pages
@@ -378,27 +379,28 @@
 	 * again
 	 */
 	if (!ib_access_writable(access_flags)) {
+		unsigned long untagged_start = untagged_addr(start);
 		struct vm_area_struct *vma;
 
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		/*
 		 * FIXME: Ideally this would iterate over all the vmas that
 		 * cover the memory, but for now it requires a single vma to
 		 * entirely cover the MR to support RO mappings.
 		 */
-		vma = find_vma(current->mm, start);
-		if (vma && vma->vm_end >= start + length &&
-		    vma->vm_start <= start) {
+		vma = find_vma(current->mm, untagged_start);
+		if (vma && vma->vm_end >= untagged_start + length &&
+		    vma->vm_start <= untagged_start) {
 			if (vma->vm_flags & VM_WRITE)
 				access_flags |= IB_ACCESS_LOCAL_WRITE;
 		} else {
 			access_flags |= IB_ACCESS_LOCAL_WRITE;
 		}
 
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 	}
 
-	return ib_umem_get(context, start, length, access_flags, 0);
+	return ib_umem_get(device, start, length, access_flags);
 }
 
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -415,14 +417,12 @@
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
-				    virt_addr, access_flags);
+	mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err_free;
 	}
 
-	n = ib_umem_page_count(mr->umem);
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
 
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
@@ -439,8 +439,6 @@
 		goto err_mr;
 
 	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
-	mr->ibmr.length = length;
-	mr->ibmr.iova = virt_addr;
 	mr->ibmr.page_size = 1U << shift;
 
 	return &mr->ibmr;
@@ -505,17 +503,16 @@
 
 		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
 		ib_umem_release(mmr->umem);
-		mmr->umem =
-			mlx4_get_umem_mr(mr->uobject->context, start, length,
-					 virt_addr, mr_access_flags);
+		mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
+					     mr_access_flags);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);
 			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
 			mmr->umem = NULL;
 			goto release_mpt_entry;
 		}
-		n = ib_umem_page_count(mmr->umem);
-		shift = mmr->umem->page_shift;
+		n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
+		shift = PAGE_SHIFT;
 
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 					      virt_addr, length, n, shift,
@@ -596,7 +593,7 @@
 	}
 }
 
-int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
+int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct mlx4_ib_mr *mr = to_mmr(ibmr);
 	int ret;
@@ -613,37 +610,27 @@
 	return 0;
 }
 
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			       struct ib_udata *udata)
+int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
-	struct mlx4_ib_dev *dev = to_mdev(pd->device);
-	struct mlx4_ib_mw *mw;
+	struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
+	struct mlx4_ib_mw *mw = to_mmw(ibmw);
 	int err;
 
-	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
-	if (!mw)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
-			    to_mlx4_type(type), &mw->mmw);
+	err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
+			    to_mlx4_type(ibmw->type), &mw->mmw);
 	if (err)
-		goto err_free;
+		return err;
 
 	err = mlx4_mw_enable(dev->dev, &mw->mmw);
 	if (err)
 		goto err_mw;
 
-	mw->ibmw.rkey = mw->mmw.key;
-
-	return &mw->ibmw;
+	ibmw->rkey = mw->mmw.key;
+	return 0;
 
 err_mw:
 	mlx4_mw_free(dev->dev, &mw->mmw);
-
-err_free:
-	kfree(mw);
-
-	return ERR_PTR(err);
+	return err;
 }
 
 int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
@@ -651,13 +638,10 @@
 	struct mlx4_ib_mw *mw = to_mmw(ibmw);
 
 	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
-	kfree(mw);
-
 	return 0;
 }
 
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
-			       enum ib_mr_type mr_type,
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 			       u32 max_num_sg)
 {
 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
@@ -699,99 +683,6 @@
 err_free:
 	kfree(mr);
 	return ERR_PTR(err);
-}
-
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
-				 struct ib_fmr_attr *fmr_attr)
-{
-	struct mlx4_ib_dev *dev = to_mdev(pd->device);
-	struct mlx4_ib_fmr *fmr;
-	int err = -ENOMEM;
-
-	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
-	if (!fmr)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
-			     fmr_attr->max_pages, fmr_attr->max_maps,
-			     fmr_attr->page_shift, &fmr->mfmr);
-	if (err)
-		goto err_free;
-
-	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
-	if (err)
-		goto err_mr;
-
-	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
-
-	return &fmr->ibfmr;
-
-err_mr:
-	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
-
-err_free:
-	kfree(fmr);
-
-	return ERR_PTR(err);
-}
-
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
-			 int npages, u64 iova)
-{
-	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
-
-	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
-				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
-}
-
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
-{
-	struct ib_fmr *ibfmr;
-	int err;
-	struct mlx4_dev *mdev = NULL;
-
-	list_for_each_entry(ibfmr, fmr_list, list) {
-		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
-			return -EINVAL;
-		mdev = to_mdev(ibfmr->device)->dev;
-	}
-
-	if (!mdev)
-		return 0;
-
-	list_for_each_entry(ibfmr, fmr_list, list) {
-		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-
-		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
-	}
-
-	/*
-	 * Make sure all MPT status updates are visible before issuing
-	 * SYNC_TPT firmware command.
-	 */
-	wmb();
-
-	err = mlx4_SYNC_TPT(mdev);
-	if (err)
-		pr_warn("SYNC_TPT error %d when "
-			"unmapping FMRs\n", err);
-
-	return 0;
-}
-
-int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
-{
-	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
-	int err;
-
-	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
-
-	if (!err)
-		kfree(ifmr);
-
-	return err;
 }
 
 static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)