@@ -97,7 +97,6 @@
 		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
 
 	rdi->dparms.props.max_mr = rdi->lkey_table.max;
-	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
 	return 0;
 }
 
@@ -325,8 +324,6 @@
  * @acc: access flags
  *
  * Return: the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the functions in
- * struct dma_virt_ops.
  */
 struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
 {
@@ -383,19 +380,18 @@
 {
 	struct rvt_mr *mr;
 	struct ib_umem *umem;
-	struct scatterlist *sg;
-	int n, m, entry;
+	struct sg_page_iter sg_iter;
+	int n, m;
 	struct ib_mr *ret;
 
 	if (length == 0)
 		return ERR_PTR(-EINVAL);
 
-	umem = ib_umem_get(pd->uobject->context, start, length,
-			   mr_access_flags, 0);
+	umem = ib_umem_get(pd->device, start, length, mr_access_flags);
 	if (IS_ERR(umem))
 		return (void *)umem;
 
-	n = umem->nmap;
+	n = ib_umem_num_pages(umem);
 
 	mr = __rvt_alloc_mr(n, pd);
 	if (IS_ERR(mr)) {
@@ -410,23 +406,21 @@
 	mr->mr.access_flags = mr_access_flags;
 	mr->umem = umem;
 
-	mr->mr.page_shift = umem->page_shift;
+	mr->mr.page_shift = PAGE_SHIFT;
 	m = 0;
 	n = 0;
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+	for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
 		void *vaddr;
 
-		vaddr = page_address(sg_page(sg));
+		vaddr = page_address(sg_page_iter_page(&sg_iter));
 		if (!vaddr) {
 			ret = ERR_PTR(-EINVAL);
 			goto bail_inval;
 		}
 		mr->mr.map[m]->segs[n].vaddr = vaddr;
-		mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
-		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
-				      BIT(umem->page_shift));
-		n++;
-		if (n == RVT_SEGSZ) {
+		mr->mr.map[m]->segs[n].length = PAGE_SIZE;
+		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
+		if (++n == RVT_SEGSZ) {
 			m++;
 			n = 0;
 		}
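Illustrative sketch (not part of the patch): the two hunks above move rvt_reg_user_mr() to ib_umem_get()'s device-based signature and to a per-page scatterlist walk. The standalone helper below, with the hypothetical name rvt_sketch_walk_umem(), shows that page-at-a-time pattern using only the calls the patch itself relies on (ib_umem_get(), ib_umem_num_pages(), for_each_sg_page(), sg_page_iter_page()).

/* Sketch only: hypothetical helper mirroring the patched loop above. */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

static int rvt_sketch_walk_umem(struct ib_pd *pd, u64 start, u64 length,
				int access)
{
	struct ib_umem *umem;
	struct sg_page_iter sg_iter;
	size_t npages;

	/* Pin the user range; an ib_device, not a ucontext, is passed now. */
	umem = ib_umem_get(pd->device, start, length, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* One PAGE_SIZE segment per pinned page. */
	npages = ib_umem_num_pages(umem);
	if (!npages) {
		ib_umem_release(umem);
		return -EINVAL;
	}

	/* Visit every page even where scatterlist entries were coalesced. */
	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		void *vaddr = page_address(sg_page_iter_page(&sg_iter));

		if (!vaddr) {
			ib_umem_release(umem);
			return -EINVAL;
		}
		/* ... record vaddr and PAGE_SIZE for this segment ... */
	}

	ib_umem_release(umem);
	return 0;
}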
@@ -503,7 +497,7 @@
 		rvt_pr_err(rdi,
 			   "%s timeout mr %p pd %p lkey %x refcount %ld\n",
 			   t, mr, mr->pd, mr->lkey,
-			   atomic_long_read(&mr->refcount.count));
+			   atomic_long_read(&mr->refcount.data->count));
 		rvt_get_mr(mr);
 		return -EBUSY;
 	}
@@ -553,7 +547,7 @@
  *
  * Returns 0 on success.
  */
-int rvt_dereg_mr(struct ib_mr *ibmr)
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct rvt_mr *mr = to_imr(ibmr);
 	int ret;
@@ -565,8 +559,7 @@
 	if (ret)
 		goto out;
 	rvt_deinit_mregion(&mr->mr);
-	if (mr->umem)
-		ib_umem_release(mr->umem);
+	ib_umem_release(mr->umem);
 	kfree(mr);
 out:
 	return ret;
@@ -580,8 +573,7 @@
  *
  * Return: the memory region on success, otherwise return an errno.
  */
-struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
-			   enum ib_mr_type mr_type,
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 			   u32 max_num_sg)
 {
 	struct rvt_mr *mr;
@@ -617,8 +609,8 @@
 	n = mapped_segs % RVT_SEGSZ;
 	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
 	mr->mr.map[m]->segs[n].length = ps;
-	trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
 	mr->mr.length += ps;
+	trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
 
 	return 0;
 }
@@ -647,6 +639,7 @@
 	mr->mr.iova = ibmr->iova;
 	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
 	mr->mr.length = (size_t)ibmr->length;
+	trace_rvt_map_mr_sg(ibmr, sg_nents, sg_offset);
 	return ret;
 }
 
@@ -718,160 +711,6 @@
 EXPORT_SYMBOL(rvt_invalidate_rkey);
 
 /**
- * rvt_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Return: the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-			     struct ib_fmr_attr *fmr_attr)
-{
-	struct rvt_fmr *fmr;
-	int m;
-	struct ib_fmr *ret;
-	int rval = -ENOMEM;
-
-	/* Allocate struct plus pointers to first level page tables. */
-	m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
-	fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
-	if (!fmr)
-		goto bail;
-
-	rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
-				PERCPU_REF_INIT_ATOMIC);
-	if (rval)
-		goto bail;
-
-	/*
-	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
-	 * rkey.
-	 */
-	rval = rvt_alloc_lkey(&fmr->mr, 0);
-	if (rval)
-		goto bail_mregion;
-	fmr->ibfmr.rkey = fmr->mr.lkey;
-	fmr->ibfmr.lkey = fmr->mr.lkey;
-	/*
-	 * Resources are allocated but no valid mapping (RKEY can't be
-	 * used).
-	 */
-	fmr->mr.access_flags = mr_access_flags;
-	fmr->mr.max_segs = fmr_attr->max_pages;
-	fmr->mr.page_shift = fmr_attr->page_shift;
-
-	ret = &fmr->ibfmr;
-done:
-	return ret;
-
-bail_mregion:
-	rvt_deinit_mregion(&fmr->mr);
-bail:
-	kfree(fmr);
-	ret = ERR_PTR(rval);
-	goto done;
-}
-
-/**
- * rvt_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- *
- * Return: 0 on success
- */
-
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
-		     int list_len, u64 iova)
-{
-	struct rvt_fmr *fmr = to_ifmr(ibfmr);
-	struct rvt_lkey_table *rkt;
-	unsigned long flags;
-	int m, n;
-	unsigned long i;
-	u32 ps;
-	struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
-
-	i = atomic_long_read(&fmr->mr.refcount.count);
-	if (i > 2)
-		return -EBUSY;
-
-	if (list_len > fmr->mr.max_segs)
-		return -EINVAL;
-
-	rkt = &rdi->lkey_table;
-	spin_lock_irqsave(&rkt->lock, flags);
-	fmr->mr.user_base = iova;
-	fmr->mr.iova = iova;
-	ps = 1 << fmr->mr.page_shift;
-	fmr->mr.length = list_len * ps;
-	m = 0;
-	n = 0;
-	for (i = 0; i < list_len; i++) {
-		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
-		fmr->mr.map[m]->segs[n].length = ps;
-		trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
-		if (++n == RVT_SEGSZ) {
-			m++;
-			n = 0;
-		}
-	}
-	spin_unlock_irqrestore(&rkt->lock, flags);
-	return 0;
-}
-
-/**
- * rvt_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Return: 0 on success.
- */
-int rvt_unmap_fmr(struct list_head *fmr_list)
-{
-	struct rvt_fmr *fmr;
-	struct rvt_lkey_table *rkt;
-	unsigned long flags;
-	struct rvt_dev_info *rdi;
-
-	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
-		rdi = ib_to_rvt(fmr->ibfmr.device);
-		rkt = &rdi->lkey_table;
-		spin_lock_irqsave(&rkt->lock, flags);
-		fmr->mr.user_base = 0;
-		fmr->mr.iova = 0;
-		fmr->mr.length = 0;
-		spin_unlock_irqrestore(&rkt->lock, flags);
-	}
-	return 0;
-}
-
-/**
- * rvt_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Return: 0 on success.
- */
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
-{
-	struct rvt_fmr *fmr = to_ifmr(ibfmr);
-	int ret = 0;
-
-	rvt_free_lkey(&fmr->mr);
-	rvt_put_mr(&fmr->mr); /* will set completion if last */
-	ret = rvt_check_refs(&fmr->mr, __func__);
-	if (ret)
-		goto out;
-	rvt_deinit_mregion(&fmr->mr);
-	kfree(fmr);
-out:
-	return ret;
-}
-
-/**
  * rvt_sge_adjacent - is isge compressible
  * @last_sge: last outgoing SGE written
  * @sge: SGE to check
@@ -925,7 +764,7 @@
 
 	/*
 	 * We use LKEY == zero for kernel virtual addresses
-	 * (see rvt_get_dma_mr() and dma_virt_ops).
+	 * (see rvt_get_dma_mr()).
 	 */
 	if (sge->lkey == 0) {
 		struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
@@ -1036,7 +875,7 @@
 
 	/*
 	 * We use RKEY == zero for kernel virtual addresses
-	 * (see rvt_get_dma_mr() and dma_virt_ops).
+	 * (see rvt_get_dma_mr()).
 	 */
 	rcu_read_lock();
 	if (rkey == 0) {