```diff
@@ -258,7 +258,7 @@
                                         int *num_of_mtts)
 {
         u64 block_shift = MLX4_MAX_MTT_SHIFT;
-        u64 min_shift = umem->page_shift;
+        u64 min_shift = PAGE_SHIFT;
         u64 last_block_aligned_end = 0;
         u64 current_block_start = 0;
         u64 first_block_start = 0;
```
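Note: `struct ib_umem` no longer carries a per-mapping `page_shift`; umems handed to drivers are always described in CPU pages, so the minimum MTT block shift becomes the constant `PAGE_SHIFT`. A sketch of the umem shape this code now relies on (an assumption for illustration, not the full mainline definition in `include/rdma/ib_umem.h`):

```c
#include <linux/types.h>

struct ib_device;
struct mm_struct;

/* Sketch of the post-change ib_umem fields relevant here. */
struct ib_umem_sketch {
        struct ib_device *ibdev;
        struct mm_struct *owning_mm;
        unsigned long address;  /* user VA of the mapping */
        size_t length;          /* bytes */
        /* no page_shift: the core tracks CPU pages only, so drivers
         * use PAGE_SHIFT / PAGE_SIZE directly. */
};
```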
```diff
@@ -270,6 +270,8 @@
         u64 next_block_start;
         u64 total_len = 0;
         int i;
+
+        *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
 
         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                 /*
```
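The new line seeds `*num_of_mtts` with `ib_umem_num_dma_blocks()`, which counts how many aligned `pgsz` blocks are needed to cover the mapping, before the scatterlist walk below refines the block size. A sketch of the arithmetic it performs (my paraphrase of the mainline helper, not a verbatim copy):

```c
#include <linux/align.h>
#include <linux/types.h>

/* Paraphrase of ib_umem_num_dma_blocks(): blocks of size pgsz needed
 * to cover [iova, iova + length) once both ends are aligned out. */
static inline size_t num_dma_blocks_sketch(u64 iova, u64 length,
                                           unsigned long pgsz)
{
        return (ALIGN(iova + length, pgsz) - ALIGN_DOWN(iova, pgsz)) / pgsz;
}
```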
```diff
@@ -295,8 +297,8 @@
                  * in access to the wrong data.
                  */
                 misalignment_bits =
-                        (start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
-                        ^ current_block_start;
+                        (start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
+                        current_block_start;
                 block_shift = min(alignment_of(misalignment_bits),
                                   block_shift);
         }
```
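The XOR deserves a gloss: masking `start_va` down to a page boundary and XORing it with the block's DMA start sets a bit everywhere the two addresses disagree, and the file's `alignment_of()` helper reduces that to the index of the lowest set bit, which caps `block_shift`; a large block is only addressable if VA and DMA agree on all bits below the block size. A worked example with hypothetical addresses:

```c
#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical numbers: a 2 MB-aligned VA backed by DMA memory that
 * is only 64 KB aligned can use at most 64 KB MTT blocks. */
static unsigned long example_block_shift(void)
{
        u64 start_va = 0x200000;            /* 2 MB-aligned user VA   */
        u64 current_block_start = 0x810000; /* 64 KB-aligned DMA addr */
        u64 misalignment_bits = start_va ^ current_block_start; /* 0xa10000 */

        /* lowest differing bit is bit 16 -> blocks of 1 << 16 = 64 KB */
        return __ffs(misalignment_bits);
}
```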
```diff
@@ -367,9 +369,8 @@
         return block_shift;
 }
 
-static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
-                                        u64 length, u64 virt_addr,
-                                        int access_flags)
+static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
+                                        u64 length, int access_flags)
 {
         /*
          * Force registering the memory as writable if the underlying pages
@@ -378,27 +379,28 @@
          * again
          */
         if (!ib_access_writable(access_flags)) {
+                unsigned long untagged_start = untagged_addr(start);
                 struct vm_area_struct *vma;
 
-                down_read(&current->mm->mmap_sem);
+                mmap_read_lock(current->mm);
                 /*
                  * FIXME: Ideally this would iterate over all the vmas that
                  * cover the memory, but for now it requires a single vma to
                  * entirely cover the MR to support RO mappings.
                  */
-                vma = find_vma(current->mm, start);
-                if (vma && vma->vm_end >= start + length &&
-                    vma->vm_start <= start) {
+                vma = find_vma(current->mm, untagged_start);
+                if (vma && vma->vm_end >= untagged_start + length &&
+                    vma->vm_start <= untagged_start) {
                         if (vma->vm_flags & VM_WRITE)
                                 access_flags |= IB_ACCESS_LOCAL_WRITE;
                 } else {
                         access_flags |= IB_ACCESS_LOCAL_WRITE;
                 }
 
-                up_read(&current->mm->mmap_sem);
+                mmap_read_unlock(current->mm);
         }
 
-        return ib_umem_get(context, start, length, access_flags, 0);
+        return ib_umem_get(device, start, length, access_flags);
 }
 
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
```
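Three independent cleanups meet in this function: `untagged_addr()` strips architecture tag bits (arm64 TBI, sparc ADI) from the user pointer before the VMA lookup, since `find_vma()` operates on untagged addresses; the `mmap_sem` calls become the `mmap_lock` wrapper API; and `ib_umem_get()` now takes the `ib_device` and has dropped its trailing `dmasync` flag. Roughly what the lock wrappers expand to (a sketch of the mainline helpers, minus their lock-event instrumentation):

```c
#include <linux/mm_types.h>
#include <linux/rwsem.h>

/* Sketch: mmap_sem was renamed to mmap_lock and wrapped; these
 * mirror the mainline helpers without the accounting hooks. */
static inline void mmap_read_lock_sketch(struct mm_struct *mm)
{
        down_read(&mm->mmap_lock);
}

static inline void mmap_read_unlock_sketch(struct mm_struct *mm)
{
        up_read(&mm->mmap_lock);
}
```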
```diff
@@ -415,14 +417,12 @@
         if (!mr)
                 return ERR_PTR(-ENOMEM);
 
-        mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
-                                    virt_addr, access_flags);
+        mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
         if (IS_ERR(mr->umem)) {
                 err = PTR_ERR(mr->umem);
                 goto err_free;
         }
 
-        n = ib_umem_page_count(mr->umem);
         shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
 
         err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
@@ -439,8 +439,6 @@
                 goto err_mr;
 
         mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
-        mr->ibmr.length = length;
-        mr->ibmr.iova = virt_addr;
         mr->ibmr.page_size = 1U << shift;
 
         return &mr->ibmr;
```
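Two removals here: the explicit `ib_umem_page_count()` call goes away because `mlx4_ib_umem_calc_optimal_mtt_size()` now seeds `n` through its `num_of_mtts` out-parameter (see the hunk at line 274 above), and the `ibmr.length`/`ibmr.iova` assignments are dropped, presumably because the shared core fills those fields after the driver returns. A hypothetical sketch of that core-side bookkeeping (illustrative helper name, not the exact uverbs code):

```c
#include <rdma/ib_verbs.h>

/* Hypothetical core-side helper: if the core records iova/length
 * itself after reg_user_mr, per-driver assignments like the two
 * removed lines become redundant. */
static void core_fill_mr_sketch(struct ib_mr *mr, u64 hca_va, u64 length)
{
        mr->iova = hca_va;      /* was: mr->ibmr.iova = virt_addr */
        mr->length = length;    /* was: mr->ibmr.length = length  */
}
```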
```diff
@@ -505,17 +503,16 @@
 
                 mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                 ib_umem_release(mmr->umem);
-                mmr->umem =
-                        mlx4_get_umem_mr(mr->uobject->context, start, length,
-                                         virt_addr, mr_access_flags);
+                mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
+                                             mr_access_flags);
                 if (IS_ERR(mmr->umem)) {
                         err = PTR_ERR(mmr->umem);
                         /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
                         mmr->umem = NULL;
                         goto release_mpt_entry;
                 }
-                n = ib_umem_page_count(mmr->umem);
-                shift = mmr->umem->page_shift;
+                n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
+                shift = PAGE_SHIFT;
 
                 err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                               virt_addr, length, n, shift,
```
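The rereg path gets the same treatment as the reg path: with MTTs rewritten at PAGE_SIZE granularity, the two removed umem fields reduce to a constant shift plus a block count. If I read the helpers right, the old and new expressions should agree for page-sized blocks; a sketch of the count for a umem with a given device VA and length:

```c
#include <linux/align.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Sketch of the page-count arithmetic behind the conversion; for
 * pgsz == PAGE_SIZE this should match the old ib_umem_page_count(). */
static size_t rereg_mtt_count_sketch(u64 iova, u64 length)
{
        return (ALIGN(iova + length, PAGE_SIZE) -
                ALIGN_DOWN(iova, PAGE_SIZE)) >> PAGE_SHIFT;
}
```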
```diff
@@ -596,7 +593,7 @@
         }
 }
 
-int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
+int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
         struct mlx4_ib_mr *mr = to_mmr(ibmr);
         int ret;
```
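`dereg_mr` grows a `struct ib_udata *` to match the updated `ib_device_ops` prototype; mlx4 does not use it, but the parameter lets drivers that allocate user context during registration reach it again at teardown. An assumed excerpt of the ops wiring (mlx4 keeps such a table in `main.c`; the members shown here are illustrative):

```c
#include <rdma/ib_verbs.h>

int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);

/* Assumed excerpt: the function is hooked up through ib_device_ops,
 * whose .dereg_mr member now carries the udata parameter. */
static const struct ib_device_ops mlx4_ib_dev_ops_sketch = {
        .dereg_mr = mlx4_ib_dereg_mr,
        /* ... */
};
```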
```diff
@@ -613,37 +610,27 @@
         return 0;
 }
 
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-                               struct ib_udata *udata)
+int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
-        struct mlx4_ib_dev *dev = to_mdev(pd->device);
-        struct mlx4_ib_mw *mw;
+        struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
+        struct mlx4_ib_mw *mw = to_mmw(ibmw);
         int err;
 
-        mw = kmalloc(sizeof(*mw), GFP_KERNEL);
-        if (!mw)
-                return ERR_PTR(-ENOMEM);
-
-        err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
-                            to_mlx4_type(type), &mw->mmw);
+        err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
+                            to_mlx4_type(ibmw->type), &mw->mmw);
         if (err)
-                goto err_free;
+                return err;
 
         err = mlx4_mw_enable(dev->dev, &mw->mmw);
         if (err)
                 goto err_mw;
 
-        mw->ibmw.rkey = mw->mmw.key;
-
-        return &mw->ibmw;
+        ibmw->rkey = mw->mmw.key;
+        return 0;
 
 err_mw:
         mlx4_mw_free(dev->dev, &mw->mmw);
-
-err_free:
-        kfree(mw);
-
-        return ERR_PTR(err);
+        return err;
 }
 
 int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
```
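The `alloc_mw` conversion follows the core-allocated object pattern: the RDMA core now allocates the driver's MW wrapper (sized through `INIT_RDMA_OBJ_SIZE`), passes it in as `ibmw`, and frees it after `dealloc_mw`. That is why the `kmalloc`/`kfree`/`ERR_PTR` scaffolding disappears here, and why the `kfree(mw)` vanishes from `mlx4_ib_dealloc_mw` in the next hunk. A sketch of the pattern (the wrapper matches mlx4's layout; the ops excerpt is an assumption):

```c
#include <rdma/ib_verbs.h>

struct mlx4_mw;

/* Driver wrapper embedding the core object; to_mmw() recovers it
 * from the ibmw pointer the core passes in via container_of(). */
struct mlx4_ib_mw_sketch {
        struct ib_mw ibmw;
        /* struct mlx4_mw mmw;  -- device-specific state follows */
};

/* Assumed ops excerpt: INIT_RDMA_OBJ_SIZE tells the core how much
 * to allocate and where the ib_mw lives inside the wrapper. */
static const struct ib_device_ops mw_ops_sketch = {
        INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw_sketch, ibmw),
};
```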
```diff
@@ -651,13 +638,10 @@
         struct mlx4_ib_mw *mw = to_mmw(ibmw);
 
         mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
-        kfree(mw);
-
         return 0;
 }
 
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
-                               enum ib_mr_type mr_type,
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                                u32 max_num_sg)
 {
         struct mlx4_ib_dev *dev = to_mdev(pd->device);
```
```diff
@@ -699,99 +683,6 @@
 err_free:
         kfree(mr);
         return ERR_PTR(err);
-}
-
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
-                                 struct ib_fmr_attr *fmr_attr)
-{
-        struct mlx4_ib_dev *dev = to_mdev(pd->device);
-        struct mlx4_ib_fmr *fmr;
-        int err = -ENOMEM;
-
-        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
-        if (!fmr)
-                return ERR_PTR(-ENOMEM);
-
-        err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
-                             fmr_attr->max_pages, fmr_attr->max_maps,
-                             fmr_attr->page_shift, &fmr->mfmr);
-        if (err)
-                goto err_free;
-
-        err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
-        if (err)
-                goto err_mr;
-
-        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
-
-        return &fmr->ibfmr;
-
-err_mr:
-        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
-
-err_free:
-        kfree(fmr);
-
-        return ERR_PTR(err);
-}
-
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
-                         int npages, u64 iova)
-{
-        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-        struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
-
-        return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
-                                 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
-}
-
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
-{
-        struct ib_fmr *ibfmr;
-        int err;
-        struct mlx4_dev *mdev = NULL;
-
-        list_for_each_entry(ibfmr, fmr_list, list) {
-                if (mdev && to_mdev(ibfmr->device)->dev != mdev)
-                        return -EINVAL;
-                mdev = to_mdev(ibfmr->device)->dev;
-        }
-
-        if (!mdev)
-                return 0;
-
-        list_for_each_entry(ibfmr, fmr_list, list) {
-                struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-
-                mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
-        }
-
-        /*
-         * Make sure all MPT status updates are visible before issuing
-         * SYNC_TPT firmware command.
-         */
-        wmb();
-
-        err = mlx4_SYNC_TPT(mdev);
-        if (err)
-                pr_warn("SYNC_TPT error %d when "
-                        "unmapping FMRs\n", err);
-
-        return 0;
-}
-
-int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
-{
-        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-        struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
-        int err;
-
-        err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
-
-        if (!err)
-                kfree(ifmr);
-
-        return err;
 }
 
 static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
```
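Everything removed below the `alloc_mr` error path is this driver's share of the tree-wide FMR removal: the legacy FMR API was deleted from the RDMA core, and its in-kernel users were converted to FRWR (fast registration through `IB_WR_REG_MR` work requests). A sketch of the replacement flow using standard verbs calls (error handling abbreviated; the function name, access flags, and page size are illustrative):

```c
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* FRWR sketch: allocate a registration MR once, map a scatterlist
 * into it, then register by posting an IB_WR_REG_MR work request.
 * Later, IB_WR_LOCAL_INV invalidates the rkey so it can be reused. */
static int frwr_register_sketch(struct ib_pd *pd, struct ib_qp *qp,
                                struct scatterlist *sgl, int nents)
{
        struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
        struct ib_reg_wr wr = {};

        if (IS_ERR(mr))
                return PTR_ERR(mr);

        /* load the MR's page list from the scatterlist */
        if (ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE) != nents) {
                ib_dereg_mr(mr);
                return -EINVAL;
        }

        wr.wr.opcode = IB_WR_REG_MR;
        wr.mr = mr;
        wr.key = mr->rkey;
        wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

        return ib_post_send(qp, &wr.wr, NULL);
}
```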
|---|