.. | ..
58 | 58 |  * Shrink (or completely remove) all CPU mappings which reference the shrunk
59 | 59 |  * part of the allocation.
60 | 60 |  *
61 | | - * Note: Caller must be holding the processes mmap_sem lock.
| 61 | + * Note: Caller must be holding the process's mmap_lock.
62 | 62 |  */
63 | 63 | static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
64 | 64 | 		struct kbase_va_region *reg,
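The comment update above tracks the kernel 5.8 rename of mm_struct's mmap_sem field to mmap_lock. A driver that must build against both old and new kernels often papers over the rename with a small shim; the sketch below is an illustrative assumption, not part of this patch:

```c
/* Compatibility sketch (assumption, not in this patch): before 5.8 the
 * field is still called mmap_sem, so map the new name onto the old one. */
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
#define mmap_lock mmap_sem	/* same rw_semaphore; only the name changed */
#endif
```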
.. | ..
611 | 611 | 	real_flags |= KBASE_REG_SHARE_IN;
612 | 612 |
613 | 613 | 	/* now we can lock down the context, and find the region */
614 | | -	down_write(&current->mm->mmap_sem);
| 614 | +	down_write(&current->mm->mmap_lock);
615 | 615 | 	kbase_gpu_vm_lock(kctx);
616 | 616 |
617 | 617 | 	/* Validate the region */
.. | ..
683 | 683 |
684 | 684 | out_unlock:
685 | 685 | 	kbase_gpu_vm_unlock(kctx);
686 | | -	up_write(&current->mm->mmap_sem);
| 686 | +	up_write(&current->mm->mmap_lock);
687 | 687 | out:
688 | 688 | 	return ret;
689 | 689 | }
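These hunks swap the field name inside an otherwise unchanged write-side critical section. On 5.8+ kernels the same section is usually written with the dedicated helpers from <linux/mmap_lock.h>; the helpers are standard kernel API, but their use here is a sketch of an alternative, not what the patch does:

```c
#include <linux/mmap_lock.h>

static void example_write_section(struct mm_struct *mm)
{
	mmap_write_lock(mm);	/* equivalent to down_write(&mm->mmap_lock) */
	/* ... validate and modify the region under exclusive access ... */
	mmap_write_unlock(mm);	/* equivalent to up_write(&mm->mmap_lock) */
}
```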
.. | ..
1019 | 1019 | 		*flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
1020 | 1020 | 	}
1021 | 1021 |
1022 | | -	down_read(&current->mm->mmap_sem);
| 1022 | +	down_read(&current->mm->mmap_lock);
1023 | 1023 |
1024 | 1024 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
1025 | 1025 | 	faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
.. | ..
1033 | 1033 | 			pages, NULL);
1034 | 1034 | #endif
1035 | 1035 |
1036 | | -	up_read(&current->mm->mmap_sem);
| 1036 | +	up_read(&current->mm->mmap_lock);
1037 | 1037 |
1038 | 1038 | 	if (faulted_pages != *va_pages)
1039 | 1039 | 		goto fault_mismatch;
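get_user_pages() must run under the mmap read lock, which is why this call site takes the read-side pair. Its signature has also shifted across releases (the tsk/mm arguments were dropped in 4.6, as the surrounding #if shows). A minimal sketch against a mid-5.x signature, with helper names of my choosing:

```c
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/sched.h>

/* Sketch (assumption): pin nr user pages under the mmap read lock. */
static long example_pin_user_range(unsigned long address, unsigned long nr,
				   struct page **pages)
{
	long faulted;

	mmap_read_lock(current->mm);	/* == down_read(&mm->mmap_lock) */
	faulted = get_user_pages(address, nr, 0 /* gup_flags */, pages, NULL);
	mmap_read_unlock(current->mm);

	return faulted;	/* the caller treats faulted != nr as an error */
}
```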
.. | ..
1498 | 1498 | 		return -EINVAL;
1499 | 1499 | 	}
1500 | 1500 |
1501 | | -	down_write(&current->mm->mmap_sem);
| 1501 | +	down_write(&current->mm->mmap_lock);
1502 | 1502 | 	kbase_gpu_vm_lock(kctx);
1503 | 1503 |
1504 | 1504 | 	/* Validate the region */
.. | ..
1540 | 1540 | 	 * No update to the mm so downgrade the writer lock to a read
1541 | 1541 | 	 * lock so other readers aren't blocked after this point.
1542 | 1542 | 	 */
1543 | | -	downgrade_write(&current->mm->mmap_sem);
| 1543 | +	downgrade_write(&current->mm->mmap_lock);
1544 | 1544 | 	read_locked = true;
1545 | 1545 |
1546 | 1546 | 	/* Allocate some more pages */
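downgrade_write() atomically turns the exclusive hold into a shared one, so no writer can slip in during the transition and waiting readers proceed immediately; the matching unlock must then be the read-side one, which is exactly what the read_locked flag below tracks. The same shape with the 5.8+ helpers, as an illustrative sketch rather than the patch's code:

```c
#include <linux/mmap_lock.h>

static void example_downgrade(struct mm_struct *mm)
{
	mmap_write_lock(mm);
	/* ... the part that genuinely needs exclusive access ... */
	mmap_write_downgrade(mm);	/* == downgrade_write(&mm->mmap_lock) */
	/* ... read-only tail; concurrent readers are no longer blocked ... */
	mmap_read_unlock(mm);		/* a downgraded lock is released as a read lock */
}
```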
.. | ..
1596 | 1596 | out_unlock:
1597 | 1597 | 	kbase_gpu_vm_unlock(kctx);
1598 | 1598 | 	if (read_locked)
1599 | | -		up_read(&current->mm->mmap_sem);
| 1599 | +		up_read(&current->mm->mmap_lock);
1600 | 1600 | 	else
1601 | | -		up_write(&current->mm->mmap_sem);
| 1601 | +		up_write(&current->mm->mmap_lock);
1602 | 1602 |
1603 | 1603 | 	return res;
1604 | 1604 | }
.. | ..
1651 | 1651 |
1652 | 1652 |
1653 | 1653 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
1654 | | -static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
| 1654 | +static vm_fault_t kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1655 | 1655 | {
1656 | 1656 | #else
1657 | | -static int kbase_cpu_vm_fault(struct vm_fault *vmf)
| 1657 | +static vm_fault_t kbase_cpu_vm_fault(struct vm_fault *vmf)
1658 | 1658 | {
1659 | 1659 | 	struct vm_area_struct *vma = vmf->vma;
1660 | 1660 | #endif
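vm_fault_t was introduced in 4.17 so fault handlers return a dedicated fault-code type instead of a plain int; returning it from both prototype variants keeps the two #if branches consistent. Drivers that also target pre-4.17 kernels typically add a fallback typedef along these lines (an assumption, not shown in the patch):

```c
#include <linux/version.h>
#include <linux/mm.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
/* Older kernels have no vm_fault_t; fault handlers returned int. */
typedef int vm_fault_t;
#endif
```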
.. | ..
1662 | 1662 | 	pgoff_t rel_pgoff;
1663 | 1663 | 	size_t i;
1664 | 1664 | 	pgoff_t addr;
| 1665 | +	vm_fault_t ret = VM_FAULT_SIGBUS;
1665 | 1666 |
1666 | 1667 | 	KBASE_DEBUG_ASSERT(map);
1667 | 1668 | 	KBASE_DEBUG_ASSERT(map->count > 0);
.. | ..
1686 | 1687 | 	addr = (pgoff_t)(vmf->address >> PAGE_SHIFT);
1687 | 1688 | #endif
1688 | 1689 | 	while (i < map->alloc->nents && (addr < vma->vm_end >> PAGE_SHIFT)) {
1689 | | -		int ret = vm_insert_pfn(vma, addr << PAGE_SHIFT,
| 1690 | +		ret = vmf_insert_pfn(vma, addr << PAGE_SHIFT,
1690 | 1691 | 				PFN_DOWN(map->alloc->pages[i]));
1691 | | -		if (ret < 0 && ret != -EBUSY)
| 1692 | +		if (ret != VM_FAULT_NOPAGE)
1692 | 1693 | 			goto locked_bad_fault;
1693 | 1694 |
1694 | 1695 | 		i++; addr++;
.. | ..
1700 | 1701 |
1701 | 1702 | locked_bad_fault:
1702 | 1703 | 	kbase_gpu_vm_unlock(map->kctx);
1703 | | -	return VM_FAULT_SIGBUS;
| 1704 | +	return ret;
1704 | 1705 | }
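The loop's failure test changes to match vmf_insert_pfn()'s contract: it returns a vm_fault_t code rather than an errno, with VM_FAULT_NOPAGE signalling that the PFN was installed (vm_insert_pfn()'s 0 and -EBUSY successes both map to it). Caching the last code in ret lets locked_bad_fault report the real cause instead of a blanket VM_FAULT_SIGBUS. A condensed sketch of the pattern, with illustrative names:

```c
/* Sketch (assumption): install a run of PFNs from a fault handler. */
static vm_fault_t example_insert_pfns(struct vm_area_struct *vma,
				      unsigned long addr,
				      const unsigned long *pfns, size_t n)
{
	vm_fault_t ret = VM_FAULT_SIGBUS;	/* nothing mapped at all */
	size_t i;

	for (i = 0; i < n; i++, addr += PAGE_SIZE) {
		ret = vmf_insert_pfn(vma, addr, pfns[i]);
		if (ret != VM_FAULT_NOPAGE)
			return ret;	/* propagate the precise fault code */
	}
	return VM_FAULT_NOPAGE;
}
```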
1705 | 1706 |
1706 | 1707 | const struct vm_operations_struct kbase_vm_ops = {
.. | ..
1767 | 1768 | 	vma->vm_flags |= VM_PFNMAP;
1768 | 1769 | 	for (i = 0; i < nr_pages; i++) {
1769 | 1770 | 		unsigned long pfn = PFN_DOWN(page_array[i + start_off]);
| 1771 | +		vm_fault_t ret;
1770 | 1772 |
1771 | | -		err = vm_insert_pfn(vma, addr, pfn);
1772 | | -		if (WARN_ON(err))
| 1773 | +		ret = vmf_insert_pfn(vma, addr, pfn);
| 1774 | +		if (WARN_ON(ret != VM_FAULT_NOPAGE)) {
| 1775 | +			if (ret == VM_FAULT_OOM)
| 1776 | +				err = -ENOMEM;
| 1777 | +			else
| 1778 | +				err = -EFAULT;
1773 | 1779 | 			break;
| 1780 | +		}
1774 | 1781 |
1775 | 1782 | 		addr += PAGE_SIZE;
1776 | 1783 | 	}
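Unlike the fault handler, this mmap-time path must hand back an errno rather than a vm_fault_t, so the patch maps VM_FAULT_OOM to -ENOMEM and everything else to -EFAULT. Folded into a small helper, the same translation reads like this sketch (example_vmf_to_errno is a hypothetical name, not in the patch):

```c
/* Sketch (assumption): translate vmf_insert_pfn()'s result to an errno. */
static int example_vmf_to_errno(vm_fault_t ret)
{
	if (ret == VM_FAULT_NOPAGE)
		return 0;		/* PFN installed successfully */
	if (ret == VM_FAULT_OOM)
		return -ENOMEM;		/* allocation failed during the insert */
	return -EFAULT;			/* any other fault code */
}
```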
.. | ..
1950 | 1957 | {
1951 | 1958 | 	struct mm_struct *mm = current->mm;
1952 | 1959 | 	(void)kctx;
1953 | | -	down_read(&mm->mmap_sem);
| 1960 | +	down_read(&mm->mmap_lock);
1954 | 1961 | }
1955 | 1962 |
1956 | 1963 | void kbase_os_mem_map_unlock(struct kbase_context *kctx)
1957 | 1964 | {
1958 | 1965 | 	struct mm_struct *mm = current->mm;
1959 | 1966 | 	(void)kctx;
1960 | | -	up_read(&mm->mmap_sem);
| 1967 | +	up_read(&mm->mmap_lock);
1961 | 1968 | }
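With the 5.8+ helper API these two wrappers reduce to one call each; a sketch of the equivalent bodies using the standard mmap_read_lock()/mmap_read_unlock() helpers, which this patch itself does not adopt:

```c
#include <linux/mmap_lock.h>
#include <linux/sched.h>

void example_os_mem_map_lock(struct kbase_context *kctx)
{
	(void)kctx;			/* kept for the existing call sites */
	mmap_read_lock(current->mm);	/* == down_read(&mm->mmap_lock) */
}

void example_os_mem_map_unlock(struct kbase_context *kctx)
{
	(void)kctx;
	mmap_read_unlock(current->mm);	/* == up_read(&mm->mmap_lock) */
}
```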
1962 | 1969 |
1963 | 1970 | static int kbasep_reg_mmap(struct kbase_context *kctx,
---|