2023-12-06 08f87f769b595151be1afeff53e144f543faa614
--- a/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
+++ b/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
@@ -58,7 +58,7 @@
  * Shrink (or completely remove) all CPU mappings which reference the shrunk
  * part of the allocation.
  *
- * Note: Caller must be holding the processes mmap_sem lock.
+ * Note: Caller must be holding the process's mmap_lock.
  */
 static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
 		struct kbase_va_region *reg,
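
Most of the hunks below are one-word substitutions inside otherwise unchanged down_read()/down_write()/up_read()/up_write() calls: kernel 5.8 renamed mm_struct's mmap_sem field to mmap_lock, while the field itself remains a struct rw_semaphore. An out-of-tree driver that has to build on both sides of the rename typically hides the field name behind a shim; a minimal sketch, with KBASE_MMAP_LOCK as a hypothetical name that is not part of this driver:

    #include <linux/mm_types.h>
    #include <linux/version.h>

    /* Both field names denote the same struct rw_semaphore, so the existing
     * down_read()/down_write()/downgrade_write() calls work unchanged once
     * the right field is selected.
     */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
    #define KBASE_MMAP_LOCK(mm)	(&(mm)->mmap_lock)
    #else
    #define KBASE_MMAP_LOCK(mm)	(&(mm)->mmap_sem)
    #endif

    /* usage: down_write(KBASE_MMAP_LOCK(current->mm)); */

Kernels >= 5.8 also provide dedicated wrappers (mmap_read_lock(), mmap_write_lock(), and friends in <linux/mmap_lock.h>), which is the preferred spelling when older kernels need not be supported.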
@@ -611,7 +611,7 @@
 	real_flags |= KBASE_REG_SHARE_IN;
 
 	/* now we can lock down the context, and find the region */
-	down_write(&current->mm->mmap_sem);
+	down_write(&current->mm->mmap_lock);
 	kbase_gpu_vm_lock(kctx);
 
 	/* Validate the region */
@@ -683,7 +683,7 @@
 
 out_unlock:
 	kbase_gpu_vm_unlock(kctx);
-	up_write(&current->mm->mmap_sem);
+	up_write(&current->mm->mmap_lock);
 out:
 	return ret;
 }
@@ -1019,7 +1019,7 @@
 		*flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
 	}
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&current->mm->mmap_lock);
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
 	faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
@@ -1033,7 +1033,7 @@
 			pages, NULL);
 #endif
 
-	up_read(&current->mm->mmap_sem);
+	up_read(&current->mm->mmap_lock);
 
 	if (faulted_pages != *va_pages)
 		goto fault_mismatch;
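
The pre-existing #if around this call exists because get_user_pages() itself changed shape over time: 4.6 dropped the explicit task/mm arguments from the current-process variant, 4.9 folded the write/force booleans into a single gup_flags bitmask, and (much later, in 6.5) the vmas parameter was removed as well. A hedged sketch of a wrapper over the pre-6.5 signatures; kbase_gup is a hypothetical name, not part of the driver:

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/version.h>

    /* Pin nr_pages user pages for writing, starting at start; the caller
     * must hold the mmap lock for read, as the surrounding code does.
     */
    static long kbase_gup(unsigned long start, unsigned long nr_pages,
    		struct page **pages)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
    	/* explicit task/mm pair, separate write/force ints */
    	return get_user_pages(current, current->mm, start, nr_pages,
    			1 /* write */, 0 /* force */, pages, NULL);
    #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
    	/* 4.6: task/mm dropped for the current-process variant */
    	return get_user_pages(start, nr_pages, 1, 0, pages, NULL);
    #else
    	/* 4.9: write/force merged into a gup_flags bitmask */
    	return get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
    #endif
    }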
@@ -1498,7 +1498,7 @@
 		return -EINVAL;
 	}
 
-	down_write(&current->mm->mmap_sem);
+	down_write(&current->mm->mmap_lock);
 	kbase_gpu_vm_lock(kctx);
 
 	/* Validate the region */
@@ -1540,7 +1540,7 @@
 	 * No update to the mm so downgrade the writer lock to a read
 	 * lock so other readers aren't blocked after this point.
 	 */
-	downgrade_write(&current->mm->mmap_sem);
+	downgrade_write(&current->mm->mmap_lock);
 	read_locked = true;
 
 	/* Allocate some more pages */
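
This is the one locking site that is more than a rename: the region is grown under the write lock, and once it is known that the mm itself will not be modified further, the writer downgrades to a reader so that concurrent faults can proceed during the slow page allocation. The same pattern spelled with the >= 5.8 mmap locking API; grow_mapping() and populate_pages() are hypothetical stand-ins for the region update and the allocation:

    #include <linux/mmap_lock.h>

    static int grow_then_populate(struct mm_struct *mm)
    {
    	int err;

    	mmap_write_lock(mm);
    	err = grow_mapping(mm);		/* may change the VA layout */
    	if (err) {
    		mmap_write_unlock(mm);	/* error path still in write mode */
    		return err;
    	}
    	/* no further mm updates: let readers (e.g. faults) in early */
    	mmap_write_downgrade(mm);
    	err = populate_pages(mm);	/* long-running, read lock held */
    	mmap_read_unlock(mm);		/* NOT mmap_write_unlock() */
    	return err;
    }

The unlock site in the next hunk has to remember which mode it ended up in, hence the read_locked flag.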
@@ -1596,9 +1596,9 @@
 out_unlock:
 	kbase_gpu_vm_unlock(kctx);
 	if (read_locked)
-		up_read(&current->mm->mmap_sem);
+		up_read(&current->mm->mmap_lock);
 	else
-		up_write(&current->mm->mmap_sem);
+		up_write(&current->mm->mmap_lock);
 
 	return res;
 }
@@ -1651,10 +1651,10 @@
 
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
-static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static vm_fault_t kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 #else
-static int kbase_cpu_vm_fault(struct vm_fault *vmf)
+static vm_fault_t kbase_cpu_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 #endif
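
The remaining hunks switch the fault path from int to vm_fault_t. Two version boundaries are involved: 4.11 removed the vma parameter from .fault handlers (the VMA travels in vmf->vma instead), which the existing #if already covers, and vm_fault_t itself only appeared around 4.17, so compiling the new signatures on the older branch presumably relies on a backport or a compat typedef along these lines (an assumption; it is not shown in this patch):

    #include <linux/version.h>

    /* Pre-4.17 fault handlers returned int with the same VM_FAULT_* values,
     * so a plain typedef is enough to build the vm_fault_t signatures there.
     */
    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
    typedef int vm_fault_t;
    #endif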
@@ -1662,6 +1662,7 @@
 	pgoff_t rel_pgoff;
 	size_t i;
 	pgoff_t addr;
+	vm_fault_t ret = VM_FAULT_SIGBUS;
 
 	KBASE_DEBUG_ASSERT(map);
 	KBASE_DEBUG_ASSERT(map->count > 0);
@@ -1686,9 +1687,9 @@
 	addr = (pgoff_t)(vmf->address >> PAGE_SHIFT);
 #endif
 	while (i < map->alloc->nents && (addr < vma->vm_end >> PAGE_SHIFT)) {
-		int ret = vm_insert_pfn(vma, addr << PAGE_SHIFT,
+		ret = vmf_insert_pfn(vma, addr << PAGE_SHIFT,
 				PFN_DOWN(map->alloc->pages[i]));
-		if (ret < 0 && ret != -EBUSY)
+		if (ret != VM_FAULT_NOPAGE)
 			goto locked_bad_fault;
 
 		i++; addr++;
@@ -1700,7 +1701,7 @@
 
 locked_bad_fault:
 	kbase_gpu_vm_unlock(map->kctx);
-	return VM_FAULT_SIGBUS;
+	return ret;
 }
 
 const struct vm_operations_struct kbase_vm_ops = {
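
vm_insert_pfn() returned 0 or -errno, with -EBUSY meaning the PTE was already present, and the handler had to translate that into a fault code; its replacement vmf_insert_pfn() (added around 4.20) returns a vm_fault_t directly, with VM_FAULT_NOPAGE as the success value, so the result can now be propagated as-is, which is exactly what the reworked locked_bad_fault path does. A minimal before/after sketch; my_vm_fault and my_lookup_pfn are hypothetical:

    static vm_fault_t my_vm_fault(struct vm_fault *vmf)
    {
    	unsigned long pfn = my_lookup_pfn(vmf);	/* hypothetical lookup */

    #if 0	/* retired idiom: 0/-errno, -EBUSY meant "already mapped" */
    	int err = vm_insert_pfn(vmf->vma, vmf->address, pfn);
    	if (err < 0 && err != -EBUSY)
    		return VM_FAULT_SIGBUS;
    	return VM_FAULT_NOPAGE;
    #else	/* the return value is already a vm_fault_t */
    	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
    #endif
    }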
@@ -1767,10 +1768,16 @@
 	vma->vm_flags |= VM_PFNMAP;
 	for (i = 0; i < nr_pages; i++) {
 		unsigned long pfn = PFN_DOWN(page_array[i + start_off]);
+		vm_fault_t ret;
 
-		err = vm_insert_pfn(vma, addr, pfn);
-		if (WARN_ON(err))
+		ret = vmf_insert_pfn(vma, addr, pfn);
+		if (WARN_ON(ret != VM_FAULT_NOPAGE)) {
+			if (ret == VM_FAULT_OOM)
+				err = -ENOMEM;
+			else
+				err = -EFAULT;
 			break;
+		}
 
 		addr += PAGE_SIZE;
 	}
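
In the mmap() path the caller needs an -errno rather than a vm_fault_t, so the patch keeps the existing err variable and folds VM_FAULT_OOM to -ENOMEM and every other failure to -EFAULT, roughly the translation vm_insert_pfn() used to perform internally. The same mapping as a small helper; kbase_vmf_to_errno is a hypothetical name, not part of the patch:

    static int kbase_vmf_to_errno(vm_fault_t ret)
    {
    	/* VM_FAULT_NOPAGE is the success code for PFN insertions */
    	if (ret == VM_FAULT_NOPAGE)
    		return 0;
    	return (ret == VM_FAULT_OOM) ? -ENOMEM : -EFAULT;
    }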
@@ -1950,14 +1957,14 @@
 {
 	struct mm_struct *mm = current->mm;
 	(void)kctx;
-	down_read(&mm->mmap_sem);
+	down_read(&mm->mmap_lock);
 }
 
 void kbase_os_mem_map_unlock(struct kbase_context *kctx)
 {
 	struct mm_struct *mm = current->mm;
 	(void)kctx;
-	up_read(&mm->mmap_sem);
+	up_read(&mm->mmap_lock);
 }
 
 static int kbasep_reg_mmap(struct kbase_context *kctx,