```diff
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/mm/nommu.c
  *
  * Replacement code for mm functions to support CPU's that don't
  * have any form of memory management unit (thus no virtual memory).
  *
- * See Documentation/nommu-mmap.txt
+ * See Documentation/admin-guide/mm/nommu-mmap.rst
  *
  * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
```
```diff
@@ -107,96 +108,8 @@
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
-
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		unsigned int foll_flags, struct page **pages,
-		struct vm_area_struct **vmas, int *nonblocking)
-{
-	struct vm_area_struct *vma;
-	unsigned long vm_flags;
-	int i;
-
-	/* calculate required read or write permissions.
-	 * If FOLL_FORCE is set, we only require the "MAY" flags.
-	 */
-	vm_flags = (foll_flags & FOLL_WRITE) ?
-			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= (foll_flags & FOLL_FORCE) ?
-			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
-	for (i = 0; i < nr_pages; i++) {
-		vma = find_vma(mm, start);
-		if (!vma)
-			goto finish_or_fault;
-
-		/* protect what we can, including chardevs */
-		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
-		    !(vm_flags & vma->vm_flags))
-			goto finish_or_fault;
-
-		if (pages) {
-			pages[i] = virt_to_page(start);
-			if (pages[i])
-				get_page(pages[i]);
-		}
-		if (vmas)
-			vmas[i] = vma;
-		start = (start + PAGE_SIZE) & PAGE_MASK;
-	}
-
-	return i;
-
-finish_or_fault:
-	return i ? : -EFAULT;
-}
-
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- *   slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-long get_user_pages(unsigned long start, unsigned long nr_pages,
-		    unsigned int gup_flags, struct page **pages,
-		    struct vm_area_struct **vmas)
-{
-	return __get_user_pages(current, current->mm, start, nr_pages,
-				gup_flags, pages, vmas, NULL);
-}
-EXPORT_SYMBOL(get_user_pages);
-
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			   unsigned int gup_flags, struct page **pages,
-			   int *locked)
-{
-	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-static long __get_user_pages_unlocked(struct task_struct *tsk,
-			struct mm_struct *mm, unsigned long start,
-			unsigned long nr_pages, struct page **pages,
-			unsigned int gup_flags)
-{
-	long ret;
-	down_read(&mm->mmap_sem);
-	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
-			       NULL, NULL);
-	up_read(&mm->mmap_sem);
-	return ret;
-}
-
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     struct page **pages, unsigned int gup_flags)
-{
-	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 pages, gup_flags);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
```
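Two things happen in the hunk above. First, `ksize()` switches to the `page_size()` helper; for reference, the helper (added to `include/linux/mm.h` in the v5.4 era) wraps exactly the expression it replaces:

```c
/* Sketch of the helper the hunk switches to, per include/linux/mm.h:
 * returns the number of bytes in the page, which for a compound head
 * page covers the entire higher-order allocation. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}
```

Second, the whole nommu `get_user_pages()` family is deleted here; in mainline these fallbacks were consolidated into mm/gup.c under its `!CONFIG_MMU` branch rather than dropped, so the behaviour is preserved, just relocated.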
```diff
@@ -227,7 +140,7 @@
 }
 EXPORT_SYMBOL(vfree);
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 {
 	/*
 	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
```
```diff
@@ -237,27 +150,41 @@
 }
 EXPORT_SYMBOL(__vmalloc);
 
-void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+		unsigned long start, unsigned long end, gfp_t gfp_mask,
+		pgprot_t prot, unsigned long vm_flags, int node,
+		const void *caller)
 {
-	return __vmalloc(size, flags, PAGE_KERNEL);
+	return __vmalloc(size, gfp_mask);
+}
+
+void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+		int node, const void *caller)
+{
+	return __vmalloc(size, gfp_mask);
+}
+
+static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
+{
+	void *ret;
+
+	ret = __vmalloc(size, flags);
+	if (ret) {
+		struct vm_area_struct *vma;
+
+		mmap_write_lock(current->mm);
+		vma = find_vma(current->mm, (unsigned long)ret);
+		if (vma)
+			vma->vm_flags |= VM_USERMAP;
+		mmap_write_unlock(current->mm);
+	}
+
+	return ret;
 }
 
 void *vmalloc_user(unsigned long size)
 {
-	void *ret;
-
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	if (ret) {
-		struct vm_area_struct *vma;
-
-		down_write(&current->mm->mmap_sem);
-		vma = find_vma(current->mm, (unsigned long)ret);
-		if (vma)
-			vma->vm_flags |= VM_USERMAP;
-		up_write(&current->mm->mmap_sem);
-	}
-
-	return ret;
+	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vmalloc_user);
 
```
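The `pgprot_t` parameter disappears from `__vmalloc()` because nothing ever passed anything but `PAGE_KERNEL`; callers that genuinely need a different protection go through `__vmalloc_node_range()`, which the new stub accepts and, on nommu, deliberately ignores (there are no page protections to apply). The hunk also factors the `VM_USERMAP` marking out of `vmalloc_user()` into `__vmalloc_user_flags()`. A hypothetical call site, for illustration:

```c
/* Hypothetical call site, for illustration. The old form was
 *	buf = __vmalloc(len, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
 * and PAGE_KERNEL is now implied by the helper: */
void *buf = __vmalloc(len, GFP_KERNEL | __GFP_ZERO);
```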
```diff
@@ -306,7 +233,7 @@
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
 }
 EXPORT_SYMBOL(vmalloc);
 
```
```diff
@@ -324,8 +251,7 @@
  */
 void *vzalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-			PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vzalloc);
 
```
```diff
@@ -365,23 +291,6 @@
 EXPORT_SYMBOL(vzalloc_node);
 
 /**
- * vmalloc_exec - allocate virtually contiguous, executable memory
- * @size: allocation size
- *
- * Kernel-internal function to allocate enough pages to cover @size
- * the page level allocator and map them into contiguous and
- * executable kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- */
-
-void *vmalloc_exec(unsigned long size)
-{
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
-}
-
-/**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  * @size: allocation size
  *
```
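`vmalloc_exec()` was removed tree-wide around v5.8; the few MMU-side users were converted to call `__vmalloc_node_range()` with an explicit `PAGE_KERNEL_EXEC`, matching the nine-argument stub this patch adds earlier in the file. A sketch of the replacement pattern (mainline idiom, shown under that assumption):

```c
/* Hypothetical replacement for a vmalloc_exec(size) call on an MMU
 * kernel; on nommu the stub above ignores the protection argument. */
void *p = __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			       GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
			       NUMA_NO_NODE, __builtin_return_address(0));
```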
```diff
@@ -390,7 +299,7 @@
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_32);
 
```
```diff
@@ -427,7 +336,7 @@
 }
 EXPORT_SYMBOL(vunmap);
 
-void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
+void *vm_map_ram(struct page **pages, unsigned int count, int node)
 {
 	BUG();
 	return NULL;
```
```diff
@@ -445,25 +354,6 @@
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
-/*
- * Implement a stub for vmalloc_sync_[un]mapping() if the architecture
- * chose not to have one.
- */
-void __weak vmalloc_sync_mappings(void)
-{
-}
-
-void __weak vmalloc_sync_unmappings(void)
-{
-}
-
-struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
-{
-	BUG();
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(alloc_vm_area);
-
 void free_vm_area(struct vm_struct *area)
 {
 	BUG();
```
```diff
@@ -476,6 +366,20 @@
 	return -EINVAL;
 }
 EXPORT_SYMBOL(vm_insert_page);
+
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+			unsigned long num)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+			unsigned long num)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_map_pages_zero);
 
 /*
  * sys_brk() for the most part doesn't need the global kernel
```
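`vm_map_pages()` and `vm_map_pages_zero()` are helpers that let a driver insert an array of pages into a user VMA in one call (the `_zero` variant mapping from offset zero regardless of `vm_pgoff`); since nommu cannot remap that way, the stubs can only refuse with `-EINVAL`. A hypothetical `->mmap` handler built on the helper, purely for illustration:

```c
/* Hypothetical driver state and handler; all names are illustrative. */
struct mydrv {
	struct page **pages;
	unsigned long npages;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *drv = file->private_data;

	/* On a nommu kernel the stub above makes this return -EINVAL. */
	return vm_map_pages(vma, drv->pages, drv->npages);
}
```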
```diff
@@ -505,7 +409,7 @@
 	/*
 	 * Ok, looks good - let it rip.
 	 */
-	flush_icache_range(mm->brk, brk);
+	flush_icache_user_range(mm->brk, brk);
 	return mm->brk = brk;
 }
 
```
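The rename reflects the v5.8 split of the instruction-cache API into kernel and user variants: `brk` hands memory to userspace, so the user-address flavour is the right one here. Architectures that draw no distinction fall back to an alias, roughly (per include/asm-generic/cacheflush.h, to the best of my reading):

```c
/* Sketch of the asm-generic fallback: without a dedicated user variant,
 * flush_icache_user_range() is simply flush_icache_range(). */
#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif
```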
```diff
@@ -654,7 +558,7 @@
  * add a VMA into a process's mm_struct in the appropriate place in the list
  * and tree and add to the address space's page tree also if not an anonymous
  * page
- * - should be called with mm->mmap_sem held writelocked
+ * - should be called with mm->mmap_lock held writelocked
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
```
```diff
@@ -714,7 +618,7 @@
 	if (rb_prev)
 		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 
-	__vma_link_list(mm, vma, prev, parent);
+	__vma_link_list(mm, vma, prev);
 }
 
 /*
```
```diff
@@ -750,13 +654,7 @@
 	/* remove from the MM's tree and list */
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 
-	if (vma->vm_prev)
-		vma->vm_prev->vm_next = vma->vm_next;
-	else
-		mm->mmap = vma->vm_next;
-
-	if (vma->vm_next)
-		vma->vm_next->vm_prev = vma->vm_prev;
+	__vma_unlink_list(mm, vma);
 }
 
 /*
```
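The open-coded unlink is replaced by a shared helper (its counterpart `__vma_link_list()` lost the unused rb-parent argument two hunks up). For reference, `__vma_unlink_list()` in mainline's mm/util.c performs the same list surgery the removed lines spelled out, roughly:

```c
/* Sketch of the consolidated helper: detach @vma from the mm's
 * doubly-linked VMA list, fixing up the list head if it was first. */
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}
```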
```diff
@@ -774,7 +672,7 @@
 
 /*
  * look up the first VMA in which addr resides, NULL if none
- * - should be called with mm->mmap_sem at least held readlocked
+ * - should be called with mm->mmap_lock at least held readlocked
  */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
```
```diff
@@ -820,7 +718,7 @@
 
 /*
  * look up the first VMA exactly that exactly matches addr
- * - should be called with mm->mmap_sem at least held readlocked
+ * - should be called with mm->mmap_lock at least held readlocked
  */
 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 					     unsigned long addr,
```
```diff
@@ -1173,7 +1071,6 @@
 			unsigned long len,
 			unsigned long prot,
 			unsigned long flags,
-			vm_flags_t vm_flags,
 			unsigned long pgoff,
 			unsigned long *populate,
 			struct list_head *uf)
```
```diff
@@ -1181,6 +1078,7 @@
 	struct vm_area_struct *vma;
 	struct vm_region *region;
 	struct rb_node *rb;
+	vm_flags_t vm_flags;
 	unsigned long capabilities, result;
 	int ret;
 
```
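These two hunks turn `vm_flags` from a `do_mmap()` parameter into a local: callers had no business injecting extra VMA flags, so the value is now derived entirely inside from `prot`, `flags` and the file's capabilities, and the next hunk accordingly switches the `|=` to a plain assignment. Caller-side, the change is just one dropped argument (hypothetical call site, for illustration):

```c
/* Hypothetical call site. The old form was
 *	ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate, NULL);
 * with the always-zero vm_flags argument now gone: */
ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate, NULL);
```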
```diff
@@ -1199,7 +1097,7 @@
 
 	/* we've determined that we can make the mapping, now translate what we
 	 * now know into VMA flags */
-	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
+	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
 	/* we're going to need to record the mapping */
 	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
```
```diff
@@ -1338,7 +1236,9 @@
 	add_nommu_region(region);
 
 	/* clear anonymous mappings that don't ask for uninitialized data */
-	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
+	if (!vma->vm_file &&
+	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
+	     !(flags & MAP_UNINITIALIZED)))
 		memset((void *)region->vm_start, 0,
 		       region->vm_end - region->vm_start);
 
```
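The old condition worked because `MAP_UNINITIALIZED` used to be defined as 0 whenever `CONFIG_MMAP_ALLOW_UNINITIALIZED` was off; once the UAPI header stopped depending on kernel configuration and defined the flag unconditionally, the config check had to move into the code via `IS_ENABLED()`, which evaluates to 1 only when the option is configured in. Pulled apart for readability (names as in the hunk above), the predicate is equivalent to:

```c
/* Equivalent sketch of the new test: skip the clearing memset only if
 * the kernel permits uninitialized anon mappings AND userspace asked. */
bool skip_clear = IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) &&
		  (flags & MAP_UNINITIALIZED);

if (!vma->vm_file && !skip_clear)
	memset((void *)region->vm_start, 0,
	       region->vm_end - region->vm_start);
```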
```diff
@@ -1353,7 +1253,7 @@
 	/* we flush the region from the icache only when the first executable
 	 * mapping of it is made */
 	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
-		flush_icache_range(region->vm_start, region->vm_end);
+		flush_icache_user_range(region->vm_start, region->vm_end);
 		region->vm_icache_flushed = true;
 	}
 
```
```diff
@@ -1618,9 +1518,9 @@
 	struct mm_struct *mm = current->mm;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	ret = do_munmap(mm, addr, len, NULL);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 EXPORT_SYMBOL(vm_munmap);
```
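This hunk and several around it belong to the v5.8 mmap locking API conversion: `mm->mmap_sem` was renamed `mm->mmap_lock`, and raw rwsem calls were replaced with named wrappers so future changes to the lock stay in one place. The wrappers are thin, roughly as follows (per include/linux/mmap_lock.h in its original form):

```c
/* Sketch of the wrappers this patch converts to; the mainline versions
 * live in include/linux/mmap_lock.h and operate on the same rwsem. */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	down_write(&mm->mmap_lock);
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	up_write(&mm->mmap_lock);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	return down_read_killable(&mm->mmap_lock);
}
```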
```diff
@@ -1707,17 +1607,15 @@
 {
 	unsigned long ret;
 
-	down_write(&current->mm->mmap_sem);
+	mmap_write_lock(current->mm);
 	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-	up_write(&current->mm->mmap_sem);
+	mmap_write_unlock(current->mm);
 	return ret;
 }
 
-struct page *follow_page_mask(struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags,
-			unsigned int *page_mask)
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+		unsigned int foll_flags)
 {
-	*page_mask = 0;
 	return NULL;
 }
 
```
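`follow_page_mask()` stops being the cross-config entry point: on the MMU side its `page_mask` out-parameter moved into an internal context struct, leaving `follow_page()` as the public signature that nommu must now stub. For comparison, the MMU-side wrapper of this era looks roughly like the following (a sketch; details may differ by kernel version):

```c
/* Sketch of mm/gup.c's wrapper: the context keeps page_mask/pgmap
 * bookkeeping internal, so callers see only the simple signature. */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}
```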
```diff
@@ -1770,12 +1668,21 @@
 }
 EXPORT_SYMBOL(filemap_fault);
 
-void filemap_map_pages(struct vm_fault *vmf,
+vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
 	BUG();
+	return 0;
 }
 EXPORT_SYMBOL(filemap_map_pages);
+
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+bool filemap_allow_speculation(void)
+{
+	BUG();
+	return false;
+}
+#endif
 
 int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long addr, void *buf, int len, unsigned int gup_flags)
```
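`filemap_map_pages()` now returns `vm_fault_t`, so the stub gains a `return 0` purely to satisfy the prototype (`BUG()` never returns). Note that `CONFIG_SPECULATIVE_PAGE_FAULT` is not a mainline Kconfig symbol; this hunk appears to come from trees carrying the speculative page fault series (e.g. Android common kernels). The return type itself is a sparse-checked bitwise type:

```c
/* Per include/linux/mm_types.h: __bitwise lets sparse flag any mixing
 * of VM_FAULT_* status codes with plain integers. */
typedef unsigned int __bitwise vm_fault_t;
```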
```diff
@@ -1783,7 +1690,7 @@
 	struct vm_area_struct *vma;
 	int write = gup_flags & FOLL_WRITE;
 
-	if (down_read_killable(&mm->mmap_sem))
+	if (mmap_read_lock_killable(mm))
 		return 0;
 
 	/* the access must start within one of the target process's mappings */
```
```diff
@@ -1806,7 +1713,7 @@
 		len = 0;
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	return len;
 }
```
```diff
@@ -1857,8 +1764,8 @@
  * @newsize: The proposed filesize of the inode
  *
  * Check the shared mappings on an inode on behalf of a shrinking truncate to
- * make sure that that any outstanding VMAs aren't broken and then shrink the
- * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
+ * make sure that any outstanding VMAs aren't broken and then shrink the
+ * vm_regions that extend beyond so that do_mmap() doesn't
  * automatically grant mappings that are too large.
  */
 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
```
|---|