| Old | New | Change |
---|---|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * linux/mm/process_vm_access.c |
---|
3 | 4 | * |
---|
4 | 5 | * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp. |
---|
5 | | - * |
---|
6 | | - * This program is free software; you can redistribute it and/or |
---|
7 | | - * modify it under the terms of the GNU General Public License |
---|
8 | | - * as published by the Free Software Foundation; either version |
---|
9 | | - * 2 of the License, or (at your option) any later version. |
---|
10 | 6 | */ |
---|
11 | 7 | |
---|
| 8 | +#include <linux/compat.h> |
---|
12 | 9 | #include <linux/mm.h> |
---|
13 | 10 | #include <linux/uio.h> |
---|
14 | 11 | #include <linux/sched.h> |
---|
| 12 | +#include <linux/compat.h> |
---|
<!-- NOTE(review): <linux/compat.h> is now included twice in the resulting file (new lines 8 and 12). Both additions are recorded faithfully here; the duplicate include was removed upstream by a follow-up cleanup commit. -->
15 | 13 | #include <linux/sched/mm.h> |
---|
16 | 14 | #include <linux/highmem.h> |
---|
17 | 15 | #include <linux/ptrace.h> |
---|
18 | 16 | #include <linux/slab.h> |
---|
19 | 17 | #include <linux/syscalls.h> |
---|
20 | | - |
---|
21 | | -#ifdef CONFIG_COMPAT |
---|
22 | | -#include <linux/compat.h> |
---|
23 | | -#endif |
---|
24 | 18 | |
---|
25 | 19 | /** |
---|
26 | 20 | * process_vm_rw_pages - read/write pages from task specified |
---|
.. | .. |
---|
46 | 40 | if (copy > len) |
---|
47 | 41 | copy = len; |
---|
48 | 42 | |
---|
49 | | - if (vm_write) { |
---|
| 43 | + if (vm_write) |
---|
50 | 44 | copied = copy_page_from_iter(page, offset, copy, iter); |
---|
51 | | - set_page_dirty_lock(page); |
---|
52 | | - } else { |
---|
| 45 | + else |
---|
53 | 46 | copied = copy_page_to_iter(page, offset, copy, iter); |
---|
54 | | - } |
---|
| 47 | + |
---|
55 | 48 | len -= copied; |
---|
56 | 49 | if (copied < copy && iov_iter_count(iter)) |
---|
57 | 50 | return -EFAULT; |
---|
.. | .. |
---|
100 | 93 | flags |= FOLL_WRITE; |
---|
101 | 94 | |
---|
102 | 95 | while (!rc && nr_pages && iov_iter_count(iter)) { |
---|
103 | | - int pages = min(nr_pages, max_pages_per_loop); |
---|
| 96 | + int pinned_pages = min(nr_pages, max_pages_per_loop); |
---|
104 | 97 | int locked = 1; |
---|
105 | 98 | size_t bytes; |
---|
106 | 99 | |
---|
.. | .. |
---|
109 | 102 | * access remotely because task/mm might not |
---|
110 | 103 | * current/current->mm |
---|
111 | 104 | */ |
---|
112 | | - down_read(&mm->mmap_sem); |
---|
113 | | - pages = get_user_pages_remote(task, mm, pa, pages, flags, |
---|
114 | | - process_pages, NULL, &locked); |
---|
| 105 | + mmap_read_lock(mm); |
---|
| 106 | + pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages, |
---|
| 107 | + flags, process_pages, |
---|
| 108 | + NULL, &locked); |
---|
115 | 109 | if (locked) |
---|
116 | | - up_read(&mm->mmap_sem); |
---|
117 | | - if (pages <= 0) |
---|
| 110 | + mmap_read_unlock(mm); |
---|
| 111 | + if (pinned_pages <= 0) |
---|
118 | 112 | return -EFAULT; |
---|
119 | 113 | |
---|
120 | | - bytes = pages * PAGE_SIZE - start_offset; |
---|
| 114 | + bytes = pinned_pages * PAGE_SIZE - start_offset; |
---|
121 | 115 | if (bytes > len) |
---|
122 | 116 | bytes = len; |
---|
123 | 117 | |
---|
.. | .. |
---|
126 | 120 | vm_write); |
---|
127 | 121 | len -= bytes; |
---|
128 | 122 | start_offset = 0; |
---|
129 | | - nr_pages -= pages; |
---|
130 | | - pa += pages * PAGE_SIZE; |
---|
131 | | - while (pages) |
---|
132 | | - put_page(process_pages[--pages]); |
---|
| 123 | + nr_pages -= pinned_pages; |
---|
| 124 | + pa += pinned_pages * PAGE_SIZE; |
---|
| 125 | + |
---|
| 126 | + /* If vm_write is set, the pages need to be made dirty: */ |
---|
| 127 | + unpin_user_pages_dirty_lock(process_pages, pinned_pages, |
---|
| 128 | + vm_write); |
---|
133 | 129 | } |
---|
134 | 130 | |
---|
135 | 131 | return rc; |
---|
.. | .. |
---|
208 | 204 | if (!mm || IS_ERR(mm)) { |
---|
209 | 205 | rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; |
---|
210 | 206 | /* |
---|
211 | | - * Explicitly map EACCES to EPERM as EPERM is a more a |
---|
| 207 | + * Explicitly map EACCES to EPERM as EPERM is a more |
---|
212 | 208 | * appropriate error code for process_vw_readv/writev |
---|
213 | 209 | */ |
---|
214 | 210 | if (rc == -EACCES) |
---|
.. | .. |
---|
278 | 274 | if (rc < 0) |
---|
279 | 275 | return rc; |
---|
280 | 276 | if (!iov_iter_count(&iter)) |
---|
281 | | - goto free_iovecs; |
---|
282 | | - |
---|
283 | | - rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV, |
---|
284 | | - iovstack_r, &iov_r); |
---|
285 | | - if (rc <= 0) |
---|
286 | | - goto free_iovecs; |
---|
287 | | - |
---|
| 277 | + goto free_iov_l; |
---|
| 278 | + iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r, |
---|
| 279 | + in_compat_syscall()); |
---|
| 280 | + if (IS_ERR(iov_r)) { |
---|
| 281 | + rc = PTR_ERR(iov_r); |
---|
| 282 | + goto free_iov_l; |
---|
| 283 | + } |
---|
288 | 284 | rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write); |
---|
289 | | - |
---|
290 | | -free_iovecs: |
---|
291 | 285 | if (iov_r != iovstack_r) |
---|
292 | 286 | kfree(iov_r); |
---|
| 287 | +free_iov_l: |
---|
293 | 288 | kfree(iov_l); |
---|
294 | | - |
---|
295 | 289 | return rc; |
---|
296 | 290 | } |
---|
297 | 291 | |
---|
.. | .. |
---|
309 | 303 | { |
---|
310 | 304 | return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1); |
---|
311 | 305 | } |
---|
312 | | - |
---|
313 | | -#ifdef CONFIG_COMPAT |
---|
314 | | - |
---|
315 | | -static ssize_t |
---|
316 | | -compat_process_vm_rw(compat_pid_t pid, |
---|
317 | | - const struct compat_iovec __user *lvec, |
---|
318 | | - unsigned long liovcnt, |
---|
319 | | - const struct compat_iovec __user *rvec, |
---|
320 | | - unsigned long riovcnt, |
---|
321 | | - unsigned long flags, int vm_write) |
---|
322 | | -{ |
---|
323 | | - struct iovec iovstack_l[UIO_FASTIOV]; |
---|
324 | | - struct iovec iovstack_r[UIO_FASTIOV]; |
---|
325 | | - struct iovec *iov_l = iovstack_l; |
---|
326 | | - struct iovec *iov_r = iovstack_r; |
---|
327 | | - struct iov_iter iter; |
---|
328 | | - ssize_t rc = -EFAULT; |
---|
329 | | - int dir = vm_write ? WRITE : READ; |
---|
330 | | - |
---|
331 | | - if (flags != 0) |
---|
332 | | - return -EINVAL; |
---|
333 | | - |
---|
334 | | - rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter); |
---|
335 | | - if (rc < 0) |
---|
336 | | - return rc; |
---|
337 | | - if (!iov_iter_count(&iter)) |
---|
338 | | - goto free_iovecs; |
---|
339 | | - rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, |
---|
340 | | - UIO_FASTIOV, iovstack_r, |
---|
341 | | - &iov_r); |
---|
342 | | - if (rc <= 0) |
---|
343 | | - goto free_iovecs; |
---|
344 | | - |
---|
345 | | - rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write); |
---|
346 | | - |
---|
347 | | -free_iovecs: |
---|
348 | | - if (iov_r != iovstack_r) |
---|
349 | | - kfree(iov_r); |
---|
350 | | - kfree(iov_l); |
---|
351 | | - return rc; |
---|
352 | | -} |
---|
353 | | - |
---|
354 | | -COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid, |
---|
355 | | - const struct compat_iovec __user *, lvec, |
---|
356 | | - compat_ulong_t, liovcnt, |
---|
357 | | - const struct compat_iovec __user *, rvec, |
---|
358 | | - compat_ulong_t, riovcnt, |
---|
359 | | - compat_ulong_t, flags) |
---|
360 | | -{ |
---|
361 | | - return compat_process_vm_rw(pid, lvec, liovcnt, rvec, |
---|
362 | | - riovcnt, flags, 0); |
---|
363 | | -} |
---|
364 | | - |
---|
365 | | -COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid, |
---|
366 | | - const struct compat_iovec __user *, lvec, |
---|
367 | | - compat_ulong_t, liovcnt, |
---|
368 | | - const struct compat_iovec __user *, rvec, |
---|
369 | | - compat_ulong_t, riovcnt, |
---|
370 | | - compat_ulong_t, flags) |
---|
371 | | -{ |
---|
372 | | - return compat_process_vm_rw(pid, lvec, liovcnt, rvec, |
---|
373 | | - riovcnt, flags, 1); |
---|
374 | | -} |
---|
375 | | - |
---|
376 | | -#endif |
---|