hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/mm/process_vm_access.c
@@ -1,26 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * linux/mm/process_vm_access.c
  *
  * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/compat.h>
 #include <linux/mm.h>
 #include <linux/uio.h>
 #include <linux/sched.h>
+#include <linux/compat.h>
 #include <linux/sched/mm.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
-
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-#endif
 
 /**
  * process_vm_rw_pages - read/write pages from task specified
@@ -46,12 +40,11 @@
 		if (copy > len)
 			copy = len;
 
-		if (vm_write) {
+		if (vm_write)
 			copied = copy_page_from_iter(page, offset, copy, iter);
-			set_page_dirty_lock(page);
-		} else {
+		else
 			copied = copy_page_to_iter(page, offset, copy, iter);
-		}
+
 		len -= copied;
 		if (copied < copy && iov_iter_count(iter))
 			return -EFAULT;
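
Note: copy_page_from_iter() and copy_page_to_iter() both return the number of bytes actually copied, so a single short-copy check covers either direction. A minimal sketch of that pattern (the helper name is hypothetical, not part of this file), assuming only the linux/uio.h iterator API:

#include <linux/highmem.h>
#include <linux/uio.h>

/* Hypothetical helper: copy one page's worth of data to or from an iterator. */
static int demo_copy_one_page(struct page *page, unsigned int offset, size_t len,
			      struct iov_iter *iter, bool vm_write)
{
	size_t copied;

	if (vm_write)
		copied = copy_page_from_iter(page, offset, len, iter);
	else
		copied = copy_page_to_iter(page, offset, len, iter);

	/* A short copy while the iterator still has data means a fault in the iovec. */
	if (copied < len && iov_iter_count(iter))
		return -EFAULT;
	return 0;
}
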
@@ -100,7 +93,7 @@
 		flags |= FOLL_WRITE;
 
 	while (!rc && nr_pages && iov_iter_count(iter)) {
-		int pages = min(nr_pages, max_pages_per_loop);
+		int pinned_pages = min(nr_pages, max_pages_per_loop);
 		int locked = 1;
 		size_t bytes;
 
@@ -109,15 +102,16 @@
 		 * access remotely because task/mm might not
 		 * current/current->mm
 		 */
-		down_read(&mm->mmap_sem);
-		pages = get_user_pages_remote(task, mm, pa, pages, flags,
-					      process_pages, NULL, &locked);
+		mmap_read_lock(mm);
+		pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
+						     flags, process_pages,
+						     NULL, &locked);
 		if (locked)
-			up_read(&mm->mmap_sem);
-		if (pages <= 0)
+			mmap_read_unlock(mm);
+		if (pinned_pages <= 0)
 			return -EFAULT;
 
-		bytes = pages * PAGE_SIZE - start_offset;
+		bytes = pinned_pages * PAGE_SIZE - start_offset;
 		if (bytes > len)
 			bytes = len;
 
@@ -126,10 +120,12 @@
 					 vm_write);
 		len -= bytes;
 		start_offset = 0;
-		nr_pages -= pages;
-		pa += pages * PAGE_SIZE;
-		while (pages)
-			put_page(process_pages[--pages]);
+		nr_pages -= pinned_pages;
+		pa += pinned_pages * PAGE_SIZE;
+
+		/* If vm_write is set, the pages need to be made dirty: */
+		unpin_user_pages_dirty_lock(process_pages, pinned_pages,
+					    vm_write);
 	}
 
 	return rc;
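
Note on the pin/unpin pairing used in this function: pin_user_pages_remote() takes FOLL_PIN references on the target pages under the mmap read lock, and unpin_user_pages_dirty_lock() later drops those references and, when its final argument is true, marks the pages dirty. A minimal sketch of that lifecycle (hypothetical helper and array size, not part of this patch), assuming the kernel version this diff targets, where pin_user_pages_remote() still takes a vmas parameter:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

#define DEMO_MAX_PAGES 16

/* Hypothetical helper: pin up to DEMO_MAX_PAGES pages at @addr in @mm,
 * let the caller touch them, then drop the pins (dirtying on write). */
static long demo_pin_cycle(struct mm_struct *mm, unsigned long addr,
			   unsigned long nr, bool write)
{
	struct page *pages[DEMO_MAX_PAGES];
	unsigned int gup_flags = write ? FOLL_WRITE : 0;
	int locked = 1;
	long pinned;

	if (nr > DEMO_MAX_PAGES)
		nr = DEMO_MAX_PAGES;

	mmap_read_lock(mm);
	pinned = pin_user_pages_remote(mm, addr, nr, gup_flags, pages,
				       NULL, &locked);
	/* The callee may drop mmap_lock itself; @locked tells us whether it did. */
	if (locked)
		mmap_read_unlock(mm);
	if (pinned <= 0)
		return -EFAULT;

	/* ... read from or write to the pinned pages here ... */

	/* Release the FOLL_PIN references; dirty the pages only if we wrote. */
	unpin_user_pages_dirty_lock(pages, pinned, write);
	return pinned;
}
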
@@ -208,7 +204,7 @@
 	if (!mm || IS_ERR(mm)) {
 		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
 		/*
-		 * Explicitly map EACCES to EPERM as EPERM is a more a
+		 * Explicitly map EACCES to EPERM as EPERM is a more
 		 * appropriate error code for process_vw_readv/writev
 		 */
 		if (rc == -EACCES)
@@ -278,20 +274,18 @@
 	if (rc < 0)
 		return rc;
 	if (!iov_iter_count(&iter))
-		goto free_iovecs;
-
-	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
-				   iovstack_r, &iov_r);
-	if (rc <= 0)
-		goto free_iovecs;
-
+		goto free_iov_l;
+	iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r,
+				in_compat_syscall());
+	if (IS_ERR(iov_r)) {
+		rc = PTR_ERR(iov_r);
+		goto free_iov_l;
+	}
 	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
-
-free_iovecs:
 	if (iov_r != iovstack_r)
 		kfree(iov_r);
+free_iov_l:
 	kfree(iov_l);
-
 	return rc;
 }
 
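
Note on the helpers used in the hunk above: iovec_from_user() copies a user-space iovec array into kernel memory, reusing the caller's on-stack array when the segment count fits in UIO_FASTIOV, and returns either that array, a kmalloc'ed one, or an ERR_PTR; its last argument selects the compat layout, which is what lets one entry point serve compat callers via in_compat_syscall(). A minimal sketch of the resulting allocate/check/free pattern (hypothetical function, not part of this patch), assuming the post-5.10 iov_iter helpers:

#include <linux/uio.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical helper: fetch an iovec array from user space, walk it,
 * and free it again, handling native and compat callers the same way. */
static ssize_t demo_use_user_iovec(const struct iovec __user *uvec,
				   unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov;
	ssize_t rc = 0;

	iov = iovec_from_user(uvec, nr_segs, UIO_FASTIOV, iovstack,
			      in_compat_syscall());
	if (IS_ERR(iov))
		return PTR_ERR(iov);

	/* ... use iov[0..nr_segs - 1] here ... */

	if (iov != iovstack)	/* only kfree() what iovec_from_user() allocated */
		kfree(iov);
	return rc;
}
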
@@ -309,68 +303,3 @@
 {
 	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
 }
-
-#ifdef CONFIG_COMPAT
-
-static ssize_t
-compat_process_vm_rw(compat_pid_t pid,
-		     const struct compat_iovec __user *lvec,
-		     unsigned long liovcnt,
-		     const struct compat_iovec __user *rvec,
-		     unsigned long riovcnt,
-		     unsigned long flags, int vm_write)
-{
-	struct iovec iovstack_l[UIO_FASTIOV];
-	struct iovec iovstack_r[UIO_FASTIOV];
-	struct iovec *iov_l = iovstack_l;
-	struct iovec *iov_r = iovstack_r;
-	struct iov_iter iter;
-	ssize_t rc = -EFAULT;
-	int dir = vm_write ? WRITE : READ;
-
-	if (flags != 0)
-		return -EINVAL;
-
-	rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
-	if (rc < 0)
-		return rc;
-	if (!iov_iter_count(&iter))
-		goto free_iovecs;
-	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
-					  UIO_FASTIOV, iovstack_r,
-					  &iov_r);
-	if (rc <= 0)
-		goto free_iovecs;
-
-	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
-
-free_iovecs:
-	if (iov_r != iovstack_r)
-		kfree(iov_r);
-	kfree(iov_l);
-	return rc;
-}
-
-COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
-		       const struct compat_iovec __user *, lvec,
-		       compat_ulong_t, liovcnt,
-		       const struct compat_iovec __user *, rvec,
-		       compat_ulong_t, riovcnt,
-		       compat_ulong_t, flags)
-{
-	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
-				    riovcnt, flags, 0);
-}
-
-COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
-		       const struct compat_iovec __user *, lvec,
-		       compat_ulong_t, liovcnt,
-		       const struct compat_iovec __user *, rvec,
-		       compat_ulong_t, riovcnt,
-		       compat_ulong_t, flags)
-{
-	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
-				    riovcnt, flags, 1);
-}
-
-#endif
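
For reference, the user-space view of the syscalls this file implements; compat and native callers now reach the same entry points above. A small self-contained example (not part of this patch), assuming glibc's process_vm_readv() wrapper from sys/uio.h; it reads from its own address space so no extra ptrace permission is needed, whereas a real caller would pass another process's pid and addresses:

/* Build: gcc -o pvm_demo pvm_demo.c */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[32] = "hello from the remote side";
	char dst[32] = { 0 };
	struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	/* Copy sizeof(src) bytes from the "remote" buffer into the local one. */
	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}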