forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/fs/aio.c
@@ -27,7 +27,6 @@
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
-#include <linux/mmu_context.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
@@ -42,8 +41,8 @@
 #include <linux/ramfs.h>
 #include <linux/percpu-refcount.h>
 #include <linux/mount.h>
+#include <linux/pseudo_fs.h>

-#include <asm/kmap_types.h>
 #include <linux/uaccess.h>
 #include <linux/nospec.h>

@@ -67,8 +66,14 @@
 	unsigned	header_length;	/* size of aio_ring */


-	struct io_event		io_events[0];
+	struct io_event		io_events[];
 }; /* 128 bytes + ring size */
+
+/*
+ * Plugging is meant to work with larger batches of IOs. If we don't
+ * have more than the below, then don't bother setting up a plug.
+ */
+#define AIO_PLUG_THRESHOLD	2

 #define AIO_RING_PAGES	8

@@ -121,7 +126,6 @@
 	long			nr_pages;

 	struct rcu_work		free_rwork;	/* see free_ioctx() */
-	struct work_struct	free_work;	/* see free_ioctx() */

 	/*
 	 * signals when all in-flight requests are done
@@ -246,15 +250,12 @@
 	return file;
 }

-static struct dentry *aio_mount(struct file_system_type *fs_type,
-				int flags, const char *dev_name, void *data)
+static int aio_init_fs_context(struct fs_context *fc)
 {
-	struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL,
-					   AIO_RING_MAGIC);
-
-	if (!IS_ERR(root))
-		root->d_sb->s_iflags |= SB_I_NOEXEC;
-	return root;
+	if (!init_pseudo(fc, AIO_RING_MAGIC))
+		return -ENOMEM;
+	fc->s_iflags |= SB_I_NOEXEC;
+	return 0;
 }

 /* aio_setup
@@ -265,7 +266,7 @@
 {
 	static struct file_system_type aio_fs = {
 		.name		= "aio",
-		.mount		= aio_mount,
+		.init_fs_context = aio_init_fs_context,
 		.kill_sb	= kill_anon_super,
 	};
 	aio_mnt = kern_mount(&aio_fs);
@@ -422,7 +423,7 @@
 	BUG_ON(PageWriteback(old));
 	get_page(new);

-	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
+	rc = migrate_page_move_mapping(mapping, new, old, 1);
 	if (rc != MIGRATEPAGE_SUCCESS) {
 		put_page(new);
 		goto out_unlock;
@@ -518,16 +519,16 @@
 	ctx->mmap_size = nr_pages * PAGE_SIZE;
 	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

-	if (down_write_killable(&mm->mmap_sem)) {
+	if (mmap_write_lock_killable(mm)) {
 		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
 		return -EINTR;
 	}

-	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
-				       PROT_READ | PROT_WRITE,
-				       MAP_SHARED, 0, &unused, NULL);
-	up_write(&mm->mmap_sem);
+	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
+				 PROT_READ | PROT_WRITE,
+				 MAP_SHARED, 0, &unused, NULL);
+	mmap_write_unlock(mm);
 	if (IS_ERR((void *)ctx->mmap_base)) {
 		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
@@ -609,9 +610,9 @@
  * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
  * now it's safe to cancel any that need to be.
  */
-static void free_ioctx_users_work(struct work_struct *work)
+static void free_ioctx_users(struct percpu_ref *ref)
 {
-	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+	struct kioctx *ctx = container_of(ref, struct kioctx, users);
 	struct aio_kiocb *req;

 	spin_lock_irq(&ctx->ctx_lock);
@@ -627,14 +628,6 @@

 	percpu_ref_kill(&ctx->reqs);
 	percpu_ref_put(&ctx->reqs);
-}
-
-static void free_ioctx_users(struct percpu_ref *ref)
-{
-	struct kioctx *ctx = container_of(ref, struct kioctx, users);
-
-	INIT_WORK(&ctx->free_work, free_ioctx_users_work);
-	schedule_work(&ctx->free_work);
 }

 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
@@ -1038,6 +1031,11 @@
 	if (unlikely(!req))
 		return NULL;

+	if (unlikely(!get_reqs_available(ctx))) {
+		kmem_cache_free(kiocb_cachep, req);
+		return NULL;
+	}
+
 	percpu_ref_get(&ctx->reqs);
 	req->ki_ctx = ctx;
 	INIT_LIST_HEAD(&req->ki_list);
@@ -1076,6 +1074,8 @@

 static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
+	if (iocb->ki_eventfd)
+		eventfd_ctx_put(iocb->ki_eventfd);
 	if (iocb->ki_filp)
 		fput(iocb->ki_filp);
 	percpu_ref_put(&iocb->ki_ctx->reqs);
@@ -1143,10 +1143,8 @@
 	 * eventfd. The eventfd_signal() function is safe to be called
 	 * from IRQ context.
 	 */
-	if (iocb->ki_eventfd) {
+	if (iocb->ki_eventfd)
 		eventfd_signal(iocb->ki_eventfd, 1);
-		eventfd_ctx_put(iocb->ki_eventfd);
-	}

 	/*
 	 * We have to order our ring_info tail store above and test
@@ -1469,7 +1467,7 @@

 		req->ki_ioprio = iocb->aio_reqprio;
 	} else
-		req->ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+		req->ki_ioprio = get_current_ioprio();

 	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
 	if (unlikely(ret))
@@ -1479,8 +1477,9 @@
 	return 0;
 }

-static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
-		bool vectored, bool compat, struct iov_iter *iter)
+static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
+		struct iovec **iovec, bool vectored, bool compat,
+		struct iov_iter *iter)
 {
 	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
 	size_t len = iocb->aio_nbytes;
@@ -1490,12 +1489,8 @@
 		*iovec = NULL;
 		return ret;
 	}
-#ifdef CONFIG_COMPAT
-	if (compat)
-		return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
-				iter);
-#endif
-	return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
+
+	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
 }

 static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
@@ -1512,19 +1507,19 @@
 		 * may be already running. Just fail this IO with EINTR.
 		 */
 		ret = -EINTR;
-		/*FALLTHRU*/
+		fallthrough;
 	default:
 		req->ki_complete(req, ret, 0);
 	}
 }

-static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+static int aio_read(struct kiocb *req, const struct iocb *iocb,
 			bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
 	struct file *file;
-	ssize_t ret;
+	int ret;

 	ret = aio_prep_rw(req, iocb);
 	if (ret)
@@ -1537,7 +1532,7 @@
 		return -EINVAL;

 	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
-	if (ret)
+	if (ret < 0)
 		return ret;
 	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
 	if (!ret)
@@ -1546,13 +1541,13 @@
 	return ret;
 }

-static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+static int aio_write(struct kiocb *req, const struct iocb *iocb,
 			 bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
 	struct file *file;
-	ssize_t ret;
+	int ret;

 	ret = aio_prep_rw(req, iocb);
 	if (ret)
@@ -1565,7 +1560,7 @@
 		return -EINVAL;

 	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
-	if (ret)
+	if (ret < 0)
 		return ret;
 	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
 	if (!ret) {
@@ -1577,7 +1572,7 @@
 		 * we return to userspace.
 		 */
 		if (S_ISREG(file_inode(file)->i_mode)) {
-			__sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+			sb_start_write(file_inode(file)->i_sb);
 			__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
 		}
 		req->ki_flags |= IOCB_WRITE;
@@ -1766,7 +1761,7 @@
 	list_del_init(&req->wait.entry);
 	list_del(&iocb->ki_list);
 	iocb->ki_res.res = mangle_poll(mask);
-	if (iocb->ki_eventfd && eventfd_signal_count()) {
+	if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
 		iocb = NULL;
 		INIT_WORK(&req->work, aio_poll_put_work);
 		schedule_work(&req->work);
@@ -1841,7 +1836,7 @@
 	add_wait_queue(head, &pt->iocb->poll.wait);
 }

-static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
 	struct kioctx *ctx = aiocb->ki_ctx;
 	struct poll_iocb *req = &aiocb->poll;
@@ -1917,59 +1912,31 @@
 }

 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
-			   struct iocb __user *user_iocb, bool compat)
+			   struct iocb __user *user_iocb, struct aio_kiocb *req,
+			   bool compat)
 {
-	struct aio_kiocb *req;
-	ssize_t ret;
-
-	/* enforce forwards compatibility on users */
-	if (unlikely(iocb->aio_reserved2)) {
-		pr_debug("EINVAL: reserve field set\n");
-		return -EINVAL;
-	}
-
-	/* prevent overflows */
-	if (unlikely(
-	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
-	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
-	    ((ssize_t)iocb->aio_nbytes < 0)
-	   )) {
-		pr_debug("EINVAL: overflow check\n");
-		return -EINVAL;
-	}
-
-	if (!get_reqs_available(ctx))
-		return -EAGAIN;
-
-	ret = -EAGAIN;
-	req = aio_get_req(ctx);
-	if (unlikely(!req))
-		goto out_put_reqs_available;
-
 	req->ki_filp = fget(iocb->aio_fildes);
-	ret = -EBADF;
 	if (unlikely(!req->ki_filp))
-		goto out_put_req;
+		return -EBADF;

 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+		struct eventfd_ctx *eventfd;
 		/*
 		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
 		 * instance of the file* now. The file descriptor must be
 		 * an eventfd() fd, and will be signaled for each completed
 		 * event using the eventfd_signal() function.
 		 */
-		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
-		if (IS_ERR(req->ki_eventfd)) {
-			ret = PTR_ERR(req->ki_eventfd);
-			req->ki_eventfd = NULL;
-			goto out_put_req;
-		}
+		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
+		if (IS_ERR(eventfd))
+			return PTR_ERR(eventfd);
+
+		req->ki_eventfd = eventfd;
 	}

-	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
-	if (unlikely(ret)) {
+	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
 		pr_debug("EFAULT: aio_key\n");
-		goto out_put_req;
+		return -EFAULT;
 	}

 	req->ki_res.obj = (u64)(unsigned long)user_iocb;
@@ -1979,61 +1946,70 @@

 	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
-		ret = aio_read(&req->rw, iocb, false, compat);
-		break;
+		return aio_read(&req->rw, iocb, false, compat);
 	case IOCB_CMD_PWRITE:
-		ret = aio_write(&req->rw, iocb, false, compat);
-		break;
+		return aio_write(&req->rw, iocb, false, compat);
 	case IOCB_CMD_PREADV:
-		ret = aio_read(&req->rw, iocb, true, compat);
-		break;
+		return aio_read(&req->rw, iocb, true, compat);
 	case IOCB_CMD_PWRITEV:
-		ret = aio_write(&req->rw, iocb, true, compat);
-		break;
+		return aio_write(&req->rw, iocb, true, compat);
 	case IOCB_CMD_FSYNC:
-		ret = aio_fsync(&req->fsync, iocb, false);
-		break;
+		return aio_fsync(&req->fsync, iocb, false);
 	case IOCB_CMD_FDSYNC:
-		ret = aio_fsync(&req->fsync, iocb, true);
-		break;
+		return aio_fsync(&req->fsync, iocb, true);
 	case IOCB_CMD_POLL:
-		ret = aio_poll(req, iocb);
-		break;
+		return aio_poll(req, iocb);
 	default:
 		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-
-	/* Done with the synchronous reference */
-	iocb_put(req);
-
-	/*
-	 * If ret is 0, we'd either done aio_complete() ourselves or have
-	 * arranged for that to be done asynchronously. Anything non-zero
-	 * means that we need to destroy req ourselves.
-	 */
-	if (!ret)
-		return 0;
-
-out_put_req:
-	if (req->ki_eventfd)
-		eventfd_ctx_put(req->ki_eventfd);
-	iocb_destroy(req);
-out_put_reqs_available:
-	put_reqs_available(ctx, 1);
-	return ret;
 }

 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 bool compat)
 {
+	struct aio_kiocb *req;
 	struct iocb iocb;
+	int err;

 	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
 		return -EFAULT;

-	return __io_submit_one(ctx, &iocb, user_iocb, compat);
+	/* enforce forwards compatibility on users */
+	if (unlikely(iocb.aio_reserved2)) {
+		pr_debug("EINVAL: reserve field set\n");
+		return -EINVAL;
+	}
+
+	/* prevent overflows */
+	if (unlikely(
+	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+	    ((ssize_t)iocb.aio_nbytes < 0)
+	   )) {
+		pr_debug("EINVAL: overflow check\n");
+		return -EINVAL;
+	}
+
+	req = aio_get_req(ctx);
+	if (unlikely(!req))
+		return -EAGAIN;
+
+	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+	/* Done with the synchronous reference */
+	iocb_put(req);
+
+	/*
+	 * If err is 0, we'd either done aio_complete() ourselves or have
+	 * arranged for that to be done asynchronously. Anything non-zero
+	 * means that we need to destroy req ourselves.
+	 */
+	if (unlikely(err)) {
+		iocb_destroy(req);
+		put_reqs_available(ctx, 1);
+	}
+	return err;
 }

 /* sys_io_submit:
@@ -2068,7 +2044,8 @@
 	if (nr > ctx->nr_events)
 		nr = ctx->nr_events;

-	blk_start_plug(&plug);
+	if (nr > AIO_PLUG_THRESHOLD)
+		blk_start_plug(&plug);
 	for (i = 0; i < nr; i++) {
 		struct iocb __user *user_iocb;

@@ -2081,7 +2058,8 @@
 		if (ret)
 			break;
 	}
-	blk_finish_plug(&plug);
+	if (nr > AIO_PLUG_THRESHOLD)
+		blk_finish_plug(&plug);

 	percpu_ref_put(&ctx->users);
 	return i ? i : ret;
@@ -2108,7 +2086,8 @@
 	if (nr > ctx->nr_events)
 		nr = ctx->nr_events;

-	blk_start_plug(&plug);
+	if (nr > AIO_PLUG_THRESHOLD)
+		blk_start_plug(&plug);
 	for (i = 0; i < nr; i++) {
 		compat_uptr_t user_iocb;

@@ -2121,7 +2100,8 @@
 		if (ret)
 			break;
 	}
-	blk_finish_plug(&plug);
+	if (nr > AIO_PLUG_THRESHOLD)
+		blk_finish_plug(&plug);

 	percpu_ref_put(&ctx->users);
 	return i ? i : ret;
@@ -2212,11 +2192,13 @@
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
+#ifdef CONFIG_64BIT
+
 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
-		struct timespec __user *, timeout)
+		struct __kernel_timespec __user *, timeout)
 {
	struct timespec64	ts;
	int			ret;
@@ -2230,6 +2212,8 @@
 	return ret;
 }

+#endif
+
 struct __aio_sigset {
 	const sigset_t __user	*sigmask;
 	size_t		sigsetsize;
@@ -2240,12 +2224,12 @@
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
-		struct timespec __user *, timeout,
+		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
 {
 	struct __aio_sigset	ksig = { NULL, };
-	sigset_t	ksigmask, sigsaved;
 	struct timespec64	ts;
+	bool interrupted;
 	int ret;

 	if (timeout && unlikely(get_timespec64(&ts, timeout)))
@@ -2254,43 +2238,70 @@
 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
 		return -EFAULT;

-	if (ksig.sigmask) {
-		if (ksig.sigsetsize != sizeof(sigset_t))
-			return -EINVAL;
-		if (copy_from_user(&ksigmask, ksig.sigmask, sizeof(ksigmask)))
-			return -EFAULT;
-		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
-		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
-	}
+	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+	if (ret)
+		return ret;

 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
-	if (signal_pending(current)) {
-		if (ksig.sigmask) {
-			current->saved_sigmask = sigsaved;
-			set_restore_sigmask();
-		}

-		if (!ret)
-			ret = -ERESTARTNOHAND;
-	} else {
-		if (ksig.sigmask)
-			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-	}
+	interrupted = signal_pending(current);
+	restore_saved_sigmask_unless(interrupted);
+	if (interrupted && !ret)
+		ret = -ERESTARTNOHAND;

 	return ret;
 }

-#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
-		       compat_long_t, min_nr,
-		       compat_long_t, nr,
-		       struct io_event __user *, events,
-		       struct compat_timespec __user *, timeout)
+#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
+
+SYSCALL_DEFINE6(io_pgetevents_time32,
+		aio_context_t, ctx_id,
+		long, min_nr,
+		long, nr,
+		struct io_event __user *, events,
+		struct old_timespec32 __user *, timeout,
+		const struct __aio_sigset __user *, usig)
+{
+	struct __aio_sigset	ksig = { NULL, };
+	struct timespec64	ts;
+	bool interrupted;
+	int ret;
+
+	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
+		return -EFAULT;
+
+	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
+		return -EFAULT;
+
+
+	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+	if (ret)
+		return ret;
+
+	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
+
+	interrupted = signal_pending(current);
+	restore_saved_sigmask_unless(interrupted);
+	if (interrupted && !ret)
+		ret = -ERESTARTNOHAND;
+
+	return ret;
+}
+
+#endif
+
+#if defined(CONFIG_COMPAT_32BIT_TIME)
+
+SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
+		__s32, min_nr,
+		__s32, nr,
+		struct io_event __user *, events,
+		struct old_timespec32 __user *, timeout)
 {
 	struct timespec64 t;
 	int ret;

-	if (timeout && compat_get_timespec64(&t, timeout))
+	if (timeout && get_old_timespec32(&t, timeout))
 		return -EFAULT;

 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
@@ -2299,52 +2310,81 @@
 	return ret;
 }

+#endif
+
+#ifdef CONFIG_COMPAT

 struct __compat_aio_sigset {
-	compat_sigset_t __user	*sigmask;
+	compat_uptr_t		sigmask;
 	compat_size_t		sigsetsize;
 };
+
+#if defined(CONFIG_COMPAT_32BIT_TIME)

 COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
-		       struct compat_timespec __user *, timeout,
+		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
 {
-	struct __compat_aio_sigset ksig = { NULL, };
-	sigset_t ksigmask, sigsaved;
+	struct __compat_aio_sigset ksig = { 0, };
 	struct timespec64 t;
+	bool interrupted;
 	int ret;

-	if (timeout && compat_get_timespec64(&t, timeout))
+	if (timeout && get_old_timespec32(&t, timeout))
 		return -EFAULT;

 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
 		return -EFAULT;

-	if (ksig.sigmask) {
-		if (ksig.sigsetsize != sizeof(compat_sigset_t))
-			return -EINVAL;
-		if (get_compat_sigset(&ksigmask, ksig.sigmask))
-			return -EFAULT;
-		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
-		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
-	}
+	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
+	if (ret)
+		return ret;

 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
-	if (signal_pending(current)) {
-		if (ksig.sigmask) {
-			current->saved_sigmask = sigsaved;
-			set_restore_sigmask();
-		}
-		if (!ret)
-			ret = -ERESTARTNOHAND;
-	} else {
-		if (ksig.sigmask)
-			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-	}
+
+	interrupted = signal_pending(current);
+	restore_saved_sigmask_unless(interrupted);
+	if (interrupted && !ret)
+		ret = -ERESTARTNOHAND;
+
+	return ret;
+}
+
+#endif
+
+COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
+		       compat_aio_context_t, ctx_id,
+		       compat_long_t, min_nr,
+		       compat_long_t, nr,
+		       struct io_event __user *, events,
+		       struct __kernel_timespec __user *, timeout,
+		       const struct __compat_aio_sigset __user *, usig)
+{
+	struct __compat_aio_sigset ksig = { 0, };
+	struct timespec64 t;
+	bool interrupted;
+	int ret;
+
+	if (timeout && get_timespec64(&t, timeout))
+		return -EFAULT;
+
+	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
+		return -EFAULT;
+
+	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
+	if (ret)
+		return ret;
+
+	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
+
+	interrupted = signal_pending(current);
+	restore_saved_sigmask_unless(interrupted);
+	if (interrupted && !ret)
+		ret = -ERESTARTNOHAND;

 	return ret;
 }
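
Note (review aid, not part of the patch): the timeout-related hunks above only change which kernel-side timespec layouts the io_getevents/io_pgetevents entry points accept (__kernel_timespec on 64-bit, old_timespec32 behind CONFIG_COMPAT_32BIT_TIME); the userspace submission/completion flow is unchanged. The sketch below is a minimal way to exercise the native path on a 64-bit build, where the C library's struct timespec matches __kernel_timespec. The local sys_* wrapper names and the /etc/hostname test path are illustrative assumptions, not taken from this diff.

/* Illustrative userspace sketch only -- not part of kernel/fs/aio.c. */
#include <linux/aio_abi.h>	/* struct iocb, struct io_event, IOCB_CMD_PREAD */
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <time.h>

/* Thin wrappers over the raw AIO syscalls (no libaio dependency). */
static long sys_io_setup(unsigned nr, aio_context_t *ctx)
{
	return syscall(SYS_io_setup, nr, ctx);
}
static long sys_io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(SYS_io_submit, ctx, nr, iocbpp);
}
static long sys_io_getevents(aio_context_t ctx, long min_nr, long nr,
			     struct io_event *events, struct timespec *timeout)
{
	return syscall(SYS_io_getevents, ctx, min_nr, nr, events, timeout);
}
static long sys_io_destroy(aio_context_t ctx)
{
	return syscall(SYS_io_destroy, ctx);
}

int main(void)
{
	aio_context_t ctx = 0;
	char buf[4096];
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	int fd = open("/etc/hostname", O_RDONLY);	/* arbitrary readable test file */

	if (fd < 0 || sys_io_setup(8, &ctx) < 0)
		return 1;

	/* Queue a single asynchronous read of the first 4 KiB. */
	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = fd;
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_offset = 0;

	if (sys_io_submit(ctx, 1, cbs) != 1)
		return 1;

	/* Wait up to 5 seconds for the completion event. */
	if (sys_io_getevents(ctx, 1, 1, &ev, &ts) == 1)
		printf("read %lld bytes\n", (long long)ev.res);

	sys_io_destroy(ctx);
	close(fd);
	return 0;
}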