From 102a0743326a03cd1a1202ceda21e175b7d3575c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:20:52 +0000
Subject: [PATCH] add new system file

---
 kernel/fs/fuse/dev.c | 950 ++++++++++++++++++++++++++++++-----------------------------
 1 files changed, 483 insertions(+), 467 deletions(-)

diff --git a/kernel/fs/fuse/dev.c b/kernel/fs/fuse/dev.c
index 94788b0..fbd7a8b 100644
--- a/kernel/fs/fuse/dev.c
+++ b/kernel/fs/fuse/dev.c
@@ -26,6 +26,10 @@
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");

+/* Ordinary requests have even IDs, while interrupts IDs are odd */
+#define FUSE_INT_REQ_BIT (1ULL << 0)
+#define FUSE_REQ_ID_STEP (1ULL << 1)
+
 static struct kmem_cache *fuse_req_cachep;

 static struct fuse_dev *fuse_get_dev(struct file *file)
@@ -37,75 +41,31 @@
 	return READ_ONCE(file->private_data);
 }

-static void fuse_request_init(struct fuse_req *req, struct page **pages,
-			      struct fuse_page_desc *page_descs,
-			      unsigned npages)
+static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
 {
-	memset(req, 0, sizeof(*req));
-	memset(pages, 0, sizeof(*pages) * npages);
-	memset(page_descs, 0, sizeof(*page_descs) * npages);
 	INIT_LIST_HEAD(&req->list);
 	INIT_LIST_HEAD(&req->intr_entry);
 	init_waitqueue_head(&req->waitq);
 	refcount_set(&req->count, 1);
-	req->pages = pages;
-	req->page_descs = page_descs;
-	req->max_pages = npages;
 	__set_bit(FR_PENDING, &req->flags);
+	req->fm = fm;
 }

-static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
+static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
 {
-	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
-	if (req) {
-		struct page **pages;
-		struct fuse_page_desc *page_descs;
+	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
+	if (req)
+		fuse_request_init(fm, req);

-		if (npages <= FUSE_REQ_INLINE_PAGES) {
-			pages = req->inline_pages;
-			page_descs = req->inline_page_descs;
-		} else {
-			pages = kmalloc_array(npages, sizeof(struct page *),
-					      flags);
-			page_descs =
-				kmalloc_array(npages,
-					      sizeof(struct fuse_page_desc),
-					      flags);
-		}
-
-		if (!pages || !page_descs) {
-			kfree(pages);
-			kfree(page_descs);
-			kmem_cache_free(fuse_req_cachep, req);
-			return NULL;
-		}
-
-		fuse_request_init(req, pages, page_descs, npages);
-	}
 	return req;
 }

-struct fuse_req *fuse_request_alloc(unsigned npages)
+static void fuse_request_free(struct fuse_req *req)
 {
-	return __fuse_request_alloc(npages, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(fuse_request_alloc);
-
-struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
-{
-	return __fuse_request_alloc(npages, GFP_NOFS);
-}
-
-void fuse_request_free(struct fuse_req *req)
-{
-	if (req->pages != req->inline_pages) {
-		kfree(req->pages);
-		kfree(req->page_descs);
-	}
 	kmem_cache_free(fuse_req_cachep, req);
 }

-void __fuse_get_request(struct fuse_req *req)
+static void __fuse_get_request(struct fuse_req *req)
 {
 	refcount_inc(&req->count);
 }
@@ -142,9 +102,11 @@
 	}
 }

-static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
-				       bool for_background)
+static void fuse_put_request(struct fuse_req *req);
+
+static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
 {
+	struct fuse_conn *fc = fm->fc;
 	struct fuse_req *req;
 	int err;
 	atomic_inc(&fc->num_waiting);
@@ -166,7 +128,7 @@
 	if (fc->conn_error)
 		goto out;

-	req = fuse_request_alloc(npages);
+	req = fuse_request_alloc(fm, GFP_KERNEL);
 	err = -ENOMEM;
 	if (!req) {
 		if (for_background)
@@ -184,7 +146,7 @@
 	if (unlikely(req->in.h.uid == ((uid_t)-1) || req->in.h.gid == ((gid_t)-1))) {
-		fuse_put_request(fc, req);
+		fuse_put_request(req);
 		return ERR_PTR(-EOVERFLOW);
 	}
 	return req;
@@ -194,108 +156,20 @@
 	return ERR_PTR(err);
 }

-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
+static void fuse_put_request(struct fuse_req *req)
 {
-	return __fuse_get_req(fc, npages, false);
-}
-EXPORT_SYMBOL_GPL(fuse_get_req);
+	struct fuse_conn *fc = req->fm->fc;

-struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
-					     unsigned npages)
-{
-	return __fuse_get_req(fc, npages, true);
-}
-EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
-
-/*
- * Return request in fuse_file->reserved_req.  However that may
- * currently be in use.  If that is the case, wait for it to become
- * available.
- */
-static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
-					 struct file *file)
-{
-	struct fuse_req *req = NULL;
-	struct fuse_file *ff = file->private_data;
-
-	do {
-		wait_event(fc->reserved_req_waitq, ff->reserved_req);
-		spin_lock(&fc->lock);
-		if (ff->reserved_req) {
-			req = ff->reserved_req;
-			ff->reserved_req = NULL;
-			req->stolen_file = get_file(file);
-		}
-		spin_unlock(&fc->lock);
-	} while (!req);
-
-	return req;
-}
-
-/*
- * Put stolen request back into fuse_file->reserved_req
- */
-static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
-{
-	struct file *file = req->stolen_file;
-	struct fuse_file *ff = file->private_data;
-
-	spin_lock(&fc->lock);
-	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
-	BUG_ON(ff->reserved_req);
-	ff->reserved_req = req;
-	wake_up_all(&fc->reserved_req_waitq);
-	spin_unlock(&fc->lock);
-	fput(file);
-}
-
-/*
- * Gets a requests for a file operation, always succeeds
- *
- * This is used for sending the FLUSH request, which must get to
- * userspace, due to POSIX locks which may need to be unlocked.
- *
- * If allocation fails due to OOM, use the reserved request in
- * fuse_file.
- *
- * This is very unlikely to deadlock accidentally, since the
- * filesystem should not have it's own file open.  If deadlock is
- * intentional, it can still be broken by "aborting" the filesystem.
- */
-struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
-					     struct file *file)
-{
-	struct fuse_req *req;
-
-	atomic_inc(&fc->num_waiting);
-	wait_event(fc->blocked_waitq, fc->initialized);
-	/* Matches smp_wmb() in fuse_set_initialized() */
-	smp_rmb();
-	req = fuse_request_alloc(0);
-	if (!req)
-		req = get_reserved_req(fc, file);
-
-	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
-	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
-	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
-
-	__set_bit(FR_WAITING, &req->flags);
-	__clear_bit(FR_BACKGROUND, &req->flags);
-	return req;
-}
-
-void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
-{
 	if (refcount_dec_and_test(&req->count)) {
 		if (test_bit(FR_BACKGROUND, &req->flags)) {
 			/*
 			 * We get here in the unlikely case that a background
 			 * request was allocated but not sent
 			 */
-			spin_lock(&fc->lock);
+			spin_lock(&fc->bg_lock);
 			if (!fc->blocked)
 				wake_up(&fc->blocked_waitq);
-			spin_unlock(&fc->lock);
+			spin_unlock(&fc->bg_lock);
 		}

 		if (test_bit(FR_WAITING, &req->flags)) {
@@ -303,15 +177,11 @@
 			fuse_drop_waiting(fc);
 		}

-		if (req->stolen_file)
-			put_reserved_req(fc, req);
-		else
-			fuse_request_free(req);
+		fuse_request_free(req);
 	}
 }
-EXPORT_SYMBOL_GPL(fuse_put_request);

-static unsigned len_args(unsigned numargs, struct fuse_arg *args)
+unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
 {
 	unsigned nbytes = 0;
 	unsigned i;
@@ -321,19 +191,50 @@
 	return nbytes;
 }
+EXPORT_SYMBOL_GPL(fuse_len_args);

-static u64 fuse_get_unique(struct fuse_iqueue *fiq)
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
 {
-	return ++fiq->reqctr;
+	fiq->reqctr += FUSE_REQ_ID_STEP;
+	return fiq->reqctr;
+}
+EXPORT_SYMBOL_GPL(fuse_get_unique);
+
+static unsigned int fuse_req_hash(u64 unique)
+{
+	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
 }

-static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
+/**
+ * A new request is available, wake fiq->waitq
+ */
+static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync)
+__releases(fiq->lock)
+{
+	if (sync)
+		wake_up_sync(&fiq->waitq);
+	else
+		wake_up(&fiq->waitq);
+	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+	spin_unlock(&fiq->lock);
+}
+
+const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
+	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
+	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
+	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
+};
+EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
+
+static void queue_request_and_unlock(struct fuse_iqueue *fiq,
+				     struct fuse_req *req, bool sync)
+__releases(fiq->lock)
 {
 	req->in.h.len = sizeof(struct fuse_in_header) +
-		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+		fuse_len_args(req->args->in_numargs,
+			      (struct fuse_arg *) req->args->in_args);
 	list_add_tail(&req->list, &fiq->pending);
-	wake_up(&fiq->waitq);
-	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+	fiq->ops->wake_pending_and_unlock(fiq, sync);
 }

 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
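The even/odd ID scheme introduced above can be modeled in a few lines of user-space C. This is an illustrative sketch, not kernel code: the kernel buckets replies with hash_long() and FUSE_PQ_HASH_BITS (assumed to be defined in fuse_i.h), which is omitted here.

    #include <stdint.h>
    #include <stdio.h>

    #define FUSE_INT_REQ_BIT (1ULL << 0)   /* set only on interrupt IDs */
    #define FUSE_REQ_ID_STEP (1ULL << 1)   /* ordinary IDs advance by two */

    static uint64_t reqctr;

    static uint64_t next_unique(void)
    {
            reqctr += FUSE_REQ_ID_STEP;    /* 2, 4, 6, ... - always even */
            return reqctr;
    }

    int main(void)
    {
            uint64_t req = next_unique();
            uint64_t intr = req | FUSE_INT_REQ_BIT;  /* odd twin of the even ID */

            /* Both IDs resolve to the same request once the bit is masked off. */
            printf("req=%llu intr=%llu lookup key=%llu\n",
                   (unsigned long long)req, (unsigned long long)intr,
                   (unsigned long long)(intr & ~FUSE_INT_REQ_BIT));
            return 0;
    }

Because an interrupt reuses the original ID instead of consuming a fresh one, the reply path can match both kinds of messages to the same request by masking off FUSE_INT_REQ_BIT.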
@@ -348,28 +249,27 @@
 	if (fiq->connected) {
 		fiq->forget_list_tail->next = forget;
 		fiq->forget_list_tail = forget;
-		wake_up(&fiq->waitq);
-		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+		fiq->ops->wake_forget_and_unlock(fiq, false);
 	} else {
 		kfree(forget);
+		spin_unlock(&fiq->lock);
 	}
-	spin_unlock(&fiq->lock);
 }

 static void flush_bg_queue(struct fuse_conn *fc)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
+
 	while (fc->active_background < fc->max_background &&
 	       !list_empty(&fc->bg_queue)) {
 		struct fuse_req *req;
-		struct fuse_iqueue *fiq = &fc->iq;

-		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
+		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
 		list_del(&req->list);
 		fc->active_background++;
 		spin_lock(&fiq->lock);
 		req->in.h.unique = fuse_get_unique(fiq);
-		queue_request(fiq, req);
-		spin_unlock(&fiq->lock);
+		queue_request_and_unlock(fiq, req, false);
 	}
 }

@@ -381,20 +281,29 @@
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
-static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_end(struct fuse_req *req)
 {
+	struct fuse_mount *fm = req->fm;
+	struct fuse_conn *fc = fm->fc;
 	struct fuse_iqueue *fiq = &fc->iq;

 	if (test_and_set_bit(FR_FINISHED, &req->flags))
 		goto put_request;

-	spin_lock(&fiq->lock);
-	list_del_init(&req->intr_entry);
-	spin_unlock(&fiq->lock);
+	/*
+	 * test_and_set_bit() implies smp_mb() between bit
+	 * changing and below FR_INTERRUPTED check. Pairs with
+	 * smp_mb() from queue_interrupt().
+	 */
+	if (test_bit(FR_INTERRUPTED, &req->flags)) {
+		spin_lock(&fiq->lock);
+		list_del_init(&req->intr_entry);
+		spin_unlock(&fiq->lock);
+	}
 	WARN_ON(test_bit(FR_PENDING, &req->flags));
 	WARN_ON(test_bit(FR_SENT, &req->flags));
 	if (test_bit(FR_BACKGROUND, &req->flags)) {
-		spin_lock(&fc->lock);
+		spin_lock(&fc->bg_lock);
 		clear_bit(FR_BACKGROUND, &req->flags);
 		if (fc->num_background == fc->max_background) {
 			fc->blocked = 0;
@@ -410,39 +319,59 @@
 			wake_up(&fc->blocked_waitq);
 		}

-		if (fc->num_background == fc->congestion_threshold && fc->sb) {
-			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
-			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+		if (fc->num_background == fc->congestion_threshold && fm->sb) {
+			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
+			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
 		}
 		fc->num_background--;
 		fc->active_background--;
 		flush_bg_queue(fc);
-		spin_unlock(&fc->lock);
+		spin_unlock(&fc->bg_lock);
+	} else {
+		/* Wake up waiter sleeping in request_wait_answer() */
+		wake_up(&req->waitq);
 	}
-	wake_up(&req->waitq);
-	if (req->end)
-		req->end(fc, req);
-put_request:
-	fuse_put_request(fc, req);
-}

-static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+	if (test_bit(FR_ASYNC, &req->flags))
+		req->args->end(fm, req->args, req->out.h.error);
+put_request:
+	fuse_put_request(req);
+}
+EXPORT_SYMBOL_GPL(fuse_request_end);
+
+static int queue_interrupt(struct fuse_req *req)
 {
+	struct fuse_iqueue *fiq = &req->fm->fc->iq;
+
 	spin_lock(&fiq->lock);
-	if (test_bit(FR_FINISHED, &req->flags)) {
+	/* Check for we've sent request to interrupt this req */
+	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
 		spin_unlock(&fiq->lock);
-		return;
+		return -EINVAL;
 	}
+
 	if (list_empty(&req->intr_entry)) {
 		list_add_tail(&req->intr_entry, &fiq->interrupts);
-		wake_up(&fiq->waitq);
+		/*
+		 * Pairs with smp_mb() implied by test_and_set_bit()
+		 * from fuse_request_end().
+		 */
+		smp_mb();
+		if (test_bit(FR_FINISHED, &req->flags)) {
+			list_del_init(&req->intr_entry);
+			spin_unlock(&fiq->lock);
+			return 0;
+		}
+		fiq->ops->wake_interrupt_and_unlock(fiq, false);
+	} else {
+		spin_unlock(&fiq->lock);
 	}
-	spin_unlock(&fiq->lock);
-	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+	return 0;
 }

-static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
+static void request_wait_answer(struct fuse_req *req)
 {
+	struct fuse_conn *fc = req->fm->fc;
 	struct fuse_iqueue *fiq = &fc->iq;
 	int err;

@@ -457,7 +386,7 @@
 		/* matches barrier in fuse_dev_do_read() */
 		smp_mb__after_atomic();
 		if (test_bit(FR_SENT, &req->flags))
-			queue_interrupt(fiq, req);
+			queue_interrupt(req);
 	}

 	if (!test_bit(FR_FORCE, &req->flags)) {
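On the wire this means an INTERRUPT request now carries the original ID with FUSE_INT_REQ_BIT set, and a daemon that cannot act on it yet may reply -EAGAIN to have the kernel queue the interrupt again (see the queue_interrupt() call in the write path further down). A minimal daemon-side sketch, assuming fuse_fd is an open /dev/fuse descriptor and intr_unique is the ID read from the INTERRUPT request:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    /* Ask the kernel to resend an interrupt we are not ready to handle yet. */
    static int requeue_interrupt(int fuse_fd, uint64_t intr_unique)
    {
            struct fuse_out_header oh;

            memset(&oh, 0, sizeof(oh));
            oh.len = sizeof(oh);
            oh.error = -EAGAIN;       /* kernel re-runs queue_interrupt() */
            oh.unique = intr_unique;  /* original ID | FUSE_INT_REQ_BIT */
            return write(fuse_fd, &oh, sizeof(oh)) == sizeof(oh) ? 0 : -errno;
    }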
@@ -486,9 +415,9 @@
 	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
 }

-static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
+static void __fuse_request_send(struct fuse_req *req)
 {
-	struct fuse_iqueue *fiq = &fc->iq;
+	struct fuse_iqueue *fiq = &req->fm->fc->iq;

 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
 	spin_lock(&fiq->lock);
@@ -497,173 +426,192 @@
 		req->out.h.error = -ENOTCONN;
 	} else {
 		req->in.h.unique = fuse_get_unique(fiq);
-		queue_request(fiq, req);
 		/* acquire extra reference, since request is still needed
-		   after request_end() */
+		   after fuse_request_end() */
 		__fuse_get_request(req);
-		spin_unlock(&fiq->lock);
+		queue_request_and_unlock(fiq, req, true);

-		request_wait_answer(fc, req);
-		/* Pairs with smp_wmb() in request_end() */
+		request_wait_answer(req);
+		/* Pairs with smp_wmb() in fuse_request_end() */
 		smp_rmb();
 	}
 }

-void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
-{
-	__set_bit(FR_ISREPLY, &req->flags);
-	if (!test_bit(FR_WAITING, &req->flags)) {
-		__set_bit(FR_WAITING, &req->flags);
-		atomic_inc(&fc->num_waiting);
-	}
-	__fuse_request_send(fc, req);
-}
-EXPORT_SYMBOL_GPL(fuse_request_send);
-
 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
 {
-	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
-		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
+		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;

 	if (fc->minor < 9) {
-		switch (args->in.h.opcode) {
+		switch (args->opcode) {
 		case FUSE_LOOKUP:
 		case FUSE_CREATE:
 		case FUSE_MKNOD:
 		case FUSE_MKDIR:
 		case FUSE_SYMLINK:
 		case FUSE_LINK:
-			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
 			break;
 		case FUSE_GETATTR:
 		case FUSE_SETATTR:
-			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
 			break;
 		}
 	}
 	if (fc->minor < 12) {
-		switch (args->in.h.opcode) {
+		switch (args->opcode) {
 		case FUSE_CREATE:
-			args->in.args[0].size = sizeof(struct fuse_open_in);
+			args->in_args[0].size = sizeof(struct fuse_open_in);
 			break;
 		case FUSE_MKNOD:
-			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
 			break;
 		}
 	}
 }

-ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
+static void fuse_force_creds(struct fuse_req *req)
 {
+	struct fuse_conn *fc = req->fm->fc;
+
+	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
+	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
+}
+
+static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
+{
+	req->in.h.opcode = args->opcode;
+	req->in.h.nodeid = args->nodeid;
+	req->args = args;
+	if (args->end)
+		__set_bit(FR_ASYNC, &req->flags);
+}
+
+ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+{
+	struct fuse_conn *fc = fm->fc;
 	struct fuse_req *req;
 	ssize_t ret;

-	req = fuse_get_req(fc, 0);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	if (args->force) {
+		atomic_inc(&fc->num_waiting);
+		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
+
+		if (!args->nocreds)
+			fuse_force_creds(req);
+
+		__set_bit(FR_WAITING, &req->flags);
+		__set_bit(FR_FORCE, &req->flags);
+	} else {
+		WARN_ON(args->nocreds);
+		req = fuse_get_req(fm, false);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+	}

 	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
 	fuse_adjust_compat(fc, args);
+	fuse_args_to_req(req, args);

-	req->in.h.opcode = args->in.h.opcode;
-	req->in.h.nodeid = args->in.h.nodeid;
-	req->in.numargs = args->in.numargs;
-	memcpy(req->in.args, args->in.args,
-	       args->in.numargs * sizeof(struct fuse_in_arg));
-	req->out.argvar = args->out.argvar;
-	req->out.numargs = args->out.numargs;
-	memcpy(req->out.args, args->out.args,
-	       args->out.numargs * sizeof(struct fuse_arg));
-	req->out.canonical_path = args->out.canonical_path;
-	fuse_request_send(fc, req);
+	if (!args->noreply)
+		__set_bit(FR_ISREPLY, &req->flags);
+	__fuse_request_send(req);
 	ret = req->out.h.error;
-	if (!ret && args->out.argvar) {
-		BUG_ON(args->out.numargs != 1);
-		ret = req->out.args[0].size;
+	if (!ret && args->out_argvar) {
+		BUG_ON(args->out_numargs == 0);
+		ret = args->out_args[args->out_numargs - 1].size;
 	}
-	fuse_put_request(fc, req);
+	fuse_put_request(req);

 	return ret;
 }
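To illustrate the new calling convention: a synchronous operation now fills a struct fuse_args and hands it to fuse_simple_request(). The sketch below is modeled on how fuse_statfs() uses this interface; the FUSE_ARGS() initializer and field names are assumed to match fuse_i.h in this tree.

    static int example_statfs(struct fuse_mount *fm, u64 nodeid,
                              struct fuse_statfs_out *outarg)
    {
            FUSE_ARGS(args);

            args.opcode = FUSE_STATFS;
            args.nodeid = nodeid;
            args.out_numargs = 1;
            args.out_args[0].size = sizeof(*outarg);
            args.out_args[0].value = outarg;

            /* Blocks until the daemon replies; returns req->out.h.error. */
            return fuse_simple_request(fm, &args);
    }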
-/*
- * Called under fc->lock
- *
- * fc->connected must have been checked previously
- */
-void fuse_request_send_background_locked(struct fuse_conn *fc,
-					 struct fuse_req *req)
+static bool fuse_request_queue_background(struct fuse_req *req)
 {
-	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
+	struct fuse_mount *fm = req->fm;
+	struct fuse_conn *fc = fm->fc;
+	bool queued = false;
+
+	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
 	if (!test_bit(FR_WAITING, &req->flags)) {
 		__set_bit(FR_WAITING, &req->flags);
 		atomic_inc(&fc->num_waiting);
 	}
 	__set_bit(FR_ISREPLY, &req->flags);
-	fc->num_background++;
-	if (fc->num_background == fc->max_background)
-		fc->blocked = 1;
-	if (fc->num_background == fc->congestion_threshold && fc->sb) {
-		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
-		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+	spin_lock(&fc->bg_lock);
+	if (likely(fc->connected)) {
+		fc->num_background++;
+		if (fc->num_background == fc->max_background)
+			fc->blocked = 1;
+		if (fc->num_background == fc->congestion_threshold && fm->sb) {
+			set_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
+			set_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
+		}
+		list_add_tail(&req->list, &fc->bg_queue);
+		flush_bg_queue(fc);
+		queued = true;
 	}
-	list_add_tail(&req->list, &fc->bg_queue);
-	flush_bg_queue(fc);
+	spin_unlock(&fc->bg_lock);
+
+	return queued;
 }

-void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
+int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
+			   gfp_t gfp_flags)
 {
-	BUG_ON(!req->end);
-	spin_lock(&fc->lock);
-	if (fc->connected) {
-		fuse_request_send_background_locked(fc, req);
-		spin_unlock(&fc->lock);
+	struct fuse_req *req;
+
+	if (args->force) {
+		WARN_ON(!args->nocreds);
+		req = fuse_request_alloc(fm, gfp_flags);
+		if (!req)
+			return -ENOMEM;
+		__set_bit(FR_BACKGROUND, &req->flags);
 	} else {
-		spin_unlock(&fc->lock);
-		req->out.h.error = -ENOTCONN;
-		req->end(fc, req);
-		fuse_put_request(fc, req);
+		WARN_ON(args->nocreds);
+		req = fuse_get_req(fm, true);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
 	}
-}
-EXPORT_SYMBOL_GPL(fuse_request_send_background);

-static int fuse_request_send_notify_reply(struct fuse_conn *fc,
-					  struct fuse_req *req, u64 unique)
+	fuse_args_to_req(req, args);
+
+	if (!fuse_request_queue_background(req)) {
+		fuse_put_request(req);
+		return -ENOTCONN;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fuse_simple_background);
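A background caller follows the same fuse_args pattern but must keep the args (and any buffers they reference) alive until the end callback fires, typically by embedding everything in a single allocation. A hedged sketch — the example_* names are hypothetical, and like fuse_retrieve() below it invokes the end callback itself when queuing fails:

    struct example_bg {
            struct fuse_args args;
            struct fuse_statfs_out outarg;
    };

    static void example_bg_end(struct fuse_mount *fm, struct fuse_args *args,
                               int error)
    {
            struct example_bg *eb = container_of(args, struct example_bg, args);

            if (!error)
                    pr_info("bg statfs: %llu blocks\n",
                            (unsigned long long)eb->outarg.st.blocks);
            kfree(eb);
    }

    static int example_bg_statfs(struct fuse_mount *fm, u64 nodeid)
    {
            struct example_bg *eb = kzalloc(sizeof(*eb), GFP_KERNEL);
            int err;

            if (!eb)
                    return -ENOMEM;
            eb->args.opcode = FUSE_STATFS;
            eb->args.nodeid = nodeid;
            eb->args.out_numargs = 1;
            eb->args.out_args[0].size = sizeof(eb->outarg);
            eb->args.out_args[0].value = &eb->outarg;
            eb->args.end = example_bg_end;  /* owns eb from here on */

            err = fuse_simple_background(fm, &eb->args, GFP_KERNEL);
            if (err)
                    example_bg_end(fm, &eb->args, err);
            return err;
    }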
+
+static int fuse_simple_notify_reply(struct fuse_mount *fm,
+				    struct fuse_args *args, u64 unique)
 {
-	int err = -ENODEV;
-	struct fuse_iqueue *fiq = &fc->iq;
+	struct fuse_req *req;
+	struct fuse_iqueue *fiq = &fm->fc->iq;
+	int err = 0;
+
+	req = fuse_get_req(fm, false);
+	if (IS_ERR(req))
+		return PTR_ERR(req);

 	__clear_bit(FR_ISREPLY, &req->flags);
 	req->in.h.unique = unique;
+
+	fuse_args_to_req(req, args);
+
 	spin_lock(&fiq->lock);
 	if (fiq->connected) {
-		queue_request(fiq, req);
-		err = 0;
+		queue_request_and_unlock(fiq, req, false);
+	} else {
+		err = -ENODEV;
+		spin_unlock(&fiq->lock);
+		fuse_put_request(req);
 	}
-	spin_unlock(&fiq->lock);

 	return err;
-}
-
-void fuse_force_forget(struct file *file, u64 nodeid)
-{
-	struct inode *inode = file_inode(file);
-	struct fuse_conn *fc = get_fuse_conn(inode);
-	struct fuse_req *req;
-	struct fuse_forget_in inarg;
-
-	memset(&inarg, 0, sizeof(inarg));
-	inarg.nlookup = 1;
-	req = fuse_get_req_nofail_nopages(fc, file);
-	req->in.h.opcode = FUSE_FORGET;
-	req->in.h.nodeid = nodeid;
-	req->in.numargs = 1;
-	req->in.args[0].size = sizeof(inarg);
-	req->in.args[0].value = &inarg;
-	__clear_bit(FR_ISREPLY, &req->flags);
-	__fuse_request_send(fc, req);
-	/* ignore errors */
-	fuse_put_request(fc, req);
 }

 /*
@@ -739,7 +687,11 @@
 		flush_dcache_page(cs->pg);
 		set_page_dirty_lock(cs->pg);
 	}
-	put_page(cs->pg);
+	/*
+	 * The page could be GUP page(see iov_iter_get_pages in
+	 * fuse_copy_fill) so use put_user_page to release it.
+	 */
+	put_user_page(cs->pg);
 	}
 	cs->pg = NULL;
 }
@@ -774,7 +726,7 @@
 		cs->pipebufs++;
 		cs->nr_segs--;
 	} else {
-		if (cs->nr_segs == cs->pipe->buffers)
+		if (cs->nr_segs >= cs->pipe->max_usage)
 			return -EIO;

 		page = alloc_page(GFP_HIGHUSER);
@@ -839,10 +791,10 @@
 	       1 << PG_uptodate |
 	       1 << PG_lru |
 	       1 << PG_active |
+	       1 << PG_workingset |
 	       1 << PG_reclaim |
 	       1 << PG_waiters))) {
-		printk(KERN_WARNING "fuse: trying to steal weird page\n");
-		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
+		dump_page(page, "fuse: trying to steal weird page");
 		return 1;
 	}
 	return 0;
@@ -875,7 +827,7 @@
 	if (cs->len != PAGE_SIZE)
 		goto out_fallback;

-	if (pipe_buf_steal(cs->pipe, buf) != 0)
+	if (!pipe_buf_try_steal(cs->pipe, buf))
 		goto out_fallback;

 	newpage = buf->page;
@@ -910,7 +862,7 @@
 	get_page(newpage);

 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
-		lru_cache_add_file(newpage);
+		lru_cache_add(newpage);

 	/*
 	 * Release while we have extra ref on stolen page.  Otherwise
@@ -962,7 +914,7 @@
 	struct pipe_buffer *buf;
 	int err;

-	if (cs->nr_segs == cs->pipe->buffers)
+	if (cs->nr_segs >= cs->pipe->max_usage)
 		return -EIO;

 	get_page(page);
@@ -1001,7 +953,17 @@
 	while (count) {
 		if (cs->write && cs->pipebufs && page) {
-			return fuse_ref_page(cs, page, offset, count);
+			/*
+			 * Can't control lifetime of pipe buffers, so always
+			 * copy user pages.
+			 */
+			if (cs->req->args->user_pages) {
+				err = fuse_copy_fill(cs);
+				if (err)
+					return err;
+			} else {
+				return fuse_ref_page(cs, page, offset, count);
+			}
 		} else if (!cs->len) {
 			if (cs->move_pages && page &&
 			    offset == 0 && count == PAGE_SIZE) {
@@ -1033,14 +995,15 @@
 {
 	unsigned i;
 	struct fuse_req *req = cs->req;
+	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);

-	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
+
+	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
 		int err;
-		unsigned offset = req->page_descs[i].offset;
-		unsigned count = min(nbytes, req->page_descs[i].length);
+		unsigned int offset = ap->descs[i].offset;
+		unsigned int count = min(nbytes, ap->descs[i].length);

-		err = fuse_copy_page(cs, &req->pages[i], offset, count,
-				     zeroing);
+		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
 		if (err)
 			return err;
@@ -1111,12 +1074,11 @@
 	int err;

 	list_del_init(&req->intr_entry);
-	req->intr_unique = fuse_get_unique(fiq);
 	memset(&ih, 0, sizeof(ih));
 	memset(&arg, 0, sizeof(arg));
 	ih.len = reqsize;
 	ih.opcode = FUSE_INTERRUPT;
-	ih.unique = req->intr_unique;
+	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
 	arg.unique = req->in.h.unique;

 	spin_unlock(&fiq->lock);
@@ -1131,9 +1093,9 @@
 	return err ? err : reqsize;
 }

-static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
-					       unsigned max,
-					       unsigned *countp)
+struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+					     unsigned int max,
+					     unsigned int *countp)
 {
 	struct fuse_forget_link *head = fiq->forget_list_head.next;
 	struct fuse_forget_link **newhead = &head;
@@ -1152,6 +1114,7 @@
 	return head;
 }
+EXPORT_SYMBOL(fuse_dequeue_forget);

 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs,
@@ -1159,7 +1122,7 @@
 __releases(fiq->lock)
 {
 	int err;
-	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
+	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
 	struct fuse_forget_in arg = {
 		.nlookup = forget->forget_one.nlookup,
 	};
@@ -1207,7 +1170,7 @@
 	}

 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
-	head = dequeue_forget(fiq, max_forgets, &count);
+	head = fuse_dequeue_forget(fiq, max_forgets, &count);
 	spin_unlock(&fiq->lock);

 	arg.count = count;
@@ -1252,7 +1215,7 @@
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
- * request_end().  Otherwise add it to the processing list, and set
+ * fuse_request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
@@ -1263,8 +1226,27 @@
 	struct fuse_iqueue *fiq = &fc->iq;
 	struct fuse_pqueue *fpq = &fud->pq;
 	struct fuse_req *req;
-	struct fuse_in *in;
+	struct fuse_args *args;
 	unsigned reqsize;
+	unsigned int hash;
+
+	/*
+	 * Require sane minimum read buffer - that has capacity for fixed part
+	 * of any request header + negotiated max_write room for data.
+	 *
+	 * Historically libfuse reserves 4K for fixed header room, but e.g.
+	 * GlusterFS reserves only 80 bytes
+	 *
+	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
+	 *
+	 * which is the absolute minimum any sane filesystem should be using
+	 * for header room.
+	 */
+	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
+			   sizeof(struct fuse_in_header) +
+			   sizeof(struct fuse_write_in) +
+			   fc->max_write))
+		return -EINVAL;
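For a daemon this check means the buffer passed to read() on /dev/fuse must cover the fixed header plus the negotiated max_write; smaller buffers now fail with EINVAL up front instead of risking truncated WRITE requests. A user-space sketch of a conforming allocation:

    #include <stdlib.h>
    #include <linux/fuse.h>

    /* Size the /dev/fuse read buffer for a negotiated max_write. */
    static void *alloc_request_buffer(size_t max_write, size_t *bufsize)
    {
            size_t min = sizeof(struct fuse_in_header) +
                         sizeof(struct fuse_write_in) + max_write;

            *bufsize = min < FUSE_MIN_READ_BUFFER ? FUSE_MIN_READ_BUFFER : min;
            return malloc(*bufsize);
    }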

 restart:
 	for (;;) {
@@ -1282,7 +1264,7 @@
 	}

 	if (!fiq->connected) {
-		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+		err = fc->aborted ? -ECONNABORTED : -ENODEV;
 		goto err_unlock;
 	}
@@ -1305,16 +1287,16 @@
 	list_del_init(&req->list);
 	spin_unlock(&fiq->lock);

-	in = &req->in;
-	reqsize = in->h.len;
+	args = req->args;
+	reqsize = req->in.h.len;
 	/* If request is too large, reply with an error and restart the read */
 	if (nbytes < reqsize) {
 		req->out.h.error = -EIO;
 		/* SETXATTR is special, since it may contain too large data */
-		if (in->h.opcode == FUSE_SETXATTR)
+		if (args->opcode == FUSE_SETXATTR)
 			req->out.h.error = -E2BIG;
-		request_end(fc, req);
+		fuse_request_end(req);
 		goto restart;
 	}
 	spin_lock(&fpq->lock);
@@ -1330,15 +1312,15 @@
 	list_add(&req->list, &fpq->io);
 	spin_unlock(&fpq->lock);
 	cs->req = req;
-	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
+	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
 	if (!err)
-		err = fuse_copy_args(cs, in->numargs, in->argpages,
-				     (struct fuse_arg *) in->args, 0);
+		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
+				     (struct fuse_arg *) args->in_args, 0);
 	fuse_copy_finish(cs);
 	spin_lock(&fpq->lock);
 	clear_bit(FR_LOCKED, &req->flags);
 	if (!fpq->connected) {
-		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+		err = fc->aborted ? -ECONNABORTED : -ENODEV;
 		goto out_end;
 	}
 	if (err) {
@@ -1349,15 +1331,16 @@
 		err = reqsize;
 		goto out_end;
 	}
-	list_move_tail(&req->list, &fpq->processing);
+	hash = fuse_req_hash(req->in.h.unique);
+	list_move_tail(&req->list, &fpq->processing[hash]);
 	__fuse_get_request(req);
 	set_bit(FR_SENT, &req->flags);
 	spin_unlock(&fpq->lock);
 	/* matches barrier in request_wait_answer() */
 	smp_mb__after_atomic();
 	if (test_bit(FR_INTERRUPTED, &req->flags))
-		queue_interrupt(fiq, req);
-	fuse_put_request(fc, req);
+		queue_interrupt(req);
+	fuse_put_request(req);

 	return reqsize;
@@ -1365,7 +1348,7 @@
 	if (!test_bit(FR_PRIVATE, &req->flags))
 		list_del_init(&req->list);
 	spin_unlock(&fpq->lock);
-	request_end(fc, req);
+	fuse_request_end(req);
 	return err;

 err_unlock:
@@ -1414,7 +1397,7 @@
 	if (!fud)
 		return -EPERM;

-	bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
+	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
 			      GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
@@ -1426,7 +1409,7 @@
 	if (ret < 0)
 		goto out;

-	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
+	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
 		ret = -EIO;
 		goto out;
 	}
@@ -1488,11 +1471,8 @@
 	fuse_copy_finish(cs);

 	down_read(&fc->killsb);
-	err = -ENOENT;
-	if (fc->sb) {
-		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
-					       outarg.off, outarg.len);
-	}
+	err = fuse_reverse_inval_inode(fc, outarg.ino,
+				       outarg.off, outarg.len);
 	up_read(&fc->killsb);
 	return err;
@@ -1538,9 +1518,7 @@
 	buf[outarg.namelen] = 0;

 	down_read(&fc->killsb);
-	err = -ENOENT;
-	if (fc->sb)
-		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
+	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name);
 	up_read(&fc->killsb);
 	kfree(buf);
 	return err;
@@ -1588,10 +1566,7 @@
 	buf[outarg.namelen] = 0;

 	down_read(&fc->killsb);
-	err = -ENOENT;
-	if (fc->sb)
-		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
-					       outarg.child, &name);
+	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name);
 	up_read(&fc->killsb);
 	kfree(buf);
 	return err;
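These handlers consume unsolicited notifications: a message the daemon writes with unique == 0 and the notification code in the error field. As an illustration of that wire format (a sketch from my reading of the protocol, not authoritative), invalidating a directory entry from the daemon could look like:

    #include <stdint.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    /* Tell the kernel to drop the dentry <parent>/<name>. */
    static int notify_inval_entry(int fuse_fd, uint64_t parent, const char *name)
    {
            size_t namelen = strlen(name);
            struct fuse_notify_inval_entry_out outarg = {
                    .parent = parent,
                    .namelen = (uint32_t)namelen,
            };
            struct fuse_out_header oh = {
                    .len = sizeof(oh) + sizeof(outarg) + namelen + 1,
                    .error = FUSE_NOTIFY_INVAL_ENTRY,   /* unique stays 0 */
            };
            struct iovec iov[3] = {
                    { &oh, sizeof(oh) },
                    { &outarg, sizeof(outarg) },
                    { (void *)name, namelen + 1 },      /* including NUL */
            };

            return writev(fuse_fd, iov, 3) == (ssize_t)oh.len ? 0 : -1;
    }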
@@ -1633,10 +1608,7 @@

 	down_read(&fc->killsb);
 	err = -ENOENT;
-	if (!fc->sb)
-		goto out_up_killsb;
-
-	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
+	inode = fuse_ilookup(fc, nodeid, NULL);
 	if (!inode)
 		goto out_up_killsb;
@@ -1688,23 +1660,37 @@
 	return err;
 }

-static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
+struct fuse_retrieve_args {
+	struct fuse_args_pages ap;
+	struct fuse_notify_retrieve_in inarg;
+};
+
+static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
+			      int error)
 {
-	release_pages(req->pages, req->num_pages);
+	struct fuse_retrieve_args *ra =
+		container_of(args, typeof(*ra), ap.args);
+
+	release_pages(ra->ap.pages, ra->ap.num_pages);
+	kfree(ra);
 }

-static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
 			 struct fuse_notify_retrieve_out *outarg)
 {
 	int err;
 	struct address_space *mapping = inode->i_mapping;
-	struct fuse_req *req;
 	pgoff_t index;
 	loff_t file_size;
 	unsigned int num;
 	unsigned int offset;
 	size_t total_len = 0;
-	int num_pages;
+	unsigned int num_pages;
+	struct fuse_conn *fc = fm->fc;
+	struct fuse_retrieve_args *ra;
+	size_t args_size = sizeof(*ra);
+	struct fuse_args_pages *ap;
+	struct fuse_args *args;

 	offset = outarg->offset & ~PAGE_MASK;
 	file_size = i_size_read(inode);
@@ -1716,21 +1702,28 @@
 		num = file_size - outarg->offset;

 	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
+	num_pages = min(num_pages, fc->max_pages);

-	req = fuse_get_req(fc, num_pages);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));

-	req->in.h.opcode = FUSE_NOTIFY_REPLY;
-	req->in.h.nodeid = outarg->nodeid;
-	req->in.numargs = 2;
-	req->in.argpages = 1;
-	req->end = fuse_retrieve_end;
+	ra = kzalloc(args_size, GFP_KERNEL);
+	if (!ra)
+		return -ENOMEM;
+
+	ap = &ra->ap;
+	ap->pages = (void *) (ra + 1);
+	ap->descs = (void *) (ap->pages + num_pages);
+
+	args = &ap->args;
+	args->nodeid = outarg->nodeid;
+	args->opcode = FUSE_NOTIFY_REPLY;
+	args->in_numargs = 2;
+	args->in_pages = true;
+	args->end = fuse_retrieve_end;

 	index = outarg->offset >> PAGE_SHIFT;

-	while (num && req->num_pages < num_pages) {
+	while (num && ap->num_pages < num_pages) {
 		struct page *page;
 		unsigned int this_num;
@@ -1739,27 +1732,25 @@
 			break;

 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
-		req->pages[req->num_pages] = page;
-		req->page_descs[req->num_pages].offset = offset;
-		req->page_descs[req->num_pages].length = this_num;
-		req->num_pages++;
+		ap->pages[ap->num_pages] = page;
+		ap->descs[ap->num_pages].offset = offset;
+		ap->descs[ap->num_pages].length = this_num;
+		ap->num_pages++;

 		offset = 0;
 		num -= this_num;
 		total_len += this_num;
 		index++;
 	}
-	req->misc.retrieve_in.offset = outarg->offset;
-	req->misc.retrieve_in.size = total_len;
-	req->in.args[0].size = sizeof(req->misc.retrieve_in);
-	req->in.args[0].value = &req->misc.retrieve_in;
-	req->in.args[1].size = total_len;
+	ra->inarg.offset = outarg->offset;
+	ra->inarg.size = total_len;
+	args->in_args[0].size = sizeof(ra->inarg);
+	args->in_args[0].value = &ra->inarg;
+	args->in_args[1].size = total_len;

-	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
-	if (err) {
-		fuse_retrieve_end(fc, req);
-		fuse_put_request(fc, req);
-	}
+	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
+	if (err)
+		fuse_retrieve_end(fm, args, err);

 	return err;
 }
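The retrieve path above packs the fixed struct and both per-page arrays into one allocation and then aims the pointers into the tail of that block. The layout trick in isolation, as a user-space model with stand-in types:

    #include <stdlib.h>

    struct page;                            /* stand-in for the kernel type */
    struct desc { unsigned int length, offset; };

    struct retrieve_args {
            /* ... fixed fields ... */
            struct page **pages;
            struct desc *descs;
    };

    static struct retrieve_args *alloc_retrieve(unsigned int num_pages)
    {
            struct retrieve_args *ra;

            ra = calloc(1, sizeof(*ra) + num_pages * (sizeof(ra->pages[0]) +
                                                      sizeof(ra->descs[0])));
            if (!ra)
                    return NULL;
            ra->pages = (void *)(ra + 1);             /* array right after struct */
            ra->descs = (void *)(ra->pages + num_pages); /* then the desc array */
            return ra;
    }

One kzalloc/kfree pair then covers the whole request, which is why fuse_retrieve_end() can simply kfree() the container.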
@@ -1768,7 +1759,9 @@
 			   struct fuse_copy_state *cs)
 {
 	struct fuse_notify_retrieve_out outarg;
+	struct fuse_mount *fm;
 	struct inode *inode;
+	u64 nodeid;
 	int err;

 	err = -EINVAL;
@@ -1783,14 +1776,12 @@

 	down_read(&fc->killsb);
 	err = -ENOENT;
-	if (fc->sb) {
-		u64 nodeid = outarg.nodeid;
+	nodeid = outarg.nodeid;

-		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
-		if (inode) {
-			err = fuse_retrieve(fc, inode, &outarg);
-			iput(inode);
-		}
+	inode = fuse_ilookup(fc, nodeid, &fm);
+	if (inode) {
+		err = fuse_retrieve(fm, inode, &outarg);
+		iput(inode);
 	}
 	up_read(&fc->killsb);

@@ -1835,36 +1826,35 @@
 /* Look up request on processing list by unique ID */
 static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
 {
+	unsigned int hash = fuse_req_hash(unique);
 	struct fuse_req *req;

-	list_for_each_entry(req, &fpq->processing, list) {
-		if (req->in.h.unique == unique || req->intr_unique == unique)
+	list_for_each_entry(req, &fpq->processing[hash], list) {
+		if (req->in.h.unique == unique)
 			return req;
 	}
 	return NULL;
 }

-static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
+static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
 			 unsigned nbytes)
 {
 	unsigned reqsize = sizeof(struct fuse_out_header);

-	if (out->h.error)
-		return nbytes != reqsize ? -EINVAL : 0;
+	reqsize += fuse_len_args(args->out_numargs, args->out_args);

-	reqsize += len_args(out->numargs, out->args);
-
-	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
+	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
 		return -EINVAL;
 	else if (reqsize > nbytes) {
-		struct fuse_arg *lastarg = &out->args[out->numargs-1];
+		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
 		unsigned diffsize = reqsize - nbytes;
+
 		if (diffsize > lastarg->size)
 			return -EINVAL;
 		lastarg->size -= diffsize;
 	}
-	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
-			      out->page_zeroing);
+	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
+			      args->out_args, args->page_zeroing);
 }

 /*
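Because out_argvar still lets the final output argument shrink, a daemon may legally reply with fewer bytes than the kernel asked for, and copy_out_args() trims the last argument to fit. A daemon-side sketch of a short READ reply:

    #include <stdint.h>
    #include <sys/uio.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    /* Reply to a READ with 'count' bytes, possibly fewer than requested. */
    static int reply_read(int fuse_fd, uint64_t unique,
                          const void *data, size_t count)
    {
            struct fuse_out_header oh = {
                    .len = sizeof(oh) + count,  /* kernel trims the last arg */
                    .unique = unique,
            };
            struct iovec iov[2] = {
                    { &oh, sizeof(oh) },
                    { (void *)data, count },
            };

            return writev(fuse_fd, iov, 2) == (ssize_t)oh.len ? 0 : -1;
    }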
@@ -1872,7 +1862,7 @@
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
- * The request is finished by calling request_end()
+ * The request is finished by calling fuse_request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
@@ -1883,16 +1873,17 @@
 	struct fuse_req *req;
 	struct fuse_out_header oh;

+	err = -EINVAL;
 	if (nbytes < sizeof(struct fuse_out_header))
-		return -EINVAL;
+		goto out;

 	err = fuse_copy_one(cs, &oh, sizeof(oh));
 	if (err)
-		goto err_finish;
+		goto copy_finish;

 	err = -EINVAL;
 	if (oh.len != nbytes)
-		goto err_finish;
+		goto copy_finish;

 	/*
 	 * Zero oh.unique indicates unsolicited notification message
@@ -1900,41 +1891,40 @@
 	 */
 	if (!oh.unique) {
 		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
-		return err ? err : nbytes;
+		goto out;
 	}

 	err = -EINVAL;
 	if (oh.error <= -512 || oh.error > 0)
-		goto err_finish;
+		goto copy_finish;

 	spin_lock(&fpq->lock);
+	req = NULL;
+	if (fpq->connected)
+		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
+
 	err = -ENOENT;
-	if (!fpq->connected)
-		goto err_unlock_pq;
+	if (!req) {
+		spin_unlock(&fpq->lock);
+		goto copy_finish;
+	}

-	req = request_find(fpq, oh.unique);
-	if (!req)
-		goto err_unlock_pq;
-
-	/* Is it an interrupt reply? */
-	if (req->intr_unique == oh.unique) {
+	/* Is it an interrupt reply ID? */
+	if (oh.unique & FUSE_INT_REQ_BIT) {
 		__fuse_get_request(req);
 		spin_unlock(&fpq->lock);

-		err = -EINVAL;
-		if (nbytes != sizeof(struct fuse_out_header)) {
-			fuse_put_request(fc, req);
-			goto err_finish;
-		}
-
-		if (oh.error == -ENOSYS)
+		err = 0;
+		if (nbytes != sizeof(struct fuse_out_header))
+			err = -EINVAL;
+		else if (oh.error == -ENOSYS)
 			fc->no_interrupt = 1;
 		else if (oh.error == -EAGAIN)
-			queue_interrupt(&fc->iq, req);
-		fuse_put_request(fc, req);
+			err = queue_interrupt(req);

-		fuse_copy_finish(cs);
-		return nbytes;
+		fuse_put_request(req);
+
+		goto copy_finish;
 	}

 	clear_bit(FR_SENT, &req->flags);
@@ -1943,18 +1933,21 @@
 	set_bit(FR_LOCKED, &req->flags);
 	spin_unlock(&fpq->lock);
 	cs->req = req;
-	if (!req->out.page_replace)
+	if (!req->args->page_replace)
 		cs->move_pages = 0;

-	err = copy_out_args(cs, &req->out, nbytes);
+	if (oh.error)
+		err = nbytes != sizeof(oh) ? -EINVAL : 0;
+	else
+		err = copy_out_args(cs, req->args, nbytes);
 	fuse_copy_finish(cs);

 	if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH) {
-		char *path = (char *)req->out.args[0].value;
+		char *path = (char *)req->args->out_args[0].value;

-		path[req->out.args[0].size - 1] = 0;
+		path[req->args->out_args[0].size - 1] = 0;
 		if (req->out.h.error != -ENOSYS)
-			req->out.h.error = kern_path(path, 0, req->out.canonical_path);
+			req->out.h.error = kern_path(path, 0, req->args->canonical_path);
 	}

 	spin_lock(&fpq->lock);
@@ -1967,15 +1960,13 @@
 		list_del_init(&req->list);
 	spin_unlock(&fpq->lock);

-	request_end(fc, req);
-
+	fuse_request_end(req);
+out:
 	return err ? err : nbytes;

- err_unlock_pq:
-	spin_unlock(&fpq->lock);
- err_finish:
+copy_finish:
 	fuse_copy_finish(cs);
-	return err;
+	goto out;
 }

 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
@@ -1998,6 +1989,7 @@
 		     struct file *out, loff_t *ppos,
 		     size_t len, unsigned int flags)
 {
+	unsigned int head, tail, mask, count;
 	unsigned nbuf;
 	unsigned idx;
 	struct pipe_buffer *bufs;
@@ -2012,8 +2004,12 @@

 	pipe_lock(pipe);

-	bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
-			      GFP_KERNEL);
+	head = pipe->head;
+	tail = pipe->tail;
+	mask = pipe->ring_size - 1;
+	count = head - tail;
+
+	bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs) {
 		pipe_unlock(pipe);
 		return -ENOMEM;
@@ -2021,8 +2017,8 @@

 	nbuf = 0;
 	rem = 0;
-	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
-		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
+	for (idx = tail; idx != head && rem < len; idx++)
+		rem += pipe->bufs[idx & mask].len;

 	ret = -EINVAL;
 	if (rem < len)
@@ -2033,16 +2029,17 @@
 		struct pipe_buffer *ibuf;
 		struct pipe_buffer *obuf;

-		BUG_ON(nbuf >= pipe->buffers);
-		BUG_ON(!pipe->nrbufs);
-		ibuf = &pipe->bufs[pipe->curbuf];
+		if (WARN_ON(nbuf >= count || tail == head))
+			goto out_free;
+
+		ibuf = &pipe->bufs[tail & mask];
 		obuf = &bufs[nbuf];

 		if (rem >= ibuf->len) {
 			*obuf = *ibuf;
 			ibuf->ops = NULL;
-			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
-			pipe->nrbufs--;
+			tail++;
+			pipe->tail = tail;
 		} else {
 			if (!pipe_buf_get(pipe, ibuf))
 				goto out_free;
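The splice code above is adapted to the kernel's newer pipe ring representation: head and tail are free-running counters, ring_size is a power of two, occupancy is head - tail, and a counter maps to a slot via index & mask. The arithmetic in isolation:

    #include <assert.h>

    /* Free-running pipe ring counters; ring_size must be a power of two. */
    struct ring { unsigned int head, tail, ring_size; };

    static unsigned int ring_occupancy(const struct ring *r)
    {
            return r->head - r->tail;  /* wraps correctly with unsigned math */
    }

    static unsigned int ring_slot(const struct ring *r, unsigned int idx)
    {
            return idx & (r->ring_size - 1);  /* counter -> array slot */
    }

    int main(void)
    {
            struct ring r = { .head = 5, .tail = 3, .ring_size = 4 };

            assert(ring_occupancy(&r) == 2);
            assert(ring_slot(&r, r.tail) == 3);      /* bufs[3] */
            assert(ring_slot(&r, r.tail + 1) == 0);  /* wraps to bufs[0] */
            return 0;
    }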
@@ -2104,12 +2101,8 @@
 	return mask;
 }

-/*
- * Abort all requests on the given list (pending or processing)
- *
- * This function releases and reacquires fc->lock
- */
-static void end_requests(struct fuse_conn *fc, struct list_head *head)
+/* Abort all requests on the given list (pending or processing) */
+static void end_requests(struct list_head *head)
 {
 	while (!list_empty(head)) {
 		struct fuse_req *req;
@@ -2117,7 +2110,7 @@
 		req->out.h.error = -ECONNABORTED;
 		clear_bit(FR_SENT, &req->flags);
 		list_del_init(&req->list);
-		request_end(fc, req);
+		fuse_request_end(req);
 	}
 }

@@ -2145,7 +2138,7 @@
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
- * Documentation/filesystems/fuse.txt).
+ * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
@@ -2154,7 +2147,7 @@
 * is OK, the request will in that case be removed from the list before we touch
 * it.
 */
-void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
+void fuse_abort_conn(struct fuse_conn *fc)
 {
 	struct fuse_iqueue *fiq = &fc->iq;

@@ -2163,10 +2156,13 @@
 		struct fuse_dev *fud;
 		struct fuse_req *req, *next;
 		LIST_HEAD(to_end);
+		unsigned int i;

+		/* Background queuing checks fc->connected under bg_lock */
+		spin_lock(&fc->bg_lock);
 		fc->connected = 0;
-		fc->blocked = 0;
-		fc->aborted = is_abort;
+		spin_unlock(&fc->bg_lock);
+
 		fuse_set_initialized(fc);
 		list_for_each_entry(fud, &fc->devices, entry) {
 			struct fuse_pqueue *fpq = &fud->pq;
@@ -2184,11 +2180,16 @@
 				}
 				spin_unlock(&req->waitq.lock);
 			}
-			list_splice_tail_init(&fpq->processing, &to_end);
+			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+				list_splice_tail_init(&fpq->processing[i],
+						      &to_end);
 			spin_unlock(&fpq->lock);
 		}
+		spin_lock(&fc->bg_lock);
+		fc->blocked = 0;
 		fc->max_background = UINT_MAX;
 		flush_bg_queue(fc);
+		spin_unlock(&fc->bg_lock);

 		spin_lock(&fiq->lock);
 		fiq->connected = 0;
@@ -2196,7 +2197,7 @@
 			clear_bit(FR_PENDING, &req->flags);
 		list_splice_tail_init(&fiq->pending, &to_end);
 		while (forget_pending(fiq))
-			kfree(dequeue_forget(fiq, 1, NULL));
+			kfree(fuse_dequeue_forget(fiq, 1, NULL));
 		wake_up_all(&fiq->waitq);
 		spin_unlock(&fiq->lock);
 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
@@ -2204,7 +2205,7 @@
 		wake_up_all(&fc->blocked_waitq);
 		spin_unlock(&fc->lock);

-		end_requests(fc, &to_end);
+		end_requests(&to_end);
 	} else {
 		spin_unlock(&fc->lock);
 	}
@@ -2226,18 +2227,20 @@
 	struct fuse_conn *fc = fud->fc;
 	struct fuse_pqueue *fpq = &fud->pq;
 	LIST_HEAD(to_end);
+	unsigned int i;

 	spin_lock(&fpq->lock);
 	WARN_ON(!list_empty(&fpq->io));
-	list_splice_init(&fpq->processing, &to_end);
+	for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+		list_splice_init(&fpq->processing[i], &to_end);
 	spin_unlock(&fpq->lock);

-	end_requests(fc, &to_end);
+	end_requests(&to_end);

 	/* Are we the last open device? */
 	if (atomic_dec_and_test(&fc->dev_count)) {
 		WARN_ON(fc->iq.fasync != NULL);
-		fuse_abort_conn(fc, false);
+		fuse_abort_conn(fc);
 	}
 	fuse_dev_free(fud);
 }
@@ -2263,7 +2266,7 @@
 	if (new->private_data)
 		return -EINVAL;

-	fud = fuse_dev_alloc(fc);
+	fud = fuse_dev_alloc_install(fc);
 	if (!fud)
 		return -ENOMEM;

@@ -2276,37 +2279,50 @@
 static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
 			   unsigned long arg)
 {
-	int err = -ENOTTY;
+	int res;
+	int oldfd;
+	struct fuse_dev *fud = NULL;

-	if (cmd == FUSE_DEV_IOC_CLONE) {
-		int oldfd;
-
-		err = -EFAULT;
-		if (!get_user(oldfd, (__u32 __user *) arg)) {
+	switch (cmd) {
+	case FUSE_DEV_IOC_CLONE:
+		res = -EFAULT;
+		if (!get_user(oldfd, (__u32 __user *)arg)) {
 			struct file *old = fget(oldfd);

-			err = -EINVAL;
+			res = -EINVAL;
 			if (old) {
-				struct fuse_dev *fud = NULL;
-
 				/*
 				 * Check against file->f_op because CUSE
 				 * uses the same ioctl handler.
 				 */
 				if (old->f_op == file->f_op &&
-				    old->f_cred->user_ns == file->f_cred->user_ns)
+				    old->f_cred->user_ns ==
+					    file->f_cred->user_ns)
 					fud = fuse_get_dev(old);

 				if (fud) {
 					mutex_lock(&fuse_mutex);
-					err = fuse_device_clone(fud->fc, file);
+					res = fuse_device_clone(fud->fc, file);
 					mutex_unlock(&fuse_mutex);
 				}
 				fput(old);
 			}
 		}
+		break;
+	case FUSE_DEV_IOC_PASSTHROUGH_OPEN:
+		res = -EFAULT;
+		if (!get_user(oldfd, (__u32 __user *)arg)) {
+			res = -EINVAL;
+			fud = fuse_get_dev(file);
+			if (fud)
+				res = fuse_passthrough_open(fud, oldfd);
+		}
+		break;
+	default:
+		res = -ENOTTY;
+		break;
 	}
-	return err;
+	return res;
 }

 const struct file_operations fuse_dev_operations = {
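FUSE_DEV_IOC_CLONE is what lets a multi-threaded daemon attach extra /dev/fuse descriptors to one connection; the switch rewrite keeps that behavior and adds the passthrough hook. The user-space side of a clone looks roughly like this (error handling trimmed):

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    /* Open a second /dev/fuse fd bound to the session behind session_fd. */
    static int clone_fuse_fd(int session_fd)
    {
            uint32_t oldfd = session_fd;
            int clonefd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

            if (clonefd < 0)
                    return -1;
            if (ioctl(clonefd, FUSE_DEV_IOC_CLONE, &oldfd) < 0) {
                    close(clonefd);
                    return -1;
            }
            return clonefd;  /* serves requests for the same connection */
    }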
@@ -2321,7 +2337,7 @@
 	.release	= fuse_dev_release,
 	.fasync		= fuse_dev_fasync,
 	.unlocked_ioctl = fuse_dev_ioctl,
-	.compat_ioctl   = fuse_dev_ioctl,
+	.compat_ioctl   = compat_ptr_ioctl,
 };
 EXPORT_SYMBOL_GPL(fuse_dev_operations);

--
Gitblit v1.6.2