From 05e59e5fb0064c97a1c10921ecd549f2d4a58565 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:14:40 +0000
Subject: [PATCH] add REDIRECT

---
 kernel/fs/btrfs/async-thread.c | 103 ++++++++++++++++-----------------------------
 1 file changed, 32 insertions(+), 71 deletions(-)

diff --git a/kernel/fs/btrfs/async-thread.c b/kernel/fs/btrfs/async-thread.c
index 21f8f47..43c8995 100644
--- a/kernel/fs/btrfs/async-thread.c
+++ b/kernel/fs/btrfs/async-thread.c
@@ -12,9 +12,11 @@
 #include "async-thread.h"
 #include "ctree.h"
 
-#define WORK_DONE_BIT 0
-#define WORK_ORDER_DONE_BIT 1
-#define WORK_HIGH_PRIO_BIT 2
+enum {
+	WORK_DONE_BIT,
+	WORK_ORDER_DONE_BIT,
+	WORK_HIGH_PRIO_BIT,
+};
 
 #define NO_THRESHOLD (-1)
 #define DFT_THRESHOLD (32)
@@ -51,24 +53,12 @@
 	struct __btrfs_workqueue *high;
 };
 
-static void normal_work_helper(struct btrfs_work *work);
-
-#define BTRFS_WORK_HELPER(name)						\
-noinline_for_stack void btrfs_##name(struct work_struct *arg)		\
-{									\
-	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
-					       normal_work);		\
-	normal_work_helper(work);					\
-}
-
-struct btrfs_fs_info *
-btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
 {
 	return wq->fs_info;
 }
 
-struct btrfs_fs_info *
-btrfs_work_owner(const struct btrfs_work *work)
+struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
 {
 	return work->wq->fs_info;
 }
@@ -86,29 +76,6 @@
 	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
 }
 
-BTRFS_WORK_HELPER(worker_helper);
-BTRFS_WORK_HELPER(delalloc_helper);
-BTRFS_WORK_HELPER(flush_delalloc_helper);
-BTRFS_WORK_HELPER(cache_helper);
-BTRFS_WORK_HELPER(submit_helper);
-BTRFS_WORK_HELPER(fixup_helper);
-BTRFS_WORK_HELPER(endio_helper);
-BTRFS_WORK_HELPER(endio_meta_helper);
-BTRFS_WORK_HELPER(endio_meta_write_helper);
-BTRFS_WORK_HELPER(endio_raid56_helper);
-BTRFS_WORK_HELPER(endio_repair_helper);
-BTRFS_WORK_HELPER(rmw_helper);
-BTRFS_WORK_HELPER(endio_write_helper);
-BTRFS_WORK_HELPER(freespace_write_helper);
-BTRFS_WORK_HELPER(delayed_meta_helper);
-BTRFS_WORK_HELPER(readahead_helper);
-BTRFS_WORK_HELPER(qgroup_rescan_helper);
-BTRFS_WORK_HELPER(extent_refs_helper);
-BTRFS_WORK_HELPER(scrub_helper);
-BTRFS_WORK_HELPER(scrubwrc_helper);
-BTRFS_WORK_HELPER(scrubnc_helper);
-BTRFS_WORK_HELPER(scrubparity_helper);
 
 static struct __btrfs_workqueue *
 __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
@@ -139,13 +106,11 @@
 	}
 
 	if (flags & WQ_HIGHPRI)
-		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
-						 ret->current_active, "btrfs",
-						 name);
+		ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
+						 ret->current_active, name);
 	else
-		ret->normal_wq = alloc_workqueue("%s-%s", flags,
-						 ret->current_active, "btrfs",
-						 name);
+		ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
+						 ret->current_active, name);
 	if (!ret->normal_wq) {
 		kfree(ret);
 		return NULL;
@@ -259,7 +224,6 @@
 	struct btrfs_work *work;
 	spinlock_t *lock = &wq->list_lock;
 	unsigned long flags;
-	void *wtag;
 	bool free_self = false;
 
 	while (1) {
@@ -309,39 +273,39 @@
 			 * original work item cannot depend on the recycled work
 			 * item in that case (see find_worker_executing_work()).
 			 *
-			 * Note that the work of one Btrfs filesystem may depend
-			 * on the work of another Btrfs filesystem via, e.g., a
-			 * loop device. Therefore, we must not allow the current
-			 * work item to be recycled until we are really done,
-			 * otherwise we break the above assumption and can
-			 * deadlock.
+			 * Note that different types of Btrfs work can depend on
+			 * each other, and one type of work on one Btrfs
+			 * filesystem may even depend on the same type of work
+			 * on another Btrfs filesystem via, e.g., a loop device.
+			 * Therefore, we must not allow the current work item to
+			 * be recycled until we are really done, otherwise we
+			 * break the above assumption and can deadlock.
 			 */
 			free_self = true;
 		} else {
 			/*
 			 * We don't want to call the ordered free functions with
-			 * the lock held though. Save the work as tag for the
-			 * trace event, because the callback could free the
-			 * structure.
+			 * the lock held.
 			 */
-			wtag = work;
 			work->ordered_free(work);
-			trace_btrfs_all_work_done(wq->fs_info, wtag);
+			/* NB: work must not be dereferenced past this point. */
+			trace_btrfs_all_work_done(wq->fs_info, work);
 		}
 	}
 	spin_unlock_irqrestore(lock, flags);
 
 	if (free_self) {
-		wtag = self;
 		self->ordered_free(self);
-		trace_btrfs_all_work_done(wq->fs_info, wtag);
+		/* NB: self must not be dereferenced past this point. */
+		trace_btrfs_all_work_done(wq->fs_info, self);
 	}
 }
 
-static void normal_work_helper(struct btrfs_work *work)
+static void btrfs_work_helper(struct work_struct *normal_work)
 {
+	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
+					       normal_work);
 	struct __btrfs_workqueue *wq;
-	void *wtag;
 	int need_order = 0;
 
 	/*
@@ -355,8 +319,6 @@
 	if (work->ordered_func)
 		need_order = 1;
 	wq = work->wq;
-	/* Safe for tracepoints in case work gets freed by the callback */
-	wtag = work;
 
 	trace_btrfs_work_sched(work);
 	thresh_exec_hook(wq);
@@ -371,20 +333,19 @@
 		smp_mb__before_atomic();
 		set_bit(WORK_DONE_BIT, &work->flags);
 		run_ordered_work(wq, work);
+	} else {
+		/* NB: work must not be dereferenced past this point. */
+		trace_btrfs_all_work_done(wq->fs_info, work);
 	}
-	if (!need_order)
-		trace_btrfs_all_work_done(wq->fs_info, wtag);
 }
 
-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
-		     btrfs_func_t func,
-		     btrfs_func_t ordered_func,
-		     btrfs_func_t ordered_free)
+void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
+		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
 {
 	work->func = func;
 	work->ordered_func = ordered_func;
 	work->ordered_free = ordered_free;
-	INIT_WORK(&work->normal_work, uniq_func);
+	INIT_WORK(&work->normal_work, btrfs_work_helper);
 	INIT_LIST_HEAD(&work->ordered_list);
 	work->flags = 0;
 }
--
Gitblit v1.6.2
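
A note on the caller-side impact (not visible in this hunk): btrfs_init_work()
loses its first argument and the BTRFS_WORK_HELPER()-generated entry points go
away, so every caller has to drop the helper in the same series or the build
breaks. A minimal sketch of the conversion, assuming an endio-style caller
(the end_io_wq / end_workqueue_fn names are illustrative, modeled on
fs/btrfs/disk-io.c of this era; the actual caller updates are separate
changes):

	/* Before: the work_struct entry point was the per-type helper
	 * generated by BTRFS_WORK_HELPER(endio_helper). */
	btrfs_init_work(&end_io_wq->work, btrfs_endio_helper,
			end_workqueue_fn, NULL, NULL);

	/* After: every btrfs_work is dispatched through the single static
	 * btrfs_work_helper(), so only the three callbacks remain. */
	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);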
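
The "NB: work must not be dereferenced past this point" comments rely on
trace_btrfs_all_work_done() recording its second argument by value only, which
is why passing a just-freed pointer is safe. Abbreviated from the event
definition in include/trace/events/btrfs.h as found in trees of this vintage
(shown only to make the point; verify against your tree):

	TRACE_EVENT(btrfs_all_work_done,
		TP_PROTO(const struct btrfs_fs_info *fs_info, const void *wtag),
		TP_ARGS(fs_info, wtag),
		TP_STRUCT__entry_btrfs(
			__field(const void *, wtag)
		),
		TP_fast_assign_btrfs(fs_info,
			__entry->wtag = wtag;	/* the address is stored; *wtag is never read */
		),
		TP_printk_btrfs("wtag=%p", __entry->wtag)
	);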