.. | .. |
---|
9 | 9 | * task_work_add - ask the @task to execute @work->func() |
---|
10 | 10 | * @task: the task which should run the callback |
---|
11 | 11 | * @work: the callback to run |
---|
12 | | - * @notify: send the notification if true |
---|
| 12 | + * @notify: how to notify the targeted task |
---|
13 | 13 | * |
---|
14 | | - * Queue @work for task_work_run() below and notify the @task if @notify. |
---|
15 | | - * Fails if the @task is exiting/exited and thus it can't process this @work. |
---|
16 | | - * Otherwise @work->func() will be called when the @task returns from kernel |
---|
17 | | - * mode or exits. |
---|
| 14 | + * Queue @work for task_work_run() below and notify the @task if @notify |
---|
| 15 | + * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that |
---|
| 16 | + * it will interrupt the targeted task and run the task_work. @TWA_RESUME |
---|
| 17 | + * work is run only when the task exits the kernel and returns to user mode, |
---|
| 18 | + * or before entering guest mode. Fails if the @task is exiting/exited and thus |
---|
| 19 | + * it can't process this @work. Otherwise @work->func() will be called when the |
---|
| 20 | + * @task goes through one of the aforementioned transitions, or exits. |
---|
18 | 21 | * |
---|
19 | | - * This is like the signal handler which runs in kernel mode, but it doesn't |
---|
20 | | - * try to wake up the @task. |
---|
| 22 | + * If the targeted task is exiting, then an error is returned and the work item |
---|
| 23 | + * is not queued. It's up to the caller to arrange for an alternative mechanism |
---|
| 24 | + * in that case. |
---|
21 | 25 | * |
---|
22 | | - * Note: there is no ordering guarantee on works queued here. |
---|
| 26 | + * Note: there is no ordering guarantee on works queued here. The task_work |
---|
| 27 | + * list is LIFO. |
---|
23 | 28 | * |
---|
24 | 29 | * RETURNS: |
---|
25 | 30 | * 0 if succeeds or -ESRCH. |
---|
26 | 31 | */ |
---|
27 | | -int |
---|
28 | | -task_work_add(struct task_struct *task, struct callback_head *work, bool notify) |
---|
| 32 | +int task_work_add(struct task_struct *task, struct callback_head *work, |
---|
| 33 | + enum task_work_notify_mode notify) |
---|
29 | 34 | { |
---|
30 | 35 | struct callback_head *head; |
---|
| 36 | + |
---|
| 37 | + /* record the work call stack in order to print it in KASAN reports */ |
---|
| 38 | + kasan_record_aux_stack(work); |
---|
31 | 39 | |
---|
32 | 40 | do { |
---|
33 | 41 | head = READ_ONCE(task->task_works); |
---|
.. | .. |
---|
36 | 44 | work->next = head; |
---|
37 | 45 | } while (cmpxchg(&task->task_works, head, work) != head); |
---|
38 | 46 | |
---|
39 | | - if (notify) |
---|
| 47 | + switch (notify) { |
---|
| 48 | + case TWA_NONE: |
---|
| 49 | + break; |
---|
| 50 | + case TWA_RESUME: |
---|
40 | 51 | set_notify_resume(task); |
---|
| 52 | + break; |
---|
| 53 | + case TWA_SIGNAL: |
---|
| 54 | + set_notify_signal(task); |
---|
| 55 | + break; |
---|
| 56 | + default: |
---|
| 57 | + WARN_ON_ONCE(1); |
---|
| 58 | + break; |
---|
| 59 | + } |
---|
| 60 | + |
---|
41 | 61 | return 0; |
---|
| 62 | +} |
---|
| 63 | + |
---|
| 64 | +/** |
---|
| 65 | + * task_work_cancel_match - cancel a pending work added by task_work_add() |
---|
| 66 | + * @task: the task which should execute the work |
---|
| 67 | + * @match: match function to call |
---|
| 68 | + * |
---|
| 69 | + * RETURNS: |
---|
| 70 | + * The found work or NULL if not found. |
---|
| 71 | + */ |
---|
| 72 | +struct callback_head * |
---|
| 73 | +task_work_cancel_match(struct task_struct *task, |
---|
| 74 | + bool (*match)(struct callback_head *, void *data), |
---|
| 75 | + void *data) |
---|
| 76 | +{ |
---|
| 77 | + struct callback_head **pprev = &task->task_works; |
---|
| 78 | + struct callback_head *work; |
---|
| 79 | + unsigned long flags; |
---|
| 80 | + |
---|
| 81 | + if (likely(!task->task_works)) |
---|
| 82 | + return NULL; |
---|
| 83 | + /* |
---|
| 84 | + * If cmpxchg() fails we continue without updating pprev. |
---|
| 85 | + * Either we raced with task_work_add() which added the |
---|
| 86 | + * new entry before this work, we will find it again. Or |
---|
| 87 | + * we raced with task_work_run(), *pprev == NULL/exited. |
---|
| 88 | + */ |
---|
| 89 | + raw_spin_lock_irqsave(&task->pi_lock, flags); |
---|
| 90 | + while ((work = READ_ONCE(*pprev))) { |
---|
| 91 | + if (!match(work, data)) |
---|
| 92 | + pprev = &work->next; |
---|
| 93 | + else if (cmpxchg(pprev, work, work->next) == work) |
---|
| 94 | + break; |
---|
| 95 | + } |
---|
| 96 | + raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
---|
| 97 | + |
---|
| 98 | + return work; |
---|
| 99 | +} |
---|
| 100 | + |
---|
| 101 | +static bool task_work_func_match(struct callback_head *cb, void *data) |
---|
| 102 | +{ |
---|
| 103 | + return cb->func == data; |
---|
42 | 104 | } |
---|
43 | 105 | |
---|
44 | 106 | /** |
---|
.. | .. |
---|
55 | 117 | struct callback_head * |
---|
56 | 118 | task_work_cancel(struct task_struct *task, task_work_func_t func) |
---|
57 | 119 | { |
---|
58 | | - struct callback_head **pprev = &task->task_works; |
---|
59 | | - struct callback_head *work; |
---|
60 | | - unsigned long flags; |
---|
61 | | - |
---|
62 | | - if (likely(!task->task_works)) |
---|
63 | | - return NULL; |
---|
64 | | - /* |
---|
65 | | - * If cmpxchg() fails we continue without updating pprev. |
---|
66 | | - * Either we raced with task_work_add() which added the |
---|
67 | | - * new entry before this work, we will find it again. Or |
---|
68 | | - * we raced with task_work_run(), *pprev == NULL/exited. |
---|
69 | | - */ |
---|
70 | | - raw_spin_lock_irqsave(&task->pi_lock, flags); |
---|
71 | | - while ((work = READ_ONCE(*pprev))) { |
---|
72 | | - if (work->func != func) |
---|
73 | | - pprev = &work->next; |
---|
74 | | - else if (cmpxchg(pprev, work, work->next) == work) |
---|
75 | | - break; |
---|
76 | | - } |
---|
77 | | - raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
---|
78 | | - |
---|
79 | | - return work; |
---|
| 120 | + return task_work_cancel_match(task, task_work_func_match, func); |
---|
80 | 121 | } |
---|
81 | 122 | |
---|
82 | 123 | /** |
---|
.. | .. |
---|
97 | 138 | * work->func() can do task_work_add(), do not set |
---|
98 | 139 | * work_exited unless the list is empty. |
---|
99 | 140 | */ |
---|
100 | | - raw_spin_lock_irq(&task->pi_lock); |
---|
101 | 141 | do { |
---|
| 142 | + head = NULL; |
---|
102 | 143 | work = READ_ONCE(task->task_works); |
---|
103 | | - head = !work && (task->flags & PF_EXITING) ? |
---|
104 | | - &work_exited : NULL; |
---|
| 144 | + if (!work) { |
---|
| 145 | + if (task->flags & PF_EXITING) |
---|
| 146 | + head = &work_exited; |
---|
| 147 | + else |
---|
| 148 | + break; |
---|
| 149 | + } |
---|
105 | 150 | } while (cmpxchg(&task->task_works, work, head) != work); |
---|
106 | | - raw_spin_unlock_irq(&task->pi_lock); |
---|
107 | 151 | |
---|
108 | 152 | if (!work) |
---|
109 | 153 | break; |
---|
| 154 | + /* |
---|
| 155 | + * Synchronize with task_work_cancel(). It can not remove |
---|
| 156 | + * the first entry == work, cmpxchg(task_works) must fail. |
---|
| 157 | + * But it can remove another entry from the ->next list. |
---|
| 158 | + */ |
---|
| 159 | + raw_spin_lock_irq(&task->pi_lock); |
---|
| 160 | + raw_spin_unlock_irq(&task->pi_lock); |
---|
110 | 161 | |
---|
111 | 162 | do { |
---|
112 | 163 | next = work->next; |
---|