.. | ..
7 | 7 | #include <linux/list.h>
8 | 8 | #include <linux/stddef.h>
9 | 9 | #include <linux/spinlock.h>
10 | | -#include <linux/sched/debug.h>
11 | 10 |
12 | 11 | #include <asm/current.h>
13 | 12 | #include <uapi/linux/wait.h>
.. | ..
21 | 20 | #define WQ_FLAG_EXCLUSIVE 0x01
22 | 21 | #define WQ_FLAG_WOKEN 0x02
23 | 22 | #define WQ_FLAG_BOOKMARK 0x04
| 23 | +#define WQ_FLAG_CUSTOM 0x08
| 24 | +#define WQ_FLAG_DONE 0x10
24 | 25 |
25 | 26 | /*
26 | 27 | * A single wait-queue entry structure:
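The two new entry flags widen the per-waiter flag space. This hunk alone does not show their users, but the apparent intent is that WQ_FLAG_CUSTOM tags entries whose wake function carries caller-specific semantics, while WQ_FLAG_DONE lets a wake function publish "the wakeup really happened" back to the waiter. A minimal sketch of that handshake, with an invented wake function (my_wake_function and the surrounding protocol are illustrative, not from this patch):

```c
/* Illustrative custom wake function: on a successful wakeup, detach
 * the entry and publish WQ_FLAG_DONE so the waiter can distinguish a
 * genuine handoff from a spurious wakeup. */
static int my_wake_function(struct wait_queue_entry *wq_entry,
			    unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret) {
		list_del_init(&wq_entry->entry);
		/* Order the list removal before the flag becomes visible. */
		smp_store_mb(wq_entry->flags, wq_entry->flags | WQ_FLAG_DONE);
	}
	return ret;
}
```

A waiter would queue itself with WQ_FLAG_CUSTOM set (e.g. via init_wait_entry(&wait, WQ_FLAG_CUSTOM)) and, after returning from schedule(), treat wait.flags & WQ_FLAG_DONE as the authoritative "done" signal.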
.. | ..
102 | 103 | * lead to sporadic and non-obvious failure.
103 | 104 | *
104 | 105 | * Use either while holding wait_queue_head::lock or when used for wakeups
105 | | - * with an extra smp_mb() like:
| 106 | + * with an extra smp_mb() like::
106 | 107 | *
107 | 108 | * CPU0 - waker CPU1 - waiter
108 | 109 | *
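The change from `:` to `::` turns the example into a reST literal block for kernel-doc rendering. Written out sequentially, the pattern the comment goes on to show is the classic waitqueue_active() idiom: the waker's smp_mb() pairs with the barrier implied by set_current_state() inside prepare_to_wait(), so neither side can miss the other's store:

```c
/* CPU0 - waker */
cond = true;
smp_mb();	/* pairs with set_current_state() on the waiter side */
if (waitqueue_active(&wq_head))
	wake_up(&wq_head);

/* CPU1 - waiter */
for (;;) {
	prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
	if (cond)	/* cannot be missed: load ordered after queueing */
		break;
	schedule();
}
finish_wait(&wq_head, &wait);
```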
.. | ..
125 | 126 | static inline int waitqueue_active(struct wait_queue_head *wq_head)
126 | 127 | {
127 | 128 | return !list_empty(&wq_head->head);
| 129 | +}
| 130 | +
| 131 | +/**
| 132 | + * wq_has_single_sleeper - check if there is only one sleeper
| 133 | + * @wq_head: wait queue head
| 134 | + *
| 135 | + * Returns true if wq_head has only one sleeper on the list.
| 136 | + *
| 137 | + * Please refer to the comment for waitqueue_active.
| 138 | + */
| 139 | +static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
| 140 | +{
| 141 | + return list_is_singular(&wq_head->head);
128 | 142 | }
129 | 143 |
130 | 144 | /**
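wq_has_single_sleeper() is the same kind of unlocked list peek as waitqueue_active(), hence the pointer back to that comment: it is only trustworthy under wait_queue_head::lock or with equivalent barriers. A hypothetical fast path it could drive (the batching helper is invented for the sketch):

```c
static DECLARE_WAIT_QUEUE_HEAD(wq_head);

/* Hand off directly when exactly one task sleeps; otherwise defer to
 * a batched wakeup. defer_batched_wakeup() is an invented helper. */
static void kick_waiters(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head.lock, flags);
	if (wq_has_single_sleeper(&wq_head))
		__wake_up_locked(&wq_head, TASK_NORMAL, 1);
	else
		defer_batched_wakeup();
	spin_unlock_irqrestore(&wq_head.lock, flags);
}
```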
.. | ..
189 | 203 | void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
190 | 204 | void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
191 | 205 | unsigned int mode, void *key, wait_queue_entry_t *bookmark);
192 | | -void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
| 206 | +void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
| 207 | +void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
193 | 208 | void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
194 | | -void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
| 209 | +void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
195 | 210 | void __wake_up_pollfree(struct wait_queue_head *wq_head);
196 | 211 |
197 | 212 | #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
.. | ..
203 | 218 | #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
204 | 219 | #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
205 | 220 | #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
206 | | -#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
| 221 | +#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE)
| 222 | +#define wake_up_sync(x) __wake_up_sync((x), TASK_NORMAL)
207 | 223 |
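Evidently no caller passed nr != 1 to the sync variants, so the nr_exclusive parameter was dropped from __wake_up_sync{,_key}(): a "sync" wakeup is a hint that the waker is about to sleep itself, which pairs naturally with waking exactly one task. wake_up_sync() adds the previously missing TASK_NORMAL spelling. A hypothetical producer-side use (the helpers are invented):

```c
static DECLARE_WAIT_QUEUE_HEAD(consumer_wq);

static void hand_off_work(struct work_item *item)
{
	enqueue_item(item);		/* invented queueing helper */
	/* Hint to the scheduler: we are about to block, so the woken
	 * consumer may simply take over this CPU. */
	wake_up_interruptible_sync(&consumer_wq);
	wait_for_more_work();		/* invented; producer sleeps now */
}
```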
208 | 224 | /*
209 | 225 | * Wakeup macros to be used to report events to the targets.
.. | ..
217 | 233 | #define wake_up_interruptible_poll(x, m) \
218 | 234 | __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
219 | 235 | #define wake_up_interruptible_sync_poll(x, m) \
220 | | - __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
| 236 | + __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
| 237 | +#define wake_up_interruptible_sync_poll_locked(x, m) \
| 238 | + __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
221 | 239 |
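wake_up_interruptible_sync_poll_locked() presumably serves callers that already hold wait_queue_head::lock when a poll event becomes ready; it routes to the new __wake_up_locked_sync_key() instead of taking the lock a second time. A hypothetical sketch (poll_wq, msg_queue and the bookkeeping are invented):

```c
/* Deliver EPOLLIN while already inside the waitqueue lock, e.g. when
 * the same lock also guards the message queue itself. */
static void queue_and_notify(struct msg *m)
{
	unsigned long flags;

	spin_lock_irqsave(&poll_wq.lock, flags);
	list_add_tail(&m->entry, &msg_queue);	/* invented bookkeeping */
	wake_up_interruptible_sync_poll_locked(&poll_wq, EPOLLIN);
	spin_unlock_irqrestore(&poll_wq.lock, flags);
}
```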
222 | 240 | /**
223 | 241 | * wake_up_pollfree - signal that a polled waitqueue is going away
.. | ..
335 | 353 |
336 | 354 | #define __wait_event_freezable(wq_head, condition) \
337 | 355 | ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
338 | | - schedule(); try_to_freeze())
| 356 | + freezable_schedule())
339 | 357 |
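Replacing the schedule(); try_to_freeze() pair with freezable_schedule() is a behavioral fix, not just a tidy-up: it marks the task as freezer-skippable for the duration of the sleep, so the freezer does not stall waiting for a task that is blocked on the condition. At the time of this change it was defined in include/linux/freezer.h essentially as:

```c
/* Paraphrased from include/linux/freezer.h of this era: */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();	/* freezer may skip this sleeping task */
	schedule();
	freezer_count();	/* unskip; freeze now if a freeze is pending */
}
```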
340 | 358 | /**
341 | 359 | * wait_event_freezable - sleep (or freeze) until a condition gets true
.. | ..
394 | 412 | #define __wait_event_freezable_timeout(wq_head, condition, timeout) \
395 | 413 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \
396 | 414 | TASK_INTERRUPTIBLE, 0, timeout, \
397 | | - __ret = schedule_timeout(__ret); try_to_freeze())
| 415 | + __ret = freezable_schedule_timeout(__ret))
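The timeout variant gets the same conversion: freezable_schedule_timeout() wraps schedule_timeout() in the identical freezer_do_not_count()/freezer_count() bracket.

```c
/* Paraphrased from include/linux/freezer.h of this era: */
static inline long freezable_schedule_timeout(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout(timeout);
	freezer_count();
	return __retval;
}
```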
398 | 416 |
399 | 417 | /*
400 | 418 | * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
.. | ..
515 | 533 | int __ret = 0; \
516 | 534 | struct hrtimer_sleeper __t; \
517 | 535 | \
518 | | - hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \
519 | | - hrtimer_init_sleeper(&__t, current); \
520 | | - if ((timeout) != KTIME_MAX) \
521 | | - hrtimer_start_range_ns(&__t.timer, timeout, \
522 | | - current->timer_slack_ns, \
523 | | - HRTIMER_MODE_REL); \
| 536 | + hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
| 537 | + HRTIMER_MODE_REL); \
| 538 | + if ((timeout) != KTIME_MAX) { \
| 539 | + hrtimer_set_expires_range_ns(&__t.timer, timeout, \
| 540 | + current->timer_slack_ns); \
| 541 | + hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \
| 542 | + } \
524 | 543 | \
525 | 544 | __ret = ___wait_event(wq_head, condition, state, 0, 0, \
526 | 545 | if (!__t.task) { \
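This follows the hrtimer sleeper API consolidation (merged around v5.4): hrtimer_init_sleeper_on_stack() subsumes the old hrtimer_init_on_stack() + hrtimer_init_sleeper(current) pair and binds the sleeper to the current task at init time, while hrtimer_sleeper_start_expires() arms the timer from the expiry programmed by hrtimer_set_expires_range_ns(). Isolated from the macro plumbing, the new pattern reads:

```c
struct hrtimer_sleeper t;

hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
if (timeout != KTIME_MAX) {
	hrtimer_set_expires_range_ns(&t.timer, timeout,
				     current->timer_slack_ns);
	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
}
/* ... sleep; the hrtimer callback clears t.task on expiry ... */
hrtimer_cancel(&t.timer);
destroy_hrtimer_on_stack(&t.timer);
```

The elided tail of the macro performs the matching hrtimer_cancel()/destroy_hrtimer_on_stack() cleanup.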
.. | ..
615 | 634 |
616 | 635 | #define __wait_event_freezable_exclusive(wq, condition) \
617 | 636 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
618 | | - schedule(); try_to_freeze())
| 637 | + freezable_schedule())
619 | 638 |
620 | 639 | #define wait_event_freezable_exclusive(wq, condition) \
621 | 640 | ({ \
.. | ..
1135 | 1154 | * Waitqueues which are removed from the waitqueue_head at wakeup time
1136 | 1155 | */
1137 | 1156 | void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1138 | | -void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
| 1157 | +bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
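prepare_to_wait_exclusive() now returns a bool. Its body is outside this hunk, but the implementation in kernel/sched/wait.c returns whether the wait queue was empty when the entry was queued, letting an exclusive waiter detect that it is first in line. Hypothetical use of the return value:

```c
/* opportunistic_fast_path() is invented for illustration. */
if (prepare_to_wait_exclusive(&wq_head, &wait, TASK_UNINTERRUPTIBLE))
	opportunistic_fast_path();	/* nobody was queued ahead of us */
```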
1139 | 1158 | long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1140 | 1159 | void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1141 | | -long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode,
1142 | | - long timeout);
1143 | | -int __sched woken_wake_function(struct wait_queue_entry *wq_entry,
1144 | | - unsigned int mode, int sync, void *key);
1145 | | -int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry,
1146 | | - unsigned int mode, int sync, void *key);
| 1160 | +long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
| 1161 | +int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
| 1162 | +int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1147 | 1163 |
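Dropping __sched from these declarations is cosmetic: the attribute places a function in .sched.text (hiding it from wchan output), which only matters where the functions are defined in kernel/sched/wait.c. For context, the canonical wait_woken() loop looks roughly like this (wq_head, condition and timeout are placeholders):

```c
DEFINE_WAIT_FUNC(wait, woken_wake_function);

add_wait_queue(&wq_head, &wait);
for (;;) {
	if (condition)
		break;
	timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	if (!timeout || signal_pending(current))
		break;
}
remove_wait_queue(&wq_head, &wait);
```

wait_woken() handles the set_current_state()/condition ordering internally via WQ_FLAG_WOKEN, which is why the loop needs no prepare_to_wait()/finish_wait() bracket.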
1148 | 1164 | #define DEFINE_WAIT_FUNC(name, function) \
1149 | 1165 | struct wait_queue_entry name = { \
.. | ..
1162 | 1178 | (wait)->flags = 0; \
1163 | 1179 | } while (0)
1164 | 1180 |
| 1181 | +bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
| 1182 | +
1165 | 1183 | #endif /* _LINUX_WAIT_H */
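try_invoke_on_locked_down_task() arrives with no kernel-doc in this header; per its implementation in kernel/sched/core.c of this period, it pins @p in its current scheduling state (blocking concurrent wakeups and runqueue operations), invokes @func(p, arg), and returns @func's result, or false if the task could not be locked down. A hedged sketch, with an invented callback:

```c
/* Invented diagnostic: sample a task's state while the scheduler
 * guarantees it can neither run nor be woken underneath us. */
static bool sample_state(struct task_struct *t, void *arg)
{
	*(long *)arg = t->state;
	return true;
}

static void report_task(struct task_struct *p)
{
	long state;

	if (try_invoke_on_locked_down_task(p, sample_state, &state))
		pr_info("%s: state=%ld\n", p->comm, state);
}
```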