| .. | .. |
|---|
| 7 | 7 | #include <linux/list.h> |
|---|
| 8 | 8 | #include <linux/stddef.h> |
|---|
| 9 | 9 | #include <linux/spinlock.h> |
|---|
| 10 | | -#include <linux/sched/debug.h> |
|---|
| 11 | 10 | |
|---|
| 12 | 11 | #include <asm/current.h> |
|---|
| 13 | 12 | #include <uapi/linux/wait.h> |
|---|
| 14 | | -#include <linux/atomic.h> |
|---|
| 15 | 13 | |
|---|
| 16 | 14 | typedef struct wait_queue_entry wait_queue_entry_t; |
|---|
| 17 | 15 | |
|---|
| .. | .. |
|---|
| 22 | 20 | #define WQ_FLAG_EXCLUSIVE 0x01 |
|---|
| 23 | 21 | #define WQ_FLAG_WOKEN 0x02 |
|---|
| 24 | 22 | #define WQ_FLAG_BOOKMARK 0x04 |
|---|
| 23 | +#define WQ_FLAG_CUSTOM 0x08 |
|---|
| 24 | +#define WQ_FLAG_DONE 0x10 |
|---|
| 25 | 25 | |
|---|
| 26 | 26 | /* |
|---|
| 27 | 27 | * A single wait-queue entry structure: |
|---|
| .. | .. |
|---|
| 103 | 103 | * lead to sporadic and non-obvious failure. |
|---|
| 104 | 104 | * |
|---|
| 105 | 105 | * Use either while holding wait_queue_head::lock or when used for wakeups |
|---|
| 106 | | - * with an extra smp_mb() like: |
|---|
| 106 | + * with an extra smp_mb() like:: |
|---|
| 107 | 107 | * |
|---|
| 108 | 108 | * CPU0 - waker CPU1 - waiter |
|---|
| 109 | 109 | * |
|---|
| .. | .. |
|---|
| 126 | 126 | static inline int waitqueue_active(struct wait_queue_head *wq_head) |
|---|
| 127 | 127 | { |
|---|
| 128 | 128 | return !list_empty(&wq_head->head); |
|---|
| 129 | +} |
|---|
| 130 | + |
|---|
| 131 | +/** |
|---|
| 132 | + * wq_has_single_sleeper - check if there is only one sleeper |
|---|
| 133 | + * @wq_head: wait queue head |
|---|
| 134 | + * |
|---|
| 135 | + * Returns true if wq_head has only one sleeper on the list. |
|---|
| 136 | + * |
|---|
| 137 | + * Please refer to the comment for waitqueue_active. |
|---|
| 138 | + */ |
|---|
| 139 | +static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) |
|---|
| 140 | +{ |
|---|
| 141 | + return list_is_singular(&wq_head->head); |
|---|
| 129 | 142 | } |
|---|
| 130 | 143 | |
|---|
| 131 | 144 | /** |
|---|
| .. | .. |
|---|
| 190 | 203 | void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); |
|---|
| 191 | 204 | void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, |
|---|
| 192 | 205 | unsigned int mode, void *key, wait_queue_entry_t *bookmark); |
|---|
| 193 | | -void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); |
|---|
| 206 | +void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); |
|---|
| 207 | +void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); |
|---|
| 194 | 208 | void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); |
|---|
| 195 | | -void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); |
|---|
| 209 | +void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); |
|---|
| 196 | 210 | void __wake_up_pollfree(struct wait_queue_head *wq_head); |
|---|
| 197 | 211 | |
|---|
| 198 | 212 | #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) |
|---|
| .. | .. |
|---|
| 204 | 218 | #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) |
|---|
| 205 | 219 | #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) |
|---|
| 206 | 220 | #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) |
|---|
| 207 | | -#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) |
|---|
| 221 | +#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE) |
|---|
| 222 | +#define wake_up_sync(x) __wake_up_sync((x), TASK_NORMAL) |
|---|
| 208 | 223 | |
|---|
| 209 | 224 | /* |
|---|
| 210 | 225 | * Wakeup macros to be used to report events to the targets. |
|---|
| .. | .. |
|---|
| 218 | 233 | #define wake_up_interruptible_poll(x, m) \ |
|---|
| 219 | 234 | __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m)) |
|---|
| 220 | 235 | #define wake_up_interruptible_sync_poll(x, m) \ |
|---|
| 221 | | - __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m)) |
|---|
| 236 | + __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) |
|---|
| 237 | +#define wake_up_interruptible_sync_poll_locked(x, m) \ |
|---|
| 238 | + __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) |
|---|
| 222 | 239 | |
|---|
| 223 | 240 | /** |
|---|
| 224 | 241 | * wake_up_pollfree - signal that a polled waitqueue is going away |
|---|
| .. | .. |
|---|
| 336 | 353 | |
|---|
| 337 | 354 | #define __wait_event_freezable(wq_head, condition) \ |
|---|
| 338 | 355 | ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
|---|
| 339 | | - schedule(); try_to_freeze()) |
|---|
| 356 | + freezable_schedule()) |
|---|
| 340 | 357 | |
|---|
| 341 | 358 | /** |
|---|
| 342 | 359 | * wait_event_freezable - sleep (or freeze) until a condition gets true |
|---|
| .. | .. |
|---|
| 395 | 412 | #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ |
|---|
| 396 | 413 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
|---|
| 397 | 414 | TASK_INTERRUPTIBLE, 0, timeout, \ |
|---|
| 398 | | - __ret = schedule_timeout(__ret); try_to_freeze()) |
|---|
| 415 | + __ret = freezable_schedule_timeout(__ret)) |
|---|
| 399 | 416 | |
|---|
| 400 | 417 | /* |
|---|
| 401 | 418 | * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid |
|---|
| .. | .. |
|---|
| 516 | 533 | int __ret = 0; \ |
|---|
| 517 | 534 | struct hrtimer_sleeper __t; \ |
|---|
| 518 | 535 | \ |
|---|
| 519 | | - hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, HRTIMER_MODE_REL, \ |
|---|
| 520 | | - current); \ |
|---|
| 521 | | - if ((timeout) != KTIME_MAX) \ |
|---|
| 522 | | - hrtimer_start_range_ns(&__t.timer, timeout, \ |
|---|
| 523 | | - current->timer_slack_ns, \ |
|---|
| 524 | | - HRTIMER_MODE_REL); \ |
|---|
| 536 | + hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ |
|---|
| 537 | + HRTIMER_MODE_REL); \ |
|---|
| 538 | + if ((timeout) != KTIME_MAX) { \ |
|---|
| 539 | + hrtimer_set_expires_range_ns(&__t.timer, timeout, \ |
|---|
| 540 | + current->timer_slack_ns); \ |
|---|
| 541 | + hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ |
|---|
| 542 | + } \ |
|---|
| 525 | 543 | \ |
|---|
| 526 | 544 | __ret = ___wait_event(wq_head, condition, state, 0, 0, \ |
|---|
| 527 | 545 | if (!__t.task) { \ |
|---|
| .. | .. |
|---|
| 616 | 634 | |
|---|
| 617 | 635 | #define __wait_event_freezable_exclusive(wq, condition) \ |
|---|
| 618 | 636 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ |
|---|
| 619 | | - schedule(); try_to_freeze()) |
|---|
| 637 | + freezable_schedule()) |
|---|
| 620 | 638 | |
|---|
| 621 | 639 | #define wait_event_freezable_exclusive(wq, condition) \ |
|---|
| 622 | 640 | ({ \ |
|---|
| .. | .. |
|---|
| 1136 | 1154 | * Waitqueues which are removed from the waitqueue_head at wakeup time |
|---|
| 1137 | 1155 | */ |
|---|
| 1138 | 1156 | void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); |
|---|
| 1139 | | -void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); |
|---|
| 1157 | +bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); |
|---|
| 1140 | 1158 | long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); |
|---|
| 1141 | 1159 | void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); |
|---|
| 1142 | | -long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, |
|---|
| 1143 | | - long timeout); |
|---|
| 1144 | | -int __sched woken_wake_function(struct wait_queue_entry *wq_entry, |
|---|
| 1145 | | - unsigned int mode, int sync, void *key); |
|---|
| 1146 | | -int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry, |
|---|
| 1147 | | - unsigned int mode, int sync, void *key); |
|---|
| 1160 | +long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout); |
|---|
| 1161 | +int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); |
|---|
| 1162 | +int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); |
|---|
| 1148 | 1163 | |
|---|
| 1149 | 1164 | #define DEFINE_WAIT_FUNC(name, function) \ |
|---|
| 1150 | 1165 | struct wait_queue_entry name = { \ |
|---|
| .. | .. |
|---|
| 1163 | 1178 | (wait)->flags = 0; \ |
|---|
| 1164 | 1179 | } while (0) |
|---|
| 1165 | 1180 | |
|---|
| 1181 | +bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg); |
|---|
| 1182 | + |
|---|
| 1166 | 1183 | #endif /* _LINUX_WAIT_H */ |
|---|