2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
--- a/kernel/include/linux/wait.h
+++ b/kernel/include/linux/wait.h
@@ -7,7 +7,6 @@
 #include <linux/list.h>
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
-#include <linux/sched/debug.h>
 
 #include <asm/current.h>
 #include <uapi/linux/wait.h>
@@ -22,6 +21,8 @@
 #define WQ_FLAG_EXCLUSIVE	0x01
 #define WQ_FLAG_WOKEN		0x02
 #define WQ_FLAG_BOOKMARK	0x04
+#define WQ_FLAG_CUSTOM		0x08
+#define WQ_FLAG_DONE		0x10
 
 /*
  * A single wait-queue entry structure:
@@ -103,7 +104,7 @@
  * lead to sporadic and non-obvious failure.
  *
  * Use either while holding wait_queue_head::lock or when used for wakeups
- * with an extra smp_mb() like:
+ * with an extra smp_mb() like::
  *
  *      CPU0 - waker                    CPU1 - waiter
  *
@@ -126,6 +127,19 @@
 static inline int waitqueue_active(struct wait_queue_head *wq_head)
 {
 	return !list_empty(&wq_head->head);
+}
+
+/**
+ * wq_has_single_sleeper - check if there is only one sleeper
+ * @wq_head: wait queue head
+ *
+ * Returns true if wq_head has only one sleeper on the list.
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
+{
+	return list_is_singular(&wq_head->head);
 }
 
 /**
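
For context on how the new wq_has_single_sleeper() helper can be used, here is a minimal, hypothetical waker-side sketch (not part of this patch); struct my_dev and my_dev_signal_data() are illustrative names only. It pairs the lockless check with the smp_mb() ordering described in the waitqueue_active() comment above.

#include <linux/wait.h>
#include <linux/compiler.h>

/* Hypothetical device; only ->wq and ->data_ready matter for this sketch. */
struct my_dev {
	struct wait_queue_head wq;
	bool data_ready;
};

static void my_dev_signal_data(struct my_dev *dev)
{
	WRITE_ONCE(dev->data_ready, true);
	/* Pairs with the barrier the waiter gets from prepare_to_wait()/
	 * set_current_state(), as required by the waitqueue_active() rules. */
	smp_mb();
	if (!waitqueue_active(&dev->wq))
		return;
	if (wq_has_single_sleeper(&dev->wq))
		wake_up(&dev->wq);	/* one sleeper: a targeted wakeup suffices */
	else
		wake_up_all(&dev->wq);
}
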
@@ -190,9 +204,10 @@
 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
 		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
-void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
-void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
@@ -204,7 +219,8 @@
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
+#define wake_up_sync(x)			__wake_up_sync((x), TASK_NORMAL)
 
 /*
  * Wakeup macros to be used to report events to the targets.
@@ -218,7 +234,9 @@
 #define wake_up_interruptible_poll(x, m)				\
 	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
 #define wake_up_interruptible_sync_poll(x, m)				\
-	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
+	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+#define wake_up_interruptible_sync_poll_locked(x, m)			\
+	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 
 /**
  * wake_up_pollfree - signal that a polled waitqueue is going away
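
A hedged usage sketch of the updated sync-wakeup helpers (illustrative only; struct my_pipe and my_pipe_data_ready() are hypothetical names): after this change the nr_exclusive argument is gone, so callers pass only the task state and the poll key.

#include <linux/wait.h>
#include <linux/poll.h>

struct my_pipe {
	struct wait_queue_head wq;	/* hypothetical reader wait queue */
};

static void my_pipe_data_ready(struct my_pipe *p)
{
	/* A "sync" wakeup hints that the waker is about to sleep or yield,
	 * so the scheduler may run the woken task on the current CPU. */
	wake_up_interruptible_sync_poll(&p->wq, EPOLLIN | EPOLLRDNORM);
}
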
@@ -336,7 +354,7 @@
 
 #define __wait_event_freezable(wq_head, condition)			\
 	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
-			schedule(); try_to_freeze())
+			freezable_schedule())
 
 /**
  * wait_event_freezable - sleep (or freeze) until a condition gets true
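
Caller-side view, as a hedged sketch (struct my_worker and its fields are hypothetical): wait_event_freezable() now sleeps via freezable_schedule(), so the sleeping task counts as frozen for the suspend/hibernate freezer instead of being woken up just to call try_to_freeze().

#include <linux/kthread.h>
#include <linux/wait.h>

struct my_worker {
	struct wait_queue_head wq;	/* hypothetical */
	bool work_pending;
};

static int my_worker_thread(void *arg)
{
	struct my_worker *w = arg;

	while (!kthread_should_stop()) {
		/* Returns 0 when the condition became true, -ERESTARTSYS if
		 * the interruptible sleep was broken by a signal. */
		if (wait_event_freezable(w->wq, w->work_pending ||
					 kthread_should_stop()))
			continue;
		w->work_pending = false;
		/* process the work here (omitted in this sketch) */
	}
	return 0;
}
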
@@ -395,7 +413,7 @@
 #define __wait_event_freezable_timeout(wq_head, condition, timeout)	\
 	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
 		      TASK_INTERRUPTIBLE, 0, timeout,			\
-		      __ret = schedule_timeout(__ret); try_to_freeze())
+		      __ret = freezable_schedule_timeout(__ret))
 
 /*
  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
@@ -516,12 +534,13 @@
 	int __ret = 0;							\
 	struct hrtimer_sleeper __t;					\
 									\
-	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, HRTIMER_MODE_REL, \
-				      current);				\
-	if ((timeout) != KTIME_MAX)					\
-		hrtimer_start_range_ns(&__t.timer, timeout,		\
-				       current->timer_slack_ns,		\
-				       HRTIMER_MODE_REL);		\
+	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,		\
+				      HRTIMER_MODE_REL);		\
+	if ((timeout) != KTIME_MAX) {					\
+		hrtimer_set_expires_range_ns(&__t.timer, timeout,	\
+					current->timer_slack_ns);	\
+		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);	\
+	}								\
 									\
 	__ret = ___wait_event(wq_head, condition, state, 0, 0,		\
 		if (!__t.task) {					\
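
The hunk above only changes how the on-stack hrtimer sleeper is set up and armed; the macro's interface is unchanged. A minimal, hypothetical caller for reference (wait_for_cond() and its arguments are illustrative):

#include <linux/wait.h>
#include <linux/ktime.h>
#include <linux/compiler.h>

/* Wait up to 500 microseconds for *cond to become true on queue *wq;
 * returns 0 if the condition was met, -ETIME on timeout. */
static int wait_for_cond(struct wait_queue_head *wq, bool *cond)
{
	return wait_event_hrtimeout(*wq, READ_ONCE(*cond), us_to_ktime(500));
}
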
@@ -616,7 +635,7 @@
 
 #define __wait_event_freezable_exclusive(wq, condition)			\
 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
-			schedule(); try_to_freeze())
+			freezable_schedule())
 
 #define wait_event_freezable_exclusive(wq, condition)			\
 ({									\
@@ -1136,15 +1155,12 @@
  * Waitqueues which are removed from the waitqueue_head at wakeup time
  */
 void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
-void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
 long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
 void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
-long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode,
-			long timeout);
-int __sched woken_wake_function(struct wait_queue_entry *wq_entry,
-				unsigned int mode, int sync, void *key);
-int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry,
-				     unsigned int mode, int sync, void *key);
+long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
+int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
+int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
 
 #define DEFINE_WAIT_FUNC(name, function)				\
 	struct wait_queue_entry name = {				\
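
Dropping the __sched annotations above does not change how these functions are called; for reference, the usual wait_woken()/woken_wake_function() pattern looks roughly like this (the data_ready() predicate and the wait_for_data() wrapper are hypothetical):

#include <linux/wait.h>
#include <linux/sched/signal.h>

static long wait_for_data(struct wait_queue_head *wq,
			  bool (*data_ready)(void *priv), void *priv,
			  long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(wq, &wait);
	while (!data_ready(priv)) {
		if (!timeout || signal_pending(current))
			break;
		/* Sleeps in TASK_INTERRUPTIBLE; the WQ_FLAG_WOKEN handshake
		 * closes the race between the condition check and a wakeup. */
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	}
	remove_wait_queue(wq, &wait);
	return timeout;
}
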
@@ -1163,4 +1179,6 @@
 	(wait)->flags = 0;						\
 } while (0)
 
+bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
+
 #endif /* _LINUX_WAIT_H */
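
A hedged sketch of how the newly declared try_invoke_on_locked_down_task() might be used; copy_task_comm() and report_blocked_task() are illustrative, and the assumption (taken from the declaration above, not verified against the implementation) is that a false return means the callback was not invoked.

#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/string.h>

/* Callback runs while the scheduler has @t locked down. */
static bool copy_task_comm(struct task_struct *t, void *arg)
{
	strscpy(arg, t->comm, TASK_COMM_LEN);
	return true;
}

static void report_blocked_task(struct task_struct *p)
{
	char comm[TASK_COMM_LEN];

	if (try_invoke_on_locked_down_task(p, copy_task_comm, comm))
		pr_info("examined %s while it was locked down\n", comm);
}
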