hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
diff --git a/kernel/include/linux/wait.h b/kernel/include/linux/wait.h
--- a/kernel/include/linux/wait.h
+++ b/kernel/include/linux/wait.h
@@ -7,7 +7,6 @@
 #include <linux/list.h>
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
-#include <linux/sched/debug.h>
 
 #include <asm/current.h>
 #include <uapi/linux/wait.h>
@@ -21,6 +20,8 @@
 #define WQ_FLAG_EXCLUSIVE	0x01
 #define WQ_FLAG_WOKEN		0x02
 #define WQ_FLAG_BOOKMARK	0x04
+#define WQ_FLAG_CUSTOM		0x08
+#define WQ_FLAG_DONE		0x10
 
 /*
  * A single wait-queue entry structure:
@@ -102,7 +103,7 @@
  * lead to sporadic and non-obvious failure.
  *
  * Use either while holding wait_queue_head::lock or when used for wakeups
- * with an extra smp_mb() like:
+ * with an extra smp_mb() like::
  *
  * CPU0 - waker                    CPU1 - waiter
  *
....@@ -125,6 +126,19 @@
125126 static inline int waitqueue_active(struct wait_queue_head *wq_head)
126127 {
127128 return !list_empty(&wq_head->head);
129
+}
130
+
131
+/**
132
+ * wq_has_single_sleeper - check if there is only one sleeper
133
+ * @wq_head: wait queue head
134
+ *
135
+ * Returns true of wq_head has only one sleeper on the list.
136
+ *
137
+ * Please refer to the comment for waitqueue_active.
138
+ */
139
+static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
140
+{
141
+ return list_is_singular(&wq_head->head);
128142 }
129143
130144 /**
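As with waitqueue_active(), the value returned by the new wq_has_single_sleeper() helper is only meaningful when ordered against the sleeper side. A minimal waker-side sketch of how it might be used; my_ctx, done and wq are illustrative names, not part of this patch:

static void my_signal_done(struct my_ctx *ctx)
{
	WRITE_ONCE(ctx->done, 1);
	smp_mb();	/* pairs with the barrier in the sleeper's wait loop */

	/* Cheap single wakeup when we know only one task can be sleeping. */
	if (wq_has_single_sleeper(&ctx->wq))
		wake_up(&ctx->wq);
	else
		wake_up_all(&ctx->wq);
}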
@@ -189,9 +203,10 @@
 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
 		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
-void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
-void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
@@ -203,7 +218,8 @@
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
+#define wake_up_sync(x)			__wake_up_sync((x), TASK_NORMAL)
 
 /*
  * Wakeup macros to be used to report events to the targets.
@@ -217,7 +233,9 @@
 #define wake_up_interruptible_poll(x, m)				\
 	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
 #define wake_up_interruptible_sync_poll(x, m)				\
-	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
+	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+#define wake_up_interruptible_sync_poll_locked(x, m)			\
+	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 
 /**
  * wake_up_pollfree - signal that a polled waitqueue is going away
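The added wake_up_interruptible_sync_poll_locked() is the variant for callers that already hold wait_queue_head::lock. A hedged sketch of such a call site; the helper name, the @ready flag and the EPOLLIN mask are purely illustrative:

/* Assumes wq_head->lock also protects @ready. */
static void my_post_event_locked(struct wait_queue_head *wq_head, bool *ready)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	*ready = true;
	wake_up_interruptible_sync_poll_locked(wq_head, EPOLLIN);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}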
@@ -335,7 +353,7 @@
 
 #define __wait_event_freezable(wq_head, condition)			\
 	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
-		      schedule(); try_to_freeze())
+		      freezable_schedule())
 
 /**
  * wait_event_freezable - sleep (or freeze) until a condition gets true
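For context, wait_event_freezable() is typically called from freezer-aware kthreads; the change above only folds schedule()+try_to_freeze() into freezable_schedule(). A small illustrative loop, with my_kthread, my_ctx and work_pending being made-up names:

static int my_kthread(void *data)
{
	struct my_ctx *ctx = data;

	while (!kthread_should_stop()) {
		/* Returns -ERESTARTSYS if interrupted; just re-check then. */
		if (wait_event_freezable(ctx->wq,
					 READ_ONCE(ctx->work_pending) ||
					 kthread_should_stop()))
			continue;
		/* ... consume ctx->work_pending ... */
	}
	return 0;
}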
@@ -394,7 +412,7 @@
 #define __wait_event_freezable_timeout(wq_head, condition, timeout)	\
 	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
 		      TASK_INTERRUPTIBLE, 0, timeout,			\
-		      __ret = schedule_timeout(__ret); try_to_freeze())
+		      __ret = freezable_schedule_timeout(__ret))
 
 /*
  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
@@ -515,12 +533,13 @@
 	int __ret = 0;							\
 	struct hrtimer_sleeper __t;					\
 									\
-	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \
-	hrtimer_init_sleeper(&__t, current);				\
-	if ((timeout) != KTIME_MAX)					\
-		hrtimer_start_range_ns(&__t.timer, timeout,		\
-				       current->timer_slack_ns,		\
-				       HRTIMER_MODE_REL);		\
+	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,		\
+				      HRTIMER_MODE_REL);		\
+	if ((timeout) != KTIME_MAX) {					\
+		hrtimer_set_expires_range_ns(&__t.timer, timeout,	\
+					     current->timer_slack_ns);	\
+		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);	\
+	}								\
 									\
 	__ret = ___wait_event(wq_head, condition, state, 0, 0,		\
 		if (!__t.task) {					\
615634
616635 #define __wait_event_freezable_exclusive(wq, condition) \
617636 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
618
- schedule(); try_to_freeze())
637
+ freezable_schedule())
619638
620639 #define wait_event_freezable_exclusive(wq, condition) \
621640 ({ \
@@ -1135,15 +1154,12 @@
  * Waitqueues which are removed from the waitqueue_head at wakeup time
  */
 void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
-void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
 long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
 void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
-long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode,
-			long timeout);
-int __sched woken_wake_function(struct wait_queue_entry *wq_entry,
-				unsigned int mode, int sync, void *key);
-int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry,
-				     unsigned int mode, int sync, void *key);
+long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
+int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
+int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
 
 #define DEFINE_WAIT_FUNC(name, function)				\
 	struct wait_queue_entry name = {				\
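wait_woken() and woken_wake_function() are normally paired via DEFINE_WAIT_FUNC(); a minimal sketch of that idiom, where the ctx->done condition and the 100 ms timeout are illustrative only:

	long timeout = msecs_to_jiffies(100);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&ctx->wq, &wait);
	while (!READ_ONCE(ctx->done) && timeout)
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	remove_wait_queue(&ctx->wq, &wait);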
@@ -1162,4 +1178,6 @@
 		(wait)->flags = 0;					\
 	} while (0)
 
+bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
+
 #endif /* _LINUX_WAIT_H */
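try_invoke_on_locked_down_task() invokes the callback with the target task locked down and reports false when the task could not be pinned (or the callback declined). A hedged caller sketch; check_blocked(), task_seems_blocked() and the ->on_rq test are illustrative, not part of this patch:

/* Must not sleep: runs with the task's runqueue/pi lock held. */
static bool check_blocked(struct task_struct *t, void *arg)
{
	bool *blocked = arg;

	*blocked = !t->on_rq;	/* illustrative condition only */
	return true;
}

static bool task_seems_blocked(struct task_struct *p)
{
	bool blocked = false;

	if (!try_invoke_on_locked_down_task(p, check_blocked, &blocked))
		return false;	/* could not lock the task down */
	return blocked;
}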