@@ -1,9 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Generic waiting primitives.
  *
  * (C) 2004 Nadia Yvette Chambers, Oracle
  */
 #include "sched.h"
+#include <trace/hooks/sched.h>
 
 void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
 {
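Note: the new include pulls in the Android vendor-hook declarations used later in this diff (trace_android_vh_set_wake_flags). As a sketch only, and assuming the usual DECLARE_HOOK conventions of the Android common kernel (the exact TP_PROTO is not shown in this diff), the declaration in include/trace/hooks/sched.h would look roughly like:

        /* Sketch: assumed declaration, not taken from this diff. */
        DECLARE_HOOK(android_vh_set_wake_flags,
                TP_PROTO(int *wake_flags, unsigned int *mode),
                TP_ARGS(wake_flags, mode));

A vendor module attaches via the generated register_trace_android_vh_set_wake_flags(); until something registers, the hook is a no-op.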
---|
@@ -117,16 +119,12 @@
 	bookmark.func = NULL;
 	INIT_LIST_HEAD(&bookmark.entry);
 
-	spin_lock_irqsave(&wq_head->lock, flags);
-	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
-	spin_unlock_irqrestore(&wq_head->lock, flags);
-
-	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
+	do {
 		spin_lock_irqsave(&wq_head->lock, flags);
 		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
 						wake_flags, key, &bookmark);
 		spin_unlock_irqrestore(&wq_head->lock, flags);
-	}
+	}
+	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
 }
 
 /**
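Note: this hunk deduplicates __wake_up_common_lock(); the initial lock/wake/unlock pass and the bookmark retry loop were the same code twice, so they collapse into one do/while. The bookmark is what bounds lock hold time: __wake_up_common() walks at most a fixed batch of entries per pass (WAITQUEUE_WALK_BREAK_CNT, defined elsewhere in this file), parks the bookmark entry in the list, and sets WQ_FLAG_BOOKMARK so the caller re-takes the lock and resumes from where it stopped. A minimal sketch of the same pattern outside the waitqueue code, with process_batch() as a hypothetical helper that returns true while entries remain past the cursor:

        /* Sketch of the bookmark idea: drain a shared list in bounded
         * batches so the spinlock is never held across a long walk. */
        struct cursor {
                struct list_head entry; /* parked in the list between batches */
                bool more;              /* analogous to WQ_FLAG_BOOKMARK */
        };

        static void drain_in_batches(struct list_head *head, spinlock_t *lock)
        {
                struct cursor c = { .more = false };

                INIT_LIST_HEAD(&c.entry);
                do {
                        spin_lock(lock);
                        c.more = process_batch(head, &c, 64); /* hypothetical */
                        spin_unlock(lock);
                } while (c.more);
        }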
---|
@@ -172,7 +170,6 @@
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  * @wq_head: the waitqueue
  * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: opaque value to be passed to wakeup targets
  *
  * The sync wakeup differs that the waker knows that it will schedule
---|
@@ -186,26 +183,47 @@
  * accessing the task state.
  */
 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
-			int nr_exclusive, void *key)
+			void *key)
 {
-	int wake_flags = 1; /* XXX WF_SYNC */
+	int wake_flags = WF_SYNC;
 
 	if (unlikely(!wq_head))
 		return;
 
-	if (unlikely(nr_exclusive != 1))
-		wake_flags = 0;
-
-	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
+	trace_android_vh_set_wake_flags(&wake_flags, &mode);
+	__wake_up_common_lock(wq_head, mode, 1, wake_flags, key);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/**
+ * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
+ * @wq_head: the waitqueue
+ * @mode: which threads
+ * @key: opaque value to be passed to wakeup targets
+ *
+ * The sync wakeup differs in that the waker knows that it will schedule
+ * away soon, so while the target thread will be woken up, it will not
+ * be migrated to another CPU - ie. the two threads are 'synchronized'
+ * with each other. This can prevent needless bouncing between CPUs.
+ *
+ * On UP it can prevent extra preemption.
+ *
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
+ */
+void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
+			       unsigned int mode, void *key)
+{
+	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
+}
+EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
 
 /*
  * __wake_up_sync - see __wake_up_sync_key()
  */
-void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
 {
-	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
+	__wake_up_sync_key(wq_head, mode, NULL);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
 
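Note: the nr_exclusive parameter disappears because sync wakers always wake exactly one task; the old code even silently dropped WF_SYNC when nr_exclusive != 1. The count is now hard-coded to 1, the magic "1 /* XXX WF_SYNC */" becomes the named WF_SYNC flag, and the Android vendor hook gets a chance to adjust wake_flags/mode just before the wakeup. For callers the change is mechanical; e.g. a sync poll wakeup (a sketch, assuming the usual poll_to_key() helper from <linux/wait.h>):

        /* Before: the count was passed explicitly. */
        __wake_up_sync_key(&wq, TASK_INTERRUPTIBLE, 1, poll_to_key(EPOLLIN));

        /* After: __wake_up_sync_key() always wakes exactly one task. */
        __wake_up_sync_key(&wq, TASK_INTERRUPTIBLE, poll_to_key(EPOLLIN));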
---|
@@ -242,17 +260,22 @@
 }
 EXPORT_SYMBOL(prepare_to_wait);
 
-void
+/* Returns true if we are the first waiter in the queue, false otherwise. */
+bool
 prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
 	unsigned long flags;
+	bool was_empty = false;
 
 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&wq_head->lock, flags);
-	if (list_empty(&wq_entry->entry))
+	if (list_empty(&wq_entry->entry)) {
+		was_empty = list_empty(&wq_head->head);
 		__add_wait_queue_entry_tail(wq_head, wq_entry);
+	}
 	set_current_state(state);
 	spin_unlock_irqrestore(&wq_head->lock, flags);
+	return was_empty;
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
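Note: prepare_to_wait_exclusive() now reports, while still holding the waitqueue lock, whether the queue was empty before this entry was added, i.e. whether we became the first waiter. That lets a caller do first-waiter-only work without a second lock round trip. A hedged sketch of such a caller, where kick_waker() is a hypothetical helper and not part of this diff:

        /* Sketch: only the first exclusive waiter pokes the other side. */
        DEFINE_WAIT(wait);

        if (prepare_to_wait_exclusive(&wq, &wait, TASK_UNINTERRUPTIBLE))
                kick_waker();   /* hypothetical: we were first in the queue */
        if (!condition)
                schedule();
        finish_wait(&wq, &wait);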
---|
@@ -271,7 +294,7 @@
 	long ret = 0;
 
 	spin_lock_irqsave(&wq_head->lock, flags);
-	if (unlikely(signal_pending_state(state, current))) {
+	if (signal_pending_state(state, current)) {
 		/*
 		 * Exclusive waiter must not fail if it was selected by wakeup,
 		 * it should "consume" the condition we were waiting for.
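Note: prepare_to_wait_event() backs the wait_event*() loops, where a pending signal during an interruptible sleep is a routine result rather than a rare error path, which is presumably why the unlikely() hint is dropped. For context, the surrounding loop has roughly this shape (a simplified sketch of the ___wait_event() pattern, not the actual macro):

        /* Simplified shape of a wait_event_interruptible()-style loop. */
        DEFINE_WAIT(wait);
        long ret;

        for (;;) {
                ret = prepare_to_wait_event(&wq, &wait, TASK_INTERRUPTIBLE);
                if (condition)
                        break;
                if (ret)        /* -ERESTARTSYS: a signal is pending */
                        break;
                schedule();
        }
        finish_wait(&wq, &wait);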
---|
@@ -377,13 +400,13 @@
 }
 EXPORT_SYMBOL(finish_wait);
 
-int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry,
-				     unsigned int mode, int sync, void *key)
+__sched int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
+				     int sync, void *key)
 {
 	int ret = default_wake_function(wq_entry, mode, sync, key);
 
 	if (ret)
-		list_del_init(&wq_entry->entry);
+		list_del_init_careful(&wq_entry->entry);
 
 	return ret;
 }
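Note: beyond the signature reflow, the functional change is list_del_init() -> list_del_init_careful(). The "careful" variant orders its stores so that a concurrent list_empty_careful() on another CPU cannot observe a half-reinitialised entry; that matters because finish_wait() checks emptiness without taking the waitqueue lock. The lockless fast path it pairs with looks roughly like this (abridged from finish_wait() in this file):

        __set_current_state(TASK_RUNNING);
        /* Lockless check: only take the lock if we are still queued. */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }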
---|
@@ -414,8 +437,7 @@
 *     }					smp_mb(); // C
 *     remove_wait_queue(&wq_head, &wait);	wq_entry->flags |= WQ_FLAG_WOKEN;
 */
-long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode,
-			long timeout)
+__sched long wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, long timeout)
 {
 	/*
 	 * The below executes an smp_mb(), which matches with the full barrier
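Note: wait_woken() and woken_wake_function() implement the wakeup protocol diagrammed in the comment above: the WQ_FLAG_WOKEN flag plus paired barriers (B and C) close the window where a wakeup could be lost between the condition check and the sleep. A sketch of the typical caller-side pattern (as used, for example, by socket receive paths):

        /* Sketch: sleep until woken_wake_function() marks us woken,
         * or until the timeout runs out. */
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(&wq_head, &wait);
        while (!condition && timeout)
                timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
        remove_wait_queue(&wq_head, &wait);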
---|
@@ -440,8 +462,8 @@
 }
 EXPORT_SYMBOL(wait_woken);
 
-int __sched woken_wake_function(struct wait_queue_entry *wq_entry,
-				unsigned int mode, int sync, void *key)
+__sched int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
+				int sync, void *key)
 {
 	/* Pairs with the smp_store_mb() in wait_woken(). */
 	smp_mb(); /* C */
---|