| .. | .. |
|---|
extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
/* NOTE(review): "_locked" variant — presumably the caller must already hold
 * q->lock, matching swake_up_locked() above; confirm at the definition. */
extern void swake_up_all_locked(struct swait_queue_head *q);

/* NOTE(review): double-underscore prefix suggests a locked/low-level helper
 * of prepare_to_swait_exclusive() that skips setting the task state — it
 * takes no `state` argument; confirm locking contract at the definition. */
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
|---|
| 166 | 168 | |
|---|
| .. | .. |
|---|
| 297 | 299 | __ret; \ |
|---|
| 298 | 300 | }) |
|---|
| 299 | 301 | |
|---|
/*
 * Sleep on @wq until @condition is true, dropping @lock across the sleep:
 * @lock is released (with irqs enabled) before schedule() and re-acquired
 * afterwards, so @condition is always re-evaluated under @lock.  @cmd runs
 * after the unlock and before sleeping.  The wait is TASK_UNINTERRUPTIBLE
 * with no timeout (the 0 argument to ___swait_event()).
 */
#define __swait_event_lock_irq(wq, condition, lock, cmd)	\
	___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
		       raw_spin_unlock_irq(&lock);		\
		       cmd;					\
		       schedule();				\
		       raw_spin_lock_irq(&lock))
|---|
/*
 * Wait on @wq_head for @condition to become true.  Called with @lock held
 * (it is dropped and re-taken around each sleep by the helper above).
 * Fast path: if @condition already holds, skip the wait machinery entirely.
 * The empty trailing argument means no extra command runs before sleeping.
 */
#define swait_event_lock_irq(wq_head, condition, lock)	\
	do {						\
		if (condition)				\
			break;				\
		__swait_event_lock_irq(wq_head, condition, lock, ); \
	} while (0)
|---|
| 315 | + |
|---|
| 300 | 316 | #endif /* _LINUX_SWAIT_H */ |
|---|