@@ -191,6 +191,9 @@
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	rq->cpu = -1;
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
@@ -972,12 +975,21 @@
 	percpu_ref_put(&q->q_usage_counter);
 }
 
+static void blk_queue_usage_counter_release_wrk(struct work_struct *work)
+{
+	struct request_queue *q =
+		container_of(work, struct request_queue, mq_pcpu_wake);
+
+	wake_up_all(&q->mq_freeze_wq);
+}
+
 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 {
 	struct request_queue *q =
 		container_of(ref, struct request_queue, q_usage_counter);
 
-	wake_up_all(&q->mq_freeze_wq);
+	if (wq_has_sleeper(&q->mq_freeze_wq))
+		schedule_work(&q->mq_pcpu_wake);
 }
 
 static void blk_rq_timed_out_timer(struct timer_list *t)
@@ -1076,6 +1088,7 @@
 	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
+	INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk);
 
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.