2023-11-20 69d6da3c1c63675524a25e7dc92a4f43c4164cef
kernel/block/blk-core.c
@@ -191,6 +191,9 @@
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	rq->cpu = -1;
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
@@ -972,12 +975,21 @@
 	percpu_ref_put(&q->q_usage_counter);
 }
 
+static void blk_queue_usage_counter_release_wrk(struct work_struct *work)
+{
+	struct request_queue *q =
+		container_of(work, struct request_queue, mq_pcpu_wake);
+
+	wake_up_all(&q->mq_freeze_wq);
+}
+
 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 {
 	struct request_queue *q =
 		container_of(ref, struct request_queue, q_usage_counter);
 
-	wake_up_all(&q->mq_freeze_wq);
+	if (wq_has_sleeper(&q->mq_freeze_wq))
+		schedule_work(&q->mq_pcpu_wake);
 }
 
 static void blk_rq_timed_out_timer(struct timer_list *t)
@@ -1076,6 +1088,7 @@
 	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
+	INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk);
 
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
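For context, the second and third hunks follow a common PREEMPT_RT pattern: the percpu_ref release callback can fire from a context where taking the wait queue's lock directly is problematic (on RT that lock is a sleeping lock), so the wake_up_all() is deferred to a work item that runs in process context, and the work is only scheduled when someone is actually sleeping on mq_freeze_wq. Below is a minimal, self-contained sketch of the same deferred-wakeup pattern in isolation; the identifiers my_queue, deferred_wake_fn, my_release and my_queue_init are illustrative placeholders, not names from the patch.

/*
 * Illustrative sketch only (not part of the patch): the deferred-wakeup
 * pattern in isolation. The percpu_ref release callback never calls
 * wake_up_all() directly; it queues a work item, and the work item
 * performs the wake-up from process context.
 */
#include <linux/gfp.h>
#include <linux/percpu-refcount.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct my_queue {
	struct percpu_ref	usage_counter;
	wait_queue_head_t	freeze_wq;
	struct work_struct	wake_work;
};

/* Runs in process context; taking the wait-queue lock here is safe. */
static void deferred_wake_fn(struct work_struct *work)
{
	struct my_queue *q = container_of(work, struct my_queue, wake_work);

	wake_up_all(&q->freeze_wq);
}

/* percpu_ref release callback; may be invoked from atomic context. */
static void my_release(struct percpu_ref *ref)
{
	struct my_queue *q = container_of(ref, struct my_queue, usage_counter);

	/* Only schedule the work if somebody is actually waiting. */
	if (wq_has_sleeper(&q->freeze_wq))
		schedule_work(&q->wake_work);
}

static int my_queue_init(struct my_queue *q)
{
	init_waitqueue_head(&q->freeze_wq);
	INIT_WORK(&q->wake_work, deferred_wake_fn);
	return percpu_ref_init(&q->usage_counter, my_release,
			       PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
}

The wq_has_sleeper() check mirrors the hunk above: it avoids queueing a work item on every reference drop when nothing is waiting for the queue to freeze.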