2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/block/blk-rq-qos.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
 #include "blk-rq-qos.h"
 
 /*
@@ -27,75 +29,85 @@
 	return atomic_inc_below(&rq_wait->inflight, limit);
 }
 
-void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
 {
-	struct rq_qos *rqos;
-
-	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->cleanup)
 			rqos->ops->cleanup(rqos, bio);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_done(struct request_queue *q, struct request *rq)
+void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
 {
-	struct rq_qos *rqos;
-
-	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->done)
 			rqos->ops->done(rqos, rq);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_issue(struct request_queue *q, struct request *rq)
+void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
 {
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->issue)
 			rqos->ops->issue(rqos, rq);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_requeue(struct request_queue *q, struct request *rq)
+void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
 {
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->requeue)
 			rqos->ops->requeue(rqos, rq);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
-		     spinlock_t *lock)
+void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
 {
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->throttle)
-			rqos->ops->throttle(rqos, bio, lock);
-	}
+			rqos->ops->throttle(rqos, bio);
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
+void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
 {
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->track)
 			rqos->ops->track(rqos, rq, bio);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
+void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
 {
-	struct rq_qos *rqos;
+	do {
+		if (rqos->ops->merge)
+			rqos->ops->merge(rqos, rq, bio);
+		rqos = rqos->next;
+	} while (rqos);
+}
 
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
+{
+	do {
 		if (rqos->ops->done_bio)
 			rqos->ops->done_bio(rqos, bio);
-	}
+		rqos = rqos->next;
+	} while (rqos);
+}
+
+void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
+{
+	do {
+		if (rqos->ops->queue_depth_changed)
+			rqos->ops->queue_depth_changed(rqos);
+		rqos = rqos->next;
+	} while (rqos);
 }
 
 /*
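
Note: the new __rq_qos_*() walkers dereference rqos without a NULL check; the do/while form assumes at least one policy is attached to the queue. The NULL test is expected to live in inline wrappers in blk-rq-qos.h, so a queue with no rq_qos policies pays only an inlined branch and never makes the out-of-line call. A minimal sketch of that wrapper pattern (assuming the matching blk-rq-qos.h change from the same series; only two of the hooks shown):

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/* Enter the out-of-line list walk only if a policy is attached. */
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

The same shape applies to cleanup, issue, requeue, track, merge, done_bio and queue_depth_changed.
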
@@ -188,8 +200,102 @@
 	return true;
 }
 
+struct rq_qos_wait_data {
+	struct wait_queue_entry wq;
+	struct task_struct *task;
+	struct rq_wait *rqw;
+	acquire_inflight_cb_t *cb;
+	void *private_data;
+	bool got_token;
+};
+
+static int rq_qos_wake_function(struct wait_queue_entry *curr,
+				unsigned int mode, int wake_flags, void *key)
+{
+	struct rq_qos_wait_data *data = container_of(curr,
+						     struct rq_qos_wait_data,
+						     wq);
+
+	/*
+	 * If we fail to get a budget, return -1 to interrupt the wake up loop
+	 * in __wake_up_common.
+	 */
+	if (!data->cb(data->rqw, data->private_data))
+		return -1;
+
+	data->got_token = true;
+	smp_wmb();
+	list_del_init(&curr->entry);
+	wake_up_process(data->task);
+	return 1;
+}
+
+/**
+ * rq_qos_wait - throttle on a rqw if we need to
+ * @rqw: rqw to throttle on
+ * @private_data: caller provided specific data
+ * @acquire_inflight_cb: inc the rqw->inflight counter if we can
+ * @cleanup_cb: the callback to cleanup in case we race with a waker
+ *
+ * This provides a uniform place for the rq_qos users to do their throttling.
+ * Since you can end up with a lot of things sleeping at once, this manages the
+ * waking up based on the resources available. The acquire_inflight_cb should
+ * inc the rqw->inflight if we have the ability to do so, or return false if
+ * not and then we will sleep until the room becomes available.
+ *
+ * cleanup_cb is in case that we race with a waker and need to cleanup the
+ * inflight count accordingly.
+ */
+void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+		 acquire_inflight_cb_t *acquire_inflight_cb,
+		 cleanup_cb_t *cleanup_cb)
+{
+	struct rq_qos_wait_data data = {
+		.wq = {
+			.func	= rq_qos_wake_function,
+			.entry	= LIST_HEAD_INIT(data.wq.entry),
+		},
+		.task = current,
+		.rqw = rqw,
+		.cb = acquire_inflight_cb,
+		.private_data = private_data,
+	};
+	bool has_sleeper;
+
+	has_sleeper = wq_has_sleeper(&rqw->wait);
+	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
+		return;
+
+	has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
+						 TASK_UNINTERRUPTIBLE);
+	do {
+		/* The memory barrier in set_task_state saves us here. */
+		if (data.got_token)
+			break;
+		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
+			finish_wait(&rqw->wait, &data.wq);
+
+			/*
+			 * We raced with wbt_wake_function() getting a token,
+			 * which means we now have two. Put our local token
+			 * and wake anyone else potentially waiting for one.
+			 */
+			smp_rmb();
+			if (data.got_token)
+				cleanup_cb(rqw, private_data);
+			break;
+		}
+		io_schedule();
+		has_sleeper = true;
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	} while (1);
+	finish_wait(&rqw->wait, &data.wq);
+}
+
 void rq_qos_exit(struct request_queue *q)
 {
+	blk_mq_debugfs_unregister_queue_rqos(q);
+
 	while (q->rq_qos) {
 		struct rq_qos *rqos = q->rq_qos;
 		q->rq_qos = rqos->next;
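
A caller of rq_qos_wait() supplies two callbacks: acquire_inflight_cb tries to take an inflight slot without blocking, and cleanup_cb puts back the extra slot won when a waker hands over a token after the sleeper already acquired one itself. A hypothetical fixed-depth user might look like the sketch below (illustrative only: MY_DEPTH and the my_* names are not part of this patch; rq_wait_inc_below() is the blk-rq-qos.h helper wrapping the atomic_inc_below() used above):

#define MY_DEPTH	64

static bool my_acquire_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	/* Take a slot only while inflight stays below the depth limit. */
	return rq_wait_inc_below(rqw, MY_DEPTH);
}

static void my_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	/*
	 * Racing with the waker left us holding two tokens: put one
	 * back and pass the wakeup on to the next exclusive sleeper.
	 */
	atomic_dec(&rqw->inflight);
	wake_up(&rqw->wait);
}

static void my_throttle(struct rq_wait *rqw)
{
	/* Blocks until the caller owns exactly one inflight slot. */
	rq_qos_wait(rqw, NULL, my_acquire_inflight_cb, my_cleanup_cb);
}

When rq_qos_wait() returns, the caller owns one inflight slot and is expected to drop it (and wake the next waiter) from its completion path.
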