@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
 #include "blk-rq-qos.h"
 
 /*
@@ -27,75 +29,85 @@
 	return atomic_inc_below(&rq_wait->inflight, limit);
 }
 
-void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
 {
-	struct rq_qos *rqos;
-
-	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->cleanup)
 			rqos->ops->cleanup(rqos, bio);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
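Note that the new do/while form dereferences rqos unconditionally, so these __rq_qos_* loops assume at least one policy is attached. The q->rq_qos NULL check presumably moves into static inline wrappers in blk-rq-qos.h (that header is not part of this hunk); a minimal sketch of what such a wrapper would look like:

/*
 * Sketch of the assumed wrapper in blk-rq-qos.h: callers keep the old
 * rq_qos_cleanup(q, bio) interface, and the __rq_qos_cleanup() loop is
 * only entered when at least one rq_qos policy is attached.
 */
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}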
---|
 
-void rq_qos_done(struct request_queue *q, struct request *rq)
+void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
 {
-	struct rq_qos *rqos;
-
-	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->done)
 			rqos->ops->done(rqos, rq);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_issue(struct request_queue *q, struct request *rq)
+void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
 {
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->issue)
 			rqos->ops->issue(rqos, rq);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_requeue(struct request_queue *q, struct request *rq)
+void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
 {
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->requeue)
 			rqos->ops->requeue(rqos, rq);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
-		     spinlock_t *lock)
+void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
 {
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->throttle)
-			rqos->ops->throttle(rqos, bio, lock);
-	}
+			rqos->ops->throttle(rqos, bio);
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
+void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
 {
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+	do {
 		if (rqos->ops->track)
 			rqos->ops->track(rqos, rq, bio);
-	}
+		rqos = rqos->next;
+	} while (rqos);
 }
 
-void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
+void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
 {
-	struct rq_qos *rqos;
+	do {
+		if (rqos->ops->merge)
+			rqos->ops->merge(rqos, rq, bio);
+		rqos = rqos->next;
+	} while (rqos);
+}
 
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
+{
+	do {
 		if (rqos->ops->done_bio)
 			rqos->ops->done_bio(rqos, bio);
-	}
+		rqos = rqos->next;
+	} while (rqos);
+}
+
+void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
+{
+	do {
+		if (rqos->ops->queue_depth_changed)
+			rqos->ops->queue_depth_changed(rqos);
+		rqos = rqos->next;
+	} while (rqos);
 }
 
 /*
@@ -188 +200 @@
 	return true;
 }
 
+struct rq_qos_wait_data {
+	struct wait_queue_entry wq;
+	struct task_struct *task;
+	struct rq_wait *rqw;
+	acquire_inflight_cb_t *cb;
+	void *private_data;
+	bool got_token;
+};
+
+static int rq_qos_wake_function(struct wait_queue_entry *curr,
+				unsigned int mode, int wake_flags, void *key)
+{
+	struct rq_qos_wait_data *data = container_of(curr,
+						     struct rq_qos_wait_data,
+						     wq);
+
+	/*
+	 * If we fail to get a budget, return -1 to interrupt the wake up loop
+	 * in __wake_up_common.
+	 */
+	if (!data->cb(data->rqw, data->private_data))
+		return -1;
+
+	data->got_token = true;
+	smp_wmb();
+	list_del_init(&curr->entry);
+	wake_up_process(data->task);
+	return 1;
+}
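The return values follow the waitqueue wake-function contract: 1 tells __wake_up_common that this exclusive waiter consumed the freed slot, while -1 aborts the scan without dequeueing the waiter, preserving FIFO order for the next wake-up. On the completion side a policy then only needs a plain wake-up; a hedged sketch of such a completion path (the function and the limit argument are hypothetical, not part of this patch):

static void my_limiter_done(struct rq_wait *rqw, unsigned int limit)
{
	/* Release our inflight slot. */
	int inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * Waking one task is enough: rq_qos_wake_function() either hands
	 * the slot to the first exclusive waiter (returns 1) or fails to
	 * get a budget and aborts the scan (returns -1), leaving the
	 * waiter queued for the next completion.
	 */
	if (inflight < limit && wq_has_sleeper(&rqw->wait))
		wake_up(&rqw->wait);
}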
---|
+
+/**
+ * rq_qos_wait - throttle on a rqw if we need to
+ * @rqw: rqw to throttle on
+ * @private_data: caller provided specific data
+ * @acquire_inflight_cb: inc the rqw->inflight counter if we can
+ * @cleanup_cb: the callback to cleanup in case we race with a waker
+ *
+ * This provides a uniform place for the rq_qos users to do their throttling.
+ * Since you can end up with a lot of things sleeping at once, this manages the
+ * waking up based on the resources available. The acquire_inflight_cb should
+ * increment rqw->inflight if it is able to; otherwise it returns false and we
+ * sleep until room becomes available.
+ *
+ * cleanup_cb is used in case we race with a waker and need to adjust the
+ * inflight count accordingly.
+ */
+void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+		 acquire_inflight_cb_t *acquire_inflight_cb,
+		 cleanup_cb_t *cleanup_cb)
+{
+	struct rq_qos_wait_data data = {
+		.wq = {
+			.func	= rq_qos_wake_function,
+			.entry	= LIST_HEAD_INIT(data.wq.entry),
+		},
+		.task = current,
+		.rqw = rqw,
+		.cb = acquire_inflight_cb,
+		.private_data = private_data,
+	};
+	bool has_sleeper;
+
+	has_sleeper = wq_has_sleeper(&rqw->wait);
+	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
+		return;
+
+	has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
+						 TASK_UNINTERRUPTIBLE);
+	do {
+		/* The memory barrier in set_current_state saves us here. */
+		if (data.got_token)
+			break;
+		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
+			finish_wait(&rqw->wait, &data.wq);
+
+			/*
+			 * We raced with rq_qos_wake_function() getting a
+			 * token, which means we now have two. Put our local
+			 * token and wake anyone else potentially waiting for
+			 * one.
+			 */
+			smp_rmb();
+			if (data.got_token)
+				cleanup_cb(rqw, private_data);
+			break;
+		}
+		io_schedule();
+		has_sleeper = true;
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	} while (1);
+	finish_wait(&rqw->wait, &data.wq);
+}
+
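A user of this helper supplies both callbacks from its ->throttle() path. A minimal sketch under the assumption of a simple inflight limiter built on rq_wait_inc_below() from blk-rq-qos.h (all my_* names are hypothetical, loosely modeled on how blk-wbt consumes this API):

/* Hypothetical limiter: one rq_wait (set up with rq_wait_init()) plus a
 * fixed inflight budget.
 */
struct my_limiter {
	struct rq_wait rqw;
	unsigned int max_inflight;
};

static bool my_acquire_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	struct my_limiter *lim = private_data;

	/* Take a token if there is room; false makes rq_qos_wait() sleep. */
	return rq_wait_inc_below(rqw, lim->max_inflight);
}

static void my_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	/* We got a second token from a racing waker; put one back. */
	atomic_dec(&rqw->inflight);
	if (wq_has_sleeper(&rqw->wait))
		wake_up(&rqw->wait);
}

static void my_throttle(struct my_limiter *lim)
{
	rq_qos_wait(&lim->rqw, lim, my_acquire_inflight_cb, my_cleanup_cb);
}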
---|
 void rq_qos_exit(struct request_queue *q)
 {
+	blk_mq_debugfs_unregister_queue_rqos(q);
+
 	while (q->rq_qos) {
 		struct rq_qos *rqos = q->rq_qos;
 		q->rq_qos = rqos->next;