.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0
1 | 2 | /*
2 | 3 | * Copyright (C) 2017 Facebook
3 | | - *
4 | | - * This program is free software; you can redistribute it and/or
5 | | - * modify it under the terms of the GNU General Public
6 | | - * License v2 as published by the Free Software Foundation.
7 | | - *
8 | | - * This program is distributed in the hope that it will be useful,
9 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 | | - * General Public License for more details.
12 | | - *
13 | | - * You should have received a copy of the GNU General Public License
14 | | - * along with this program. If not, see <https://www.gnu.org/licenses/>.
15 | 4 | */
16 | 5 |
17 | 6 | #include <linux/kernel.h>
.. | ..
23 | 12 | #include "blk-mq.h"
24 | 13 | #include "blk-mq-debugfs.h"
25 | 14 | #include "blk-mq-tag.h"
| 15 | +#include "blk-rq-qos.h"
26 | 16 |
27 | 17 | static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
28 | 18 | {
29 | 19 | if (stat->nr_samples) {
30 | | - seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
| 20 | + seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
31 | 21 | stat->nr_samples, stat->mean, stat->min, stat->max);
32 | 22 | } else {
33 | 23 | seq_puts(m, "samples=0");
.. | ..
39 | 29 | struct request_queue *q = data;
40 | 30 | int bucket;
41 | 31 |
42 | | - for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
43 | | - seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
44 | | - print_stat(m, &q->poll_stat[2*bucket]);
| 32 | + for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
| 33 | + seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
| 34 | + print_stat(m, &q->poll_stat[2 * bucket]);
45 | 35 | seq_puts(m, "\n");
46 | 36 |
47 | | - seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
48 | | - print_stat(m, &q->poll_stat[2*bucket+1]);
| 37 | + seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
| 38 | + print_stat(m, &q->poll_stat[2 * bucket + 1]);
49 | 39 | seq_puts(m, "\n");
50 | 40 | }
51 | 41 | return 0;
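
queue_poll_stat_show() above walks a single flat poll_stat[] array in which read and write statistics are interleaved: even slots hold reads, odd slots hold writes, and each pair covers one power-of-two size class starting at 512 bytes. A minimal sketch of that indexing, with the helper name and the bucket count chosen here purely for illustration:

#include <linux/log2.h>

#define BLK_MQ_POLL_STATS_BKTS 16      /* assumption: 8 size classes x 2 directions */

/* Illustrative only: map (direction, request size) to a poll_stat[] slot. */
static int poll_stat_bucket(int is_write, unsigned int bytes)
{
        int size_class = ilog2(bytes) - 9;      /* class 0 = 512 B, class 1 = 1 KiB, ... */

        if (size_class < 0 || size_class >= BLK_MQ_POLL_STATS_BKTS / 2)
                return -1;                      /* size not tracked */
        return 2 * size_class + is_write;       /* reads on even slots, writes on odd */
}
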
.. | ..
112 | 102 |
113 | 103 | #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
114 | 104 | static const char *const blk_queue_flag_name[] = {
115 | | - QUEUE_FLAG_NAME(QUEUED),
116 | 105 | QUEUE_FLAG_NAME(STOPPED),
117 | 106 | QUEUE_FLAG_NAME(DYING),
118 | | - QUEUE_FLAG_NAME(BYPASS),
119 | | - QUEUE_FLAG_NAME(BIDI),
120 | 107 | QUEUE_FLAG_NAME(NOMERGES),
121 | 108 | QUEUE_FLAG_NAME(SAME_COMP),
122 | 109 | QUEUE_FLAG_NAME(FAIL_IO),
.. | ..
129 | 116 | QUEUE_FLAG_NAME(SAME_FORCE),
130 | 117 | QUEUE_FLAG_NAME(DEAD),
131 | 118 | QUEUE_FLAG_NAME(INIT_DONE),
132 | | - QUEUE_FLAG_NAME(NO_SG_MERGE),
| 119 | + QUEUE_FLAG_NAME(STABLE_WRITES),
133 | 120 | QUEUE_FLAG_NAME(POLL),
134 | 121 | QUEUE_FLAG_NAME(WC),
135 | 122 | QUEUE_FLAG_NAME(FUA),
136 | | - QUEUE_FLAG_NAME(FLUSH_NQ),
137 | 123 | QUEUE_FLAG_NAME(DAX),
138 | 124 | QUEUE_FLAG_NAME(STATS),
139 | 125 | QUEUE_FLAG_NAME(POLL_STATS),
140 | 126 | QUEUE_FLAG_NAME(REGISTERED),
141 | 127 | QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
142 | 128 | QUEUE_FLAG_NAME(QUIESCED),
| 129 | + QUEUE_FLAG_NAME(PCI_P2PDMA),
| 130 | + QUEUE_FLAG_NAME(ZONE_RESETALL),
| 131 | + QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
| 132 | + QUEUE_FLAG_NAME(HCTX_ACTIVE),
| 133 | + QUEUE_FLAG_NAME(NOWAIT),
143 | 134 | };
144 | 135 | #undef QUEUE_FLAG_NAME
145 | 136 |
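
QUEUE_FLAG_NAME() combines token pasting with stringification in a designated initializer, so each table slot is indexed by the flag's bit number and holds that flag's name. After preprocessing, for example:

/* QUEUE_FLAG_NAME(STOPPED) becomes: */
[QUEUE_FLAG_STOPPED] = "STOPPED",

/* so the table above is equivalent to writing out: */
static const char *const blk_queue_flag_name[] = {
        [QUEUE_FLAG_STOPPED] = "STOPPED",
        [QUEUE_FLAG_DYING]   = "DYING",
        /* ... */
};

Because the QUEUE_FLAG_* constants are bit numbers rather than masks, they can index the array directly; slots for any unnamed flags simply stay NULL.
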
.. | ..
228 | 219 | HCTX_STATE_NAME(STOPPED),
229 | 220 | HCTX_STATE_NAME(TAG_ACTIVE),
230 | 221 | HCTX_STATE_NAME(SCHED_RESTART),
| 222 | + HCTX_STATE_NAME(INACTIVE),
231 | 223 | };
232 | 224 | #undef HCTX_STATE_NAME
233 | 225 |
.. | ..
251 | 243 | #define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
252 | 244 | static const char *const hctx_flag_name[] = {
253 | 245 | HCTX_FLAG_NAME(SHOULD_MERGE),
254 | | - HCTX_FLAG_NAME(TAG_SHARED),
255 | | - HCTX_FLAG_NAME(SG_MERGE),
| 246 | + HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
256 | 247 | HCTX_FLAG_NAME(BLOCKING),
257 | 248 | HCTX_FLAG_NAME(NO_SCHED),
| 249 | + HCTX_FLAG_NAME(STACKING),
| 250 | + HCTX_FLAG_NAME(TAG_HCTX_SHARED),
258 | 251 | };
259 | 252 | #undef HCTX_FLAG_NAME
260 | 253 |
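
Unlike the queue flags, the BLK_MQ_F_* constants are bitmask values, so HCTX_FLAG_NAME() runs them through ilog2() to index the table by bit position instead. For example, with BLK_MQ_F_SHOULD_MERGE defined as 1 << 0:

HCTX_FLAG_NAME(SHOULD_MERGE)
/* expands to */
[ilog2(BLK_MQ_F_SHOULD_MERGE)] = "SHOULD_MERGE"         /* i.e. [0] = "SHOULD_MERGE" */

ilog2() folds to a constant for compile-time values, so this is still a valid designated initializer.
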
.. | ..
277 | 270 | return 0;
278 | 271 | }
279 | 272 |
280 | | -#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
281 | | -static const char *const op_name[] = {
282 | | - REQ_OP_NAME(READ),
283 | | - REQ_OP_NAME(WRITE),
284 | | - REQ_OP_NAME(FLUSH),
285 | | - REQ_OP_NAME(DISCARD),
286 | | - REQ_OP_NAME(ZONE_REPORT),
287 | | - REQ_OP_NAME(SECURE_ERASE),
288 | | - REQ_OP_NAME(ZONE_RESET),
289 | | - REQ_OP_NAME(WRITE_SAME),
290 | | - REQ_OP_NAME(WRITE_ZEROES),
291 | | - REQ_OP_NAME(SCSI_IN),
292 | | - REQ_OP_NAME(SCSI_OUT),
293 | | - REQ_OP_NAME(DRV_IN),
294 | | - REQ_OP_NAME(DRV_OUT),
295 | | -};
296 | | -#undef REQ_OP_NAME
297 | | -
298 | 273 | #define CMD_FLAG_NAME(name) [__REQ_##name] = #name
299 | 274 | static const char *const cmd_flag_name[] = {
300 | 275 | CMD_FLAG_NAME(FAILFAST_DEV),
.. | ..
310 | 285 | CMD_FLAG_NAME(PREFLUSH),
311 | 286 | CMD_FLAG_NAME(RAHEAD),
312 | 287 | CMD_FLAG_NAME(BACKGROUND),
313 | | - CMD_FLAG_NAME(NOUNMAP),
314 | 288 | CMD_FLAG_NAME(NOWAIT),
| 289 | + CMD_FLAG_NAME(NOUNMAP),
| 290 | + CMD_FLAG_NAME(HIPRI),
315 | 291 | };
316 | 292 | #undef CMD_FLAG_NAME
317 | 293 |
.. | ..
319 | 295 | static const char *const rqf_name[] = {
320 | 296 | RQF_NAME(SORTED),
321 | 297 | RQF_NAME(STARTED),
322 | | - RQF_NAME(QUEUED),
323 | 298 | RQF_NAME(SOFTBARRIER),
324 | 299 | RQF_NAME(FLUSH_SEQ),
325 | 300 | RQF_NAME(MIXED_MERGE),
326 | 301 | RQF_NAME(MQ_INFLIGHT),
327 | 302 | RQF_NAME(DONTPREP),
328 | | - RQF_NAME(PREEMPT),
329 | | - RQF_NAME(COPY_USER),
330 | 303 | RQF_NAME(FAILED),
331 | 304 | RQF_NAME(QUIET),
332 | 305 | RQF_NAME(ELVPRIV),
.. | ..
358 | 331 | int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
359 | 332 | {
360 | 333 | const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
361 | | - const unsigned int op = rq->cmd_flags & REQ_OP_MASK;
| 334 | + const unsigned int op = req_op(rq);
| 335 | + const char *op_str = blk_op_str(op);
362 | 336 |
363 | 337 | seq_printf(m, "%p {.op=", rq);
364 | | - if (op < ARRAY_SIZE(op_name) && op_name[op])
365 | | - seq_printf(m, "%s", op_name[op]);
| 338 | + if (strcmp(op_str, "UNKNOWN") == 0)
| 339 | + seq_printf(m, "%u", op);
366 | 340 | else
367 | | - seq_printf(m, "%d", op);
| 341 | + seq_printf(m, "%s", op_str);
368 | 342 | seq_puts(m, ", .cmd_flags=");
369 | 343 | blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
370 | 344 | ARRAY_SIZE(cmd_flag_name));
.. | ..
425 | 399 |
426 | 400 | /*
427 | 401 | * Note: the state of a request may change while this function is in progress,
428 | | - * e.g. due to a concurrent blk_mq_finish_request() call.
| 402 | + * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
| 403 | + * keep iterating requests.
429 | 404 | */
430 | | -static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
| 405 | +static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
431 | 406 | {
432 | 407 | const struct show_busy_params *params = data;
433 | 408 |
434 | | - if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
435 | | - blk_mq_rq_state(rq) != MQ_RQ_IDLE)
436 | | - __blk_mq_debugfs_rq_show(params->m,
437 | | - list_entry_rq(&rq->queuelist));
| 409 | + if (rq->mq_hctx == params->hctx)
| 410 | + __blk_mq_debugfs_rq_show(params->m, rq);
| 411 | +
| 412 | + return true;
438 | 413 | }
439 | 414 |
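
hctx_show_busy_rq() is a callback for blk_mq_tagset_busy_iter(): it is invoked once per in-flight request in the tag set, and its boolean return value tells the iterator whether to continue (true) or stop early (false). A sketch of another callback following the same contract; the struct, its fields, and the limit logic are hypothetical:

struct count_params {
        struct blk_mq_hw_ctx *hctx;     /* hctx of interest (hypothetical helper) */
        unsigned int nr;                /* requests counted so far */
        unsigned int limit;             /* stop iterating once this many are seen */
};

static bool count_hctx_rqs(struct request *rq, void *data, bool reserved)
{
        struct count_params *p = data;

        if (rq->mq_hctx == p->hctx)
                p->nr++;

        /* returning false makes blk_mq_tagset_busy_iter() stop early */
        return p->nr < p->limit;
}

It would be handed to blk_mq_tagset_busy_iter() exactly like hctx_show_busy_rq is in hctx_busy_show() below.
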
440 | 415 | static int hctx_busy_show(void *data, struct seq_file *m)
.. | ..
445 | 420 | blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
446 | 421 | &params);
447 | 422 |
| 423 | + return 0;
| 424 | +}
| 425 | +
| 426 | +static const char *const hctx_types[] = {
| 427 | + [HCTX_TYPE_DEFAULT] = "default",
| 428 | + [HCTX_TYPE_READ] = "read",
| 429 | + [HCTX_TYPE_POLL] = "poll",
| 430 | +};
| 431 | +
| 432 | +static int hctx_type_show(void *data, struct seq_file *m)
| 433 | +{
| 434 | + struct blk_mq_hw_ctx *hctx = data;
| 435 | +
| 436 | + BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
| 437 | + seq_printf(m, "%s\n", hctx_types[hctx->type]);
448 | 438 | return 0;
449 | 439 | }
450 | 440 |
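
The BUILD_BUG_ON() in hctx_type_show() ties hctx_types[] to the enum it mirrors: if HCTX_MAX_TYPES ever grows without a matching string, the file stops compiling instead of indexing past the table. Conceptually it acts like a static assertion (a sketch only; the kernel macro is implemented differently):

_Static_assert(ARRAY_SIZE(hctx_types) == HCTX_MAX_TYPES,
               "hctx_types[] must name every entry in enum hctx_type");
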
.. | ..
465 | 455 | atomic_read(&tags->active_queues));
466 | 456 |
467 | 457 | seq_puts(m, "\nbitmap_tags:\n");
468 | | - sbitmap_queue_show(&tags->bitmap_tags, m);
| 458 | + sbitmap_queue_show(tags->bitmap_tags, m);
469 | 459 |
470 | 460 | if (tags->nr_reserved_tags) {
471 | 461 | seq_puts(m, "\nbreserved_tags:\n");
472 | | - sbitmap_queue_show(&tags->breserved_tags, m);
| 462 | + sbitmap_queue_show(tags->breserved_tags, m);
473 | 463 | }
474 | 464 | }
475 | 465 |
.. | ..
500 | 490 | if (res)
501 | 491 | goto out;
502 | 492 | if (hctx->tags)
503 | | - sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
| 493 | + sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
504 | 494 | mutex_unlock(&q->sysfs_lock);
505 | 495 |
506 | 496 | out:
.. | ..
534 | 524 | if (res)
535 | 525 | goto out;
536 | 526 | if (hctx->sched_tags)
537 | | - sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
| 527 | + sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
538 | 528 | mutex_unlock(&q->sysfs_lock);
539 | 529 |
540 | 530 | out:
.. | ..
638 | 628 | return 0;
639 | 629 | }
640 | 630 |
641 | | -static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
642 | | - __acquires(&ctx->lock)
643 | | -{
644 | | - struct blk_mq_ctx *ctx = m->private;
645 | | -
646 | | - spin_lock(&ctx->lock);
647 | | - return seq_list_start(&ctx->rq_list, *pos);
| 631 | +#define CTX_RQ_SEQ_OPS(name, type) \
| 632 | +static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
| 633 | + __acquires(&ctx->lock) \
| 634 | +{ \
| 635 | + struct blk_mq_ctx *ctx = m->private; \
| 636 | + \
| 637 | + spin_lock(&ctx->lock); \
| 638 | + return seq_list_start(&ctx->rq_lists[type], *pos); \
| 639 | +} \
| 640 | + \
| 641 | +static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
| 642 | + loff_t *pos) \
| 643 | +{ \
| 644 | + struct blk_mq_ctx *ctx = m->private; \
| 645 | + \
| 646 | + return seq_list_next(v, &ctx->rq_lists[type], pos); \
| 647 | +} \
| 648 | + \
| 649 | +static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v) \
| 650 | + __releases(&ctx->lock) \
| 651 | +{ \
| 652 | + struct blk_mq_ctx *ctx = m->private; \
| 653 | + \
| 654 | + spin_unlock(&ctx->lock); \
| 655 | +} \
| 656 | + \
| 657 | +static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \
| 658 | + .start = ctx_##name##_rq_list_start, \
| 659 | + .next = ctx_##name##_rq_list_next, \
| 660 | + .stop = ctx_##name##_rq_list_stop, \
| 661 | + .show = blk_mq_debugfs_rq_show, \
648 | 662 | }
649 | 663 |
650 | | -static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
651 | | -{
652 | | - struct blk_mq_ctx *ctx = m->private;
| 664 | +CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
| 665 | +CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
| 666 | +CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
653 | 667 |
654 | | - return seq_list_next(v, &ctx->rq_list, pos);
655 | | -}
656 | | -
657 | | -static void ctx_rq_list_stop(struct seq_file *m, void *v)
658 | | - __releases(&ctx->lock)
659 | | -{
660 | | - struct blk_mq_ctx *ctx = m->private;
661 | | -
662 | | - spin_unlock(&ctx->lock);
663 | | -}
664 | | -
665 | | -static const struct seq_operations ctx_rq_list_seq_ops = {
666 | | - .start = ctx_rq_list_start,
667 | | - .next = ctx_rq_list_next,
668 | | - .stop = ctx_rq_list_stop,
669 | | - .show = blk_mq_debugfs_rq_show,
670 | | -};
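
CTX_RQ_SEQ_OPS() stamps out one complete seq_file iterator per software-queue request list, with ## pasting the list name into every generated identifier. A single invocation such as CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ) expands to, roughly:

static void *ctx_read_rq_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_lock(&ctx->lock);
        return seq_list_start(&ctx->rq_lists[HCTX_TYPE_READ], *pos);
}

/* ...ctx_read_rq_list_next() and ctx_read_rq_list_stop() follow the same pattern... */

static const struct seq_operations ctx_read_rq_list_seq_ops = {
        .start  = ctx_read_rq_list_start,
        .next   = ctx_read_rq_list_next,
        .stop   = ctx_read_rq_list_stop,
        .show   = blk_mq_debugfs_rq_show,
};
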
671 | 668 | static int ctx_dispatched_show(void *data, struct seq_file *m)
672 | 669 | {
673 | 670 | struct blk_mq_ctx *ctx = data;
.. | ..
772 | 769 |
773 | 770 | if (attr->show)
774 | 771 | return single_release(inode, file);
775 | | - else
776 | | - return seq_release(inode, file);
| 772 | +
| 773 | + return seq_release(inode, file);
777 | 774 | }
778 | 775 |
779 | 776 | static const struct file_operations blk_mq_debugfs_fops = {
.. | ..
800 | 797 | {"run", 0600, hctx_run_show, hctx_run_write},
801 | 798 | {"active", 0400, hctx_active_show},
802 | 799 | {"dispatch_busy", 0400, hctx_dispatch_busy_show},
| 800 | + {"type", 0400, hctx_type_show},
803 | 801 | {},
804 | 802 | };
805 | 803 |
806 | 804 | static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
807 | | - {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
| 805 | + {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
| 806 | + {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
| 807 | + {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
808 | 808 | {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
809 | 809 | {"merged", 0600, ctx_merged_show, ctx_merged_write},
810 | 810 | {"completed", 0600, ctx_completed_show, ctx_completed_write},
811 | 811 | {},
812 | 812 | };
813 | 813 |
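
Every blk_mq_debugfs_attr entry pairs a file name and mode with either a show/write pair (served through the single_open()/single_release() path visible above) or a .seq_ops table for record-at-a-time output such as the rq_list files. As a sketch, a hypothetical extra hctx attribute would need only a show helper and one more table entry (both names here are invented for illustration):

/* Hypothetical: expose the hardware queue number as its own debugfs file. */
static int hctx_queue_num_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;      /* data set by debugfs_create_files() */

        seq_printf(m, "%u\n", hctx->queue_num);
        return 0;
}

/* ...and, next to the entries above: {"queue_num", 0400, hctx_queue_num_show}, */
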
814 | | -static bool debugfs_create_files(struct dentry *parent, void *data,
| 814 | +static void debugfs_create_files(struct dentry *parent, void *data,
815 | 815 | const struct blk_mq_debugfs_attr *attr)
816 | 816 | {
| 817 | + if (IS_ERR_OR_NULL(parent))
| 818 | + return;
| 819 | +
817 | 820 | d_inode(parent)->i_private = data;
818 | 821 |
819 | | - for (; attr->name; attr++) {
820 | | - if (!debugfs_create_file(attr->name, attr->mode, parent,
821 | | - (void *)attr, &blk_mq_debugfs_fops))
822 | | - return false;
823 | | - }
824 | | - return true;
| 822 | + for (; attr->name; attr++)
| 823 | + debugfs_create_file(attr->name, attr->mode, parent,
| 824 | + (void *)attr, &blk_mq_debugfs_fops);
825 | 825 | }
826 | 826 |
827 | | -int blk_mq_debugfs_register(struct request_queue *q)
| 827 | +void blk_mq_debugfs_register(struct request_queue *q)
828 | 828 | {
829 | 829 | struct blk_mq_hw_ctx *hctx;
830 | 830 | int i;
831 | 831 |
832 | | - if (!blk_debugfs_root)
833 | | - return -ENOENT;
834 | | -
835 | | - q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
836 | | - blk_debugfs_root);
837 | | - if (!q->debugfs_dir)
838 | | - return -ENOMEM;
839 | | -
840 | | - if (!debugfs_create_files(q->debugfs_dir, q,
841 | | - blk_mq_debugfs_queue_attrs))
842 | | - goto err;
| 832 | + debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
843 | 833 |
844 | 834 | /*
845 | 835 | * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
.. | ..
851 | 841 |
852 | 842 | /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
853 | 843 | queue_for_each_hw_ctx(q, hctx, i) {
854 | | - if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
855 | | - goto err;
856 | | - if (q->elevator && !hctx->sched_debugfs_dir &&
857 | | - blk_mq_debugfs_register_sched_hctx(q, hctx))
858 | | - goto err;
| 844 | + if (!hctx->debugfs_dir)
| 845 | + blk_mq_debugfs_register_hctx(q, hctx);
| 846 | + if (q->elevator && !hctx->sched_debugfs_dir)
| 847 | + blk_mq_debugfs_register_sched_hctx(q, hctx);
859 | 848 | }
860 | 849 |
861 | | - return 0;
| 850 | + if (q->rq_qos) {
| 851 | + struct rq_qos *rqos = q->rq_qos;
862 | 852 |
863 | | -err:
864 | | - blk_mq_debugfs_unregister(q);
865 | | - return -ENOMEM;
| 853 | + while (rqos) {
| 854 | + blk_mq_debugfs_register_rqos(rqos);
| 855 | + rqos = rqos->next;
| 856 | + }
| 857 | + }
866 | 858 | }
867 | 859 |
868 | 860 | void blk_mq_debugfs_unregister(struct request_queue *q)
869 | 861 | {
870 | | - debugfs_remove_recursive(q->debugfs_dir);
871 | 862 | q->sched_debugfs_dir = NULL;
872 | | - q->debugfs_dir = NULL;
873 | 863 | }
874 | 864 |
875 | | -static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
876 | | - struct blk_mq_ctx *ctx)
| 865 | +static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
| 866 | + struct blk_mq_ctx *ctx)
877 | 867 | {
878 | 868 | struct dentry *ctx_dir;
879 | 869 | char name[20];
880 | 870 |
881 | 871 | snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
882 | 872 | ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
883 | | - if (!ctx_dir)
884 | | - return -ENOMEM;
885 | 873 |
886 | | - if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
887 | | - return -ENOMEM;
888 | | -
889 | | - return 0;
| 874 | + debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
890 | 875 | }
891 | 876 |
892 | | -int blk_mq_debugfs_register_hctx(struct request_queue *q,
893 | | - struct blk_mq_hw_ctx *hctx)
| 877 | +void blk_mq_debugfs_register_hctx(struct request_queue *q,
| 878 | + struct blk_mq_hw_ctx *hctx)
894 | 879 | {
895 | 880 | struct blk_mq_ctx *ctx;
896 | 881 | char name[20];
897 | 882 | int i;
898 | 883 |
899 | 884 | if (!q->debugfs_dir)
900 | | - return -ENOENT;
| 885 | + return;
901 | 886 |
902 | 887 | snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
903 | 888 | hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
904 | | - if (!hctx->debugfs_dir)
905 | | - return -ENOMEM;
906 | 889 |
907 | | - if (!debugfs_create_files(hctx->debugfs_dir, hctx,
908 | | - blk_mq_debugfs_hctx_attrs))
909 | | - goto err;
| 890 | + debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
910 | 891 |
911 | | - hctx_for_each_ctx(hctx, ctx, i) {
912 | | - if (blk_mq_debugfs_register_ctx(hctx, ctx))
913 | | - goto err;
914 | | - }
915 | | -
916 | | - return 0;
917 | | -
918 | | -err:
919 | | - blk_mq_debugfs_unregister_hctx(hctx);
920 | | - return -ENOMEM;
| 892 | + hctx_for_each_ctx(hctx, ctx, i)
| 893 | + blk_mq_debugfs_register_ctx(hctx, ctx);
921 | 894 | }
922 | 895 |
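
Taken together, the registration helpers build a per-queue tree under the block debugfs root. An illustrative layout, assuming debugfs is mounted at /sys/kernel/debug and using an example device name; the attribute files listed are only samples of the ones created above:

/sys/kernel/debug/block/nvme0n1/            <- q->debugfs_dir
    poll_stat, ...                          <- blk_mq_debugfs_queue_attrs
    sched/                                  <- q->sched_debugfs_dir, only with an elevator
    rqos/<wbt|latency|cost|ioprio>/         <- see blk_mq_debugfs_register_rqos() below
    hctx0/                                  <- one directory per hardware queue
        run, active, dispatch_busy, type, ...
        sched/                              <- hctx->sched_debugfs_dir
        cpu0/                               <- one directory per software queue
            default_rq_list, read_rq_list, poll_rq_list, dispatched, merged, completed
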
923 | 896 | void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
.. | ..
927 | 900 | hctx->debugfs_dir = NULL;
928 | 901 | }
929 | 902 |
930 | | -int blk_mq_debugfs_register_hctxs(struct request_queue *q)
| 903 | +void blk_mq_debugfs_register_hctxs(struct request_queue *q)
931 | 904 | {
932 | 905 | struct blk_mq_hw_ctx *hctx;
933 | 906 | int i;
934 | 907 |
935 | | - queue_for_each_hw_ctx(q, hctx, i) {
936 | | - if (blk_mq_debugfs_register_hctx(q, hctx))
937 | | - return -ENOMEM;
938 | | - }
939 | | -
940 | | - return 0;
| 908 | + queue_for_each_hw_ctx(q, hctx, i)
| 909 | + blk_mq_debugfs_register_hctx(q, hctx);
941 | 910 | }
942 | 911 |
943 | 912 | void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
.. | ..
949 | 918 | blk_mq_debugfs_unregister_hctx(hctx);
950 | 919 | }
951 | 920 |
952 | | -int blk_mq_debugfs_register_sched(struct request_queue *q)
| 921 | +void blk_mq_debugfs_register_sched(struct request_queue *q)
953 | 922 | {
954 | 923 | struct elevator_type *e = q->elevator->type;
955 | 924 |
| 925 | + /*
| 926 | + * If the parent directory has not been created yet, return, we will be
| 927 | + * called again later on and the directory/files will be created then.
| 928 | + */
956 | 929 | if (!q->debugfs_dir)
957 | | - return -ENOENT;
| 930 | + return;
958 | 931 |
959 | 932 | if (!e->queue_debugfs_attrs)
960 | | - return 0;
| 933 | + return;
961 | 934 |
962 | 935 | q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
963 | | - if (!q->sched_debugfs_dir)
964 | | - return -ENOMEM;
965 | 936 |
966 | | - if (!debugfs_create_files(q->sched_debugfs_dir, q,
967 | | - e->queue_debugfs_attrs))
968 | | - goto err;
969 | | -
970 | | - return 0;
971 | | -
972 | | -err:
973 | | - blk_mq_debugfs_unregister_sched(q);
974 | | - return -ENOMEM;
| 937 | + debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
975 | 938 | }
976 | 939 |
977 | 940 | void blk_mq_debugfs_unregister_sched(struct request_queue *q)
.. | ..
980 | 943 | q->sched_debugfs_dir = NULL;
981 | 944 | }
982 | 945 |
983 | | -int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
984 | | - struct blk_mq_hw_ctx *hctx)
| 946 | +static const char *rq_qos_id_to_name(enum rq_qos_id id)
| 947 | +{
| 948 | + switch (id) {
| 949 | + case RQ_QOS_WBT:
| 950 | + return "wbt";
| 951 | + case RQ_QOS_LATENCY:
| 952 | + return "latency";
| 953 | + case RQ_QOS_COST:
| 954 | + return "cost";
| 955 | + case RQ_QOS_IOPRIO:
| 956 | + return "ioprio";
| 957 | + }
| 958 | + return "unknown";
| 959 | +}
| 960 | +
| 961 | +void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
| 962 | +{
| 963 | + debugfs_remove_recursive(rqos->debugfs_dir);
| 964 | + rqos->debugfs_dir = NULL;
| 965 | +}
| 966 | +
| 967 | +void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
| 968 | +{
| 969 | + struct request_queue *q = rqos->q;
| 970 | + const char *dir_name = rq_qos_id_to_name(rqos->id);
| 971 | +
| 972 | + if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
| 973 | + return;
| 974 | +
| 975 | + if (!q->rqos_debugfs_dir)
| 976 | + q->rqos_debugfs_dir = debugfs_create_dir("rqos",
| 977 | + q->debugfs_dir);
| 978 | +
| 979 | + rqos->debugfs_dir = debugfs_create_dir(dir_name,
| 980 | + rqos->q->rqos_debugfs_dir);
| 981 | +
| 982 | + debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
| 983 | +}
| 984 | +
| 985 | +void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
| 986 | +{
| 987 | + debugfs_remove_recursive(q->rqos_debugfs_dir);
| 988 | + q->rqos_debugfs_dir = NULL;
| 989 | +}
| 990 | +
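
blk_mq_debugfs_register_rqos() only creates a directory when the policy supplies rqos->ops->debugfs_attrs, and it passes the rqos itself as the data pointer for those files. A hedged sketch of the policy side; the policy, its show helper, and the attribute name are invented for illustration, and real tables such as wbt's live with the policy code:

/* Illustrative only: an rq-qos policy exporting a single debugfs attribute. */
static int example_rqos_id_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;     /* the rqos handed to debugfs_create_files() */

        seq_printf(m, "%d\n", rqos->id);
        return 0;
}

static const struct blk_mq_debugfs_attr example_rqos_debugfs_attrs[] = {
        {"id", 0400, example_rqos_id_show},
        {},
};

static struct rq_qos_ops example_rqos_ops = {
        /* ...throttle/track/done callbacks omitted... */
        .debugfs_attrs = example_rqos_debugfs_attrs,
};
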
| 991 | +void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
| 992 | + struct blk_mq_hw_ctx *hctx)
985 | 993 | {
986 | 994 | struct elevator_type *e = q->elevator->type;
987 | 995 |
988 | | - if (!hctx->debugfs_dir)
989 | | - return -ENOENT;
990 | | -
991 | 996 | if (!e->hctx_debugfs_attrs)
992 | | - return 0;
| 997 | + return;
993 | 998 |
994 | 999 | hctx->sched_debugfs_dir = debugfs_create_dir("sched",
995 | 1000 | hctx->debugfs_dir);
996 | | - if (!hctx->sched_debugfs_dir)
997 | | - return -ENOMEM;
998 | | -
999 | | - if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
1000 | | - e->hctx_debugfs_attrs))
1001 | | - return -ENOMEM;
1002 | | -
1003 | | - return 0;
| 1001 | + debugfs_create_files(hctx->sched_debugfs_dir, hctx,
| 1002 | + e->hctx_debugfs_attrs);
1004 | 1003 | }
1005 | 1004 |
1006 | 1005 | void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)