```diff
@@ -11,6 +11,7 @@
 #include <linux/blktrace_api.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-cgroup.h>
+#include <linux/debugfs.h>
 
 #include "blk.h"
 #include "blk-mq.h"
```
```diff
@@ -68,7 +69,7 @@
     unsigned long nr;
     int ret, err;
 
-    if (!q->request_fn && !q->mq_ops)
+    if (!queue_is_mq(q))
         return -EINVAL;
 
     ret = queue_var_store(&nr, page, count);
```
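With the legacy (`request_fn`) path gone, `queue_is_mq()` is the only capability check left. For reference, in kernels of this era it is a one-line helper in `<linux/blkdev.h>`; a sketch from memory, not copied from the tree:

```c
/* Sketch of the helper the check now uses: a queue is blk-mq iff it
 * has a set of multiqueue operations attached. */
static inline bool queue_is_mq(struct request_queue *q)
{
    return q->mq_ops;
}
```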
```diff
@@ -78,11 +79,7 @@
     if (nr < BLKDEV_MIN_RQ)
         nr = BLKDEV_MIN_RQ;
 
-    if (q->request_fn)
-        err = blk_update_nr_requests(q, nr);
-    else
-        err = blk_mq_update_nr_requests(q, nr);
-
+    err = blk_mq_update_nr_requests(q, nr);
     if (err)
         return err;
 
```
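From userspace the attribute is unchanged. A minimal sketch that exercises this store path; the device name and path are examples:

```c
/* Write a new queue depth to /sys/block/<dev>/queue/nr_requests.
 * Values below BLKDEV_MIN_RQ (4) are clamped by the kernel; after this
 * change the write fails with EINVAL on non-blk-mq queues. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/sys/block/sda/queue/nr_requests", O_WRONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (write(fd, "128\n", 4) < 0)
        perror("write");
    close(fd);
    return 0;
}
```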
```diff
@@ -136,10 +133,7 @@
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-    if (blk_queue_cluster(q))
-        return queue_var_show(queue_max_segment_size(q), (page));
-
-    return queue_var_show(PAGE_SIZE, (page));
+    return queue_var_show(queue_max_segment_size(q), (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
```
```diff
@@ -225,6 +219,13 @@
         (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
 }
 
+static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
+{
+    unsigned long long max_sectors = q->limits.max_zone_append_sectors;
+
+    return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
+}
+
 static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
```
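The new attribute reports the zone-append limit in bytes rather than sectors; `SECTOR_SHIFT` is 9 (512-byte sectors), so the shift is the usual sectors-to-bytes conversion. A tiny standalone illustration:

```c
/* Demonstrates the conversion queue_zone_append_max_show() performs.
 * SECTOR_SHIFT mirrors the kernel's definition (512-byte sectors). */
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
    unsigned long long max_sectors = 65536;    /* example limit */

    /* 65536 sectors << 9 = 33554432 bytes (32 MiB) */
    printf("%llu\n", max_sectors << SECTOR_SHIFT);
    return 0;
}
```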
```diff
@@ -242,10 +243,10 @@
     if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
         return -EINVAL;
 
-    spin_lock_irq(q->queue_lock);
+    spin_lock_irq(&q->queue_lock);
     q->limits.max_sectors = max_sectors_kb << 1;
     q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
-    spin_unlock_irq(q->queue_lock);
+    spin_unlock_irq(&q->queue_lock);
 
     return ret;
 }
```
```diff
@@ -259,14 +260,14 @@
 
 #define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
 static ssize_t \
-queue_show_##name(struct request_queue *q, char *page) \
+queue_##name##_show(struct request_queue *q, char *page) \
 { \
     int bit; \
     bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
     return queue_var_show(neg ? !bit : bit, page); \
 } \
 static ssize_t \
-queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
 { \
     unsigned long val; \
     ssize_t ret; \
```
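The rename makes the generated names follow the `queue_<attr>_show`/`queue_<attr>_store` convention that the new entry macros further down rely on. Hand-expanding `QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1)` gives, for the show half (the store half expands analogously):

```c
/* Expansion of the _show half after the rename; neg == 1 inverts the
 * flag, which is why the "rotational" file reads 1 when NONROT is clear. */
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
    int bit;
    bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
    return queue_var_show(1 ? !bit : bit, page);    /* i.e. !bit */
}
```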
```diff
@@ -286,6 +287,7 @@
 QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
 QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
 QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
 #undef QUEUE_SYSFS_BIT_FNS
 
 static ssize_t queue_zoned_show(struct request_queue *q, char *page)
```
```diff
@@ -298,6 +300,21 @@
     default:
         return sprintf(page, "none\n");
     }
+}
+
+static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
+{
+    return queue_var_show(blk_queue_nr_zones(q), page);
+}
+
+static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
+{
+    return queue_var_show(queue_max_open_zones(q), page);
+}
+
+static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
+{
+    return queue_var_show(queue_max_active_zones(q), page);
 }
 
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
```
```diff
@@ -315,14 +332,12 @@
     if (ret < 0)
         return ret;
 
-    spin_lock_irq(q->queue_lock);
-    queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
-    queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+    blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+    blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
     if (nm == 2)
-        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+        blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
     else if (nm)
-        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-    spin_unlock_irq(q->queue_lock);
+        blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 
     return ret;
 }
```
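The `blk_queue_flag_set()`/`blk_queue_flag_clear()` helpers use atomic bitops internally, which is why the `queue_lock` critical sections disappear here and in the hunks below. Roughly, a sketch of the v5.x helpers in `block/blk-core.c`, written from memory:

```c
/* Atomic queue-flag helpers; no spinlock is needed around a single-bit
 * update, unlike the old queue_flag_set()/queue_flag_clear(), which
 * required q->queue_lock to be held. */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
    set_bit(flag, &q->queue_flags);
}

void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
    clear_bit(flag, &q->queue_flags);
}
```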
```diff
@@ -346,18 +361,16 @@
     if (ret < 0)
         return ret;
 
-    spin_lock_irq(q->queue_lock);
     if (val == 2) {
-        queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-        queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+        blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+        blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
     } else if (val == 1) {
-        queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-        queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+        blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+        blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
     } else if (val == 0) {
-        queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
-        queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+        blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+        blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
     }
-    spin_unlock_irq(q->queue_lock);
 #endif
     return ret;
 }
```
```diff
@@ -366,8 +379,8 @@
 {
     int val;
 
-    if (q->poll_nsec == -1)
-        val = -1;
+    if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+        val = BLK_MQ_POLL_CLASSIC;
     else
         val = q->poll_nsec / 1000;
 
```
```diff
@@ -386,10 +399,12 @@
     if (err < 0)
         return err;
 
-    if (val == -1)
-        q->poll_nsec = -1;
-    else
+    if (val == BLK_MQ_POLL_CLASSIC)
+        q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+    else if (val >= 0)
         q->poll_nsec = val * 1000;
+    else
+        return -EINVAL;
 
     return count;
 }
```
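`BLK_MQ_POLL_CLASSIC` names the sentinel that used to be written as a bare `-1`, and the store now explicitly rejects any other negative value instead of silently scaling it. Per the documented `io_poll_delay` ABI, writing `-1` selects classic polling, `0` lets the kernel estimate a hybrid sleep time, and a positive value fixes the hybrid sleep in microseconds. As far as I can tell the definition in this era is:

```c
/* From include/linux/blkdev.h (v5.x era): the sentinel the code now
 * names; the numeric value is unchanged from the old bare -1. */
#define BLK_MQ_POLL_CLASSIC -1
```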
```diff
@@ -405,7 +420,8 @@
     unsigned long poll_on;
     ssize_t ret;
 
-    if (!q->mq_ops || !q->mq_ops->poll)
+    if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
+        !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
         return -EINVAL;
 
     ret = queue_var_store(&poll_on, page, count);
```
```diff
@@ -418,6 +434,26 @@
         blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
 
     return ret;
+}
+
+static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
+{
+    return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
+}
+
+static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
+                                      size_t count)
+{
+    unsigned int val;
+    int err;
+
+    err = kstrtou32(page, 10, &val);
+    if (err || val == 0)
+        return -EINVAL;
+
+    blk_queue_rq_timeout(q, msecs_to_jiffies(val));
+
+    return count;
 }
 
 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
```
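The new `io_timeout` attribute is in milliseconds, and zero is rejected. A minimal userspace sketch; the device name is an example:

```c
/* Set a 30 s request timeout via the new io_timeout attribute. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/sys/block/sda/queue/io_timeout", O_WRONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    const char *ms = "30000\n";    /* milliseconds */
    if (write(fd, ms, strlen(ms)) < 0)
        perror("write");           /* "0" is rejected with EINVAL */
    close(fd);
    return 0;
}
```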
```diff
@@ -453,25 +489,21 @@
     else if (val >= 0)
         val *= 1000ULL;
 
+    if (wbt_get_min_lat(q) == val)
+        return count;
+
     /*
      * Ensure that the queue is idled, in case the latency update
      * ends up either enabling or disabling wbt completely. We can't
      * have IO inflight if that happens.
      */
-    if (q->mq_ops) {
-        blk_mq_freeze_queue(q);
-        blk_mq_quiesce_queue(q);
-    } else
-        blk_queue_bypass_start(q);
+    blk_mq_freeze_queue(q);
+    blk_mq_quiesce_queue(q);
 
     wbt_set_min_lat(q, val);
-    wbt_update_limits(q);
 
-    if (q->mq_ops) {
-        blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
-    } else
-        blk_queue_bypass_end(q);
+    blk_mq_unquiesce_queue(q);
+    blk_mq_unfreeze_queue(q);
 
     return count;
 }
```
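Freezing waits out in-flight requests and blocks new ones; quiescing additionally stops dispatch into the driver. The new short-circuit at the top avoids this heavyweight dance when the written latency equals the current one. The pattern, as a hypothetical reusable helper (`with_queue_idled` is not a kernel function, just an illustration):

```c
/* Hypothetical helper illustrating the idle-the-queue pattern above. */
static void with_queue_idled(struct request_queue *q,
                             void (*fn)(struct request_queue *q, u64 val),
                             u64 val)
{
    blk_mq_freeze_queue(q);      /* drain in-flight requests */
    blk_mq_quiesce_queue(q);     /* stop ->queue_rq() dispatch */
    fn(q, val);                  /* e.g. wbt_set_min_lat */
    blk_mq_unquiesce_queue(q);
    blk_mq_unfreeze_queue(q);
}
```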
```diff
@@ -516,194 +548,75 @@
     return queue_var_show(blk_queue_dax(q), page);
 }
 
-static struct queue_sysfs_entry queue_requests_entry = {
-    .attr = {.name = "nr_requests", .mode = 0644 },
-    .show = queue_requests_show,
-    .store = queue_requests_store,
+#define QUEUE_RO_ENTRY(_prefix, _name) \
+static struct queue_sysfs_entry _prefix##_entry = { \
+    .attr = { .name = _name, .mode = 0444 }, \
+    .show = _prefix##_show, \
 };
 
-static struct queue_sysfs_entry queue_ra_entry = {
-    .attr = {.name = "read_ahead_kb", .mode = 0644 },
-    .show = queue_ra_show,
-    .store = queue_ra_store,
+#define QUEUE_RW_ENTRY(_prefix, _name) \
+static struct queue_sysfs_entry _prefix##_entry = { \
+    .attr = { .name = _name, .mode = 0644 }, \
+    .show = _prefix##_show, \
+    .store = _prefix##_store, \
 };
 
-static struct queue_sysfs_entry queue_max_sectors_entry = {
-    .attr = {.name = "max_sectors_kb", .mode = 0644 },
-    .show = queue_max_sectors_show,
-    .store = queue_max_sectors_store,
-};
+QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
+QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
+QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
+QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
+QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
+QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
+QUEUE_RW_ENTRY(elv_iosched, "scheduler");
 
-static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
-    .attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
-    .show = queue_max_hw_sectors_show,
-};
+QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
+QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
+QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
+QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
+QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
 
-static struct queue_sysfs_entry queue_max_segments_entry = {
-    .attr = {.name = "max_segments", .mode = 0444 },
-    .show = queue_max_segments_show,
-};
+QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
+QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
+QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
+QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
+QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
 
-static struct queue_sysfs_entry queue_max_discard_segments_entry = {
-    .attr = {.name = "max_discard_segments", .mode = 0444 },
-    .show = queue_max_discard_segments_show,
-};
+QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
+QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
+QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
 
-static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
-    .attr = {.name = "max_integrity_segments", .mode = 0444 },
-    .show = queue_max_integrity_segments_show,
-};
+QUEUE_RO_ENTRY(queue_zoned, "zoned");
+QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
+QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
+QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
 
-static struct queue_sysfs_entry queue_max_segment_size_entry = {
-    .attr = {.name = "max_segment_size", .mode = 0444 },
-    .show = queue_max_segment_size_show,
-};
+QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
+QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
+QUEUE_RW_ENTRY(queue_poll, "io_poll");
+QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
+QUEUE_RW_ENTRY(queue_wc, "write_cache");
+QUEUE_RO_ENTRY(queue_fua, "fua");
+QUEUE_RO_ENTRY(queue_dax, "dax");
+QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
+QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
 
-static struct queue_sysfs_entry queue_iosched_entry = {
-    .attr = {.name = "scheduler", .mode = 0644 },
-    .show = elv_iosched_show,
-    .store = elv_iosched_store,
-};
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
+#endif
 
+/* legacy alias for logical_block_size: */
 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
     .attr = {.name = "hw_sector_size", .mode = 0444 },
     .show = queue_logical_block_size_show,
 };
 
-static struct queue_sysfs_entry queue_logical_block_size_entry = {
-    .attr = {.name = "logical_block_size", .mode = 0444 },
-    .show = queue_logical_block_size_show,
-};
+QUEUE_RW_ENTRY(queue_nonrot, "rotational");
+QUEUE_RW_ENTRY(queue_iostats, "iostats");
+QUEUE_RW_ENTRY(queue_random, "add_random");
+QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
 
-static struct queue_sysfs_entry queue_physical_block_size_entry = {
-    .attr = {.name = "physical_block_size", .mode = 0444 },
-    .show = queue_physical_block_size_show,
-};
-
-static struct queue_sysfs_entry queue_chunk_sectors_entry = {
-    .attr = {.name = "chunk_sectors", .mode = 0444 },
-    .show = queue_chunk_sectors_show,
-};
-
-static struct queue_sysfs_entry queue_io_min_entry = {
-    .attr = {.name = "minimum_io_size", .mode = 0444 },
-    .show = queue_io_min_show,
-};
-
-static struct queue_sysfs_entry queue_io_opt_entry = {
-    .attr = {.name = "optimal_io_size", .mode = 0444 },
-    .show = queue_io_opt_show,
-};
-
-static struct queue_sysfs_entry queue_discard_granularity_entry = {
-    .attr = {.name = "discard_granularity", .mode = 0444 },
-    .show = queue_discard_granularity_show,
-};
-
-static struct queue_sysfs_entry queue_discard_max_hw_entry = {
-    .attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
-    .show = queue_discard_max_hw_show,
-};
-
-static struct queue_sysfs_entry queue_discard_max_entry = {
-    .attr = {.name = "discard_max_bytes", .mode = 0644 },
-    .show = queue_discard_max_show,
-    .store = queue_discard_max_store,
-};
-
-static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
-    .attr = {.name = "discard_zeroes_data", .mode = 0444 },
-    .show = queue_discard_zeroes_data_show,
-};
-
-static struct queue_sysfs_entry queue_write_same_max_entry = {
-    .attr = {.name = "write_same_max_bytes", .mode = 0444 },
-    .show = queue_write_same_max_show,
-};
-
-static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
-    .attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
-    .show = queue_write_zeroes_max_show,
-};
-
-static struct queue_sysfs_entry queue_nonrot_entry = {
-    .attr = {.name = "rotational", .mode = 0644 },
-    .show = queue_show_nonrot,
-    .store = queue_store_nonrot,
-};
-
-static struct queue_sysfs_entry queue_zoned_entry = {
-    .attr = {.name = "zoned", .mode = 0444 },
-    .show = queue_zoned_show,
-};
-
-static struct queue_sysfs_entry queue_nomerges_entry = {
-    .attr = {.name = "nomerges", .mode = 0644 },
-    .show = queue_nomerges_show,
-    .store = queue_nomerges_store,
-};
-
-static struct queue_sysfs_entry queue_rq_affinity_entry = {
-    .attr = {.name = "rq_affinity", .mode = 0644 },
-    .show = queue_rq_affinity_show,
-    .store = queue_rq_affinity_store,
-};
-
-static struct queue_sysfs_entry queue_iostats_entry = {
-    .attr = {.name = "iostats", .mode = 0644 },
-    .show = queue_show_iostats,
-    .store = queue_store_iostats,
-};
-
-static struct queue_sysfs_entry queue_random_entry = {
-    .attr = {.name = "add_random", .mode = 0644 },
-    .show = queue_show_random,
-    .store = queue_store_random,
-};
-
-static struct queue_sysfs_entry queue_poll_entry = {
-    .attr = {.name = "io_poll", .mode = 0644 },
-    .show = queue_poll_show,
-    .store = queue_poll_store,
-};
-
-static struct queue_sysfs_entry queue_poll_delay_entry = {
-    .attr = {.name = "io_poll_delay", .mode = 0644 },
-    .show = queue_poll_delay_show,
-    .store = queue_poll_delay_store,
-};
-
-static struct queue_sysfs_entry queue_wc_entry = {
-    .attr = {.name = "write_cache", .mode = 0644 },
-    .show = queue_wc_show,
-    .store = queue_wc_store,
-};
-
-static struct queue_sysfs_entry queue_fua_entry = {
-    .attr = {.name = "fua", .mode = 0444 },
-    .show = queue_fua_show,
-};
-
-static struct queue_sysfs_entry queue_dax_entry = {
-    .attr = {.name = "dax", .mode = 0444 },
-    .show = queue_dax_show,
-};
-
-static struct queue_sysfs_entry queue_wb_lat_entry = {
-    .attr = {.name = "wbt_lat_usec", .mode = 0644 },
-    .show = queue_wb_lat_show,
-    .store = queue_wb_lat_store,
-};
-
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-static struct queue_sysfs_entry throtl_sample_time_entry = {
-    .attr = {.name = "throttle_sample_time", .mode = 0644 },
-    .show = blk_throtl_sample_time_show,
-    .store = blk_throtl_sample_time_store,
-};
-#endif
-
-static struct attribute *default_attrs[] = {
+static struct attribute *queue_attrs[] = {
     &queue_requests_entry.attr,
     &queue_ra_entry.attr,
     &queue_max_hw_sectors_entry.attr,
```
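Each `QUEUE_RO_ENTRY`/`QUEUE_RW_ENTRY` line expands to exactly the kind of struct it replaces, which is why the earlier `_show`/`_store` renames were needed. For example, `QUEUE_RW_ENTRY(queue_requests, "nr_requests");` hand-expands to:

```c
/* Hand expansion of QUEUE_RW_ENTRY(queue_requests, "nr_requests");
 * identical to the open-coded struct deleted above. */
static struct queue_sysfs_entry queue_requests_entry = {
    .attr  = { .name = "nr_requests", .mode = 0644 },
    .show  = queue_requests_show,
    .store = queue_requests_store,
};
```

The same convention is what lets `QUEUE_RW_ENTRY(elv_iosched, "scheduler")` pick up the existing `elv_iosched_show`/`elv_iosched_store` without any glue.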
```diff
@@ -712,7 +625,7 @@
     &queue_max_discard_segments_entry.attr,
     &queue_max_integrity_segments_entry.attr,
     &queue_max_segment_size_entry.attr,
-    &queue_iosched_entry.attr,
+    &elv_iosched_entry.attr,
     &queue_hw_sector_size_entry.attr,
     &queue_logical_block_size_entry.attr,
     &queue_physical_block_size_entry.attr,
```
```diff
@@ -725,11 +638,16 @@
     &queue_discard_zeroes_data_entry.attr,
     &queue_write_same_max_entry.attr,
     &queue_write_zeroes_max_entry.attr,
+    &queue_zone_append_max_entry.attr,
     &queue_nonrot_entry.attr,
     &queue_zoned_entry.attr,
+    &queue_nr_zones_entry.attr,
+    &queue_max_open_zones_entry.attr,
+    &queue_max_active_zones_entry.attr,
     &queue_nomerges_entry.attr,
     &queue_rq_affinity_entry.attr,
     &queue_iostats_entry.attr,
+    &queue_stable_writes_entry.attr,
     &queue_random_entry.attr,
     &queue_poll_entry.attr,
     &queue_wc_entry.attr,
```
```diff
@@ -737,11 +655,36 @@
     &queue_dax_entry.attr,
     &queue_wb_lat_entry.attr,
     &queue_poll_delay_entry.attr,
+    &queue_io_timeout_entry.attr,
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-    &throtl_sample_time_entry.attr,
+    &blk_throtl_sample_time_entry.attr,
 #endif
     NULL,
 };
+
+static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
+                                  int n)
+{
+    struct request_queue *q =
+        container_of(kobj, struct request_queue, kobj);
+
+    if (attr == &queue_io_timeout_entry.attr &&
+        (!q->mq_ops || !q->mq_ops->timeout))
+        return 0;
+
+    if ((attr == &queue_max_open_zones_entry.attr ||
+         attr == &queue_max_active_zones_entry.attr) &&
+        !blk_queue_is_zoned(q))
+        return 0;
+
+    return attr->mode;
+}
+
+static struct attribute_group queue_attr_group = {
+    .attrs = queue_attrs,
+    .is_visible = queue_attr_visible,
+};
+
 
 #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
 
```
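Moving from `kobj_type.default_attrs` to an explicit `attribute_group` buys the `.is_visible` hook: sysfs consults it per attribute when the group is created, so `io_timeout` only appears on queues whose driver implements a `->timeout` handler, and the zone limits only on zoned devices. A simplified sketch of what group creation does with the callback; this is an illustration, not the actual fs/sysfs code:

```c
/* Sketch: how a group creator consults .is_visible per attribute.
 * Returning 0 hides the file; any nonzero mode exposes it. */
static void create_group_sketch(struct kobject *kobj,
                                const struct attribute_group *grp)
{
    int i;

    for (i = 0; grp->attrs[i]; i++) {
        umode_t mode = grp->is_visible ?
            grp->is_visible(kobj, grp->attrs[i], i) :
            grp->attrs[i]->mode;
        if (mode)
            ; /* create the sysfs file with this mode */
    }
}
```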
```diff
@@ -756,10 +699,6 @@
     if (!entry->show)
         return -EIO;
     mutex_lock(&q->sysfs_lock);
-    if (blk_queue_dying(q)) {
-        mutex_unlock(&q->sysfs_lock);
-        return -ENOENT;
-    }
     res = entry->show(q, page);
     mutex_unlock(&q->sysfs_lock);
     return res;
```
```diff
@@ -778,10 +717,6 @@
 
     q = container_of(kobj, struct request_queue, kobj);
     mutex_lock(&q->sysfs_lock);
-    if (blk_queue_dying(q)) {
-        mutex_unlock(&q->sysfs_lock);
-        return -ENOENT;
-    }
     res = entry->store(q, page, length);
     mutex_unlock(&q->sysfs_lock);
     return res;
```
```diff
@@ -791,84 +726,100 @@
 {
     struct request_queue *q = container_of(rcu_head, struct request_queue,
                                            rcu_head);
+
+    percpu_ref_exit(&q->q_usage_counter);
     kmem_cache_free(blk_requestq_cachep, q);
 }
 
-/**
- * __blk_release_queue - release a request queue when it is no longer needed
- * @work: pointer to the release_work member of the request queue to be released
- *
- * Description:
- *     blk_release_queue is the counterpart of blk_init_queue(). It should be
- *     called when a request queue is being released; typically when a block
- *     device is being de-registered. Its primary task it to free the queue
- *     itself.
- *
- * Notes:
- *     The low level driver must have finished any outstanding requests first
- *     via blk_cleanup_queue().
- *
- *     Although blk_release_queue() may be called with preemption disabled,
- *     __blk_release_queue() may sleep.
- */
-static void __blk_release_queue(struct work_struct *work)
+/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+static void blk_exit_queue(struct request_queue *q)
 {
-    struct request_queue *q = container_of(work, typeof(*q), release_work);
+    /*
+     * Since the I/O scheduler exit code may access cgroup information,
+     * perform I/O scheduler exit before disassociating from the block
+     * cgroup controller.
+     */
+    if (q->elevator) {
+        ioc_clear_queue(q);
+        __elevator_exit(q, q->elevator);
+    }
+
+    /*
+     * Remove all references to @q from the block cgroup controller before
+     * restoring @q->queue_lock to avoid that restoring this pointer causes
+     * e.g. blkcg_print_blkgs() to crash.
+     */
+    blkcg_exit_queue(q);
+
+    /*
+     * Since the cgroup code may dereference the @q->backing_dev_info
+     * pointer, only decrease its reference count after having removed the
+     * association with the block cgroup controller.
+     */
+    bdi_put(q->backing_dev_info);
+}
+
+/**
+ * blk_release_queue - releases all allocated resources of the request_queue
+ * @kobj: pointer to a kobject, whose container is a request_queue
+ *
+ * This function releases all allocated resources of the request queue.
+ *
+ * The struct request_queue refcount is incremented with blk_get_queue() and
+ * decremented with blk_put_queue(). Once the refcount reaches 0 this function
+ * is called.
+ *
+ * For drivers that have a request_queue on a gendisk and added with
+ * __device_add_disk() the refcount to request_queue will reach 0 with
+ * the last put_disk() called by the driver. For drivers which don't use
+ * __device_add_disk() this happens with blk_cleanup_queue().
+ *
+ * Drivers exist which depend on the release of the request_queue to be
+ * synchronous, it should not be deferred.
+ *
+ * Context: can sleep
+ */
+static void blk_release_queue(struct kobject *kobj)
+{
+    struct request_queue *q =
+        container_of(kobj, struct request_queue, kobj);
+
+    might_sleep();
 
     if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
         blk_stat_remove_callback(q, q->poll_cb);
     blk_stat_free_callback(q->poll_cb);
 
-    if (!blk_queue_dead(q)) {
-        /*
-         * Last reference was dropped without having called
-         * blk_cleanup_queue().
-         */
-        WARN_ONCE(blk_queue_init_done(q),
-                  "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
-                  q);
-        blk_exit_queue(q);
-    }
-
-    WARN(blk_queue_root_blkg(q),
-         "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
-         q);
-
     blk_free_queue_stats(q->stats);
 
-    if (q->mq_ops)
+    if (queue_is_mq(q)) {
+        struct blk_mq_hw_ctx *hctx;
+        int i;
+
         cancel_delayed_work_sync(&q->requeue_work);
 
-    blk_exit_rl(q, &q->root_rl);
-
-    if (q->queue_tags)
-        __blk_queue_free_tags(q);
-
-    if (!q->mq_ops) {
-        if (q->exit_rq_fn)
-            q->exit_rq_fn(q, q->fq->flush_rq);
-        blk_free_flush_queue(q->fq);
-    } else {
-        blk_mq_release(q);
+        queue_for_each_hw_ctx(q, hctx, i)
+            cancel_delayed_work_sync(&hctx->run_work);
     }
 
-    blk_trace_shutdown(q);
+    blk_exit_queue(q);
 
-    if (q->mq_ops)
+    blk_queue_free_zone_bitmaps(q);
+
+    if (queue_is_mq(q))
+        blk_mq_release(q);
+
+    blk_trace_shutdown(q);
+    mutex_lock(&q->debugfs_mutex);
+    debugfs_remove_recursive(q->debugfs_dir);
+    mutex_unlock(&q->debugfs_mutex);
+
+    if (queue_is_mq(q))
         blk_mq_debugfs_unregister(q);
 
     bioset_exit(&q->bio_split);
 
     ida_simple_remove(&blk_queue_ida, q->id);
     call_rcu(&q->rcu_head, blk_free_queue_rcu);
-}
-
-static void blk_release_queue(struct kobject *kobj)
-{
-    struct request_queue *q =
-        container_of(kobj, struct request_queue, kobj);
-
-    INIT_WORK(&q->release_work, __blk_release_queue);
-    schedule_work(&q->release_work);
 }
 
```
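Release now happens directly in the kobject release callback instead of being punted to a workqueue, which is why the new kerneldoc stresses that it can sleep and must stay synchronous. The refcount lifecycle it documents, as a hedged usage sketch (the surrounding function is hypothetical):

```c
/* Sketch: holding a request_queue across a non-atomic region.
 * blk_get_queue() fails once the queue is dying; the final
 * blk_put_queue() ends up in blk_release_queue() above. */
static void inspect_queue(struct request_queue *q)
{
    if (!blk_get_queue(q))
        return;                 /* queue is already going away */

    pr_info("queue depth: %lu\n", q->nr_requests);

    blk_put_queue(q);           /* may drop the last reference */
}
```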
```diff
@@ -879,7 +830,6 @@
 
 struct kobj_type blk_queue_ktype = {
     .sysfs_ops = &queue_sysfs_ops,
-    .default_attrs = default_attrs,
     .release = blk_release_queue,
 };
 
```
```diff
@@ -899,7 +849,6 @@
     WARN_ONCE(blk_queue_registered(q),
               "%s is registering an already registered queue\n",
               kobject_name(&dev->kobj));
-    queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
 
     /*
      * SCSI probing may synchronously create and destroy a lot of
```
```diff
@@ -911,17 +860,17 @@
      * request_queues for non-existent devices never get registered.
      */
     if (!blk_queue_init_done(q)) {
-        queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+        blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
         percpu_ref_switch_to_percpu(&q->q_usage_counter);
-        blk_queue_bypass_end(q);
     }
+
+    blk_queue_update_readahead(q);
 
     ret = blk_trace_init_sysfs(dev);
     if (ret)
         return ret;
 
-    /* Prevent changes through sysfs until registration is completed. */
-    mutex_lock(&q->sysfs_lock);
+    mutex_lock(&q->sysfs_dir_lock);
 
     ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
     if (ret < 0) {
```
```diff
@@ -929,31 +878,50 @@
         goto unlock;
     }
 
-    if (q->mq_ops) {
+    ret = sysfs_create_group(&q->kobj, &queue_attr_group);
+    if (ret) {
+        blk_trace_remove_sysfs(dev);
+        kobject_del(&q->kobj);
+        kobject_put(&dev->kobj);
+        goto unlock;
+    }
+
+    mutex_lock(&q->debugfs_mutex);
+    q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
+                                        blk_debugfs_root);
+    mutex_unlock(&q->debugfs_mutex);
+
+    if (queue_is_mq(q)) {
         __blk_mq_register_dev(dev, q);
         blk_mq_debugfs_register(q);
     }
 
-    kobject_uevent(&q->kobj, KOBJ_ADD);
-
-    wbt_enable_default(q);
-
-    blk_throtl_register_queue(q);
-
-    if (q->request_fn || (q->mq_ops && q->elevator)) {
-        ret = elv_register_queue(q);
+    mutex_lock(&q->sysfs_lock);
+    if (q->elevator) {
+        ret = elv_register_queue(q, false);
         if (ret) {
             mutex_unlock(&q->sysfs_lock);
-            kobject_uevent(&q->kobj, KOBJ_REMOVE);
+            mutex_unlock(&q->sysfs_dir_lock);
             kobject_del(&q->kobj);
             blk_trace_remove_sysfs(dev);
             kobject_put(&dev->kobj);
             return ret;
         }
     }
+
+    blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
+    wbt_enable_default(q);
+    blk_throtl_register_queue(q);
+
+    /* Now everything is ready and send out KOBJ_ADD uevent */
+    kobject_uevent(&q->kobj, KOBJ_ADD);
+    if (q->elevator)
+        kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
+    mutex_unlock(&q->sysfs_lock);
+
     ret = 0;
 unlock:
-    mutex_unlock(&q->sysfs_lock);
+    mutex_unlock(&q->sysfs_dir_lock);
     return ret;
 }
 EXPORT_SYMBOL_GPL(blk_register_queue);
```
```diff
@@ -982,25 +950,28 @@
      * concurrent elv_iosched_store() calls.
      */
     mutex_lock(&q->sysfs_lock);
-
     blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
+    mutex_unlock(&q->sysfs_lock);
 
+    mutex_lock(&q->sysfs_dir_lock);
     /*
      * Remove the sysfs attributes before unregistering the queue data
      * structures that can be modified through sysfs.
      */
-    if (q->mq_ops)
+    if (queue_is_mq(q))
         blk_mq_unregister_dev(disk_to_dev(disk), q);
-    mutex_unlock(&q->sysfs_lock);
-
-    kobject_uevent(&q->kobj, KOBJ_REMOVE);
-    kobject_del(&q->kobj);
     blk_trace_remove_sysfs(disk_to_dev(disk));
 
     mutex_lock(&q->sysfs_lock);
-    if (q->request_fn || (q->mq_ops && q->elevator))
+    if (q->elevator)
         elv_unregister_queue(q);
     mutex_unlock(&q->sysfs_lock);
 
+    /* Now that we've deleted all child objects, we can delete the queue. */
+    kobject_uevent(&q->kobj, KOBJ_REMOVE);
+    kobject_del(&q->kobj);
+
+    mutex_unlock(&q->sysfs_dir_lock);
+
     kobject_put(&disk_to_dev(disk)->kobj);
 }
```
|---|