| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Block device elevator/IO-scheduler. |
|---|
| 3 | 4 | * |
|---|
| .. | .. |
|---|
| 41 | 42 | |
|---|
| 42 | 43 | #include "blk.h" |
|---|
| 43 | 44 | #include "blk-mq-sched.h" |
|---|
| 45 | +#include "blk-pm.h" |
|---|
| 44 | 46 | #include "blk-wbt.h" |
|---|
| 45 | 47 | |
|---|
| 46 | 48 | static DEFINE_SPINLOCK(elv_list_lock); |
|---|
| .. | .. |
|---|
| 60 | 62 | struct request_queue *q = rq->q; |
|---|
| 61 | 63 | struct elevator_queue *e = q->elevator; |
|---|
| 62 | 64 | |
|---|
| 63 | | - if (e->uses_mq && e->type->ops.mq.allow_merge) |
|---|
| 64 | | - return e->type->ops.mq.allow_merge(q, rq, bio); |
|---|
| 65 | | - else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn) |
|---|
| 66 | | - return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio); |
|---|
| 65 | + if (e->type->ops.allow_merge) |
|---|
| 66 | + return e->type->ops.allow_merge(q, rq, bio); |
|---|
| 67 | 67 | |
|---|
| 68 | 68 | return 1; |
|---|
| 69 | 69 | } |
|---|
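
The hunk above is typical of the whole patch: with the legacy request path gone, the `ops.mq`/`ops.sq` split disappears and every call site shrinks to a single NULL check on the unified ops table. For context, an abridged sketch of the pre-conversion layout that made the removed branches necessary (field subset only, not the full definition):

```c
/* Abridged sketch of the old struct elevator_type: both callback
 * tables sat behind a union, selected at runtime via ->uses_mq. */
struct elevator_type_legacy {
	union {
		struct elevator_ops sq;		/* legacy request_fn callbacks */
		struct elevator_mq_ops mq;	/* blk-mq callbacks */
	} ops;
	bool uses_mq;
	/* name, owner, sysfs attributes, ... */
};
```
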
| .. | .. |
|---|
| 83 | 83 | } |
|---|
| 84 | 84 | EXPORT_SYMBOL(elv_bio_merge_ok); |
|---|
| 85 | 85 | |
|---|
| 86 | | -static bool elevator_match(const struct elevator_type *e, const char *name) |
|---|
| 86 | +static inline bool elv_support_features(unsigned int elv_features, |
|---|
| 87 | + unsigned int required_features) |
|---|
| 87 | 88 | { |
|---|
| 89 | + return (required_features & elv_features) == required_features; |
|---|
| 90 | +} |
|---|
| 91 | + |
|---|
| 92 | +/** |
|---|
| 93 | + * elevator_match - Test an elevator name and features |
|---|
| 94 | + * @e: Scheduler to test |
|---|
| 95 | + * @name: Elevator name to test |
|---|
| 96 | + * @required_features: Features that the elevator must provide |
|---|
| 97 | + * |
|---|
| 98 | + * Return true if the name of elevator @e matches @name and if @e provides all |
|---|
| 99 | + * the features specified by @required_features. |
|---|
| 100 | + */ |
|---|
| 101 | +static bool elevator_match(const struct elevator_type *e, const char *name, |
|---|
| 102 | + unsigned int required_features) |
|---|
| 103 | +{ |
|---|
| 104 | + if (!elv_support_features(e->elevator_features, required_features)) |
|---|
| 105 | + return false; |
|---|
| 88 | 106 | if (!strcmp(e->elevator_name, name)) |
|---|
| 89 | 107 | return true; |
|---|
| 90 | 108 | if (e->elevator_alias && !strcmp(e->elevator_alias, name)) |
|---|
| .. | .. |
|---|
| 93 | 111 | return false; |
|---|
| 94 | 112 | } |
|---|
| 95 | 113 | |
|---|
| 96 | | -/* |
|---|
| 97 | | - * Return scheduler with name 'name' and with matching 'mq capability |
|---|
| 114 | +/** |
|---|
| 115 | + * elevator_find - Find an elevator |
|---|
| 116 | + * @name: Name of the elevator to find |
|---|
| 117 | + * @required_features: Features that the elevator must provide |
|---|
| 118 | + * |
|---|
| 119 | + * Return the first registered scheduler with name @name that supports all |
|---|
| 120 | + * the features @required_features, or NULL if none is found. |
|---|
| 98 | 121 | */ |
|---|
| 99 | | -static struct elevator_type *elevator_find(const char *name, bool mq) |
|---|
| 122 | +static struct elevator_type *elevator_find(const char *name, |
|---|
| 123 | + unsigned int required_features) |
|---|
| 100 | 124 | { |
|---|
| 101 | 125 | struct elevator_type *e; |
|---|
| 102 | 126 | |
|---|
| 103 | 127 | list_for_each_entry(e, &elv_list, list) { |
|---|
| 104 | | - if (elevator_match(e, name) && (mq == e->uses_mq)) |
|---|
| 128 | + if (elevator_match(e, name, required_features)) |
|---|
| 105 | 129 | return e; |
|---|
| 106 | 130 | } |
|---|
| 107 | 131 | |
|---|
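
`elv_support_features()` is a plain superset test on feature bits: every bit in `required_features` must also be set in the scheduler's `elevator_features`. A minimal standalone illustration of the semantics (userspace harness; `ELEVATOR_F_ZBD_SEQ_WRITE` mirrors the one real feature bit at this point):

```c
#include <stdbool.h>
#include <stdio.h>

#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)	/* mirrors include/linux/elevator.h */

/* Mirror of elv_support_features(): true iff every required bit is provided. */
static bool supports(unsigned int elv_features, unsigned int required_features)
{
	return (required_features & elv_features) == required_features;
}

int main(void)
{
	/* A scheduler advertising zoned sequential-write ordering qualifies... */
	printf("%d\n", supports(ELEVATOR_F_ZBD_SEQ_WRITE, ELEVATOR_F_ZBD_SEQ_WRITE)); /* 1 */
	/* ...one advertising nothing does not... */
	printf("%d\n", supports(0, ELEVATOR_F_ZBD_SEQ_WRITE)); /* 0 */
	/* ...and a queue with no requirements accepts any scheduler. */
	printf("%d\n", supports(0, 0)); /* 1 */
	return 0;
}
```
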
| .. | .. |
|---|
| 120 | 144 | |
|---|
| 121 | 145 | spin_lock(&elv_list_lock); |
|---|
| 122 | 146 | |
|---|
| 123 | | - e = elevator_find(name, q->mq_ops != NULL); |
|---|
| 147 | + e = elevator_find(name, q->required_elevator_features); |
|---|
| 124 | 148 | if (!e && try_loading) { |
|---|
| 125 | 149 | spin_unlock(&elv_list_lock); |
|---|
| 126 | 150 | request_module("%s-iosched", name); |
|---|
| 127 | 151 | spin_lock(&elv_list_lock); |
|---|
| 128 | | - e = elevator_find(name, q->mq_ops != NULL); |
|---|
| 152 | + e = elevator_find(name, q->required_elevator_features); |
|---|
| 129 | 153 | } |
|---|
| 130 | 154 | |
|---|
| 131 | 155 | if (e && !try_module_get(e->elevator_owner)) |
|---|
| .. | .. |
|---|
| 133 | 157 | |
|---|
| 134 | 158 | spin_unlock(&elv_list_lock); |
|---|
| 135 | 159 | return e; |
|---|
| 136 | | -} |
|---|
| 137 | | - |
|---|
| 138 | | -static char chosen_elevator[ELV_NAME_MAX]; |
|---|
| 139 | | - |
|---|
| 140 | | -static int __init elevator_setup(char *str) |
|---|
| 141 | | -{ |
|---|
| 142 | | - /* |
|---|
| 143 | | - * Be backwards-compatible with previous kernels, so users |
|---|
| 144 | | - * won't get the wrong elevator. |
|---|
| 145 | | - */ |
|---|
| 146 | | - strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); |
|---|
| 147 | | - return 1; |
|---|
| 148 | | -} |
|---|
| 149 | | - |
|---|
| 150 | | -__setup("elevator=", elevator_setup); |
|---|
| 151 | | - |
|---|
| 152 | | -/* called during boot to load the elevator chosen by the elevator param */ |
|---|
| 153 | | -void __init load_default_elevator_module(void) |
|---|
| 154 | | -{ |
|---|
| 155 | | - struct elevator_type *e; |
|---|
| 156 | | - |
|---|
| 157 | | - if (!chosen_elevator[0]) |
|---|
| 158 | | - return; |
|---|
| 159 | | - |
|---|
| 160 | | - /* |
|---|
| 161 | | - * Boot parameter is deprecated, we haven't supported that for MQ. |
|---|
| 162 | | - * Only look for non-mq schedulers from here. |
|---|
| 163 | | - */ |
|---|
| 164 | | - spin_lock(&elv_list_lock); |
|---|
| 165 | | - e = elevator_find(chosen_elevator, false); |
|---|
| 166 | | - spin_unlock(&elv_list_lock); |
|---|
| 167 | | - |
|---|
| 168 | | - if (!e) |
|---|
| 169 | | - request_module("%s-iosched", chosen_elevator); |
|---|
| 170 | 160 | } |
|---|
| 171 | 161 | |
|---|
| 172 | 162 | static struct kobj_type elv_ktype; |
|---|
| .. | .. |
|---|
| 184 | 174 | kobject_init(&eq->kobj, &elv_ktype); |
|---|
| 185 | 175 | mutex_init(&eq->sysfs_lock); |
|---|
| 186 | 176 | hash_init(eq->hash); |
|---|
| 187 | | - eq->uses_mq = e->uses_mq; |
|---|
| 188 | 177 | |
|---|
| 189 | 178 | return eq; |
|---|
| 190 | 179 | } |
|---|
| .. | .. |
|---|
| 199 | 188 | kfree(e); |
|---|
| 200 | 189 | } |
|---|
| 201 | 190 | |
|---|
| 202 | | -/* |
|---|
| 203 | | - * Use the default elevator specified by config boot param for non-mq devices, |
|---|
| 204 | | - * or by config option. Don't try to load modules as we could be running off |
|---|
| 205 | | - * async and request_module() isn't allowed from async. |
|---|
| 206 | | - */ |
|---|
| 207 | | -int elevator_init(struct request_queue *q) |
|---|
| 208 | | -{ |
|---|
| 209 | | - struct elevator_type *e = NULL; |
|---|
| 210 | | - int err = 0; |
|---|
| 211 | | - |
|---|
| 212 | | - /* |
|---|
| 213 | | - * q->sysfs_lock must be held to provide mutual exclusion between |
|---|
| 214 | | - * elevator_switch() and here. |
|---|
| 215 | | - */ |
|---|
| 216 | | - mutex_lock(&q->sysfs_lock); |
|---|
| 217 | | - if (unlikely(q->elevator)) |
|---|
| 218 | | - goto out_unlock; |
|---|
| 219 | | - |
|---|
| 220 | | - if (*chosen_elevator) { |
|---|
| 221 | | - e = elevator_get(q, chosen_elevator, false); |
|---|
| 222 | | - if (!e) |
|---|
| 223 | | - printk(KERN_ERR "I/O scheduler %s not found\n", |
|---|
| 224 | | - chosen_elevator); |
|---|
| 225 | | - } |
|---|
| 226 | | - |
|---|
| 227 | | - if (!e) |
|---|
| 228 | | - e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false); |
|---|
| 229 | | - if (!e) { |
|---|
| 230 | | - printk(KERN_ERR |
|---|
| 231 | | - "Default I/O scheduler not found. Using noop.\n"); |
|---|
| 232 | | - e = elevator_get(q, "noop", false); |
|---|
| 233 | | - } |
|---|
| 234 | | - |
|---|
| 235 | | - err = e->ops.sq.elevator_init_fn(q, e); |
|---|
| 236 | | - if (err) |
|---|
| 237 | | - elevator_put(e); |
|---|
| 238 | | -out_unlock: |
|---|
| 239 | | - mutex_unlock(&q->sysfs_lock); |
|---|
| 240 | | - return err; |
|---|
| 241 | | -} |
|---|
| 242 | | - |
|---|
| 243 | | -void elevator_exit(struct request_queue *q, struct elevator_queue *e) |
|---|
| 191 | +void __elevator_exit(struct request_queue *q, struct elevator_queue *e) |
|---|
| 244 | 192 | { |
|---|
| 245 | 193 | mutex_lock(&e->sysfs_lock); |
|---|
| 246 | | - if (e->uses_mq && e->type->ops.mq.exit_sched) |
|---|
| 247 | | - blk_mq_exit_sched(q, e); |
|---|
| 248 | | - else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn) |
|---|
| 249 | | - e->type->ops.sq.elevator_exit_fn(e); |
|---|
| 194 | + blk_mq_exit_sched(q, e); |
|---|
| 250 | 195 | mutex_unlock(&e->sysfs_lock); |
|---|
| 251 | 196 | |
|---|
| 252 | 197 | kobject_put(&e->kobj); |
|---|
| .. | .. |
|---|
| 355 | 300 | } |
|---|
| 356 | 301 | EXPORT_SYMBOL(elv_rb_find); |
|---|
| 357 | 302 | |
|---|
| 358 | | -/* |
|---|
| 359 | | - * Insert rq into dispatch queue of q. Queue lock must be held on |
|---|
| 360 | | - * entry. rq is sort instead into the dispatch queue. To be used by |
|---|
| 361 | | - * specific elevators. |
|---|
| 362 | | - */ |
|---|
| 363 | | -void elv_dispatch_sort(struct request_queue *q, struct request *rq) |
|---|
| 364 | | -{ |
|---|
| 365 | | - sector_t boundary; |
|---|
| 366 | | - struct list_head *entry; |
|---|
| 367 | | - |
|---|
| 368 | | - if (q->last_merge == rq) |
|---|
| 369 | | - q->last_merge = NULL; |
|---|
| 370 | | - |
|---|
| 371 | | - elv_rqhash_del(q, rq); |
|---|
| 372 | | - |
|---|
| 373 | | - q->nr_sorted--; |
|---|
| 374 | | - |
|---|
| 375 | | - boundary = q->end_sector; |
|---|
| 376 | | - list_for_each_prev(entry, &q->queue_head) { |
|---|
| 377 | | - struct request *pos = list_entry_rq(entry); |
|---|
| 378 | | - |
|---|
| 379 | | - if (req_op(rq) != req_op(pos)) |
|---|
| 380 | | - break; |
|---|
| 381 | | - if (rq_data_dir(rq) != rq_data_dir(pos)) |
|---|
| 382 | | - break; |
|---|
| 383 | | - if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER)) |
|---|
| 384 | | - break; |
|---|
| 385 | | - if (blk_rq_pos(rq) >= boundary) { |
|---|
| 386 | | - if (blk_rq_pos(pos) < boundary) |
|---|
| 387 | | - continue; |
|---|
| 388 | | - } else { |
|---|
| 389 | | - if (blk_rq_pos(pos) >= boundary) |
|---|
| 390 | | - break; |
|---|
| 391 | | - } |
|---|
| 392 | | - if (blk_rq_pos(rq) >= blk_rq_pos(pos)) |
|---|
| 393 | | - break; |
|---|
| 394 | | - } |
|---|
| 395 | | - |
|---|
| 396 | | - list_add(&rq->queuelist, entry); |
|---|
| 397 | | -} |
|---|
| 398 | | -EXPORT_SYMBOL(elv_dispatch_sort); |
|---|
| 399 | | - |
|---|
| 400 | | -/* |
|---|
| 401 | | - * Insert rq into dispatch queue of q. Queue lock must be held on |
|---|
| 402 | | - * entry. rq is added to the back of the dispatch queue. To be used by |
|---|
| 403 | | - * specific elevators. |
|---|
| 404 | | - */ |
|---|
| 405 | | -void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) |
|---|
| 406 | | -{ |
|---|
| 407 | | - if (q->last_merge == rq) |
|---|
| 408 | | - q->last_merge = NULL; |
|---|
| 409 | | - |
|---|
| 410 | | - elv_rqhash_del(q, rq); |
|---|
| 411 | | - |
|---|
| 412 | | - q->nr_sorted--; |
|---|
| 413 | | - |
|---|
| 414 | | - q->end_sector = rq_end_sector(rq); |
|---|
| 415 | | - q->boundary_rq = rq; |
|---|
| 416 | | - list_add_tail(&rq->queuelist, &q->queue_head); |
|---|
| 417 | | -} |
|---|
| 418 | | -EXPORT_SYMBOL(elv_dispatch_add_tail); |
|---|
| 419 | | - |
|---|
| 420 | 303 | enum elv_merge elv_merge(struct request_queue *q, struct request **req, |
|---|
| 421 | 304 | struct bio *bio) |
|---|
| 422 | 305 | { |
|---|
| .. | .. |
|---|
| 453 | 336 | __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); |
|---|
| 454 | 337 | if (__rq && elv_bio_merge_ok(__rq, bio)) { |
|---|
| 455 | 338 | *req = __rq; |
|---|
| 339 | + |
|---|
| 340 | + if (blk_discard_mergable(__rq)) |
|---|
| 341 | + return ELEVATOR_DISCARD_MERGE; |
|---|
| 456 | 342 | return ELEVATOR_BACK_MERGE; |
|---|
| 457 | 343 | } |
|---|
| 458 | 344 | |
|---|
| 459 | | - if (e->uses_mq && e->type->ops.mq.request_merge) |
|---|
| 460 | | - return e->type->ops.mq.request_merge(q, req, bio); |
|---|
| 461 | | - else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn) |
|---|
| 462 | | - return e->type->ops.sq.elevator_merge_fn(q, req, bio); |
|---|
| 345 | + if (e->type->ops.request_merge) |
|---|
| 346 | + return e->type->ops.request_merge(q, req, bio); |
|---|
| 463 | 347 | |
|---|
| 464 | 348 | return ELEVATOR_NO_MERGE; |
|---|
| 465 | 349 | } |
|---|
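
The new early return short-circuits to `ELEVATOR_DISCARD_MERGE` before the back-merge answer. For reference, the helper it relies on is roughly the following (a sketch matching the block-layer definition of this era; the real one lives in the block headers): a discard is only "mergable" in this sense when the device accepts multi-range discards, in which case the bio is appended as a new range rather than having to be contiguous.

```c
/* Reference sketch of blk_discard_mergable(): discard merging is
 * range-based, so it is only possible when the queue supports more
 * than one discard segment. */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
```
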
| .. | .. |
|---|
| 510 | 394 | { |
|---|
| 511 | 395 | struct elevator_queue *e = q->elevator; |
|---|
| 512 | 396 | |
|---|
| 513 | | - if (e->uses_mq && e->type->ops.mq.request_merged) |
|---|
| 514 | | - e->type->ops.mq.request_merged(q, rq, type); |
|---|
| 515 | | - else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn) |
|---|
| 516 | | - e->type->ops.sq.elevator_merged_fn(q, rq, type); |
|---|
| 397 | + if (e->type->ops.request_merged) |
|---|
| 398 | + e->type->ops.request_merged(q, rq, type); |
|---|
| 517 | 399 | |
|---|
| 518 | 400 | if (type == ELEVATOR_BACK_MERGE) |
|---|
| 519 | 401 | elv_rqhash_reposition(q, rq); |
|---|
| .. | .. |
|---|
| 525 | 407 | struct request *next) |
|---|
| 526 | 408 | { |
|---|
| 527 | 409 | struct elevator_queue *e = q->elevator; |
|---|
| 528 | | - bool next_sorted = false; |
|---|
| 529 | 410 | |
|---|
| 530 | | - if (e->uses_mq && e->type->ops.mq.requests_merged) |
|---|
| 531 | | - e->type->ops.mq.requests_merged(q, rq, next); |
|---|
| 532 | | - else if (e->type->ops.sq.elevator_merge_req_fn) { |
|---|
| 533 | | - next_sorted = (__force bool)(next->rq_flags & RQF_SORTED); |
|---|
| 534 | | - if (next_sorted) |
|---|
| 535 | | - e->type->ops.sq.elevator_merge_req_fn(q, rq, next); |
|---|
| 536 | | - } |
|---|
| 411 | + if (e->type->ops.requests_merged) |
|---|
| 412 | + e->type->ops.requests_merged(q, rq, next); |
|---|
| 537 | 413 | |
|---|
| 538 | 414 | elv_rqhash_reposition(q, rq); |
|---|
| 539 | | - |
|---|
| 540 | | - if (next_sorted) { |
|---|
| 541 | | - elv_rqhash_del(q, next); |
|---|
| 542 | | - q->nr_sorted--; |
|---|
| 543 | | - } |
|---|
| 544 | | - |
|---|
| 545 | 415 | q->last_merge = rq; |
|---|
| 546 | 416 | } |
|---|
| 547 | | - |
|---|
| 548 | | -void elv_bio_merged(struct request_queue *q, struct request *rq, |
|---|
| 549 | | - struct bio *bio) |
|---|
| 550 | | -{ |
|---|
| 551 | | - struct elevator_queue *e = q->elevator; |
|---|
| 552 | | - |
|---|
| 553 | | - if (WARN_ON_ONCE(e->uses_mq)) |
|---|
| 554 | | - return; |
|---|
| 555 | | - |
|---|
| 556 | | - if (e->type->ops.sq.elevator_bio_merged_fn) |
|---|
| 557 | | - e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio); |
|---|
| 558 | | -} |
|---|
| 559 | | - |
|---|
| 560 | | -#ifdef CONFIG_PM |
|---|
| 561 | | -static void blk_pm_requeue_request(struct request *rq) |
|---|
| 562 | | -{ |
|---|
| 563 | | - if (rq->q->dev && !(rq->rq_flags & RQF_PM)) |
|---|
| 564 | | - rq->q->nr_pending--; |
|---|
| 565 | | -} |
|---|
| 566 | | - |
|---|
| 567 | | -static void blk_pm_add_request(struct request_queue *q, struct request *rq) |
|---|
| 568 | | -{ |
|---|
| 569 | | - if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 && |
|---|
| 570 | | - (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING)) |
|---|
| 571 | | - pm_request_resume(q->dev); |
|---|
| 572 | | -} |
|---|
| 573 | | -#else |
|---|
| 574 | | -static inline void blk_pm_requeue_request(struct request *rq) {} |
|---|
| 575 | | -static inline void blk_pm_add_request(struct request_queue *q, |
|---|
| 576 | | - struct request *rq) |
|---|
| 577 | | -{ |
|---|
| 578 | | -} |
|---|
| 579 | | -#endif |
|---|
| 580 | | - |
|---|
| 581 | | -void elv_requeue_request(struct request_queue *q, struct request *rq) |
|---|
| 582 | | -{ |
|---|
| 583 | | - /* |
|---|
| 584 | | - * it already went through dequeue, we need to decrement the |
|---|
| 585 | | - * in_flight count again |
|---|
| 586 | | - */ |
|---|
| 587 | | - if (blk_account_rq(rq)) { |
|---|
| 588 | | - q->in_flight[rq_is_sync(rq)]--; |
|---|
| 589 | | - if (rq->rq_flags & RQF_SORTED) |
|---|
| 590 | | - elv_deactivate_rq(q, rq); |
|---|
| 591 | | - } |
|---|
| 592 | | - |
|---|
| 593 | | - rq->rq_flags &= ~RQF_STARTED; |
|---|
| 594 | | - |
|---|
| 595 | | - blk_pm_requeue_request(rq); |
|---|
| 596 | | - |
|---|
| 597 | | - __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); |
|---|
| 598 | | -} |
|---|
| 599 | | - |
|---|
| 600 | | -void elv_drain_elevator(struct request_queue *q) |
|---|
| 601 | | -{ |
|---|
| 602 | | - struct elevator_queue *e = q->elevator; |
|---|
| 603 | | - static int printed; |
|---|
| 604 | | - |
|---|
| 605 | | - if (WARN_ON_ONCE(e->uses_mq)) |
|---|
| 606 | | - return; |
|---|
| 607 | | - |
|---|
| 608 | | - lockdep_assert_held(q->queue_lock); |
|---|
| 609 | | - |
|---|
| 610 | | - while (e->type->ops.sq.elevator_dispatch_fn(q, 1)) |
|---|
| 611 | | - ; |
|---|
| 612 | | - if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) { |
|---|
| 613 | | - printk(KERN_ERR "%s: forced dispatching is broken " |
|---|
| 614 | | - "(nr_sorted=%u), please report this\n", |
|---|
| 615 | | - q->elevator->type->elevator_name, q->nr_sorted); |
|---|
| 616 | | - } |
|---|
| 617 | | -} |
|---|
| 618 | | - |
|---|
| 619 | | -void __elv_add_request(struct request_queue *q, struct request *rq, int where) |
|---|
| 620 | | -{ |
|---|
| 621 | | - trace_block_rq_insert(q, rq); |
|---|
| 622 | | - |
|---|
| 623 | | - blk_pm_add_request(q, rq); |
|---|
| 624 | | - |
|---|
| 625 | | - rq->q = q; |
|---|
| 626 | | - |
|---|
| 627 | | - if (rq->rq_flags & RQF_SOFTBARRIER) { |
|---|
| 628 | | - /* barriers are scheduling boundary, update end_sector */ |
|---|
| 629 | | - if (!blk_rq_is_passthrough(rq)) { |
|---|
| 630 | | - q->end_sector = rq_end_sector(rq); |
|---|
| 631 | | - q->boundary_rq = rq; |
|---|
| 632 | | - } |
|---|
| 633 | | - } else if (!(rq->rq_flags & RQF_ELVPRIV) && |
|---|
| 634 | | - (where == ELEVATOR_INSERT_SORT || |
|---|
| 635 | | - where == ELEVATOR_INSERT_SORT_MERGE)) |
|---|
| 636 | | - where = ELEVATOR_INSERT_BACK; |
|---|
| 637 | | - |
|---|
| 638 | | - switch (where) { |
|---|
| 639 | | - case ELEVATOR_INSERT_REQUEUE: |
|---|
| 640 | | - case ELEVATOR_INSERT_FRONT: |
|---|
| 641 | | - rq->rq_flags |= RQF_SOFTBARRIER; |
|---|
| 642 | | - list_add(&rq->queuelist, &q->queue_head); |
|---|
| 643 | | - break; |
|---|
| 644 | | - |
|---|
| 645 | | - case ELEVATOR_INSERT_BACK: |
|---|
| 646 | | - rq->rq_flags |= RQF_SOFTBARRIER; |
|---|
| 647 | | - elv_drain_elevator(q); |
|---|
| 648 | | - list_add_tail(&rq->queuelist, &q->queue_head); |
|---|
| 649 | | - /* |
|---|
| 650 | | - * We kick the queue here for the following reasons. |
|---|
| 651 | | - * - The elevator might have returned NULL previously |
|---|
| 652 | | - * to delay requests and returned them now. As the |
|---|
| 653 | | - * queue wasn't empty before this request, ll_rw_blk |
|---|
| 654 | | - * won't run the queue on return, resulting in hang. |
|---|
| 655 | | - * - Usually, back inserted requests won't be merged |
|---|
| 656 | | - * with anything. There's no point in delaying queue |
|---|
| 657 | | - * processing. |
|---|
| 658 | | - */ |
|---|
| 659 | | - __blk_run_queue(q); |
|---|
| 660 | | - break; |
|---|
| 661 | | - |
|---|
| 662 | | - case ELEVATOR_INSERT_SORT_MERGE: |
|---|
| 663 | | - /* |
|---|
| 664 | | - * If we succeed in merging this request with one in the |
|---|
| 665 | | - * queue already, we are done - rq has now been freed, |
|---|
| 666 | | - * so no need to do anything further. |
|---|
| 667 | | - */ |
|---|
| 668 | | - if (elv_attempt_insert_merge(q, rq)) |
|---|
| 669 | | - break; |
|---|
| 670 | | - /* fall through */ |
|---|
| 671 | | - case ELEVATOR_INSERT_SORT: |
|---|
| 672 | | - BUG_ON(blk_rq_is_passthrough(rq)); |
|---|
| 673 | | - rq->rq_flags |= RQF_SORTED; |
|---|
| 674 | | - q->nr_sorted++; |
|---|
| 675 | | - if (rq_mergeable(rq)) { |
|---|
| 676 | | - elv_rqhash_add(q, rq); |
|---|
| 677 | | - if (!q->last_merge) |
|---|
| 678 | | - q->last_merge = rq; |
|---|
| 679 | | - } |
|---|
| 680 | | - |
|---|
| 681 | | - /* |
|---|
| 682 | | - * Some ioscheds (cfq) run q->request_fn directly, so |
|---|
| 683 | | - * rq cannot be accessed after calling |
|---|
| 684 | | - * elevator_add_req_fn. |
|---|
| 685 | | - */ |
|---|
| 686 | | - q->elevator->type->ops.sq.elevator_add_req_fn(q, rq); |
|---|
| 687 | | - break; |
|---|
| 688 | | - |
|---|
| 689 | | - case ELEVATOR_INSERT_FLUSH: |
|---|
| 690 | | - rq->rq_flags |= RQF_SOFTBARRIER; |
|---|
| 691 | | - blk_insert_flush(rq); |
|---|
| 692 | | - break; |
|---|
| 693 | | - default: |
|---|
| 694 | | - printk(KERN_ERR "%s: bad insertion point %d\n", |
|---|
| 695 | | - __func__, where); |
|---|
| 696 | | - BUG(); |
|---|
| 697 | | - } |
|---|
| 698 | | -} |
|---|
| 699 | | -EXPORT_SYMBOL(__elv_add_request); |
|---|
| 700 | | - |
|---|
| 701 | | -void elv_add_request(struct request_queue *q, struct request *rq, int where) |
|---|
| 702 | | -{ |
|---|
| 703 | | - unsigned long flags; |
|---|
| 704 | | - |
|---|
| 705 | | - spin_lock_irqsave(q->queue_lock, flags); |
|---|
| 706 | | - __elv_add_request(q, rq, where); |
|---|
| 707 | | - spin_unlock_irqrestore(q->queue_lock, flags); |
|---|
| 708 | | -} |
|---|
| 709 | | -EXPORT_SYMBOL(elv_add_request); |
|---|
| 710 | 417 | |
|---|
| 711 | 418 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) |
|---|
| 712 | 419 | { |
|---|
| 713 | 420 | struct elevator_queue *e = q->elevator; |
|---|
| 714 | 421 | |
|---|
| 715 | | - if (e->uses_mq && e->type->ops.mq.next_request) |
|---|
| 716 | | - return e->type->ops.mq.next_request(q, rq); |
|---|
| 717 | | - else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn) |
|---|
| 718 | | - return e->type->ops.sq.elevator_latter_req_fn(q, rq); |
|---|
| 422 | + if (e->type->ops.next_request) |
|---|
| 423 | + return e->type->ops.next_request(q, rq); |
|---|
| 719 | 424 | |
|---|
| 720 | 425 | return NULL; |
|---|
| 721 | 426 | } |
|---|
| .. | .. |
|---|
| 724 | 429 | { |
|---|
| 725 | 430 | struct elevator_queue *e = q->elevator; |
|---|
| 726 | 431 | |
|---|
| 727 | | - if (e->uses_mq && e->type->ops.mq.former_request) |
|---|
| 728 | | - return e->type->ops.mq.former_request(q, rq); |
|---|
| 729 | | - if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn) |
|---|
| 730 | | - return e->type->ops.sq.elevator_former_req_fn(q, rq); |
|---|
| 432 | + if (e->type->ops.former_request) |
|---|
| 433 | + return e->type->ops.former_request(q, rq); |
|---|
| 434 | + |
|---|
| 731 | 435 | return NULL; |
|---|
| 732 | | -} |
|---|
| 733 | | - |
|---|
| 734 | | -int elv_set_request(struct request_queue *q, struct request *rq, |
|---|
| 735 | | - struct bio *bio, gfp_t gfp_mask) |
|---|
| 736 | | -{ |
|---|
| 737 | | - struct elevator_queue *e = q->elevator; |
|---|
| 738 | | - |
|---|
| 739 | | - if (WARN_ON_ONCE(e->uses_mq)) |
|---|
| 740 | | - return 0; |
|---|
| 741 | | - |
|---|
| 742 | | - if (e->type->ops.sq.elevator_set_req_fn) |
|---|
| 743 | | - return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask); |
|---|
| 744 | | - return 0; |
|---|
| 745 | | -} |
|---|
| 746 | | - |
|---|
| 747 | | -void elv_put_request(struct request_queue *q, struct request *rq) |
|---|
| 748 | | -{ |
|---|
| 749 | | - struct elevator_queue *e = q->elevator; |
|---|
| 750 | | - |
|---|
| 751 | | - if (WARN_ON_ONCE(e->uses_mq)) |
|---|
| 752 | | - return; |
|---|
| 753 | | - |
|---|
| 754 | | - if (e->type->ops.sq.elevator_put_req_fn) |
|---|
| 755 | | - e->type->ops.sq.elevator_put_req_fn(rq); |
|---|
| 756 | | -} |
|---|
| 757 | | - |
|---|
| 758 | | -int elv_may_queue(struct request_queue *q, unsigned int op) |
|---|
| 759 | | -{ |
|---|
| 760 | | - struct elevator_queue *e = q->elevator; |
|---|
| 761 | | - |
|---|
| 762 | | - if (WARN_ON_ONCE(e->uses_mq)) |
|---|
| 763 | | - return 0; |
|---|
| 764 | | - |
|---|
| 765 | | - if (e->type->ops.sq.elevator_may_queue_fn) |
|---|
| 766 | | - return e->type->ops.sq.elevator_may_queue_fn(q, op); |
|---|
| 767 | | - |
|---|
| 768 | | - return ELV_MQUEUE_MAY; |
|---|
| 769 | | -} |
|---|
| 770 | | - |
|---|
| 771 | | -void elv_completed_request(struct request_queue *q, struct request *rq) |
|---|
| 772 | | -{ |
|---|
| 773 | | - struct elevator_queue *e = q->elevator; |
|---|
| 774 | | - |
|---|
| 775 | | - if (WARN_ON_ONCE(e->uses_mq)) |
|---|
| 776 | | - return; |
|---|
| 777 | | - |
|---|
| 778 | | - /* |
|---|
| 779 | | - * request is released from the driver, io must be done |
|---|
| 780 | | - */ |
|---|
| 781 | | - if (blk_account_rq(rq)) { |
|---|
| 782 | | - q->in_flight[rq_is_sync(rq)]--; |
|---|
| 783 | | - if ((rq->rq_flags & RQF_SORTED) && |
|---|
| 784 | | - e->type->ops.sq.elevator_completed_req_fn) |
|---|
| 785 | | - e->type->ops.sq.elevator_completed_req_fn(q, rq); |
|---|
| 786 | | - } |
|---|
| 787 | 436 | } |
|---|
| 788 | 437 | |
|---|
| 789 | 438 | #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) |
|---|
| .. | .. |
|---|
| 833 | 482 | .release = elevator_release, |
|---|
| 834 | 483 | }; |
|---|
| 835 | 484 | |
|---|
| 836 | | -int elv_register_queue(struct request_queue *q) |
|---|
| 485 | +int elv_register_queue(struct request_queue *q, bool uevent) |
|---|
| 837 | 486 | { |
|---|
| 838 | 487 | struct elevator_queue *e = q->elevator; |
|---|
| 839 | 488 | int error; |
|---|
| .. | .. |
|---|
| 850 | 499 | attr++; |
|---|
| 851 | 500 | } |
|---|
| 852 | 501 | } |
|---|
| 853 | | - kobject_uevent(&e->kobj, KOBJ_ADD); |
|---|
| 502 | + if (uevent) |
|---|
| 503 | + kobject_uevent(&e->kobj, KOBJ_ADD); |
|---|
| 504 | + |
|---|
| 854 | 505 | e->registered = 1; |
|---|
| 855 | | - if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn) |
|---|
| 856 | | - e->type->ops.sq.elevator_registered_fn(q); |
|---|
| 857 | 506 | } |
|---|
| 858 | 507 | return error; |
|---|
| 859 | 508 | } |
|---|
| .. | .. |
|---|
| 867 | 516 | |
|---|
| 868 | 517 | kobject_uevent(&e->kobj, KOBJ_REMOVE); |
|---|
| 869 | 518 | kobject_del(&e->kobj); |
|---|
| 519 | + |
|---|
| 870 | 520 | e->registered = 0; |
|---|
| 871 | 521 | } |
|---|
| 872 | 522 | } |
|---|
| 873 | 523 | |
|---|
| 874 | 524 | int elv_register(struct elevator_type *e) |
|---|
| 875 | 525 | { |
|---|
| 876 | | - char *def = ""; |
|---|
| 877 | | - |
|---|
| 878 | 526 | /* create icq_cache if requested */ |
|---|
| 879 | 527 | if (e->icq_size) { |
|---|
| 880 | 528 | if (WARN_ON(e->icq_size < sizeof(struct io_cq)) || |
|---|
| .. | .. |
|---|
| 891 | 539 | |
|---|
| 892 | 540 | /* register, don't allow duplicate names */ |
|---|
| 893 | 541 | spin_lock(&elv_list_lock); |
|---|
| 894 | | - if (elevator_find(e->elevator_name, e->uses_mq)) { |
|---|
| 542 | + if (elevator_find(e->elevator_name, 0)) { |
|---|
| 895 | 543 | spin_unlock(&elv_list_lock); |
|---|
| 896 | 544 | kmem_cache_destroy(e->icq_cache); |
|---|
| 897 | 545 | return -EBUSY; |
|---|
| .. | .. |
|---|
| 899 | 547 | list_add_tail(&e->list, &elv_list); |
|---|
| 900 | 548 | spin_unlock(&elv_list_lock); |
|---|
| 901 | 549 | |
|---|
| 902 | | - /* print pretty message */ |
|---|
| 903 | | - if (elevator_match(e, chosen_elevator) || |
|---|
| 904 | | - (!*chosen_elevator && |
|---|
| 905 | | - elevator_match(e, CONFIG_DEFAULT_IOSCHED))) |
|---|
| 906 | | - def = " (default)"; |
|---|
| 550 | + printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name); |
|---|
| 907 | 551 | |
|---|
| 908 | | - printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, |
|---|
| 909 | | - def); |
|---|
| 910 | 552 | return 0; |
|---|
| 911 | 553 | } |
|---|
| 912 | 554 | EXPORT_SYMBOL_GPL(elv_register); |
|---|
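
With the sq side gone, a scheduler module registers a single mq ops table and, optionally, the features it provides. A hypothetical minimal module (all `demo_*` names are illustrative and the hooks are stubs; a real scheduler implements many more ops):

```c
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>

/* Stub hooks so the sketch is complete; a real scheduler queues and
 * orders requests here. */
static void demo_insert_requests(struct blk_mq_hw_ctx *hctx,
				 struct list_head *list, bool at_head)
{
}

static struct request *demo_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	return NULL;
}

static struct elevator_type demo_sched = {
	.ops = {
		.insert_requests	= demo_insert_requests,
		.dispatch_request	= demo_dispatch_request,
	},
	/* Advertised capability bits, tested by elevator_match() above. */
	.elevator_features	= ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_name		= "demo",
	.elevator_owner		= THIS_MODULE,
};

static int __init demo_sched_init(void)
{
	/* -EBUSY if the name is taken; note the duplicate check now calls
	 * elevator_find(name, 0), i.e. features are ignored for naming. */
	return elv_register(&demo_sched);
}

static void __exit demo_sched_exit(void)
{
	elv_unregister(&demo_sched);
}

module_init(demo_sched_init);
module_exit(demo_sched_exit);
MODULE_LICENSE("GPL");
```
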
| .. | .. |
|---|
| 940 | 582 | if (q->elevator) { |
|---|
| 941 | 583 | if (q->elevator->registered) |
|---|
| 942 | 584 | elv_unregister_queue(q); |
|---|
| 585 | + |
|---|
| 943 | 586 | ioc_clear_queue(q); |
|---|
| 944 | 587 | elevator_exit(q, q->elevator); |
|---|
| 945 | 588 | } |
|---|
| .. | .. |
|---|
| 949 | 592 | goto out; |
|---|
| 950 | 593 | |
|---|
| 951 | 594 | if (new_e) { |
|---|
| 952 | | - ret = elv_register_queue(q); |
|---|
| 595 | + ret = elv_register_queue(q, true); |
|---|
| 953 | 596 | if (ret) { |
|---|
| 954 | 597 | elevator_exit(q, q->elevator); |
|---|
| 955 | 598 | goto out; |
|---|
| .. | .. |
|---|
| 965 | 608 | return ret; |
|---|
| 966 | 609 | } |
|---|
| 967 | 610 | |
|---|
| 968 | | -/* |
|---|
| 969 | | - * For blk-mq devices, we default to using mq-deadline, if available, for single |
|---|
| 970 | | - * queue devices. If deadline isn't available OR we have multiple queues, |
|---|
| 971 | | - * default to "none". |
|---|
| 972 | | - */ |
|---|
| 973 | | -int elevator_init_mq(struct request_queue *q) |
|---|
| 611 | +static inline bool elv_support_iosched(struct request_queue *q) |
|---|
| 974 | 612 | { |
|---|
| 975 | | - struct elevator_type *e; |
|---|
| 976 | | - int err = 0; |
|---|
| 613 | + if (!queue_is_mq(q) || |
|---|
| 614 | + (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))) |
|---|
| 615 | + return false; |
|---|
| 616 | + return true; |
|---|
| 617 | +} |
|---|
| 618 | + |
|---|
| 619 | +/* |
|---|
| 620 | + * For single queue devices, default to using mq-deadline. If we have multiple |
|---|
| 621 | + * queues or mq-deadline is not available, default to "none". |
|---|
| 622 | + */ |
|---|
| 623 | +static struct elevator_type *elevator_get_default(struct request_queue *q) |
|---|
| 624 | +{ |
|---|
| 625 | + if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) |
|---|
| 626 | + return NULL; |
|---|
| 977 | 627 | |
|---|
| 978 | 628 | if (q->nr_hw_queues != 1) |
|---|
| 979 | | - return 0; |
|---|
| 629 | + return NULL; |
|---|
| 980 | 630 | |
|---|
| 981 | | - WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)); |
|---|
| 631 | + return elevator_get(q, "mq-deadline", false); |
|---|
| 632 | +} |
|---|
| 633 | + |
|---|
| 634 | +/* |
|---|
| 635 | + * Get the first elevator providing the features required by the request queue. |
|---|
| 636 | + * Default to "none" if no matching elevator is found. |
|---|
| 637 | + */ |
|---|
| 638 | +static struct elevator_type *elevator_get_by_features(struct request_queue *q) |
|---|
| 639 | +{ |
|---|
| 640 | + struct elevator_type *e, *found = NULL; |
|---|
| 641 | + |
|---|
| 642 | + spin_lock(&elv_list_lock); |
|---|
| 643 | + |
|---|
| 644 | + list_for_each_entry(e, &elv_list, list) { |
|---|
| 645 | + if (elv_support_features(e->elevator_features, |
|---|
| 646 | + q->required_elevator_features)) { |
|---|
| 647 | + found = e; |
|---|
| 648 | + break; |
|---|
| 649 | + } |
|---|
| 650 | + } |
|---|
| 651 | + |
|---|
| 652 | + if (found && !try_module_get(found->elevator_owner)) |
|---|
| 653 | + found = NULL; |
|---|
| 654 | + |
|---|
| 655 | + spin_unlock(&elv_list_lock); |
|---|
| 656 | + return found; |
|---|
| 657 | +} |
|---|
| 658 | + |
|---|
| 659 | +/* |
|---|
| 660 | + * For a device queue that has no required features, use the default elevator |
|---|
| 661 | + * settings. Otherwise, use the first elevator available matching the required |
|---|
| 662 | + * features. If no suitable elevator is found or if the chosen elevator |
|---|
| 663 | + * initialization fails, fall back to the "none" elevator (no elevator). |
|---|
| 664 | + */ |
|---|
| 665 | +void elevator_init_mq(struct request_queue *q) |
|---|
| 666 | +{ |
|---|
| 667 | + struct elevator_type *e; |
|---|
| 668 | + int err; |
|---|
| 669 | + |
|---|
| 670 | + if (!elv_support_iosched(q)) |
|---|
| 671 | + return; |
|---|
| 672 | + |
|---|
| 673 | + WARN_ON_ONCE(blk_queue_registered(q)); |
|---|
| 982 | 674 | |
|---|
| 983 | 675 | if (unlikely(q->elevator)) |
|---|
| 984 | | - goto out; |
|---|
| 676 | + return; |
|---|
| 985 | 677 | |
|---|
| 986 | | - e = elevator_get(q, "mq-deadline", false); |
|---|
| 678 | + if (!q->required_elevator_features) |
|---|
| 679 | + e = elevator_get_default(q); |
|---|
| 680 | + else |
|---|
| 681 | + e = elevator_get_by_features(q); |
|---|
| 987 | 682 | if (!e) |
|---|
| 988 | | - goto out; |
|---|
| 683 | + return; |
|---|
| 684 | + |
|---|
| 685 | + blk_mq_freeze_queue(q); |
|---|
| 686 | + blk_mq_quiesce_queue(q); |
|---|
| 989 | 687 | |
|---|
| 990 | 688 | err = blk_mq_init_sched(q, e); |
|---|
| 991 | | - if (err) |
|---|
| 689 | + |
|---|
| 690 | + blk_mq_unquiesce_queue(q); |
|---|
| 691 | + blk_mq_unfreeze_queue(q); |
|---|
| 692 | + |
|---|
| 693 | + if (err) { |
|---|
| 694 | + pr_warn("\"%s\" elevator initialization failed, " |
|---|
| 695 | + "falling back to \"none\"\n", e->elevator_name); |
|---|
| 992 | 696 | elevator_put(e); |
|---|
| 993 | | -out: |
|---|
| 994 | | - return err; |
|---|
| 697 | + } |
|---|
| 995 | 698 | } |
|---|
| 996 | 699 | |
|---|
| 997 | 700 | |
|---|
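
The driver-side counterpart (a sketch modeled on the zoned-device case) is to declare the requirement before the queue is registered; that is what routes `elevator_init_mq()` through `elevator_get_by_features()` instead of the mq-deadline/none default:

```c
#include <linux/blkdev.h>

/* Sketch: a host-managed zoned device needs a scheduler that preserves
 * sequential write ordering, so the driver requires the corresponding
 * feature bit while setting up its queue. */
static void demo_setup_zoned_queue(struct request_queue *q)
{
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
}
```

In practice mq-deadline is the only in-tree scheduler advertising this bit, so such queues come up with mq-deadline even when they have multiple hardware queues, since the feature-based lookup does not apply the single-queue heuristic.
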
| .. | .. |
|---|
| 1003 | 706 | */ |
|---|
| 1004 | 707 | static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) |
|---|
| 1005 | 708 | { |
|---|
| 1006 | | - struct elevator_queue *old = q->elevator; |
|---|
| 1007 | | - bool old_registered = false; |
|---|
| 1008 | 709 | int err; |
|---|
| 1009 | 710 | |
|---|
| 1010 | 711 | lockdep_assert_held(&q->sysfs_lock); |
|---|
| 1011 | 712 | |
|---|
| 1012 | | - if (q->mq_ops) { |
|---|
| 1013 | | - blk_mq_freeze_queue(q); |
|---|
| 1014 | | - blk_mq_quiesce_queue(q); |
|---|
| 713 | + blk_mq_freeze_queue(q); |
|---|
| 714 | + blk_mq_quiesce_queue(q); |
|---|
| 1015 | 715 | |
|---|
| 1016 | | - err = elevator_switch_mq(q, new_e); |
|---|
| 716 | + err = elevator_switch_mq(q, new_e); |
|---|
| 1017 | 717 | |
|---|
| 1018 | | - blk_mq_unquiesce_queue(q); |
|---|
| 1019 | | - blk_mq_unfreeze_queue(q); |
|---|
| 1020 | | - |
|---|
| 1021 | | - return err; |
|---|
| 1022 | | - } |
|---|
| 1023 | | - |
|---|
| 1024 | | - /* |
|---|
| 1025 | | - * Turn on BYPASS and drain all requests w/ elevator private data. |
|---|
| 1026 | | - * Block layer doesn't call into a quiesced elevator - all requests |
|---|
| 1027 | | - * are directly put on the dispatch list without elevator data |
|---|
| 1028 | | - * using INSERT_BACK. All requests have SOFTBARRIER set and no |
|---|
| 1029 | | - * merge happens either. |
|---|
| 1030 | | - */ |
|---|
| 1031 | | - if (old) { |
|---|
| 1032 | | - old_registered = old->registered; |
|---|
| 1033 | | - |
|---|
| 1034 | | - blk_queue_bypass_start(q); |
|---|
| 1035 | | - |
|---|
| 1036 | | - /* unregister and clear all auxiliary data of the old elevator */ |
|---|
| 1037 | | - if (old_registered) |
|---|
| 1038 | | - elv_unregister_queue(q); |
|---|
| 1039 | | - |
|---|
| 1040 | | - ioc_clear_queue(q); |
|---|
| 1041 | | - } |
|---|
| 1042 | | - |
|---|
| 1043 | | - /* allocate, init and register new elevator */ |
|---|
| 1044 | | - err = new_e->ops.sq.elevator_init_fn(q, new_e); |
|---|
| 1045 | | - if (err) |
|---|
| 1046 | | - goto fail_init; |
|---|
| 1047 | | - |
|---|
| 1048 | | - err = elv_register_queue(q); |
|---|
| 1049 | | - if (err) |
|---|
| 1050 | | - goto fail_register; |
|---|
| 1051 | | - |
|---|
| 1052 | | - /* done, kill the old one and finish */ |
|---|
| 1053 | | - if (old) { |
|---|
| 1054 | | - elevator_exit(q, old); |
|---|
| 1055 | | - blk_queue_bypass_end(q); |
|---|
| 1056 | | - } |
|---|
| 1057 | | - |
|---|
| 1058 | | - blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); |
|---|
| 1059 | | - |
|---|
| 1060 | | - return 0; |
|---|
| 1061 | | - |
|---|
| 1062 | | -fail_register: |
|---|
| 1063 | | - elevator_exit(q, q->elevator); |
|---|
| 1064 | | -fail_init: |
|---|
| 1065 | | - /* switch failed, restore and re-register old elevator */ |
|---|
| 1066 | | - if (old) { |
|---|
| 1067 | | - q->elevator = old; |
|---|
| 1068 | | - elv_register_queue(q); |
|---|
| 1069 | | - blk_queue_bypass_end(q); |
|---|
| 1070 | | - } |
|---|
| 718 | + blk_mq_unquiesce_queue(q); |
|---|
| 719 | + blk_mq_unfreeze_queue(q); |
|---|
| 1071 | 720 | |
|---|
| 1072 | 721 | return err; |
|---|
| 1073 | 722 | } |
|---|
| .. | .. |
|---|
| 1087 | 736 | /* |
|---|
| 1088 | 737 | * Special case for mq, turn off scheduling |
|---|
| 1089 | 738 | */ |
|---|
| 1090 | | - if (q->mq_ops && !strncmp(name, "none", 4)) |
|---|
| 739 | + if (!strncmp(name, "none", 4)) { |
|---|
| 740 | + if (!q->elevator) |
|---|
| 741 | + return 0; |
|---|
| 1091 | 742 | return elevator_switch(q, NULL); |
|---|
| 743 | + } |
|---|
| 1092 | 744 | |
|---|
| 1093 | 745 | strlcpy(elevator_name, name, sizeof(elevator_name)); |
|---|
| 1094 | 746 | e = elevator_get(q, strstrip(elevator_name), true); |
|---|
| 1095 | 747 | if (!e) |
|---|
| 1096 | 748 | return -EINVAL; |
|---|
| 1097 | 749 | |
|---|
| 1098 | | - if (q->elevator && elevator_match(q->elevator->type, elevator_name)) { |
|---|
| 750 | + if (q->elevator && |
|---|
| 751 | + elevator_match(q->elevator->type, elevator_name, 0)) { |
|---|
| 1099 | 752 | elevator_put(e); |
|---|
| 1100 | 753 | return 0; |
|---|
| 1101 | 754 | } |
|---|
| .. | .. |
|---|
| 1103 | 756 | return elevator_switch(q, e); |
|---|
| 1104 | 757 | } |
|---|
| 1105 | 758 | |
|---|
| 1106 | | -static inline bool elv_support_iosched(struct request_queue *q) |
|---|
| 1107 | | -{ |
|---|
| 1108 | | - if (q->mq_ops && q->tag_set && (q->tag_set->flags & |
|---|
| 1109 | | - BLK_MQ_F_NO_SCHED)) |
|---|
| 1110 | | - return false; |
|---|
| 1111 | | - return true; |
|---|
| 1112 | | -} |
|---|
| 1113 | | - |
|---|
| 1114 | 759 | ssize_t elv_iosched_store(struct request_queue *q, const char *name, |
|---|
| 1115 | 760 | size_t count) |
|---|
| 1116 | 761 | { |
|---|
| 1117 | 762 | int ret; |
|---|
| 1118 | 763 | |
|---|
| 1119 | | - if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q)) |
|---|
| 764 | + if (!elv_support_iosched(q)) |
|---|
| 1120 | 765 | return count; |
|---|
| 1121 | 766 | |
|---|
| 1122 | 767 | ret = __elevator_change(q, name); |
|---|
| .. | .. |
|---|
| 1131 | 776 | struct elevator_queue *e = q->elevator; |
|---|
| 1132 | 777 | struct elevator_type *elv = NULL; |
|---|
| 1133 | 778 | struct elevator_type *__e; |
|---|
| 1134 | | - bool uses_mq = q->mq_ops != NULL; |
|---|
| 1135 | 779 | int len = 0; |
|---|
| 1136 | 780 | |
|---|
| 1137 | | - if (!queue_is_rq_based(q)) |
|---|
| 781 | + if (!queue_is_mq(q)) |
|---|
| 1138 | 782 | return sprintf(name, "none\n"); |
|---|
| 1139 | 783 | |
|---|
| 1140 | 784 | if (!q->elevator) |
|---|
| .. | .. |
|---|
| 1144 | 788 | |
|---|
| 1145 | 789 | spin_lock(&elv_list_lock); |
|---|
| 1146 | 790 | list_for_each_entry(__e, &elv_list, list) { |
|---|
| 1147 | | - if (elv && elevator_match(elv, __e->elevator_name) && |
|---|
| 1148 | | - (__e->uses_mq == uses_mq)) { |
|---|
| 791 | + if (elv && elevator_match(elv, __e->elevator_name, 0)) { |
|---|
| 1149 | 792 | len += sprintf(name+len, "[%s] ", elv->elevator_name); |
|---|
| 1150 | 793 | continue; |
|---|
| 1151 | 794 | } |
|---|
| 1152 | | - if (__e->uses_mq && q->mq_ops && elv_support_iosched(q)) |
|---|
| 1153 | | - len += sprintf(name+len, "%s ", __e->elevator_name); |
|---|
| 1154 | | - else if (!__e->uses_mq && !q->mq_ops) |
|---|
| 795 | + if (elv_support_iosched(q) && |
|---|
| 796 | + elevator_match(__e, __e->elevator_name, |
|---|
| 797 | + q->required_elevator_features)) |
|---|
| 1155 | 798 | len += sprintf(name+len, "%s ", __e->elevator_name); |
|---|
| 1156 | 799 | } |
|---|
| 1157 | 800 | spin_unlock(&elv_list_lock); |
|---|
| 1158 | 801 | |
|---|
| 1159 | | - if (q->mq_ops && q->elevator) |
|---|
| 802 | + if (q->elevator) |
|---|
| 1160 | 803 | len += sprintf(name+len, "none"); |
|---|
| 1161 | 804 | |
|---|
| 1162 | 805 | len += sprintf(len+name, "\n"); |
|---|
| .. | .. |
|---|
| 1186 | 829 | return NULL; |
|---|
| 1187 | 830 | } |
|---|
| 1188 | 831 | EXPORT_SYMBOL(elv_rb_latter_request); |
|---|
| 832 | + |
|---|
| 833 | +static int __init elevator_setup(char *str) |
|---|
| 834 | +{ |
|---|
| 835 | + pr_warn("Kernel parameter elevator= does not have any effect anymore.\n" |
|---|
| 836 | + "Please use sysfs to set IO scheduler for individual devices.\n"); |
|---|
| 837 | + return 1; |
|---|
| 838 | +} |
|---|
| 839 | + |
|---|
| 840 | +__setup("elevator=", elevator_setup); |
|---|
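
The handler is kept only so that old command lines do not fail with an unknown-parameter error; scheduler choice is now per-device through sysfs. As a side note on the mechanism, `__setup()` registers an early boot-parameter hook and returning 1 marks the option as consumed, e.g. (hypothetical parameter):

```c
/* Hypothetical example of the __setup() pattern used above: the handler
 * runs during early parameter parsing; returning 1 tells the parser the
 * option was handled. */
static int __init demo_param_setup(char *str)
{
	pr_info("demo_param set to \"%s\"\n", str);
	return 1;
}
__setup("demo_param=", demo_param_setup);
```
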