@@ -43,7 +43,7 @@
 
 #include <trace/hooks/block.h>
 
-static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
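
For context on the hunk above: the llist_head being replaced is the kernel's lock-free singly linked list, whose llist_add() atomically pushes a node and reports whether the list was empty beforehand. A plain list_head offers no such atomicity, which is why every access to blk_cpu_done in the hunks below is bracketed by interrupt masking. A rough user-space model of the llist-style primitive being given up, using C11 atomics (all names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct lnode { struct lnode *next; };

/* Lock-free push; returns true if the list was empty before the add,
 * mirroring the semantics of the kernel's llist_add(). */
static bool lpush(struct lnode *_Atomic *head, struct lnode *n)
{
	struct lnode *old = atomic_load(head);
	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(head, &old, n));
	return old == NULL;
}

/* Detach the entire list in one shot, like llist_del_all().
 * Entries come back in LIFO order. */
static struct lnode *lpop_all(struct lnode *_Atomic *head)
{
	return atomic_exchange(head, NULL);
}

int main(void)
{
	static struct lnode *_Atomic head;
	struct lnode a, b;

	printf("was empty: %d\n", lpush(&head, &a)); /* 1: a raise is needed */
	printf("was empty: %d\n", lpush(&head, &b)); /* 0: already raised */
	for (struct lnode *n = lpop_all(&head); n; n = n->next)
		printf("draining %p\n", (void *)n);
	return 0;
}

Note that the pop-all drains in LIFO order, which is why the removed blk_complete_reqs() below calls llist_reverse_order() before walking the entries.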
@@ -571,29 +571,80 @@
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
-static void blk_complete_reqs(struct llist_head *list)
-{
-	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
-	struct request *rq, *next;
-
-	llist_for_each_entry_safe(rq, next, entry, ipi_list)
-		rq->q->mq_ops->complete(rq);
-}
-
+/*
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
 static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
-	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
+	struct list_head *cpu_list, local_list;
+
+	local_irq_disable();
+	cpu_list = this_cpu_ptr(&blk_cpu_done);
+	list_replace_init(cpu_list, &local_list);
+	local_irq_enable();
+
+	while (!list_empty(&local_list)) {
+		struct request *rq;
+
+		rq = list_entry(local_list.next, struct request, ipi_list);
+		list_del_init(&rq->ipi_list);
+		rq->q->mq_ops->complete(rq);
+	}
+}
+
+static void blk_mq_trigger_softirq(struct request *rq)
+{
+	struct list_head *list;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	list = this_cpu_ptr(&blk_cpu_done);
+	list_add_tail(&rq->ipi_list, list);
+
+	/*
+	 * If the list only contains our just added request, signal a raise of
+	 * the softirq. If there are already entries there, someone already
+	 * raised the irq but it hasn't run yet.
+	 */
+	if (list->next == &rq->ipi_list)
+		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	local_irq_restore(flags);
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
 {
-	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq.
+	 */
+	local_irq_disable();
+	list_splice_init(&per_cpu(blk_cpu_done, cpu),
+			 this_cpu_ptr(&blk_cpu_done));
+	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	local_irq_enable();
+
 	return 0;
 }
 
+
 static void __blk_mq_complete_request_remote(void *data)
 {
-	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	struct request *rq = data;
+
+	/*
+	 * For most single queue controllers there is only one irq vector
+	 * for handling I/O completion, and the only irq's affinity is set
+	 * to all possible CPUs. On most archs, this affinity means the irq
+	 * is handled on one specific CPU.
+	 *
+	 * So complete I/O requests in softirq context in case of single queue
+	 * devices to avoid degrading I/O performance due to irqsoff latency.
+	 */
+	if (rq->q->nr_hw_queues == 1)
+		blk_mq_trigger_softirq(rq);
+	else
+		rq->q->mq_ops->complete(rq);
 }
 
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
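
The rewritten handler pair above is a classic producer/consumer batching pattern: blk_mq_trigger_softirq() appends to the per-CPU list with interrupts masked and raises BLOCK_SOFTIRQ only on the empty-to-non-empty transition, while blk_done_softirq() snapshots the whole list with list_replace_init() so the (potentially long) completion walk runs with interrupts enabled. A minimal, single-threaded user-space model of that flow; the list helpers are simplified rewrites of their <linux/list.h> namesakes, and softirq_pending stands in for the raised softirq:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive circular list in the style of <linux/list.h>. */
struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Move the whole chain from @old onto @new and leave @old empty,
 * like the kernel's list_replace_init(). */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	if (list_empty(old)) {
		INIT_LIST_HEAD(new);
		return;
	}
	new->next = old->next;
	new->prev = old->prev;
	new->next->prev = new;
	new->prev->next = new;
	INIT_LIST_HEAD(old);
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

struct request { int tag; struct list_head ipi_list; };

static struct list_head cpu_done;  /* models this CPU's blk_cpu_done */
static bool softirq_pending;       /* models a raised BLOCK_SOFTIRQ */

/* Producer, as in blk_mq_trigger_softirq(): raise only on first entry. */
static void trigger(struct request *rq)
{
	list_add_tail(&rq->ipi_list, &cpu_done);
	if (cpu_done.next == &rq->ipi_list)
		softirq_pending = true;
}

/* Handler, as in blk_done_softirq(): snapshot, then walk "unlocked". */
static void softirq_run(void)
{
	struct list_head local;

	list_replace_init(&cpu_done, &local);
	while (!list_empty(&local)) {
		struct request *rq =
			container_of(local.next, struct request, ipi_list);

		list_del_init(&rq->ipi_list);
		printf("completing request %d\n", rq->tag);
	}
}

int main(void)
{
	struct request r1 = { .tag = 1 }, r2 = { .tag = 2 };

	INIT_LIST_HEAD(&cpu_done);
	trigger(&r1);
	trigger(&r2);  /* list non-empty: no second raise needed */
	if (softirq_pending)
		softirq_run();
	return 0;
}

The empty-check on the producer side means an arbitrary burst of completions costs exactly one softirq raise; the handler then drains the whole batch in a single pass.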
@@ -602,14 +653,6 @@
 
 	if (!IS_ENABLED(CONFIG_SMP) ||
 	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
-		return false;
-	/*
-	 * With force threaded interrupts enabled, raising softirq from an SMP
-	 * function call will always result in waking the ksoftirqd thread.
-	 * This is probably worse than completing the request on a different
-	 * cache domain.
-	 */
-	if (force_irqthreads)
 		return false;
 
 	/* same CPU or cache domain? Complete locally */
@@ -620,32 +663,6 @@
 
 	/* don't try to IPI to an offline CPU */
 	return cpu_online(rq->mq_ctx->cpu);
-}
-
-static void blk_mq_complete_send_ipi(struct request *rq)
-{
-	struct llist_head *list;
-	unsigned int cpu;
-
-	cpu = rq->mq_ctx->cpu;
-	list = &per_cpu(blk_cpu_done, cpu);
-	if (llist_add(&rq->ipi_list, list)) {
-		rq->csd.func = __blk_mq_complete_request_remote;
-		rq->csd.info = rq;
-		rq->csd.flags = 0;
-		smp_call_function_single_async(cpu, &rq->csd);
-	}
-}
-
-static void blk_mq_raise_softirq(struct request *rq)
-{
-	struct llist_head *list;
-
-	preempt_disable();
-	list = this_cpu_ptr(&blk_cpu_done);
-	if (llist_add(&rq->ipi_list, list))
-		raise_softirq(BLOCK_SOFTIRQ);
-	preempt_enable();
 }
 
 bool blk_mq_complete_request_remote(struct request *rq)
@@ -660,15 +677,17 @@
 		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
-		blk_mq_complete_send_ipi(rq);
-		return true;
+		rq->csd.func = __blk_mq_complete_request_remote;
+		rq->csd.info = rq;
+		rq->csd.flags = 0;
+		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
+	} else {
+		if (rq->q->nr_hw_queues > 1)
+			return false;
+		blk_mq_trigger_softirq(rq);
 	}
 
-	if (rq->q->nr_hw_queues == 1) {
-		blk_mq_raise_softirq(rq);
-		return true;
-	}
-	return false;
+	return true;
 }
 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
 
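
After this hunk, blk_mq_complete_request_remote() routes each completion one of three ways: an IPI to the submitting CPU (handled by __blk_mq_complete_request_remote(), which itself falls back to the softirq for single-hw-queue devices), the local BLOCK_SOFTIRQ, or a false return so the caller completes in place. A compact sketch of that decision as a pure function, with illustrative names rather than kernel API:

#include <stdbool.h>
#include <stdio.h>

enum completion_path {
	COMPLETE_CALLER,   /* return false: caller runs ->complete() itself */
	COMPLETE_IPI,      /* smp_call_function_single_async() to rq's CPU  */
	COMPLETE_SOFTIRQ,  /* queue on this CPU's list, raise BLOCK_SOFTIRQ */
};

/* Mirrors the post-patch control flow of blk_mq_complete_request_remote(). */
static enum completion_path route(bool need_ipi, unsigned int nr_hw_queues)
{
	if (need_ipi)
		return COMPLETE_IPI;
	if (nr_hw_queues > 1)
		return COMPLETE_CALLER;
	return COMPLETE_SOFTIRQ;
}

int main(void)
{
	printf("remote CPU, any queue count -> %d (IPI)\n", route(true, 4));
	printf("local CPU, multi-queue      -> %d (caller)\n", route(false, 4));
	printf("local CPU, single queue     -> %d (softirq)\n", route(false, 1));
	return 0;
}

Returning true tells the caller the completion has been handed off; a multi-queue device that needs no IPI takes the false branch and completes directly in the caller's context, the fast path.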
@@ -1577,14 +1596,14 @@
 		return;
 
 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		int cpu = get_cpu_light();
+		int cpu = get_cpu();
 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
 			__blk_mq_run_hw_queue(hctx);
-			put_cpu_light();
+			put_cpu();
 			return;
 		}
 
-		put_cpu_light();
+		put_cpu();
 	}
 
 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
@@ -4019,7 +4038,7 @@
 	int i;
 
 	for_each_possible_cpu(i)
-		init_llist_head(&per_cpu(blk_cpu_done, i));
+		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,