From ea08eeccae9297f7aabd2ef7f0c2517ac4549acc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:18:26 +0000
Subject: [PATCH] write in 30M
---
kernel/block/blk-mq.c | 140 ++++++++++++++++++++++++++--------------------
 1 file changed, 80 insertions(+), 60 deletions(-)
diff --git a/kernel/block/blk-mq.c b/kernel/block/blk-mq.c
index ba16e1d..21544b1 100644
--- a/kernel/block/blk-mq.c
+++ b/kernel/block/blk-mq.c
@@ -43,7 +43,7 @@
#include <trace/hooks/block.h>
-static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -451,7 +451,8 @@
* allocator for this for the rare use case of a command tied to
* a specific queue.
*/
- if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
+ if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
+ WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
return ERR_PTR(-EINVAL);
if (hctx_idx >= q->nr_hw_queues)
@@ -571,29 +572,80 @@
}
EXPORT_SYMBOL(blk_mq_end_request);
-static void blk_complete_reqs(struct llist_head *list)
-{
- struct llist_node *entry = llist_reverse_order(llist_del_all(list));
- struct request *rq, *next;
-
- llist_for_each_entry_safe(rq, next, entry, ipi_list)
- rq->q->mq_ops->complete(rq);
-}
-
+/*
+ * Softirq action handler - move entries to a local list and loop over
+ * them while passing them to the queue's registered completion handler.
+ */
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
- blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
+ struct list_head *cpu_list, local_list;
+
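+	/*
+	 * Detach the per-CPU list with interrupts disabled; it is appended
+	 * to from hardirq context by blk_mq_trigger_softirq().
+	 */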
+ local_irq_disable();
+ cpu_list = this_cpu_ptr(&blk_cpu_done);
+ list_replace_init(cpu_list, &local_list);
+ local_irq_enable();
+
+ while (!list_empty(&local_list)) {
+ struct request *rq;
+
+ rq = list_entry(local_list.next, struct request, ipi_list);
+ list_del_init(&rq->ipi_list);
+ rq->q->mq_ops->complete(rq);
+ }
+}
+
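+/*
+ * Queue @rq on this CPU's completion list and raise the block softirq
+ * if it is not already pending. Safe to call from hardirq context.
+ */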
+static void blk_mq_trigger_softirq(struct request *rq)
+{
+ struct list_head *list;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ list = this_cpu_ptr(&blk_cpu_done);
+ list_add_tail(&rq->ipi_list, list);
+
+ /*
+	 * If the list only contains our just-added request, raise the
+	 * softirq. If there are already entries there, someone has already
+	 * raised the softirq but it hasn't run yet.
+ */
+ if (list->next == &rq->ipi_list)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_restore(flags);
}
static int blk_softirq_cpu_dead(unsigned int cpu)
{
- blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
+ /*
+ * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq.
+ */
+ local_irq_disable();
+ list_splice_init(&per_cpu(blk_cpu_done, cpu),
+ this_cpu_ptr(&blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
+
return 0;
}
+
static void __blk_mq_complete_request_remote(void *data)
{
- __raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ struct request *rq = data;
+
+ /*
+	 * For most single queue controllers there is only one irq vector
+	 * for handling I/O completion, and its affinity is set to all
+	 * possible CPUs. On most architectures this means the irq is
+	 * handled on one specific CPU.
+	 *
+	 * So complete I/O requests in softirq context for single queue
+	 * devices to avoid degrading I/O performance due to irqsoff latency.
+ */
+ if (rq->q->nr_hw_queues == 1)
+ blk_mq_trigger_softirq(rq);
+ else
+ rq->q->mq_ops->complete(rq);
}
static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -602,14 +654,6 @@
if (!IS_ENABLED(CONFIG_SMP) ||
!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
- return false;
- /*
- * With force threaded interrupts enabled, raising softirq from an SMP
- * function call will always result in waking the ksoftirqd thread.
- * This is probably worse than completing the request on a different
- * cache domain.
- */
- if (force_irqthreads)
return false;
/* same CPU or cache domain? Complete locally */
@@ -620,32 +664,6 @@
/* don't try to IPI to an offline CPU */
return cpu_online(rq->mq_ctx->cpu);
-}
-
-static void blk_mq_complete_send_ipi(struct request *rq)
-{
- struct llist_head *list;
- unsigned int cpu;
-
- cpu = rq->mq_ctx->cpu;
- list = &per_cpu(blk_cpu_done, cpu);
- if (llist_add(&rq->ipi_list, list)) {
- rq->csd.func = __blk_mq_complete_request_remote;
- rq->csd.info = rq;
- rq->csd.flags = 0;
- smp_call_function_single_async(cpu, &rq->csd);
- }
-}
-
-static void blk_mq_raise_softirq(struct request *rq)
-{
- struct llist_head *list;
-
- preempt_disable();
- list = this_cpu_ptr(&blk_cpu_done);
- if (llist_add(&rq->ipi_list, list))
- raise_softirq(BLOCK_SOFTIRQ);
- preempt_enable();
}
bool blk_mq_complete_request_remote(struct request *rq)
@@ -660,15 +678,17 @@
return false;
if (blk_mq_complete_need_ipi(rq)) {
- blk_mq_complete_send_ipi(rq);
- return true;
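+		/* Complete remotely: fire an IPI at the submitting CPU. */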
+ rq->csd.func = __blk_mq_complete_request_remote;
+ rq->csd.info = rq;
+ rq->csd.flags = 0;
+ smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
+ } else {
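+		/* Multiqueue devices: let the caller complete locally. */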
+ if (rq->q->nr_hw_queues > 1)
+ return false;
+ blk_mq_trigger_softirq(rq);
}
- if (rq->q->nr_hw_queues == 1) {
- blk_mq_raise_softirq(rq);
- return true;
- }
- return false;
+ return true;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
@@ -1577,14 +1597,14 @@
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
- int cpu = get_cpu_light();
+ int cpu = get_cpu();
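+		/* get_cpu() disables preemption, keeping us on this CPU */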
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
- put_cpu_light();
+ put_cpu();
return;
}
- put_cpu_light();
+ put_cpu();
}
kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
@@ -2228,7 +2248,7 @@
blk_mq_bio_to_request(rq, bio, nr_segs);
- ret = blk_crypto_init_request(rq);
+ ret = blk_crypto_rq_get_keyslot(rq);
if (ret != BLK_STS_OK) {
bio->bi_status = ret;
bio_endio(bio);
@@ -4019,7 +4039,7 @@
int i;
for_each_possible_cpu(i)
- init_llist_head(&per_cpu(blk_cpu_done, i));
+ INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
--
Gitblit v1.6.2