From e636c8d336489bf3eed5878299e6cc045bbad077 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:17:29 +0000
Subject: [PATCH] mmc: queue: use a per-queue lock and support DMA map merging
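
Rework the MMC request queue setup:

- Convert the license boilerplate to an SPDX identifier.
- Replace the parent device's queue_lock with a per-queue mq->lock for
  protecting recovery_needed, busy and in_flight state.
- When the host sets MMC_CAP2_MERGE_CAPABLE and the DMA layer reports a
  merge boundary (dma_get_merge_boundary()), advertise up to
  MMC_DMA_MAP_MERGE_SEGMENTS (512) segments instead of host->max_segs.
- Fold mmc_mq_init()/mmc_mq_init_queue() into mmc_init_queue() and drop
  the now-unused lock and subname parameters.
- Set QUEUE_FLAG_STABLE_WRITES for SPI hosts that use CRC.

The advertised segment limit can be checked from userspace after boot,
e.g. (the block device name is board-specific):

  cat /sys/block/mmcblk0/queue/max_segments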
---
kernel/drivers/mmc/core/queue.c | 190 +++++++++++++++++++++++++----------------------
 1 file changed, 100 insertions(+), 90 deletions(-)
diff --git a/kernel/drivers/mmc/core/queue.c b/kernel/drivers/mmc/core/queue.c
index 36cce8e..c4db326 100644
--- a/kernel/drivers/mmc/core/queue.c
+++ b/kernel/drivers/mmc/core/queue.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2006-2007 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/slab.h>
#include <linux/module.h>
@@ -14,6 +10,7 @@
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -21,9 +18,11 @@
#include "queue.h"
#include "block.h"
#include "core.h"
-#include "crypto.h"
#include "card.h"
+#include "crypto.h"
#include "host.h"
+
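+/* Segment count to advertise when the DMA layer can merge mappings */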
+#define MMC_DMA_MAP_MERGE_SEGMENTS 512
static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
@@ -72,6 +71,7 @@
return MMC_ISSUE_SYNC;
}
+EXPORT_SYMBOL_GPL(mmc_issue_type);
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
@@ -90,9 +90,9 @@
struct mmc_queue *mq = q->queuedata;
unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
__mmc_cqe_recovery_notifier(mq);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
}
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
@@ -130,9 +130,9 @@
unsigned long flags;
bool ignore_tout;
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
@@ -155,9 +155,9 @@
mq->in_recovery = false;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&mq->lock);
mq->recovery_needed = false;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
if (host->hsq_enabled)
host->cqe_ops->cqe_recovery_finish(host);
@@ -197,9 +197,15 @@
blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}
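+/*
+ * With DMA map merging (e.g. behind an IOMMU) the mapped scatterlist can
+ * end up shorter than the submitted one, so more segments than
+ * host->max_segs may be accepted per request.
+ */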
+static unsigned int mmc_get_max_segments(struct mmc_host *host)
+{
+ return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
+ host->max_segs;
+}
+
/**
* mmc_init_request() - initialize the MMC-specific per-request data
- * @q: the request queue
+ * @mq: the mmc queue
* @req: the request
* @gfp: memory allocation policy
*/
@@ -210,7 +216,7 @@
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
- mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+ mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
if (!mq_rq->sg)
return -ENOMEM;
@@ -259,10 +265,10 @@
issue_type = mmc_issue_type(mq, req);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&mq->lock);
if (mq->recovery_needed || mq->busy) {
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
@@ -270,7 +276,7 @@
case MMC_ISSUE_DCMD:
if (mmc_cqe_dcmd_busy(mq)) {
mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
break;
@@ -280,7 +286,7 @@
* flight to avoid a long latency.
*/
if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
break;
@@ -303,7 +309,7 @@
get_card = (mmc_tot_in_flight(mq) == 1);
cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
if (!(req->rq_flags & RQF_DONTPREP)) {
req_to_mmc_queue_req(req)->retries = 0;
@@ -337,12 +343,12 @@
if (issued != MMC_REQ_STARTED) {
bool put_card = false;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&mq->lock);
mq->in_flight[issue_type] -= 1;
if (mmc_tot_in_flight(mq) == 0)
put_card = true;
mq->busy = false;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
if (put_card)
mmc_put_card(card, &mq->ctx);
} else {
@@ -363,21 +369,22 @@
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
- u64 limit = BLK_BOUNCE_HIGH;
unsigned block_size = 512;
-
- if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
- blk_queue_bounce_limit(mq->queue, limit);
+ if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
+ blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
+ if (host->can_dma_map_merge)
+ WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
+ mmc_dev(host)),
+ "merging was advertised but not possible");
+ blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
block_size = card->ext_csd.data_sector_size;
@@ -385,8 +392,16 @@
}
blk_queue_logical_block_size(mq->queue, block_size);
- blk_queue_max_segment_size(mq->queue,
+	/*
+	 * blk_queue_can_use_dma_map_merging() calls blk_queue_virt_boundary()
+	 * when it succeeds, and a queue must not set both a virt boundary and
+	 * a max segment size, so only call blk_queue_max_segment_size() when
+	 * DMA map merging is not in use.
+	 */
+ if (!host->can_dma_map_merge)
+ blk_queue_max_segment_size(mq->queue,
round_down(host->max_seg_size, block_size));
+
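+	/* Keep the device's DMA segment size limit in sync with the queue's */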
+ dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
@@ -394,22 +409,67 @@
mutex_init(&mq->complete_lock);
init_waitqueue_head(&mq->wait);
+
+ mmc_crypto_setup_queue(mq->queue, host);
}
-static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
- const struct blk_mq_ops *mq_ops, spinlock_t *lock)
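+/* True when the host driver advertises MMC_CAP2_MERGE_CAPABLE */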
+static inline bool mmc_merge_capable(struct mmc_host *host)
{
+ return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
+}
+
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#ifdef CONFIG_MMC_QUEUE_DEPTH
+#define MMC_QUEUE_DEPTH CONFIG_MMC_QUEUE_DEPTH
+#else
+#define MMC_QUEUE_DEPTH 64
+#endif
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ *
+ * Initialise an MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
int ret;
+ mq->card = card;
+ mq->use_cqe = host->cqe_enabled;
+
+ spin_lock_init(&mq->lock);
+
memset(&mq->tag_set, 0, sizeof(mq->tag_set));
- mq->tag_set.ops = mq_ops;
- mq->tag_set.queue_depth = q_depth;
+ mq->tag_set.ops = &mmc_mq_ops;
+ /*
+ * The queue depth for CQE must match the hardware because the request
+ * tag is used to index the hardware queue.
+ */
+ if (mq->use_cqe && !host->hsq_enabled)
+ mq->tag_set.queue_depth =
+ min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+ else
+ mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
mq->tag_set.numa_node = NUMA_NO_NODE;
- mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
- BLK_MQ_F_BLOCKING;
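+	/* Request issuing runs in process context and may sleep, hence
+	 * BLK_MQ_F_BLOCKING.
+	 */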
+ mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
mq->tag_set.nr_hw_queues = 1;
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq->tag_set.driver_data = mq;
+
+	/*
+	 * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, which
+	 * sizes the scatterlist via mmc_get_max_segments(), so
+	 * host->can_dma_map_merge must be set before that call.
+	 */
+ if (mmc_merge_capable(host) &&
+ host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
+ dma_get_merge_boundary(mmc_dev(host)))
+ host->can_dma_map_merge = 1;
+ else
+ host->can_dma_map_merge = 0;
ret = blk_mq_alloc_tag_set(&mq->tag_set);
if (ret)
@@ -421,68 +481,18 @@
goto free_tag_set;
}
- mq->queue->queue_lock = lock;
- mq->queue->queuedata = mq;
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
+ mq->queue->queuedata = mq;
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+ mmc_setup_queue(mq, card);
return 0;
free_tag_set:
blk_mq_free_tag_set(&mq->tag_set);
-
return ret;
-}
-
-/* Set queue depth to get a reasonable value for q->nr_requests */
-#define MMC_QUEUE_DEPTH 64
-
-static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock)
-{
- struct mmc_host *host = card->host;
- int q_depth;
- int ret;
-
- /*
- * The queue depth for CQE must match the hardware because the request
- * tag is used to index the hardware queue.
- */
- if (mq->use_cqe && !host->hsq_enabled)
- q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
- else
- q_depth = MMC_QUEUE_DEPTH;
-
- ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
- if (ret)
- return ret;
-
- blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
- mmc_setup_queue(mq, card);
-
- mmc_crypto_setup_queue(host, mq->queue);
-
- return 0;
-}
-
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
-{
- struct mmc_host *host = card->host;
-
- mq->card = card;
-
- mq->use_cqe = host->cqe_enabled;
-
- return mmc_mq_init(mq, card, lock);
}
void mmc_queue_suspend(struct mmc_queue *mq)
--
Gitblit v1.6.2