2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/mmc/core/queue.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2003 Russell King, All Rights Reserved.
  * Copyright 2006-2007 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -14,6 +10,7 @@
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -21,9 +18,11 @@
 #include "queue.h"
 #include "block.h"
 #include "core.h"
-#include "crypto.h"
 #include "card.h"
+#include "crypto.h"
 #include "host.h"
+
+#define MMC_DMA_MAP_MERGE_SEGMENTS 512
 
 static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
 {
@@ -72,6 +71,7 @@
 
         return MMC_ISSUE_SYNC;
 }
+EXPORT_SYMBOL_GPL(mmc_issue_type);
 
 static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
 {
@@ -90,9 +90,9 @@
         struct mmc_queue *mq = q->queuedata;
         unsigned long flags;
 
-        spin_lock_irqsave(q->queue_lock, flags);
+        spin_lock_irqsave(&mq->lock, flags);
         __mmc_cqe_recovery_notifier(mq);
-        spin_unlock_irqrestore(q->queue_lock, flags);
+        spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
@@ -130,9 +130,9 @@
         unsigned long flags;
         bool ignore_tout;
 
-        spin_lock_irqsave(q->queue_lock, flags);
+        spin_lock_irqsave(&mq->lock, flags);
         ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
-        spin_unlock_irqrestore(q->queue_lock, flags);
+        spin_unlock_irqrestore(&mq->lock, flags);
 
         return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
 }
@@ -155,9 +155,9 @@
 
         mq->in_recovery = false;
 
-        spin_lock_irq(q->queue_lock);
+        spin_lock_irq(&mq->lock);
         mq->recovery_needed = false;
-        spin_unlock_irq(q->queue_lock);
+        spin_unlock_irq(&mq->lock);
 
         if (host->hsq_enabled)
                 host->cqe_ops->cqe_recovery_finish(host);
@@ -197,9 +197,15 @@
                 blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
 }
 
+static unsigned int mmc_get_max_segments(struct mmc_host *host)
+{
+        return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
+                                         host->max_segs;
+}
+
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
- * @q: the request queue
+ * @mq: the request queue
  * @req: the request
  * @gfp: memory allocation policy
  */
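A note on the helper just added: when host->can_dma_map_merge is set, the core deliberately advertises more segments than the controller itself supports. For example (hypothetical numbers), a host limited to 64 hardware segments still has 512 scatterlist entries allocated per request, on the assumption that the DMA-map layer (typically an IOMMU) merges them back to within the hardware limit at dma_map_sg() time.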
@@ -210,7 +216,7 @@
         struct mmc_card *card = mq->card;
         struct mmc_host *host = card->host;
 
-        mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+        mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
         if (!mq_rq->sg)
                 return -ENOMEM;
 
@@ -259,10 +265,10 @@
 
         issue_type = mmc_issue_type(mq, req);
 
-        spin_lock_irq(q->queue_lock);
+        spin_lock_irq(&mq->lock);
 
         if (mq->recovery_needed || mq->busy) {
-                spin_unlock_irq(q->queue_lock);
+                spin_unlock_irq(&mq->lock);
                 return BLK_STS_RESOURCE;
         }
 
@@ -270,7 +276,7 @@
         case MMC_ISSUE_DCMD:
                 if (mmc_cqe_dcmd_busy(mq)) {
                         mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
-                        spin_unlock_irq(q->queue_lock);
+                        spin_unlock_irq(&mq->lock);
                         return BLK_STS_RESOURCE;
                 }
                 break;
@@ -280,7 +286,7 @@
                  * flight to avoid a long latency.
                  */
                 if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
-                        spin_unlock_irq(q->queue_lock);
+                        spin_unlock_irq(&mq->lock);
                         return BLK_STS_RESOURCE;
                 }
                 break;
@@ -303,7 +309,7 @@
         get_card = (mmc_tot_in_flight(mq) == 1);
         cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
 
-        spin_unlock_irq(q->queue_lock);
+        spin_unlock_irq(&mq->lock);
 
         if (!(req->rq_flags & RQF_DONTPREP)) {
                 req_to_mmc_queue_req(req)->retries = 0;
@@ -337,12 +343,12 @@
         if (issued != MMC_REQ_STARTED) {
                 bool put_card = false;
 
-                spin_lock_irq(q->queue_lock);
+                spin_lock_irq(&mq->lock);
                 mq->in_flight[issue_type] -= 1;
                 if (mmc_tot_in_flight(mq) == 0)
                         put_card = true;
                 mq->busy = false;
-                spin_unlock_irq(q->queue_lock);
+                spin_unlock_irq(&mq->lock);
                 if (put_card)
                         mmc_put_card(card, &mq->ctx);
         } else {
@@ -363,21 +369,22 @@
 static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 {
         struct mmc_host *host = card->host;
-        u64 limit = BLK_BOUNCE_HIGH;
         unsigned block_size = 512;
-
-        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
         blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
         blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
         if (mmc_can_erase(card))
                 mmc_queue_setup_discard(mq->queue, card);
 
-        blk_queue_bounce_limit(mq->queue, limit);
+        if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
+                blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
         blk_queue_max_hw_sectors(mq->queue,
                 min(host->max_blk_count, host->max_req_size / 512));
-        blk_queue_max_segments(mq->queue, host->max_segs);
+        if (host->can_dma_map_merge)
+                WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
+                                                        mmc_dev(host)),
+                     "merging was advertised but not possible");
+        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
 
         if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
                 block_size = card->ext_csd.data_sector_size;
@@ -385,8 +392,16 @@
         }
 
         blk_queue_logical_block_size(mq->queue, block_size);
-        blk_queue_max_segment_size(mq->queue,
+        /*
+         * When blk_queue_can_use_dma_map_merging() succeeds it has already
+         * called blk_queue_virt_boundary(), so MMC must not also set
+         * blk_queue_max_segment_size() in that case.
+         */
+        if (!host->can_dma_map_merge)
+                blk_queue_max_segment_size(mq->queue,
                         round_down(host->max_seg_size, block_size));
+
+        dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
 
         INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
         INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
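To make the limit arithmetic above concrete with hypothetical host values: for max_blk_count = 65535 and max_req_size = 524288, max_hw_sectors becomes min(65535, 524288 / 512) = 1024 sectors, i.e. 512 KiB per request. On a 4096-byte data-sector eMMC with max_seg_size = 65536, the non-merging path limits segments to round_down(65536, 4096) = 65536 bytes, and dma_set_max_seg_size() then reports the same value to the DMA layer.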
@@ -394,22 +409,67 @@
         mutex_init(&mq->complete_lock);
 
         init_waitqueue_head(&mq->wait);
+
+        mmc_crypto_setup_queue(mq->queue, host);
 }
 
-static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
-                             const struct blk_mq_ops *mq_ops, spinlock_t *lock)
+static inline bool mmc_merge_capable(struct mmc_host *host)
 {
+        return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
+}
+
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#ifdef CONFIG_MMC_QUEUE_DEPTH
+#define MMC_QUEUE_DEPTH CONFIG_MMC_QUEUE_DEPTH
+#else
+#define MMC_QUEUE_DEPTH 64
+#endif
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ *
+ * Initialise a MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+        struct mmc_host *host = card->host;
         int ret;
 
+        mq->card = card;
+        mq->use_cqe = host->cqe_enabled;
+
+        spin_lock_init(&mq->lock);
+
         memset(&mq->tag_set, 0, sizeof(mq->tag_set));
-        mq->tag_set.ops = mq_ops;
-        mq->tag_set.queue_depth = q_depth;
+        mq->tag_set.ops = &mmc_mq_ops;
+        /*
+         * The queue depth for CQE must match the hardware because the request
+         * tag is used to index the hardware queue.
+         */
+        if (mq->use_cqe && !host->hsq_enabled)
+                mq->tag_set.queue_depth =
+                        min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+        else
+                mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
         mq->tag_set.numa_node = NUMA_NO_NODE;
-        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
-                            BLK_MQ_F_BLOCKING;
+        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
         mq->tag_set.nr_hw_queues = 1;
         mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
         mq->tag_set.driver_data = mq;
+
+        /*
+         * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, so
+         * host->can_dma_map_merge must be decided before that call for
+         * mmc_get_max_segments() to return the right segment count.
+         */
+        if (mmc_merge_capable(host) &&
+            host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
+            dma_get_merge_boundary(mmc_dev(host)))
+                host->can_dma_map_merge = 1;
+        else
+                host->can_dma_map_merge = 0;
 
         ret = blk_mq_alloc_tag_set(&mq->tag_set);
         if (ret)
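For context, a minimal, hypothetical host-driver sketch of the opt-in side of the merge logic above (example_mmc_probe and its platform device are invented names; the actual decision stays in mmc_init_queue(), which additionally requires dma_get_merge_boundary() to be non-zero and max_segs below MMC_DMA_MAP_MERGE_SEGMENTS):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>

static int example_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        int ret;

        mmc = mmc_alloc_host(0, &pdev->dev);
        if (!mmc)
                return -ENOMEM;

        /* Hardware can only take 64 segments per request ... */
        mmc->max_segs = 64;
        /* ... but an IOMMU that can merge is expected, so opt in. */
        mmc->caps2 |= MMC_CAP2_MERGE_CAPABLE;

        /* set mmc->ops, mmc->f_min/f_max, request IRQs, etc. here */

        ret = mmc_add_host(mmc);
        if (ret)
                mmc_free_host(mmc);
        return ret;
}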
@@ -421,68 +481,18 @@
                 goto free_tag_set;
         }
 
-        mq->queue->queue_lock = lock;
-        mq->queue->queuedata = mq;
+        if (mmc_host_is_spi(host) && host->use_spi_crc)
+                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
 
+        mq->queue->queuedata = mq;
+        blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+        mmc_setup_queue(mq, card);
         return 0;
 
 free_tag_set:
         blk_mq_free_tag_set(&mq->tag_set);
-
         return ret;
-}
-
-/* Set queue depth to get a reasonable value for q->nr_requests */
-#define MMC_QUEUE_DEPTH 64
-
-static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
-                       spinlock_t *lock)
-{
-        struct mmc_host *host = card->host;
-        int q_depth;
-        int ret;
-
-        /*
-         * The queue depth for CQE must match the hardware because the request
-         * tag is used to index the hardware queue.
-         */
-        if (mq->use_cqe && !host->hsq_enabled)
-                q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
-        else
-                q_depth = MMC_QUEUE_DEPTH;
-
-        ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
-        if (ret)
-                return ret;
-
-        blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
-        mmc_setup_queue(mq, card);
-
-        mmc_crypto_setup_queue(host, mq->queue);
-
-        return 0;
-}
-
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
-                   spinlock_t *lock, const char *subname)
-{
-        struct mmc_host *host = card->host;
-
-        mq->card = card;
-
-        mq->use_cqe = host->cqe_enabled;
-
-        return mmc_mq_init(mq, card, lock);
 }
 
 void mmc_queue_suspend(struct mmc_queue *mq)
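Finally, a hedged sketch of the caller side after this consolidation (example_attach_queue is an invented name, and the snippet assumes it lives in block.c where struct mmc_blk_data and its embedded queue are defined): the spinlock and partition-subname parameters of mmc_init_queue() are gone, and the queue now initialises its own mq->lock.

static int example_attach_queue(struct mmc_blk_data *md, struct mmc_card *card)
{
        int ret;

        /*
         * One call now covers tag-set allocation, queue limits and
         * inline-crypto setup; no external lock is passed in.
         */
        ret = mmc_init_queue(&md->queue, card);
        if (ret)
                return ret;

        md->queue.blkdata = md;        /* back-pointer used by the block driver */
        return 0;
}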