@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2003 Russell King, All Rights Reserved.
  * Copyright 2006-2007 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -14,6 +10,7 @@
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>

 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -21,9 +18,11 @@
 #include "queue.h"
 #include "block.h"
 #include "core.h"
-#include "crypto.h"
 #include "card.h"
+#include "crypto.h"
 #include "host.h"
+
+#define MMC_DMA_MAP_MERGE_SEGMENTS 512

 static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
 {
@@ -72,6 +71,7 @@

         return MMC_ISSUE_SYNC;
 }
+EXPORT_SYMBOL_GPL(mmc_issue_type);

 static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
 {
@@ -90,9 +90,9 @@
         struct mmc_queue *mq = q->queuedata;
         unsigned long flags;

-        spin_lock_irqsave(q->queue_lock, flags);
+        spin_lock_irqsave(&mq->lock, flags);
         __mmc_cqe_recovery_notifier(mq);
-        spin_unlock_irqrestore(q->queue_lock, flags);
+        spin_unlock_irqrestore(&mq->lock, flags);
 }

 static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
@@ -130,9 +130,9 @@
         unsigned long flags;
         bool ignore_tout;

-        spin_lock_irqsave(q->queue_lock, flags);
+        spin_lock_irqsave(&mq->lock, flags);
         ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
-        spin_unlock_irqrestore(q->queue_lock, flags);
+        spin_unlock_irqrestore(&mq->lock, flags);

         return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
 }
@@ -155,9 +155,9 @@

         mq->in_recovery = false;

-        spin_lock_irq(q->queue_lock);
+        spin_lock_irq(&mq->lock);
         mq->recovery_needed = false;
-        spin_unlock_irq(q->queue_lock);
+        spin_unlock_irq(&mq->lock);

         if (host->hsq_enabled)
                 host->cqe_ops->cqe_recovery_finish(host);
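
Note: the hunks above (and the matching ones in mmc_mq_queue_rq() further down) move the mmc queue state from the block layer's shared q->queue_lock to a lock owned by the mmc queue itself; the corresponding spin_lock_init() appears in mmc_init_queue() below. For orientation only, the member is expected to live in queue.h roughly as in this abridged sketch (not part of this diff):

struct mmc_queue {
        struct mmc_card         *card;
        struct mmc_ctx          ctx;
        struct blk_mq_tag_set   tag_set;
        struct request_queue    *queue;
        spinlock_t              lock;   /* protects the state below */
        int                     in_flight[MMC_ISSUE_MAX];
        unsigned int            cqe_busy;
        bool                    busy;
        bool                    use_cqe;
        bool                    recovery_needed;
        bool                    in_recovery;
};

Keeping the lock inside struct mmc_queue means the driver-private state no longer depends on the request_queue's lock pointer at all.
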
@@ -197,9 +197,15 @@
                 blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
 }

+static unsigned int mmc_get_max_segments(struct mmc_host *host)
+{
+        return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
+                                         host->max_segs;
+}
+
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
- * @q: the request queue
+ * @mq: the request queue
  * @req: the request
  * @gfp: memory allocation policy
  */
@@ -210,7 +216,7 @@
         struct mmc_card *card = mq->card;
         struct mmc_host *host = card->host;

-        mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+        mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
         if (!mq_rq->sg)
                 return -ENOMEM;

@@ -259,10 +265,10 @@

         issue_type = mmc_issue_type(mq, req);

-        spin_lock_irq(q->queue_lock);
+        spin_lock_irq(&mq->lock);

         if (mq->recovery_needed || mq->busy) {
-                spin_unlock_irq(q->queue_lock);
+                spin_unlock_irq(&mq->lock);
                 return BLK_STS_RESOURCE;
         }

@@ -270,7 +276,7 @@
         case MMC_ISSUE_DCMD:
                 if (mmc_cqe_dcmd_busy(mq)) {
                         mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
-                        spin_unlock_irq(q->queue_lock);
+                        spin_unlock_irq(&mq->lock);
                         return BLK_STS_RESOURCE;
                 }
                 break;
@@ -280,7 +286,7 @@
                  * flight to avoid a long latency.
                  */
                 if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
-                        spin_unlock_irq(q->queue_lock);
+                        spin_unlock_irq(&mq->lock);
                         return BLK_STS_RESOURCE;
                 }
                 break;
@@ -303,7 +309,7 @@
         get_card = (mmc_tot_in_flight(mq) == 1);
         cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

-        spin_unlock_irq(q->queue_lock);
+        spin_unlock_irq(&mq->lock);

         if (!(req->rq_flags & RQF_DONTPREP)) {
                 req_to_mmc_queue_req(req)->retries = 0;
@@ -337,12 +343,12 @@
         if (issued != MMC_REQ_STARTED) {
                 bool put_card = false;

-                spin_lock_irq(q->queue_lock);
+                spin_lock_irq(&mq->lock);
                 mq->in_flight[issue_type] -= 1;
                 if (mmc_tot_in_flight(mq) == 0)
                         put_card = true;
                 mq->busy = false;
-                spin_unlock_irq(q->queue_lock);
+                spin_unlock_irq(&mq->lock);
                 if (put_card)
                         mmc_put_card(card, &mq->ctx);
         } else {
@@ -363,21 +369,22 @@
 static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 {
         struct mmc_host *host = card->host;
-        u64 limit = BLK_BOUNCE_HIGH;
         unsigned block_size = 512;
-
-        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

         blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
         blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
         if (mmc_can_erase(card))
                 mmc_queue_setup_discard(mq->queue, card);

-        blk_queue_bounce_limit(mq->queue, limit);
+        if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
+                blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
         blk_queue_max_hw_sectors(mq->queue,
                 min(host->max_blk_count, host->max_req_size / 512));
-        blk_queue_max_segments(mq->queue, host->max_segs);
+        if (host->can_dma_map_merge)
+                WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
+                                                        mmc_dev(host)),
+                     "merging was advertised but not possible");
+        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

         if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
                 block_size = card->ext_csd.data_sector_size;
@@ -385,8 +392,16 @@
         }

         blk_queue_logical_block_size(mq->queue, block_size);
-        blk_queue_max_segment_size(mq->queue,
+        /*
+         * When blk_queue_can_use_dma_map_merging() succeeds it already calls
+         * blk_queue_virt_boundary(), so MMC must not also call
+         * blk_queue_max_segment_size() on that path.
+         */
+        if (!host->can_dma_map_merge)
+                blk_queue_max_segment_size(mq->queue,
                         round_down(host->max_seg_size, block_size));
+
+        dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

         INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
         INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
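
Note: the comment above refers to the block-layer helper called from mmc_setup_queue(). As a rough sketch of how that helper behaves in kernels of this vintage (shown only for context, not part of this change), it derives the queue's virt boundary from the device's DMA merge boundary, which is why no separate per-segment size cap is applied when merging is in use:

/* block/blk-settings.c (approximate, for reference) */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
                                       struct device *dev)
{
        unsigned long boundary = dma_get_merge_boundary(dev);

        if (!boundary)
                return false;

        /* Use the IOMMU merge boundary as the queue's virt boundary. */
        blk_queue_virt_boundary(q, boundary);

        return true;
}
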
@@ -394,22 +409,67 @@
         mutex_init(&mq->complete_lock);

         init_waitqueue_head(&mq->wait);
+
+        mmc_crypto_setup_queue(mq->queue, host);
 }

-static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
-                             const struct blk_mq_ops *mq_ops, spinlock_t *lock)
+static inline bool mmc_merge_capable(struct mmc_host *host)
 {
+        return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
+}
+
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#ifdef CONFIG_MMC_QUEUE_DEPTH
+#define MMC_QUEUE_DEPTH CONFIG_MMC_QUEUE_DEPTH
+#else
+#define MMC_QUEUE_DEPTH 64
+#endif
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ *
+ * Initialise a MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+        struct mmc_host *host = card->host;
         int ret;

+        mq->card = card;
+        mq->use_cqe = host->cqe_enabled;
+
+        spin_lock_init(&mq->lock);
+
         memset(&mq->tag_set, 0, sizeof(mq->tag_set));
-        mq->tag_set.ops = mq_ops;
-        mq->tag_set.queue_depth = q_depth;
+        mq->tag_set.ops = &mmc_mq_ops;
+        /*
+         * The queue depth for CQE must match the hardware because the request
+         * tag is used to index the hardware queue.
+         */
+        if (mq->use_cqe && !host->hsq_enabled)
+                mq->tag_set.queue_depth =
+                        min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+        else
+                mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
         mq->tag_set.numa_node = NUMA_NO_NODE;
-        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
-                            BLK_MQ_F_BLOCKING;
+        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
         mq->tag_set.nr_hw_queues = 1;
         mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
         mq->tag_set.driver_data = mq;
+
+        /*
+         * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
+         * the host->can_dma_map_merge should be set before to get max_segs
+         * from mmc_get_max_segments().
+         */
+        if (mmc_merge_capable(host) &&
+            host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
+            dma_get_merge_boundary(mmc_dev(host)))
+                host->can_dma_map_merge = 1;
+        else
+                host->can_dma_map_merge = 0;

         ret = blk_mq_alloc_tag_set(&mq->tag_set);
         if (ret)
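
Note: host->can_dma_map_merge is only set when the host driver advertises MMC_CAP2_MERGE_CAPABLE, its hardware segment limit is below MMC_DMA_MAP_MERGE_SEGMENTS, and the DMA layer reports a merge boundary (i.e. an IOMMU that can coalesce pages into larger IOVA segments). A minimal, hypothetical host-driver opt-in might look like the probe excerpt below; the driver name, limits and error handling are invented for illustration and are not taken from this change:

#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int foo_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        int ret;

        mmc = mmc_alloc_host(0, &pdev->dev);
        if (!mmc)
                return -ENOMEM;

        mmc->max_segs = 64;                     /* hardware descriptor limit */
        mmc->max_req_size = SZ_4M;
        mmc->caps2 |= MMC_CAP2_MERGE_CAPABLE;   /* allow dma_map_sg() merging */

        ret = mmc_add_host(mmc);
        if (ret)
                mmc_free_host(mmc);

        return ret;
}

With the capability set, mmc_get_max_segments() returns 512, and the block layer may build requests with that many segments on the expectation that dma_map_sg() merges them down to what the controller can actually handle.
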
@@ -421,68 +481,18 @@
                 goto free_tag_set;
         }

-        mq->queue->queue_lock = lock;
-        mq->queue->queuedata = mq;
+        if (mmc_host_is_spi(host) && host->use_spi_crc)
+                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);

+        mq->queue->queuedata = mq;
+        blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+        mmc_setup_queue(mq, card);
         return 0;

 free_tag_set:
         blk_mq_free_tag_set(&mq->tag_set);
-
         return ret;
-}
-
-/* Set queue depth to get a reasonable value for q->nr_requests */
-#define MMC_QUEUE_DEPTH 64
-
-static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
-                       spinlock_t *lock)
-{
-        struct mmc_host *host = card->host;
-        int q_depth;
-        int ret;
-
-        /*
-         * The queue depth for CQE must match the hardware because the request
-         * tag is used to index the hardware queue.
-         */
-        if (mq->use_cqe && !host->hsq_enabled)
-                q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
-        else
-                q_depth = MMC_QUEUE_DEPTH;
-
-        ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
-        if (ret)
-                return ret;
-
-        blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
-        mmc_setup_queue(mq, card);
-
-        mmc_crypto_setup_queue(host, mq->queue);
-
-        return 0;
-}
-
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
-                   spinlock_t *lock, const char *subname)
-{
-        struct mmc_host *host = card->host;
-
-        mq->card = card;
-
-        mq->use_cqe = host->cqe_enabled;
-
-        return mmc_mq_init(mq, card, lock);
 }

 void mmc_queue_suspend(struct mmc_queue *mq)
---|