.. | ..
47 | 47 |
48 | 48 | #include <linux/uaccess.h>
49 | 49 |
 | 50 | +#include <trace/hooks/mmc_core.h>
 | 51 | +
50 | 52 | #include "queue.h"
51 | 53 | #include "block.h"
52 | 54 | #include "core.h"
.. | ..
71 | 73 | * ample.
72 | 74 | */
73 | 75 | #define MMC_BLK_TIMEOUT_MS (10 * 1000)
74 | | -#define MMC_SANITIZE_REQ_TIMEOUT 240000
75 | 76 | #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
76 | 77 | #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
77 | 78 |
.. | ..
101 | 102 | * There is one mmc_blk_data per slot.
102 | 103 | */
103 | 104 | struct mmc_blk_data {
104 | | - spinlock_t lock;
105 | 105 | struct device *parent;
106 | 106 | struct gendisk *disk;
107 | 107 | struct mmc_queue queue;
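Note: the per-device `md->lock` goes away because request synchronization now lives in the mmc_queue itself; the later hunks switch every `q->queue_lock` user over to `&mq->lock`, and mmc_init_queue() no longer takes a lock argument. A minimal sketch of the assumed queue-side counterpart (field names taken from the hunks below; the full struct mmc_queue layout is abbreviated):

	/* drivers/mmc/core/queue.h (sketch, abbreviated) */
	struct mmc_queue {
		struct mmc_card	*card;
		spinlock_t	lock;	/* replaces md->lock / q->queue_lock */
		int		in_flight[MMC_ISSUE_MAX];
		bool		recovery_needed, rw_wait, waiting;
		/* ... */
	};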
.. | ..
172 | 172 | unsigned int part_type);
173 | 173 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
174 | 174 | struct mmc_card *card,
175 | | - int disable_multi,
 | 175 | + int recovery_mode,
176 | 176 | struct mmc_queue *mq);
177 | 177 | static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
178 | 178 |
.. | ..
256 | 256 | goto out_put;
257 | 257 | }
258 | 258 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
 | 259 | + req_to_mmc_queue_req(req)->drv_op_result = -EIO;
259 | 260 | blk_execute_rq(mq->queue, NULL, req, 0);
260 | 261 | ret = req_to_mmc_queue_req(req)->drv_op_result;
261 | 262 | blk_put_request(req);
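Note: presetting `drv_op_result` to -EIO before `blk_execute_rq()` (repeated below for the ioctl, card-status, and EXT_CSD paths) closes a hole where a request that never reached the driver op handler would report whatever stale value happened to be in the field. A sketch of the pattern with the intent spelled out:

	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	/* pessimistic default: stays -EIO unless the driver op actually
	 * ran and stored a real result */
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ret = req_to_mmc_queue_req(req)->drv_op_result;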
.. | ..
315 | 316 |
316 | 317 | mutex_lock(&block_mutex);
317 | 318 | if (md) {
318 | | - if (md->usage == 2)
319 | | - check_disk_change(bdev);
320 | 319 | ret = 0;
321 | | -
322 | 320 | if ((mode & FMODE_WRITE) && md->read_only) {
323 | 321 | mmc_blk_put(md);
324 | 322 | ret = -EROFS;
.. | ..
415 | 413 | return 0;
416 | 414 | }
417 | 415 |
418 | | -static int ioctl_do_sanitize(struct mmc_card *card)
419 | | -{
420 | | - int err;
421 | | -
422 | | - if (!mmc_can_sanitize(card)) {
423 | | - pr_warn("%s: %s - SANITIZE is not supported\n",
424 | | - mmc_hostname(card->host), __func__);
425 | | - err = -EOPNOTSUPP;
426 | | - goto out;
427 | | - }
428 | | -
429 | | - pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
430 | | - mmc_hostname(card->host), __func__);
431 | | -
432 | | - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
433 | | - EXT_CSD_SANITIZE_START, 1,
434 | | - MMC_SANITIZE_REQ_TIMEOUT);
435 | | -
436 | | - if (err)
437 | | - pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
438 | | - mmc_hostname(card->host), __func__, err);
439 | | -
440 | | - pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
441 | | - __func__);
442 | | -out:
443 | | - return err;
444 | | -}
445 | | -
446 | | -static inline bool mmc_blk_in_tran_state(u32 status)
447 | | -{
448 | | - /*
449 | | - * Some cards mishandle the status bits, so make sure to check both the
450 | | - * busy indication and the card state.
451 | | - */
452 | | - return status & R1_READY_FOR_DATA &&
453 | | - (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
454 | | -}
455 | | -
456 | 416 | static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
457 | 417 | u32 *resp_errs)
458 | 418 | {
459 | | - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 | 419 | + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms > 2000 ? 2000 : timeout_ms);
460 | 420 | int err = 0;
461 | 421 | u32 status;
462 | 422 |
463 | 423 | do {
464 | 424 | bool done = time_after(jiffies, timeout);
465 | 425 |
466 | | - if (card->host->ops->card_busy) {
 | 426 | + if (!(card->host->caps2 & MMC_CAP2_NO_SD) && card->host->ops->card_busy) {
467 | 427 | status = card->host->ops->card_busy(card->host) ?
468 | 428 | 0 : R1_READY_FOR_DATA | R1_STATE_TRAN << 9;
469 | | - usleep_range(100, 150);
 | 429 | +
 | 430 | + if (!status)
 | 431 | + usleep_range(100, 150);
470 | 432 | } else {
471 | 433 | err = __mmc_send_status(card, &status, 5);
472 | 434 | if (err) {
.. | ..
490 | 452 | __func__, status);
491 | 453 | return -ETIMEDOUT;
492 | 454 | }
493 | | -
494 | | - /*
495 | | - * Some cards mishandle the status bits,
496 | | - * so make sure to check both the busy
497 | | - * indication and the card state.
498 | | - */
499 | | - } while (!mmc_blk_in_tran_state(status));
 | 455 | + } while (!mmc_ready_for_data(status));
500 | 456 |
501 | 457 | return err;
502 | 458 | }
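Note: `mmc_blk_in_tran_state()` was open-coded here and at the other R1 status checks below; it is replaced throughout by the shared core helper `mmc_ready_for_data()`. A sketch consistent with the removed body (upstream the helper lives in include/linux/mmc/mmc.h):

	static inline bool mmc_ready_for_data(u32 status)
	{
		/*
		 * Some cards mishandle the status bits, so make sure to
		 * check both the busy indication and the card state.
		 */
		return status & R1_READY_FOR_DATA &&
		       R1_CURRENT_STATE(status) == R1_STATE_TRAN;
	}

The 2000 ms clamp on `timeout_ms` and the MMC_CAP2_NO_SD test in card_busy_detect() are vendor additions, not part of the matching upstream cleanup.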
.. | ..
593 | 549 | }
594 | 550 |
595 | 551 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
596 | | - (cmd.opcode == MMC_SWITCH)) {
597 | | - err = ioctl_do_sanitize(card);
598 | | -
599 | | - if (err)
600 | | - pr_err("%s: ioctl_do_sanitize() failed. err = %d",
601 | | - __func__, err);
602 | | -
603 | | - return err;
604 | | - }
 | 552 | + (cmd.opcode == MMC_SWITCH))
 | 553 | + return mmc_sanitize(card);
605 | 554 |
606 | 555 | mmc_wait_for_req(card->host, &mrq);
607 | 556 | memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
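Note: sanitize handling moves out of the block driver into the core as `mmc_sanitize()`, which also owns the 240 s timeout that `MMC_SANITIZE_REQ_TIMEOUT` used to provide here. A sketch of the core helper, reconstructed from the removed ioctl_do_sanitize(); treat the exact upstream body as an assumption:

	/* drivers/mmc/core/mmc_ops.c (sketch) */
	int mmc_sanitize(struct mmc_card *card)
	{
		struct mmc_host *host = card->host;
		int err;

		if (!mmc_can_sanitize(card)) {
			pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
			return -EOPNOTSUPP;
		}

		/* same switch as before: 240 s worst-case completion time */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_SANITIZE_START, 1, 240 * 1000);
		if (err)
			pr_err("%s: Sanitize failed err=%d\n",
			       mmc_hostname(host), err);

		return err;
	}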
.. | ..
701 | 650 | idatas[0] = idata;
702 | 651 | req_to_mmc_queue_req(req)->drv_op =
703 | 652 | rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
 | 653 | + req_to_mmc_queue_req(req)->drv_op_result = -EIO;
704 | 654 | req_to_mmc_queue_req(req)->drv_op_data = idatas;
705 | 655 | req_to_mmc_queue_req(req)->ioc_count = 1;
706 | 656 | blk_execute_rq(mq->queue, NULL, req, 0);
.. | ..
770 | 720 | }
771 | 721 | req_to_mmc_queue_req(req)->drv_op =
772 | 722 | rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
 | 723 | + req_to_mmc_queue_req(req)->drv_op_result = -EIO;
773 | 724 | req_to_mmc_queue_req(req)->drv_op_data = idata;
774 | 725 | req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
775 | 726 | blk_execute_rq(mq->queue, NULL, req, 0);
.. | ..
797 | 748 | * whole block device, not on a partition. This prevents overspray
798 | 749 | * between sibling partitions.
799 | 750 | */
800 | | - if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
 | 751 | + if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
801 | 752 | return -EPERM;
802 | 753 | return 0;
803 | 754 | }
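Note: `bdev != bdev->bd_contains` was the old idiom for "this block device is a partition"; the block layer now exports a named helper for it. A sketch of the helper as of the 5.10-era layout (an assumption; the backing field changed again in later kernels):

	/* include/linux/blkdev.h (sketch) */
	static inline bool bdev_is_partition(struct block_device *bdev)
	{
		return bdev->bd_partno;	/* non-zero only for partitions */
	}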
.. | ..
1024 | 975 | struct mmc_blk_data *main_md =
1025 | 976 | dev_get_drvdata(&host->card->dev);
1026 | 977 | int part_err;
 | 978 | + bool allow = true;
 | 979 | +
 | 980 | + trace_android_vh_mmc_blk_reset(host, err, &allow);
 | 981 | + if (!allow)
 | 982 | + return -ENODEV;
1027 | 983 |
1028 | 984 | main_md->part_curr = main_md->part_type;
1029 | 985 | part_err = mmc_blk_part_switch(host->card, md->part_type);
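Note: `trace_android_vh_mmc_blk_reset()` is an Android GKI vendor hook, declared by the `<trace/hooks/mmc_core.h>` include added at the top; a vendor module can attach a handler and veto the reset by clearing `allow`. A sketch of the declaration, with the prototype inferred from this call site (an assumption, not verified against the header):

	/* include/trace/hooks/mmc_core.h (sketch) */
	DECLARE_HOOK(android_vh_mmc_blk_reset,
		TP_PROTO(struct mmc_host *host, int err, bool *allow),
		TP_ARGS(host, err, allow));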
.. | ..
1070 | 1026 | if (ret)
1071 | 1027 | break;
1072 | 1028 | }
1073 | | - /* fallthrough */
 | 1029 | + fallthrough;
1074 | 1030 | case MMC_DRV_OP_IOCTL_RPMB:
1075 | 1031 | idata = mq_rq->drv_op_data;
1076 | 1032 | for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
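Note: the `/* fallthrough */` comment becomes the `fallthrough;` pseudo-keyword so the compiler can verify the intent under -Wimplicit-fallthrough. It expands roughly to:

	/* include/linux/compiler_attributes.h (sketch) */
	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif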
.. | ..
1119 | 1075 | {
1120 | 1076 | struct mmc_blk_data *md = mq->blkdata;
1121 | 1077 | struct mmc_card *card = md->queue.card;
1122 | | - unsigned int from, nr, arg;
 | 1078 | + unsigned int from, nr;
1123 | 1079 | int err = 0, type = MMC_BLK_DISCARD;
1124 | 1080 | blk_status_t status = BLK_STS_OK;
1125 | 1081 |
.. | ..
1131 | 1087 | from = blk_rq_pos(req);
1132 | 1088 | nr = blk_rq_sectors(req);
1133 | 1089 |
1134 | | - if (mmc_can_discard(card))
1135 | | - arg = MMC_DISCARD_ARG;
1136 | | - else if (mmc_can_trim(card))
1137 | | - arg = MMC_TRIM_ARG;
1138 | | - else
1139 | | - arg = MMC_ERASE_ARG;
1140 | 1090 | do {
 | 1091 | + unsigned int erase_arg = card->erase_arg;
 | 1092 | +
 | 1093 | + if (mmc_card_broken_sd_discard(card))
 | 1094 | + erase_arg = SD_ERASE_ARG;
 | 1095 | +
1141 | 1096 | err = 0;
1142 | 1097 | if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1143 | 1098 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1144 | 1099 | INAND_CMD38_ARG_EXT_CSD,
1145 | | - arg == MMC_TRIM_ARG ?
 | 1100 | + card->erase_arg == MMC_TRIM_ARG ?
1146 | 1101 | INAND_CMD38_ARG_TRIM :
1147 | 1102 | INAND_CMD38_ARG_ERASE,
1148 | | - 0);
 | 1103 | + card->ext_csd.generic_cmd6_time);
1149 | 1104 | }
1150 | 1105 | if (!err)
1151 | | - err = mmc_erase(card, from, nr, arg);
 | 1106 | + err = mmc_erase(card, from, nr, erase_arg);
1152 | 1107 | } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
1153 | 1108 | if (err)
1154 | 1109 | status = BLK_STS_IOERR;
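Note: the per-request discard/trim/erase selection moves to card init time (`card->erase_arg`), and `mmc_card_broken_sd_discard()` downgrades quirky SD cards to a plain erase per request. The init-time selection mirrors the block removed above; a sketch:

	/* at card init (sketch; mirrors the removed per-request logic) */
	if (mmc_can_discard(card))
		card->erase_arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		card->erase_arg = MMC_TRIM_ARG;
	else
		card->erase_arg = MMC_ERASE_ARG;

The CMD38 `mmc_switch()` calls here and in the secure-erase hunks below also gain a real timeout, `card->ext_csd.generic_cmd6_time`, instead of 0.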
.. | ..
1187 | 1142 | arg == MMC_SECURE_TRIM1_ARG ?
1188 | 1143 | INAND_CMD38_ARG_SECTRIM1 :
1189 | 1144 | INAND_CMD38_ARG_SECERASE,
1190 | | - 0);
 | 1145 | + card->ext_csd.generic_cmd6_time);
1191 | 1146 | if (err)
1192 | 1147 | goto out_retry;
1193 | 1148 | }
.. | ..
1205 | 1160 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1206 | 1161 | INAND_CMD38_ARG_EXT_CSD,
1207 | 1162 | INAND_CMD38_ARG_SECTRIM2,
1208 | | - 0);
 | 1163 | + card->ext_csd.generic_cmd6_time);
1209 | 1164 | if (err)
1210 | 1165 | goto out_retry;
1211 | 1166 | }
.. | ..
1316 | 1271 | }
1317 | 1272 |
1318 | 1273 | static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
1319 | | - int disable_multi, bool *do_rel_wr_p,
 | 1274 | + int recovery_mode, bool *do_rel_wr_p,
1320 | 1275 | bool *do_data_tag_p)
1321 | 1276 | {
1322 | 1277 | struct mmc_blk_data *md = mq->blkdata;
.. | ..
1382 | 1337 | brq->data.blocks--;
1383 | 1338 |
1384 | 1339 | /*
1385 | | - * After a read error, we redo the request one sector
 | 1340 | + * After a read error, we redo the request one (native) sector
1386 | 1341 | * at a time in order to accurately determine which
1387 | 1342 | * sectors can be read successfully.
1388 | 1343 | */
1389 | | - if (disable_multi)
1390 | | - brq->data.blocks = 1;
 | 1344 | + if (recovery_mode)
 | 1345 | + brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
1391 | 1346 |
1392 | 1347 | /*
1393 | 1348 | * Some controllers have HW issues while operating
.. | ..
1485 | 1440 | blk_mq_end_request(req, BLK_STS_OK);
1486 | 1441 | }
1487 | 1442 |
1488 | | - spin_lock_irqsave(q->queue_lock, flags);
 | 1443 | + spin_lock_irqsave(&mq->lock, flags);
1489 | 1444 |
1490 | 1445 | mq->in_flight[issue_type] -= 1;
1491 | 1446 |
.. | ..
1493 | 1448 |
1494 | 1449 | mmc_cqe_check_busy(mq);
1495 | 1450 |
1496 | | - spin_unlock_irqrestore(q->queue_lock, flags);
 | 1451 | + spin_unlock_irqrestore(&mq->lock, flags);
1497 | 1452 |
1498 | 1453 | if (!mq->cqe_busy)
1499 | 1454 | blk_mq_run_hw_queues(q, true);
.. | ..
1513 | 1468 | err = mmc_cqe_recovery(host);
1514 | 1469 | if (err)
1515 | 1470 | mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
1516 | | - else
1517 | | - mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
 | 1471 | + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
1518 | 1472 |
1519 | 1473 | pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
1520 | 1474 | }
.. | ..
1533 | 1487 | */
1534 | 1488 | if (mq->in_recovery)
1535 | 1489 | mmc_blk_cqe_complete_rq(mq, req);
1536 | | - else
 | 1490 | + else if (likely(!blk_should_fake_timeout(req->q)))
1537 | 1491 | blk_mq_complete_request(req);
1538 | 1492 | }
1539 | 1493 |
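Note: `blk_should_fake_timeout()` supports the block layer's timeout fault injection (CONFIG_FAIL_IO_TIMEOUT). Drivers that call `blk_mq_complete_request()` directly must skip the completion when the block layer wants to simulate a timeout; the same guard is added at every completion site below. Without the config option it is a constant, so the `likely()` costs nothing. A sketch of the declaration and stub:

	/* include/linux/blk-mq.h (sketch) */
	#ifdef CONFIG_FAIL_IO_TIMEOUT
	bool blk_should_fake_timeout(struct request_queue *q);
	#else
	static inline bool blk_should_fake_timeout(struct request_queue *q)
	{
		return false;
	}
	#endif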
.. | ..
1605 | 1559 |
1606 | 1560 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1607 | 1561 | struct mmc_card *card,
1608 | | - int disable_multi,
 | 1562 | + int recovery_mode,
1609 | 1563 | struct mmc_queue *mq)
1610 | 1564 | {
1611 | 1565 | u32 readcmd, writecmd;
.. | ..
1614 | 1568 | struct mmc_blk_data *md = mq->blkdata;
1615 | 1569 | bool do_rel_wr, do_data_tag;
1616 | 1570 |
1617 | | - mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
 | 1571 | + mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
1618 | 1572 |
1619 | 1573 | brq->mrq.cmd = &brq->cmd;
1620 | 1574 |
.. | ..
1705 | 1659 |
1706 | 1660 | #define MMC_READ_SINGLE_RETRIES 2
1707 | 1661 |
1708 | | -/* Single sector read during recovery */
 | 1662 | +/* Single (native) sector read during recovery */
1709 | 1663 | static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
1710 | 1664 | {
1711 | 1665 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
.. | ..
1713 | 1667 | struct mmc_card *card = mq->card;
1714 | 1668 | struct mmc_host *host = card->host;
1715 | 1669 | blk_status_t error = BLK_STS_OK;
 | 1670 | + size_t bytes_per_read = queue_physical_block_size(mq->queue);
1716 | 1671 |
1717 | 1672 | do {
1718 | 1673 | u32 status;
.. | ..
1729 | 1684 | goto error_exit;
1730 | 1685 |
1731 | 1686 | if (!mmc_host_is_spi(host) &&
1732 | | - !mmc_blk_in_tran_state(status)) {
 | 1687 | + !mmc_ready_for_data(status)) {
1733 | 1688 | err = mmc_blk_fix_state(card, req);
1734 | 1689 | if (err)
1735 | 1690 | goto error_exit;
.. | ..
1747 | 1702 | else
1748 | 1703 | error = BLK_STS_OK;
1749 | 1704 |
1750 | | - } while (blk_update_request(req, error, 512));
 | 1705 | + } while (blk_update_request(req, error, bytes_per_read));
1751 | 1706 |
1752 | 1707 | return;
1753 | 1708 |
1754 | 1709 | error_exit:
1755 | 1710 | mrq->data->bytes_xfered = 0;
1756 | | - blk_update_request(req, BLK_STS_IOERR, 512);
 | 1711 | + blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
1757 | 1712 | /* Let it try the remaining request again */
1758 | 1713 | if (mqrq->retries > MMC_MAX_RETRIES - 1)
1759 | 1714 | mqrq->retries = MMC_MAX_RETRIES - 1;
.. | ..
1788 | 1743 | return brq->cmd.resp[0] & CMD_ERRORS ||
1789 | 1744 | brq->stop.resp[0] & stop_err_bits ||
1790 | 1745 | status & stop_err_bits ||
1791 | | - (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
 | 1746 | + (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
1792 | 1747 | }
1793 | 1748 |
1794 | 1749 | static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
.. | ..
1829 | 1784 | * bytes transferred to zero in that case.
1830 | 1785 | */
1831 | 1786 | err = __mmc_send_status(card, &status, 0);
1832 | | - if (err || mmc_blk_status_error(req, status))
 | 1787 | + if (err || mmc_blk_status_error(req, status)) {
1833 | 1788 | brq->data.bytes_xfered = 0;
 | 1789 | + if (mmc_card_sd(card) && !mmc_card_removed(card)) {
 | 1790 | + mmc_blk_reset_success(mq->blkdata, type);
 | 1791 | + if (!mmc_blk_reset(md, card->host, type))
 | 1792 | + return;
 | 1793 | + pr_err("%s: pre recovery failed!\n", req->rq_disk->disk_name);
 | 1794 | + }
 | 1795 | + }
1834 | 1796 |
1835 | 1797 | mmc_retune_release(card->host);
1836 | 1798 |
.. | ..
1850 | 1812 |
1851 | 1813 | /* Try to get back to "tran" state */
1852 | 1814 | if (!mmc_host_is_spi(mq->card->host) &&
1853 | | - (err || !mmc_blk_in_tran_state(status)))
 | 1815 | + (err || !mmc_ready_for_data(status)))
1854 | 1816 | err = mmc_blk_fix_state(mq->card, req);
1855 | 1817 |
1856 | 1818 | /*
.. | ..
1870 | 1832 | err && mmc_blk_reset(md, card->host, type)) {
1871 | 1833 | pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
1872 | 1834 | mqrq->retries = MMC_NO_RETRIES;
 | 1835 | + trace_android_vh_mmc_blk_mq_rw_recovery(card);
1873 | 1836 | return;
1874 | 1837 | }
1875 | 1838 |
.. | ..
1894 | 1857 | return;
1895 | 1858 | }
1896 | 1859 |
1897 | | - /* FIXME: Missing single sector read for large sector size */
1898 | | - if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
1899 | | - brq->data.blocks > 1) {
1900 | | - /* Read one sector at a time */
 | 1860 | + if (rq_data_dir(req) == READ && brq->data.blocks >
 | 1861 | + queue_physical_block_size(mq->queue) >> 9) {
 | 1862 | + /* Read one (native) sector at a time */
1901 | 1863 | mmc_blk_read_single(mq, req);
1902 | 1864 | return;
1903 | 1865 | }
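Note: single-sector recovery reads previously only worked for 512-byte-sector cards (the FIXME removed above); they now step by the native sector size, so 4 KiB-native eMMC is covered too. The arithmetic, assuming the standard 512-byte MMC block unit:

	/* one recovery read = one native sector, expressed in 512 B MMC blocks */
	brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
	/* 512 B native sector  -> 1 block (old behaviour)
	 * 4096 B native sector -> 8 blocks per mmc_blk_read_single() pass */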
.. | ..
1979 | 1941 | struct mmc_queue_req *mqrq)
1980 | 1942 | {
1981 | 1943 | if (mmc_blk_urgent_bkops_needed(mq, mqrq))
1982 | | - mmc_start_bkops(mq->card, true);
 | 1944 | + mmc_run_bkops(mq->card);
1983 | 1945 | }
1984 | 1946 |
1985 | 1947 | static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
1986 | 1948 | {
1987 | 1949 | struct mmc_queue_req *mqrq =
1988 | | - container_of(mrq, struct mmc_queue_req, brq.mrq);
 | 1950 | + container_of(mrq, struct mmc_queue_req, brq.mrq);
1989 | 1951 | struct request *req = mmc_queue_req_to_req(mqrq);
1990 | 1952 | struct request_queue *q = req->q;
1991 | 1953 | struct mmc_queue *mq = q->queuedata;
.. | ..
1994 | 1956 |
1995 | 1957 | if (mmc_blk_rq_error(&mqrq->brq) ||
1996 | 1958 | mmc_blk_urgent_bkops_needed(mq, mqrq)) {
1997 | | - spin_lock_irqsave(q->queue_lock, flags);
 | 1959 | + spin_lock_irqsave(&mq->lock, flags);
1998 | 1960 | mq->recovery_needed = true;
1999 | 1961 | mq->recovery_req = req;
2000 | | - spin_unlock_irqrestore(q->queue_lock, flags);
 | 1962 | + spin_unlock_irqrestore(&mq->lock, flags);
2001 | 1963 |
2002 | 1964 | host->cqe_ops->cqe_recovery_start(host);
2003 | 1965 |
.. | ..
2013 | 1975 | */
2014 | 1976 | if (mq->in_recovery)
2015 | 1977 | mmc_blk_cqe_complete_rq(mq, req);
2016 | | - else
 | 1978 | + else if (likely(!blk_should_fake_timeout(req->q)))
2017 | 1979 | blk_mq_complete_request(req);
2018 | 1980 | }
2019 | 1981 |
.. | ..
2023 | 1985 |
2024 | 1986 | if (mq->use_cqe)
2025 | 1987 | mmc_blk_cqe_complete_rq(mq, req);
2026 | | - else
 | 1988 | + else if (likely(!blk_should_fake_timeout(req->q)))
2027 | 1989 | mmc_blk_mq_complete_rq(mq, req);
2028 | 1990 | }
2029 | 1991 |
.. | ..
2044 | 2006 | mmc_blk_urgent_bkops(mq, mqrq);
2045 | 2007 | }
2046 | 2008 |
2047 | | -static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
 | 2009 | +static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
2048 | 2010 | {
2049 | | - struct request_queue *q = req->q;
2050 | 2011 | unsigned long flags;
2051 | 2012 | bool put_card;
2052 | 2013 |
2053 | | - spin_lock_irqsave(q->queue_lock, flags);
 | 2014 | + spin_lock_irqsave(&mq->lock, flags);
2054 | 2015 |
2055 | | - mq->in_flight[mmc_issue_type(mq, req)] -= 1;
 | 2016 | + mq->in_flight[issue_type] -= 1;
2056 | 2017 |
2057 | 2018 | put_card = (mmc_tot_in_flight(mq) == 0);
2058 | 2019 |
2059 | | - spin_unlock_irqrestore(q->queue_lock, flags);
 | 2020 | + spin_unlock_irqrestore(&mq->lock, flags);
2060 | 2021 |
2061 | 2022 | if (put_card)
2062 | 2023 | mmc_put_card(mq->card, &mq->ctx);
.. | ..
2064 | 2025 |
2065 | 2026 | static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
2066 | 2027 | {
 | 2028 | + enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
2067 | 2029 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2068 | 2030 | struct mmc_request *mrq = &mqrq->brq.mrq;
2069 | 2031 | struct mmc_host *host = mq->card->host;
.. | ..
2076 | 2038 | */
2077 | 2039 | if (mq->in_recovery)
2078 | 2040 | mmc_blk_mq_complete_rq(mq, req);
2079 | | - else
 | 2041 | + else if (likely(!blk_should_fake_timeout(req->q)))
2080 | 2042 | blk_mq_complete_request(req);
2081 | 2043 |
2082 | | - mmc_blk_mq_dec_in_flight(mq, req);
 | 2044 | + mmc_blk_mq_dec_in_flight(mq, issue_type);
2083 | 2045 | }
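Note: once `blk_mq_complete_request()` runs, `req` may be recycled, so anything derived from it has to be captured first. That is why the issue type is now read at the top of mmc_blk_mq_post_req() and passed by value to mmc_blk_mq_dec_in_flight(), which previously dereferenced `req` after the completion. The hazard, annotated:

	/* capture before completion: the request may be freed underneath us */
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	...
	blk_mq_complete_request(req);	/* req must not be touched after this */
	mmc_blk_mq_dec_in_flight(mq, issue_type);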
2084 | 2046 |
2085 | 2047 | void mmc_blk_mq_recovery(struct mmc_queue *mq)
.. | ..
2152 | 2114 | * request does not need to wait (although it does need to
2153 | 2115 | * complete complete_req first).
2154 | 2116 | */
2155 | | - spin_lock_irqsave(q->queue_lock, flags);
 | 2117 | + spin_lock_irqsave(&mq->lock, flags);
2156 | 2118 | mq->complete_req = req;
2157 | 2119 | mq->rw_wait = false;
2158 | 2120 | waiting = mq->waiting;
2159 | | - spin_unlock_irqrestore(q->queue_lock, flags);
 | 2121 | + spin_unlock_irqrestore(&mq->lock, flags);
2160 | 2122 |
2161 | 2123 | /*
2162 | 2124 | * If 'waiting' then the waiting task will complete this
.. | ..
2175 | 2137 | /* Take the recovery path for errors or urgent background operations */
2176 | 2138 | if (mmc_blk_rq_error(&mqrq->brq) ||
2177 | 2139 | mmc_blk_urgent_bkops_needed(mq, mqrq)) {
2178 | | - spin_lock_irqsave(q->queue_lock, flags);
 | 2140 | + spin_lock_irqsave(&mq->lock, flags);
2179 | 2141 | mq->recovery_needed = true;
2180 | 2142 | mq->recovery_req = req;
2181 | | - spin_unlock_irqrestore(q->queue_lock, flags);
 | 2143 | + spin_unlock_irqrestore(&mq->lock, flags);
2182 | 2144 | wake_up(&mq->wait);
2183 | 2145 | schedule_work(&mq->recovery_work);
2184 | 2146 | return;
.. | ..
2194 | 2156 |
2195 | 2157 | static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
2196 | 2158 | {
2197 | | - struct request_queue *q = mq->queue;
2198 | 2159 | unsigned long flags;
2199 | 2160 | bool done;
2200 | 2161 |
.. | ..
2202 | 2163 | * Wait while there is another request in progress, but not if recovery
2203 | 2164 | * is needed. Also indicate whether there is a request waiting to start.
2204 | 2165 | */
2205 | | - spin_lock_irqsave(q->queue_lock, flags);
 | 2166 | + spin_lock_irqsave(&mq->lock, flags);
2206 | 2167 | if (mq->recovery_needed) {
2207 | 2168 | *err = -EBUSY;
2208 | 2169 | done = true;
.. | ..
2210 | 2171 | done = !mq->rw_wait;
2211 | 2172 | }
2212 | 2173 | mq->waiting = !done;
2213 | | - spin_unlock_irqrestore(q->queue_lock, flags);
 | 2174 | + spin_unlock_irqrestore(&mq->lock, flags);
2214 | 2175 |
2215 | 2176 | return done;
2216 | 2177 | }
.. | ..
2391 | 2352 | goto err_kfree;
2392 | 2353 | }
2393 | 2354 |
2394 | | - spin_lock_init(&md->lock);
2395 | 2355 | INIT_LIST_HEAD(&md->part);
2396 | 2356 | INIT_LIST_HEAD(&md->rpmbs);
2397 | 2357 | md->usage = 1;
2398 | 2358 |
2399 | | - ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 | 2359 | + ret = mmc_init_queue(&md->queue, card);
2400 | 2360 | if (ret)
2401 | 2361 | goto err_putdisk;
2402 | 2362 |
.. | ..
2764 | 2724 | int ret;
2765 | 2725 | struct mmc_card *card = md->queue.card;
2766 | 2726 |
2767 | | - device_add_disk(md->parent, md->disk);
 | 2727 | + device_add_disk(md->parent, md->disk, NULL);
2768 | 2728 | md->force_ro.show = force_ro_show;
2769 | 2729 | md->force_ro.store = force_ro_store;
2770 | 2730 | sysfs_attr_init(&md->force_ro.attr);
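Note: `device_add_disk()` gained a third parameter for sysfs attribute groups that should be registered atomically with the disk; passing NULL keeps the old behaviour. The assumed signature:

	/* include/linux/genhd.h (sketch) */
	void device_add_disk(struct device *parent, struct gendisk *disk,
			     const struct attribute_group **groups);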
.. | ..
2819 | 2779 | if (IS_ERR(req))
2820 | 2780 | return PTR_ERR(req);
2821 | 2781 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
 | 2782 | + req_to_mmc_queue_req(req)->drv_op_result = -EIO;
2822 | 2783 | blk_execute_rq(mq->queue, NULL, req, 0);
2823 | 2784 | ret = req_to_mmc_queue_req(req)->drv_op_result;
2824 | 2785 | if (ret >= 0) {
.. | ..
2829 | 2790 |
2830 | 2791 | return ret;
2831 | 2792 | }
2832 | | -DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
2833 | | - NULL, "%08llx\n");
 | 2793 | +DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
 | 2794 | + NULL, "%08llx\n");
2834 | 2795 |
2835 | 2796 | /* That is two digits * 512 + 1 for newline */
2836 | 2797 | #define EXT_CSD_STR_LEN 1025
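Note: DEFINE_DEBUGFS_ATTRIBUTE pairs with debugfs_create_file_unsafe(), used in the hunk below: the attribute's open path takes debugfs_file_get()/debugfs_file_put() itself, so debugfs does not need to wrap the fops in its removal-safe proxy. The usage pattern, with hypothetical names:

	/* sketch: example_get() feeds the u64 that "%08llx\n" formats */
	DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, NULL, "%08llx\n");

	dentry = debugfs_create_file_unsafe("status", 0400, parent_dir,
					    private_data, &example_fops);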
.. | ..
2857 | 2818 | goto out_free;
2858 | 2819 | }
2859 | 2820 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
 | 2821 | + req_to_mmc_queue_req(req)->drv_op_result = -EIO;
2860 | 2822 | req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
2861 | 2823 | blk_execute_rq(mq->queue, NULL, req, 0);
2862 | 2824 | err = req_to_mmc_queue_req(req)->drv_op_result;
.. | ..
2918 | 2880 |
2919 | 2881 | if (mmc_card_mmc(card) || mmc_card_sd(card)) {
2920 | 2882 | md->status_dentry =
2921 | | - debugfs_create_file("status", S_IRUSR, root, card,
2922 | | - &mmc_dbg_card_status_fops);
 | 2883 | + debugfs_create_file_unsafe("status", 0400, root,
 | 2884 | + card,
 | 2885 | + &mmc_dbg_card_status_fops);
2923 | 2886 | if (!md->status_dentry)
2924 | 2887 | return -EIO;
2925 | 2888 | }
.. | ..
2968 | 2931 |
2969 | 2932 | struct mmc_card *this_card;
2970 | 2933 | EXPORT_SYMBOL(this_card);
 | 2934 | +
2971 | 2935 | static int mmc_blk_probe(struct mmc_card *card)
2972 | 2936 | {
2973 | 2937 | struct mmc_blk_data *md, *part_md;
.. | ..
3003 | 2967 |
3004 | 2968 | dev_set_drvdata(&card->dev, md);
3005 | 2969 |
3006 | | -#if IS_ENABLED(CONFIG_MMC_DW_ROCKCHIP) || IS_ENABLED(CONFIG_MMC_SDHCI_OF_ARASAN)
3007 | | - if (card->host->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
 | 2970 | +#if defined(CONFIG_MMC_DW_ROCKCHIP) || defined(CONFIG_MMC_SDHCI_OF_ARASAN)
 | 2971 | + if (card->type == MMC_TYPE_MMC)
3008 | 2972 | this_card = card;
3009 | 2973 | #endif
3010 | 2974 |
.. | ..
3046 | 3010 | mmc_blk_remove_debugfs(card, md);
3047 | 3011 |
3048 | 3012 | #if defined(CONFIG_MMC_DW_ROCKCHIP)
3049 | | - if (card->host->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
 | 3013 | + if (card->type == MMC_TYPE_MMC)
3050 | 3014 | this_card = NULL;
3051 | 3015 | #endif
3052 | 3016 |
---|