2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/mmc/core/block.c
@@ -47,6 +47,8 @@
 
 #include <linux/uaccess.h>
 
+#include <trace/hooks/mmc_core.h>
+
 #include "queue.h"
 #include "block.h"
 #include "core.h"
@@ -71,7 +73,6 @@
  * ample.
  */
 #define MMC_BLK_TIMEOUT_MS (10 * 1000)
-#define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
 
@@ -101,7 +102,6 @@
  * There is one mmc_blk_data per slot.
  */
 struct mmc_blk_data {
-	spinlock_t lock;
 	struct device *parent;
 	struct gendisk *disk;
 	struct mmc_queue queue;
@@ -172,7 +172,7 @@
 			       unsigned int part_type);
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
-			       int disable_multi,
+			       int recovery_mode,
 			       struct mmc_queue *mq);
 static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
 
@@ -256,6 +256,7 @@
 		goto out_put;
 	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	ret = req_to_mmc_queue_req(req)->drv_op_result;
 	blk_put_request(req);
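
Note on the drv_op_result initialization (this hunk and the matching ones further down): drv_op_result is seeded with -EIO before the request is queued, so a request that is never dispatched, or whose handler never stores a result, reads back as an I/O error instead of uninitialized data. The pattern, sketched with mq, req and ret assumed in scope:

	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	mq_rq->drv_op = MMC_DRV_OP_BOOT_WP;
	mq_rq->drv_op_result = -EIO;	/* pessimistic default */
	blk_execute_rq(mq->queue, NULL, req, 0);
	ret = mq_rq->drv_op_result;	/* overwritten only if the op actually ran */
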
@@ -315,10 +316,7 @@
 
 	mutex_lock(&block_mutex);
 	if (md) {
-		if (md->usage == 2)
-			check_disk_change(bdev);
 		ret = 0;
-
 		if ((mode & FMODE_WRITE) && md->read_only) {
 			mmc_blk_put(md);
 			ret = -EROFS;
@@ -415,58 +413,22 @@
 	return 0;
 }
 
-static int ioctl_do_sanitize(struct mmc_card *card)
-{
-	int err;
-
-	if (!mmc_can_sanitize(card)) {
-		pr_warn("%s: %s - SANITIZE is not supported\n",
-			mmc_hostname(card->host), __func__);
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
-		 mmc_hostname(card->host), __func__);
-
-	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			 EXT_CSD_SANITIZE_START, 1,
-			 MMC_SANITIZE_REQ_TIMEOUT);
-
-	if (err)
-		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
-		       mmc_hostname(card->host), __func__, err);
-
-	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
-		 __func__);
-out:
-	return err;
-}
-
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
-	/*
-	 * Some cards mishandle the status bits, so make sure to check both the
-	 * busy indication and the card state.
-	 */
-	return status & R1_READY_FOR_DATA &&
-	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 			    u32 *resp_errs)
 {
-	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms > 2000 ? 2000 : timeout_ms);
 	int err = 0;
 	u32 status;
 
 	do {
 		bool done = time_after(jiffies, timeout);
 
-		if (card->host->ops->card_busy) {
+		if (!(card->host->caps2 & MMC_CAP2_NO_SD) && card->host->ops->card_busy) {
 			status = card->host->ops->card_busy(card->host) ?
 				 0 : R1_READY_FOR_DATA | R1_STATE_TRAN << 9;
-			usleep_range(100, 150);
+
+			if (!status)
+				usleep_range(100, 150);
 		} else {
 			err = __mmc_send_status(card, &status, 5);
 			if (err) {
@@ -490,13 +452,7 @@
 			       __func__, status);
 			return -ETIMEDOUT;
 		}
-
-		/*
-		 * Some cards mishandle the status bits,
-		 * so make sure to check both the busy
-		 * indication and the card state.
-		 */
-	} while (!mmc_blk_in_tran_state(status));
+	} while (!mmc_ready_for_data(status));
 
 	return err;
 }
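
The deleted mmc_blk_in_tran_state() helper was not dropped; it was promoted to the core header as mmc_ready_for_data(), which this file now calls. The core helper is equivalent to the following sketch:

static inline bool mmc_ready_for_data(u32 status)
{
	/*
	 * Some cards mishandle the status bits, so make sure to check
	 * both the busy indication and the card state.
	 */
	return status & R1_READY_FOR_DATA &&
	       R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}
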
@@ -593,15 +549,8 @@
 	}
 
 	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
-	    (cmd.opcode == MMC_SWITCH)) {
-		err = ioctl_do_sanitize(card);
-
-		if (err)
-			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
-			       __func__, err);
-
-		return err;
-	}
+	    (cmd.opcode == MMC_SWITCH))
+		return mmc_sanitize(card);
 
 	mmc_wait_for_req(card->host, &mrq);
 	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
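
Together with the ioctl_do_sanitize() removal above, this follows the upstream move of sanitize handling into the core as mmc_sanitize(). A simplified sketch of the core routine (busy polling and exact timeout handling omitted, so treat it as illustrative rather than the verbatim implementation):

int mmc_sanitize(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: SANITIZE is not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	pr_debug("%s: SANITIZE IN PROGRESS...\n", mmc_hostname(host));
	/* Same MMC_SWITCH to EXT_CSD_SANITIZE_START that ioctl_do_sanitize()
	 * issued; the 240 s timeout constant now lives in the core. */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1, 240 * 1000);
	if (err)
		pr_err("%s: SANITIZE failed, err=%d\n", mmc_hostname(host), err);

	return err;
}
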
@@ -701,6 +650,7 @@
 	idatas[0] = idata;
 	req_to_mmc_queue_req(req)->drv_op =
 		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	req_to_mmc_queue_req(req)->drv_op_data = idatas;
 	req_to_mmc_queue_req(req)->ioc_count = 1;
 	blk_execute_rq(mq->queue, NULL, req, 0);
@@ -770,6 +720,7 @@
 	}
 	req_to_mmc_queue_req(req)->drv_op =
 		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	req_to_mmc_queue_req(req)->drv_op_data = idata;
 	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
 	blk_execute_rq(mq->queue, NULL, req, 0);
@@ -797,7 +748,7 @@
 	 * whole block device, not on a partition. This prevents overspray
 	 * between sibling partitions.
 	 */
-	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
 		return -EPERM;
 	return 0;
 }
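
bdev_is_partition() replaces the open-coded bdev != bdev->bd_contains test. In kernels of this vintage the helper lives in include/linux/blkdev.h and reduces to a partition-number check, roughly:

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;	/* zero for the whole-disk device */
}
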
@@ -1024,6 +975,11 @@
 		struct mmc_blk_data *main_md =
 			dev_get_drvdata(&host->card->dev);
 		int part_err;
+		bool allow = true;
+
+		trace_android_vh_mmc_blk_reset(host, err, &allow);
+		if (!allow)
+			return -ENODEV;
 
 		main_md->part_curr = main_md->part_type;
 		part_err = mmc_blk_part_switch(host->card, md->part_type);
@@ -1070,7 +1026,7 @@
 		if (ret)
 			break;
 	}
-	/* fallthrough */
+	fallthrough;
 	case MMC_DRV_OP_IOCTL_RPMB:
 		idata = mq_rq->drv_op_data;
 		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
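
fallthrough is the pseudo-keyword from include/linux/compiler_attributes.h that replaced comment-style annotations tree-wide, letting -Wimplicit-fallthrough be enforced by the compiler instead of by comment parsing. Its definition is approximately:

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif
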
@@ -1119,7 +1075,7 @@
 {
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_card *card = md->queue.card;
-	unsigned int from, nr, arg;
+	unsigned int from, nr;
 	int err = 0, type = MMC_BLK_DISCARD;
 	blk_status_t status = BLK_STS_OK;
 
@@ -1131,24 +1087,23 @@
 	from = blk_rq_pos(req);
 	nr = blk_rq_sectors(req);
 
-	if (mmc_can_discard(card))
-		arg = MMC_DISCARD_ARG;
-	else if (mmc_can_trim(card))
-		arg = MMC_TRIM_ARG;
-	else
-		arg = MMC_ERASE_ARG;
 	do {
+		unsigned int erase_arg = card->erase_arg;
+
+		if (mmc_card_broken_sd_discard(card))
+			erase_arg = SD_ERASE_ARG;
+
 		err = 0;
 		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 INAND_CMD38_ARG_EXT_CSD,
-					 arg == MMC_TRIM_ARG ?
+					 card->erase_arg == MMC_TRIM_ARG ?
 					 INAND_CMD38_ARG_TRIM :
 					 INAND_CMD38_ARG_ERASE,
-					 0);
+					 card->ext_csd.generic_cmd6_time);
 		}
 		if (!err)
-			err = mmc_erase(card, from, nr, arg);
+			err = mmc_erase(card, from, nr, erase_arg);
 	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
 	if (err)
 		status = BLK_STS_IOERR;
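
The per-request discard/trim/erase selection disappears because the preferred argument is now cached in card->erase_arg at card-init time, with mmc_card_broken_sd_discard() providing a quirk fallback to a plain SD erase. The core-side selection is essentially the logic that used to live here; a sketch of the init-time assignment:

	/* During card initialization (core code, sketch): */
	if (mmc_can_discard(card))
		card->erase_arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		card->erase_arg = MMC_TRIM_ARG;
	else
		card->erase_arg = MMC_ERASE_ARG;
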
@@ -1187,7 +1142,7 @@
 					 arg == MMC_SECURE_TRIM1_ARG ?
 					 INAND_CMD38_ARG_SECTRIM1 :
 					 INAND_CMD38_ARG_SECERASE,
-					 0);
+					 card->ext_csd.generic_cmd6_time);
 			if (err)
 				goto out_retry;
 		}
@@ -1205,7 +1160,7 @@
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 INAND_CMD38_ARG_EXT_CSD,
 					 INAND_CMD38_ARG_SECTRIM2,
-					 0);
+					 card->ext_csd.generic_cmd6_time);
 			if (err)
 				goto out_retry;
 		}
@@ -1316,7 +1271,7 @@
 }
 
 static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
-			      int disable_multi, bool *do_rel_wr_p,
+			      int recovery_mode, bool *do_rel_wr_p,
 			      bool *do_data_tag_p)
 {
 	struct mmc_blk_data *md = mq->blkdata;
@@ -1382,12 +1337,12 @@
 			brq->data.blocks--;
 
 		/*
-		 * After a read error, we redo the request one sector
+		 * After a read error, we redo the request one (native) sector
 		 * at a time in order to accurately determine which
 		 * sectors can be read successfully.
 		 */
-		if (disable_multi)
-			brq->data.blocks = 1;
+		if (recovery_mode)
+			brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
 
 		/*
 		 * Some controllers have HW issues while operating
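
Recovery now retries one native sector at a time instead of one 512-byte block, which matters for cards whose native sector is larger than 512 bytes (single 512-byte reads are not guaranteed to work there). Worked example, assuming the queue reports a 4096-byte physical block size:

	/* queue_physical_block_size(q) == 4096
	 * 4096 >> 9 == 8, so each recovery transfer covers eight
	 * 512-byte blocks, i.e. exactly one native sector. */
	brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
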
@@ -1485,7 +1440,7 @@
 		blk_mq_end_request(req, BLK_STS_OK);
 	}
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 
 	mq->in_flight[issue_type] -= 1;
 
@@ -1493,7 +1448,7 @@
 
 	mmc_cqe_check_busy(mq);
 
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	if (!mq->cqe_busy)
 		blk_mq_run_hw_queues(q, true);
@@ -1513,8 +1468,7 @@
 	err = mmc_cqe_recovery(host);
 	if (err)
 		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
-	else
-		mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+	mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
 
 	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
 }
@@ -1533,7 +1487,7 @@
 	 */
 	if (mq->in_recovery)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 }
 
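
The blk_should_fake_timeout() guard (repeated in every completion path below) comes from the block layer moving CONFIG_FAIL_IO_TIMEOUT fault injection out of blk_mq_complete_request() and into its callers. The resulting pattern:

	if (mq->in_recovery)
		mmc_blk_cqe_complete_rq(mq, req);	/* recovery path completes it */
	else if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
	/* else: deliberately drop the completion so the timeout handler fires */
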
@@ -1605,7 +1559,7 @@
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
-			       int disable_multi,
+			       int recovery_mode,
 			       struct mmc_queue *mq)
 {
 	u32 readcmd, writecmd;
@@ -1614,7 +1568,7 @@
 	struct mmc_blk_data *md = mq->blkdata;
 	bool do_rel_wr, do_data_tag;
 
-	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+	mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
 
 	brq->mrq.cmd = &brq->cmd;
 
@@ -1705,7 +1659,7 @@
 
 #define MMC_READ_SINGLE_RETRIES 2
 
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
 static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1713,6 +1667,7 @@
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 	blk_status_t error = BLK_STS_OK;
+	size_t bytes_per_read = queue_physical_block_size(mq->queue);
 
 	do {
 		u32 status;
@@ -1729,7 +1684,7 @@
 			goto error_exit;
 
 		if (!mmc_host_is_spi(host) &&
-		    !mmc_blk_in_tran_state(status)) {
+		    !mmc_ready_for_data(status)) {
 			err = mmc_blk_fix_state(card, req);
 			if (err)
 				goto error_exit;
@@ -1747,13 +1702,13 @@
 		else
 			error = BLK_STS_OK;
 
-	} while (blk_update_request(req, error, 512));
+	} while (blk_update_request(req, error, bytes_per_read));
 
 	return;
 
error_exit:
 	mrq->data->bytes_xfered = 0;
-	blk_update_request(req, BLK_STS_IOERR, 512);
+	blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
 	/* Let it try the remaining request again */
 	if (mqrq->retries > MMC_MAX_RETRIES - 1)
 		mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1788,7 +1743,7 @@
 	return brq->cmd.resp[0] & CMD_ERRORS ||
 	       brq->stop.resp[0] & stop_err_bits ||
 	       status & stop_err_bits ||
-	       (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+	       (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
 }
 
 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
@@ -1829,8 +1784,15 @@
 	 * bytes transferred to zero in that case.
 	 */
 	err = __mmc_send_status(card, &status, 0);
-	if (err || mmc_blk_status_error(req, status))
+	if (err || mmc_blk_status_error(req, status)) {
 		brq->data.bytes_xfered = 0;
+		if (mmc_card_sd(card) && !mmc_card_removed(card)) {
+			mmc_blk_reset_success(mq->blkdata, type);
+			if (!mmc_blk_reset(md, card->host, type))
+				return;
+			pr_err("%s: pre recovery failed!\n", req->rq_disk->disk_name);
+		}
+	}
 
 	mmc_retune_release(card->host);
 
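
This vendor addition attempts an early reset for SD cards before the generic retry logic runs. Calling mmc_blk_reset_success() first clears the per-type "already reset" bookkeeping so mmc_blk_reset() is not short-circuited; in this file those helpers amount to roughly:

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	if (md->reset_done & type)	/* only one reset per error type */
		return -EEXIST;

	md->reset_done |= type;
	return mmc_hw_reset(host);	/* simplified; partition restore omitted */
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;	/* permit a future reset of this type */
}
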
@@ -1850,7 +1812,7 @@
 
 	/* Try to get back to "tran" state */
 	if (!mmc_host_is_spi(mq->card->host) &&
-	    (err || !mmc_blk_in_tran_state(status)))
+	    (err || !mmc_ready_for_data(status)))
 		err = mmc_blk_fix_state(mq->card, req);
 
 	/*
@@ -1870,6 +1832,7 @@
 	    err && mmc_blk_reset(md, card->host, type)) {
 		pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
 		mqrq->retries = MMC_NO_RETRIES;
+		trace_android_vh_mmc_blk_mq_rw_recovery(card);
 		return;
 	}
 
@@ -1894,10 +1857,9 @@
 		return;
 	}
 
-	/* FIXME: Missing single sector read for large sector size */
-	if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
-	    brq->data.blocks > 1) {
-		/* Read one sector at a time */
+	if (rq_data_dir(req) == READ && brq->data.blocks >
+	    queue_physical_block_size(mq->queue) >> 9) {
+		/* Read one (native) sector at a time */
 		mmc_blk_read_single(mq, req);
 		return;
 	}
@@ -1979,13 +1941,13 @@
 				  struct mmc_queue_req *mqrq)
 {
 	if (mmc_blk_urgent_bkops_needed(mq, mqrq))
-		mmc_start_bkops(mq->card, true);
+		mmc_run_bkops(mq->card);
 }
 
 static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
 {
 	struct mmc_queue_req *mqrq =
-		container_of(mrq, struct mmc_queue_req, brq.mrq);
+			container_of(mrq, struct mmc_queue_req, brq.mrq);
 	struct request *req = mmc_queue_req_to_req(mqrq);
 	struct request_queue *q = req->q;
 	struct mmc_queue *mq = q->queuedata;
@@ -1994,10 +1956,10 @@
 
 	if (mmc_blk_rq_error(&mqrq->brq) ||
 	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(&mq->lock, flags);
 		mq->recovery_needed = true;
 		mq->recovery_req = req;
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(&mq->lock, flags);
 
 		host->cqe_ops->cqe_recovery_start(host);
 
@@ -2013,7 +1975,7 @@
 	 */
 	if (mq->in_recovery)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 }
 
@@ -2023,7 +1985,7 @@
 
 	if (mq->use_cqe)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		mmc_blk_mq_complete_rq(mq, req);
 }
 
@@ -2044,19 +2006,18 @@
 		mmc_blk_urgent_bkops(mq, mqrq);
 }
 
-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
 {
-	struct request_queue *q = req->q;
 	unsigned long flags;
 	bool put_card;
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 
-	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+	mq->in_flight[issue_type] -= 1;
 
 	put_card = (mmc_tot_in_flight(mq) == 0);
 
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	if (put_card)
 		mmc_put_card(mq->card, &mq->ctx);
@@ -2064,6 +2025,7 @@
 
 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
 {
+	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
 	struct mmc_request *mrq = &mqrq->brq.mrq;
 	struct mmc_host *host = mq->card->host;
@@ -2076,10 +2038,10 @@
 	 */
 	if (mq->in_recovery)
 		mmc_blk_mq_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 
-	mmc_blk_mq_dec_in_flight(mq, req);
+	mmc_blk_mq_dec_in_flight(mq, issue_type);
 }
 
 void mmc_blk_mq_recovery(struct mmc_queue *mq)
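
Why issue_type is sampled up front: once blk_mq_complete_request() runs, the request may be recycled and re-dispatched, so evaluating mmc_issue_type(mq, req) afterwards could classify a different request and corrupt the in_flight[] accounting. Capturing the enum while req is still owned, then passing it (not the request) to mmc_blk_mq_dec_in_flight(), closes that window:

	enum mmc_issue_type issue_type = mmc_issue_type(mq, req); /* before completion */

	blk_mq_complete_request(req);	/* req may be reused after this point */
	mmc_blk_mq_dec_in_flight(mq, issue_type);	/* no dereference of req */
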
@@ -2152,11 +2114,11 @@
 	 * request does not need to wait (although it does need to
 	 * complete complete_req first).
 	 */
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 	mq->complete_req = req;
 	mq->rw_wait = false;
 	waiting = mq->waiting;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	/*
 	 * If 'waiting' then the waiting task will complete this
@@ -2175,10 +2137,10 @@
 	/* Take the recovery path for errors or urgent background operations */
 	if (mmc_blk_rq_error(&mqrq->brq) ||
 	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(&mq->lock, flags);
 		mq->recovery_needed = true;
 		mq->recovery_req = req;
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(&mq->lock, flags);
 		wake_up(&mq->wait);
 		schedule_work(&mq->recovery_work);
 		return;
@@ -2194,7 +2156,6 @@
 
 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
 {
-	struct request_queue *q = mq->queue;
 	unsigned long flags;
 	bool done;
 
@@ -2202,7 +2163,7 @@
 	 * Wait while there is another request in progress, but not if recovery
 	 * is needed. Also indicate whether there is a request waiting to start.
 	 */
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 	if (mq->recovery_needed) {
 		*err = -EBUSY;
 		done = true;
@@ -2210,7 +2171,7 @@
 		done = !mq->rw_wait;
 	}
 	mq->waiting = !done;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return done;
 }
@@ -2391,12 +2352,11 @@
 		goto err_kfree;
 	}
 
-	spin_lock_init(&md->lock);
 	INIT_LIST_HEAD(&md->part);
 	INIT_LIST_HEAD(&md->rpmbs);
 	md->usage = 1;
 
-	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+	ret = mmc_init_queue(&md->queue, card);
 	if (ret)
 		goto err_putdisk;
 
@@ -2764,7 +2724,7 @@
 	int ret;
 	struct mmc_card *card = md->queue.card;
 
-	device_add_disk(md->parent, md->disk);
+	device_add_disk(md->parent, md->disk, NULL);
 	md->force_ro.show = force_ro_show;
 	md->force_ro.store = force_ro_store;
 	sysfs_attr_init(&md->force_ro.attr);
@@ -2819,6 +2779,7 @@
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	ret = req_to_mmc_queue_req(req)->drv_op_result;
 	if (ret >= 0) {
@@ -2829,8 +2790,8 @@
 
 	return ret;
 }
-DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
-			NULL, "%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
+			 NULL, "%08llx\n");
 
 /* That is two digits * 512 + 1 for newline */
 #define EXT_CSD_STR_LEN 1025
@@ -2857,6 +2818,7 @@
 		goto out_free;
 	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	err = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2918,8 +2880,9 @@
 
 	if (mmc_card_mmc(card) || mmc_card_sd(card)) {
 		md->status_dentry =
-			debugfs_create_file("status", S_IRUSR, root, card,
-					    &mmc_dbg_card_status_fops);
+			debugfs_create_file_unsafe("status", 0400, root,
+						   card,
+						   &mmc_dbg_card_status_fops);
 		if (!md->status_dentry)
 			return -EIO;
 	}
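
DEFINE_DEBUGFS_ATTRIBUTE() paired with debugfs_create_file_unsafe() is the recommended replacement for DEFINE_SIMPLE_ATTRIBUTE() plus debugfs_create_file(): the generated handlers take debugfs_file_get()/debugfs_file_put() references themselves, so the file avoids the heavyweight full-proxy wrapper while remaining safe against concurrent removal. Minimal usage sketch with a hypothetical attribute (example_value and parent_dentry are placeholders):

static u64 example_value;

static int example_get(void *data, u64 *val)
{
	*val = *(u64 *)data;	/* read the backing variable */
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, NULL, "%08llx\n");

/* Registration: note the _unsafe() variant, which relies on the
 * attribute's built-in debugfs_file_get()/put() protection. */
debugfs_create_file_unsafe("example", 0400, parent_dentry, &example_value,
			   &example_fops);
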
@@ -2968,6 +2931,7 @@
 
 struct mmc_card *this_card;
 EXPORT_SYMBOL(this_card);
+
 static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
@@ -3003,8 +2967,8 @@
 
 	dev_set_drvdata(&card->dev, md);
 
-#if IS_ENABLED(CONFIG_MMC_DW_ROCKCHIP) || IS_ENABLED(CONFIG_MMC_SDHCI_OF_ARASAN)
-	if (card->host->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
+#if defined(CONFIG_MMC_DW_ROCKCHIP) || defined(CONFIG_MMC_SDHCI_OF_ARASAN)
+	if (card->type == MMC_TYPE_MMC)
 		this_card = card;
 #endif
 
@@ -3046,7 +3010,7 @@
 	mmc_blk_remove_debugfs(card, md);
 
 #if defined(CONFIG_MMC_DW_ROCKCHIP)
-	if (card->host->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
+	if (card->type == MMC_TYPE_MMC)
 		this_card = NULL;
 #endif
 