From 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 22 Oct 2024 10:36:11 +0000
Subject: [PATCH] Change 4G dial-up to QMI; quectel-CM needs to run in the
 background on the system

---
 kernel/drivers/mmc/core/block.c | 222 +++++++++++++++++++++++--------------------------
 1 file changed, 93 insertions(+), 129 deletions(-)

diff --git a/kernel/drivers/mmc/core/block.c b/kernel/drivers/mmc/core/block.c
index e765955..c533fbf 100644
--- a/kernel/drivers/mmc/core/block.c
+++ b/kernel/drivers/mmc/core/block.c
@@ -47,6 +47,8 @@
 
 #include <linux/uaccess.h>
 
+#include <trace/hooks/mmc_core.h>
+
 #include "queue.h"
 #include "block.h"
 #include "core.h"
@@ -71,7 +73,6 @@
  * ample.
  */
 #define MMC_BLK_TIMEOUT_MS (10 * 1000)
-#define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
 
@@ -101,7 +102,6 @@
  * There is one mmc_blk_data per slot.
  */
 struct mmc_blk_data {
-	spinlock_t	lock;
 	struct device	*parent;
 	struct gendisk	*disk;
 	struct mmc_queue queue;
@@ -172,7 +172,7 @@
 			       unsigned int part_type);
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
-			       int disable_multi,
+			       int recovery_mode,
 			       struct mmc_queue *mq);
 static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
 
@@ -256,6 +256,7 @@
 		goto out_put;
 	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	ret = req_to_mmc_queue_req(req)->drv_op_result;
 	blk_put_request(req);
@@ -315,10 +316,7 @@
 
 	mutex_lock(&block_mutex);
 	if (md) {
-		if (md->usage == 2)
-			check_disk_change(bdev);
 		ret = 0;
-
 		if ((mode & FMODE_WRITE) && md->read_only) {
 			mmc_blk_put(md);
 			ret = -EROFS;
@@ -415,58 +413,22 @@
 	return 0;
 }
 
-static int ioctl_do_sanitize(struct mmc_card *card)
-{
-	int err;
-
-	if (!mmc_can_sanitize(card)) {
-		pr_warn("%s: %s - SANITIZE is not supported\n",
-			mmc_hostname(card->host), __func__);
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
-		 mmc_hostname(card->host), __func__);
-
-	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			 EXT_CSD_SANITIZE_START, 1,
-			 MMC_SANITIZE_REQ_TIMEOUT);
-
-	if (err)
-		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
-		       mmc_hostname(card->host), __func__, err);
-
-	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
-		 __func__);
-out:
-	return err;
-}
-
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
-	/*
-	 * Some cards mishandle the status bits, so make sure to check both the
-	 * busy indication and the card state.
-	 */
-	return status & R1_READY_FOR_DATA &&
-	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 			    u32 *resp_errs)
 {
-	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms > 2000 ? 2000 : timeout_ms);
 	int err = 0;
 	u32 status;
 
 	do {
 		bool done = time_after(jiffies, timeout);
 
-		if (card->host->ops->card_busy) {
+		if (!(card->host->caps2 & MMC_CAP2_NO_SD) && card->host->ops->card_busy) {
 			status = card->host->ops->card_busy(card->host) ?
 				 0 : R1_READY_FOR_DATA | R1_STATE_TRAN << 9;
-			usleep_range(100, 150);
+
+			if (!status)
+				usleep_range(100, 150);
 		} else {
 			err = __mmc_send_status(card, &status, 5);
 			if (err) {
@@ -490,13 +452,7 @@
 					__func__, status);
 			return -ETIMEDOUT;
 		}
-
-		/*
-		 * Some cards mishandle the status bits,
-		 * so make sure to check both the busy
-		 * indication and the card state.
-		 */
-	} while (!mmc_blk_in_tran_state(status));
+	} while (!mmc_ready_for_data(status));
 
 	return err;
 }
@@ -593,15 +549,8 @@
 	}
 
 	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
-	    (cmd.opcode == MMC_SWITCH)) {
-		err = ioctl_do_sanitize(card);
-
-		if (err)
-			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
-			       __func__, err);
-
-		return err;
-	}
+	    (cmd.opcode == MMC_SWITCH))
+		return mmc_sanitize(card);
 
 	mmc_wait_for_req(card->host, &mrq);
 
 	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
@@ -701,6 +650,7 @@
 	idatas[0] = idata;
 	req_to_mmc_queue_req(req)->drv_op =
 		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	req_to_mmc_queue_req(req)->drv_op_data = idatas;
 	req_to_mmc_queue_req(req)->ioc_count = 1;
 	blk_execute_rq(mq->queue, NULL, req, 0);
@@ -770,6 +720,7 @@
 	}
 	req_to_mmc_queue_req(req)->drv_op =
 		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	req_to_mmc_queue_req(req)->drv_op_data = idata;
 	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
 	blk_execute_rq(mq->queue, NULL, req, 0);
@@ -797,7 +748,7 @@
 	 * whole block device, not on a partition. This prevents overspray
 	 * between sibling partitions.
 	 */
-	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
 		return -EPERM;
 	return 0;
 }
@@ -1024,6 +975,11 @@
 		struct mmc_blk_data *main_md =
 			dev_get_drvdata(&host->card->dev);
 		int part_err;
+		bool allow = true;
+
+		trace_android_vh_mmc_blk_reset(host, err, &allow);
+		if (!allow)
+			return -ENODEV;
 
 		main_md->part_curr = main_md->part_type;
 		part_err = mmc_blk_part_switch(host->card, md->part_type);
@@ -1070,7 +1026,7 @@
 			if (ret)
 				break;
 		}
-		/* fallthrough */
+		fallthrough;
 	case MMC_DRV_OP_IOCTL_RPMB:
 		idata = mq_rq->drv_op_data;
 		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
@@ -1119,7 +1075,7 @@
 {
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_card *card = md->queue.card;
-	unsigned int from, nr, arg;
+	unsigned int from, nr;
 	int err = 0, type = MMC_BLK_DISCARD;
 	blk_status_t status = BLK_STS_OK;
 
@@ -1131,24 +1087,23 @@
 	from = blk_rq_pos(req);
 	nr = blk_rq_sectors(req);
 
-	if (mmc_can_discard(card))
-		arg = MMC_DISCARD_ARG;
-	else if (mmc_can_trim(card))
-		arg = MMC_TRIM_ARG;
-	else
-		arg = MMC_ERASE_ARG;
 	do {
+		unsigned int erase_arg = card->erase_arg;
+
+		if (mmc_card_broken_sd_discard(card))
+			erase_arg = SD_ERASE_ARG;
+
 		err = 0;
 		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 INAND_CMD38_ARG_EXT_CSD,
-					 arg == MMC_TRIM_ARG ?
+					 card->erase_arg == MMC_TRIM_ARG ?
 					 INAND_CMD38_ARG_TRIM :
 					 INAND_CMD38_ARG_ERASE,
-					 0);
+					 card->ext_csd.generic_cmd6_time);
 		}
 		if (!err)
-			err = mmc_erase(card, from, nr, arg);
+			err = mmc_erase(card, from, nr, erase_arg);
 	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
 	if (err)
 		status = BLK_STS_IOERR;
@@ -1187,7 +1142,7 @@
 					 arg == MMC_SECURE_TRIM1_ARG ?
 					 INAND_CMD38_ARG_SECTRIM1 :
 					 INAND_CMD38_ARG_SECERASE,
-					 0);
+					 card->ext_csd.generic_cmd6_time);
 		if (err)
 			goto out_retry;
 	}
@@ -1205,7 +1160,7 @@
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 INAND_CMD38_ARG_EXT_CSD,
 					 INAND_CMD38_ARG_SECTRIM2,
-					 0);
+					 card->ext_csd.generic_cmd6_time);
 			if (err)
 				goto out_retry;
 		}
@@ -1316,7 +1271,7 @@
 }
 
 static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
-			      int disable_multi, bool *do_rel_wr_p,
+			      int recovery_mode, bool *do_rel_wr_p,
 			      bool *do_data_tag_p)
 {
 	struct mmc_blk_data *md = mq->blkdata;
@@ -1382,12 +1337,12 @@
 			brq->data.blocks--;
 
 		/*
-		 * After a read error, we redo the request one sector
+		 * After a read error, we redo the request one (native) sector
 		 * at a time in order to accurately determine which
 		 * sectors can be read successfully.
 		 */
-		if (disable_multi)
-			brq->data.blocks = 1;
+		if (recovery_mode)
+			brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
 
 		/*
 		 * Some controllers have HW issues while operating
@@ -1485,7 +1440,7 @@
 		blk_mq_end_request(req, BLK_STS_OK);
 	}
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 
 	mq->in_flight[issue_type] -= 1;
 
@@ -1493,7 +1448,7 @@
 
 	mmc_cqe_check_busy(mq);
 
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	if (!mq->cqe_busy)
 		blk_mq_run_hw_queues(q, true);
@@ -1513,8 +1468,7 @@
 	err = mmc_cqe_recovery(host);
 	if (err)
 		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
-	else
-		mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+	mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
 
 	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
 }
@@ -1533,7 +1487,7 @@
 	 */
 	if (mq->in_recovery)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 }
 
@@ -1605,7 +1559,7 @@
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
-			       int disable_multi,
+			       int recovery_mode,
 			       struct mmc_queue *mq)
 {
 	u32 readcmd, writecmd;
@@ -1614,7 +1568,7 @@
 	struct mmc_blk_data *md = mq->blkdata;
 	bool do_rel_wr, do_data_tag;
 
-	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+	mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
 
 	brq->mrq.cmd = &brq->cmd;
 
@@ -1705,7 +1659,7 @@
 
 #define MMC_READ_SINGLE_RETRIES	2
 
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
 static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1713,6 +1667,7 @@
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 	blk_status_t error = BLK_STS_OK;
+	size_t bytes_per_read = queue_physical_block_size(mq->queue);
 
 	do {
 		u32 status;
@@ -1729,7 +1684,7 @@
 			goto error_exit;
 
 		if (!mmc_host_is_spi(host) &&
-		    !mmc_blk_in_tran_state(status)) {
+		    !mmc_ready_for_data(status)) {
 			err = mmc_blk_fix_state(card, req);
 			if (err)
 				goto error_exit;
@@ -1747,13 +1702,13 @@
 		else
 			error = BLK_STS_OK;
 
-	} while (blk_update_request(req, error, 512));
+	} while (blk_update_request(req, error, bytes_per_read));
 
 	return;
 
 error_exit:
 	mrq->data->bytes_xfered = 0;
-	blk_update_request(req, BLK_STS_IOERR, 512);
+	blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
 	/* Let it try the remaining request again */
 	if (mqrq->retries > MMC_MAX_RETRIES - 1)
 		mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1788,7 +1743,7 @@
 	return brq->cmd.resp[0] & CMD_ERRORS ||
 	       brq->stop.resp[0] & stop_err_bits ||
 	       status            & stop_err_bits ||
-	       (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+	       (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
 }
 
 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
@@ -1829,8 +1784,15 @@
 	 * bytes transferred to zero in that case.
 	 */
 	err = __mmc_send_status(card, &status, 0);
-	if (err || mmc_blk_status_error(req, status))
+	if (err || mmc_blk_status_error(req, status)) {
 		brq->data.bytes_xfered = 0;
+		if (mmc_card_sd(card) && !mmc_card_removed(card)) {
+			mmc_blk_reset_success(mq->blkdata, type);
+			if (!mmc_blk_reset(md, card->host, type))
+				return;
+			pr_err("%s: pre recovery failed!\n", req->rq_disk->disk_name);
+		}
+	}
 
 	mmc_retune_release(card->host);
 
@@ -1850,7 +1812,7 @@
 
 	/* Try to get back to "tran" state */
 	if (!mmc_host_is_spi(mq->card->host) &&
-	    (err || !mmc_blk_in_tran_state(status)))
+	    (err || !mmc_ready_for_data(status)))
 		err = mmc_blk_fix_state(mq->card, req);
 
 	/*
@@ -1870,6 +1832,7 @@
 	    err && mmc_blk_reset(md, card->host, type)) {
 		pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
 		mqrq->retries = MMC_NO_RETRIES;
+		trace_android_vh_mmc_blk_mq_rw_recovery(card);
 		return;
 	}
 
@@ -1894,10 +1857,9 @@
 		return;
 	}
 
-	/* FIXME: Missing single sector read for large sector size */
-	if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
-	    brq->data.blocks > 1) {
-		/* Read one sector at a time */
+	if (rq_data_dir(req) == READ && brq->data.blocks >
+			queue_physical_block_size(mq->queue) >> 9) {
+		/* Read one (native) sector at a time */
 		mmc_blk_read_single(mq, req);
 		return;
 	}
@@ -1979,13 +1941,13 @@
 				  struct mmc_queue_req *mqrq)
 {
 	if (mmc_blk_urgent_bkops_needed(mq, mqrq))
-		mmc_start_bkops(mq->card, true);
+		mmc_run_bkops(mq->card);
 }
 
 static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
 {
 	struct mmc_queue_req *mqrq =
-                container_of(mrq, struct mmc_queue_req, brq.mrq);
+		container_of(mrq, struct mmc_queue_req, brq.mrq);
 	struct request *req = mmc_queue_req_to_req(mqrq);
 	struct request_queue *q = req->q;
 	struct mmc_queue *mq = q->queuedata;
@@ -1994,10 +1956,10 @@
 
 	if (mmc_blk_rq_error(&mqrq->brq) ||
 	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(&mq->lock, flags);
 		mq->recovery_needed = true;
 		mq->recovery_req = req;
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(&mq->lock, flags);
 
 		host->cqe_ops->cqe_recovery_start(host);
 
@@ -2013,7 +1975,7 @@
 	 */
 	if (mq->in_recovery)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 }
 
@@ -2023,7 +1985,7 @@
 
 	if (mq->use_cqe)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		mmc_blk_mq_complete_rq(mq, req);
 }
 
@@ -2044,19 +2006,18 @@
 		mmc_blk_urgent_bkops(mq, mqrq);
 }
 
-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
 {
-	struct request_queue *q = req->q;
 	unsigned long flags;
 	bool put_card;
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 
-	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+	mq->in_flight[issue_type] -= 1;
 
 	put_card = (mmc_tot_in_flight(mq) == 0);
 
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	if (put_card)
 		mmc_put_card(mq->card, &mq->ctx);
@@ -2064,6 +2025,7 @@
 
 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
 {
+	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
 	struct mmc_request *mrq = &mqrq->brq.mrq;
 	struct mmc_host *host = mq->card->host;
@@ -2076,10 +2038,10 @@
 	 */
 	if (mq->in_recovery)
 		mmc_blk_mq_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 
-	mmc_blk_mq_dec_in_flight(mq, req);
+	mmc_blk_mq_dec_in_flight(mq, issue_type);
 }
 
 void mmc_blk_mq_recovery(struct mmc_queue *mq)
@@ -2152,11 +2114,11 @@
 	 * request does not need to wait (although it does need to
 	 * complete complete_req first).
 	 */
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 	mq->complete_req = req;
 	mq->rw_wait = false;
 	waiting = mq->waiting;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	/*
 	 * If 'waiting' then the waiting task will complete this
@@ -2175,10 +2137,10 @@
 	/* Take the recovery path for errors or urgent background operations */
 	if (mmc_blk_rq_error(&mqrq->brq) ||
 	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(&mq->lock, flags);
 		mq->recovery_needed = true;
 		mq->recovery_req = req;
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(&mq->lock, flags);
 		wake_up(&mq->wait);
 		schedule_work(&mq->recovery_work);
 		return;
@@ -2194,7 +2156,6 @@
 
 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
 {
-	struct request_queue *q = mq->queue;
 	unsigned long flags;
 	bool done;
 
@@ -2202,7 +2163,7 @@
 	 * Wait while there is another request in progress, but not if recovery
 	 * is needed. Also indicate whether there is a request waiting to start.
 	 */
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 	if (mq->recovery_needed) {
 		*err = -EBUSY;
 		done = true;
@@ -2210,7 +2171,7 @@
 		done = !mq->rw_wait;
 	}
 	mq->waiting = !done;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return done;
 }
@@ -2391,12 +2352,11 @@
 		goto err_kfree;
 	}
 
-	spin_lock_init(&md->lock);
 	INIT_LIST_HEAD(&md->part);
 	INIT_LIST_HEAD(&md->rpmbs);
 	md->usage = 1;
 
-	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+	ret = mmc_init_queue(&md->queue, card);
 	if (ret)
 		goto err_putdisk;
 
@@ -2764,7 +2724,7 @@
 	int ret;
 	struct mmc_card *card = md->queue.card;
 
-	device_add_disk(md->parent, md->disk);
+	device_add_disk(md->parent, md->disk, NULL);
 	md->force_ro.show = force_ro_show;
 	md->force_ro.store = force_ro_store;
 	sysfs_attr_init(&md->force_ro.attr);
@@ -2819,6 +2779,7 @@
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	ret = req_to_mmc_queue_req(req)->drv_op_result;
 	if (ret >= 0) {
@@ -2829,8 +2790,8 @@
 	return ret;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
-			NULL, "%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
+			 NULL, "%08llx\n");
 
 /* That is two digits * 512 + 1 for newline */
 #define EXT_CSD_STR_LEN 1025
@@ -2857,6 +2818,7 @@
 		goto out_free;
 	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
 	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	err = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2918,8 +2880,9 @@
 
 	if (mmc_card_mmc(card) || mmc_card_sd(card)) {
 		md->status_dentry =
-			debugfs_create_file("status", S_IRUSR, root, card,
-					    &mmc_dbg_card_status_fops);
+			debugfs_create_file_unsafe("status", 0400, root,
+						   card,
+						   &mmc_dbg_card_status_fops);
 		if (!md->status_dentry)
 			return -EIO;
 	}
@@ -2968,6 +2931,7 @@
 struct mmc_card *this_card;
 EXPORT_SYMBOL(this_card);
 
+
 static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
@@ -3003,8 +2967,8 @@
 
 	dev_set_drvdata(&card->dev, md);
 
-#if IS_ENABLED(CONFIG_MMC_DW_ROCKCHIP) || IS_ENABLED(CONFIG_MMC_SDHCI_OF_ARASAN)
-	if (card->host->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
+#if defined(CONFIG_MMC_DW_ROCKCHIP) || defined(CONFIG_MMC_SDHCI_OF_ARASAN)
+	if (card->type == MMC_TYPE_MMC)
 		this_card = card;
 #endif
 
@@ -3046,7 +3010,7 @@
 	mmc_blk_remove_debugfs(card, md);
 
 #if defined(CONFIG_MMC_DW_ROCKCHIP)
-	if (card->host->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
+	if (card->type == MMC_TYPE_MMC)
 		this_card = NULL;
 #endif
 
-- 
Gitblit v1.6.2