| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * linux/drivers/mmc/core/mmc_ops.h |
|---|
| 3 | 4 | * |
|---|
| 4 | 5 | * Copyright 2006-2007 Pierre Ossman |
|---|
| 5 | | - * |
|---|
| 6 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 7 | | - * it under the terms of the GNU General Public License as published by |
|---|
| 8 | | - * the Free Software Foundation; either version 2 of the License, or (at |
|---|
| 9 | | - * your option) any later version. |
|---|
| 10 | 6 | */ |
|---|
| 11 | 7 | |
|---|
| 12 | 8 | #include <linux/slab.h> |
|---|
| .. | .. |
|---|
| 23 | 19 | #include "host.h" |
|---|
| 24 | 20 | #include "mmc_ops.h" |
|---|
| 25 | 21 | |
|---|
| 26 | | -#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ |
|---|
| 22 | +#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ |
|---|
| 23 | +#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */ |
|---|
| 24 | +#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */ |
|---|
| 27 | 25 | |
|---|
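The hunk above drops the single 10-minute `MMC_OPS_TIMEOUT_MS` catch-all in favour of per-operation budgets: 120 s for BKOPS, 30 s for a cache flush and 240 s for sanitize. A minimal sketch, with a hypothetical helper name, of how such a millisecond budget is turned into the absolute jiffies deadline that the `time_after()` polling loops later in this file rely on:

```c
#include <linux/jiffies.h>

/*
 * Hypothetical helper, not part of the patch: convert a per-operation
 * budget such as MMC_CACHE_FLUSH_TIMEOUT_MS into an absolute deadline.
 * The extra jiffy of slack mirrors what __mmc_poll_for_busy() adds, so
 * a deadline computed just before a tick cannot expire instantly.
 */
static inline unsigned long mmc_op_deadline(unsigned int timeout_ms)
{
	return jiffies + msecs_to_jiffies(timeout_ms) + 1;
}
```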
| 28 | 26 | static const u8 tuning_blk_pattern_4bit[] = { |
|---|
| 29 | 27 | 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, |
|---|
| .. | .. |
|---|
| 147 | 145 | * rules that must accommodate non-MMC slaves which this layer |
|---|
| 148 | 146 | * won't even know about. |
|---|
| 149 | 147 | */ |
|---|
| 150 | | -#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT |
|---|
| 148 | +#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT_MMC |
|---|
| 151 | 149 | if (!mmc_host_is_spi(host)) { |
|---|
| 152 | 150 | mmc_set_chip_select(host, MMC_CS_HIGH); |
|---|
| 153 | 151 | mmc_delay(1); |
|---|
| .. | .. |
|---|
| 159 | 157 | |
|---|
| 160 | 158 | err = mmc_wait_for_cmd(host, &cmd, 0); |
|---|
| 161 | 159 | |
|---|
| 162 | | -#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT |
|---|
| 160 | +#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT_MMC |
|---|
| 163 | 161 | mmc_delay(1); |
|---|
| 162 | + |
|---|
| 164 | 163 | if (!mmc_host_is_spi(host)) { |
|---|
| 165 | 164 | mmc_set_chip_select(host, MMC_CS_DONTCARE); |
|---|
| 166 | 165 | mmc_delay(1); |
|---|
| .. | .. |
|---|
| 185 | 184 | if (err) |
|---|
| 186 | 185 | break; |
|---|
| 187 | 186 | |
|---|
| 187 | +#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT_MMC |
|---|
| 188 | 188 | /* if we're just probing, do a single pass */ |
|---|
| 189 | 189 | if (ocr == 0) |
|---|
| 190 | 190 | break; |
|---|
| 191 | +#endif |
|---|
| 191 | 192 | |
|---|
| 192 | | - /* otherwise wait until reset completes */ |
|---|
| 193 | + /* |
|---|
| 194 | + * According to eMMC specification v5.1 section A6.1, the R3 |
|---|
| 195 | + * response value should be 0x00FF8080, 0x40FF8080, 0x80FF8080 |
|---|
| 196 | + * or 0xC0FF8080. The EMMC device may be abnormal if a wrong |
|---|
| 197 | + * OCR data is configured. |
|---|
| 198 | + */ |
|---|
| 199 | + if ((cmd.resp[0] & 0xFFFFFF) != 0x00FF8080) |
|---|
| 200 | + continue; |
|---|
| 201 | + |
|---|
| 202 | + /* wait until reset completes */ |
|---|
| 193 | 203 | if (mmc_host_is_spi(host)) { |
|---|
| 194 | 204 | if (!(cmd.resp[0] & R1_SPI_IDLE)) |
|---|
| 195 | 205 | break; |
|---|
| .. | .. |
|---|
| 199 | 209 | } |
|---|
| 200 | 210 | |
|---|
| 201 | 211 | err = -ETIMEDOUT; |
|---|
| 202 | | -#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT |
|---|
| 212 | + |
|---|
| 213 | + /* |
|---|
| 214 | + * According to eMMC specification v5.1 section 6.4.3, we |
|---|
| 215 | + * should issue CMD1 repeatedly in the idle state until |
|---|
| 216 | + * the eMMC is ready. Otherwise some eMMC devices seem to enter |
|---|
| 217 | + * the inactive mode after mmc_init_card() issued CMD0 when |
|---|
| 218 | + * the eMMC device is busy. |
|---|
| 219 | + */ |
|---|
| 220 | + if (!ocr && !mmc_host_is_spi(host)) |
|---|
| 221 | + cmd.arg = cmd.resp[0] | BIT(30); |
|---|
| 222 | +#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT_MMC |
|---|
| 203 | 223 | mmc_delay(1); |
|---|
| 204 | 224 | #else |
|---|
| 205 | 225 | udelay(1); |
|---|
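Two Rockchip-specific behaviours are added to the CMD1 loop above: the R3 response is sanity-checked against the four values eMMC 5.1 allows (0x00FF8080, 0x40FF8080, 0x80FF8080 and 0xC0FF8080, which differ only in the busy and access-mode bits), and on non-SPI hosts a probe with `ocr == 0` rebuilds the retried argument from the card's reported OCR with bit 30 (sector addressing) set. A standalone sketch of the response check, with a hypothetical helper name:

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Standalone sketch of the check added above: the allowed R3 values
 * differ only in the top byte (busy flag and access-mode field), so
 * masking the response down to its low 24 bits and comparing against
 * 0x00FF8080 (voltage window 2.7-3.6V plus the 1.70-1.95V bit) covers
 * all four spec-compliant responses.
 */
static bool emmc_r3_is_sane(uint32_t resp)
{
	return (resp & 0x00FFFFFF) == 0x00FF8080;
}

/*
 * emmc_r3_is_sane(0xC0FF8080) -> true  (power-up done, sector mode)
 * emmc_r3_is_sane(0x00FF8080) -> true  (card still busy)
 * emmc_r3_is_sane(0x00FF0080) -> false (corrupt voltage window)
 */
```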
| .. | .. |
|---|
| 431 | 451 | } |
|---|
| 432 | 452 | |
|---|
| 433 | 453 | /* Caller must hold re-tuning */ |
|---|
| 434 | | -int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal) |
|---|
| 454 | +int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal) |
|---|
| 435 | 455 | { |
|---|
| 436 | 456 | u32 status; |
|---|
| 437 | 457 | int err; |
|---|
| .. | .. |
|---|
| 445 | 465 | return mmc_switch_status_error(card->host, status); |
|---|
| 446 | 466 | } |
|---|
| 447 | 467 | |
|---|
| 448 | | -int mmc_switch_status(struct mmc_card *card) |
|---|
| 468 | +static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err, |
|---|
| 469 | + enum mmc_busy_cmd busy_cmd, bool *busy) |
|---|
| 449 | 470 | { |
|---|
| 450 | | - return __mmc_switch_status(card, true); |
|---|
| 471 | + struct mmc_host *host = card->host; |
|---|
| 472 | + u32 status = 0; |
|---|
| 473 | + int err; |
|---|
| 474 | + |
|---|
| 475 | + if (host->ops->card_busy) { |
|---|
| 476 | + *busy = host->ops->card_busy(host); |
|---|
| 477 | + return 0; |
|---|
| 478 | + } |
|---|
| 479 | + |
|---|
| 480 | + err = mmc_send_status(card, &status); |
|---|
| 481 | + if (retry_crc_err && err == -EILSEQ) { |
|---|
| 482 | + *busy = true; |
|---|
| 483 | + return 0; |
|---|
| 484 | + } |
|---|
| 485 | + if (err) |
|---|
| 486 | + return err; |
|---|
| 487 | + |
|---|
| 488 | + switch (busy_cmd) { |
|---|
| 489 | + case MMC_BUSY_CMD6: |
|---|
| 490 | + err = mmc_switch_status_error(card->host, status); |
|---|
| 491 | + break; |
|---|
| 492 | + case MMC_BUSY_ERASE: |
|---|
| 493 | + err = R1_STATUS(status) ? -EIO : 0; |
|---|
| 494 | + break; |
|---|
| 495 | + case MMC_BUSY_HPI: |
|---|
| 496 | + break; |
|---|
| 497 | + default: |
|---|
| 498 | + err = -EINVAL; |
|---|
| 499 | + } |
|---|
| 500 | + |
|---|
| 501 | + if (err) |
|---|
| 502 | + return err; |
|---|
| 503 | + |
|---|
| 504 | + *busy = !mmc_ready_for_data(status); |
|---|
| 505 | + return 0; |
|---|
| 451 | 506 | } |
|---|
| 452 | 507 | |
|---|
| 453 | | -static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, |
|---|
| 454 | | - bool send_status, bool retry_crc_err) |
|---|
| 508 | +static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, |
|---|
| 509 | + bool send_status, bool retry_crc_err, |
|---|
| 510 | + enum mmc_busy_cmd busy_cmd) |
|---|
| 455 | 511 | { |
|---|
| 456 | 512 | struct mmc_host *host = card->host; |
|---|
| 457 | 513 | int err; |
|---|
| 458 | 514 | unsigned long timeout; |
|---|
| 459 | | - u32 status = 0; |
|---|
| 515 | + unsigned int udelay = 32, udelay_max = 32768; |
|---|
| 460 | 516 | bool expired = false; |
|---|
| 461 | 517 | bool busy = false; |
|---|
| 462 | | - |
|---|
| 463 | | - /* We have an unspecified cmd timeout, use the fallback value. */ |
|---|
| 464 | | - if (!timeout_ms) |
|---|
| 465 | | - timeout_ms = MMC_OPS_TIMEOUT_MS; |
|---|
| 466 | 518 | |
|---|
| 467 | 519 | /* |
|---|
| 468 | 520 | * In cases when not allowed to poll by using CMD13 or because we aren't |
|---|
| .. | .. |
|---|
| 482 | 534 | */ |
|---|
| 483 | 535 | expired = time_after(jiffies, timeout); |
|---|
| 484 | 536 | |
|---|
| 485 | | - if (host->ops->card_busy) { |
|---|
| 486 | | - busy = host->ops->card_busy(host); |
|---|
| 487 | | - } else { |
|---|
| 488 | | - err = mmc_send_status(card, &status); |
|---|
| 489 | | - if (retry_crc_err && err == -EILSEQ) { |
|---|
| 490 | | - busy = true; |
|---|
| 491 | | - } else if (err) { |
|---|
| 492 | | - return err; |
|---|
| 493 | | - } else { |
|---|
| 494 | | - err = mmc_switch_status_error(host, status); |
|---|
| 495 | | - if (err) |
|---|
| 496 | | - return err; |
|---|
| 497 | | - busy = R1_CURRENT_STATE(status) == R1_STATE_PRG; |
|---|
| 498 | | - } |
|---|
| 499 | | - } |
|---|
| 537 | + err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy); |
|---|
| 538 | + if (err) |
|---|
| 539 | + return err; |
|---|
| 500 | 540 | |
|---|
| 501 | 541 | /* Timeout if the device still remains busy. */ |
|---|
| 502 | 542 | if (expired && busy) { |
|---|
| .. | .. |
|---|
| 504 | 544 | mmc_hostname(host), __func__); |
|---|
| 505 | 545 | return -ETIMEDOUT; |
|---|
| 506 | 546 | } |
|---|
| 547 | + |
|---|
| 548 | + /* Throttle the polling rate to avoid hogging the CPU. */ |
|---|
| 549 | + if (busy) { |
|---|
| 550 | + usleep_range(udelay, udelay * 2); |
|---|
| 551 | + if (udelay < udelay_max) |
|---|
| 552 | + udelay *= 2; |
|---|
| 553 | + } |
|---|
| 507 | 554 | } while (busy); |
|---|
| 508 | 555 | |
|---|
| 509 | 556 | return 0; |
|---|
| 557 | +} |
|---|
| 558 | + |
|---|
| 559 | +int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, |
|---|
| 560 | + enum mmc_busy_cmd busy_cmd) |
|---|
| 561 | +{ |
|---|
| 562 | + return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd); |
|---|
| 510 | 563 | } |
|---|
| 511 | 564 | |
|---|
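`__mmc_poll_for_busy()` now delegates the per-iteration check to `mmc_busy_status()` (the host's `->card_busy` callback if available, otherwise CMD13) and, new here, throttles the loop with a sleep that doubles from 32 µs up to a 32768 µs ceiling. A self-contained sketch of that backoff pattern in plain C/POSIX, with hypothetical names and a caller-supplied predicate standing in for `mmc_busy_status()`:

```c
#include <errno.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/* Illustrative names only; is_busy() stands in for mmc_busy_status(). */
static int poll_until_ready(bool (*is_busy)(void *ctx), void *ctx,
			    unsigned int timeout_ms)
{
	unsigned int delay_us = 32, delay_max_us = 32768;
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		long elapsed_ms;
		bool expired;

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		/* note: expiry is sampled before the final busy check */
		expired = elapsed_ms > (long)timeout_ms;

		if (!is_busy(ctx))
			return 0;		/* done */
		if (expired)
			return -ETIMEDOUT;	/* still busy, give up */

		usleep(delay_us);		/* throttle the polling rate */
		if (delay_us < delay_max_us)
			delay_us *= 2;
	}
}
```

The kernel version additionally treats a CRC error from CMD13 as "still busy" when `retry_crc_err` is set; the sketch leaves that out.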
| 512 | 565 | /** |
|---|
| .. | .. |
|---|
| 518 | 571 | * @timeout_ms: timeout (ms) for operation performed by register write, |
|---|
| 519 | 572 | * timeout of zero implies maximum possible timeout |
|---|
| 520 | 573 | * @timing: new timing to change to |
|---|
| 521 | | - * @use_busy_signal: use the busy signal as response type |
|---|
| 522 | 574 | * @send_status: send status cmd to poll for busy |
|---|
| 523 | 575 | * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy |
|---|
| 524 | 576 | * |
|---|
| .. | .. |
|---|
| 526 | 578 | */ |
|---|
| 527 | 579 | int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, |
|---|
| 528 | 580 | unsigned int timeout_ms, unsigned char timing, |
|---|
| 529 | | - bool use_busy_signal, bool send_status, bool retry_crc_err) |
|---|
| 581 | + bool send_status, bool retry_crc_err) |
|---|
| 530 | 582 | { |
|---|
| 531 | 583 | struct mmc_host *host = card->host; |
|---|
| 532 | 584 | int err; |
|---|
| 533 | 585 | struct mmc_command cmd = {}; |
|---|
| 534 | | - bool use_r1b_resp = use_busy_signal; |
|---|
| 586 | + bool use_r1b_resp = true; |
|---|
| 535 | 587 | unsigned char old_timing = host->ios.timing; |
|---|
| 536 | 588 | |
|---|
| 537 | 589 | mmc_retune_hold(host); |
|---|
| 538 | 590 | |
|---|
| 591 | + if (!timeout_ms) { |
|---|
| 592 | + pr_warn("%s: unspecified timeout for CMD6 - use generic\n", |
|---|
| 593 | + mmc_hostname(host)); |
|---|
| 594 | + timeout_ms = card->ext_csd.generic_cmd6_time; |
|---|
| 595 | + } |
|---|
| 596 | + |
|---|
| 539 | 597 | /* |
|---|
| 540 | | - * If the cmd timeout and the max_busy_timeout of the host are both |
|---|
| 541 | | - * specified, let's validate them. A failure means we need to prevent |
|---|
| 542 | | - * the host from doing hw busy detection, which is done by converting |
|---|
| 543 | | - * to a R1 response instead of a R1B. Note, some hosts requires R1B, |
|---|
| 544 | | - * which also means they are on their own when it comes to deal with the |
|---|
| 545 | | - * busy timeout. |
|---|
| 598 | + * If the max_busy_timeout of the host is specified, make sure it's |
|---|
| 599 | + * enough to fit the used timeout_ms. In case it's not, let's instruct |
|---|
| 600 | + * the host to avoid HW busy detection, by converting to a R1 response |
|---|
| 601 | + * instead of a R1B. Note, some hosts requires R1B, which also means |
|---|
| 602 | + * they are on their own when it comes to deal with the busy timeout. |
|---|
| 546 | 603 | */ |
|---|
| 547 | | - if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms && |
|---|
| 548 | | - host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) |
|---|
| 604 | + if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout && |
|---|
| 605 | + (timeout_ms > host->max_busy_timeout)) |
|---|
| 549 | 606 | use_r1b_resp = false; |
|---|
| 550 | 607 | |
|---|
| 551 | 608 | cmd.opcode = MMC_SWITCH; |
|---|
| .. | .. |
|---|
| 556 | 613 | cmd.flags = MMC_CMD_AC; |
|---|
| 557 | 614 | if (use_r1b_resp) { |
|---|
| 558 | 615 | cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; |
|---|
| 559 | | - /* |
|---|
| 560 | | - * A busy_timeout of zero means the host can decide to use |
|---|
| 561 | | - * whatever value it finds suitable. |
|---|
| 562 | | - */ |
|---|
| 563 | 616 | cmd.busy_timeout = timeout_ms; |
|---|
| 564 | 617 | } else { |
|---|
| 565 | 618 | cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; |
|---|
| 566 | 619 | } |
|---|
| 567 | 620 | |
|---|
| 568 | | - if (index == EXT_CSD_SANITIZE_START) |
|---|
| 569 | | - cmd.sanitize_busy = true; |
|---|
| 570 | | - |
|---|
| 571 | 621 | err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); |
|---|
| 572 | 622 | if (err) |
|---|
| 573 | | - goto out; |
|---|
| 574 | | - |
|---|
| 575 | | - /* No need to check card status in case of unblocking command */ |
|---|
| 576 | | - if (!use_busy_signal) |
|---|
| 577 | 623 | goto out; |
|---|
| 578 | 624 | |
|---|
| 579 | 625 | /*If SPI or used HW busy detection above, then we don't need to poll. */ |
|---|
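With `use_busy_signal` gone, `__mmc_switch()` always waits for the busy phase; what remains configurable is only whether that wait happens in hardware (R1B response) or by CMD13 polling (R1 response). An unspecified `timeout_ms` now falls back to the card's `generic_cmd6_time` with a warning instead of the old 10-minute default. A minimal sketch of the response-type decision, using hypothetical parameter names:

```c
#include <stdbool.h>

/*
 * Illustrative only: mirror the decision in __mmc_switch() above.
 *  - need_rsp_busy: host sets MMC_CAP_NEED_RSP_BUSY, i.e. it insists on
 *    R1B and copes with long busy periods on its own
 *  - max_busy_ms == 0 means the host declared no limit
 */
static bool use_r1b_response(unsigned int timeout_ms,
			     unsigned int max_busy_ms,
			     bool need_rsp_busy)
{
	if (need_rsp_busy)
		return true;
	if (max_busy_ms && timeout_ms > max_busy_ms)
		return false;	/* budget exceeds HW busy detection */
	return true;
}
```

When this returns false the command goes out as a plain R1 and completion is detected by the polling loop above.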
| .. | .. |
|---|
| 582 | 628 | goto out_tim; |
|---|
| 583 | 629 | |
|---|
| 584 | 630 | /* Let's try to poll to find out when the command is completed. */ |
|---|
| 585 | | - err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err); |
|---|
| 631 | + err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err, |
|---|
| 632 | + MMC_BUSY_CMD6); |
|---|
| 586 | 633 | if (err) |
|---|
| 587 | 634 | goto out; |
|---|
| 588 | 635 | |
|---|
| .. | .. |
|---|
| 592 | 639 | mmc_set_timing(host, timing); |
|---|
| 593 | 640 | |
|---|
| 594 | 641 | if (send_status) { |
|---|
| 595 | | - err = mmc_switch_status(card); |
|---|
| 642 | + err = mmc_switch_status(card, true); |
|---|
| 596 | 643 | if (err && timing) |
|---|
| 597 | 644 | mmc_set_timing(host, old_timing); |
|---|
| 598 | 645 | } |
|---|
| .. | .. |
|---|
| 606 | 653 | unsigned int timeout_ms) |
|---|
| 607 | 654 | { |
|---|
| 608 | 655 | return __mmc_switch(card, set, index, value, timeout_ms, 0, |
|---|
| 609 | | - true, true, false); |
|---|
| 656 | + true, false); |
|---|
| 610 | 657 | } |
|---|
| 611 | 658 | EXPORT_SYMBOL_GPL(mmc_switch); |
|---|
| 612 | 659 | |
|---|
| .. | .. |
|---|
| 802 | 849 | return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width); |
|---|
| 803 | 850 | } |
|---|
| 804 | 851 | |
|---|
| 805 | | -static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) |
|---|
| 852 | +static int mmc_send_hpi_cmd(struct mmc_card *card) |
|---|
| 806 | 853 | { |
|---|
| 854 | + unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time; |
|---|
| 855 | + struct mmc_host *host = card->host; |
|---|
| 856 | + bool use_r1b_resp = true; |
|---|
| 807 | 857 | struct mmc_command cmd = {}; |
|---|
| 808 | | - unsigned int opcode; |
|---|
| 809 | 858 | int err; |
|---|
| 810 | 859 | |
|---|
| 811 | | - if (!card->ext_csd.hpi) { |
|---|
| 812 | | - pr_warn("%s: Card didn't support HPI command\n", |
|---|
| 813 | | - mmc_hostname(card->host)); |
|---|
| 814 | | - return -EINVAL; |
|---|
| 815 | | - } |
|---|
| 816 | | - |
|---|
| 817 | | - opcode = card->ext_csd.hpi_cmd; |
|---|
| 818 | | - if (opcode == MMC_STOP_TRANSMISSION) |
|---|
| 819 | | - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; |
|---|
| 820 | | - else if (opcode == MMC_SEND_STATUS) |
|---|
| 821 | | - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; |
|---|
| 822 | | - |
|---|
| 823 | | - cmd.opcode = opcode; |
|---|
| 860 | + cmd.opcode = card->ext_csd.hpi_cmd; |
|---|
| 824 | 861 | cmd.arg = card->rca << 16 | 1; |
|---|
| 825 | 862 | |
|---|
| 826 | | - err = mmc_wait_for_cmd(card->host, &cmd, 0); |
|---|
| 863 | + /* |
|---|
| 864 | + * Make sure the host's max_busy_timeout fit the needed timeout for HPI. |
|---|
| 865 | + * In case it doesn't, let's instruct the host to avoid HW busy |
|---|
| 866 | + * detection, by using a R1 response instead of R1B. |
|---|
| 867 | + */ |
|---|
| 868 | + if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout) |
|---|
| 869 | + use_r1b_resp = false; |
|---|
| 870 | + |
|---|
| 871 | + if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) { |
|---|
| 872 | + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; |
|---|
| 873 | + cmd.busy_timeout = busy_timeout_ms; |
|---|
| 874 | + } else { |
|---|
| 875 | + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; |
|---|
| 876 | + use_r1b_resp = false; |
|---|
| 877 | + } |
|---|
| 878 | + |
|---|
| 879 | + err = mmc_wait_for_cmd(host, &cmd, 0); |
|---|
| 827 | 880 | if (err) { |
|---|
| 828 | | - pr_warn("%s: error %d interrupting operation. " |
|---|
| 829 | | - "HPI command response %#x\n", mmc_hostname(card->host), |
|---|
| 830 | | - err, cmd.resp[0]); |
|---|
| 881 | + pr_warn("%s: HPI error %d. Command response %#x\n", |
|---|
| 882 | + mmc_hostname(host), err, cmd.resp[0]); |
|---|
| 831 | 883 | return err; |
|---|
| 832 | 884 | } |
|---|
| 833 | | - if (status) |
|---|
| 834 | | - *status = cmd.resp[0]; |
|---|
| 835 | 885 | |
|---|
| 836 | | - return 0; |
|---|
| 886 | + /* No need to poll when using HW busy detection. */ |
|---|
| 887 | + if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp) |
|---|
| 888 | + return 0; |
|---|
| 889 | + |
|---|
| 890 | + /* Let's poll to find out when the HPI request completes. */ |
|---|
| 891 | + return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI); |
|---|
| 837 | 892 | } |
|---|
| 838 | 893 | |
|---|
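`mmc_send_hpi_cmd()` now follows the same scheme: the EXT_CSD out-of-interrupt time becomes the busy budget, an R1B response is used when the host can honour it (only for the CMD12 flavour of HPI), and otherwise completion is detected with `mmc_poll_for_busy(..., MMC_BUSY_HPI)`. For reference, a standalone sketch of the HPI argument built above, with a hypothetical helper name:

```c
#include <stdint.h>

/*
 * Standalone sketch: the HPI argument carries the card's relative card
 * address (RCA) in bits [31:16] and the HPI flag in bit 0.  The opcode
 * itself comes from EXT_CSD HPI_FEATURES and is either CMD12
 * (STOP_TRANSMISSION) or CMD13 (SEND_STATUS).
 */
static uint32_t hpi_arg(uint16_t rca)
{
	return ((uint32_t)rca << 16) | 0x1;
}

/* hpi_arg(0x0001) == 0x00010001, matching "card->rca << 16 | 1" above */
```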
| 839 | 894 | /** |
|---|
| .. | .. |
|---|
| 843 | 898 | * Issued High Priority Interrupt, and check for card status |
|---|
| 844 | 899 | * until out-of prg-state. |
|---|
| 845 | 900 | */ |
|---|
| 846 | | -int mmc_interrupt_hpi(struct mmc_card *card) |
|---|
| 901 | +static int mmc_interrupt_hpi(struct mmc_card *card) |
|---|
| 847 | 902 | { |
|---|
| 848 | 903 | int err; |
|---|
| 849 | 904 | u32 status; |
|---|
| 850 | | - unsigned long prg_wait; |
|---|
| 851 | 905 | |
|---|
| 852 | 906 | if (!card->ext_csd.hpi_en) { |
|---|
| 853 | 907 | pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); |
|---|
| .. | .. |
|---|
| 880 | 934 | goto out; |
|---|
| 881 | 935 | } |
|---|
| 882 | 936 | |
|---|
| 883 | | - err = mmc_send_hpi_cmd(card, &status); |
|---|
| 884 | | - if (err) |
|---|
| 885 | | - goto out; |
|---|
| 886 | | - |
|---|
| 887 | | - prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time); |
|---|
| 888 | | - do { |
|---|
| 889 | | - err = mmc_send_status(card, &status); |
|---|
| 890 | | - |
|---|
| 891 | | - if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) |
|---|
| 892 | | - break; |
|---|
| 893 | | - if (time_after(jiffies, prg_wait)) |
|---|
| 894 | | - err = -ETIMEDOUT; |
|---|
| 895 | | - } while (!err); |
|---|
| 896 | | - |
|---|
| 937 | + err = mmc_send_hpi_cmd(card); |
|---|
| 897 | 938 | out: |
|---|
| 898 | 939 | return err; |
|---|
| 899 | 940 | } |
|---|
| .. | .. |
|---|
| 901 | 942 | int mmc_can_ext_csd(struct mmc_card *card) |
|---|
| 902 | 943 | { |
|---|
| 903 | 944 | return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3); |
|---|
| 904 | | -} |
|---|
| 905 | | - |
|---|
| 906 | | -/** |
|---|
| 907 | | - * mmc_stop_bkops - stop ongoing BKOPS |
|---|
| 908 | | - * @card: MMC card to check BKOPS |
|---|
| 909 | | - * |
|---|
| 910 | | - * Send HPI command to stop ongoing background operations to |
|---|
| 911 | | - * allow rapid servicing of foreground operations, e.g. read/ |
|---|
| 912 | | - * writes. Wait until the card comes out of the programming state |
|---|
| 913 | | - * to avoid errors in servicing read/write requests. |
|---|
| 914 | | - */ |
|---|
| 915 | | -int mmc_stop_bkops(struct mmc_card *card) |
|---|
| 916 | | -{ |
|---|
| 917 | | - int err = 0; |
|---|
| 918 | | - |
|---|
| 919 | | - err = mmc_interrupt_hpi(card); |
|---|
| 920 | | - |
|---|
| 921 | | - /* |
|---|
| 922 | | - * If err is EINVAL, we can't issue an HPI. |
|---|
| 923 | | - * It should complete the BKOPS. |
|---|
| 924 | | - */ |
|---|
| 925 | | - if (!err || (err == -EINVAL)) { |
|---|
| 926 | | - mmc_card_clr_doing_bkops(card); |
|---|
| 927 | | - mmc_retune_release(card->host); |
|---|
| 928 | | - err = 0; |
|---|
| 929 | | - } |
|---|
| 930 | | - |
|---|
| 931 | | - return err; |
|---|
| 932 | 945 | } |
|---|
| 933 | 946 | |
|---|
| 934 | 947 | static int mmc_read_bkops_status(struct mmc_card *card) |
|---|
| .. | .. |
|---|
| 947 | 960 | } |
|---|
| 948 | 961 | |
|---|
| 949 | 962 | /** |
|---|
| 950 | | - * mmc_start_bkops - start BKOPS for supported cards |
|---|
| 951 | | - * @card: MMC card to start BKOPS |
|---|
| 952 | | - * @from_exception: A flag to indicate if this function was |
|---|
| 953 | | - * called due to an exception raised by the card |
|---|
| 963 | + * mmc_run_bkops - Run BKOPS for supported cards |
|---|
| 964 | + * @card: MMC card to run BKOPS for |
|---|
| 954 | 965 | * |
|---|
| 955 | | - * Start background operations whenever requested. |
|---|
| 956 | | - * When the urgent BKOPS bit is set in a R1 command response |
|---|
| 957 | | - * then background operations should be started immediately. |
|---|
| 966 | + * Run background operations synchronously for cards having manual BKOPS |
|---|
| 967 | + * enabled and in case it reports urgent BKOPS level. |
|---|
| 958 | 968 | */ |
|---|
| 959 | | -void mmc_start_bkops(struct mmc_card *card, bool from_exception) |
|---|
| 969 | +void mmc_run_bkops(struct mmc_card *card) |
|---|
| 960 | 970 | { |
|---|
| 961 | 971 | int err; |
|---|
| 962 | | - int timeout; |
|---|
| 963 | | - bool use_busy_signal; |
|---|
| 964 | 972 | |
|---|
| 965 | | - if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card)) |
|---|
| 973 | + if (!card->ext_csd.man_bkops_en) |
|---|
| 966 | 974 | return; |
|---|
| 967 | 975 | |
|---|
| 968 | 976 | err = mmc_read_bkops_status(card); |
|---|
| .. | .. |
|---|
| 972 | 980 | return; |
|---|
| 973 | 981 | } |
|---|
| 974 | 982 | |
|---|
| 975 | | - if (!card->ext_csd.raw_bkops_status) |
|---|
| 983 | + if (!card->ext_csd.raw_bkops_status || |
|---|
| 984 | + card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2) |
|---|
| 976 | 985 | return; |
|---|
| 977 | | - |
|---|
| 978 | | - if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 && |
|---|
| 979 | | - from_exception) |
|---|
| 980 | | - return; |
|---|
| 981 | | - |
|---|
| 982 | | - if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { |
|---|
| 983 | | - timeout = MMC_OPS_TIMEOUT_MS; |
|---|
| 984 | | - use_busy_signal = true; |
|---|
| 985 | | - } else { |
|---|
| 986 | | - timeout = 0; |
|---|
| 987 | | - use_busy_signal = false; |
|---|
| 988 | | - } |
|---|
| 989 | 986 | |
|---|
| 990 | 987 | mmc_retune_hold(card->host); |
|---|
| 991 | 988 | |
|---|
| 992 | | - err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
|---|
| 993 | | - EXT_CSD_BKOPS_START, 1, timeout, 0, |
|---|
| 994 | | - use_busy_signal, true, false); |
|---|
| 995 | | - if (err) { |
|---|
| 989 | + /* |
|---|
| 990 | + * For urgent BKOPS status, LEVEL_2 and higher, let's execute |
|---|
| 991 | + * synchronously. Future wise, we may consider to start BKOPS, for less |
|---|
| 992 | + * urgent levels by using an asynchronous background task, when idle. |
|---|
| 993 | + */ |
|---|
| 994 | + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
|---|
| 995 | + EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS); |
|---|
| 996 | + if (err) |
|---|
| 996 | 997 | pr_warn("%s: Error %d starting bkops\n", |
|---|
| 997 | 998 | mmc_hostname(card->host), err); |
|---|
| 998 | | - mmc_retune_release(card->host); |
|---|
| 999 | | - return; |
|---|
| 1000 | | - } |
|---|
| 1001 | 999 | |
|---|
| 1002 | | - /* |
|---|
| 1003 | | - * For urgent bkops status (LEVEL_2 and more) |
|---|
| 1004 | | - * bkops executed synchronously, otherwise |
|---|
| 1005 | | - * the operation is in progress |
|---|
| 1006 | | - */ |
|---|
| 1007 | | - if (!use_busy_signal) |
|---|
| 1008 | | - mmc_card_set_doing_bkops(card); |
|---|
| 1009 | | - else |
|---|
| 1010 | | - mmc_retune_release(card->host); |
|---|
| 1000 | + mmc_retune_release(card->host); |
|---|
| 1011 | 1001 | } |
|---|
| 1012 | | -EXPORT_SYMBOL(mmc_start_bkops); |
|---|
| 1002 | +EXPORT_SYMBOL(mmc_run_bkops); |
|---|
| 1013 | 1003 | |
|---|
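`mmc_start_bkops()`/`mmc_stop_bkops()` are folded into a single synchronous `mmc_run_bkops()`: it only acts on cards with manual BKOPS enabled, only for urgent status levels (LEVEL_2 and above), and blocks for up to `MMC_BKOPS_TIMEOUT_MS` while the card works. A hypothetical caller sketch (the call site is not part of this patch) of how a request path that sees the exception-event bit might use it:

```c
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "mmc_ops.h"	/* assumed: declares mmc_run_bkops() */

/*
 * Hypothetical caller, for illustration only: with the synchronous
 * semantics there is no "doing bkops" state to track any more; the
 * helper simply returns once background operations have finished (or
 * immediately, if the card does not report an urgent BKOPS level).
 */
static void example_handle_card_exception(struct mmc_card *card, u32 resp)
{
	if (resp & R1_EXCEPTION_EVENT)	/* card flagged an exception */
		mmc_run_bkops(card);	/* may sleep */
}
```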
| 1014 | 1004 | /* |
|---|
| 1015 | 1005 | * Flush the cache to the non-volatile storage. |
|---|
| .. | .. |
|---|
| 1020 | 1010 | |
|---|
| 1021 | 1011 | if (mmc_cache_enabled(card->host)) { |
|---|
| 1022 | 1012 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
|---|
| 1023 | | - EXT_CSD_FLUSH_CACHE, 1, 0, 0, |
|---|
| 1024 | | - true, false, false); |
|---|
| 1013 | + EXT_CSD_FLUSH_CACHE, 1, |
|---|
| 1014 | + MMC_CACHE_FLUSH_TIMEOUT_MS, 0, |
|---|
| 1015 | + false, false); |
|---|
| 1025 | 1016 | if (err) |
|---|
| 1026 | 1017 | pr_err("%s: cache flush error %d\n", |
|---|
| 1027 | 1018 | mmc_hostname(card->host), err); |
|---|
| .. | .. |
|---|
| 1058 | 1049 | return mmc_cmdq_switch(card, false); |
|---|
| 1059 | 1050 | } |
|---|
| 1060 | 1051 | EXPORT_SYMBOL_GPL(mmc_cmdq_disable); |
|---|
| 1052 | + |
|---|
| 1053 | +int mmc_sanitize(struct mmc_card *card) |
|---|
| 1054 | +{ |
|---|
| 1055 | + struct mmc_host *host = card->host; |
|---|
| 1056 | + int err; |
|---|
| 1057 | + |
|---|
| 1058 | + if (!mmc_can_sanitize(card)) { |
|---|
| 1059 | + pr_warn("%s: Sanitize not supported\n", mmc_hostname(host)); |
|---|
| 1060 | + return -EOPNOTSUPP; |
|---|
| 1061 | + } |
|---|
| 1062 | + |
|---|
| 1063 | + pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host)); |
|---|
| 1064 | + |
|---|
| 1065 | + mmc_retune_hold(host); |
|---|
| 1066 | + |
|---|
| 1067 | + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START, |
|---|
| 1068 | + 1, MMC_SANITIZE_TIMEOUT_MS); |
|---|
| 1069 | + if (err) |
|---|
| 1070 | + pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err); |
|---|
| 1071 | + |
|---|
| 1072 | + /* |
|---|
| 1073 | + * If the sanitize operation timed out, the card is probably still busy |
|---|
| 1074 | + * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort |
|---|
| 1075 | + * it with a HPI command to get back into R1_STATE_TRAN. |
|---|
| 1076 | + */ |
|---|
| 1077 | + if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card)) |
|---|
| 1078 | + pr_warn("%s: Sanitize aborted\n", mmc_hostname(host)); |
|---|
| 1079 | + |
|---|
| 1080 | + mmc_retune_release(host); |
|---|
| 1081 | + |
|---|
| 1082 | + pr_debug("%s: Sanitize completed\n", mmc_hostname(host)); |
|---|
| 1083 | + return err; |
|---|
| 1084 | +} |
|---|
| 1085 | +EXPORT_SYMBOL_GPL(mmc_sanitize); |
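Finally, sanitize handling moves into the core as `mmc_sanitize()`: a CMD6 SANITIZE_START with a dedicated 240 s budget, retune held across the operation, and an HPI-based abort back to `R1_STATE_TRAN` if the switch times out. A hypothetical caller sketch (the real call site lives outside this file), assuming the core's usual host-claiming rules:

```c
#include <linux/mmc/card.h>

#include "core.h"	/* assumed: mmc_claim_host()/mmc_release_host() */

/*
 * Hypothetical caller, for illustration only: serialize against other
 * requests, run the sanitize, and hand the result (0, -EOPNOTSUPP, or a
 * switch/poll error such as -ETIMEDOUT) back to the requester.
 */
static int example_request_sanitize(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_sanitize(card);
	mmc_release_host(card->host);

	return err;
}
```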
|---|