forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/mmc/host/sdhci.c
@@ -1,19 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
  *
  * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
  *
  * Thanks to the following companies for their support:
  *
  * - JMicron (hardware and technical support)
  */
 
+#include <linux/bitfield.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/ktime.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
@@ -35,6 +33,8 @@
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/slot-gpio.h>
 
+#include <trace/hooks/mmc_core.h>
+
 #include "sdhci.h"
 
 #define DRIVER_NAME "sdhci"
@@ -50,9 +50,9 @@
 static unsigned int debug_quirks = 0;
 static unsigned int debug_quirks2;
 
-static void sdhci_finish_data(struct sdhci_host *);
-
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+
+static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
 
 void sdhci_dumpregs(struct sdhci_host *host)
 {
@@ -113,6 +113,9 @@
                 }
         }
 
+        if (host->ops->dump_vendor_regs)
+                host->ops->dump_vendor_regs(host);
+
         SDHCI_DUMP("============================================\n");
 }
 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
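
The new dump_vendor_regs callback above lets a glue driver append controller-specific registers to the generic register dump. A minimal sketch of wiring it up; the function name, the ops struct and the 0x200 register offset are hypothetical:

static void my_sdhci_dump_vendor_regs(struct sdhci_host *host)
{
        /* 0x200 is a made-up vendor register offset, for illustration only. */
        pr_err("%s: VENDOR_REG: 0x%08x\n", mmc_hostname(host->mmc),
               sdhci_readl(host, 0x200));
}

static const struct sdhci_ops my_sdhci_ops = {
        .set_clock         = sdhci_set_clock,
        .set_bus_width     = sdhci_set_bus_width,
        .reset             = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
        .dump_vendor_regs  = my_sdhci_dump_vendor_regs,
};
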
@@ -122,6 +125,29 @@
  * Low level functions *
  * *
 \*****************************************************************************/
+
+static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
+{
+        u16 ctrl2;
+
+        ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+        if (ctrl2 & SDHCI_CTRL_V4_MODE)
+                return;
+
+        ctrl2 |= SDHCI_CTRL_V4_MODE;
+        sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+}
+
+/*
+ * This can be called before sdhci_add_host() by Vendor's host controller
+ * driver to enable v4 mode if supported.
+ */
+void sdhci_enable_v4_mode(struct sdhci_host *host)
+{
+        host->v4_mode = true;
+        sdhci_do_enable_v4_mode(host);
+}
+EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
 
 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
 {
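
As the comment notes, sdhci_enable_v4_mode() must be called before sdhci_add_host(), so that the capability registers are read with the SDHCI 4.10 layout (see the __sdhci_read_caps() and sdhci_can_64bit_dma() changes further down). A minimal probe sketch assuming the sdhci-pltfm helpers; the driver names are hypothetical:

static int my_sdhci_probe(struct platform_device *pdev)
{
        struct sdhci_host *host;

        host = sdhci_pltfm_init(pdev, &my_sdhci_pdata, 0);
        if (IS_ERR(host))
                return PTR_ERR(host);

        /* Opt in to the v4 register layout before capabilities are read. */
        sdhci_enable_v4_mode(host);

        return sdhci_add_host(host);
}
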
....@@ -247,16 +273,68 @@
247273 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
248274 }
249275
276
+static void sdhci_config_dma(struct sdhci_host *host)
277
+{
278
+ u8 ctrl;
279
+ u16 ctrl2;
280
+
281
+ if (host->version < SDHCI_SPEC_200)
282
+ return;
283
+
284
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
285
+
286
+ /*
287
+ * Always adjust the DMA selection as some controllers
288
+ * (e.g. JMicron) can't do PIO properly when the selection
289
+ * is ADMA.
290
+ */
291
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
292
+ if (!(host->flags & SDHCI_REQ_USE_DMA))
293
+ goto out;
294
+
295
+ /* Note if DMA Select is zero then SDMA is selected */
296
+ if (host->flags & SDHCI_USE_ADMA)
297
+ ctrl |= SDHCI_CTRL_ADMA32;
298
+
299
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
300
+ /*
301
+ * If v4 mode, all supported DMA can be 64-bit addressing if
302
+ * controller supports 64-bit system address, otherwise only
303
+ * ADMA can support 64-bit addressing.
304
+ */
305
+ if (host->v4_mode) {
306
+ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
307
+ ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
308
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
309
+ } else if (host->flags & SDHCI_USE_ADMA) {
310
+ /*
311
+ * Don't need to undo SDHCI_CTRL_ADMA32 in order to
312
+ * set SDHCI_CTRL_ADMA64.
313
+ */
314
+ ctrl |= SDHCI_CTRL_ADMA64;
315
+ }
316
+ }
317
+
318
+out:
319
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
320
+}
321
+
250322 static void sdhci_init(struct sdhci_host *host, int soft)
251323 {
252324 struct mmc_host *mmc = host->mmc;
325
+ unsigned long flags;
253326
254327 if (soft)
255328 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
256329 else
257330 sdhci_do_reset(host, SDHCI_RESET_ALL);
258331
332
+ if (host->v4_mode)
333
+ sdhci_do_enable_v4_mode(host);
334
+
335
+ spin_lock_irqsave(&host->lock, flags);
259336 sdhci_set_default_irqs(host);
337
+ spin_unlock_irqrestore(&host->lock, flags);
260338
261339 host->cqe_on = false;
262340
....@@ -269,13 +347,27 @@
269347
270348 static void sdhci_reinit(struct sdhci_host *host)
271349 {
350
+ u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
351
+
272352 sdhci_init(host, 0);
273353 sdhci_enable_card_detection(host);
354
+
355
+ /*
356
+ * A change to the card detect bits indicates a change in present state,
357
+ * refer sdhci_set_card_detection(). A card detect interrupt might have
358
+ * been missed while the host controller was being reset, so trigger a
359
+ * rescan to check.
360
+ */
361
+ if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
362
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
274363 }
275364
276365 static void __sdhci_led_activate(struct sdhci_host *host)
277366 {
278367 u8 ctrl;
368
+
369
+ if (host->quirks & SDHCI_QUIRK_NO_LED)
370
+ return;
279371
280372 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
281373 ctrl |= SDHCI_CTRL_LED;
....@@ -285,6 +377,9 @@
285377 static void __sdhci_led_deactivate(struct sdhci_host *host)
286378 {
287379 u8 ctrl;
380
+
381
+ if (host->quirks & SDHCI_QUIRK_NO_LED)
382
+ return;
288383
289384 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
290385 ctrl &= ~SDHCI_CTRL_LED;
....@@ -315,6 +410,9 @@
315410 {
316411 struct mmc_host *mmc = host->mmc;
317412
413
+ if (host->quirks & SDHCI_QUIRK_NO_LED)
414
+ return 0;
415
+
318416 snprintf(host->led_name, sizeof(host->led_name),
319417 "%s::", mmc_hostname(mmc));
320418
....@@ -328,6 +426,9 @@
328426
329427 static void sdhci_led_unregister(struct sdhci_host *host)
330428 {
429
+ if (host->quirks & SDHCI_QUIRK_NO_LED)
430
+ return;
431
+
331432 led_classdev_unregister(&host->led);
332433 }
333434
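
The SDHCI_QUIRK_NO_LED checks added to the four LED helpers above let a controller whose SDHCI_CTRL_LED bit is absent or repurposed skip LED handling entirely. A driver would typically set the quirk before sdhci_add_host():

        /* This controller has no LED signal; skip SDHCI_CTRL_LED handling. */
        host->quirks |= SDHCI_QUIRK_NO_LED;
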
....@@ -362,6 +463,28 @@
362463
363464 #endif
364465
466
+static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
467
+ unsigned long timeout)
468
+{
469
+ if (sdhci_data_line_cmd(mrq->cmd))
470
+ mod_timer(&host->data_timer, timeout);
471
+ else
472
+ mod_timer(&host->timer, timeout);
473
+}
474
+
475
+static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
476
+{
477
+ if (sdhci_data_line_cmd(mrq->cmd))
478
+ del_timer(&host->data_timer);
479
+ else
480
+ del_timer(&host->timer);
481
+}
482
+
483
+static inline bool sdhci_has_requests(struct sdhci_host *host)
484
+{
485
+ return host->cmd || host->data_cmd;
486
+}
487
+
365488 /*****************************************************************************\
366489 * *
367490 * Core functions *
....@@ -372,7 +495,7 @@
372495 {
373496 unsigned long flags;
374497 size_t blksize, len, chunk;
375
- u32 uninitialized_var(scratch);
498
+ u32 scratch;
376499 u8 *buf;
377500
378501 DBG("PIO reading\n");
....@@ -519,9 +642,13 @@
519642 }
520643 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
521644 /* Copy the data to the bounce buffer */
522
- sg_copy_to_buffer(data->sg, data->sg_len,
523
- host->bounce_buffer,
524
- length);
645
+ if (host->ops->copy_to_bounce_buffer) {
646
+ host->ops->copy_to_bounce_buffer(host,
647
+ data, length);
648
+ } else {
649
+ sg_copy_to_buffer(data->sg, data->sg_len,
650
+ host->bounce_buffer, length);
651
+ }
525652 }
526653 /* Switch ownership to the DMA */
527654 dma_sync_single_for_device(host->mmc->parent,
....@@ -566,10 +693,10 @@
566693 /* 32-bit and 64-bit descriptors have these members in same position */
567694 dma_desc->cmd = cpu_to_le16(cmd);
568695 dma_desc->len = cpu_to_le16(len);
569
- dma_desc->addr_lo = cpu_to_le32((u32)addr);
696
+ dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
570697
571698 if (host->flags & SDHCI_USE_64_BIT_DMA)
572
- dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
699
+ dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
573700
574701 *desc += host->desc_sz;
575702 }
....@@ -714,12 +841,27 @@
714841 }
715842 }
716843
717
-static u32 sdhci_sdma_address(struct sdhci_host *host)
844
+static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
845
+{
846
+ sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
847
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
848
+ sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
849
+}
850
+
851
+static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
718852 {
719853 if (host->bounce_buffer)
720854 return host->bounce_addr;
721855 else
722856 return sg_dma_address(host->data->sg);
857
+}
858
+
859
+static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
860
+{
861
+ if (host->v4_mode)
862
+ sdhci_set_adma_addr(host, addr);
863
+ else
864
+ sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
723865 }
724866
725867 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
....@@ -788,7 +930,7 @@
788930 bool *too_big)
789931 {
790932 u8 count;
791
- struct mmc_data *data = cmd->data;
933
+ struct mmc_data *data;
792934 unsigned target_timeout, current_timeout;
793935
794936 *too_big = true;
....@@ -802,6 +944,11 @@
802944 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
803945 return 0xE;
804946
947
+ /* Unspecified command, assume max */
948
+ if (cmd == NULL)
949
+ return 0xE;
950
+
951
+ data = cmd->data;
805952 /* Unspecified timeout, assume max */
806953 if (!data && !cmd->busy_timeout)
807954 return 0xE;
....@@ -859,7 +1006,7 @@
8591006 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
8601007 }
8611008
862
-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
1009
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
8631010 {
8641011 if (enable)
8651012 host->ier |= SDHCI_INT_DATA_TIMEOUT;
....@@ -868,43 +1015,36 @@
8681015 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
8691016 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
8701017 }
1018
+EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1019
+
1020
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1021
+{
1022
+ bool too_big = false;
1023
+ u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1024
+
1025
+ if (too_big &&
1026
+ host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1027
+ sdhci_calc_sw_timeout(host, cmd);
1028
+ sdhci_set_data_timeout_irq(host, false);
1029
+ } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1030
+ sdhci_set_data_timeout_irq(host, true);
1031
+ }
1032
+
1033
+ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1034
+}
1035
+EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
8711036
8721037 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
8731038 {
874
- u8 count;
875
-
876
- if (host->ops->set_timeout) {
1039
+ if (host->ops->set_timeout)
8771040 host->ops->set_timeout(host, cmd);
878
- } else {
879
- bool too_big = false;
880
-
881
- count = sdhci_calc_timeout(host, cmd, &too_big);
882
-
883
- if (too_big &&
884
- host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
885
- sdhci_calc_sw_timeout(host, cmd);
886
- sdhci_set_data_timeout_irq(host, false);
887
- } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
888
- sdhci_set_data_timeout_irq(host, true);
889
- }
890
-
891
- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
892
- }
1041
+ else
1042
+ __sdhci_set_timeout(host, cmd);
8931043 }
8941044
895
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1045
+static void sdhci_initialize_data(struct sdhci_host *host,
1046
+ struct mmc_data *data)
8961047 {
897
- u8 ctrl;
898
- struct mmc_data *data = cmd->data;
899
-
900
- host->data_timeout = 0;
901
-
902
- if (sdhci_data_line_cmd(cmd))
903
- sdhci_set_timeout(host, cmd);
904
-
905
- if (!data)
906
- return;
907
-
9081048 WARN_ON(host->data);
9091049
9101050 /* Sanity checks */
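
Exporting sdhci_set_data_timeout_irq() and the factored-out __sdhci_set_timeout() lets a vendor sdhci_ops.set_timeout implementation reuse the generic timeout calculation and only override the cases it cares about. A hedged sketch; the erase-command special case is purely illustrative:

static void my_sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        /* Example policy: force the maximum hardware timeout for erase. */
        if (cmd && cmd->opcode == MMC_ERASE)
                sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
        else
                __sdhci_set_timeout(host, cmd);
}
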
....@@ -915,6 +1055,34 @@
9151055 host->data = data;
9161056 host->data_early = 0;
9171057 host->data->bytes_xfered = 0;
1058
+}
1059
+
1060
+static inline void sdhci_set_block_info(struct sdhci_host *host,
1061
+ struct mmc_data *data)
1062
+{
1063
+ /* Set the DMA boundary value and block size */
1064
+ sdhci_writew(host,
1065
+ SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1066
+ SDHCI_BLOCK_SIZE);
1067
+ /*
1068
+ * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
1069
+ * can be supported, in that case 16-bit block count register must be 0.
1070
+ */
1071
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1072
+ (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1073
+ if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1074
+ sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1075
+ sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1076
+ } else {
1077
+ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1078
+ }
1079
+}
1080
+
1081
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1082
+{
1083
+ struct mmc_data *data = cmd->data;
1084
+
1085
+ sdhci_initialize_data(host, data);
9181086
9191087 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
9201088 struct scatterlist *sg;
....@@ -966,6 +1134,8 @@
9661134 }
9671135 }
9681136
1137
+ sdhci_config_dma(host);
1138
+
9691139 if (host->flags & SDHCI_REQ_USE_DMA) {
9701140 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
9711141
....@@ -978,37 +1148,11 @@
9781148 host->flags &= ~SDHCI_REQ_USE_DMA;
9791149 } else if (host->flags & SDHCI_USE_ADMA) {
9801150 sdhci_adma_table_pre(host, data, sg_cnt);
981
-
982
- sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
983
- if (host->flags & SDHCI_USE_64_BIT_DMA)
984
- sdhci_writel(host,
985
- (u64)host->adma_addr >> 32,
986
- SDHCI_ADMA_ADDRESS_HI);
1151
+ sdhci_set_adma_addr(host, host->adma_addr);
9871152 } else {
9881153 WARN_ON(sg_cnt != 1);
989
- sdhci_writel(host, sdhci_sdma_address(host),
990
- SDHCI_DMA_ADDRESS);
1154
+ sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
9911155 }
992
- }
993
-
994
- /*
995
- * Always adjust the DMA selection as some controllers
996
- * (e.g. JMicron) can't do PIO properly when the selection
997
- * is ADMA.
998
- */
999
- if (host->version >= SDHCI_SPEC_200) {
1000
- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1001
- ctrl &= ~SDHCI_CTRL_DMA_MASK;
1002
- if ((host->flags & SDHCI_REQ_USE_DMA) &&
1003
- (host->flags & SDHCI_USE_ADMA)) {
1004
- if (host->flags & SDHCI_USE_64_BIT_DMA)
1005
- ctrl |= SDHCI_CTRL_ADMA64;
1006
- else
1007
- ctrl |= SDHCI_CTRL_ADMA32;
1008
- } else {
1009
- ctrl |= SDHCI_CTRL_SDMA;
1010
- }
1011
- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
10121156 }
10131157
10141158 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
....@@ -1025,17 +1169,249 @@
10251169
10261170 sdhci_set_transfer_irqs(host);
10271171
1028
- /* Set the DMA boundary value and block size */
1029
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1030
- SDHCI_BLOCK_SIZE);
1031
- sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1172
+ sdhci_set_block_info(host, data);
10321173 }
1174
+
1175
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1176
+
1177
+static int sdhci_external_dma_init(struct sdhci_host *host)
1178
+{
1179
+ int ret = 0;
1180
+ struct mmc_host *mmc = host->mmc;
1181
+
1182
+ host->tx_chan = dma_request_chan(mmc->parent, "tx");
1183
+ if (IS_ERR(host->tx_chan)) {
1184
+ ret = PTR_ERR(host->tx_chan);
1185
+ if (ret != -EPROBE_DEFER)
1186
+ pr_warn("Failed to request TX DMA channel.\n");
1187
+ host->tx_chan = NULL;
1188
+ return ret;
1189
+ }
1190
+
1191
+ host->rx_chan = dma_request_chan(mmc->parent, "rx");
1192
+ if (IS_ERR(host->rx_chan)) {
1193
+ if (host->tx_chan) {
1194
+ dma_release_channel(host->tx_chan);
1195
+ host->tx_chan = NULL;
1196
+ }
1197
+
1198
+ ret = PTR_ERR(host->rx_chan);
1199
+ if (ret != -EPROBE_DEFER)
1200
+ pr_warn("Failed to request RX DMA channel.\n");
1201
+ host->rx_chan = NULL;
1202
+ }
1203
+
1204
+ return ret;
1205
+}
1206
+
1207
+static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1208
+ struct mmc_data *data)
1209
+{
1210
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
1211
+}
1212
+
1213
+static int sdhci_external_dma_setup(struct sdhci_host *host,
1214
+ struct mmc_command *cmd)
1215
+{
1216
+ int ret, i;
1217
+ enum dma_transfer_direction dir;
1218
+ struct dma_async_tx_descriptor *desc;
1219
+ struct mmc_data *data = cmd->data;
1220
+ struct dma_chan *chan;
1221
+ struct dma_slave_config cfg;
1222
+ dma_cookie_t cookie;
1223
+ int sg_cnt;
1224
+
1225
+ if (!host->mapbase)
1226
+ return -EINVAL;
1227
+
1228
+ memset(&cfg, 0, sizeof(cfg));
1229
+ cfg.src_addr = host->mapbase + SDHCI_BUFFER;
1230
+ cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
1231
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1232
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1233
+ cfg.src_maxburst = data->blksz / 4;
1234
+ cfg.dst_maxburst = data->blksz / 4;
1235
+
1236
+ /* Sanity check: all the SG entries must be aligned by block size. */
1237
+ for (i = 0; i < data->sg_len; i++) {
1238
+ if ((data->sg + i)->length % data->blksz)
1239
+ return -EINVAL;
1240
+ }
1241
+
1242
+ chan = sdhci_external_dma_channel(host, data);
1243
+
1244
+ ret = dmaengine_slave_config(chan, &cfg);
1245
+ if (ret)
1246
+ return ret;
1247
+
1248
+ sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1249
+ if (sg_cnt <= 0)
1250
+ return -EINVAL;
1251
+
1252
+ dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
1253
+ desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
1254
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1255
+ if (!desc)
1256
+ return -EINVAL;
1257
+
1258
+ desc->callback = NULL;
1259
+ desc->callback_param = NULL;
1260
+
1261
+ cookie = dmaengine_submit(desc);
1262
+ if (dma_submit_error(cookie))
1263
+ ret = cookie;
1264
+
1265
+ return ret;
1266
+}
1267
+
1268
+static void sdhci_external_dma_release(struct sdhci_host *host)
1269
+{
1270
+ if (host->tx_chan) {
1271
+ dma_release_channel(host->tx_chan);
1272
+ host->tx_chan = NULL;
1273
+ }
1274
+
1275
+ if (host->rx_chan) {
1276
+ dma_release_channel(host->rx_chan);
1277
+ host->rx_chan = NULL;
1278
+ }
1279
+
1280
+ sdhci_switch_external_dma(host, false);
1281
+}
1282
+
1283
+static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
1284
+ struct mmc_command *cmd)
1285
+{
1286
+ struct mmc_data *data = cmd->data;
1287
+
1288
+ sdhci_initialize_data(host, data);
1289
+
1290
+ host->flags |= SDHCI_REQ_USE_DMA;
1291
+ sdhci_set_transfer_irqs(host);
1292
+
1293
+ sdhci_set_block_info(host, data);
1294
+}
1295
+
1296
+static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1297
+ struct mmc_command *cmd)
1298
+{
1299
+ if (!sdhci_external_dma_setup(host, cmd)) {
1300
+ __sdhci_external_dma_prepare_data(host, cmd);
1301
+ } else {
1302
+ sdhci_external_dma_release(host);
1303
+ pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
1304
+ mmc_hostname(host->mmc));
1305
+ sdhci_prepare_data(host, cmd);
1306
+ }
1307
+}
1308
+
1309
+static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1310
+ struct mmc_command *cmd)
1311
+{
1312
+ struct dma_chan *chan;
1313
+
1314
+ if (!cmd->data)
1315
+ return;
1316
+
1317
+ chan = sdhci_external_dma_channel(host, cmd->data);
1318
+ if (chan)
1319
+ dma_async_issue_pending(chan);
1320
+}
1321
+
1322
+#else
1323
+
1324
+static inline int sdhci_external_dma_init(struct sdhci_host *host)
1325
+{
1326
+ return -EOPNOTSUPP;
1327
+}
1328
+
1329
+static inline void sdhci_external_dma_release(struct sdhci_host *host)
1330
+{
1331
+}
1332
+
1333
+static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1334
+ struct mmc_command *cmd)
1335
+{
1336
+ /* This should never happen */
1337
+ WARN_ON_ONCE(1);
1338
+}
1339
+
1340
+static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1341
+ struct mmc_command *cmd)
1342
+{
1343
+}
1344
+
1345
+static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1346
+ struct mmc_data *data)
1347
+{
1348
+ return NULL;
1349
+}
1350
+
1351
+#endif
1352
+
1353
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
1354
+{
1355
+ host->use_external_dma = en;
1356
+}
1357
+EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
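
The CONFIG_MMC_SDHCI_EXTERNAL_DMA block above lets a host hand data transfers to a generic dmaengine channel pair instead of the controller's own SDMA/ADMA engine; the channels are requested by the names "tx" and "rx", and a glue driver opts in through sdhci_switch_external_dma(). A minimal, hedged opt-in sketch for a probe path, assuming the platform supplies matching dma-names:

        /*
         * Prefer the platform's "tx"/"rx" dmaengine channels over the
         * controller's internal SDMA/ADMA for data transfers.
         */
        sdhci_switch_external_dma(host, true);
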
10331358
10341359 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
10351360 struct mmc_request *mrq)
10361361 {
10371362 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
10381363 !mrq->cap_cmd_during_tfr;
1364
+}
1365
+
1366
+static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
1367
+ struct mmc_request *mrq)
1368
+{
1369
+ return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1370
+}
1371
+
1372
+static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
1373
+ struct mmc_request *mrq)
1374
+{
1375
+ return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
1376
+}
1377
+
1378
+static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1379
+ struct mmc_command *cmd,
1380
+ u16 *mode)
1381
+{
1382
+ bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1383
+ (cmd->opcode != SD_IO_RW_EXTENDED);
1384
+ bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
1385
+ u16 ctrl2;
1386
+
1387
+ /*
1388
+ * In case of Version 4.10 or later, use of 'Auto CMD Auto
1389
+ * Select' is recommended rather than use of 'Auto CMD12
1390
+ * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
1391
+ * here because some controllers (e.g sdhci-of-dwmshc) expect it.
1392
+ */
1393
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1394
+ (use_cmd12 || use_cmd23)) {
1395
+ *mode |= SDHCI_TRNS_AUTO_SEL;
1396
+
1397
+ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1398
+ if (use_cmd23)
1399
+ ctrl2 |= SDHCI_CMD23_ENABLE;
1400
+ else
1401
+ ctrl2 &= ~SDHCI_CMD23_ENABLE;
1402
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1403
+
1404
+ return;
1405
+ }
1406
+
1407
+ /*
1408
+ * If we are sending CMD23, CMD12 never gets sent
1409
+ * on successful completion (so no Auto-CMD12).
1410
+ */
1411
+ if (use_cmd12)
1412
+ *mode |= SDHCI_TRNS_AUTO_CMD12;
1413
+ else if (use_cmd23)
1414
+ *mode |= SDHCI_TRNS_AUTO_CMD23;
10391415 }
10401416
10411417 static void sdhci_set_transfer_mode(struct sdhci_host *host,
....@@ -1066,17 +1442,9 @@
10661442
10671443 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
10681444 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1069
- /*
1070
- * If we are sending CMD23, CMD12 never gets sent
1071
- * on successful completion (so no Auto-CMD12).
1072
- */
1073
- if (sdhci_auto_cmd12(host, cmd->mrq) &&
1074
- (cmd->opcode != SD_IO_RW_EXTENDED))
1075
- mode |= SDHCI_TRNS_AUTO_CMD12;
1076
- else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
1077
- mode |= SDHCI_TRNS_AUTO_CMD23;
1445
+ sdhci_auto_cmd_select(host, cmd, &mode);
1446
+ if (sdhci_auto_cmd23(host, cmd->mrq))
10781447 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1079
- }
10801448 }
10811449
10821450 if (data->flags & MMC_DATA_READ)
....@@ -1096,7 +1464,7 @@
10961464 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
10971465 }
10981466
1099
-static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1467
+static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
11001468 {
11011469 int i;
11021470
....@@ -1115,11 +1483,9 @@
11151483 }
11161484
11171485 WARN_ON(i >= SDHCI_MAX_MRQS);
1118
-
1119
- tasklet_schedule(&host->finish_tasklet);
11201486 }
11211487
1122
-static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1488
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
11231489 {
11241490 if (host->cmd && host->cmd->mrq == mrq)
11251491 host->cmd = NULL;
....@@ -1127,16 +1493,31 @@
11271493 if (host->data_cmd && host->data_cmd->mrq == mrq)
11281494 host->data_cmd = NULL;
11291495
1496
+ if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
1497
+ host->deferred_cmd = NULL;
1498
+
11301499 if (host->data && host->data->mrq == mrq)
11311500 host->data = NULL;
11321501
11331502 if (sdhci_needs_reset(host, mrq))
11341503 host->pending_reset = true;
11351504
1136
- __sdhci_finish_mrq(host, mrq);
1505
+ sdhci_set_mrq_done(host, mrq);
1506
+
1507
+ sdhci_del_timer(host, mrq);
1508
+
1509
+ if (!sdhci_has_requests(host))
1510
+ sdhci_led_deactivate(host);
11371511 }
11381512
1139
-static void sdhci_finish_data(struct sdhci_host *host)
1513
+static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1514
+{
1515
+ __sdhci_finish_mrq(host, mrq);
1516
+
1517
+ queue_work(host->complete_wq, &host->complete_work);
1518
+}
1519
+
1520
+static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
11401521 {
11411522 struct mmc_command *data_cmd = host->data_cmd;
11421523 struct mmc_data *data = host->data;
....@@ -1172,47 +1553,47 @@
11721553
11731554 /*
11741555 * Need to send CMD12 if -
1175
- * a) open-ended multiblock transfer (no CMD23)
1556
+ * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
11761557 * b) error in multiblock transfer
11771558 */
11781559 if (data->stop &&
1179
- (data->error ||
1180
- !data->mrq->sbc)) {
1560
+ ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
1561
+ data->error)) {
11811562 /*
11821563 * 'cap_cmd_during_tfr' request must not use the command line
11831564 * after mmc_command_done() has been called. It is upper layer's
11841565 * responsibility to send the stop command if required.
11851566 */
11861567 if (data->mrq->cap_cmd_during_tfr) {
1187
- sdhci_finish_mrq(host, data->mrq);
1568
+ __sdhci_finish_mrq(host, data->mrq);
11881569 } else {
11891570 /* Avoid triggering warning in sdhci_send_command() */
11901571 host->cmd = NULL;
1191
- sdhci_send_command(host, data->stop);
1572
+ if (!sdhci_send_command(host, data->stop)) {
1573
+ if (sw_data_timeout) {
1574
+ /*
1575
+ * This is anyway a sw data timeout, so
1576
+ * give up now.
1577
+ */
1578
+ data->stop->error = -EIO;
1579
+ __sdhci_finish_mrq(host, data->mrq);
1580
+ } else {
1581
+ WARN_ON(host->deferred_cmd);
1582
+ host->deferred_cmd = data->stop;
1583
+ }
1584
+ }
11921585 }
11931586 } else {
1194
- sdhci_finish_mrq(host, data->mrq);
1587
+ __sdhci_finish_mrq(host, data->mrq);
11951588 }
11961589 }
11971590
1198
-static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
1199
- unsigned long timeout)
1591
+static void sdhci_finish_data(struct sdhci_host *host)
12001592 {
1201
- if (sdhci_data_line_cmd(mrq->cmd))
1202
- mod_timer(&host->data_timer, timeout);
1203
- else
1204
- mod_timer(&host->timer, timeout);
1593
+ __sdhci_finish_data(host, false);
12051594 }
12061595
1207
-static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
1208
-{
1209
- if (sdhci_data_line_cmd(mrq->cmd))
1210
- del_timer(&host->data_timer);
1211
- else
1212
- del_timer(&host->timer);
1213
-}
1214
-
1215
-void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1596
+static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
12161597 {
12171598 int flags;
12181599 u32 mask;
....@@ -1227,9 +1608,6 @@
12271608 cmd->opcode == MMC_STOP_TRANSMISSION)
12281609 cmd->flags |= MMC_RSP_BUSY;
12291610
1230
- /* Wait max 10 ms */
1231
- timeout = 10;
1232
-
12331611 mask = SDHCI_CMD_INHIBIT;
12341612 if (sdhci_data_line_cmd(cmd))
12351613 mask |= SDHCI_DATA_INHIBIT;
....@@ -1239,37 +1617,36 @@
12391617 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
12401618 mask &= ~SDHCI_DATA_INHIBIT;
12411619
1242
- while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1243
- if (timeout == 0) {
1244
- pr_err("%s: Controller never released inhibit bit(s).\n",
1245
- mmc_hostname(host->mmc));
1246
- sdhci_dumpregs(host);
1247
- cmd->error = -EIO;
1248
- sdhci_finish_mrq(host, cmd->mrq);
1249
- return;
1250
- }
1251
- timeout--;
1252
- mdelay(1);
1253
- }
1620
+ if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
1621
+ return false;
12541622
12551623 host->cmd = cmd;
1624
+ host->data_timeout = 0;
12561625 if (sdhci_data_line_cmd(cmd)) {
12571626 WARN_ON(host->data_cmd);
12581627 host->data_cmd = cmd;
1628
+ sdhci_set_timeout(host, cmd);
12591629 }
12601630
1261
- sdhci_prepare_data(host, cmd);
1631
+ if (cmd->data) {
1632
+ if (host->use_external_dma)
1633
+ sdhci_external_dma_prepare_data(host, cmd);
1634
+ else
1635
+ sdhci_prepare_data(host, cmd);
1636
+ }
12621637
12631638 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
12641639
12651640 sdhci_set_transfer_mode(host, cmd);
12661641
12671642 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1268
- pr_err("%s: Unsupported response type!\n",
1269
- mmc_hostname(host->mmc));
1270
- cmd->error = -EINVAL;
1271
- sdhci_finish_mrq(host, cmd->mrq);
1272
- return;
1643
+ WARN_ONCE(1, "Unsupported response type!\n");
1644
+ /*
1645
+ * This does not happen in practice because 136-bit response
1646
+ * commands never have busy waiting, so rather than complicate
1647
+ * the error path, just remove busy waiting and continue.
1648
+ */
1649
+ cmd->flags &= ~MMC_RSP_BUSY;
12731650 }
12741651
12751652 if (!(cmd->flags & MMC_RSP_PRESENT))
....@@ -1300,9 +1677,65 @@
13001677 timeout += 10 * HZ;
13011678 sdhci_mod_timer(host, cmd->mrq, timeout);
13021679
1680
+ if (host->use_external_dma)
1681
+ sdhci_external_dma_pre_transfer(host, cmd);
1682
+
13031683 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1684
+
1685
+ return true;
13041686 }
1305
-EXPORT_SYMBOL_GPL(sdhci_send_command);
1687
+
1688
+static bool sdhci_present_error(struct sdhci_host *host,
1689
+ struct mmc_command *cmd, bool present)
1690
+{
1691
+ if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1692
+ cmd->error = -ENOMEDIUM;
1693
+ return true;
1694
+ }
1695
+
1696
+ return false;
1697
+}
1698
+
1699
+static bool sdhci_send_command_retry(struct sdhci_host *host,
1700
+ struct mmc_command *cmd,
1701
+ unsigned long flags)
1702
+ __releases(host->lock)
1703
+ __acquires(host->lock)
1704
+{
1705
+ struct mmc_command *deferred_cmd = host->deferred_cmd;
1706
+ int timeout = 10; /* Approx. 10 ms */
1707
+ bool present;
1708
+
1709
+ while (!sdhci_send_command(host, cmd)) {
1710
+ if (!timeout--) {
1711
+ pr_err("%s: Controller never released inhibit bit(s).\n",
1712
+ mmc_hostname(host->mmc));
1713
+ sdhci_dumpregs(host);
1714
+ cmd->error = -EIO;
1715
+ return false;
1716
+ }
1717
+
1718
+ spin_unlock_irqrestore(&host->lock, flags);
1719
+
1720
+ usleep_range(1000, 1250);
1721
+
1722
+ present = host->mmc->ops->get_cd(host->mmc);
1723
+
1724
+ spin_lock_irqsave(&host->lock, flags);
1725
+
1726
+ /* A deferred command might disappear, handle that */
1727
+ if (cmd == deferred_cmd && cmd != host->deferred_cmd)
1728
+ return true;
1729
+
1730
+ if (sdhci_present_error(host, cmd, present))
1731
+ return false;
1732
+ }
1733
+
1734
+ if (cmd == host->deferred_cmd)
1735
+ host->deferred_cmd = NULL;
1736
+
1737
+ return true;
1738
+}
13061739
13071740 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
13081741 {
....@@ -1363,7 +1796,10 @@
13631796
13641797 /* Finished CMD23, now send actual command. */
13651798 if (cmd == cmd->mrq->sbc) {
1366
- sdhci_send_command(host, cmd->mrq->cmd);
1799
+ if (!sdhci_send_command(host, cmd->mrq->cmd)) {
1800
+ WARN_ON(host->deferred_cmd);
1801
+ host->deferred_cmd = cmd->mrq->cmd;
1802
+ }
13671803 } else {
13681804
13691805 /* Processed actual command. */
....@@ -1371,7 +1807,7 @@
13711807 sdhci_finish_data(host);
13721808
13731809 if (!cmd->data)
1374
- sdhci_finish_mrq(host, cmd->mrq);
1810
+ __sdhci_finish_mrq(host, cmd->mrq);
13751811 }
13761812 }
13771813
....@@ -1427,10 +1863,9 @@
14271863
14281864 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
14291865 pre_val = sdhci_get_preset_value(host);
1430
- div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1431
- >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1866
+ div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
14321867 if (host->clk_mul &&
1433
- (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1868
+ (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
14341869 clk = SDHCI_PROG_CLOCK_MODE;
14351870 real_div = div + 1;
14361871 clk_mul = host->clk_mul;
....@@ -1513,8 +1948,8 @@
15131948 clk |= SDHCI_CLOCK_INT_EN;
15141949 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
15151950
1516
- /* Wait max 20 ms */
1517
- timeout = ktime_add_ms(ktime_get(), 20);
1951
+ /* Wait max 150 ms */
1952
+ timeout = ktime_add_ms(ktime_get(), 150);
15181953 while (1) {
15191954 bool timedout = ktime_after(ktime_get(), timeout);
15201955
....@@ -1528,6 +1963,29 @@
15281963 return;
15291964 }
15301965 udelay(10);
1966
+ }
1967
+
1968
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1969
+ clk |= SDHCI_CLOCK_PLL_EN;
1970
+ clk &= ~SDHCI_CLOCK_INT_STABLE;
1971
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1972
+
1973
+ /* Wait max 150 ms */
1974
+ timeout = ktime_add_ms(ktime_get(), 150);
1975
+ while (1) {
1976
+ bool timedout = ktime_after(ktime_get(), timeout);
1977
+
1978
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1979
+ if (clk & SDHCI_CLOCK_INT_STABLE)
1980
+ break;
1981
+ if (timedout) {
1982
+ pr_err("%s: PLL clock never stabilised.\n",
1983
+ mmc_hostname(host->mmc));
1984
+ sdhci_dumpregs(host);
1985
+ return;
1986
+ }
1987
+ udelay(10);
1988
+ }
15311989 }
15321990
15331991 clk |= SDHCI_CLOCK_CARD_EN;
....@@ -1654,6 +2112,25 @@
16542112 }
16552113 EXPORT_SYMBOL_GPL(sdhci_set_power);
16562114
2115
+/*
2116
+ * Some controllers need to configure a valid bus voltage on their power
2117
+ * register regardless of whether an external regulator is taking care of power
2118
+ * supply. This helper function takes care of it if set as the controller's
2119
+ * sdhci_ops.set_power callback.
2120
+ */
2121
+void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2122
+ unsigned char mode,
2123
+ unsigned short vdd)
2124
+{
2125
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
2126
+ struct mmc_host *mmc = host->mmc;
2127
+
2128
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2129
+ }
2130
+ sdhci_set_power_noreg(host, mode, vdd);
2131
+}
2132
+EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
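
Per the comment above, this helper is meant to be plugged in directly as a controller's .set_power op when the hardware still wants a plausible bus voltage in SDHCI_POWER_CONTROL even though an external regulator powers the card. A minimal sketch; the ops struct name is hypothetical:

static const struct sdhci_ops vendor_sdhci_ops = {
        /* ... clock, reset and UHS ops ... */
        .set_power = sdhci_set_power_and_bus_voltage,
};
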
2133
+
16572134 /*****************************************************************************\
16582135 * *
16592136 * MMC callbacks *
....@@ -1662,11 +2139,10 @@
16622139
16632140 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
16642141 {
1665
- struct sdhci_host *host;
1666
- int present;
2142
+ struct sdhci_host *host = mmc_priv(mmc);
2143
+ struct mmc_command *cmd;
16672144 unsigned long flags;
1668
-
1669
- host = mmc_priv(mmc);
2145
+ bool present;
16702146
16712147 /* Firstly check card presence */
16722148 present = mmc->ops->get_cd(mmc);
....@@ -1675,31 +2151,57 @@
16752151
16762152 sdhci_led_activate(host);
16772153
1678
- /*
1679
- * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1680
- * requests if Auto-CMD12 is enabled.
1681
- */
1682
- if (sdhci_auto_cmd12(host, mrq)) {
1683
- if (mrq->stop) {
1684
- mrq->data->stop = NULL;
1685
- mrq->stop = NULL;
1686
- }
1687
- }
2154
+ if (sdhci_present_error(host, mrq->cmd, present))
2155
+ goto out_finish;
16882156
1689
- if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1690
- mrq->cmd->error = -ENOMEDIUM;
1691
- sdhci_finish_mrq(host, mrq);
1692
- } else {
1693
- if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1694
- sdhci_send_command(host, mrq->sbc);
1695
- else
1696
- sdhci_send_command(host, mrq->cmd);
1697
- }
2157
+ cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
16982158
1699
- mmiowb();
2159
+ if (!sdhci_send_command_retry(host, cmd, flags))
2160
+ goto out_finish;
2161
+
2162
+ spin_unlock_irqrestore(&host->lock, flags);
2163
+
2164
+ return;
2165
+
2166
+out_finish:
2167
+ sdhci_finish_mrq(host, mrq);
17002168 spin_unlock_irqrestore(&host->lock, flags);
17012169 }
17022170 EXPORT_SYMBOL_GPL(sdhci_request);
2171
+
2172
+int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2173
+{
2174
+ struct sdhci_host *host = mmc_priv(mmc);
2175
+ struct mmc_command *cmd;
2176
+ unsigned long flags;
2177
+ int ret = 0;
2178
+
2179
+ spin_lock_irqsave(&host->lock, flags);
2180
+
2181
+ if (sdhci_present_error(host, mrq->cmd, true)) {
2182
+ sdhci_finish_mrq(host, mrq);
2183
+ goto out_finish;
2184
+ }
2185
+
2186
+ cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2187
+
2188
+ /*
2189
+ * The HSQ may send a command in interrupt context without polling
2190
+ * the busy signaling, which means we should return BUSY if controller
2191
+ * has not released inhibit bits to allow HSQ trying to send request
2192
+ * again in non-atomic context. So we should not finish this request
2193
+ * here.
2194
+ */
2195
+ if (!sdhci_send_command(host, cmd))
2196
+ ret = -EBUSY;
2197
+ else
2198
+ sdhci_led_activate(host);
2199
+
2200
+out_finish:
2201
+ spin_unlock_irqrestore(&host->lock, flags);
2202
+ return ret;
2203
+}
2204
+EXPORT_SYMBOL_GPL(sdhci_request_atomic);
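
sdhci_request_atomic() is exported for the MMC host software queue (HSQ), which can issue the next request from interrupt context; as the comment explains, -EBUSY is returned while the inhibit bits are still set so that HSQ retries from non-atomic context instead of spinning here. A hedged sketch of how a glue driver might install it, assuming it goes through the host's mmc_host_ops copy:

        /* Allow the software queue (HSQ) to submit requests atomically. */
        host->mmc_host_ops.request_atomic = sdhci_request_atomic;
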
17032205
17042206 void sdhci_set_bus_width(struct sdhci_host *host, int width)
17052207 {
....@@ -1883,8 +2385,8 @@
18832385
18842386 sdhci_enable_preset_value(host, true);
18852387 preset = sdhci_get_preset_value(host);
1886
- ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1887
- >> SDHCI_PRESET_DRV_SHIFT;
2388
+ ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2389
+ preset);
18882390 }
18892391
18902392 /* Re-enable SD Clock */
....@@ -1899,8 +2401,6 @@
18992401 */
19002402 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
19012403 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1902
-
1903
- mmiowb();
19042404 }
19052405 EXPORT_SYMBOL_GPL(sdhci_set_ios);
19062406
....@@ -1908,6 +2408,7 @@
19082408 {
19092409 struct sdhci_host *host = mmc_priv(mmc);
19102410 int gpio_cd = mmc_gpio_get_cd(mmc);
2411
+ bool allow = true;
19112412
19122413 if (host->flags & SDHCI_DEVICE_DEAD)
19132414 return 0;
....@@ -1915,6 +2416,10 @@
19152416 /* If nonremovable, assume that the card is always present. */
19162417 if (!mmc_card_is_removable(host->mmc))
19172418 return 1;
2419
+
2420
+ trace_android_vh_sdhci_get_cd(host, &allow);
2421
+ if (!allow)
2422
+ return 0;
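
trace_android_vh_sdhci_get_cd() is an Android vendor hook (declared in the trace/hooks/mmc_core.h header added above) that lets an out-of-tree module veto card detection. A rough module-side sketch, assuming the usual register_trace_android_vh_* pattern generated for vendor hooks; the handler and the policy flag are hypothetical:

static bool my_slot_disabled;   /* hypothetical policy flag */

static void my_sdhci_get_cd_hook(void *unused, struct sdhci_host *host, bool *allow)
{
        if (my_slot_disabled)
                *allow = false;
}

static int __init my_module_init(void)
{
        return register_trace_android_vh_sdhci_get_cd(my_sdhci_get_cd_hook, NULL);
}
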
19182423
19192424 /*
19202425 * Try slot gpio detect, if defined it take precedence
....@@ -1942,6 +2447,8 @@
19422447 is_readonly = 0;
19432448 else if (host->ops->get_ro)
19442449 is_readonly = host->ops->get_ro(host);
2450
+ else if (mmc_can_gpio_ro(host->mmc))
2451
+ is_readonly = mmc_gpio_get_ro(host->mmc);
19452452 else
19462453 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
19472454 & SDHCI_WRITE_PROTECT);
....@@ -1992,7 +2499,6 @@
19922499
19932500 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
19942501 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1995
- mmiowb();
19962502 }
19972503 }
19982504
....@@ -2005,11 +2511,6 @@
20052511 pm_runtime_get_noresume(host->mmc->parent);
20062512
20072513 spin_lock_irqsave(&host->lock, flags);
2008
- if (enable)
2009
- host->flags |= SDHCI_SDIO_IRQ_ENABLED;
2010
- else
2011
- host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
2012
-
20132514 sdhci_enable_sdio_irq_nolock(host, enable);
20142515 spin_unlock_irqrestore(&host->lock, flags);
20152516
....@@ -2017,6 +2518,16 @@
20172518 pm_runtime_put_noidle(host->mmc->parent);
20182519 }
20192520 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2521
+
2522
+static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2523
+{
2524
+ struct sdhci_host *host = mmc_priv(mmc);
2525
+ unsigned long flags;
2526
+
2527
+ spin_lock_irqsave(&host->lock, flags);
2528
+ sdhci_enable_sdio_irq_nolock(host, true);
2529
+ spin_unlock_irqrestore(&host->lock, flags);
2530
+}
20202531
20212532 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
20222533 struct mmc_ios *ios)
....@@ -2044,7 +2555,7 @@
20442555
20452556 if (!IS_ERR(mmc->supply.vqmmc)) {
20462557 ret = mmc_regulator_set_vqmmc(mmc, ios);
2047
- if (ret) {
2558
+ if (ret < 0) {
20482559 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
20492560 mmc_hostname(mmc));
20502561 return -EIO;
....@@ -2058,7 +2569,7 @@
20582569 if (!(ctrl & SDHCI_CTRL_VDD_180))
20592570 return 0;
20602571
2061
- pr_warn("%s: 3.3V regulator output did not became stable\n",
2572
+ pr_warn("%s: 3.3V regulator output did not become stable\n",
20622573 mmc_hostname(mmc));
20632574
20642575 return -EAGAIN;
....@@ -2067,7 +2578,7 @@
20672578 return -EINVAL;
20682579 if (!IS_ERR(mmc->supply.vqmmc)) {
20692580 ret = mmc_regulator_set_vqmmc(mmc, ios);
2070
- if (ret) {
2581
+ if (ret < 0) {
20712582 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
20722583 mmc_hostname(mmc));
20732584 return -EIO;
....@@ -2090,7 +2601,7 @@
20902601 if (ctrl & SDHCI_CTRL_VDD_180)
20912602 return 0;
20922603
2093
- pr_warn("%s: 1.8V regulator output did not became stable\n",
2604
+ pr_warn("%s: 1.8V regulator output did not become stable\n",
20942605 mmc_hostname(mmc));
20952606
20962607 return -EAGAIN;
....@@ -2099,7 +2610,7 @@
20992610 return -EINVAL;
21002611 if (!IS_ERR(mmc->supply.vqmmc)) {
21012612 ret = mmc_regulator_set_vqmmc(mmc, ios);
2102
- if (ret) {
2613
+ if (ret < 0) {
21032614 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
21042615 mmc_hostname(mmc));
21052616 return -EIO;
....@@ -2179,7 +2690,7 @@
21792690 }
21802691 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
21812692
2182
-static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2693
+void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
21832694 {
21842695 sdhci_reset_tuning(host);
21852696
....@@ -2190,6 +2701,7 @@
21902701
21912702 mmc_abort_tuning(host->mmc, opcode);
21922703 }
2704
+EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
21932705
21942706 /*
21952707 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
....@@ -2232,7 +2744,11 @@
22322744 */
22332745 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
22342746
2235
- sdhci_send_command(host, &cmd);
2747
+ if (!sdhci_send_command_retry(host, &cmd, flags)) {
2748
+ spin_unlock_irqrestore(&host->lock, flags);
2749
+ host->tuning_done = 0;
2750
+ return;
2751
+ }
22362752
22372753 host->cmd = NULL;
22382754
....@@ -2240,7 +2756,6 @@
22402756
22412757 host->tuning_done = 0;
22422758
2243
- mmiowb();
22442759 spin_unlock_irqrestore(&host->lock, flags);
22452760
22462761 /* Wait for Buffer Read Ready interrupt */
....@@ -2250,15 +2765,15 @@
22502765 }
22512766 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
22522767
2253
-static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2768
+static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
22542769 {
22552770 int i;
22562771
22572772 /*
22582773 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
2259
- * of loops reaches 40 times.
2774
+ * of loops reaches tuning loop count.
22602775 */
2261
- for (i = 0; i < MAX_TUNING_LOOP; i++) {
2776
+ for (i = 0; i < host->tuning_loop_count; i++) {
22622777 u16 ctrl;
22632778
22642779 sdhci_send_tuning(host, opcode);
....@@ -2267,24 +2782,26 @@
22672782 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
22682783 mmc_hostname(host->mmc));
22692784 sdhci_abort_tuning(host, opcode);
2270
- return;
2271
- }
2272
-
2273
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2274
- if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2275
- if (ctrl & SDHCI_CTRL_TUNED_CLK)
2276
- return; /* Success! */
2277
- break;
2785
+ return -ETIMEDOUT;
22782786 }
22792787
22802788 /* Spec does not require a delay between tuning cycles */
22812789 if (host->tuning_delay > 0)
22822790 mdelay(host->tuning_delay);
2791
+
2792
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2793
+ if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2794
+ if (ctrl & SDHCI_CTRL_TUNED_CLK)
2795
+ return 0; /* Success! */
2796
+ break;
2797
+ }
2798
+
22832799 }
22842800
22852801 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
22862802 mmc_hostname(host->mmc));
22872803 sdhci_reset_tuning(host);
2804
+ return -EAGAIN;
22882805 }
22892806
22902807 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
....@@ -2328,7 +2845,7 @@
23282845 case MMC_TIMING_UHS_SDR50:
23292846 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
23302847 break;
2331
- /* FALLTHROUGH */
2848
+ fallthrough;
23322849
23332850 default:
23342851 goto out;
....@@ -2346,7 +2863,7 @@
23462863
23472864 sdhci_start_tuning(host);
23482865
2349
- __sdhci_execute_tuning(host, opcode);
2866
+ host->tuning_err = __sdhci_execute_tuning(host, opcode);
23502867
23512868 sdhci_end_tuning(host);
23522869 out:
....@@ -2413,11 +2930,6 @@
24132930 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
24142931 }
24152932
2416
-static inline bool sdhci_has_requests(struct sdhci_host *host)
2417
-{
2418
- return host->cmd || host->data_cmd;
2419
-}
2420
-
24212933 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
24222934 {
24232935 if (host->data_cmd) {
....@@ -2470,6 +2982,7 @@
24702982 .get_ro = sdhci_get_ro,
24712983 .hw_reset = sdhci_hw_reset,
24722984 .enable_sdio_irq = sdhci_enable_sdio_irq,
2985
+ .ack_sdio_irq = sdhci_ack_sdio_irq,
24732986 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
24742987 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
24752988 .execute_tuning = sdhci_execute_tuning,
....@@ -2479,7 +2992,7 @@
24792992
24802993 /*****************************************************************************\
24812994 * *
2482
- * Tasklets *
2995
+ * Request done *
24832996 * *
24842997 \*****************************************************************************/
24852998
....@@ -2502,7 +3015,36 @@
25023015 return true;
25033016 }
25043017
2505
- sdhci_del_timer(host, mrq);
3018
+ /*
3019
+ * The controller needs a reset of internal state machines
3020
+ * upon error conditions.
3021
+ */
3022
+ if (sdhci_needs_reset(host, mrq)) {
3023
+ /*
3024
+ * Do not finish until command and data lines are available for
3025
+ * reset. Note there can only be one other mrq, so it cannot
3026
+ * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3027
+ * would both be null.
3028
+ */
3029
+ if (host->cmd || host->data_cmd) {
3030
+ spin_unlock_irqrestore(&host->lock, flags);
3031
+ return true;
3032
+ }
3033
+
3034
+ /* Some controllers need this kick or reset won't work here */
3035
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3036
+ /* This is to force an update */
3037
+ host->ops->set_clock(host, host->clock);
3038
+
3039
+ /*
3040
+ * Spec says we should do both at the same time, but Ricoh
3041
+ * controllers do not like that.
3042
+ */
3043
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
3044
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
3045
+
3046
+ host->pending_reset = false;
3047
+ }
25063048
25073049 /*
25083050 * Always unmap the data buffers if they were mapped by
....@@ -2511,6 +3053,17 @@
25113053 */
25123054 if (host->flags & SDHCI_REQ_USE_DMA) {
25133055 struct mmc_data *data = mrq->data;
3056
+
3057
+ if (host->use_external_dma && data &&
3058
+ (mrq->cmd->error || data->error)) {
3059
+ struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3060
+
3061
+ host->mrqs_done[i] = NULL;
3062
+ spin_unlock_irqrestore(&host->lock, flags);
3063
+ dmaengine_terminate_sync(chan);
3064
+ spin_lock_irqsave(&host->lock, flags);
3065
+ sdhci_set_mrq_done(host, mrq);
3066
+ }
25143067
25153068 if (data && data->host_cookie == COOKIE_MAPPED) {
25163069 if (host->bounce_buffer) {
....@@ -2556,41 +3109,8 @@
25563109 }
25573110 }
25583111
2559
- /*
2560
- * The controller needs a reset of internal state machines
2561
- * upon error conditions.
2562
- */
2563
- if (sdhci_needs_reset(host, mrq)) {
2564
- /*
2565
- * Do not finish until command and data lines are available for
2566
- * reset. Note there can only be one other mrq, so it cannot
2567
- * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2568
- * would both be null.
2569
- */
2570
- if (host->cmd || host->data_cmd) {
2571
- spin_unlock_irqrestore(&host->lock, flags);
2572
- return true;
2573
- }
2574
-
2575
- /* Some controllers need this kick or reset won't work here */
2576
- if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2577
- /* This is to force an update */
2578
- host->ops->set_clock(host, host->clock);
2579
-
2580
- /* Spec says we should do both at the same time, but Ricoh
2581
- controllers do not like that. */
2582
- sdhci_do_reset(host, SDHCI_RESET_CMD);
2583
- sdhci_do_reset(host, SDHCI_RESET_DATA);
2584
-
2585
- host->pending_reset = false;
2586
- }
2587
-
2588
- if (!sdhci_has_requests(host))
2589
- sdhci_led_deactivate(host);
2590
-
25913112 host->mrqs_done[i] = NULL;
25923113
2593
- mmiowb();
25943114 spin_unlock_irqrestore(&host->lock, flags);
25953115
25963116 if (host->ops->request_done)
....@@ -2601,9 +3121,10 @@
26013121 return false;
26023122 }
26033123
2604
-static void sdhci_tasklet_finish(unsigned long param)
3124
+static void sdhci_complete_work(struct work_struct *work)
26053125 {
2606
- struct sdhci_host *host = (struct sdhci_host *)param;
3126
+ struct sdhci_host *host = container_of(work, struct sdhci_host,
3127
+ complete_work);
26073128
26083129 while (!sdhci_request_done(host))
26093130 ;
....@@ -2627,7 +3148,6 @@
26273148 sdhci_finish_mrq(host, host->cmd->mrq);
26283149 }
26293150
2630
- mmiowb();
26313151 spin_unlock_irqrestore(&host->lock, flags);
26323152 }
26333153
....@@ -2648,7 +3168,8 @@
26483168
26493169 if (host->data) {
26503170 host->data->error = -ETIMEDOUT;
2651
- sdhci_finish_data(host);
3171
+ __sdhci_finish_data(host, true);
3172
+ queue_work(host->complete_wq, &host->complete_work);
26523173 } else if (host->data_cmd) {
26533174 host->data_cmd->error = -ETIMEDOUT;
26543175 sdhci_finish_mrq(host, host->data_cmd->mrq);
....@@ -2658,7 +3179,6 @@
26583179 }
26593180 }
26603181
2661
- mmiowb();
26623182 spin_unlock_irqrestore(&host->lock, flags);
26633183 }
26643184
....@@ -2715,7 +3235,7 @@
27153235 return;
27163236 }
27173237
2718
- sdhci_finish_mrq(host, host->cmd->mrq);
3238
+ __sdhci_finish_mrq(host, host->cmd->mrq);
27193239 return;
27203240 }
27213241
....@@ -2729,7 +3249,7 @@
27293249
27303250 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
27313251 mrq->sbc->error = err;
2732
- sdhci_finish_mrq(host, mrq);
3252
+ __sdhci_finish_mrq(host, mrq);
27333253 return;
27343254 }
27353255 }
....@@ -2797,7 +3317,7 @@
27973317 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
27983318 host->data_cmd = NULL;
27993319 data_cmd->error = -ETIMEDOUT;
2800
- sdhci_finish_mrq(host, data_cmd->mrq);
3320
+ __sdhci_finish_mrq(host, data_cmd->mrq);
28013321 return;
28023322 }
28033323 if (intmask & SDHCI_INT_DATA_END) {
....@@ -2810,7 +3330,7 @@
28103330 if (host->cmd == data_cmd)
28113331 return;
28123332
2813
- sdhci_finish_mrq(host, data_cmd->mrq);
3333
+ __sdhci_finish_mrq(host, data_cmd->mrq);
28143334 return;
28153335 }
28163336 }
....@@ -2863,7 +3383,7 @@
28633383 * some controllers are faulty, don't trust them.
28643384 */
28653385 if (intmask & SDHCI_INT_DMA_END) {
2866
- u32 dmastart, dmanow;
3386
+ dma_addr_t dmastart, dmanow;
28673387
28683388 dmastart = sdhci_sdma_address(host);
28693389 dmanow = dmastart + host->data->bytes_xfered;
....@@ -2871,12 +3391,12 @@
28713391 * Force update to the next DMA block boundary.
28723392 */
28733393 dmanow = (dmanow &
2874
- ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3394
+ ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
28753395 SDHCI_DEFAULT_BOUNDARY_SIZE;
28763396 host->data->bytes_xfered = dmanow - dmastart;
2877
- DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2878
- dmastart, host->data->bytes_xfered, dmanow);
2879
- sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
3397
+ DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3398
+ &dmastart, host->data->bytes_xfered, &dmanow);
3399
+ sdhci_set_sdma_addr(host, dmanow);
28803400 }
28813401
28823402 if (intmask & SDHCI_INT_DATA_END) {
....@@ -2915,7 +3435,7 @@
29153435
29163436 spin_lock(&host->lock);
29173437
2918
- if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
3438
+ if (host->runtime_suspended) {
29193439 spin_unlock(&host->lock);
29203440 return IRQ_NONE;
29213441 }
....@@ -2986,8 +3506,7 @@
29863506 if ((intmask & SDHCI_INT_CARD_INT) &&
29873507 (host->ier & SDHCI_INT_CARD_INT)) {
29883508 sdhci_enable_sdio_irq_nolock(host, false);
2989
- host->thread_isr |= SDHCI_INT_CARD_INT;
2990
- result = IRQ_WAKE_THREAD;
3509
+ sdio_signal_irq(host->mmc);
29913510 }
29923511
29933512 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
....@@ -3021,6 +3540,9 @@
30213540 }
30223541 }
30233542 out:
3543
+ if (host->deferred_cmd)
3544
+ result = IRQ_WAKE_THREAD;
3545
+
30243546 spin_unlock(&host->lock);
30253547
30263548 /* Process mrqs ready for immediate completion */
....@@ -3046,12 +3568,22 @@
30463568 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
30473569 {
30483570 struct sdhci_host *host = dev_id;
3571
+ struct mmc_command *cmd;
30493572 unsigned long flags;
30503573 u32 isr;
30513574
3575
+ while (!sdhci_request_done(host))
3576
+ ;
3577
+
30523578 spin_lock_irqsave(&host->lock, flags);
3579
+
30533580 isr = host->thread_isr;
30543581 host->thread_isr = 0;
3582
+
3583
+ cmd = host->deferred_cmd;
3584
+ if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3585
+ sdhci_finish_mrq(host, cmd->mrq);
3586
+
30553587 spin_unlock_irqrestore(&host->lock, flags);
30563588
30573589 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
....@@ -3061,16 +3593,7 @@
30613593 mmc_detect_change(mmc, msecs_to_jiffies(200));
30623594 }
30633595
3064
- if (isr & SDHCI_INT_CARD_INT) {
3065
- sdio_run_irqs(host->mmc);
3066
-
3067
- spin_lock_irqsave(&host->lock, flags);
3068
- if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3069
- sdhci_enable_sdio_irq_nolock(host, true);
3070
- spin_unlock_irqrestore(&host->lock, flags);
3071
- }
3072
-
3073
- return isr ? IRQ_HANDLED : IRQ_NONE;
3596
+ return IRQ_HANDLED;
30743597 }
30753598
30763599 /*****************************************************************************\
....@@ -3182,7 +3705,6 @@
31823705 mmc->ops->set_ios(mmc, &mmc->ios);
31833706 } else {
31843707 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3185
- mmiowb();
31863708 }
31873709
31883710 if (host->irq_wake_enabled) {
....@@ -3224,7 +3746,7 @@
32243746 }
32253747 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
32263748
3227
-int sdhci_runtime_resume_host(struct sdhci_host *host)
3749
+int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
32283750 {
32293751 struct mmc_host *mmc = host->mmc;
32303752 unsigned long flags;
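
sdhci_runtime_resume_host() now takes a soft_reset argument that is forwarded to sdhci_init() (next hunk), so every caller's runtime-resume path needs updating: 0 keeps the old full-reset behaviour, non-zero requests only a CMD/DATA reset for controllers that keep their state across runtime suspend. A hedged caller sketch, assuming drvdata holds the sdhci_host as the sdhci-pltfm helpers arrange:

static int my_sdhci_runtime_resume(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);

        /* 0 = full reset; pass 1 if the controller retains state. */
        return sdhci_runtime_resume_host(host, 0);
}
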
....@@ -3235,7 +3757,7 @@
32353757 host->ops->enable_dma(host);
32363758 }
32373759
3238
- sdhci_init(host, 0);
3760
+ sdhci_init(host, soft_reset);
32393761
32403762 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
32413763 mmc->ios.power_mode != MMC_POWER_OFF) {
....@@ -3262,7 +3784,7 @@
32623784 host->runtime_suspended = false;
32633785
32643786 /* Enable SDIO IRQ */
3265
- if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3787
+ if (sdio_irq_claimed(mmc))
32663788 sdhci_enable_sdio_irq_nolock(host, true);
32673789
32683790 /* Enable Card Detection */
....@@ -3292,7 +3814,14 @@
32923814
32933815 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
32943816 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3295
- if (host->flags & SDHCI_USE_64_BIT_DMA)
3817
+ /*
3818
+ * Host from V4.10 supports ADMA3 DMA type.
3819
+ * ADMA3 performs integrated descriptor which is more suitable
3820
+ * for cmd queuing to fetch both command and transfer descriptors.
3821
+ */
3822
+ if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3823
+ ctrl |= SDHCI_CTRL_ADMA3;
3824
+ else if (host->flags & SDHCI_USE_64_BIT_DMA)
32963825 ctrl |= SDHCI_CTRL_ADMA64;
32973826 else
32983827 ctrl |= SDHCI_CTRL_ADMA32;
....@@ -3302,7 +3831,7 @@
33023831 SDHCI_BLOCK_SIZE);
33033832
33043833 /* Set maximum timeout */
3305
- sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3834
+ sdhci_set_timeout(host, NULL);
33063835
33073836 host->ier = host->cqe_ier;
33083837
....@@ -3315,7 +3844,6 @@
33153844 mmc_hostname(mmc), host->ier,
33163845 sdhci_readl(host, SDHCI_INT_STATUS));
33173846
3318
- mmiowb();
33193847 spin_unlock_irqrestore(&host->lock, flags);
33203848 }
33213849 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
....@@ -3340,7 +3868,6 @@
33403868 mmc_hostname(mmc), host->ier,
33413869 sdhci_readl(host, SDHCI_INT_STATUS));
33423870
3343
- mmiowb();
33443871 spin_unlock_irqrestore(&host->lock, flags);
33453872 }
33463873 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
@@ -3418,6 +3945,7 @@
 	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
 
 	host->tuning_delay = -1;
+	host->tuning_loop_count = MAX_TUNING_LOOP;
 
 	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
 
@@ -3463,7 +3991,8 @@
 	return ret;
 }
 
-void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
+void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
+		       const u32 *caps, const u32 *caps1)
 {
 	u16 v;
 	u64 dt_caps_mask = 0;
@@ -3482,10 +4011,13 @@
 
 	sdhci_do_reset(host, SDHCI_RESET_ALL);
 
-	of_property_read_u64(mmc_dev(host->mmc)->of_node,
-			     "sdhci-caps-mask", &dt_caps_mask);
-	of_property_read_u64(mmc_dev(host->mmc)->of_node,
-			     "sdhci-caps", &dt_caps);
+	if (host->v4_mode)
+		sdhci_do_enable_v4_mode(host);
+
+	device_property_read_u64_array(mmc_dev(host->mmc),
+				       "sdhci-caps-mask", &dt_caps_mask, 1);
+	device_property_read_u64_array(mmc_dev(host->mmc),
+				       "sdhci-caps", &dt_caps, 1);
 
 	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
 	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
@@ -3514,7 +4046,7 @@
 }
 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
 
-static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
+static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
 	unsigned int max_blocks;
@@ -3552,7 +4084,7 @@
 		 * Exiting with zero here makes sure we proceed with
 		 * mmc->max_segs == 1.
 		 */
-		return 0;
+		return;
 	}
 
 	host->bounce_addr = dma_map_single(mmc->parent,
@@ -3562,7 +4094,7 @@
 	ret = dma_mapping_error(mmc->parent, host->bounce_addr);
 	if (ret)
 		/* Again fall back to max_segs == 1 */
-		return 0;
+		return;
 	host->bounce_buffer_size = bounce_size;
 
 	/* Lie about this since we're bouncing */
@@ -3572,8 +4104,19 @@
 
 	pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
 		mmc_hostname(mmc), max_blocks, bounce_size);
+}
 
-	return 0;
+static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
+{
+	/*
+	 * According to SD Host Controller spec v4.10, bit[27] added from
+	 * version 4.10 in Capabilities Register is used as 64-bit System
+	 * Address support for V4 mode.
+	 */
+	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
+		return host->caps & SDHCI_CAN_64BIT_V4;
+
+	return host->caps & SDHCI_CAN_64BIT;
 }
 
 int sdhci_setup_host(struct sdhci_host *host)
@@ -3583,7 +4126,8 @@
 	unsigned int ocr_avail;
 	unsigned int override_timeout_clk;
 	u32 max_clk;
-	int ret;
+	int ret = 0;
+	bool enable_vqmmc = false;
 
 	WARN_ON(host == NULL);
 	if (host == NULL)
35974141 * the host can take the appropriate action if regulators are not
35984142 * available.
35994143 */
3600
- ret = mmc_regulator_get_supply(mmc);
3601
- if (ret)
3602
- return ret;
4144
+ if (!mmc->supply.vqmmc) {
4145
+ ret = mmc_regulator_get_supply(mmc);
4146
+ if (ret)
4147
+ return ret;
4148
+ enable_vqmmc = true;
4149
+ }
36034150
36044151 DBG("Version: 0x%08x | Present: 0x%08x\n",
36054152 sdhci_readw(host, SDHCI_HOST_VERSION),
....@@ -3612,13 +4159,10 @@
36124159
36134160 override_timeout_clk = host->timeout_clk;
36144161
3615
- if (host->version > SDHCI_SPEC_300) {
4162
+ if (host->version > SDHCI_SPEC_420) {
36164163 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
36174164 mmc_hostname(mmc), host->version);
36184165 }
3619
-
3620
- if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
3621
- mmc->caps2 &= ~MMC_CAP2_CQE;
36224166
36234167 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
36244168 host->flags |= SDHCI_USE_SDMA;
....@@ -3643,18 +4187,29 @@
36434187 host->flags &= ~SDHCI_USE_ADMA;
36444188 }
36454189
3646
- /*
3647
- * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3648
- * and *must* do 64-bit DMA. A driver has the opportunity to change
3649
- * that during the first call to ->enable_dma(). Similarly
3650
- * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3651
- * implement.
3652
- */
3653
- if (host->caps & SDHCI_CAN_64BIT)
4190
+ if (sdhci_can_64bit_dma(host))
36544191 host->flags |= SDHCI_USE_64_BIT_DMA;
36554192
4193
+ if (host->use_external_dma) {
4194
+ ret = sdhci_external_dma_init(host);
4195
+ if (ret == -EPROBE_DEFER)
4196
+ goto unreg;
4197
+ /*
4198
+ * Fall back to use the DMA/PIO integrated in standard SDHCI
4199
+ * instead of external DMA devices.
4200
+ */
4201
+ else if (ret)
4202
+ sdhci_switch_external_dma(host, false);
4203
+ /* Disable internal DMA sources */
4204
+ else
4205
+ host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4206
+ }
4207
+
36564208 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3657
- ret = sdhci_set_dma_mask(host);
4209
+ if (host->ops->set_dma_mask)
4210
+ ret = host->ops->set_dma_mask(host);
4211
+ else
4212
+ ret = sdhci_set_dma_mask(host);
36584213
36594214 if (!ret && host->ops->enable_dma)
36604215 ret = host->ops->enable_dma(host);
....@@ -3668,27 +4223,30 @@
36684223 }
36694224 }
36704225
3671
- /* SDMA does not support 64-bit DMA */
3672
- if (host->flags & SDHCI_USE_64_BIT_DMA)
4226
+ /* SDMA does not support 64-bit DMA if v4 mode not set */
4227
+ if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
36734228 host->flags &= ~SDHCI_USE_SDMA;
36744229
36754230 if (host->flags & SDHCI_USE_ADMA) {
36764231 dma_addr_t dma;
36774232 void *buf;
36784233
3679
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
3680
- host->adma_table_sz = host->adma_table_cnt *
3681
- SDHCI_ADMA2_64_DESC_SZ;
3682
- host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3683
- } else {
3684
- host->adma_table_sz = host->adma_table_cnt *
3685
- SDHCI_ADMA2_32_DESC_SZ;
3686
- host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3687
- }
4234
+ if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4235
+ host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4236
+ else if (!host->alloc_desc_sz)
4237
+ host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4238
+
4239
+ host->desc_sz = host->alloc_desc_sz;
4240
+ host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
36884241
36894242 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3690
- buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3691
- host->adma_table_sz, &dma, GFP_KERNEL);
4243
+ /*
4244
+ * Use zalloc to zero the reserved high 32-bits of 128-bit
4245
+ * descriptors so that they never need to be written.
4246
+ */
4247
+ buf = dma_alloc_coherent(mmc_dev(mmc),
4248
+ host->align_buffer_sz + host->adma_table_sz,
4249
+ &dma, GFP_KERNEL);
36924250 if (!buf) {
36934251 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
36944252 mmc_hostname(mmc));
....@@ -3720,11 +4278,9 @@
37204278 }
37214279
37224280 if (host->version >= SDHCI_SPEC_300)
3723
- host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3724
- >> SDHCI_CLOCK_BASE_SHIFT;
4281
+ host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
37254282 else
3726
- host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3727
- >> SDHCI_CLOCK_BASE_SHIFT;
4283
+ host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
37284284
37294285 host->max_clk *= 1000000;
37304286 if (host->max_clk == 0 || host->quirks &
....@@ -3742,8 +4298,7 @@
37424298 * In case of Host Controller v3.00, find out whether clock
37434299 * multiplier is supported.
37444300 */
3745
- host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3746
- SDHCI_CLOCK_MUL_SHIFT;
4301
+ host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
37474302
37484303 /*
37494304 * In case the value in Clock Multiplier is 0, then programmable
....@@ -3776,8 +4331,7 @@
37764331 mmc->f_max = max_clk;
37774332
37784333 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3779
- host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3780
- SDHCI_TIMEOUT_CLK_SHIFT;
4334
+ host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
37814335
37824336 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
37834337 host->timeout_clk *= 1000;
....@@ -3807,16 +4361,19 @@
38074361 !host->ops->get_max_timeout_count)
38084362 mmc->max_busy_timeout = 0;
38094363
3810
- mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
4364
+ mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
38114365 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
38124366
38134367 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
38144368 host->flags |= SDHCI_AUTO_CMD12;
38154369
3816
- /* Auto-CMD23 stuff only works in ADMA or PIO. */
4370
+ /*
4371
+ * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
4372
+ * For v4 mode, SDMA may use Auto-CMD23 as well.
4373
+ */
38174374 if ((host->version >= SDHCI_SPEC_300) &&
38184375 ((host->flags & SDHCI_USE_ADMA) ||
3819
- !(host->flags & SDHCI_USE_SDMA)) &&
4376
+ !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
38204377 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
38214378 host->flags |= SDHCI_AUTO_CMD23;
38224379 DBG("Auto-CMD23 available\n");
....@@ -3846,7 +4403,10 @@
38464403 mmc->caps |= MMC_CAP_NEEDS_POLL;
38474404
38484405 if (!IS_ERR(mmc->supply.vqmmc)) {
3849
- ret = regulator_enable(mmc->supply.vqmmc);
4406
+ if (enable_vqmmc) {
4407
+ ret = regulator_enable(mmc->supply.vqmmc);
4408
+ host->sdhci_core_to_disable_vqmmc = !ret;
4409
+ }
38504410
38514411 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
38524412 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
....@@ -3865,6 +4425,7 @@
38654425 mmc_hostname(mmc), ret);
38664426 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
38674427 }
4428
+
38684429 }
38694430
38704431 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
....@@ -3926,8 +4487,8 @@
39264487 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
39274488
39284489 /* Initial value for re-tuning timer count */
3929
- host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3930
- SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4490
+ host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4491
+ host->caps1);
39314492
39324493 /*
39334494 * In case Re-tuning Timer is not disabled, the actual value of
....@@ -3937,8 +4498,7 @@
39374498 host->tuning_count = 1 << (host->tuning_count - 1);
39384499
39394500 /* Re-tuning mode supported by the Host Controller */
3940
- host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3941
- SDHCI_RETUNING_MODE_SHIFT;
4501
+ host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
39424502
39434503 ocr_avail = 0;
39444504
@@ -3960,35 +4520,32 @@
 
 			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
 			max_current_caps =
-				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
-				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
-				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
+				FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
+				FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
+				FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
 		}
 	}
 
 	if (host->caps & SDHCI_CAN_VDD_330) {
 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
 
-		mmc->max_current_330 = ((max_current_caps &
-				   SDHCI_MAX_CURRENT_330_MASK) >>
-				   SDHCI_MAX_CURRENT_330_SHIFT) *
-				   SDHCI_MAX_CURRENT_MULTIPLIER;
+		mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
+						 max_current_caps) *
+					SDHCI_MAX_CURRENT_MULTIPLIER;
 	}
 	if (host->caps & SDHCI_CAN_VDD_300) {
 		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
 
-		mmc->max_current_300 = ((max_current_caps &
-				   SDHCI_MAX_CURRENT_300_MASK) >>
-				   SDHCI_MAX_CURRENT_300_SHIFT) *
-				   SDHCI_MAX_CURRENT_MULTIPLIER;
+		mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
						 max_current_caps) *
+					SDHCI_MAX_CURRENT_MULTIPLIER;
 	}
 	if (host->caps & SDHCI_CAN_VDD_180) {
 		ocr_avail |= MMC_VDD_165_195;
 
-		mmc->max_current_180 = ((max_current_caps &
-				   SDHCI_MAX_CURRENT_180_MASK) >>
-				   SDHCI_MAX_CURRENT_180_SHIFT) *
-				   SDHCI_MAX_CURRENT_MULTIPLIER;
+		mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
+						 max_current_caps) *
+					SDHCI_MAX_CURRENT_MULTIPLIER;
 	}
 
 	/* If OCR set by host, use it instead. */
@@ -4092,17 +4649,14 @@
 	 */
 	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
 
-	if (mmc->max_segs == 1) {
+	if (mmc->max_segs == 1)
 		/* This may alter mmc->*_blk_* parameters */
-		ret = sdhci_allocate_bounce_buffer(host);
-		if (ret)
-			return ret;
-	}
+		sdhci_allocate_bounce_buffer(host);
 
 	return 0;
 
 unreg:
-	if (!IS_ERR(mmc->supply.vqmmc))
+	if (host->sdhci_core_to_disable_vqmmc)
 		regulator_disable(mmc->supply.vqmmc);
 undma:
 	if (host->align_buffer)
@@ -4120,13 +4674,17 @@
 {
 	struct mmc_host *mmc = host->mmc;
 
-	if (!IS_ERR(mmc->supply.vqmmc))
+	if (host->sdhci_core_to_disable_vqmmc)
 		regulator_disable(mmc->supply.vqmmc);
 
 	if (host->align_buffer)
 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
 				  host->adma_table_sz, host->align_buffer,
 				  host->align_addr);
+
+	if (host->use_external_dma)
+		sdhci_external_dma_release(host);
+
 	host->adma_table = NULL;
 	host->align_buffer = NULL;
 }
@@ -4134,14 +4692,21 @@
 
 int __sdhci_add_host(struct sdhci_host *host)
 {
+	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
 	struct mmc_host *mmc = host->mmc;
 	int ret;
 
-	/*
-	 * Init tasklets.
-	 */
-	tasklet_init(&host->finish_tasklet,
-		sdhci_tasklet_finish, (unsigned long)host);
+	if ((mmc->caps2 & MMC_CAP2_CQE) &&
+	    (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
+		mmc->caps2 &= ~MMC_CAP2_CQE;
+		mmc->cqe_ops = NULL;
+	}
+
+	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
+	if (!host->complete_wq)
+		return -ENOMEM;
+
+	INIT_WORK(&host->complete_work, sdhci_complete_work);
 
 	timer_setup(&host->timer, sdhci_timeout_timer, 0);
 	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
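Not part of the patch: the hunk above retires the finish tasklet in favour of a dedicated workqueue (WQ_MEM_RECLAIM so completion keeps making progress under memory pressure, WQ_HIGHPRI to keep completion latency low), which also allows the completion path to sleep. A minimal sketch of the pattern, with hypothetical "example_*" names and assuming the request-completion path simply queues the work item:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_host {
	struct workqueue_struct	*complete_wq;
	struct work_struct	complete_work;
};

static void example_complete_work(struct work_struct *work)
{
	struct example_host *h =
		container_of(work, struct example_host, complete_work);

	/* Finish outstanding requests here; unlike a tasklet, may sleep. */
	(void)h;
}

/* Producer side, e.g. called from the interrupt handler's fast path. */
static void example_kick_completion(struct example_host *h)
{
	queue_work(h->complete_wq, &h->complete_work);
}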
@@ -4155,7 +4720,7 @@
 	if (ret) {
 		pr_err("%s: Failed to request IRQ %d: %d\n",
 		       mmc_hostname(mmc), host->irq, ret);
-		goto untasklet;
+		goto unwq;
 	}
 
 	ret = sdhci_led_register(host);
@@ -4165,14 +4730,13 @@
 		goto unirq;
 	}
 
-	mmiowb();
-
 	ret = mmc_add_host(mmc);
 	if (ret)
 		goto unled;
 
 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+		host->use_external_dma ? "External DMA" :
 		(host->flags & SDHCI_USE_ADMA) ?
 		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
 		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -4188,8 +4752,8 @@
 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
 	free_irq(host->irq, host);
-untasklet:
-	tasklet_kill(&host->finish_tasklet);
+unwq:
+	destroy_workqueue(host->complete_wq);
 
 	return ret;
 }
@@ -4251,9 +4815,9 @@
 	del_timer_sync(&host->timer);
 	del_timer_sync(&host->data_timer);
 
-	tasklet_kill(&host->finish_tasklet);
+	destroy_workqueue(host->complete_wq);
 
-	if (!IS_ERR(mmc->supply.vqmmc))
+	if (host->sdhci_core_to_disable_vqmmc)
 		regulator_disable(mmc->supply.vqmmc);
 
 	if (host->align_buffer)
@@ -4261,6 +4825,9 @@
 				  host->adma_table_sz, host->align_buffer,
 				  host->align_addr);
 
+	if (host->use_external_dma)
+		sdhci_external_dma_release(host);
+
 	host->adma_table = NULL;
 	host->align_buffer = NULL;
 }