forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/mmc/host/mmci.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
  *
  * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
  * Copyright (C) 2010 ST-Ericsson SA
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -21,15 +18,16 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
+#include <linux/mmc/mmc.h>
 #include <linux/mmc/pm.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/sd.h>
 #include <linux/mmc/slot-gpio.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -37,50 +35,75 @@
 #include <linux/pm_runtime.h>
 #include <linux/types.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>

 #include <asm/div64.h>
 #include <asm/io.h>

 #include "mmci.h"
-#include "mmci_qcom_dml.h"

 #define DRIVER_NAME "mmci-pl18x"
+
+static void mmci_variant_init(struct mmci_host *host);
+static void ux500_variant_init(struct mmci_host *host);
+static void ux500v2_variant_init(struct mmci_host *host);

 static unsigned int fmax = 515633;

 static struct variant_data variant_arm = {
  .fifosize = 16 * 4,
  .fifohalfsize = 8 * 4,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
  .datalength_bits = 16,
+ .datactrl_blocksz = 11,
  .pwrreg_powerup = MCI_PWR_UP,
  .f_max = 100000000,
  .reversed_irq_handling = true,
  .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .start_err = MCI_STARTBITERR,
  .opendrain = MCI_ROD,
+ .init = mmci_variant_init,
 };

 static struct variant_data variant_arm_extended_fifo = {
  .fifosize = 128 * 4,
  .fifohalfsize = 64 * 4,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
  .datalength_bits = 16,
+ .datactrl_blocksz = 11,
  .pwrreg_powerup = MCI_PWR_UP,
  .f_max = 100000000,
  .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .start_err = MCI_STARTBITERR,
  .opendrain = MCI_ROD,
+ .init = mmci_variant_init,
 };

 static struct variant_data variant_arm_extended_fifo_hwfc = {
  .fifosize = 128 * 4,
  .fifohalfsize = 64 * 4,
  .clkreg_enable = MCI_ARM_HWFCEN,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
  .datalength_bits = 16,
+ .datactrl_blocksz = 11,
  .pwrreg_powerup = MCI_PWR_UP,
  .f_max = 100000000,
  .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .start_err = MCI_STARTBITERR,
  .opendrain = MCI_ROD,
+ .init = mmci_variant_init,
 };

 static struct variant_data variant_u300 = {
@@ -88,7 +111,12 @@
  .fifohalfsize = 8 * 4,
  .clkreg_enable = MCI_ST_U300_HWFCEN,
  .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
  .datalength_bits = 16,
+ .datactrl_blocksz = 11,
  .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
  .st_sdio = true,
  .pwrreg_powerup = MCI_PWR_ON,
@@ -97,8 +125,10 @@
  .pwrreg_clkgate = true,
  .pwrreg_nopower = true,
  .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .start_err = MCI_STARTBITERR,
  .opendrain = MCI_OD,
+ .init = mmci_variant_init,
 };

 static struct variant_data variant_nomadik = {
@@ -106,7 +136,12 @@
  .fifohalfsize = 8 * 4,
  .clkreg = MCI_CLK_ENABLE,
  .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
  .datalength_bits = 24,
+ .datactrl_blocksz = 11,
  .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
  .st_sdio = true,
  .st_clkdiv = true,
@@ -116,8 +151,10 @@
  .pwrreg_clkgate = true,
  .pwrreg_nopower = true,
  .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .start_err = MCI_STARTBITERR,
  .opendrain = MCI_OD,
+ .init = mmci_variant_init,
 };

 static struct variant_data variant_ux500 = {
@@ -127,7 +164,14 @@
  .clkreg_enable = MCI_ST_UX500_HWFCEN,
  .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
  .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
  .datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
  .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
  .st_sdio = true,
  .st_clkdiv = true,
@@ -141,8 +185,10 @@
  .busy_detect_mask = MCI_ST_BUSYENDMASK,
  .pwrreg_nopower = true,
  .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .start_err = MCI_STARTBITERR,
  .opendrain = MCI_OD,
+ .init = ux500_variant_init,
 };

 static struct variant_data variant_ux500v2 = {
@@ -152,12 +198,18 @@
  .clkreg_enable = MCI_ST_UX500_HWFCEN,
  .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
  .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
  .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
  .datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
  .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
  .st_sdio = true,
  .st_clkdiv = true,
- .blksz_datactrl16 = true,
  .pwrreg_powerup = MCI_PWR_ON,
  .f_max = 100000000,
  .signal_direction = true,
@@ -168,8 +220,10 @@
  .busy_detect_mask = MCI_ST_BUSYENDMASK,
  .pwrreg_nopower = true,
  .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .start_err = MCI_STARTBITERR,
  .opendrain = MCI_OD,
+ .init = ux500v2_variant_init,
 };

 static struct variant_data variant_stm32 = {
@@ -179,7 +233,13 @@
  .clkreg_enable = MCI_ST_UX500_HWFCEN,
  .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
  .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .datalength_bits = 24,
+ .datactrl_blocksz = 11,
  .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
  .st_sdio = true,
  .st_clkdiv = true,
@@ -187,6 +247,60 @@
  .f_max = 48000000,
  .pwrreg_clkgate = true,
  .pwrreg_nopower = true,
+ .init = mmci_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmc = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .f_max = 208000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .stm32_idmabsize_mask = GENMASK(12, 5),
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
+ .init = sdmmc_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmcv2 = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .f_max = 208000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .stm32_idmabsize_mask = GENMASK(16, 5),
+ .dma_lli = true,
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
+ .init = sdmmc_variant_init,
 };

 static struct variant_data variant_qcom = {
@@ -197,15 +311,21 @@
  MCI_QCOM_CLK_SELECT_IN_FBCLK,
  .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
  .datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
  .data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
- .blksz_datactrl4 = true,
  .datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
  .pwrreg_powerup = MCI_PWR_UP,
  .f_max = 208000000,
  .explicit_mclk_control = true,
  .qcom_fifo = true,
  .qcom_dml = true,
  .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
  .start_err = MCI_STARTBITERR,
  .opendrain = MCI_ROD,
  .init = qcom_variant_init,
@@ -226,24 +346,6 @@
  return busy;
 }

-/*
- * Validate mmc prerequisites
- */
-static int mmci_validate_data(struct mmci_host *host,
- struct mmc_data *data)
-{
- if (!data)
- return 0;
-
- if (!is_power_of_2(data->blksz)) {
- dev_err(mmc_dev(host->mmc),
- "unsupported block size (%d bytes)\n", data->blksz);
- return -EINVAL;
- }
-
- return 0;
-}
-
 static void mmci_reg_delay(struct mmci_host *host)
 {
  /*
@@ -262,7 +364,7 @@
 /*
  * This must be called with host->lock held
  */
-static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
+void mmci_write_clkreg(struct mmci_host *host, u32 clk)
 {
  if (host->clk_reg != clk) {
  host->clk_reg = clk;
@@ -273,7 +375,7 @@
 /*
  * This must be called with host->lock held
  */
-static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
+void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
 {
  if (host->pwr_reg != pwr) {
  host->pwr_reg = pwr;
@@ -357,6 +459,138 @@
  mmci_write_clkreg(host, clk);
 }

+static void mmci_dma_release(struct mmci_host *host)
+{
+ if (host->ops && host->ops->dma_release)
+ host->ops->dma_release(host);
+
+ host->use_dma = false;
+}
+
+static void mmci_dma_setup(struct mmci_host *host)
+{
+ if (!host->ops || !host->ops->dma_setup)
+ return;
+
+ if (host->ops->dma_setup(host))
+ return;
+
+ /* initialize pre request cookie */
+ host->next_cookie = 1;
+
+ host->use_dma = true;
+}
+
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct variant_data *variant = host->variant;
+
+ if (!data)
+ return 0;
+ if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ if (host->ops && host->ops->validate_data)
+ return host->ops->validate_data(host, data);
+
+ return 0;
+}
+
+static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+{
+ int err;
+
+ if (!host->ops || !host->ops->prep_data)
+ return 0;
+
+ err = host->ops->prep_data(host, data, next);
+
+ if (next && !err)
+ data->host_cookie = ++host->next_cookie < 0 ?
+ 1 : host->next_cookie;
+
+ return err;
+}
+
+static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+ int err)
+{
+ if (host->ops && host->ops->unprep_data)
+ host->ops->unprep_data(host, data, err);
+
+ data->host_cookie = 0;
+}
+
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+ WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
+
+ if (host->ops && host->ops->get_next_data)
+ host->ops->get_next_data(host, data);
+}
+
+static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+{
+ struct mmc_data *data = host->data;
+ int ret;
+
+ if (!host->use_dma)
+ return -EINVAL;
+
+ ret = mmci_prep_data(host, data, false);
+ if (ret)
+ return ret;
+
+ if (!host->ops || !host->ops->dma_start)
+ return -EINVAL;
+
+ /* Okay, go for it. */
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+
+ ret = host->ops->dma_start(host, &datactrl);
+ if (ret)
+ return ret;
+
+ /* Trigger the DMA transfer */
+ mmci_write_datactrlreg(host, datactrl);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+ if (!host->use_dma)
+ return;
+
+ if (host->ops && host->ops->dma_finalize)
+ host->ops->dma_finalize(host, data);
+}
+
+static void mmci_dma_error(struct mmci_host *host)
+{
+ if (!host->use_dma)
+ return;
+
+ if (host->ops && host->ops->dma_error)
+ host->ops->dma_error(host);
+}
+
 static void
 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 {
@@ -378,7 +612,7 @@
  if (host->singleirq) {
  unsigned int mask0 = readl(base + MMCIMASK0);

- mask0 &= ~MCI_IRQ1MASK;
+ mask0 &= ~variant->irq_pio_mask;
  mask0 |= mask;

  writel(mask0, base + MMCIMASK0);
@@ -409,37 +643,137 @@
  sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 }

+static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
+}
+
+static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | (host->data->blksz << 16);
+}
+
+static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+
+ /*
+ * Before unmasking for the busy end IRQ, confirm that the
+ * command was sent successfully. To keep track of having a
+ * command in-progress, waiting for busy signaling to end,
+ * store the status in host->busy_status.
+ *
+ * Note that, the card may need a couple of clock cycles before
+ * it starts signaling busy on DAT0, hence re-read the
+ * MMCISTATUS register here, to allow the busy bit to be set.
+ * Potentially we may even need to poll the register for a
+ * while, to allow it to be set, but tests indicates that it
+ * isn't needed.
+ */
+ if (!host->busy_status && !(status & err_msk) &&
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+
+ host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent, then bail out if busy status is set and wait for the
+ * busy end IRQ.
+ *
+ * Note that, the HW triggers an IRQ on both edges while
+ * monitoring DAT0 for busy completion, but there is only one
+ * status bit in MMCISTATUS for the busy state. Therefore
+ * both the start and the end interrupts needs to be cleared,
+ * one after the other. So, clear the busy start IRQ here.
+ */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag)) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent and the busy bit isn't set, it means we have received
+ * the busy end IRQ. Clear and mask the IRQ, then continue to
+ * process the command.
+ */
+ if (host->busy_status) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
 /*
  * All the DMA operation mode stuff goes inside this ifdef.
  * This assumes that you have a generic DMA device interface,
  * no custom DMA interfaces are supported.
  */
 #ifdef CONFIG_DMA_ENGINE
-static void mmci_dma_setup(struct mmci_host *host)
+struct mmci_dmae_next {
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+};
+
+struct mmci_dmae_priv {
+ struct dma_chan *cur;
+ struct dma_chan *rx_channel;
+ struct dma_chan *tx_channel;
+ struct dma_async_tx_descriptor *desc_current;
+ struct mmci_dmae_next next_data;
+};
+
+int mmci_dmae_setup(struct mmci_host *host)
 {
  const char *rxname, *txname;
+ struct mmci_dmae_priv *dmae;

- host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
- host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
+ if (!dmae)
+ return -ENOMEM;

- /* initialize pre request cookie */
- host->next_data.cookie = 1;
+ host->dma_priv = dmae;
+
+ dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(dmae->rx_channel)) {
+ int ret = PTR_ERR(dmae->rx_channel);
+ dmae->rx_channel = NULL;
+ return ret;
+ }
+
+ dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(dmae->tx_channel)) {
+ if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+ dev_warn(mmc_dev(host->mmc),
+ "Deferred probe for TX channel ignored\n");
+ dmae->tx_channel = NULL;
+ }

 /*
  * If only an RX channel is specified, the driver will
  * attempt to use it bidirectionally, however if it is
  * is specified but cannot be located, DMA will be disabled.
  */
- if (host->dma_rx_channel && !host->dma_tx_channel)
- host->dma_tx_channel = host->dma_rx_channel;
+ if (dmae->rx_channel && !dmae->tx_channel)
+ dmae->tx_channel = dmae->rx_channel;

- if (host->dma_rx_channel)
- rxname = dma_chan_name(host->dma_rx_channel);
+ if (dmae->rx_channel)
+ rxname = dma_chan_name(dmae->rx_channel);
  else
  rxname = "none";

- if (host->dma_tx_channel)
- txname = dma_chan_name(host->dma_tx_channel);
+ if (dmae->tx_channel)
+ txname = dma_chan_name(dmae->tx_channel);
  else
  txname = "none";

@@ -450,65 +784,83 @@
  * Limit the maximum segment size in any SG entry according to
  * the parameters of the DMA engine device.
  */
- if (host->dma_tx_channel) {
- struct device *dev = host->dma_tx_channel->device->dev;
+ if (dmae->tx_channel) {
+ struct device *dev = dmae->tx_channel->device->dev;
  unsigned int max_seg_size = dma_get_max_seg_size(dev);

  if (max_seg_size < host->mmc->max_seg_size)
  host->mmc->max_seg_size = max_seg_size;
  }
- if (host->dma_rx_channel) {
- struct device *dev = host->dma_rx_channel->device->dev;
+ if (dmae->rx_channel) {
+ struct device *dev = dmae->rx_channel->device->dev;
  unsigned int max_seg_size = dma_get_max_seg_size(dev);

  if (max_seg_size < host->mmc->max_seg_size)
  host->mmc->max_seg_size = max_seg_size;
  }

- if (host->ops && host->ops->dma_setup)
- host->ops->dma_setup(host);
+ if (!dmae->tx_channel || !dmae->rx_channel) {
+ mmci_dmae_release(host);
+ return -EINVAL;
+ }
+
+ return 0;
 }

 /*
  * This is used in or so inline it
  * so it can be discarded.
  */
-static inline void mmci_dma_release(struct mmci_host *host)
+void mmci_dmae_release(struct mmci_host *host)
 {
- if (host->dma_rx_channel)
- dma_release_channel(host->dma_rx_channel);
- if (host->dma_tx_channel)
- dma_release_channel(host->dma_tx_channel);
- host->dma_rx_channel = host->dma_tx_channel = NULL;
-}
+ struct mmci_dmae_priv *dmae = host->dma_priv;

-static void mmci_dma_data_error(struct mmci_host *host)
-{
- dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- dmaengine_terminate_all(host->dma_current);
- host->dma_in_progress = false;
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
- host->data->host_cookie = 0;
+ if (dmae->rx_channel)
+ dma_release_channel(dmae->rx_channel);
+ if (dmae->tx_channel)
+ dma_release_channel(dmae->tx_channel);
+ dmae->rx_channel = dmae->tx_channel = NULL;
 }

 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
+ struct mmci_dmae_priv *dmae = host->dma_priv;
  struct dma_chan *chan;

  if (data->flags & MMC_DATA_READ)
- chan = host->dma_rx_channel;
+ chan = dmae->rx_channel;
  else
- chan = host->dma_tx_channel;
+ chan = dmae->tx_channel;

  dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
  mmc_get_dma_dir(data));
 }

-static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+void mmci_dmae_error(struct mmci_host *host)
 {
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(dmae->cur);
+ host->dma_in_progress = false;
+ dmae->cur = NULL;
+ dmae->desc_current = NULL;
+ host->data->host_cookie = 0;
+
+ mmci_dma_unmap(host, host->data);
+}
+
+void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
  u32 status;
  int i;
+
+ if (!dma_inprogress(host))
+ return;

  /* Wait up to 1ms for the DMA to complete */
  for (i = 0; ; i++) {
@@ -525,13 +877,12 @@
  * contiguous buffers. On TX, we'll get a FIFO underrun error.
  */
  if (status & MCI_RXDATAAVLBLMASK) {
- mmci_dma_data_error(host);
+ mmci_dma_error(host);
  if (!data->error)
  data->error = -EIO;
- }
-
- if (!data->host_cookie)
+ } else if (!data->host_cookie) {
  mmci_dma_unmap(host, data);
+ }

 /*
  * Use of DMA with scatter-gather is impossible.
@@ -543,15 +894,16 @@
  }

  host->dma_in_progress = false;
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
+ dmae->cur = NULL;
+ dmae->desc_current = NULL;
 }

 /* prepares DMA channel and DMA descriptor, returns non-zero on failure */
-static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
  struct dma_chan **dma_chan,
  struct dma_async_tx_descriptor **dma_desc)
 {
+ struct mmci_dmae_priv *dmae = host->dma_priv;
  struct variant_data *variant = host->variant;
  struct dma_slave_config conf = {
  .src_addr = host->phybase + MMCIFIFO,
@@ -570,10 +922,10 @@

  if (data->flags & MMC_DATA_READ) {
  conf.direction = DMA_DEV_TO_MEM;
- chan = host->dma_rx_channel;
+ chan = dmae->rx_channel;
  } else {
  conf.direction = DMA_MEM_TO_DEV;
- chan = host->dma_tx_channel;
+ chan = dmae->tx_channel;
  }

 /* If there's no DMA channel, fall back to PIO */
@@ -582,6 +934,18 @@

 /* If less than or equal to the fifo size, don't bother with DMA */
  if (data->blksz * data->blocks <= variant->fifosize)
+ return -EINVAL;
+
+ /*
+ * This is necessary to get SDIO working on the Ux500. We do not yet
+ * know if this is a bug in:
+ * - The Ux500 DMA controller (DMA40)
+ * - The MMCI DMA interface on the Ux500
+ * some power of two blocks (such as 64 bytes) are sent regularly
+ * during SDIO traffic and those work fine so for these we enable DMA
+ * transfers.
+ */
+ if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
  return -EINVAL;

  device = chan->device;
@@ -610,89 +974,143 @@
  return -ENOMEM;
 }

-static inline int mmci_dma_prep_data(struct mmci_host *host,
- struct mmc_data *data)
+int mmci_dmae_prep_data(struct mmci_host *host,
+ struct mmc_data *data,
+ bool next)
 {
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ struct mmci_dmae_next *nd = &dmae->next_data;
+
+ if (!host->use_dma)
+ return -EINVAL;
+
+ if (next)
+ return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
 /* Check if next job is already prepared. */
- if (host->dma_current && host->dma_desc_current)
+ if (dmae->cur && dmae->desc_current)
  return 0;

 /* No job were prepared thus do it now. */
- return __mmci_dma_prep_data(host, data, &host->dma_current,
- &host->dma_desc_current);
+ return _mmci_dmae_prep_data(host, data, &dmae->cur,
+ &dmae->desc_current);
 }

-static inline int mmci_dma_prep_next(struct mmci_host *host,
- struct mmc_data *data)
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
 {
- struct mmci_host_next *nd = &host->next_data;
- return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
-}
-
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
-{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
  int ret;
- struct mmc_data *data = host->data;

- ret = mmci_dma_prep_data(host, host->data);
- if (ret)
- return ret;
-
- /* Okay, go for it. */
- dev_vdbg(mmc_dev(host->mmc),
- "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
- data->sg_len, data->blksz, data->blocks, data->flags);
  host->dma_in_progress = true;
- dmaengine_submit(host->dma_desc_current);
- dma_async_issue_pending(host->dma_current);
+ ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+ if (ret < 0) {
+ host->dma_in_progress = false;
+ return ret;
+ }
+ dma_async_issue_pending(dmae->cur);

- if (host->variant->qcom_dml)
- dml_start_xfer(host, data);
+ *datactrl |= MCI_DPSM_DMAENABLE;

- datactrl |= MCI_DPSM_DMAENABLE;
-
- /* Trigger the DMA transfer */
- mmci_write_datactrlreg(host, datactrl);
-
- /*
- * Let the MMCI say when the data is ended and it's time
- * to fire next DMA request. When that happens, MMCI will
- * call mmci_data_end()
- */
- writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
- host->base + MMCIMASK0);
  return 0;
 }

-static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
 {
- struct mmci_host_next *next = &host->next_data;
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ struct mmci_dmae_next *next = &dmae->next_data;

- WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
- WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
+ if (!host->use_dma)
+ return;

- host->dma_desc_current = next->dma_desc;
- host->dma_current = next->dma_chan;
- next->dma_desc = NULL;
- next->dma_chan = NULL;
+ WARN_ON(!data->host_cookie && (next->desc || next->chan));
+
+ dmae->desc_current = next->desc;
+ dmae->cur = next->chan;
+ next->desc = NULL;
+ next->chan = NULL;
+}
+
+void mmci_dmae_unprep_data(struct mmci_host *host,
+ struct mmc_data *data, int err)
+
+{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+
+ if (!host->use_dma)
+ return;
+
+ mmci_dma_unmap(host, data);
+
+ if (err) {
+ struct mmci_dmae_next *next = &dmae->next_data;
+ struct dma_chan *chan;
+ if (data->flags & MMC_DATA_READ)
+ chan = dmae->rx_channel;
+ else
+ chan = dmae->tx_channel;
+ dmaengine_terminate_all(chan);
+
+ if (dmae->desc_current == next->desc)
+ dmae->desc_current = NULL;
+
+ if (dmae->cur == next->chan) {
+ host->dma_in_progress = false;
+ dmae->cur = NULL;
+ }
+
+ next->desc = NULL;
+ next->chan = NULL;
+ }
+}
+
+static struct mmci_host_ops mmci_variant_ops = {
+ .prep_data = mmci_dmae_prep_data,
+ .unprep_data = mmci_dmae_unprep_data,
+ .get_datactrl_cfg = mmci_get_dctrl_cfg,
+ .get_next_data = mmci_dmae_get_next_data,
+ .dma_setup = mmci_dmae_setup,
+ .dma_release = mmci_dmae_release,
+ .dma_start = mmci_dmae_start,
+ .dma_finalize = mmci_dmae_finalize,
+ .dma_error = mmci_dmae_error,
+};
+#else
+static struct mmci_host_ops mmci_variant_ops = {
+ .get_datactrl_cfg = mmci_get_dctrl_cfg,
+};
+#endif
+
+static void mmci_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+}
+
+static void ux500_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
+}
+
+static void ux500v2_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
+ host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
 }

 static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
  struct mmci_host *host = mmc_priv(mmc);
  struct mmc_data *data = mrq->data;
- struct mmci_host_next *nd = &host->next_data;

  if (!data)
  return;

- BUG_ON(data->host_cookie);
+ WARN_ON(data->host_cookie);

  if (mmci_validate_data(host, data))
  return;

- if (!mmci_dma_prep_next(host, data))
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+ mmci_prep_data(host, data, true);
 }

 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -704,66 +1122,8 @@
  if (!data || !data->host_cookie)
  return;

- mmci_dma_unmap(host, data);
-
- if (err) {
- struct mmci_host_next *next = &host->next_data;
- struct dma_chan *chan;
- if (data->flags & MMC_DATA_READ)
- chan = host->dma_rx_channel;
- else
- chan = host->dma_tx_channel;
- dmaengine_terminate_all(chan);
-
- if (host->dma_desc_current == next->dma_desc)
- host->dma_desc_current = NULL;
-
- if (host->dma_current == next->dma_chan) {
- host->dma_in_progress = false;
- host->dma_current = NULL;
- }
-
- next->dma_desc = NULL;
- next->dma_chan = NULL;
- data->host_cookie = 0;
- }
+ mmci_unprep_data(host, data, err);
 }
-
-#else
-/* Blank functions if the DMA engine is not available */
-static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
-{
-}
-static inline void mmci_dma_setup(struct mmci_host *host)
-{
-}
-
-static inline void mmci_dma_release(struct mmci_host *host)
-{
-}
-
-static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
-{
-}
-
-static inline void mmci_dma_finalize(struct mmci_host *host,
- struct mmc_data *data)
-{
-}
-
-static inline void mmci_dma_data_error(struct mmci_host *host)
-{
-}
-
-static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
-{
- return -ENOSYS;
-}
-
-#define mmci_pre_request NULL
-#define mmci_post_request NULL
-
-#endif

 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
@@ -771,7 +1131,6 @@
  unsigned int datactrl, timeout, irqmask;
  unsigned long long clks;
  void __iomem *base;
- int blksz_bits;

  dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
  data->blksz, data->blocks, data->flags);
@@ -789,18 +1148,8 @@
  writel(timeout, base + MMCIDATATIMER);
  writel(host->size, base + MMCIDATALENGTH);

- blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
-
- if (variant->blksz_datactrl16)
- datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
- else if (variant->blksz_datactrl4)
- datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
- else
- datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-
- if (data->flags & MMC_DATA_READ)
- datactrl |= MCI_DPSM_DIRECTION;
+ datactrl = host->ops->get_datactrl_cfg(host);
+ datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;

  if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
  u32 clk;
@@ -831,7 +1180,7 @@
  * Attempt to use DMA operation mode, if this
  * should fail, fall back to PIO mode
  */
- if (!mmci_dma_start_data(host, datactrl))
+ if (!mmci_dma_start(host, datactrl))
  return;

 /* IRQ mode, map the SG list for CPU reading/writing */
@@ -864,21 +1213,46 @@
 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
 {
  void __iomem *base = host->base;
+ unsigned long long clks;

  dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
  cmd->opcode, cmd->arg, cmd->flags);

- if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
+ if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
  writel(0, base + MMCICOMMAND);
  mmci_reg_delay(host);
  }

- c |= cmd->opcode | MCI_CPSM_ENABLE;
+ if (host->variant->cmdreg_stop &&
+ cmd->opcode == MMC_STOP_TRANSMISSION)
+ c |= host->variant->cmdreg_stop;
+
+ c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
  if (cmd->flags & MMC_RSP_PRESENT) {
  if (cmd->flags & MMC_RSP_136)
- c |= MCI_CPSM_LONGRSP;
- c |= MCI_CPSM_RESPONSE;
+ c |= host->variant->cmdreg_lrsp_crc;
+ else if (cmd->flags & MMC_RSP_CRC)
+ c |= host->variant->cmdreg_srsp_crc;
+ else
+ c |= host->variant->cmdreg_srsp;
  }
+
+ if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
+ if (!cmd->busy_timeout)
+ cmd->busy_timeout = 10 * MSEC_PER_SEC;
+
+ if (cmd->busy_timeout > host->mmc->max_busy_timeout)
+ clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
+ else
+ clks = (unsigned long long)cmd->busy_timeout * host->cclk;
+
+ do_div(clks, MSEC_PER_SEC);
+ writel_relaxed(clks, host->base + MMCIDATATIMER);
+ }
+
+ if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
+ host->ops->pre_sig_volt_switch(host);
+
  if (/*interrupt*/0)
  c |= MCI_CPSM_INTERRUPT;

@@ -889,6 +1263,12 @@

  writel(cmd->arg, base + MMCIARGUMENT);
  writel(c, base + MMCICOMMAND);
+}
+
+static void mmci_stop_command(struct mmci_host *host)
+{
+ host->stop_abort.error = 0;
+ mmci_start_command(host, &host->stop_abort, 0);
 }

 static void
@@ -910,10 +1290,7 @@
  u32 remain, success;

 /* Terminate the DMA transfer */
- if (dma_inprogress(host)) {
- mmci_dma_data_error(host);
- mmci_dma_unmap(host, data);
- }
+ mmci_dma_error(host);

 /*
  * Calculate how far we are into the transfer. Note that
@@ -922,8 +1299,12 @@
  * can be as much as a FIFO-worth of data ahead. This
  * matters for FIFO overruns only.
  */
- remain = readl(host->base + MMCIDATACNT);
- success = data->blksz * data->blocks - remain;
+ if (!host->variant->datacnt_useless) {
+ remain = readl(host->base + MMCIDATACNT);
+ success = data->blksz * data->blocks - remain;
+ } else {
+ success = 0;
+ }

  dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
  status_err, success);
@@ -951,15 +1332,20 @@
  dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

  if (status & MCI_DATAEND || data->error) {
- if (dma_inprogress(host))
- mmci_dma_finalize(host, data);
+ mmci_dma_finalize(host, data);
+
  mmci_stop_data(host);

  if (!data->error)
 /* The error clause is handled above, success! */
  data->bytes_xfered = data->blksz * data->blocks;

- if (!data->stop || host->mrq->sbc) {
+ if (!data->stop) {
+ if (host->variant->cmdreg_stop && data->error)
+ mmci_stop_command(host);
+ else
+ mmci_request_end(host, data->mrq);
+ } else if (host->mrq->sbc && !data->error) {
  mmci_request_end(host, data->mrq);
  } else {
  mmci_start_command(host, data->stop, 0);
@@ -971,77 +1357,32 @@
 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
  unsigned int status)
 {
+ u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
  void __iomem *base = host->base;
- bool sbc;
+ bool sbc, busy_resp;

  if (!cmd)
  return;

  sbc = (cmd == host->mrq->sbc);
+ busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

 /*
  * We need to be one of these interrupts to be considered worth
  * handling. Note that we tag on any latent IRQs postponed
  * due to waiting for busy status.
  */
- if (!((status|host->busy_status) &
- (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
+ if (host->variant->busy_timeout && busy_resp)
+ err_msk |= MCI_DATATIMEOUT;
+
+ if (!((status | host->busy_status) &
+ (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
  return;

- /*
- * ST Micro variant: handle busy detection.
- */
- if (host->variant->busy_detect) {
- bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
-
- /* We are busy with a command, return */
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag))
+ /* Handle busy detection on DAT0 if the variant supports it. */
+ if (busy_resp && host->variant->busy_detect)
+ if (!host->ops->busy_complete(host, status, err_msk))
  return;
-
- /*
- * We were not busy, but we now got a busy response on
- * something that was not an error, and we double-check
- * that the special busy status bit is still set before
- * proceeding.
- */
- if (!host->busy_status && busy_resp &&
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
-
- /* Clear the busy start IRQ */
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
-
- /* Unmask the busy end IRQ */
- writel(readl(base + MMCIMASK0) |
- host->variant->busy_detect_mask,
- base + MMCIMASK0);
- /*
- * Now cache the last response status code (until
- * the busy bit goes low), and return.
- */
- host->busy_status =
- status & (MCI_CMDSENT|MCI_CMDRESPEND);
- return;
- }
-
- /*
- * At this point we are not busy with a command, we have
- * not received a new busy request, clear and mask the busy
- * end IRQ and fall through to process the IRQ.
- */
- if (host->busy_status) {
-
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
-
- writel(readl(base + MMCIMASK0) &
- ~host->variant->busy_detect_mask,
- base + MMCIMASK0);
- host->busy_status = 0;
- }
- }

  host->cmd = NULL;

@@ -1049,6 +1390,10 @@
  cmd->error = -ETIMEDOUT;
  } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
  cmd->error = -EILSEQ;
+ } else if (host->variant->busy_timeout && busy_resp &&
+ status & MCI_DATATIMEOUT) {
+ cmd->error = -ETIMEDOUT;
+ host->irq_action = IRQ_WAKE_THREAD;
  } else {
  cmd->resp[0] = readl(base + MMCIRESPONSE0);
  cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1059,16 +1404,22 @@
  if ((!sbc && !cmd->data) || cmd->error) {
  if (host->data) {
 /* Terminate the DMA transfer */
- if (dma_inprogress(host)) {
- mmci_dma_data_error(host);
- mmci_dma_unmap(host, host->data);
- }
+ mmci_dma_error(host);
+
  mmci_stop_data(host);
+ if (host->variant->cmdreg_stop && cmd->error) {
+ mmci_stop_command(host);
+ return;
+ }
  }
- mmci_request_end(host, host->mrq);
+
+ if (host->irq_action != IRQ_WAKE_THREAD)
+ mmci_request_end(host, host->mrq);
+
  } else if (sbc) {
  mmci_start_command(host, host->mrq->cmd, 0);
- } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ } else if (!host->variant->datactrl_first &&
+ !(cmd->data->flags & MMC_DATA_READ)) {
  mmci_start_data(host, cmd->data);
  }
 }
@@ -1257,9 +1608,9 @@
 {
  struct mmci_host *host = dev_id;
  u32 status;
- int ret = 0;

  spin_lock(&host->lock);
+ host->irq_action = IRQ_HANDLED;

  do {
  status = readl(host->base + MMCISTATUS);
@@ -1268,18 +1619,12 @@
  if (status & host->mask1_reg)
  mmci_pio_irq(irq, dev_id);

- status &= ~MCI_IRQ1MASK;
+ status &= ~host->variant->irq_pio_mask;
  }

 /*
- * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
- * enabled) in mmci_cmd_irq() function where ST Micro busy
- * detection variant is handled. Considering the HW seems to be
- * triggering the IRQ on both edges while monitoring DAT0 for
- * busy completion and that same status bit is used to monitor
- * start and end of busy detection, special care must be taken
- * to make sure that both start and end interrupts are always
- * cleared one after the other.
+ * Busy detection is managed by mmci_cmd_irq(), including to
+ * clear the corresponding IRQ.
  */
  status &= readl(host->base + MMCIMASK0);
  if (host->variant->busy_detect)
@@ -1305,12 +1650,41 @@
  if (host->variant->busy_detect_flag)
  status &= ~host->variant->busy_detect_flag;

- ret = 1;
  } while (status);

  spin_unlock(&host->lock);

- return IRQ_RETVAL(ret);
+ return host->irq_action;
+}
+
+/*
+ * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
+ *
+ * A reset is needed for some variants, where a datatimeout for a R1B request
+ * causes the DPSM to stay busy (non-functional).
+ */
+static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ unsigned long flags;
+
+ if (host->rst) {
+ reset_control_assert(host->rst);
+ udelay(2);
+ reset_control_deassert(host->rst);
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
+
+ host->irq_action = IRQ_HANDLED;
+ mmci_request_end(host, host->mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return host->irq_action;
 }

 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1333,7 +1707,8 @@
  if (mrq->data)
  mmci_get_next_data(host, mrq->data);

- if (mrq->data && mrq->data->flags & MMC_DATA_READ)
+ if (mrq->data &&
+ (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
  mmci_start_data(host, mrq->data);

  if (mrq->sbc)
@@ -1342,6 +1717,21 @@
  mmci_start_command(host, mrq->cmd, 0);

  spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ u32 max_busy_timeout = 0;
+
+ if (!host->variant->busy_detect)
+ return;
+
+ if (host->variant->busy_timeout && mmc->actual_clock)
+ max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
+ MSEC_PER_SEC);
+
+ mmc->max_busy_timeout = max_busy_timeout;
 }

 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -1420,7 +1810,7 @@
  if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
  pinctrl_select_state(host->pinctrl, host->pins_opendrain);
  else
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(mmc_dev(mmc));
  }

 /*
@@ -1443,8 +1833,18 @@

  spin_lock_irqsave(&host->lock, flags);

- mmci_set_clkreg(host, ios->clock);
- mmci_write_pwrreg(host, pwr);
+ if (host->ops && host->ops->set_clkreg)
+ host->ops->set_clkreg(host, ios->clock);
+ else
+ mmci_set_clkreg(host, ios->clock);
+
+ mmci_set_max_busy_timeout(mmc);
+
+ if (host->ops && host->ops->set_pwrreg)
+ host->ops->set_pwrreg(host, pwr);
+ else
+ mmci_write_pwrreg(host, pwr);
+
  mmci_reg_delay(host);

  spin_unlock_irqrestore(&host->lock, flags);
@@ -1467,28 +1867,18 @@

 static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
 {
- int ret = 0;
+ struct mmci_host *host = mmc_priv(mmc);
+ int ret;

- if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = mmc_regulator_set_vqmmc(mmc, ios);

- switch (ios->signal_voltage) {
- case MMC_SIGNAL_VOLTAGE_330:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 2700000, 3600000);
- break;
- case MMC_SIGNAL_VOLTAGE_180:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 1700000, 1950000);
- break;
- case MMC_SIGNAL_VOLTAGE_120:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 1100000, 1300000);
- break;
- }
+ if (!ret && host->ops && host->ops->post_sig_volt_switch)
+ ret = host->ops->post_sig_volt_switch(host, ios);
+ else if (ret)
+ ret = 0;

- if (ret)
- dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
- }
+ if (ret < 0)
+ dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

  return ret;
 }
@@ -1523,6 +1913,12 @@
  host->pwr_reg_add |= MCI_ST_CMDDIREN;
  if (of_get_property(np, "st,sig-pin-fbclk", NULL))
  host->pwr_reg_add |= MCI_ST_FBCLKEN;
+ if (of_get_property(np, "st,sig-dir", NULL))
+ host->pwr_reg_add |= MCI_STM32_DIRPOL;
+ if (of_get_property(np, "st,neg-edge", NULL))
+ host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
+ if (of_get_property(np, "st,use-ckin", NULL))
+ host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

  if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
  mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
@@ -1564,6 +1960,8 @@

  host = mmc_priv(mmc);
  host->mmc = mmc;
+ host->mmc_ops = &mmci_ops;
+ mmc->ops = &mmci_ops;

 /*
  * Some variant (STM32) doesn't have opendrain bit, nevertheless
@@ -1574,14 +1972,6 @@
  if (IS_ERR(host->pinctrl)) {
  dev_err(&dev->dev, "failed to get pinctrl");
  ret = PTR_ERR(host->pinctrl);
- goto host_free;
- }
-
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- dev_err(mmc_dev(mmc), "Can't select default pins\n");
- ret = PTR_ERR(host->pins_default);
  goto host_free;
  }

@@ -1649,6 +2039,8 @@
  */
  if (variant->st_clkdiv)
  mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+ else if (variant->stm32_clkdiv)
+ mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
  else if (variant->explicit_mclk_control)
  mmc->f_min = clk_round_rate(host->clk, 100000);
  else
@@ -1670,6 +2062,12 @@

  dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

+ host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
+ if (IS_ERR(host->rst)) {
+ ret = PTR_ERR(host->rst);
+ goto clk_disable;
+ }
+
 /* Get regulators and the supported OCR mask */
  ret = mmc_regulator_get_supply(mmc);
  if (ret)
@@ -1679,13 +2077,6 @@
  mmc->ocr_avail = plat->ocr_mask;
  else if (plat->ocr_mask)
  dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
-
- /* DT takes precedence over platform data. */
- if (!np) {
- if (!plat->cd_invert)
- mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
- mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- }

 /* We support these capabilities. */
  mmc->caps |= MMC_CAP_CMD23;
@@ -1703,10 +2094,16 @@
  mmci_write_datactrlreg(host,
  host->variant->busy_dpsm_flag);
  mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- mmc->max_busy_timeout = 0;
  }

- mmc->ops = &mmci_ops;
+ /* Variants with mandatory busy timeout in HW needs R1B responses. */
+ if (variant->busy_timeout)
+ mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
+ /* Prepare a CMD12 - needed to clear the DPSM on some variants. */
+ host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
+ host->stop_abort.arg = 0;
+ host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;

 /* We support these PM capabilities. */
  mmc->pm_caps |= MMC_PM_KEEP_POWER;
@@ -1732,13 +2129,13 @@
 /*
  * Block size can be up to 2048 bytes, but must be a power of two.
  */
- mmc->max_blk_size = 1 << 11;
+ mmc->max_blk_size = 1 << variant->datactrl_blocksz;

 /*
  * Limit the number of blocks transferred so that we don't overflow
  * the maximum request size.
  */
- mmc->max_blk_count = mmc->max_req_size >> 11;
+ mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;

  spin_lock_init(&host->lock);

@@ -1754,34 +2151,21 @@
  * - not using DT but using a descriptor table, or
  * - using a table of descriptors ALONGSIDE DT, or
  * look up these descriptors named "cd" and "wp" right here, fail
- * silently of these do not exist and proceed to try platform data
+ * silently of these do not exist
  */
  if (!np) {
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
- if (ret < 0) {
- if (ret == -EPROBE_DEFER)
- goto clk_disable;
- else if (gpio_is_valid(plat->gpio_cd)) {
- ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
- if (ret)
- goto clk_disable;
- }
- }
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;

- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
- if (ret < 0) {
- if (ret == -EPROBE_DEFER)
- goto clk_disable;
- else if (gpio_is_valid(plat->gpio_wp)) {
- ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
- if (ret)
- goto clk_disable;
- }
- }
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
  }

- ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
- DRIVER_NAME " (cmd)", host);
+ ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+ mmci_irq_thread, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
  if (ret)
  goto clk_disable;

@@ -1808,7 +2192,9 @@
  pm_runtime_set_autosuspend_delay(&dev->dev, 50);
  pm_runtime_use_autosuspend(&dev->dev);

- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto clk_disable;

  pm_runtime_put(&dev->dev);
  return 0;
@@ -1820,7 +2206,7 @@
  return ret;
 }

-static int mmci_remove(struct amba_device *dev)
+static void mmci_remove(struct amba_device *dev)
 {
  struct mmc_host *mmc = amba_get_drvdata(dev);

@@ -1848,8 +2234,6 @@
  clk_disable_unprepare(host->clk);
  mmc_free_host(mmc);
  }
-
- return 0;
 }

 #ifdef CONFIG_PM
@@ -1912,7 +2296,7 @@
  struct mmci_host *host = mmc_priv(mmc);
  clk_prepare_enable(host->clk);
  mmci_restore(host);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
  }

  return 0;
@@ -1977,6 +2361,16 @@
  .mask = 0x00ffffff,
  .data = &variant_stm32,
  },
+ {
+ .id = 0x10153180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmc,
+ },
+ {
+ .id = 0x00253180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv2,
+ },
 /* Qualcomm variants */
  {
  .id = 0x00051180,
@@ -1992,6 +2386,7 @@
  .drv = {
  .name = DRIVER_NAME,
  .pm = &mmci_dev_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  },
  .probe = mmci_probe,
  .remove = mmci_remove,
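
Note (not part of the upstream patch): the core of this update is the switch from per-variant boolean flags (blksz_datactrl16, blksz_datactrl4, the open-coded DMA helpers) to a mmci_host_ops vtable that each variant installs through the new variant_data .init hook (mmci_variant_init, ux500_variant_init, ux500v2_variant_init, sdmmc_variant_init, qcom_variant_init above). The stand-alone C sketch below illustrates only that dispatch pattern; all demo_* names are hypothetical stand-ins, not the driver's real types, which live in drivers/mmc/host/mmci.h.

/* Stand-alone sketch of the ops-vtable pattern this patch introduces.
 * All demo_* names are hypothetical; compile with any C compiler. */
#include <stdio.h>

struct demo_host;

struct demo_host_ops {
	int (*dma_setup)(struct demo_host *host);
	unsigned int (*get_datactrl_cfg)(struct demo_host *host);
};

struct demo_variant {
	void (*init)(struct demo_host *host);	/* installs the ops table */
};

struct demo_host {
	struct demo_host_ops *ops;
	int use_dma;
};

static int demo_dmae_setup(struct demo_host *host)
{
	return 0;	/* pretend the DMA channel request succeeded */
}

static unsigned int demo_get_dctrl_cfg(struct demo_host *host)
{
	return 1u;	/* stands in for MCI_DPSM_ENABLE | block-size bits */
}

static struct demo_host_ops demo_variant_ops = {
	.dma_setup = demo_dmae_setup,
	.get_datactrl_cfg = demo_get_dctrl_cfg,
};

static void demo_variant_init(struct demo_host *host)
{
	host->ops = &demo_variant_ops;	/* mirrors mmci_variant_init() */
}

static const struct demo_variant demo_variant = { .init = demo_variant_init };

int main(void)
{
	struct demo_host host = { 0 };

	/* mirrors mmci_probe(): the variant installs its ops table,
	 * then the core probes the hooks it finds there */
	if (demo_variant.init)
		demo_variant.init(&host);
	if (host.ops && host.ops->dma_setup && !host.ops->dma_setup(&host))
		host.use_dma = 1;

	printf("use_dma=%d datactrl=0x%x\n", host.use_dma,
	       host.ops->get_datactrl_cfg(&host));
	return 0;
}

The same shape explains why the patched mmci_dma_setup() simply probes host->ops->dma_setup and flips host->use_dma: for variants whose ops table leaves the DMA hooks NULL, every mmci_dma_*() wrapper reduces to a no-op or a PIO fallback.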