2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
....@@ -6,6 +6,7 @@
66 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
77 */
88 #include <linux/clk.h>
9
+#include <linux/delay.h>
910 #include <linux/slab.h>
1011 #include <linux/sched/task_stack.h>
1112 #include <linux/interrupt.h>
....@@ -13,7 +14,10 @@
1314 #include <linux/mtd/partitions.h>
1415 #include <linux/of.h>
1516 #include <linux/of_device.h>
17
+#include <linux/pm_runtime.h>
18
+#include <linux/dma/mxs-dma.h>
1619 #include "gpmi-nand.h"
20
+#include "gpmi-regs.h"
1721 #include "bch-regs.h"
1822
1923 /* Resource names for the GPMI NAND driver. */
....@@ -21,149 +25,215 @@
2125 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
2226 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
2327
24
-/* add our owner bbt descriptor */
25
-static uint8_t scan_ff_pattern[] = { 0xff };
26
-static struct nand_bbt_descr gpmi_bbt_descr = {
27
- .options = 0,
28
- .offs = 0,
29
- .len = 1,
30
- .pattern = scan_ff_pattern
31
-};
28
+/* Converts time to clock cycles */
29
+#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
3230
31
+#define MXS_SET_ADDR 0x4
32
+#define MXS_CLR_ADDR 0x8
3333 /*
34
- * We may change the layout if we can get the ECC info from the datasheet,
35
- * else we will use all the (page + OOB).
34
+ * Clear the bit and poll until it reads back as cleared. This is
+ * usually called with a reset address and a mask of either SFTRST
+ * (bit 31) or CLKGATE (bit 30).
3637 */
37
-static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
38
- struct mtd_oob_region *oobregion)
38
+static int clear_poll_bit(void __iomem *addr, u32 mask)
3939 {
40
- struct nand_chip *chip = mtd_to_nand(mtd);
41
- struct gpmi_nand_data *this = nand_get_controller_data(chip);
42
- struct bch_geometry *geo = &this->bch_geometry;
40
+ int timeout = 0x400;
4341
44
- if (section)
45
- return -ERANGE;
42
+ /* clear the bit */
43
+ writel(mask, addr + MXS_CLR_ADDR);
4644
47
- oobregion->offset = 0;
48
- oobregion->length = geo->page_size - mtd->writesize;
45
+ /*
46
+ * SFTRST needs 3 GPMI clocks to settle; the reference manual
+ * recommends waiting 1 us.
48
+ */
49
+ udelay(1);
4950
50
- return 0;
51
+ /* poll the bit becoming clear */
52
+ while ((readl(addr) & mask) && --timeout)
53
+ /* nothing */;
54
+
55
+ return !timeout;
5156 }
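The MXS_SET_ADDR/MXS_CLR_ADDR offsets above rely on the i.MX "set/clear" register aliases: writing a mask to base + 0x4 sets those bits in the register and writing it to base + 0x8 clears them, so no read-modify-write is needed. A minimal sketch of that convention, with hypothetical helper names that are not part of the driver:

#include <linux/io.h>

/* Illustrative helpers for the set/clear aliases assumed above. */
static inline void mxs_set_bits(void __iomem *reg, u32 mask)
{
	writel(mask, reg + MXS_SET_ADDR);	/* hardware ORs mask into *reg */
}

static inline void mxs_clear_bits(void __iomem *reg, u32 mask)
{
	writel(mask, reg + MXS_CLR_ADDR);	/* hardware clears mask bits in *reg */
}

clear_poll_bit() above is exactly this clear alias followed by a bounded poll of the base register.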
5257
53
-static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
54
- struct mtd_oob_region *oobregion)
58
+#define MODULE_CLKGATE (1 << 30)
59
+#define MODULE_SFTRST (1 << 31)
60
+/*
+ * The current mxs_reset_block() does two things:
+ * [1] enable the module.
+ * [2] reset the module.
+ *
+ * In most cases that is fine, but the MX23 has a hardware bug in the
+ * BCH block (see erratum #2847): if you soft reset the BCH block, it
+ * becomes unusable until the next hard reset. This matters in NAND boot
+ * mode, where the chip's boot ROM initializes the BCH block itself. If
+ * the driver then resets the BCH again, the BCH stops working and you
+ * will see a DMA timeout. The bug is fixed in later chips such as the
+ * MX28.
+ *
+ * To avoid this bug, we add a `just_enable` parameter and re-implement
+ * mxs_reset_block() here as gpmi_reset_block().
+ */
77
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
5578 {
56
- struct nand_chip *chip = mtd_to_nand(mtd);
57
- struct gpmi_nand_data *this = nand_get_controller_data(chip);
58
- struct bch_geometry *geo = &this->bch_geometry;
79
+ int ret;
80
+ int timeout = 0x400;
5981
60
- if (section)
61
- return -ERANGE;
82
+ /* clear and poll SFTRST */
83
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
84
+ if (unlikely(ret))
85
+ goto error;
6286
63
- /* The available oob size we have. */
64
- if (geo->page_size < mtd->writesize + mtd->oobsize) {
65
- oobregion->offset = geo->page_size - mtd->writesize;
66
- oobregion->length = mtd->oobsize - oobregion->offset;
87
+ /* clear CLKGATE */
88
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
89
+
90
+ if (!just_enable) {
91
+ /* set SFTRST to reset the block */
92
+ writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
93
+ udelay(1);
94
+
95
+ /* poll CLKGATE becoming set */
96
+ while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
97
+ /* nothing */;
98
+ if (unlikely(!timeout))
99
+ goto error;
67100 }
68101
102
+ /* clear and poll SFTRST */
103
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
104
+ if (unlikely(ret))
105
+ goto error;
106
+
107
+ /* clear and poll CLKGATE */
108
+ ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
109
+ if (unlikely(ret))
110
+ goto error;
111
+
69112 return 0;
113
+
114
+error:
115
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
116
+ return -ETIMEDOUT;
70117 }
71118
72
-static const char * const gpmi_clks_for_mx2x[] = {
73
- "gpmi_io",
74
-};
75
-
76
-static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
77
- .ecc = gpmi_ooblayout_ecc,
78
- .free = gpmi_ooblayout_free,
79
-};
80
-
81
-static const struct gpmi_devdata gpmi_devdata_imx23 = {
82
- .type = IS_MX23,
83
- .bch_max_ecc_strength = 20,
84
- .max_chain_delay = 16000,
85
- .clks = gpmi_clks_for_mx2x,
86
- .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
87
-};
88
-
89
-static const struct gpmi_devdata gpmi_devdata_imx28 = {
90
- .type = IS_MX28,
91
- .bch_max_ecc_strength = 20,
92
- .max_chain_delay = 16000,
93
- .clks = gpmi_clks_for_mx2x,
94
- .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
95
-};
96
-
97
-static const char * const gpmi_clks_for_mx6[] = {
98
- "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
99
-};
100
-
101
-static const struct gpmi_devdata gpmi_devdata_imx6q = {
102
- .type = IS_MX6Q,
103
- .bch_max_ecc_strength = 40,
104
- .max_chain_delay = 12000,
105
- .clks = gpmi_clks_for_mx6,
106
- .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
107
-};
108
-
109
-static const struct gpmi_devdata gpmi_devdata_imx6sx = {
110
- .type = IS_MX6SX,
111
- .bch_max_ecc_strength = 62,
112
- .max_chain_delay = 12000,
113
- .clks = gpmi_clks_for_mx6,
114
- .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
115
-};
116
-
117
-static const char * const gpmi_clks_for_mx7d[] = {
118
- "gpmi_io", "gpmi_bch_apb",
119
-};
120
-
121
-static const struct gpmi_devdata gpmi_devdata_imx7d = {
122
- .type = IS_MX7D,
123
- .bch_max_ecc_strength = 62,
124
- .max_chain_delay = 12000,
125
- .clks = gpmi_clks_for_mx7d,
126
- .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
127
-};
128
-
129
-static irqreturn_t bch_irq(int irq, void *cookie)
119
+static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
130120 {
131
- struct gpmi_nand_data *this = cookie;
121
+ struct clk *clk;
122
+ int ret;
123
+ int i;
132124
133
- gpmi_clear_bch(this);
134
- complete(&this->bch_done);
135
- return IRQ_HANDLED;
125
+ for (i = 0; i < GPMI_CLK_MAX; i++) {
126
+ clk = this->resources.clock[i];
127
+ if (!clk)
128
+ break;
129
+
130
+ if (v) {
131
+ ret = clk_prepare_enable(clk);
132
+ if (ret)
133
+ goto err_clk;
134
+ } else {
135
+ clk_disable_unprepare(clk);
136
+ }
137
+ }
138
+ return 0;
139
+
140
+err_clk:
141
+ for (; i > 0; i--)
142
+ clk_disable_unprepare(this->resources.clock[i - 1]);
143
+ return ret;
136144 }
137145
138
-/*
139
- * Calculate the ECC strength by hand:
140
- * E : The ECC strength.
141
- * G : the length of Galois Field.
142
- * N : The chunk count of per page.
143
- * O : the oobsize of the NAND chip.
144
- * M : the metasize of per page.
145
- *
146
- * The formula is :
147
- * E * G * N
148
- * ------------ <= (O - M)
149
- * 8
150
- *
151
- * So, we get E by:
152
- * (O - M) * 8
153
- * E <= -------------
154
- * G * N
155
- */
156
-static inline int get_ecc_strength(struct gpmi_nand_data *this)
146
+static int gpmi_init(struct gpmi_nand_data *this)
157147 {
148
+ struct resources *r = &this->resources;
149
+ int ret;
150
+
151
+ ret = pm_runtime_get_sync(this->dev);
152
+ if (ret < 0) {
153
+ pm_runtime_put_noidle(this->dev);
154
+ return ret;
155
+ }
156
+
157
+ ret = gpmi_reset_block(r->gpmi_regs, false);
158
+ if (ret)
159
+ goto err_out;
160
+
161
+ /*
162
+ * Reset BCH here, too. We got failures otherwise :(
163
+ * See later BCH reset for explanation of MX23 and MX28 handling
164
+ */
165
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
166
+ if (ret)
167
+ goto err_out;
168
+
169
+ /* Choose NAND mode. */
170
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
171
+
172
+ /* Set the IRQ polarity. */
173
+ writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
174
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
175
+
176
+ /* Disable Write-Protection. */
177
+ writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
178
+
179
+ /* Select BCH ECC. */
180
+ writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
181
+
182
+ /*
183
+ * Decouple the chip select from the DMA channel. We use DMA channel
+ * 0 for all the chips.
185
+ */
186
+ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
187
+
188
+err_out:
189
+ pm_runtime_mark_last_busy(this->dev);
190
+ pm_runtime_put_autosuspend(this->dev);
191
+ return ret;
192
+}
193
+
194
+/* Dumps the GPMI and BCH state for debugging. It is called only when a bug occurs. */
195
+static void gpmi_dump_info(struct gpmi_nand_data *this)
196
+{
197
+ struct resources *r = &this->resources;
158198 struct bch_geometry *geo = &this->bch_geometry;
159
- struct mtd_info *mtd = nand_to_mtd(&this->nand);
160
- int ecc_strength;
199
+ u32 reg;
200
+ int i;
161201
162
- ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
163
- / (geo->gf_len * geo->ecc_chunk_count);
202
+ dev_err(this->dev, "Show GPMI registers :\n");
203
+ for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
204
+ reg = readl(r->gpmi_regs + i * 0x10);
205
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
206
+ }
164207
165
- /* We need the minor even number. */
166
- return round_down(ecc_strength, 2);
208
+ /* start to print out the BCH info */
209
+ dev_err(this->dev, "Show BCH registers :\n");
210
+ for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
211
+ reg = readl(r->bch_regs + i * 0x10);
212
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
213
+ }
214
+ dev_err(this->dev, "BCH Geometry :\n"
215
+ "GF length : %u\n"
216
+ "ECC Strength : %u\n"
217
+ "Page Size in Bytes : %u\n"
218
+ "Metadata Size in Bytes : %u\n"
219
+ "ECC Chunk Size in Bytes: %u\n"
220
+ "ECC Chunk Count : %u\n"
221
+ "Payload Size in Bytes : %u\n"
222
+ "Auxiliary Size in Bytes: %u\n"
223
+ "Auxiliary Status Offset: %u\n"
224
+ "Block Mark Byte Offset : %u\n"
225
+ "Block Mark Bit Offset : %u\n",
226
+ geo->gf_len,
227
+ geo->ecc_strength,
228
+ geo->page_size,
229
+ geo->metadata_size,
230
+ geo->ecc_chunk_size,
231
+ geo->ecc_chunk_count,
232
+ geo->payload_size,
233
+ geo->auxiliary_size,
234
+ geo->auxiliary_status_offset,
235
+ geo->block_mark_byte_offset,
236
+ geo->block_mark_bit_offset);
167237 }
168238
169239 static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
....@@ -171,7 +241,7 @@
171241 struct bch_geometry *geo = &this->bch_geometry;
172242
173243 /* Do the sanity check. */
174
- if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
244
+ if (GPMI_IS_MXS(this)) {
175245 /* The mx23/mx28 only support the GF13. */
176246 if (geo->gf_len == 14)
177247 return false;
....@@ -204,7 +274,8 @@
204274 default:
205275 dev_err(this->dev,
206276 "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
207
- chip->ecc_strength_ds, chip->ecc_step_ds);
277
+ nanddev_get_ecc_requirements(&chip->base)->strength,
278
+ nanddev_get_ecc_requirements(&chip->base)->step_size);
208279 return -EINVAL;
209280 }
210281 geo->ecc_chunk_size = ecc_step;
....@@ -293,6 +364,37 @@
293364 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
294365 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
295366 return 0;
367
+}
368
+
369
+/*
+ * Calculate the ECC strength by hand:
+ * E : the ECC strength.
+ * G : the length of the Galois field.
+ * N : the number of ECC chunks per page.
+ * O : the oobsize of the NAND chip.
+ * M : the metadata size per page.
+ *
+ * The formula is:
+ *      E * G * N
+ *      --------- <= (O - M)
+ *          8
+ *
+ * So we get E by:
+ *           (O - M) * 8
+ *      E <= -----------
+ *             G * N
+ */
387
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
388
+{
389
+ struct bch_geometry *geo = &this->bch_geometry;
390
+ struct mtd_info *mtd = nand_to_mtd(&this->nand);
391
+ int ecc_strength;
392
+
393
+ ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
394
+ / (geo->gf_len * geo->ecc_chunk_count);
395
+
396
+ /* Round down to the nearest even number. */
397
+ return round_down(ecc_strength, 2);
296398 }
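As a worked example of the bound above (the numbers are illustrative, not from a specific datasheet): with O = 224 OOB bytes, M = 10 metadata bytes, G = 13 (GF(2^13)) and N = 8 chunks per page,

	E <= (224 - 10) * 8 / (13 * 8) = 1712 / 104 = 16   (integer division)
	round_down(16, 2) = 16

so the driver would program a BCH strength of 16 correctable bits per ECC chunk.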
297399
298400 static int legacy_set_geometry(struct gpmi_nand_data *this)
....@@ -407,9 +509,11 @@
407509 return 0;
408510 }
409511
410
-int common_nfc_set_geometry(struct gpmi_nand_data *this)
512
+static int common_nfc_set_geometry(struct gpmi_nand_data *this)
411513 {
412514 struct nand_chip *chip = &this->nand;
515
+ const struct nand_ecc_props *requirements =
516
+ nanddev_get_ecc_requirements(&chip->base);
413517
414518 if (chip->ecc.strength > 0 && chip->ecc.size > 0)
415519 return set_geometry_by_ecc_info(this, chip->ecc.strength,
....@@ -417,28 +521,326 @@
417521
418522 if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
419523 || legacy_set_geometry(this)) {
420
- if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
524
+ if (!(requirements->strength > 0 && requirements->step_size > 0))
421525 return -EINVAL;
422526
423
- return set_geometry_by_ecc_info(this, chip->ecc_strength_ds,
424
- chip->ecc_step_ds);
527
+ return set_geometry_by_ecc_info(this,
528
+ requirements->strength,
529
+ requirements->step_size);
425530 }
426531
427532 return 0;
428533 }
429534
430
-struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
535
+/* Configures the geometry for BCH. */
536
+static int bch_set_geometry(struct gpmi_nand_data *this)
537
+{
538
+ struct resources *r = &this->resources;
539
+ int ret;
540
+
541
+ ret = common_nfc_set_geometry(this);
542
+ if (ret)
543
+ return ret;
544
+
545
+ ret = pm_runtime_get_sync(this->dev);
546
+ if (ret < 0) {
547
+ pm_runtime_put_autosuspend(this->dev);
548
+ return ret;
549
+ }
550
+
551
+ /*
552
+ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on
+ * this chip, otherwise it will lock up. So we skip resetting BCH on
+ * the MX23 and MX28.
555
+ */
556
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
557
+ if (ret)
558
+ goto err_out;
559
+
560
+ /* Set *all* chip selects to use layout 0. */
561
+ writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
562
+
563
+ ret = 0;
564
+err_out:
565
+ pm_runtime_mark_last_busy(this->dev);
566
+ pm_runtime_put_autosuspend(this->dev);
567
+
568
+ return ret;
569
+}
570
+
571
+/*
572
+ * <1> First, be clear about what the GPMI clock is.
+ *     The GPMI clock is the internal clock of the GPMI NAND controller.
+ *     If you run the GPMI NAND controller at 100MHz, the GPMI clock's
+ *     period is 10ns. We refer to this period as GPMI-clock-period.
576
+ *
577
+ * <2> Second, consider the frequency on the NAND chip pins.
+ *     The frequency on the NAND chip pins is derived from the GPMI clock.
579
+ * We can get it from the following equation:
580
+ *
581
+ * F = G / (DS + DH)
582
+ *
583
+ * F : the frequency on the nand chip pins.
584
+ * G : the GPMI clock, such as 100MHz.
585
+ * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
586
+ * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
587
+ *
588
+ * <3> Third, when the frequency on the NAND chip pins is above 33MHz,
+ *     the NAND EDO (Extended Data Out) timing can be applied.
+ *     The GPMI implements a feedback read strobe to sample the read data.
+ *     The feedback read strobe can be delayed to support the NAND EDO
+ *     timing, where the read strobe may deassert before the read data is
+ *     valid, and the read data stays valid for some time after the read
+ *     strobe.
594
+ *
595
+ * The following figure illustrates some aspects of a NAND Flash read:
596
+ *
597
+ * |<---tREA---->|
598
+ * | |
599
+ * | | |
600
+ * |<--tRP-->| |
601
+ * | | |
602
+ * __ ___|__________________________________
603
+ * RDN \________/ |
604
+ * |
605
+ * /---------\
606
+ * Read Data --------------< >---------
607
+ * \---------/
608
+ * | |
609
+ * |<-D->|
610
+ * FeedbackRDN ________ ____________
611
+ * \___________/
612
+ *
613
+ * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
614
+ *
615
+ *
616
+ * <4> Now, we begin to describe how to compute the right RDN_DELAY.
617
+ *
618
+ * 4.1) From the aspect of the nand chip pins:
619
+ * Delay = (tREA + C - tRP) {1}
620
+ *
621
+ * tREA : the maximum read access time.
622
+ * C : a constant to adjust the delay, 4000ps by default.
623
+ * tRP : the read pulse width, which is exactly:
624
+ * tRP = (GPMI-clock-period) * DATA_SETUP
625
+ *
626
+ * 4.2) From the aspect of the GPMI nand controller:
627
+ * Delay = RDN_DELAY * 0.125 * RP {2}
628
+ *
629
+ * RP : the DLL reference period.
+ *      if (GPMI-clock-period > DLL_THRESHOLD)
+ *              RP = GPMI-clock-period / 2;
+ *      else
+ *              RP = GPMI-clock-period;
+ *
+ *      Set HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period is greater
+ *      than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD is 16000ps,
+ *      but on mx6q we use 12000ps.
638
+ *
639
+ * 4.3) since {1} equals {2}, we get:
640
+ *
641
+ * (tREA + 4000 - tRP) * 8
642
+ * RDN_DELAY = ----------------------- {3}
643
+ * RP
644
+ */
645
+static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
646
+ const struct nand_sdr_timings *sdr)
647
+{
648
+ struct gpmi_nfc_hardware_timing *hw = &this->hw;
649
+ struct resources *r = &this->resources;
650
+ unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
651
+ unsigned int period_ps, reference_period_ps;
652
+ unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
653
+ unsigned int tRP_ps;
654
+ bool use_half_period;
655
+ int sample_delay_ps, sample_delay_factor;
656
+ unsigned int busy_timeout_cycles;
657
+ u8 wrn_dly_sel;
658
+ u64 busy_timeout_ps;
659
+
660
+ if (sdr->tRC_min >= 30000) {
661
+ /* ONFI non-EDO modes [0-3] */
662
+ hw->clk_rate = 22000000;
663
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
664
+ } else if (sdr->tRC_min >= 25000) {
665
+ /* ONFI EDO mode 4 */
666
+ hw->clk_rate = 80000000;
667
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
668
+ } else {
669
+ /* ONFI EDO mode 5 */
670
+ hw->clk_rate = 100000000;
671
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
672
+ }
673
+
674
+ hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
675
+
676
+ /* SDR core timings are given in picoseconds */
677
+ period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
678
+
679
+ addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
680
+ data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
681
+ data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
682
+ busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
683
+ busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
684
+
685
+ hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
686
+ BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
687
+ BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
688
+ hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
689
+
690
+ /*
691
+ * Derive NFC ideal delay from {3}:
692
+ *
693
+ * (tREA + 4000 - tRP) * 8
694
+ * RDN_DELAY = -----------------------
695
+ * RP
696
+ */
697
+ if (period_ps > dll_threshold_ps) {
698
+ use_half_period = true;
699
+ reference_period_ps = period_ps / 2;
700
+ } else {
701
+ use_half_period = false;
702
+ reference_period_ps = period_ps;
703
+ }
704
+
705
+ tRP_ps = data_setup_cycles * period_ps;
706
+ sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
707
+ if (sample_delay_ps > 0)
708
+ sample_delay_factor = sample_delay_ps / reference_period_ps;
709
+ else
710
+ sample_delay_factor = 0;
711
+
712
+ hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
713
+ if (sample_delay_factor)
714
+ hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
715
+ BM_GPMI_CTRL1_DLL_ENABLE |
716
+ (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
717
+}
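A worked pass through gpmi_nfc_compute_timings() with illustrative EDO-mode-5-style numbers (the exact ONFI timing values here are assumptions): clk_rate = 100 MHz, tDS_min = 10000 ps, tREA_max = 16000 ps, on an i.MX6 whose max_chain_delay (DLL threshold) is 12000 ps.

	period_ps           = 10^12 / 100000000           = 10000 ps
	data_setup_cycles   = DIV_ROUND_UP(10000, 10000)  = 1
	tRP_ps              = 1 * 10000                   = 10000 ps
	period_ps (10000) <= dll_threshold_ps (12000)     -> RP = 10000 ps, HALF_PERIOD off
	sample_delay_ps     = (16000 + 4000 - 10000) * 8  = 80000
	sample_delay_factor = 80000 / 10000               = 8  -> RDN_DELAY = 8, DLL enabled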
718
+
719
+static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
720
+{
721
+ struct gpmi_nfc_hardware_timing *hw = &this->hw;
722
+ struct resources *r = &this->resources;
723
+ void __iomem *gpmi_regs = r->gpmi_regs;
724
+ unsigned int dll_wait_time_us;
725
+ int ret;
726
+
727
+ /* Clock dividers do NOT guarantee a clean clock signal on their
+ * output while the divide factor is being changed on i.MX6Q/UL/SX.
+ * On i.MX7/8, all clock dividers do provide this guarantee.
730
+ */
731
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
732
+ clk_disable_unprepare(r->clock[0]);
733
+
734
+ ret = clk_set_rate(r->clock[0], hw->clk_rate);
735
+ if (ret) {
736
+ dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
737
+ return ret;
738
+ }
739
+
740
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
741
+ ret = clk_prepare_enable(r->clock[0]);
742
+ if (ret)
743
+ return ret;
744
+ }
745
+
746
+ writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
747
+ writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
748
+
749
+ /*
750
+ * Clear several CTRL1 fields; the DLL must be disabled when setting
+ * RDN_DELAY or HALF_PERIOD.
752
+ */
753
+ writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
754
+ writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
755
+
756
+ /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
757
+ dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
758
+ if (!dll_wait_time_us)
759
+ dll_wait_time_us = 1;
760
+
761
+ /* Wait for the DLL to settle. */
762
+ udelay(dll_wait_time_us);
763
+
764
+ return 0;
765
+}
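The 64-cycle DLL settling wait above works out as follows (worked integer arithmetic for a 100 MHz GPMI clock):

	USEC_PER_SEC / hw->clk_rate = 1000000 / 100000000 = 0   (integer division)
	0 * 64                      = 0
	-> the !dll_wait_time_us clamp raises it to 1 us, which still covers
	   the 0.64 us (64 * 10 ns) the DLL actually needs.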
766
+
767
+static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
768
+ const struct nand_interface_config *conf)
769
+{
770
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
771
+ const struct nand_sdr_timings *sdr;
772
+
773
+ /* Retrieve required NAND timings */
774
+ sdr = nand_get_sdr_timings(conf);
775
+ if (IS_ERR(sdr))
776
+ return PTR_ERR(sdr);
777
+
778
+ /* Only MX6 GPMI controller can reach EDO timings */
779
+ if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
780
+ return -ENOTSUPP;
781
+
782
+ /* Stop here if this call was just a check */
783
+ if (chipnr < 0)
784
+ return 0;
785
+
786
+ /* Do the actual derivation of the controller timings */
787
+ gpmi_nfc_compute_timings(this, sdr);
788
+
789
+ this->hw.must_apply_timings = true;
790
+
791
+ return 0;
792
+}
793
+
794
+/* Clears a BCH interrupt. */
795
+static void gpmi_clear_bch(struct gpmi_nand_data *this)
796
+{
797
+ struct resources *r = &this->resources;
798
+ writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
799
+}
800
+
801
+static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
431802 {
432803 /* We use the DMA channel 0 to access all the nand chips. */
433804 return this->dma_chans[0];
434805 }
435806
436
-/* Can we use the upper's buffer directly for DMA? */
437
-bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, int len,
438
- enum dma_data_direction dr)
807
+/* This will be called after the DMA operation is finished. */
808
+static void dma_irq_callback(void *param)
439809 {
440
- struct scatterlist *sgl = &this->data_sgl;
810
+ struct gpmi_nand_data *this = param;
811
+ struct completion *dma_c = &this->dma_done;
812
+
813
+ complete(dma_c);
814
+}
815
+
816
+static irqreturn_t bch_irq(int irq, void *cookie)
817
+{
818
+ struct gpmi_nand_data *this = cookie;
819
+
820
+ gpmi_clear_bch(this);
821
+ complete(&this->bch_done);
822
+ return IRQ_HANDLED;
823
+}
824
+
825
+static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
826
+{
827
+ /*
828
+ * raw_len is the length to read/write, including the BCH data, as
+ * passed to us in exec_op. Calculate the data length from it.
830
+ */
831
+ if (this->bch)
832
+ return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
833
+ else
834
+ return raw_len;
835
+}
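For illustration (the byte counts are made up): with this->bch set and ecc_chunk_size = 512, a raw exec_op length of 4378 bytes (4096 data bytes plus metadata and BCH parity) maps to

	ALIGN_DOWN(4378, 512) = 4096 bytes of payload data

while with BCH disabled the raw length is returned unchanged.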
836
+
837
+/* Can we use the upper layer's buffer directly for DMA? */
838
+static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
839
+ int raw_len, struct scatterlist *sgl,
840
+ enum dma_data_direction dr)
841
+{
441842 int ret;
843
+ int len = gpmi_raw_len_to_len(this, raw_len);
442844
443845 /* first try to map the upper buffer directly */
444846 if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
....@@ -454,7 +856,7 @@
454856 /* We have to use our own DMA buffer. */
455857 sg_init_one(sgl, this->data_buffer_dma, len);
456858
457
- if (dr == DMA_TO_DEVICE)
859
+ if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
458860 memcpy(this->data_buffer_dma, buf, len);
459861
460862 dma_map_sg(this->dev, sgl, 1, dr);
....@@ -462,66 +864,110 @@
462864 return false;
463865 }
464866
465
-/* This will be called after the DMA operation is finished. */
466
-static void dma_irq_callback(void *param)
467
-{
468
- struct gpmi_nand_data *this = param;
469
- struct completion *dma_c = &this->dma_done;
470
-
471
- complete(dma_c);
472
-}
473
-
474
-int start_dma_without_bch_irq(struct gpmi_nand_data *this,
475
- struct dma_async_tx_descriptor *desc)
476
-{
477
- struct completion *dma_c = &this->dma_done;
478
- unsigned long timeout;
479
-
480
- init_completion(dma_c);
481
-
482
- desc->callback = dma_irq_callback;
483
- desc->callback_param = this;
484
- dmaengine_submit(desc);
485
- dma_async_issue_pending(get_dma_chan(this));
486
-
487
- /* Wait for the interrupt from the DMA block. */
488
- timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
489
- if (!timeout) {
490
- dev_err(this->dev, "DMA timeout, last DMA\n");
491
- gpmi_dump_info(this);
492
- return -ETIMEDOUT;
493
- }
494
- return 0;
495
-}
867
+/* add our own bbt descriptor */
868
+static uint8_t scan_ff_pattern[] = { 0xff };
869
+static struct nand_bbt_descr gpmi_bbt_descr = {
870
+ .options = 0,
871
+ .offs = 0,
872
+ .len = 1,
873
+ .pattern = scan_ff_pattern
874
+};
496875
497876 /*
498
- * This function is used in BCH reading or BCH writing pages.
499
- * It will wait for the BCH interrupt as long as ONE second.
500
- * Actually, we must wait for two interrupts :
501
- * [1] firstly the DMA interrupt and
502
- * [2] secondly the BCH interrupt.
877
+ * We may change the layout if we can get the ECC info from the datasheet,
878
+ * else we will use all the (page + OOB).
503879 */
504
-int start_dma_with_bch_irq(struct gpmi_nand_data *this,
505
- struct dma_async_tx_descriptor *desc)
880
+static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
881
+ struct mtd_oob_region *oobregion)
506882 {
507
- struct completion *bch_c = &this->bch_done;
508
- unsigned long timeout;
883
+ struct nand_chip *chip = mtd_to_nand(mtd);
884
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
885
+ struct bch_geometry *geo = &this->bch_geometry;
509886
510
- /* Prepare to receive an interrupt from the BCH block. */
511
- init_completion(bch_c);
887
+ if (section)
888
+ return -ERANGE;
512889
513
- /* start the DMA */
514
- start_dma_without_bch_irq(this, desc);
890
+ oobregion->offset = 0;
891
+ oobregion->length = geo->page_size - mtd->writesize;
515892
516
- /* Wait for the interrupt from the BCH block. */
517
- timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
518
- if (!timeout) {
519
- dev_err(this->dev, "BCH timeout\n");
520
- gpmi_dump_info(this);
521
- return -ETIMEDOUT;
522
- }
523893 return 0;
524894 }
895
+
896
+static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
897
+ struct mtd_oob_region *oobregion)
898
+{
899
+ struct nand_chip *chip = mtd_to_nand(mtd);
900
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
901
+ struct bch_geometry *geo = &this->bch_geometry;
902
+
903
+ if (section)
904
+ return -ERANGE;
905
+
906
+ /* The available oob size we have. */
907
+ if (geo->page_size < mtd->writesize + mtd->oobsize) {
908
+ oobregion->offset = geo->page_size - mtd->writesize;
909
+ oobregion->length = mtd->oobsize - oobregion->offset;
910
+ }
911
+
912
+ return 0;
913
+}
914
+
915
+static const char * const gpmi_clks_for_mx2x[] = {
916
+ "gpmi_io",
917
+};
918
+
919
+static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
920
+ .ecc = gpmi_ooblayout_ecc,
921
+ .free = gpmi_ooblayout_free,
922
+};
923
+
924
+static const struct gpmi_devdata gpmi_devdata_imx23 = {
925
+ .type = IS_MX23,
926
+ .bch_max_ecc_strength = 20,
927
+ .max_chain_delay = 16000,
928
+ .clks = gpmi_clks_for_mx2x,
929
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
930
+};
931
+
932
+static const struct gpmi_devdata gpmi_devdata_imx28 = {
933
+ .type = IS_MX28,
934
+ .bch_max_ecc_strength = 20,
935
+ .max_chain_delay = 16000,
936
+ .clks = gpmi_clks_for_mx2x,
937
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
938
+};
939
+
940
+static const char * const gpmi_clks_for_mx6[] = {
941
+ "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
942
+};
943
+
944
+static const struct gpmi_devdata gpmi_devdata_imx6q = {
945
+ .type = IS_MX6Q,
946
+ .bch_max_ecc_strength = 40,
947
+ .max_chain_delay = 12000,
948
+ .clks = gpmi_clks_for_mx6,
949
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
950
+};
951
+
952
+static const struct gpmi_devdata gpmi_devdata_imx6sx = {
953
+ .type = IS_MX6SX,
954
+ .bch_max_ecc_strength = 62,
955
+ .max_chain_delay = 12000,
956
+ .clks = gpmi_clks_for_mx6,
957
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
958
+};
959
+
960
+static const char * const gpmi_clks_for_mx7d[] = {
961
+ "gpmi_io", "gpmi_bch_apb",
962
+};
963
+
964
+static const struct gpmi_devdata gpmi_devdata_imx7d = {
965
+ .type = IS_MX7D,
966
+ .bch_max_ecc_strength = 62,
967
+ .max_chain_delay = 12000,
968
+ .clks = gpmi_clks_for_mx7d,
969
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
970
+};
525971
526972 static int acquire_register_block(struct gpmi_nand_data *this,
527973 const char *res_name)
....@@ -580,20 +1026,19 @@
5801026 {
5811027 struct platform_device *pdev = this->pdev;
5821028 struct dma_chan *dma_chan;
1029
+ int ret = 0;
5831030
5841031 /* request dma channel */
585
- dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
586
- if (!dma_chan) {
587
- dev_err(this->dev, "Failed to request DMA channel.\n");
588
- goto acquire_err;
1032
+ dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
1033
+ if (IS_ERR(dma_chan)) {
1034
+ ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
1035
+ "DMA channel request failed\n");
1036
+ release_dma_channels(this);
1037
+ } else {
1038
+ this->dma_chans[0] = dma_chan;
5891039 }
5901040
591
- this->dma_chans[0] = dma_chan;
592
- return 0;
593
-
594
-acquire_err:
595
- release_dma_channels(this);
596
- return -EINVAL;
1041
+ return ret;
5971042 }
5981043
5991044 static int gpmi_get_clks(struct gpmi_nand_data *this)
....@@ -655,68 +1100,20 @@
6551100 release_dma_channels(this);
6561101 }
6571102
658
-static int send_page_prepare(struct gpmi_nand_data *this,
659
- const void *source, unsigned length,
660
- void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
661
- const void **use_virt, dma_addr_t *use_phys)
662
-{
663
- struct device *dev = this->dev;
664
-
665
- if (virt_addr_valid(source)) {
666
- dma_addr_t source_phys;
667
-
668
- source_phys = dma_map_single(dev, (void *)source, length,
669
- DMA_TO_DEVICE);
670
- if (dma_mapping_error(dev, source_phys)) {
671
- if (alt_size < length) {
672
- dev_err(dev, "Alternate buffer is too small\n");
673
- return -ENOMEM;
674
- }
675
- goto map_failed;
676
- }
677
- *use_virt = source;
678
- *use_phys = source_phys;
679
- return 0;
680
- }
681
-map_failed:
682
- /*
683
- * Copy the content of the source buffer into the alternate
684
- * buffer and set up the return values accordingly.
685
- */
686
- memcpy(alt_virt, source, length);
687
-
688
- *use_virt = alt_virt;
689
- *use_phys = alt_phys;
690
- return 0;
691
-}
692
-
693
-static void send_page_end(struct gpmi_nand_data *this,
694
- const void *source, unsigned length,
695
- void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
696
- const void *used_virt, dma_addr_t used_phys)
697
-{
698
- struct device *dev = this->dev;
699
- if (used_virt == source)
700
- dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
701
-}
702
-
7031103 static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
7041104 {
7051105 struct device *dev = this->dev;
1106
+ struct bch_geometry *geo = &this->bch_geometry;
7061107
707
- if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
708
- dma_free_coherent(dev, this->page_buffer_size,
709
- this->page_buffer_virt,
710
- this->page_buffer_phys);
711
- kfree(this->cmd_buffer);
1108
+ if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
1109
+ dma_free_coherent(dev, geo->auxiliary_size,
1110
+ this->auxiliary_virt,
1111
+ this->auxiliary_phys);
7121112 kfree(this->data_buffer_dma);
7131113 kfree(this->raw_buffer);
7141114
715
- this->cmd_buffer = NULL;
7161115 this->data_buffer_dma = NULL;
7171116 this->raw_buffer = NULL;
718
- this->page_buffer_virt = NULL;
719
- this->page_buffer_size = 0;
7201117 }
7211118
7221119 /* Allocate the DMA buffers */
....@@ -725,11 +1122,6 @@
7251122 struct bch_geometry *geo = &this->bch_geometry;
7261123 struct device *dev = this->dev;
7271124 struct mtd_info *mtd = nand_to_mtd(&this->nand);
728
-
729
- /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
730
- this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
731
- if (this->cmd_buffer == NULL)
732
- goto error_alloc;
7331125
7341126 /*
7351127 * [2] Allocate a read/write data buffer.
....@@ -744,140 +1136,20 @@
7441136 if (this->data_buffer_dma == NULL)
7451137 goto error_alloc;
7461138
747
- /*
748
- * [3] Allocate the page buffer.
749
- *
750
- * Both the payload buffer and the auxiliary buffer must appear on
751
- * 32-bit boundaries. We presume the size of the payload buffer is a
752
- * power of two and is much larger than four, which guarantees the
753
- * auxiliary buffer will appear on a 32-bit boundary.
754
- */
755
- this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
756
- this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
757
- &this->page_buffer_phys, GFP_DMA);
758
- if (!this->page_buffer_virt)
1139
+ this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
1140
+ &this->auxiliary_phys, GFP_DMA);
1141
+ if (!this->auxiliary_virt)
7591142 goto error_alloc;
7601143
761
- this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
1144
+ this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
7621145 if (!this->raw_buffer)
7631146 goto error_alloc;
7641147
765
- /* Slice up the page buffer. */
766
- this->payload_virt = this->page_buffer_virt;
767
- this->payload_phys = this->page_buffer_phys;
768
- this->auxiliary_virt = this->payload_virt + geo->payload_size;
769
- this->auxiliary_phys = this->payload_phys + geo->payload_size;
7701148 return 0;
7711149
7721150 error_alloc:
7731151 gpmi_free_dma_buffer(this);
7741152 return -ENOMEM;
775
-}
776
-
777
-static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
778
-{
779
- struct nand_chip *chip = mtd_to_nand(mtd);
780
- struct gpmi_nand_data *this = nand_get_controller_data(chip);
781
- int ret;
782
-
783
- /*
784
- * Every operation begins with a command byte and a series of zero or
785
- * more address bytes. These are distinguished by either the Address
786
- * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
787
- * asserted. When MTD is ready to execute the command, it will deassert
788
- * both latch enables.
789
- *
790
- * Rather than run a separate DMA operation for every single byte, we
791
- * queue them up and run a single DMA operation for the entire series
792
- * of command and data bytes. NAND_CMD_NONE means the END of the queue.
793
- */
794
- if ((ctrl & (NAND_ALE | NAND_CLE))) {
795
- if (data != NAND_CMD_NONE)
796
- this->cmd_buffer[this->command_length++] = data;
797
- return;
798
- }
799
-
800
- if (!this->command_length)
801
- return;
802
-
803
- ret = gpmi_send_command(this);
804
- if (ret)
805
- dev_err(this->dev, "Chip: %u, Error %d\n",
806
- this->current_chip, ret);
807
-
808
- this->command_length = 0;
809
-}
810
-
811
-static int gpmi_dev_ready(struct mtd_info *mtd)
812
-{
813
- struct nand_chip *chip = mtd_to_nand(mtd);
814
- struct gpmi_nand_data *this = nand_get_controller_data(chip);
815
-
816
- return gpmi_is_ready(this, this->current_chip);
817
-}
818
-
819
-static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
820
-{
821
- struct nand_chip *chip = mtd_to_nand(mtd);
822
- struct gpmi_nand_data *this = nand_get_controller_data(chip);
823
- int ret;
824
-
825
- /*
826
- * For power consumption matters, disable/enable the clock each time a
827
- * die is selected/unselected.
828
- */
829
- if (this->current_chip < 0 && chipnr >= 0) {
830
- ret = gpmi_enable_clk(this);
831
- if (ret)
832
- dev_err(this->dev, "Failed to enable the clock\n");
833
- } else if (this->current_chip >= 0 && chipnr < 0) {
834
- ret = gpmi_disable_clk(this);
835
- if (ret)
836
- dev_err(this->dev, "Failed to disable the clock\n");
837
- }
838
-
839
- /*
840
- * This driver currently supports only one NAND chip. Plus, dies share
841
- * the same configuration. So once timings have been applied on the
842
- * controller side, they will not change anymore. When the time will
843
- * come, the check on must_apply_timings will have to be dropped.
844
- */
845
- if (chipnr >= 0 && this->hw.must_apply_timings) {
846
- this->hw.must_apply_timings = false;
847
- gpmi_nfc_apply_timings(this);
848
- }
849
-
850
- this->current_chip = chipnr;
851
-}
852
-
853
-static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
854
-{
855
- struct nand_chip *chip = mtd_to_nand(mtd);
856
- struct gpmi_nand_data *this = nand_get_controller_data(chip);
857
-
858
- dev_dbg(this->dev, "len is %d\n", len);
859
-
860
- gpmi_read_data(this, buf, len);
861
-}
862
-
863
-static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
864
-{
865
- struct nand_chip *chip = mtd_to_nand(mtd);
866
- struct gpmi_nand_data *this = nand_get_controller_data(chip);
867
-
868
- dev_dbg(this->dev, "len is %d\n", len);
869
-
870
- gpmi_send_data(this, buf, len);
871
-}
872
-
873
-static uint8_t gpmi_read_byte(struct mtd_info *mtd)
874
-{
875
- struct nand_chip *chip = mtd_to_nand(mtd);
876
- struct gpmi_nand_data *this = nand_get_controller_data(chip);
877
- uint8_t *buf = this->data_buffer_dma;
878
-
879
- gpmi_read_buf(mtd, buf, 1);
880
- return buf[0];
8811153 }
8821154
8831155 /*
....@@ -928,54 +1200,20 @@
9281200 p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
9291201 }
9301202
931
-static int gpmi_ecc_read_page_data(struct nand_chip *chip,
932
- uint8_t *buf, int oob_required,
933
- int page)
1203
+static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
1204
+ int last, int meta)
9341205 {
9351206 struct gpmi_nand_data *this = nand_get_controller_data(chip);
9361207 struct bch_geometry *nfc_geo = &this->bch_geometry;
9371208 struct mtd_info *mtd = nand_to_mtd(chip);
938
- dma_addr_t payload_phys;
939
- unsigned int i;
1209
+ int i;
9401210 unsigned char *status;
941
- unsigned int max_bitflips = 0;
942
- int ret;
943
- bool direct = false;
944
-
945
- dev_dbg(this->dev, "page number is : %d\n", page);
946
-
947
- payload_phys = this->payload_phys;
948
-
949
- if (virt_addr_valid(buf)) {
950
- dma_addr_t dest_phys;
951
-
952
- dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size,
953
- DMA_FROM_DEVICE);
954
- if (!dma_mapping_error(this->dev, dest_phys)) {
955
- payload_phys = dest_phys;
956
- direct = true;
957
- }
958
- }
959
-
960
- /* go! */
961
- ret = gpmi_read_page(this, payload_phys, this->auxiliary_phys);
962
-
963
- if (direct)
964
- dma_unmap_single(this->dev, payload_phys, nfc_geo->payload_size,
965
- DMA_FROM_DEVICE);
966
-
967
- if (ret) {
968
- dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
969
- return ret;
970
- }
1211
+ unsigned int max_bitflips = 0;
9711212
9721213 /* Loop over status bytes, accumulating ECC status. */
973
- status = this->auxiliary_virt + nfc_geo->auxiliary_status_offset;
1214
+ status = this->auxiliary_virt + ALIGN(meta, 4);
9741215
975
- if (!direct)
976
- memcpy(buf, this->payload_virt, nfc_geo->payload_size);
977
-
978
- for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
1216
+ for (i = first; i < last; i++, status++) {
9791217 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
9801218 continue;
9811219
....@@ -1055,6 +1293,50 @@
10551293 max_bitflips = max_t(unsigned int, max_bitflips, *status);
10561294 }
10571295
1296
+ return max_bitflips;
1297
+}
1298
+
1299
+static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1300
+{
1301
+ struct bch_geometry *geo = &this->bch_geometry;
1302
+ unsigned int ecc_strength = geo->ecc_strength >> 1;
1303
+ unsigned int gf_len = geo->gf_len;
1304
+ unsigned int block_size = geo->ecc_chunk_size;
1305
+
1306
+ this->bch_flashlayout0 =
1307
+ BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1308
+ BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1309
+ BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1310
+ BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1311
+ BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
1312
+
1313
+ this->bch_flashlayout1 =
1314
+ BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1315
+ BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1316
+ BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1317
+ BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
1318
+}
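To make the register packing above concrete, here is an illustrative geometry (8 chunks of 512 bytes, 10 metadata bytes, ECC strength 16, gf_len 13); the layout registers encode the ECC level as strength/2, which is why the function shifts geo->ecc_strength right by one. The values handed to the BF_* macros would be:

	FLASH0LAYOUT0: NBLOCKS = 7, META_SIZE = 10, ECC0 = 8, GF = 13, DATA0_SIZE = 512
	FLASH0LAYOUT1: PAGE_SIZE = geo->page_size, ECCN = 8, GF = 13, DATAN_SIZE = 512

Any per-SoC encoding of these fields (for example scaling of the data size) is handled inside the BF_* macros themselves.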
1319
+
1320
+static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1321
+ int oob_required, int page)
1322
+{
1323
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
1324
+ struct mtd_info *mtd = nand_to_mtd(chip);
1325
+ struct bch_geometry *geo = &this->bch_geometry;
1326
+ unsigned int max_bitflips;
1327
+ int ret;
1328
+
1329
+ gpmi_bch_layout_std(this);
1330
+ this->bch = true;
1331
+
1332
+ ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1333
+ if (ret)
1334
+ return ret;
1335
+
1336
+ max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1337
+ geo->ecc_chunk_count,
1338
+ geo->auxiliary_status_offset);
1339
+
10581340 /* handle the block mark swapping */
10591341 block_mark_swapping(this, buf, this->auxiliary_virt);
10601342
....@@ -1076,30 +1358,20 @@
10761358 return max_bitflips;
10771359 }
10781360
1079
-static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1080
- uint8_t *buf, int oob_required, int page)
1081
-{
1082
- nand_read_page_op(chip, page, 0, NULL, 0);
1083
-
1084
- return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
1085
-}
1086
-
10871361 /* Fake a virtual small page for the subpage read */
1088
-static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1089
- uint32_t offs, uint32_t len, uint8_t *buf, int page)
1362
+static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1363
+ uint32_t len, uint8_t *buf, int page)
10901364 {
10911365 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1092
- void __iomem *bch_regs = this->resources.bch_regs;
1093
- struct bch_geometry old_geo = this->bch_geometry;
10941366 struct bch_geometry *geo = &this->bch_geometry;
10951367 int size = chip->ecc.size; /* ECC chunk size */
10961368 int meta, n, page_size;
1097
- u32 r1_old, r2_old, r1_new, r2_new;
10981369 unsigned int max_bitflips;
1370
+ unsigned int ecc_strength;
10991371 int first, last, marker_pos;
11001372 int ecc_parity_size;
11011373 int col = 0;
1102
- int old_swap_block_mark = this->swap_block_mark;
1374
+ int ret;
11031375
11041376 /* The size of ECC parity */
11051377 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
....@@ -1121,7 +1393,7 @@
11211393 dev_dbg(this->dev,
11221394 "page:%d, first:%d, last:%d, marker at:%d\n",
11231395 page, first, last, marker_pos);
1124
- return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
1396
+ return gpmi_ecc_read_page(chip, buf, 0, page);
11251397 }
11261398 }
11271399
....@@ -1132,127 +1404,66 @@
11321404 buf = buf + first * size;
11331405 }
11341406
1135
- nand_read_page_op(chip, page, col, NULL, 0);
1407
+ ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
11361408
1137
- /* Save the old environment */
1138
- r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
1139
- r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
1140
-
1141
- /* change the BCH registers and bch_geometry{} */
11421409 n = last - first + 1;
11431410 page_size = meta + (size + ecc_parity_size) * n;
1411
+ ecc_strength = geo->ecc_strength >> 1;
11441412
1145
- r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
1146
- BM_BCH_FLASH0LAYOUT0_META_SIZE);
1147
- r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
1148
- | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
1149
- writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);
1413
+ this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1414
+ BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1415
+ BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1416
+ BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1417
+ BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
11501418
1151
- r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
1152
- r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
1153
- writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);
1419
+ this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1420
+ BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1421
+ BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1422
+ BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
11541423
1155
- geo->ecc_chunk_count = n;
1156
- geo->payload_size = n * size;
1157
- geo->page_size = page_size;
1158
- geo->auxiliary_status_offset = ALIGN(meta, 4);
1424
+ this->bch = true;
1425
+
1426
+ ret = nand_read_page_op(chip, page, col, buf, page_size);
1427
+ if (ret)
1428
+ return ret;
11591429
11601430 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
11611431 page, offs, len, col, first, n, page_size);
11621432
1163
- /* Read the subpage now */
1164
- this->swap_block_mark = false;
1165
- max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
1166
-
1167
- /* Restore */
1168
- writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
1169
- writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
1170
- this->bch_geometry = old_geo;
1171
- this->swap_block_mark = old_swap_block_mark;
1433
+ max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
11721434
11731435 return max_bitflips;
11741436 }
11751437
1176
-static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1177
- const uint8_t *buf, int oob_required, int page)
1438
+static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1439
+ int oob_required, int page)
11781440 {
1441
+ struct mtd_info *mtd = nand_to_mtd(chip);
11791442 struct gpmi_nand_data *this = nand_get_controller_data(chip);
11801443 struct bch_geometry *nfc_geo = &this->bch_geometry;
1181
- const void *payload_virt;
1182
- dma_addr_t payload_phys;
1183
- const void *auxiliary_virt;
1184
- dma_addr_t auxiliary_phys;
1185
- int ret;
1444
+ int ret;
11861445
11871446 dev_dbg(this->dev, "ecc write page.\n");
11881447
1189
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1448
+ gpmi_bch_layout_std(this);
1449
+ this->bch = true;
1450
+
1451
+ memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
11901452
11911453 if (this->swap_block_mark) {
11921454 /*
1193
- * If control arrives here, we're doing block mark swapping.
1194
- * Since we can't modify the caller's buffers, we must copy them
1195
- * into our own.
1455
+ * When doing bad block marker swapping we must always copy the
1456
+ * input buffer as we can't modify the const buffer.
11961457 */
1197
- memcpy(this->payload_virt, buf, mtd->writesize);
1198
- payload_virt = this->payload_virt;
1199
- payload_phys = this->payload_phys;
1200
-
1201
- memcpy(this->auxiliary_virt, chip->oob_poi,
1202
- nfc_geo->auxiliary_size);
1203
- auxiliary_virt = this->auxiliary_virt;
1204
- auxiliary_phys = this->auxiliary_phys;
1205
-
1206
- /* Handle block mark swapping. */
1207
- block_mark_swapping(this,
1208
- (void *)payload_virt, (void *)auxiliary_virt);
1209
- } else {
1210
- /*
1211
- * If control arrives here, we're not doing block mark swapping,
1212
- * so we can to try and use the caller's buffers.
1213
- */
1214
- ret = send_page_prepare(this,
1215
- buf, mtd->writesize,
1216
- this->payload_virt, this->payload_phys,
1217
- nfc_geo->payload_size,
1218
- &payload_virt, &payload_phys);
1219
- if (ret) {
1220
- dev_err(this->dev, "Inadequate payload DMA buffer\n");
1221
- return 0;
1222
- }
1223
-
1224
- ret = send_page_prepare(this,
1225
- chip->oob_poi, mtd->oobsize,
1226
- this->auxiliary_virt, this->auxiliary_phys,
1227
- nfc_geo->auxiliary_size,
1228
- &auxiliary_virt, &auxiliary_phys);
1229
- if (ret) {
1230
- dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
1231
- goto exit_auxiliary;
1232
- }
1458
+ memcpy(this->data_buffer_dma, buf, mtd->writesize);
1459
+ buf = this->data_buffer_dma;
1460
+ block_mark_swapping(this, this->data_buffer_dma,
1461
+ this->auxiliary_virt);
12331462 }
12341463
1235
- /* Ask the NFC. */
1236
- ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
1237
- if (ret)
1238
- dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
1464
+ ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
12391465
1240
- if (!this->swap_block_mark) {
1241
- send_page_end(this, chip->oob_poi, mtd->oobsize,
1242
- this->auxiliary_virt, this->auxiliary_phys,
1243
- nfc_geo->auxiliary_size,
1244
- auxiliary_virt, auxiliary_phys);
1245
-exit_auxiliary:
1246
- send_page_end(this, buf, mtd->writesize,
1247
- this->payload_virt, this->payload_phys,
1248
- nfc_geo->payload_size,
1249
- payload_virt, payload_phys);
1250
- }
1251
-
1252
- if (ret)
1253
- return ret;
1254
-
1255
- return nand_prog_page_end_op(chip);
1466
+ return ret;
12561467 }
12571468
12581469 /*
....@@ -1315,18 +1526,20 @@
13151526 * ECC-based or raw view of the page is implicit in which function it calls
13161527 * (there is a similar pair of ECC-based/raw functions for writing).
13171528 */
1318
-static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1319
- int page)
1529
+static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
13201530 {
1531
+ struct mtd_info *mtd = nand_to_mtd(chip);
13211532 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1533
+ int ret;
13221534
1323
- dev_dbg(this->dev, "page number is %d\n", page);
13241535 /* clear the OOB buffer */
13251536 memset(chip->oob_poi, ~0, mtd->oobsize);
13261537
13271538 /* Read out the conventional OOB. */
1328
- nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
1329
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1539
+ ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1540
+ mtd->oobsize);
1541
+ if (ret)
1542
+ return ret;
13301543
13311544 /*
13321545 * Now, we want to make sure the block mark is correct. In the
....@@ -1335,16 +1548,17 @@
13351548 */
13361549 if (GPMI_IS_MX23(this)) {
13371550 /* Read the block mark into the first byte of the OOB buffer. */
1338
- nand_read_page_op(chip, page, 0, NULL, 0);
1339
- chip->oob_poi[0] = chip->read_byte(mtd);
1551
+ ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1552
+ if (ret)
1553
+ return ret;
13401554 }
13411555
13421556 return 0;
13431557 }
13441558
1345
-static int
1346
-gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
1559
+static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
13471560 {
1561
+ struct mtd_info *mtd = nand_to_mtd(chip);
13481562 struct mtd_oob_region of = { };
13491563
13501564 /* Do we have available oob area? */
....@@ -1366,15 +1580,15 @@
13661580 * inline (interleaved with payload DATA), and do not align data chunk on
13671581 * byte boundaries.
13681582 * We thus need to take care moving the payload data and ECC bits stored in the
1369
- * page into the provided buffers, which is why we're using gpmi_copy_bits.
1583
+ * page into the provided buffers, which is why we're using nand_extract_bits().
13701584 *
13711585 * See set_geometry_by_ecc_info inline comments to have a full description
13721586 * of the layout used by the GPMI controller.
13731587 */
1374
-static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
1375
- struct nand_chip *chip, uint8_t *buf,
1588
+static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
13761589 int oob_required, int page)
13771590 {
1591
+ struct mtd_info *mtd = nand_to_mtd(chip);
13781592 struct gpmi_nand_data *this = nand_get_controller_data(chip);
13791593 struct bch_geometry *nfc_geo = &this->bch_geometry;
13801594 int eccsize = nfc_geo->ecc_chunk_size;
....@@ -1385,9 +1599,12 @@
13851599 size_t oob_byte_off;
13861600 uint8_t *oob = chip->oob_poi;
13871601 int step;
1602
+ int ret;
13881603
1389
- nand_read_page_op(chip, page, 0, tmp_buf,
1390
- mtd->writesize + mtd->oobsize);
1604
+ ret = nand_read_page_op(chip, page, 0, tmp_buf,
1605
+ mtd->writesize + mtd->oobsize);
1606
+ if (ret)
1607
+ return ret;
13911608
13921609 /*
13931610 * If required, swap the bad block marker and the data stored in the
....@@ -1412,9 +1629,8 @@
14121629 /* Extract interleaved payload data and ECC bits */
14131630 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
14141631 if (buf)
1415
- gpmi_copy_bits(buf, step * eccsize * 8,
1416
- tmp_buf, src_bit_off,
1417
- eccsize * 8);
1632
+ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
1633
+ src_bit_off, eccsize * 8);
14181634 src_bit_off += eccsize * 8;
14191635
14201636 /* Align last ECC block to align a byte boundary */
....@@ -1423,9 +1639,8 @@
14231639 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
14241640
14251641 if (oob_required)
1426
- gpmi_copy_bits(oob, oob_bit_off,
1427
- tmp_buf, src_bit_off,
1428
- eccbits);
1642
+ nand_extract_bits(oob, oob_bit_off, tmp_buf,
1643
+ src_bit_off, eccbits);
14291644
14301645 src_bit_off += eccbits;
14311646 oob_bit_off += eccbits;
....@@ -1450,16 +1665,15 @@
14501665 * inline (interleaved with payload DATA), and do not align data chunk on
14511666 * byte boundaries.
14521667 * We thus need to take care moving the OOB area at the right place in the
1453
- * final page, which is why we're using gpmi_copy_bits.
1668
+ * final page, which is why we're using nand_extract_bits().
14541669 *
14551670 * See set_geometry_by_ecc_info inline comments to have a full description
14561671 * of the layout used by the GPMI controller.
14571672 */
1458
-static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
1459
- struct nand_chip *chip,
1460
- const uint8_t *buf,
1673
+static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
14611674 int oob_required, int page)
14621675 {
1676
+ struct mtd_info *mtd = nand_to_mtd(chip);
14631677 struct gpmi_nand_data *this = nand_get_controller_data(chip);
14641678 struct bch_geometry *nfc_geo = &this->bch_geometry;
14651679 int eccsize = nfc_geo->ecc_chunk_size;
....@@ -1490,8 +1704,8 @@
14901704 /* Interleave payload data and ECC bits */
14911705 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
14921706 if (buf)
1493
- gpmi_copy_bits(tmp_buf, dst_bit_off,
1494
- buf, step * eccsize * 8, eccsize * 8);
1707
+ nand_extract_bits(tmp_buf, dst_bit_off, buf,
1708
+ step * eccsize * 8, eccsize * 8);
14951709 dst_bit_off += eccsize * 8;
14961710
14971711 /* Align last ECC block to align a byte boundary */
....@@ -1500,8 +1714,8 @@
15001714 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
15011715
15021716 if (oob_required)
1503
- gpmi_copy_bits(tmp_buf, dst_bit_off,
1504
- oob, oob_bit_off, eccbits);
1717
+ nand_extract_bits(tmp_buf, dst_bit_off, oob,
1718
+ oob_bit_off, eccbits);
15051719
15061720 dst_bit_off += eccbits;
15071721 oob_bit_off += eccbits;
....@@ -1527,28 +1741,26 @@
15271741 mtd->writesize + mtd->oobsize);
15281742 }
15291743
1530
-static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1531
- int page)
1744
+static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
15321745 {
1533
- return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
1746
+ return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
15341747 }
15351748
1536
-static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1537
- int page)
1749
+static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
15381750 {
1539
- return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
1751
+ return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
15401752 }
15411753
1542
-static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1754
+static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
15431755 {
1544
- struct nand_chip *chip = mtd_to_nand(mtd);
1756
+ struct mtd_info *mtd = nand_to_mtd(chip);
15451757 struct gpmi_nand_data *this = nand_get_controller_data(chip);
15461758 int ret = 0;
15471759 uint8_t *block_mark;
15481760 int column, page, chipnr;
15491761
15501762 chipnr = (int)(ofs >> chip->chip_shift);
1551
- chip->select_chip(mtd, chipnr);
1763
+ nand_select_target(chip, chipnr);
15521764
15531765 column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
15541766
....@@ -1561,7 +1773,7 @@
15611773
15621774 ret = nand_prog_page_op(chip, page, column, block_mark, 1);
15631775
1564
- chip->select_chip(mtd, -1);
1776
+ nand_deselect_target(chip);
15651777
15661778 return ret;
15671779 }
....@@ -1598,19 +1810,17 @@
15981810 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
15991811 struct device *dev = this->dev;
16001812 struct nand_chip *chip = &this->nand;
1601
- struct mtd_info *mtd = nand_to_mtd(chip);
16021813 unsigned int search_area_size_in_strides;
16031814 unsigned int stride;
16041815 unsigned int page;
1605
- uint8_t *buffer = chip->data_buf;
1606
- int saved_chip_number;
1816
+ u8 *buffer = nand_get_data_buf(chip);
16071817 int found_an_ncb_fingerprint = false;
1818
+ int ret;
16081819
16091820 /* Compute the number of strides in a search area. */
16101821 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
16111822
1612
- saved_chip_number = this->current_chip;
1613
- chip->select_chip(mtd, 0);
1823
+ nand_select_target(chip, 0);
16141824
16151825 /*
16161826 * Loop through the first search area, looking for the NCB fingerprint.
....@@ -1627,8 +1837,10 @@
16271837 * Read the NCB fingerprint. The fingerprint is four bytes long
16281838 * and starts in the 12th byte of the page.
16291839 */
1630
- nand_read_page_op(chip, page, 12, NULL, 0);
1631
- chip->read_buf(mtd, buffer, strlen(fingerprint));
1840
+ ret = nand_read_page_op(chip, page, 12, buffer,
1841
+ strlen(fingerprint));
1842
+ if (ret)
1843
+ continue;
16321844
16331845 /* Look for the fingerprint. */
16341846 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
....@@ -1638,7 +1850,7 @@
16381850
16391851 }
16401852
1641
- chip->select_chip(mtd, saved_chip_number);
1853
+ nand_deselect_target(chip);
16421854
16431855 if (found_an_ncb_fingerprint)
16441856 dev_dbg(dev, "\tFound a fingerprint\n");
....@@ -1661,8 +1873,7 @@
16611873 unsigned int block;
16621874 unsigned int stride;
16631875 unsigned int page;
1664
- uint8_t *buffer = chip->data_buf;
1665
- int saved_chip_number;
1876
+ u8 *buffer = nand_get_data_buf(chip);
16661877 int status;
16671878
16681879 /* Compute the search area geometry. */
....@@ -1679,9 +1890,7 @@
16791890 dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
16801891 dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
16811892
1682
- /* Select chip 0. */
1683
- saved_chip_number = this->current_chip;
1684
- chip->select_chip(mtd, 0);
1893
+ nand_select_target(chip, 0);
16851894
16861895 /* Loop over blocks in the first search area, erasing them. */
16871896 dev_dbg(dev, "Erasing the search area...\n");
....@@ -1707,13 +1916,13 @@
17071916 /* Write the first page of the current stride. */
17081917 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
17091918
1710
- status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
1919
+ status = chip->ecc.write_page_raw(chip, buffer, 0, page);
17111920 if (status)
17121921 dev_err(dev, "[%s] Write failed.\n", __func__);
17131922 }
17141923
1715
- /* Deselect chip 0. */
1716
- chip->select_chip(mtd, saved_chip_number);
1924
+ nand_deselect_target(chip);
1925
+
17171926 return 0;
17181927 }
17191928
....@@ -1746,7 +1955,7 @@
17461955 dev_dbg(dev, "Transcribing bad block marks...\n");
17471956
17481957 /* Compute the number of blocks in the entire medium. */
1749
- block_count = chip->chipsize >> chip->phys_erase_shift;
1958
+ block_count = nanddev_eraseblocks_per_target(&chip->base);
17501959
17511960 /*
17521961 * Loop over all the blocks in the medium, transcribing block marks as
....@@ -1762,10 +1971,13 @@
17621971 byte = block << chip->phys_erase_shift;
17631972
17641973 /* Send the command to read the conventional block mark. */
1765
- chip->select_chip(mtd, chipnr);
1766
- nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
1767
- block_mark = chip->read_byte(mtd);
1768
- chip->select_chip(mtd, -1);
1974
+ nand_select_target(chip, chipnr);
1975
+ ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
1976
+ 1);
1977
+ nand_deselect_target(chip);
1978
+
1979
+ if (ret)
1980
+ continue;
17691981
17701982 /*
17711983 * Check if the block is marked bad. If so, we need to mark it
....@@ -1774,7 +1986,7 @@
17741986 */
17751987 if (block_mark != 0xff) {
17761988 dev_dbg(dev, "Transcribing mark in block %u\n", block);
1777
- ret = chip->block_markbad(mtd, byte);
1989
+ ret = chip->legacy.block_markbad(chip, byte);
17781990 if (ret)
17791991 dev_err(dev,
17801992 "Failed to mark block bad with ret %d\n",
....@@ -1837,7 +2049,7 @@
18372049 ecc->write_page_raw = gpmi_ecc_write_page_raw;
18382050 ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
18392051 ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
1840
- ecc->mode = NAND_ECC_HW;
2052
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
18412053 ecc->size = bch_geo->ecc_chunk_size;
18422054 ecc->strength = bch_geo->ecc_strength;
18432055 mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
....@@ -1880,8 +2092,350 @@
18802092 return 0;
18812093 }
18822094
2095
+static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2096
+{
2097
+ struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2098
+
2099
+ this->ntransfers++;
2100
+
2101
+ if (this->ntransfers == GPMI_MAX_TRANSFERS)
2102
+ return NULL;
2103
+
2104
+ return transfer;
2105
+}
2106
+
2107
+static struct dma_async_tx_descriptor *gpmi_chain_command(
2108
+ struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
2109
+{
2110
+ struct dma_chan *channel = get_dma_chan(this);
2111
+ struct dma_async_tx_descriptor *desc;
2112
+ struct gpmi_transfer *transfer;
2113
+ int chip = this->nand.cur_cs;
2114
+ u32 pio[3];
2115
+
2116
+ /* [1] send out the PIO words */
2117
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2118
+ | BM_GPMI_CTRL0_WORD_LENGTH
2119
+ | BF_GPMI_CTRL0_CS(chip, this)
2120
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2121
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
2122
+ | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2123
+ | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
2124
+ pio[1] = 0;
2125
+ pio[2] = 0;
2126
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2127
+ DMA_TRANS_NONE, 0);
2128
+ if (!desc)
2129
+ return NULL;
2130
+
2131
+ transfer = get_next_transfer(this);
2132
+ if (!transfer)
2133
+ return NULL;
2134
+
2135
+ transfer->cmdbuf[0] = cmd;
2136
+ if (naddr)
2137
+ memcpy(&transfer->cmdbuf[1], addr, naddr);
2138
+
2139
+ sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
2140
+ dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
2141
+
2142
+ transfer->direction = DMA_TO_DEVICE;
2143
+
2144
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
2145
+ MXS_DMA_CTRL_WAIT4END);
2146
+ return desc;
2147
+}
2148
+
2149
+static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2150
+ struct gpmi_nand_data *this)
2151
+{
2152
+ struct dma_chan *channel = get_dma_chan(this);
2153
+ u32 pio[2];
2154
+
2155
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2156
+ | BM_GPMI_CTRL0_WORD_LENGTH
2157
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2158
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2159
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2160
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
2161
+ pio[1] = 0;
2162
+
2163
+ return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2164
+ MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2165
+}
2166
+
2167
+static struct dma_async_tx_descriptor *gpmi_chain_data_read(
2168
+ struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
2169
+{
2170
+ struct dma_async_tx_descriptor *desc;
2171
+ struct dma_chan *channel = get_dma_chan(this);
2172
+ struct gpmi_transfer *transfer;
2173
+ u32 pio[6] = {};
2174
+
2175
+ transfer = get_next_transfer(this);
2176
+ if (!transfer)
2177
+ return NULL;
2178
+
2179
+ transfer->direction = DMA_FROM_DEVICE;
2180
+
2181
+ *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
2182
+ DMA_FROM_DEVICE);
2183
+
2184
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
2185
+ | BM_GPMI_CTRL0_WORD_LENGTH
2186
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2187
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2188
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2189
+ | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2190
+
2191
+ if (this->bch) {
2192
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2193
+ | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
2194
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2195
+ | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2196
+ pio[3] = raw_len;
2197
+ pio[4] = transfer->sgl.dma_address;
2198
+ pio[5] = this->auxiliary_phys;
2199
+ }
2200
+
2201
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2202
+ DMA_TRANS_NONE, 0);
2203
+ if (!desc)
2204
+ return NULL;
2205
+
2206
+ if (!this->bch)
2207
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2208
+ DMA_DEV_TO_MEM,
2209
+ MXS_DMA_CTRL_WAIT4END);
2210
+
2211
+ return desc;
2212
+}
2213
+
2214
+static struct dma_async_tx_descriptor *gpmi_chain_data_write(
2215
+ struct gpmi_nand_data *this, const void *buf, int raw_len)
2216
+{
2217
+ struct dma_chan *channel = get_dma_chan(this);
2218
+ struct dma_async_tx_descriptor *desc;
2219
+ struct gpmi_transfer *transfer;
2220
+ u32 pio[6] = {};
2221
+
2222
+ transfer = get_next_transfer(this);
2223
+ if (!transfer)
2224
+ return NULL;
2225
+
2226
+ transfer->direction = DMA_TO_DEVICE;
2227
+
2228
+ prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
2229
+
2230
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2231
+ | BM_GPMI_CTRL0_WORD_LENGTH
2232
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2233
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2234
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2235
+ | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2236
+
2237
+ if (this->bch) {
2238
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2239
+ | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
2240
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
2241
+ BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2242
+ pio[3] = raw_len;
2243
+ pio[4] = transfer->sgl.dma_address;
2244
+ pio[5] = this->auxiliary_phys;
2245
+ }
2246
+
2247
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2248
+ DMA_TRANS_NONE,
2249
+ (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
2250
+ if (!desc)
2251
+ return NULL;
2252
+
2253
+ if (!this->bch)
2254
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2255
+ DMA_MEM_TO_DEV,
2256
+ MXS_DMA_CTRL_WAIT4END);
2257
+
2258
+ return desc;
2259
+}
2260
+
2261
+static int gpmi_nfc_exec_op(struct nand_chip *chip,
2262
+ const struct nand_operation *op,
2263
+ bool check_only)
2264
+{
2265
+ const struct nand_op_instr *instr;
2266
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
2267
+ struct dma_async_tx_descriptor *desc = NULL;
2268
+ int i, ret, buf_len = 0, nbufs = 0;
2269
+ u8 cmd = 0;
2270
+ void *buf_read = NULL;
2271
+ const void *buf_write = NULL;
2272
+ bool direct = false;
2273
+ struct completion *dma_completion, *bch_completion;
2274
+ unsigned long to;
2275
+
2276
+ if (check_only)
2277
+ return 0;
2278
+
2279
+ this->ntransfers = 0;
2280
+ for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2281
+ this->transfers[i].direction = DMA_NONE;
2282
+
2283
+ ret = pm_runtime_get_sync(this->dev);
2284
+ if (ret < 0) {
2285
+ pm_runtime_put_noidle(this->dev);
2286
+ return ret;
2287
+ }
2288
+
2289
+ /*
2290
+ * This driver currently supports only one NAND chip. Plus, dies share
2291
+ * the same configuration. So once timings have been applied on the
2292
+	 * controller side, they will not change anymore. When the time
2293
+	 * comes, the check on must_apply_timings will have to be dropped.
2294
+ */
2295
+ if (this->hw.must_apply_timings) {
2296
+ this->hw.must_apply_timings = false;
2297
+ ret = gpmi_nfc_apply_timings(this);
2298
+ if (ret)
2299
+ goto out_pm;
2300
+ }
2301
+
2302
+ dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2303
+
2304
+ for (i = 0; i < op->ninstrs; i++) {
2305
+ instr = &op->instrs[i];
2306
+
2307
+ nand_op_trace(" ", instr);
2308
+
2309
+ switch (instr->type) {
2310
+ case NAND_OP_WAITRDY_INSTR:
2311
+ desc = gpmi_chain_wait_ready(this);
2312
+ break;
2313
+ case NAND_OP_CMD_INSTR:
2314
+ cmd = instr->ctx.cmd.opcode;
2315
+
2316
+ /*
2317
+			 * When this command has an address cycle, chain it
2318
+			 * together with the address cycle.
2319
+ */
2320
+ if (i + 1 != op->ninstrs &&
2321
+ op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2322
+ continue;
2323
+
2324
+ desc = gpmi_chain_command(this, cmd, NULL, 0);
2325
+
2326
+ break;
2327
+ case NAND_OP_ADDR_INSTR:
2328
+ desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2329
+ instr->ctx.addr.naddrs);
2330
+ break;
2331
+ case NAND_OP_DATA_OUT_INSTR:
2332
+ buf_write = instr->ctx.data.buf.out;
2333
+ buf_len = instr->ctx.data.len;
2334
+ nbufs++;
2335
+
2336
+ desc = gpmi_chain_data_write(this, buf_write, buf_len);
2337
+
2338
+ break;
2339
+ case NAND_OP_DATA_IN_INSTR:
2340
+ if (!instr->ctx.data.len)
2341
+ break;
2342
+ buf_read = instr->ctx.data.buf.in;
2343
+ buf_len = instr->ctx.data.len;
2344
+ nbufs++;
2345
+
2346
+ desc = gpmi_chain_data_read(this, buf_read, buf_len,
2347
+ &direct);
2348
+ break;
2349
+ }
2350
+
2351
+ if (!desc) {
2352
+ ret = -ENXIO;
2353
+ goto unmap;
2354
+ }
2355
+ }
2356
+
2357
+ dev_dbg(this->dev, "%s setup done\n", __func__);
2358
+
2359
+ if (nbufs > 1) {
2360
+ dev_err(this->dev, "Multiple data instructions not supported\n");
2361
+ ret = -EINVAL;
2362
+ goto unmap;
2363
+ }
2364
+
2365
+ if (this->bch) {
2366
+ writel(this->bch_flashlayout0,
2367
+ this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2368
+ writel(this->bch_flashlayout1,
2369
+ this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2370
+ }
2371
+
2372
+ desc->callback = dma_irq_callback;
2373
+ desc->callback_param = this;
2374
+ dma_completion = &this->dma_done;
2375
+ bch_completion = NULL;
2376
+
2377
+ init_completion(dma_completion);
2378
+
2379
+ if (this->bch && buf_read) {
2380
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2381
+ this->resources.bch_regs + HW_BCH_CTRL_SET);
2382
+ bch_completion = &this->bch_done;
2383
+ init_completion(bch_completion);
2384
+ }
2385
+
2386
+ dmaengine_submit(desc);
2387
+ dma_async_issue_pending(get_dma_chan(this));
2388
+
2389
+ to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
2390
+ if (!to) {
2391
+ dev_err(this->dev, "DMA timeout, last DMA\n");
2392
+ gpmi_dump_info(this);
2393
+ ret = -ETIMEDOUT;
2394
+ goto unmap;
2395
+ }
2396
+
2397
+ if (this->bch && buf_read) {
2398
+ to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
2399
+ if (!to) {
2400
+ dev_err(this->dev, "BCH timeout, last DMA\n");
2401
+ gpmi_dump_info(this);
2402
+ ret = -ETIMEDOUT;
2403
+ goto unmap;
2404
+ }
2405
+ }
2406
+
2407
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2408
+ this->resources.bch_regs + HW_BCH_CTRL_CLR);
2409
+ gpmi_clear_bch(this);
2410
+
2411
+ ret = 0;
2412
+
2413
+unmap:
2414
+ for (i = 0; i < this->ntransfers; i++) {
2415
+ struct gpmi_transfer *transfer = &this->transfers[i];
2416
+
2417
+ if (transfer->direction != DMA_NONE)
2418
+ dma_unmap_sg(this->dev, &transfer->sgl, 1,
2419
+ transfer->direction);
2420
+ }
2421
+
2422
+ if (!ret && buf_read && !direct)
2423
+ memcpy(buf_read, this->data_buffer_dma,
2424
+ gpmi_raw_len_to_len(this, buf_len));
2425
+
2426
+ this->bch = false;
2427
+
2428
+out_pm:
2429
+ pm_runtime_mark_last_busy(this->dev);
2430
+ pm_runtime_put_autosuspend(this->dev);
2431
+
2432
+ return ret;
2433
+}
2434
+
18832435 static const struct nand_controller_ops gpmi_nand_controller_ops = {
18842436 .attach_chip = gpmi_nand_attach_chip,
2437
+ .setup_interface = gpmi_setup_interface,
2438
+ .exec_op = gpmi_nfc_exec_op,
18852439 };
18862440
18872441 static int gpmi_nand_init(struct gpmi_nand_data *this)
....@@ -1890,9 +2444,6 @@
18902444 struct mtd_info *mtd = nand_to_mtd(chip);
18912445 int ret;
18922446
1893
- /* init current chip */
1894
- this->current_chip = -1;
1895
-
18962447 /* init the MTD data structures */
18972448 mtd->name = "gpmi-nand";
18982449 mtd->dev.parent = this->dev;
....@@ -1900,15 +2451,8 @@
19002451 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
19012452 nand_set_controller_data(chip, this);
19022453 nand_set_flash_node(chip, this->pdev->dev.of_node);
1903
- chip->select_chip = gpmi_select_chip;
1904
- chip->setup_data_interface = gpmi_setup_data_interface;
1905
- chip->cmd_ctrl = gpmi_cmd_ctrl;
1906
- chip->dev_ready = gpmi_dev_ready;
1907
- chip->read_byte = gpmi_read_byte;
1908
- chip->read_buf = gpmi_read_buf;
1909
- chip->write_buf = gpmi_write_buf;
2454
+ chip->legacy.block_markbad = gpmi_block_markbad;
19102455 chip->badblock_pattern = &gpmi_bbt_descr;
1911
- chip->block_markbad = gpmi_block_markbad;
19122456 chip->options |= NAND_NO_SUBPAGE_WRITE;
19132457
19142458 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
....@@ -1924,7 +2468,10 @@
19242468 if (ret)
19252469 return ret;
19262470
1927
- chip->dummy_controller.ops = &gpmi_nand_controller_ops;
2471
+ nand_controller_init(&this->base);
2472
+ this->base.ops = &gpmi_nand_controller_ops;
2473
+ chip->controller = &this->base;
2474
+
19282475 ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
19292476 if (ret)
19302477 goto err_out;
....@@ -1994,6 +2541,16 @@
19942541 if (ret)
19952542 goto exit_acquire_resources;
19962543
2544
+ ret = __gpmi_enable_clk(this, true);
2545
+ if (ret)
2546
+ goto exit_acquire_resources;
2547
+
2548
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2549
+ pm_runtime_use_autosuspend(&pdev->dev);
2550
+ pm_runtime_set_active(&pdev->dev);
2551
+ pm_runtime_enable(&pdev->dev);
2552
+ pm_runtime_get_sync(&pdev->dev);
2553
+
19972554 ret = gpmi_init(this);
19982555 if (ret)
19992556 goto exit_nfc_init;
....@@ -2002,11 +2559,16 @@
20022559 if (ret)
20032560 goto exit_nfc_init;
20042561
2562
+ pm_runtime_mark_last_busy(&pdev->dev);
2563
+ pm_runtime_put_autosuspend(&pdev->dev);
2564
+
20052565 dev_info(this->dev, "driver registered.\n");
20062566
20072567 return 0;
20082568
20092569 exit_nfc_init:
2570
+ pm_runtime_put(&pdev->dev);
2571
+ pm_runtime_disable(&pdev->dev);
20102572 release_resources(this);
20112573 exit_acquire_resources:
20122574
....@@ -2016,8 +2578,15 @@
20162578 static int gpmi_nand_remove(struct platform_device *pdev)
20172579 {
20182580 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2581
+ struct nand_chip *chip = &this->nand;
2582
+ int ret;
20192583
2020
- nand_release(&this->nand);
2584
+ pm_runtime_put_sync(&pdev->dev);
2585
+ pm_runtime_disable(&pdev->dev);
2586
+
2587
+ ret = mtd_device_unregister(nand_to_mtd(chip));
2588
+ WARN_ON(ret);
2589
+ nand_cleanup(chip);
20212590 gpmi_free_dma_buffer(this);
20222591 release_resources(this);
20232592 return 0;
....@@ -2048,6 +2617,10 @@
20482617 return ret;
20492618 }
20502619
2620
+ /* Set flag to get timing setup restored for next exec_op */
2621
+ if (this->hw.clk_rate)
2622
+ this->hw.must_apply_timings = true;
2623
+
20512624 /* re-init the BCH registers */
20522625 ret = bch_set_geometry(this);
20532626 if (ret) {
....@@ -2059,8 +2632,23 @@
20592632 }
20602633 #endif /* CONFIG_PM_SLEEP */
20612634
2635
+static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2636
+{
2637
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
2638
+
2639
+ return __gpmi_enable_clk(this, false);
2640
+}
2641
+
2642
+static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2643
+{
2644
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
2645
+
2646
+ return __gpmi_enable_clk(this, true);
2647
+}
2648
+
20622649 static const struct dev_pm_ops gpmi_pm_ops = {
20632650 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2651
+ SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
20642652 };
20652653
20662654 static struct platform_driver gpmi_nand_driver = {