forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/mtd/nand/spi/core.c
@@ -13,26 +13,13 @@
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mtd/bbt_store.h>
 #include <linux/mtd/spinand.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi-mem.h>
-
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
-        const struct nand_page_io_req *req,
-        u16 *column)
-{
-        struct nand_device *nand = spinand_to_nand(spinand);
-        unsigned int shift;
-
-        if (nand->memorg.planes_per_lun < 2)
-                return;
-
-        /* The plane number is passed in MSB just above the column address */
-        shift = fls(nand->memorg.pagesize);
-        *column |= req->pos.plane << shift;
-}
 
 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
 {
@@ -207,6 +194,126 @@
                                enable ? CFG_ECC_ENABLE : 0);
 }
 
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+        struct nand_device *nand = spinand_to_nand(spinand);
+
+        if (spinand->eccinfo.get_status)
+                return spinand->eccinfo.get_status(spinand, status);
+
+        switch (status & STATUS_ECC_MASK) {
+        case STATUS_ECC_NO_BITFLIPS:
+                return 0;
+
+        case STATUS_ECC_HAS_BITFLIPS:
+                /*
+                 * We have no way to know exactly how many bitflips have been
+                 * fixed, so let's return the maximum possible value so that
+                 * wear-leveling layers move the data immediately.
+                 */
+                return nanddev_get_ecc_requirements(nand)->strength;
+
+        case STATUS_ECC_UNCOR_ERROR:
+                return -EBADMSG;
+
+        default:
+                break;
+        }
+
+        return -EINVAL;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+        struct mtd_oob_region *region)
+{
+        return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+        struct mtd_oob_region *region)
+{
+        if (section)
+                return -ERANGE;
+
+        /* Reserve 2 bytes for the BBM. */
+        region->offset = 2;
+        region->length = 62;
+
+        return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+        .ecc = spinand_noecc_ooblayout_ecc,
+        .free = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
+{
+        struct spinand_device *spinand = nand_to_spinand(nand);
+        struct mtd_info *mtd = nanddev_to_mtd(nand);
+        struct spinand_ondie_ecc_conf *engine_conf;
+
+        nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+        nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
+        nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
+
+        engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+        if (!engine_conf)
+                return -ENOMEM;
+
+        nand->ecc.ctx.priv = engine_conf;
+
+        if (spinand->eccinfo.ooblayout)
+                mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+        else
+                mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+        return 0;
+}
+
+static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
+{
+        kfree(nand->ecc.ctx.priv);
+}
+
+static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
+        struct nand_page_io_req *req)
+{
+        struct spinand_device *spinand = nand_to_spinand(nand);
+        bool enable = (req->mode != MTD_OPS_RAW);
+
+        /* Only enable or disable the engine */
+        return spinand_ecc_enable(spinand, enable);
+}
+
+static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+        struct nand_page_io_req *req)
+{
+        struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+        struct spinand_device *spinand = nand_to_spinand(nand);
+
+        if (req->mode == MTD_OPS_RAW)
+                return 0;
+
+        /* Nothing to do when finishing a page write */
+        if (req->type == NAND_PAGE_WRITE)
+                return 0;
+
+        /* Finish a page read: check the status, report errors/bitflips */
+        return spinand_check_ecc_status(spinand, engine_conf->status);
+}
+
+static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+        .init_ctx = spinand_ondie_ecc_init_ctx,
+        .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
+        .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
+        .finish_io_req = spinand_ondie_ecc_finish_io_req,
+};
+
+static struct nand_ecc_engine spinand_ondie_ecc_engine = {
+        .ops = &spinand_ondie_ecc_engine_ops,
+};
+
 static int spinand_write_enable_op(struct spinand_device *spinand)
 {
         struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
@@ -227,27 +334,21 @@
 static int spinand_read_from_cache_op(struct spinand_device *spinand,
         const struct nand_page_io_req *req)
 {
-        struct spi_mem_op op = *spinand->op_templates.read_cache;
         struct nand_device *nand = spinand_to_nand(spinand);
         struct mtd_info *mtd = nanddev_to_mtd(nand);
-        struct nand_page_io_req adjreq = *req;
+        struct spi_mem_dirmap_desc *rdesc;
         unsigned int nbytes = 0;
         void *buf = NULL;
         u16 column = 0;
-        int ret;
+        ssize_t ret;
 
         if (req->datalen) {
-                adjreq.datalen = nanddev_page_size(nand);
-                adjreq.dataoffs = 0;
-                adjreq.databuf.in = spinand->databuf;
                 buf = spinand->databuf;
-                nbytes = adjreq.datalen;
+                nbytes = nanddev_page_size(nand);
+                column = 0;
         }
 
         if (req->ooblen) {
-                adjreq.ooblen = nanddev_per_page_oobsize(nand);
-                adjreq.ooboffs = 0;
-                adjreq.oobbuf.in = spinand->oobbuf;
                 nbytes += nanddev_per_page_oobsize(nand);
                 if (!buf) {
                         buf = spinand->oobbuf;
@@ -255,28 +356,19 @@
                 }
         }
 
-        spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
-        op.addr.val = column;
+        rdesc = spinand->dirmaps[req->pos.plane].rdesc;
 
-        /*
-         * Some controllers are limited in term of max RX data size. In this
-         * case, just repeat the READ_CACHE operation after updating the
-         * column.
-         */
         while (nbytes) {
-                op.data.buf.in = buf;
-                op.data.nbytes = nbytes;
-                ret = spi_mem_adjust_op_size(spinand->spimem, &op);
-                if (ret)
+                ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+                if (ret < 0)
                         return ret;
 
-                ret = spi_mem_exec_op(spinand->spimem, &op);
-                if (ret)
-                        return ret;
+                if (!ret || ret > nbytes)
+                        return -EIO;
 
-                buf += op.data.nbytes;
-                nbytes -= op.data.nbytes;
-                op.addr.val += op.data.nbytes;
+                nbytes -= ret;
+                column += ret;
+                buf += ret;
         }
 
         if (req->datalen)
@@ -300,14 +392,12 @@
 static int spinand_write_to_cache_op(struct spinand_device *spinand,
         const struct nand_page_io_req *req)
 {
-        struct spi_mem_op op = *spinand->op_templates.write_cache;
         struct nand_device *nand = spinand_to_nand(spinand);
         struct mtd_info *mtd = nanddev_to_mtd(nand);
-        struct nand_page_io_req adjreq = *req;
+        struct spi_mem_dirmap_desc *wdesc;
+        unsigned int nbytes, column = 0;
         void *buf = spinand->databuf;
-        unsigned int nbytes;
-        u16 column = 0;
-        int ret;
+        ssize_t ret;
 
         /*
          * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -318,12 +408,6 @@
          */
         nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
         memset(spinand->databuf, 0xff, nbytes);
-        adjreq.dataoffs = 0;
-        adjreq.datalen = nanddev_page_size(nand);
-        adjreq.databuf.out = spinand->databuf;
-        adjreq.ooblen = nanddev_per_page_oobsize(nand);
-        adjreq.ooboffs = 0;
-        adjreq.oobbuf.out = spinand->oobbuf;
 
         if (req->datalen)
                 memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
@@ -340,42 +424,19 @@
                                 req->ooblen);
         }
 
-        spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+        wdesc = spinand->dirmaps[req->pos.plane].wdesc;
 
-        op = *spinand->op_templates.write_cache;
-        op.addr.val = column;
-
-        /*
-         * Some controllers are limited in term of max TX data size. In this
-         * case, split the operation into one LOAD CACHE and one or more
-         * LOAD RANDOM CACHE.
-         */
         while (nbytes) {
-                op.data.buf.out = buf;
-                op.data.nbytes = nbytes;
-
-                ret = spi_mem_adjust_op_size(spinand->spimem, &op);
-                if (ret)
+                ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+                if (ret < 0)
                         return ret;
 
-                ret = spi_mem_exec_op(spinand->spimem, &op);
-                if (ret)
-                        return ret;
+                if (!ret || ret > nbytes)
+                        return -EIO;
 
-                buf += op.data.nbytes;
-                nbytes -= op.data.nbytes;
-                op.addr.val += op.data.nbytes;
-
-                /*
-                 * We need to use the RANDOM LOAD CACHE operation if there's
-                 * more than one iteration, because the LOAD operation might
-                 * reset the cache to 0xff.
-                 */
-                if (nbytes) {
-                        column = op.addr.val;
-                        op = *spinand->op_templates.update_cache;
-                        op.addr.val = column;
-                }
+                nbytes -= ret;
+                column += ret;
+                buf += ret;
         }
 
         return 0;
@@ -431,10 +492,11 @@
         return status & STATUS_BUSY ? -ETIMEDOUT : 0;
 }
 
-static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
+        u8 ndummy, u8 *buf)
 {
-        struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
-                SPINAND_MAX_ID_LEN);
+        struct spi_mem_op op = SPINAND_READID_OP(
+                naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
         int ret;
 
         ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -459,35 +521,6 @@
 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
 {
         return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
-}
-
-static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
-{
-        struct nand_device *nand = spinand_to_nand(spinand);
-
-        if (spinand->eccinfo.get_status)
-                return spinand->eccinfo.get_status(spinand, status);
-
-        switch (status & STATUS_ECC_MASK) {
-        case STATUS_ECC_NO_BITFLIPS:
-                return 0;
-
-        case STATUS_ECC_HAS_BITFLIPS:
-                /*
-                 * We have no way to know exactly how many bitflips have been
-                 * fixed, so let's return the maximum possible value so that
-                 * wear-leveling layers move the data immediately.
-                 */
-                return nand->eccreq.strength;
-
-        case STATUS_ECC_UNCOR_ERROR:
-                return -EBADMSG;
-
-        default:
-                break;
-        }
-
-        return -EINVAL;
 }
 
 static int spinand_read_page(struct spinand_device *spinand,
@@ -556,7 +589,7 @@
 
         mutex_lock(&spinand->lock);
 
-        nanddev_io_for_each_page(nand, from, ops, &iter) {
+        nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
                 ret = spinand_select_target(spinand, iter.req.pos.target);
                 if (ret)
                         break;
@@ -604,7 +637,7 @@
 
         mutex_lock(&spinand->lock);
 
-        nanddev_io_for_each_page(nand, to, ops, &iter) {
+        nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
                 ret = spinand_select_target(spinand, iter.req.pos.target);
                 if (ret)
                         break;
@@ -697,6 +730,9 @@
         ret = nanddev_markbad(nand, &pos);
         mutex_unlock(&spinand->lock);
 
+        if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH))
+                nanddev_bbt_in_flash_update(nand);
+
         return ret;
 }
 
@@ -753,6 +789,59 @@
         return ret;
 }
 
+static int spinand_create_dirmap(struct spinand_device *spinand,
+        unsigned int plane)
+{
+        struct nand_device *nand = spinand_to_nand(spinand);
+        struct spi_mem_dirmap_info info = {
+                .length = nanddev_page_size(nand) +
+                          nanddev_per_page_oobsize(nand),
+        };
+        struct spi_mem_dirmap_desc *desc;
+
+        /* The plane number is passed in MSB just above the column address */
+        info.offset = plane << fls(nand->memorg.pagesize);
+
+        info.op_tmpl = *spinand->op_templates.update_cache;
+        desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+                spinand->spimem, &info);
+        if (IS_ERR(desc))
+                return PTR_ERR(desc);
+
+        spinand->dirmaps[plane].wdesc = desc;
+
+        info.op_tmpl = *spinand->op_templates.read_cache;
+        desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+                spinand->spimem, &info);
+        if (IS_ERR(desc))
+                return PTR_ERR(desc);
+
+        spinand->dirmaps[plane].rdesc = desc;
+
+        return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+        struct nand_device *nand = spinand_to_nand(spinand);
+        int i, ret;
+
+        spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
+                sizeof(*spinand->dirmaps) *
+                nand->memorg.planes_per_lun,
+                GFP_KERNEL);
+        if (!spinand->dirmaps)
+                return -ENOMEM;
+
+        for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+                ret = spinand_create_dirmap(spinand, i);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
 static const struct nand_ops spinand_ops = {
         .erase = spinand_erase,
         .markbad = spinand_markbad,
@@ -760,27 +849,82 @@
 };
 
 static const struct spinand_manufacturer *spinand_manufacturers[] = {
+        &biwin_spinand_manufacturer,
+        &dosilicon_spinand_manufacturer,
+        &esmt_spinand_manufacturer,
+        &etron_spinand_manufacturer,
+        &fmsh_spinand_manufacturer,
+        &foresee_spinand_manufacturer,
+        &gigadevice_spinand_manufacturer,
+        &gsto_spinand_manufacturer,
+        &hyf_spinand_manufacturer,
+        &jsc_spinand_manufacturer,
         &macronix_spinand_manufacturer,
         &micron_spinand_manufacturer,
+        &paragon_spinand_manufacturer,
+        &silicongo_spinand_manufacturer,
+        &skyhigh_spinand_manufacturer,
+        &toshiba_spinand_manufacturer,
+        &unim_spinand_manufacturer,
         &winbond_spinand_manufacturer,
+        &xincun_spinand_manufacturer,
+        &xtx_spinand_manufacturer,
 };
 
-static int spinand_manufacturer_detect(struct spinand_device *spinand)
+static int spinand_manufacturer_match(struct spinand_device *spinand,
+        enum spinand_readid_method rdid_method)
 {
+        u8 *id = spinand->id.data;
         unsigned int i;
         int ret;
 
         for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
-                ret = spinand_manufacturers[i]->ops->detect(spinand);
-                if (ret > 0) {
-                        spinand->manufacturer = spinand_manufacturers[i];
-                        return 0;
-                } else if (ret < 0) {
-                        return ret;
-                }
-        }
+                const struct spinand_manufacturer *manufacturer =
+                        spinand_manufacturers[i];
 
+                if (id[0] != manufacturer->id)
+                        continue;
+
+                ret = spinand_match_and_init(spinand,
+                        manufacturer->chips,
+                        manufacturer->nchips,
+                        rdid_method);
+                if (ret < 0)
+                        continue;
+
+                spinand->manufacturer = manufacturer;
+                return 0;
+        }
         return -ENOTSUPP;
+}
+
+static int spinand_id_detect(struct spinand_device *spinand)
+{
+        u8 *id = spinand->id.data;
+        int ret;
+
+        ret = spinand_read_id_op(spinand, 0, 0, id);
+        if (ret)
+                return ret;
+        ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
+        if (!ret)
+                return 0;
+
+        ret = spinand_read_id_op(spinand, 1, 0, id);
+        if (ret)
+                return ret;
+        ret = spinand_manufacturer_match(spinand,
+                SPINAND_READID_METHOD_OPCODE_ADDR);
+        if (!ret)
+                return 0;
+
+        ret = spinand_read_id_op(spinand, 0, 1, id);
+        if (ret)
+                return ret;
+        ret = spinand_manufacturer_match(spinand,
+                SPINAND_READID_METHOD_OPCODE_DUMMY);
+
+        return ret;
 }
 
 static int spinand_manufacturer_init(struct spinand_device *spinand)
@@ -838,9 +982,9 @@
  * @spinand: SPI NAND object
  * @table: SPI NAND device description table
  * @table_size: size of the device description table
+ * @rdid_method: read id method to match
  *
- * Should be used by SPI NAND manufacturer drivers when they want to find a
- * match between a device ID retrieved through the READ_ID command and an
+ * Match between a device ID retrieved through the READ_ID command and an
  * entry in the SPI NAND description table. If a match is found, the spinand
  * object will be initialized with information provided by the matching
  * spinand_info entry.
@@ -849,8 +993,10 @@
  */
 int spinand_match_and_init(struct spinand_device *spinand,
         const struct spinand_info *table,
-        unsigned int table_size, u8 devid)
+        unsigned int table_size,
+        enum spinand_readid_method rdid_method)
 {
+        u8 *id = spinand->id.data;
         struct nand_device *nand = spinand_to_nand(spinand);
         unsigned int i;
 
@@ -858,13 +1004,17 @@
                 const struct spinand_info *info = &table[i];
                 const struct spi_mem_op *op;
 
-                if (devid != info->devid)
+                if (rdid_method != info->devid.method)
+                        continue;
+
+                if (memcmp(id + 1, info->devid.id, info->devid.len))
                         continue;
 
                 nand->memorg = table[i].memorg;
-                nand->eccreq = table[i].eccreq;
+                nanddev_set_ecc_requirements(nand, &table[i].eccreq);
                 spinand->eccinfo = table[i].eccinfo;
                 spinand->flags = table[i].flags;
+                spinand->id.len = 1 + table[i].devid.len;
                 spinand->select_target = table[i].select_target;
 
                 op = spinand_select_op_variant(spinand,
@@ -901,13 +1051,7 @@
         if (ret)
                 return ret;
 
-        ret = spinand_read_id_op(spinand, spinand->id.data);
-        if (ret)
-                return ret;
-
-        spinand->id.len = SPINAND_MAX_ID_LEN;
-
-        ret = spinand_manufacturer_detect(spinand);
+        ret = spinand_id_detect(spinand);
         if (ret) {
                 dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
                         spinand->id.data);
@@ -930,29 +1074,92 @@
         return 0;
 }
 
-static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
-        struct mtd_oob_region *region)
+static int spinand_reinit(struct mtd_info *mtd)
 {
-        return -ERANGE;
+        struct spinand_device *spinand = mtd_to_spinand(mtd);
+        struct nand_device *nand = mtd_to_nanddev(mtd);
+        struct device *dev = &spinand->spimem->spi->dev;
+        int ret, i;
+
+        ret = spinand_init_quad_enable(spinand);
+        if (ret)
+                return ret;
+
+        ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+        if (ret)
+                return ret;
+
+        ret = spinand_manufacturer_init(spinand);
+        if (ret) {
+                dev_err(dev,
+                        "Failed to initialize the SPI NAND chip (err = %d)\n",
+                        ret);
+                return ret;
+        }
+
+        ret = spinand_create_dirmaps(spinand);
+        if (ret) {
+                dev_err(dev,
+                        "Failed to create direct mappings for read/write operations (err = %d)\n",
+                        ret);
+                return ret;
+        }
+
+        /* After power up, all blocks are locked, so unlock them here. */
+        for (i = 0; i < nand->memorg.ntargets; i++) {
+                ret = spinand_select_target(spinand, i);
+                if (ret)
+                        return ret;
+
+                ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+                if (ret)
+                        return ret;
+        }
+
+        return ret;
 }
 
-static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
-        struct mtd_oob_region *region)
+/**
+ * spinand_mtd_suspend - [MTD Interface] Suspend the spinand flash
+ * @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int spinand_mtd_suspend(struct mtd_info *mtd)
 {
-        if (section)
-                return -ERANGE;
+        struct spinand_device *spinand = mtd_to_spinand(mtd);
+        int ret = 0;
 
-        /* Reserve 2 bytes for the BBM. */
-        region->offset = 2;
-        region->length = 62;
+        mutex_lock(&spinand->lock);
 
-        return 0;
+        return ret;
 }
 
-static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
-        .ecc = spinand_noecc_ooblayout_ecc,
-        .free = spinand_noecc_ooblayout_free,
-};
+/**
+ * spinand_mtd_resume - [MTD Interface] Resume the spinand flash
+ * @mtd: MTD device structure
+ */
+static void spinand_mtd_resume(struct mtd_info *mtd)
+{
+        struct spinand_device *spinand = mtd_to_spinand(mtd);
+        struct device *dev = &spinand->spimem->spi->dev;
+        int ret;
+
+        ret = spinand_reinit(mtd);
+        if (ret)
+                dev_err(dev, "Failed to resume, ret =%d !\n", ret);
+        mutex_unlock(&spinand->lock);
+}
+
+/**
+ * spinand_mtd_shutdown - [MTD Interface] Finish the current spinand operation and
+ *                        prevent further operations
+ * @mtd: MTD device structure
+ */
+static void spinand_mtd_shutdown(struct mtd_info *mtd)
+{
+        spinand_mtd_suspend(mtd);
+}
 
 static int spinand_init(struct spinand_device *spinand)
 {
@@ -1008,6 +1215,14 @@
                 goto err_free_bufs;
         }
 
+        ret = spinand_create_dirmaps(spinand);
+        if (ret) {
+                dev_err(dev,
+                        "Failed to create direct mappings for read/write operations (err = %d)\n",
+                        ret);
+                goto err_manuf_cleanup;
+        }
+
         /* After power up, all blocks are locked, so unlock them here. */
         for (i = 0; i < nand->memorg.ntargets; i++) {
                 ret = spinand_select_target(spinand, i);
@@ -1023,6 +1238,10 @@
         if (ret)
                 goto err_manuf_cleanup;
 
+        /* SPI-NAND default ECC engine is on-die */
+        nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+        nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
+
         /*
          * Right now, we don't support ECC, so let the whole oob
          * area is available for user.
@@ -1033,6 +1252,10 @@
         mtd->_block_markbad = spinand_mtd_block_markbad;
         mtd->_block_isreserved = spinand_mtd_block_isreserved;
         mtd->_erase = spinand_mtd_erase;
+        mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+        mtd->_suspend = spinand_mtd_suspend;
+        mtd->_resume = spinand_mtd_resume;
+        mtd->_reboot = spinand_mtd_shutdown;
 
         if (spinand->eccinfo.ooblayout)
                 mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
@@ -1046,8 +1269,13 @@
         mtd->oobavail = ret;
 
         /* Propagate ECC information to mtd_info */
-        mtd->ecc_strength = nand->eccreq.strength;
-        mtd->ecc_step_size = nand->eccreq.step_size;
+        mtd->ecc_strength = nanddev_get_ecc_requirements(nand)->strength;
+        mtd->ecc_step_size = nanddev_get_ecc_requirements(nand)->step_size;
+        if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_SFC))
+                mtd->name = "spi-nand0";
+
+        if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH))
+                nanddev_scan_bbt_in_flash(nand);
 
         return 0;
 