forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/drivers/mtd/nand/spi/core.c
@@ -13,26 +13,13 @@
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mtd/bbt_store.h>
 #include <linux/mtd/spinand.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi-mem.h>
-
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
-					  const struct nand_page_io_req *req,
-					  u16 *column)
-{
-	struct nand_device *nand = spinand_to_nand(spinand);
-	unsigned int shift;
-
-	if (nand->memorg.planes_per_lun < 2)
-		return;
-
-	/* The plane number is passed in MSB just above the column address */
-	shift = fls(nand->memorg.pagesize);
-	*column |= req->pos.plane << shift;
-}
 
 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
 {
@@ -207,6 +194,126 @@
 				       enable ? CFG_ECC_ENABLE : 0);
 }
 
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+
+	if (spinand->eccinfo.get_status)
+		return spinand->eccinfo.get_status(spinand, status);
+
+	switch (status & STATUS_ECC_MASK) {
+	case STATUS_ECC_NO_BITFLIPS:
+		return 0;
+
+	case STATUS_ECC_HAS_BITFLIPS:
+		/*
+		 * We have no way to know exactly how many bitflips have been
+		 * fixed, so let's return the maximum possible value so that
+		 * wear-leveling layers move the data immediately.
+		 */
+		return nanddev_get_ecc_requirements(nand)->strength;
+
+	case STATUS_ECC_UNCOR_ERROR:
+		return -EBADMSG;
+
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *region)
+{
+	return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *region)
+{
+	if (section)
+		return -ERANGE;
+
+	/* Reserve 2 bytes for the BBM. */
+	region->offset = 2;
+	region->length = 62;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+	.ecc = spinand_noecc_ooblayout_ecc,
+	.free = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
+{
+	struct spinand_device *spinand = nand_to_spinand(nand);
+	struct mtd_info *mtd = nanddev_to_mtd(nand);
+	struct spinand_ondie_ecc_conf *engine_conf;
+
+	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
+	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
+
+	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+	if (!engine_conf)
+		return -ENOMEM;
+
+	nand->ecc.ctx.priv = engine_conf;
+
+	if (spinand->eccinfo.ooblayout)
+		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+	else
+		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+	return 0;
+}
+
+static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
+{
+	kfree(nand->ecc.ctx.priv);
+}
+
+static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
+					    struct nand_page_io_req *req)
+{
+	struct spinand_device *spinand = nand_to_spinand(nand);
+	bool enable = (req->mode != MTD_OPS_RAW);
+
+	/* Only enable or disable the engine */
+	return spinand_ecc_enable(spinand, enable);
+}
+
+static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+					   struct nand_page_io_req *req)
+{
+	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+	struct spinand_device *spinand = nand_to_spinand(nand);
+
+	if (req->mode == MTD_OPS_RAW)
+		return 0;
+
+	/* Nothing to do when finishing a page write */
+	if (req->type == NAND_PAGE_WRITE)
+		return 0;
+
+	/* Finish a page write: check the status, report errors/bitflips */
+	return spinand_check_ecc_status(spinand, engine_conf->status);
+}
+
+static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+	.init_ctx = spinand_ondie_ecc_init_ctx,
+	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
+	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
+	.finish_io_req = spinand_ondie_ecc_finish_io_req,
+};
+
+static struct nand_ecc_engine spinand_ondie_ecc_engine = {
+	.ops = &spinand_ondie_ecc_engine_ops,
+};
+
 static int spinand_write_enable_op(struct spinand_device *spinand)
 {
 	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
@@ -227,27 +334,21 @@
 static int spinand_read_from_cache_op(struct spinand_device *spinand,
 				      const struct nand_page_io_req *req)
 {
-	struct spi_mem_op op = *spinand->op_templates.read_cache;
 	struct nand_device *nand = spinand_to_nand(spinand);
 	struct mtd_info *mtd = nanddev_to_mtd(nand);
-	struct nand_page_io_req adjreq = *req;
+	struct spi_mem_dirmap_desc *rdesc;
 	unsigned int nbytes = 0;
 	void *buf = NULL;
 	u16 column = 0;
-	int ret;
+	ssize_t ret;
 
 	if (req->datalen) {
-		adjreq.datalen = nanddev_page_size(nand);
-		adjreq.dataoffs = 0;
-		adjreq.databuf.in = spinand->databuf;
 		buf = spinand->databuf;
-		nbytes = adjreq.datalen;
+		nbytes = nanddev_page_size(nand);
+		column = 0;
 	}
 
 	if (req->ooblen) {
-		adjreq.ooblen = nanddev_per_page_oobsize(nand);
-		adjreq.ooboffs = 0;
-		adjreq.oobbuf.in = spinand->oobbuf;
 		nbytes += nanddev_per_page_oobsize(nand);
 		if (!buf) {
 			buf = spinand->oobbuf;
@@ -255,28 +356,19 @@
 		}
 	}
 
-	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
-	op.addr.val = column;
+	rdesc = spinand->dirmaps[req->pos.plane].rdesc;
 
-	/*
-	 * Some controllers are limited in term of max RX data size. In this
-	 * case, just repeat the READ_CACHE operation after updating the
-	 * column.
-	 */
 	while (nbytes) {
-		op.data.buf.in = buf;
-		op.data.nbytes = nbytes;
-		ret = spi_mem_adjust_op_size(spinand->spimem, &op);
-		if (ret)
+		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+		if (ret < 0)
 			return ret;
 
-		ret = spi_mem_exec_op(spinand->spimem, &op);
-		if (ret)
-			return ret;
+		if (!ret || ret > nbytes)
+			return -EIO;
 
-		buf += op.data.nbytes;
-		nbytes -= op.data.nbytes;
-		op.addr.val += op.data.nbytes;
+		nbytes -= ret;
+		column += ret;
+		buf += ret;
 	}
 
 	if (req->datalen)
@@ -300,14 +392,12 @@
 static int spinand_write_to_cache_op(struct spinand_device *spinand,
 				     const struct nand_page_io_req *req)
 {
-	struct spi_mem_op op = *spinand->op_templates.write_cache;
 	struct nand_device *nand = spinand_to_nand(spinand);
 	struct mtd_info *mtd = nanddev_to_mtd(nand);
-	struct nand_page_io_req adjreq = *req;
+	struct spi_mem_dirmap_desc *wdesc;
+	unsigned int nbytes, column = 0;
 	void *buf = spinand->databuf;
-	unsigned int nbytes;
-	u16 column = 0;
-	int ret;
+	ssize_t ret;
 
 	/*
 	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -318,12 +408,6 @@
 	 */
 	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
 	memset(spinand->databuf, 0xff, nbytes);
-	adjreq.dataoffs = 0;
-	adjreq.datalen = nanddev_page_size(nand);
-	adjreq.databuf.out = spinand->databuf;
-	adjreq.ooblen = nanddev_per_page_oobsize(nand);
-	adjreq.ooboffs = 0;
-	adjreq.oobbuf.out = spinand->oobbuf;
 
 	if (req->datalen)
 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
@@ -340,42 +424,19 @@
 		       req->ooblen);
 	}
 
-	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+	wdesc = spinand->dirmaps[req->pos.plane].wdesc;
 
-	op = *spinand->op_templates.write_cache;
-	op.addr.val = column;
-
-	/*
-	 * Some controllers are limited in term of max TX data size. In this
-	 * case, split the operation into one LOAD CACHE and one or more
-	 * LOAD RANDOM CACHE.
-	 */
 	while (nbytes) {
-		op.data.buf.out = buf;
-		op.data.nbytes = nbytes;
-
-		ret = spi_mem_adjust_op_size(spinand->spimem, &op);
-		if (ret)
+		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+		if (ret < 0)
 			return ret;
 
-		ret = spi_mem_exec_op(spinand->spimem, &op);
-		if (ret)
-			return ret;
+		if (!ret || ret > nbytes)
+			return -EIO;
 
-		buf += op.data.nbytes;
-		nbytes -= op.data.nbytes;
-		op.addr.val += op.data.nbytes;
-
-		/*
-		 * We need to use the RANDOM LOAD CACHE operation if there's
-		 * more than one iteration, because the LOAD operation might
-		 * reset the cache to 0xff.
-		 */
-		if (nbytes) {
-			column = op.addr.val;
-			op = *spinand->op_templates.update_cache;
-			op.addr.val = column;
-		}
+		nbytes -= ret;
+		column += ret;
+		buf += ret;
 	}
 
 	return 0;
@@ -431,10 +492,11 @@
 	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
 }
 
-static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
+			      u8 ndummy, u8 *buf)
 {
-	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
-						 SPINAND_MAX_ID_LEN);
+	struct spi_mem_op op = SPINAND_READID_OP(
+		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
 	int ret;
 
 	ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -461,40 +523,11 @@
 	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
 }
 
-static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
-{
-	struct nand_device *nand = spinand_to_nand(spinand);
-
-	if (spinand->eccinfo.get_status)
-		return spinand->eccinfo.get_status(spinand, status);
-
-	switch (status & STATUS_ECC_MASK) {
-	case STATUS_ECC_NO_BITFLIPS:
-		return 0;
-
-	case STATUS_ECC_HAS_BITFLIPS:
-		/*
-		 * We have no way to know exactly how many bitflips have been
-		 * fixed, so let's return the maximum possible value so that
-		 * wear-leveling layers move the data immediately.
-		 */
-		return nand->eccreq.strength;
-
-	case STATUS_ECC_UNCOR_ERROR:
-		return -EBADMSG;
-
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
 static int spinand_read_page(struct spinand_device *spinand,
 			     const struct nand_page_io_req *req,
 			     bool ecc_enabled)
 {
-	u8 status;
+	u8 status = 0;
 	int ret;
 
 	ret = spinand_load_page_op(spinand, req);
@@ -502,6 +535,13 @@
 		return ret;
 
 	ret = spinand_wait(spinand, &status);
+	/*
+	 * When there is data outside of OIP in the status, the status data is
+	 * inaccurate and needs to be reconfirmed
+	 */
+	if (spinand->id.data[0] == 0x01 && status && !ret)
+		ret = spinand_wait(spinand, &status);
+
 	if (ret < 0)
 		return ret;
 
@@ -556,7 +596,7 @@
 
 	mutex_lock(&spinand->lock);
 
-	nanddev_io_for_each_page(nand, from, ops, &iter) {
+	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
 		ret = spinand_select_target(spinand, iter.req.pos.target);
 		if (ret)
 			break;
@@ -604,7 +644,7 @@
 
 	mutex_lock(&spinand->lock);
 
-	nanddev_io_for_each_page(nand, to, ops, &iter) {
+	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
 		ret = spinand_select_target(spinand, iter.req.pos.target);
 		if (ret)
 			break;
@@ -697,6 +737,9 @@
 	ret = nanddev_markbad(nand, &pos);
 	mutex_unlock(&spinand->lock);
 
+	if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH))
+		nanddev_bbt_in_flash_update(nand);
+
 	return ret;
 }
 
@@ -753,6 +796,59 @@
 	return ret;
 }
 
+static int spinand_create_dirmap(struct spinand_device *spinand,
+				 unsigned int plane)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	struct spi_mem_dirmap_info info = {
+		.length = nanddev_page_size(nand) +
+			  nanddev_per_page_oobsize(nand),
+	};
+	struct spi_mem_dirmap_desc *desc;
+
+	/* The plane number is passed in MSB just above the column address */
+	info.offset = plane << fls(nand->memorg.pagesize);
+
+	info.op_tmpl = *spinand->op_templates.update_cache;
+	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+					  spinand->spimem, &info);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	spinand->dirmaps[plane].wdesc = desc;
+
+	info.op_tmpl = *spinand->op_templates.read_cache;
+	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+					  spinand->spimem, &info);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	spinand->dirmaps[plane].rdesc = desc;
+
+	return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	int i, ret;
+
+	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
+					sizeof(*spinand->dirmaps) *
+					nand->memorg.planes_per_lun,
+					GFP_KERNEL);
+	if (!spinand->dirmaps)
+		return -ENOMEM;
+
+	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+		ret = spinand_create_dirmap(spinand, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static const struct nand_ops spinand_ops = {
 	.erase = spinand_erase,
 	.markbad = spinand_markbad,
@@ -760,27 +856,83 @@
 };
 
 static const struct spinand_manufacturer *spinand_manufacturers[] = {
+	&biwin_spinand_manufacturer,
+	&dosilicon_spinand_manufacturer,
+	&esmt_spinand_manufacturer,
+	&etron_spinand_manufacturer,
+	&fmsh_spinand_manufacturer,
+	&foresee_spinand_manufacturer,
+	&gigadevice_spinand_manufacturer,
+	&gsto_spinand_manufacturer,
+	&hyf_spinand_manufacturer,
+	&jsc_spinand_manufacturer,
 	&macronix_spinand_manufacturer,
 	&micron_spinand_manufacturer,
+	&paragon_spinand_manufacturer,
+	&silicongo_spinand_manufacturer,
+	&skyhigh_spinand_manufacturer,
+	&toshiba_spinand_manufacturer,
+	&unim_spinand_manufacturer,
+	&unim_zl_spinand_manufacturer,
 	&winbond_spinand_manufacturer,
+	&xincun_spinand_manufacturer,
+	&xtx_spinand_manufacturer,
 };
 
-static int spinand_manufacturer_detect(struct spinand_device *spinand)
+static int spinand_manufacturer_match(struct spinand_device *spinand,
+				      enum spinand_readid_method rdid_method)
 {
+	u8 *id = spinand->id.data;
 	unsigned int i;
 	int ret;
 
 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
-		ret = spinand_manufacturers[i]->ops->detect(spinand);
-		if (ret > 0) {
-			spinand->manufacturer = spinand_manufacturers[i];
-			return 0;
-		} else if (ret < 0) {
-			return ret;
-		}
-	}
+		const struct spinand_manufacturer *manufacturer =
+			spinand_manufacturers[i];
 
+		if (id[0] != manufacturer->id)
+			continue;
+
+		ret = spinand_match_and_init(spinand,
+					     manufacturer->chips,
+					     manufacturer->nchips,
+					     rdid_method);
+		if (ret < 0)
+			continue;
+
+		spinand->manufacturer = manufacturer;
+		return 0;
+	}
 	return -ENOTSUPP;
+}
+
+static int spinand_id_detect(struct spinand_device *spinand)
+{
+	u8 *id = spinand->id.data;
+	int ret;
+
+	ret = spinand_read_id_op(spinand, 0, 0, id);
+	if (ret)
+		return ret;
+	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
+	if (!ret)
+		return 0;
+
+	ret = spinand_read_id_op(spinand, 1, 0, id);
+	if (ret)
+		return ret;
+	ret = spinand_manufacturer_match(spinand,
+					 SPINAND_READID_METHOD_OPCODE_ADDR);
+	if (!ret)
+		return 0;
+
+	ret = spinand_read_id_op(spinand, 0, 1, id);
+	if (ret)
+		return ret;
+	ret = spinand_manufacturer_match(spinand,
+					 SPINAND_READID_METHOD_OPCODE_DUMMY);
+
+	return ret;
 }
 
 static int spinand_manufacturer_init(struct spinand_device *spinand)
@@ -838,9 +990,9 @@
  * @spinand: SPI NAND object
  * @table: SPI NAND device description table
  * @table_size: size of the device description table
+ * @rdid_method: read id method to match
  *
- * Should be used by SPI NAND manufacturer drivers when they want to find a
- * match between a device ID retrieved through the READ_ID command and an
+ * Match between a device ID retrieved through the READ_ID command and an
  * entry in the SPI NAND description table. If a match is found, the spinand
  * object will be initialized with information provided by the matching
  * spinand_info entry.
@@ -849,8 +1001,10 @@
  */
 int spinand_match_and_init(struct spinand_device *spinand,
 			   const struct spinand_info *table,
-			   unsigned int table_size, u8 devid)
+			   unsigned int table_size,
+			   enum spinand_readid_method rdid_method)
 {
+	u8 *id = spinand->id.data;
 	struct nand_device *nand = spinand_to_nand(spinand);
 	unsigned int i;
 
@@ -858,13 +1012,17 @@
 		const struct spinand_info *info = &table[i];
 		const struct spi_mem_op *op;
 
-		if (devid != info->devid)
+		if (rdid_method != info->devid.method)
+			continue;
+
+		if (memcmp(id + 1, info->devid.id, info->devid.len))
 			continue;
 
 		nand->memorg = table[i].memorg;
-		nand->eccreq = table[i].eccreq;
+		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
 		spinand->eccinfo = table[i].eccinfo;
 		spinand->flags = table[i].flags;
+		spinand->id.len = 1 + table[i].devid.len;
 		spinand->select_target = table[i].select_target;
 
 		op = spinand_select_op_variant(spinand,
@@ -901,13 +1059,7 @@
 	if (ret)
 		return ret;
 
-	ret = spinand_read_id_op(spinand, spinand->id.data);
-	if (ret)
-		return ret;
-
-	spinand->id.len = SPINAND_MAX_ID_LEN;
-
-	ret = spinand_manufacturer_detect(spinand);
+	ret = spinand_id_detect(spinand);
 	if (ret) {
 		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
 			spinand->id.data);
@@ -930,29 +1082,99 @@
 	return 0;
 }
 
-static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
-				       struct mtd_oob_region *region)
+static int spinand_reinit(struct mtd_info *mtd)
 {
-	return -ERANGE;
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct device *dev = &spinand->spimem->spi->dev;
+	int ret, i;
+
+	ret = spinand_init_quad_enable(spinand);
+	if (ret)
+		return ret;
+
+	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+	if (ret)
+		return ret;
+
+	ret = spinand_manufacturer_init(spinand);
+	if (ret) {
+		dev_err(dev,
+			"Failed to initialize the SPI NAND chip (err = %d)\n",
+			ret);
+		return ret;
+	}
+
+	ret = spinand_create_dirmaps(spinand);
+	if (ret) {
+		dev_err(dev,
+			"Failed to create direct mappings for read/write operations (err = %d)\n",
+			ret);
+		return ret;
+	}
+
+	/* After power up, all blocks are locked, so unlock them here. */
+	for (i = 0; i < nand->memorg.ntargets; i++) {
+		ret = spinand_select_target(spinand, i);
+		if (ret)
+			return ret;
+
+		/* HWP_EN must be enabled first before block unlock region is set */
+		if (spinand->id.data[0] == 0x01) {
+			ret = spinand_lock_block(spinand, HWP_EN);
+			if (ret)
+				return ret;
+		}
+
+		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
 }
 
-static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
-					struct mtd_oob_region *region)
+/**
+ * spinand_mtd_suspend - [MTD Interface] Suspend the spinand flash
+ * @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int spinand_mtd_suspend(struct mtd_info *mtd)
 {
-	if (section)
-		return -ERANGE;
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	int ret = 0;
 
-	/* Reserve 2 bytes for the BBM. */
-	region->offset = 2;
-	region->length = 62;
+	mutex_lock(&spinand->lock);
 
-	return 0;
+	return ret;
 }
 
-static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
-	.ecc = spinand_noecc_ooblayout_ecc,
-	.free = spinand_noecc_ooblayout_free,
-};
+/**
+ * spinand_mtd_resume - [MTD Interface] Resume the spinand flash
+ * @mtd: MTD device structure
+ */
+static void spinand_mtd_resume(struct mtd_info *mtd)
+{
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	struct device *dev = &spinand->spimem->spi->dev;
+	int ret;
+
+	ret = spinand_reinit(mtd);
+	if (ret)
+		dev_err(dev, "Failed to resume, ret =%d !\n", ret);
+	mutex_unlock(&spinand->lock);
+}
+
+/**
+ * spinand_mtd_shutdown - [MTD Interface] Finish the current spinand operation and
+ *			  prevent further operations
+ * @mtd: MTD device structure
+ */
+static void spinand_mtd_shutdown(struct mtd_info *mtd)
+{
+	spinand_mtd_suspend(mtd);
+}
 
 static int spinand_init(struct spinand_device *spinand)
 {
@@ -1008,6 +1230,14 @@
 		goto err_free_bufs;
 	}
 
+	ret = spinand_create_dirmaps(spinand);
+	if (ret) {
+		dev_err(dev,
+			"Failed to create direct mappings for read/write operations (err = %d)\n",
+			ret);
+		goto err_manuf_cleanup;
+	}
+
 	/* After power up, all blocks are locked, so unlock them here. */
 	for (i = 0; i < nand->memorg.ntargets; i++) {
 		ret = spinand_select_target(spinand, i);
@@ -1023,6 +1253,10 @@
 	if (ret)
 		goto err_manuf_cleanup;
 
+	/* SPI-NAND default ECC engine is on-die */
+	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
+
 	/*
 	 * Right now, we don't support ECC, so let the whole oob
 	 * area is available for user.
@@ -1033,6 +1267,10 @@
 	mtd->_block_markbad = spinand_mtd_block_markbad;
 	mtd->_block_isreserved = spinand_mtd_block_isreserved;
 	mtd->_erase = spinand_mtd_erase;
+	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+	mtd->_suspend = spinand_mtd_suspend;
+	mtd->_resume = spinand_mtd_resume;
+	mtd->_reboot = spinand_mtd_shutdown;
 
 	if (spinand->eccinfo.ooblayout)
 		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
@@ -1046,8 +1284,13 @@
 	mtd->oobavail = ret;
 
 	/* Propagate ECC information to mtd_info */
-	mtd->ecc_strength = nand->eccreq.strength;
-	mtd->ecc_step_size = nand->eccreq.step_size;
+	mtd->ecc_strength = nanddev_get_ecc_requirements(nand)->strength;
+	mtd->ecc_step_size = nanddev_get_ecc_requirements(nand)->step_size;
+	if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_SFC))
+		mtd->name = "spi-nand0";
+
+	if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH))
+		nanddev_scan_bbt_in_flash(nand);
 
 	return 0;
 