From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] mtd: nand: spinand: use dirmaps, generic on-die ECC engine and multi-method READ_ID detection
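
Switch the SPI NAND core from hand-rolled READ/PROG CACHE op splitting
to spi-mem dirmap accessors, hook on-die ECC up to the generic NAND ECC
engine framework, extend device detection to try the three READ_ID
variants (opcode, opcode + address, opcode + dummy), register a number
of additional manufacturer drivers, and add suspend/resume/shutdown
hooks plus optional flash-resident BBT support.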
---
kernel/drivers/mtd/nand/spi/core.c | 540 ++++++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 384 insertions(+), 156 deletions(-)
diff --git a/kernel/drivers/mtd/nand/spi/core.c b/kernel/drivers/mtd/nand/spi/core.c
index 4a3dc59..bd18e6d 100644
--- a/kernel/drivers/mtd/nand/spi/core.c
+++ b/kernel/drivers/mtd/nand/spi/core.c
@@ -13,26 +13,13 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mtd/bbt_store.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- u16 *column)
-{
- struct nand_device *nand = spinand_to_nand(spinand);
- unsigned int shift;
-
- if (nand->memorg.planes_per_lun < 2)
- return;
-
- /* The plane number is passed in MSB just above the column address */
- shift = fls(nand->memorg.pagesize);
- *column |= req->pos.plane << shift;
-}
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
@@ -207,6 +194,126 @@
enable ? CFG_ECC_ENABLE : 0);
}
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nanddev_get_ecc_requirements(nand)->strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .free = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct spinand_ondie_ecc_conf *engine_conf;
+
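+ /*
+  * Expose the chip's on-die engine to the generic ECC core, using the
+  * step size and strength the chip itself advertises.
+  */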
+ nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+ nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
+ nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
+
+ engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+ if (!engine_conf)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = engine_conf;
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ return 0;
+}
+
+static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ kfree(nand->ecc.ctx.priv);
+}
+
+static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ bool enable = (req->mode != MTD_OPS_RAW);
+
+ /* Only enable or disable the engine */
+ return spinand_ecc_enable(spinand, enable);
+}
+
+static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+ struct spinand_device *spinand = nand_to_spinand(nand);
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* Nothing to do when finishing a page write */
+ if (req->type == NAND_PAGE_WRITE)
+ return 0;
+
+ /* Finish a page read: check the status, report errors/bitflips */
+ return spinand_check_ecc_status(spinand, engine_conf->status);
+}
+
+static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+ .init_ctx = spinand_ondie_ecc_init_ctx,
+ .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
+ .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
+ .finish_io_req = spinand_ondie_ecc_finish_io_req,
+};
+
+static struct nand_ecc_engine spinand_ondie_ecc_engine = {
+ .ops = &spinand_ondie_ecc_engine_ops,
+};
+
static int spinand_write_enable_op(struct spinand_device *spinand)
{
struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
@@ -227,27 +334,21 @@
static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.read_cache;
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
u16 column = 0;
- int ret;
+ ssize_t ret;
if (req->datalen) {
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.dataoffs = 0;
- adjreq.databuf.in = spinand->databuf;
buf = spinand->databuf;
- nbytes = adjreq.datalen;
+ nbytes = nanddev_page_size(nand);
+ column = 0;
}
if (req->ooblen) {
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.in = spinand->oobbuf;
nbytes += nanddev_per_page_oobsize(nand);
if (!buf) {
buf = spinand->oobbuf;
@@ -255,28 +356,19 @@
}
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
- op.addr.val = column;
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
- /*
- * Some controllers are limited in term of max RX data size. In this
- * case, just repeat the READ_CACHE operation after updating the
- * column.
- */
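+ /*
+  * spi_mem_dirmap_read() may transfer less than requested, so loop
+  * until the whole page + OOB region has been read.
+  */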
while (nbytes) {
- op.data.buf.in = buf;
- op.data.nbytes = nbytes;
- ret = spi_mem_adjust_op_size(spinand->spimem, &op);
- if (ret)
+ ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- ret = spi_mem_exec_op(spinand->spimem, &op);
- if (ret)
- return ret;
+ if (!ret || ret > nbytes)
+ return -EIO;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
}
if (req->datalen)
@@ -300,14 +392,12 @@
static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.write_cache;
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct spi_mem_dirmap_desc *wdesc;
+ unsigned int nbytes, column = 0;
void *buf = spinand->databuf;
- unsigned int nbytes;
- u16 column = 0;
- int ret;
+ ssize_t ret;
/*
* Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -318,12 +408,6 @@
*/
nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
memset(spinand->databuf, 0xff, nbytes);
- adjreq.dataoffs = 0;
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.databuf.out = spinand->databuf;
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.out = spinand->oobbuf;
if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
@@ -340,42 +424,19 @@
req->ooblen);
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
- op = *spinand->op_templates.write_cache;
- op.addr.val = column;
-
- /*
- * Some controllers are limited in term of max TX data size. In this
- * case, split the operation into one LOAD CACHE and one or more
- * LOAD RANDOM CACHE.
- */
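+ /* As in the read path, dirmap writes may be partial; loop until done. */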
while (nbytes) {
- op.data.buf.out = buf;
- op.data.nbytes = nbytes;
-
- ret = spi_mem_adjust_op_size(spinand->spimem, &op);
- if (ret)
+ ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- ret = spi_mem_exec_op(spinand->spimem, &op);
- if (ret)
- return ret;
+ if (!ret || ret > nbytes)
+ return -EIO;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
-
- /*
- * We need to use the RANDOM LOAD CACHE operation if there's
- * more than one iteration, because the LOAD operation might
- * reset the cache to 0xff.
- */
- if (nbytes) {
- column = op.addr.val;
- op = *spinand->op_templates.update_cache;
- op.addr.val = column;
- }
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
}
return 0;
@@ -431,10 +492,11 @@
return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
-static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
+ u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
- SPINAND_MAX_ID_LEN);
+ struct spi_mem_op op = SPINAND_READID_OP(
+ naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -459,35 +521,6 @@
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
-}
-
-static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
-{
- struct nand_device *nand = spinand_to_nand(spinand);
-
- if (spinand->eccinfo.get_status)
- return spinand->eccinfo.get_status(spinand, status);
-
- switch (status & STATUS_ECC_MASK) {
- case STATUS_ECC_NO_BITFLIPS:
- return 0;
-
- case STATUS_ECC_HAS_BITFLIPS:
- /*
- * We have no way to know exactly how many bitflips have been
- * fixed, so let's return the maximum possible value so that
- * wear-leveling layers move the data immediately.
- */
- return nand->eccreq.strength;
-
- case STATUS_ECC_UNCOR_ERROR:
- return -EBADMSG;
-
- default:
- break;
- }
-
- return -EINVAL;
}
static int spinand_read_page(struct spinand_device *spinand,
@@ -556,7 +589,7 @@
mutex_lock(&spinand->lock);
- nanddev_io_for_each_page(nand, from, ops, &iter) {
+ nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
@@ -604,7 +637,7 @@
mutex_lock(&spinand->lock);
- nanddev_io_for_each_page(nand, to, ops, &iter) {
+ nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
@@ -697,6 +730,9 @@
ret = nanddev_markbad(nand, &pos);
mutex_unlock(&spinand->lock);
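+ /* Keep the flash-resident copy of the BBT in sync, if enabled. */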
+ if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH))
+ nanddev_bbt_in_flash_update(nand);
+
return ret;
}
@@ -753,6 +789,59 @@
return ret;
}
+static int spinand_create_dirmap(struct spinand_device *spinand,
+ unsigned int plane)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
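+ /* One mapping covers the whole cache: page data followed by OOB. */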
+ struct spi_mem_dirmap_info info = {
+ .length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ };
+ struct spi_mem_dirmap_desc *desc;
+
+ /* The plane number is passed in MSB just above the column address */
+ info.offset = plane << fls(nand->memorg.pagesize);
+
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].rdesc = desc;
+
+ return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int i, ret;
+
+ spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
+ sizeof(*spinand->dirmaps) *
+ nand->memorg.planes_per_lun,
+ GFP_KERNEL);
+ if (!spinand->dirmaps)
+ return -ENOMEM;
+
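+ /* Create one read and one write mapping per plane. */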
+ for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+ ret = spinand_create_dirmap(spinand, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct nand_ops spinand_ops = {
.erase = spinand_erase,
.markbad = spinand_markbad,
@@ -760,27 +849,82 @@
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &biwin_spinand_manufacturer,
+ &dosilicon_spinand_manufacturer,
+ &esmt_spinand_manufacturer,
+ &etron_spinand_manufacturer,
+ &fmsh_spinand_manufacturer,
+ &foresee_spinand_manufacturer,
+ &gigadevice_spinand_manufacturer,
+ &gsto_spinand_manufacturer,
+ &hyf_spinand_manufacturer,
+ &jsc_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
+ &paragon_spinand_manufacturer,
+ &silicongo_spinand_manufacturer,
+ &skyhigh_spinand_manufacturer,
+ &toshiba_spinand_manufacturer,
+ &unim_spinand_manufacturer,
&winbond_spinand_manufacturer,
+ &xincun_spinand_manufacturer,
+ &xtx_spinand_manufacturer,
};
-static int spinand_manufacturer_detect(struct spinand_device *spinand)
+static int spinand_manufacturer_match(struct spinand_device *spinand,
+ enum spinand_readid_method rdid_method)
{
+ u8 *id = spinand->id.data;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
- ret = spinand_manufacturers[i]->ops->detect(spinand);
- if (ret > 0) {
- spinand->manufacturer = spinand_manufacturers[i];
- return 0;
- } else if (ret < 0) {
- return ret;
- }
- }
+ const struct spinand_manufacturer *manufacturer =
+ spinand_manufacturers[i];
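+
+ /*
+  * The first ID byte selects the manufacturer; the remaining
+  * bytes are matched against its chip table.
+  */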
+ if (id[0] != manufacturer->id)
+ continue;
+
+ ret = spinand_match_and_init(spinand,
+ manufacturer->chips,
+ manufacturer->nchips,
+ rdid_method);
+ if (ret < 0)
+ continue;
+
+ spinand->manufacturer = manufacturer;
+ return 0;
+ }
return -ENOTSUPP;
+}
+
+static int spinand_id_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
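+ /* First try: bare READ_ID opcode, ID bytes follow immediately. */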
+ ret = spinand_read_id_op(spinand, 0, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
+ if (!ret)
+ return 0;
+
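+ /* Next: chips that expect one address byte before the ID. */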
+ ret = spinand_read_id_op(spinand, 1, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_ADDR);
+ if (!ret)
+ return 0;
+
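+ /* Last: chips that insert a dummy byte after the opcode. */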
+ ret = spinand_read_id_op(spinand, 0, 1, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_DUMMY);
+
+ return ret;
}
static int spinand_manufacturer_init(struct spinand_device *spinand)
@@ -838,9 +982,9 @@
* @spinand: SPI NAND object
* @table: SPI NAND device description table
* @table_size: size of the device description table
+ * @rdid_method: read id method to match
*
- * Should be used by SPI NAND manufacturer drivers when they want to find a
- * match between a device ID retrieved through the READ_ID command and an
+ * Match between a device ID retrieved through the READ_ID command and an
* entry in the SPI NAND description table. If a match is found, the spinand
* object will be initialized with information provided by the matching
* spinand_info entry.
@@ -849,8 +993,10 @@
*/
int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
- unsigned int table_size, u8 devid)
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method)
{
+ u8 *id = spinand->id.data;
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
@@ -858,13 +1004,17 @@
const struct spinand_info *info = &table[i];
const struct spi_mem_op *op;
- if (devid != info->devid)
+ if (rdid_method != info->devid.method)
+ continue;
+
+ if (memcmp(id + 1, info->devid.id, info->devid.len))
continue;
nand->memorg = table[i].memorg;
- nand->eccreq = table[i].eccreq;
+ nanddev_set_ecc_requirements(nand, &table[i].eccreq);
spinand->eccinfo = table[i].eccinfo;
spinand->flags = table[i].flags;
+ spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
op = spinand_select_op_variant(spinand,
@@ -901,13 +1051,7 @@
if (ret)
return ret;
- ret = spinand_read_id_op(spinand, spinand->id.data);
- if (ret)
- return ret;
-
- spinand->id.len = SPINAND_MAX_ID_LEN;
-
- ret = spinand_manufacturer_detect(spinand);
+ ret = spinand_id_detect(spinand);
if (ret) {
dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
spinand->id.data);
@@ -930,29 +1074,92 @@
return 0;
}
-static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int spinand_reinit(struct mtd_info *mtd)
{
- return -ERANGE;
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct device *dev = &spinand->spimem->spi->dev;
+ int ret, i;
+
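+ /*
+  * Replay the probe-time setup (quad enable, OTP off, manufacturer
+  * init, dirmaps, block unlock) after the chip lost power.
+  */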
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ return ret;
+
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ return ret;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
}
-static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+/**
+ * spinand_mtd_suspend - [MTD Interface] Suspend the spinand flash
+ * @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int spinand_mtd_suspend(struct mtd_info *mtd)
{
- if (section)
- return -ERANGE;
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret = 0;
- /* Reserve 2 bytes for the BBM. */
- region->offset = 2;
- region->length = 62;
+ mutex_lock(&spinand->lock);
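+
+ /*
+  * The lock is intentionally held across suspend and only released
+  * in spinand_mtd_resume(), blocking new I/O in between.
+  */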
- return 0;
+ return ret;
}
-static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
- .ecc = spinand_noecc_ooblayout_ecc,
- .free = spinand_noecc_ooblayout_free,
-};
+/**
+ * spinand_mtd_resume - [MTD Interface] Resume the spinand flash
+ * @mtd: MTD device structure
+ */
+static void spinand_mtd_resume(struct mtd_info *mtd)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct device *dev = &spinand->spimem->spi->dev;
+ int ret;
+
+ ret = spinand_reinit(mtd);
+ if (ret)
+ dev_err(dev, "Failed to resume, ret = %d\n", ret);
+ mutex_unlock(&spinand->lock);
+}
+
+/**
+ * spinand_mtd_shutdown - [MTD Interface] Finish the current spinand operation and
+ * prevent further operations
+ * @mtd: MTD device structure
+ */
+static void spinand_mtd_shutdown(struct mtd_info *mtd)
+{
+ spinand_mtd_suspend(mtd);
+}
static int spinand_init(struct spinand_device *spinand)
{
@@ -1008,6 +1215,14 @@
goto err_free_bufs;
}
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_manuf_cleanup;
+ }
+
/* After power up, all blocks are locked, so unlock them here. */
for (i = 0; i < nand->memorg.ntargets; i++) {
ret = spinand_select_target(spinand, i);
@@ -1023,6 +1238,10 @@
if (ret)
goto err_manuf_cleanup;
+ /* SPI-NAND default ECC engine is on-die */
+ nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+ nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
+
/*
* Right now, we don't support ECC, so let the whole oob
* area be available for the user.
@@ -1033,6 +1252,10 @@
mtd->_block_markbad = spinand_mtd_block_markbad;
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
+ mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+ mtd->_suspend = spinand_mtd_suspend;
+ mtd->_resume = spinand_mtd_resume;
+ mtd->_reboot = spinand_mtd_shutdown;
if (spinand->eccinfo.ooblayout)
mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
@@ -1046,8 +1269,13 @@
mtd->oobavail = ret;
/* Propagate ECC information to mtd_info */
- mtd->ecc_strength = nand->eccreq.strength;
- mtd->ecc_step_size = nand->eccreq.step_size;
+ mtd->ecc_strength = nanddev_get_ecc_requirements(nand)->strength;
+ mtd->ecc_step_size = nanddev_get_ecc_requirements(nand)->step_size;
+ if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_SFC))
+ mtd->name = "spi-nand0";
+
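+ /* Pick up (or create) the bad block table stored in flash. */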
+ if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH))
+ nanddev_scan_bbt_in_flash(nand);
return 0;
--
Gitblit v1.6.2