| .. | .. |
|---|
| 13 | 13 | #include <linux/jiffies.h> |
|---|
| 14 | 14 | #include <linux/kernel.h> |
|---|
| 15 | 15 | #include <linux/module.h> |
|---|
| 16 | +#include <linux/mtd/bbt_store.h> |
|---|
| 16 | 17 | #include <linux/mtd/spinand.h> |
|---|
| 17 | 18 | #include <linux/of.h> |
|---|
| 18 | 19 | #include <linux/slab.h> |
|---|
| 20 | +#include <linux/string.h> |
|---|
| 19 | 21 | #include <linux/spi/spi.h> |
|---|
| 20 | 22 | #include <linux/spi/spi-mem.h> |
|---|
| 21 | | - |
|---|
| 22 | | -static void spinand_cache_op_adjust_colum(struct spinand_device *spinand, |
|---|
| 23 | | - const struct nand_page_io_req *req, |
|---|
| 24 | | - u16 *column) |
|---|
| 25 | | -{ |
|---|
| 26 | | - struct nand_device *nand = spinand_to_nand(spinand); |
|---|
| 27 | | - unsigned int shift; |
|---|
| 28 | | - |
|---|
| 29 | | - if (nand->memorg.planes_per_lun < 2) |
|---|
| 30 | | - return; |
|---|
| 31 | | - |
|---|
| 32 | | - /* The plane number is passed in MSB just above the column address */ |
|---|
| 33 | | - shift = fls(nand->memorg.pagesize); |
|---|
| 34 | | - *column |= req->pos.plane << shift; |
|---|
| 35 | | -} |
|---|
| 36 | 23 | |
|---|
| 37 | 24 | static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) |
|---|
| 38 | 25 | { |
|---|
| .. | .. |
|---|
| 207 | 194 | enable ? CFG_ECC_ENABLE : 0); |
|---|
| 208 | 195 | } |
|---|
| 209 | 196 | |
|---|
| 197 | +static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) |
|---|
| 198 | +{ |
|---|
| 199 | + struct nand_device *nand = spinand_to_nand(spinand); |
|---|
| 200 | + |
|---|
| 201 | + if (spinand->eccinfo.get_status) |
|---|
| 202 | + return spinand->eccinfo.get_status(spinand, status); |
|---|
| 203 | + |
|---|
| 204 | + switch (status & STATUS_ECC_MASK) { |
|---|
| 205 | + case STATUS_ECC_NO_BITFLIPS: |
|---|
| 206 | + return 0; |
|---|
| 207 | + |
|---|
| 208 | + case STATUS_ECC_HAS_BITFLIPS: |
|---|
| 209 | + /* |
|---|
| 210 | + * We have no way to know exactly how many bitflips have been |
|---|
| 211 | + * fixed, so let's return the maximum possible value so that |
|---|
| 212 | + * wear-leveling layers move the data immediately. |
|---|
| 213 | + */ |
|---|
| 214 | + return nanddev_get_ecc_requirements(nand)->strength; |
|---|
| 215 | + |
|---|
| 216 | + case STATUS_ECC_UNCOR_ERROR: |
|---|
| 217 | + return -EBADMSG; |
|---|
| 218 | + |
|---|
| 219 | + default: |
|---|
| 220 | + break; |
|---|
| 221 | + } |
|---|
| 222 | + |
|---|
| 223 | + return -EINVAL; |
|---|
| 224 | +} |
|---|
| 225 | + |
|---|
| 226 | +static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section, |
|---|
| 227 | + struct mtd_oob_region *region) |
|---|
| 228 | +{ |
|---|
| 229 | + return -ERANGE; |
|---|
| 230 | +} |
|---|
| 231 | + |
|---|
| 232 | +static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section, |
|---|
| 233 | + struct mtd_oob_region *region) |
|---|
| 234 | +{ |
|---|
| 235 | + if (section) |
|---|
| 236 | + return -ERANGE; |
|---|
| 237 | + |
|---|
| 238 | + /* Reserve 2 bytes for the BBM. */ |
|---|
| 239 | + region->offset = 2; |
|---|
| 240 | + region->length = 62; |
|---|
| 241 | + |
|---|
| 242 | + return 0; |
|---|
| 243 | +} |
|---|
| 244 | + |
|---|
| 245 | +static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = { |
|---|
| 246 | + .ecc = spinand_noecc_ooblayout_ecc, |
|---|
| 247 | + .free = spinand_noecc_ooblayout_free, |
|---|
| 248 | +}; |
|---|
| 249 | + |
|---|
| 250 | +static int spinand_ondie_ecc_init_ctx(struct nand_device *nand) |
|---|
| 251 | +{ |
|---|
| 252 | + struct spinand_device *spinand = nand_to_spinand(nand); |
|---|
| 253 | + struct mtd_info *mtd = nanddev_to_mtd(nand); |
|---|
| 254 | + struct spinand_ondie_ecc_conf *engine_conf; |
|---|
| 255 | + |
|---|
| 256 | + nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; |
|---|
| 257 | + nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size; |
|---|
| 258 | + nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength; |
|---|
| 259 | + |
|---|
| 260 | + engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); |
|---|
| 261 | + if (!engine_conf) |
|---|
| 262 | + return -ENOMEM; |
|---|
| 263 | + |
|---|
| 264 | + nand->ecc.ctx.priv = engine_conf; |
|---|
| 265 | + |
|---|
| 266 | + if (spinand->eccinfo.ooblayout) |
|---|
| 267 | + mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout); |
|---|
| 268 | + else |
|---|
| 269 | + mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout); |
|---|
| 270 | + |
|---|
| 271 | + return 0; |
|---|
| 272 | +} |
|---|
| 273 | + |
|---|
| 274 | +static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand) |
|---|
| 275 | +{ |
|---|
| 276 | + kfree(nand->ecc.ctx.priv); |
|---|
| 277 | +} |
|---|
| 278 | + |
|---|
| 279 | +static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand, |
|---|
| 280 | + struct nand_page_io_req *req) |
|---|
| 281 | +{ |
|---|
| 282 | + struct spinand_device *spinand = nand_to_spinand(nand); |
|---|
| 283 | + bool enable = (req->mode != MTD_OPS_RAW); |
|---|
| 284 | + |
|---|
| 285 | + /* Only enable or disable the engine */ |
|---|
| 286 | + return spinand_ecc_enable(spinand, enable); |
|---|
| 287 | +} |
|---|
| 288 | + |
|---|
| 289 | +static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, |
|---|
| 290 | + struct nand_page_io_req *req) |
|---|
| 291 | +{ |
|---|
| 292 | + struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; |
|---|
| 293 | + struct spinand_device *spinand = nand_to_spinand(nand); |
|---|
| 294 | + |
|---|
| 295 | + if (req->mode == MTD_OPS_RAW) |
|---|
| 296 | + return 0; |
|---|
| 297 | + |
|---|
| 298 | + /* Nothing to do when finishing a page write */ |
|---|
| 299 | + if (req->type == NAND_PAGE_WRITE) |
|---|
| 300 | + return 0; |
|---|
| 301 | + |
|---|
| 302 | +	/* Finish a page read: check the status, report errors/bitflips */ |
|---|
| 303 | + return spinand_check_ecc_status(spinand, engine_conf->status); |
|---|
| 304 | +} |
|---|
| 305 | + |
|---|
| 306 | +static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = { |
|---|
| 307 | + .init_ctx = spinand_ondie_ecc_init_ctx, |
|---|
| 308 | + .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx, |
|---|
| 309 | + .prepare_io_req = spinand_ondie_ecc_prepare_io_req, |
|---|
| 310 | + .finish_io_req = spinand_ondie_ecc_finish_io_req, |
|---|
| 311 | +}; |
|---|
| 312 | + |
|---|
| 313 | +static struct nand_ecc_engine spinand_ondie_ecc_engine = { |
|---|
| 314 | + .ops = &spinand_ondie_ecc_engine_ops, |
|---|
| 315 | +}; |
|---|
| 316 | + |
|---|
| 210 | 317 | static int spinand_write_enable_op(struct spinand_device *spinand) |
|---|
| 211 | 318 | { |
|---|
| 212 | 319 | struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true); |
|---|
| .. | .. |
|---|
| 227 | 334 | static int spinand_read_from_cache_op(struct spinand_device *spinand, |
|---|
| 228 | 335 | const struct nand_page_io_req *req) |
|---|
| 229 | 336 | { |
|---|
| 230 | | - struct spi_mem_op op = *spinand->op_templates.read_cache; |
|---|
| 231 | 337 | struct nand_device *nand = spinand_to_nand(spinand); |
|---|
| 232 | 338 | struct mtd_info *mtd = nanddev_to_mtd(nand); |
|---|
| 233 | | - struct nand_page_io_req adjreq = *req; |
|---|
| 339 | + struct spi_mem_dirmap_desc *rdesc; |
|---|
| 234 | 340 | unsigned int nbytes = 0; |
|---|
| 235 | 341 | void *buf = NULL; |
|---|
| 236 | 342 | u16 column = 0; |
|---|
| 237 | | - int ret; |
|---|
| 343 | + ssize_t ret; |
|---|
| 238 | 344 | |
|---|
| 239 | 345 | if (req->datalen) { |
|---|
| 240 | | - adjreq.datalen = nanddev_page_size(nand); |
|---|
| 241 | | - adjreq.dataoffs = 0; |
|---|
| 242 | | - adjreq.databuf.in = spinand->databuf; |
|---|
| 243 | 346 | buf = spinand->databuf; |
|---|
| 244 | | - nbytes = adjreq.datalen; |
|---|
| 347 | + nbytes = nanddev_page_size(nand); |
|---|
| 348 | + column = 0; |
|---|
| 245 | 349 | } |
|---|
| 246 | 350 | |
|---|
| 247 | 351 | if (req->ooblen) { |
|---|
| 248 | | - adjreq.ooblen = nanddev_per_page_oobsize(nand); |
|---|
| 249 | | - adjreq.ooboffs = 0; |
|---|
| 250 | | - adjreq.oobbuf.in = spinand->oobbuf; |
|---|
| 251 | 352 | nbytes += nanddev_per_page_oobsize(nand); |
|---|
| 252 | 353 | if (!buf) { |
|---|
| 253 | 354 | buf = spinand->oobbuf; |
|---|
| .. | .. |
|---|
| 255 | 356 | } |
|---|
| 256 | 357 | } |
|---|
| 257 | 358 | |
|---|
| 258 | | - spinand_cache_op_adjust_colum(spinand, &adjreq, &column); |
|---|
| 259 | | - op.addr.val = column; |
|---|
| 359 | + rdesc = spinand->dirmaps[req->pos.plane].rdesc; |
|---|
| 260 | 360 | |
|---|
| 261 | | - /* |
|---|
| 262 | | - * Some controllers are limited in term of max RX data size. In this |
|---|
| 263 | | - * case, just repeat the READ_CACHE operation after updating the |
|---|
| 264 | | - * column. |
|---|
| 265 | | - */ |
|---|
| 266 | 361 | while (nbytes) { |
|---|
| 267 | | - op.data.buf.in = buf; |
|---|
| 268 | | - op.data.nbytes = nbytes; |
|---|
| 269 | | - ret = spi_mem_adjust_op_size(spinand->spimem, &op); |
|---|
| 270 | | - if (ret) |
|---|
| 362 | + ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf); |
|---|
| 363 | + if (ret < 0) |
|---|
| 271 | 364 | return ret; |
|---|
| 272 | 365 | |
|---|
| 273 | | - ret = spi_mem_exec_op(spinand->spimem, &op); |
|---|
| 274 | | - if (ret) |
|---|
| 275 | | - return ret; |
|---|
| 366 | + if (!ret || ret > nbytes) |
|---|
| 367 | + return -EIO; |
|---|
| 276 | 368 | |
|---|
| 277 | | - buf += op.data.nbytes; |
|---|
| 278 | | - nbytes -= op.data.nbytes; |
|---|
| 279 | | - op.addr.val += op.data.nbytes; |
|---|
| 369 | + nbytes -= ret; |
|---|
| 370 | + column += ret; |
|---|
| 371 | + buf += ret; |
|---|
| 280 | 372 | } |
|---|
| 281 | 373 | |
|---|
| 282 | 374 | if (req->datalen) |
|---|
| .. | .. |
|---|
| 300 | 392 | static int spinand_write_to_cache_op(struct spinand_device *spinand, |
|---|
| 301 | 393 | const struct nand_page_io_req *req) |
|---|
| 302 | 394 | { |
|---|
| 303 | | - struct spi_mem_op op = *spinand->op_templates.write_cache; |
|---|
| 304 | 395 | struct nand_device *nand = spinand_to_nand(spinand); |
|---|
| 305 | 396 | struct mtd_info *mtd = nanddev_to_mtd(nand); |
|---|
| 306 | | - struct nand_page_io_req adjreq = *req; |
|---|
| 397 | + struct spi_mem_dirmap_desc *wdesc; |
|---|
| 398 | + unsigned int nbytes, column = 0; |
|---|
| 307 | 399 | void *buf = spinand->databuf; |
|---|
| 308 | | - unsigned int nbytes; |
|---|
| 309 | | - u16 column = 0; |
|---|
| 310 | | - int ret; |
|---|
| 400 | + ssize_t ret; |
|---|
| 311 | 401 | |
|---|
| 312 | 402 | /* |
|---|
| 313 | 403 | * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset |
|---|
| .. | .. |
|---|
| 318 | 408 | */ |
|---|
| 319 | 409 | nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); |
|---|
| 320 | 410 | memset(spinand->databuf, 0xff, nbytes); |
|---|
| 321 | | - adjreq.dataoffs = 0; |
|---|
| 322 | | - adjreq.datalen = nanddev_page_size(nand); |
|---|
| 323 | | - adjreq.databuf.out = spinand->databuf; |
|---|
| 324 | | - adjreq.ooblen = nanddev_per_page_oobsize(nand); |
|---|
| 325 | | - adjreq.ooboffs = 0; |
|---|
| 326 | | - adjreq.oobbuf.out = spinand->oobbuf; |
|---|
| 327 | 411 | |
|---|
| 328 | 412 | if (req->datalen) |
|---|
| 329 | 413 | memcpy(spinand->databuf + req->dataoffs, req->databuf.out, |
|---|
| .. | .. |
|---|
| 340 | 424 | req->ooblen); |
|---|
| 341 | 425 | } |
|---|
| 342 | 426 | |
|---|
| 343 | | - spinand_cache_op_adjust_colum(spinand, &adjreq, &column); |
|---|
| 427 | + wdesc = spinand->dirmaps[req->pos.plane].wdesc; |
|---|
| 344 | 428 | |
|---|
| 345 | | - op = *spinand->op_templates.write_cache; |
|---|
| 346 | | - op.addr.val = column; |
|---|
| 347 | | - |
|---|
| 348 | | - /* |
|---|
| 349 | | - * Some controllers are limited in term of max TX data size. In this |
|---|
| 350 | | - * case, split the operation into one LOAD CACHE and one or more |
|---|
| 351 | | - * LOAD RANDOM CACHE. |
|---|
| 352 | | - */ |
|---|
| 353 | 429 | while (nbytes) { |
|---|
| 354 | | - op.data.buf.out = buf; |
|---|
| 355 | | - op.data.nbytes = nbytes; |
|---|
| 356 | | - |
|---|
| 357 | | - ret = spi_mem_adjust_op_size(spinand->spimem, &op); |
|---|
| 358 | | - if (ret) |
|---|
| 430 | + ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf); |
|---|
| 431 | + if (ret < 0) |
|---|
| 359 | 432 | return ret; |
|---|
| 360 | 433 | |
|---|
| 361 | | - ret = spi_mem_exec_op(spinand->spimem, &op); |
|---|
| 362 | | - if (ret) |
|---|
| 363 | | - return ret; |
|---|
| 434 | + if (!ret || ret > nbytes) |
|---|
| 435 | + return -EIO; |
|---|
| 364 | 436 | |
|---|
| 365 | | - buf += op.data.nbytes; |
|---|
| 366 | | - nbytes -= op.data.nbytes; |
|---|
| 367 | | - op.addr.val += op.data.nbytes; |
|---|
| 368 | | - |
|---|
| 369 | | - /* |
|---|
| 370 | | - * We need to use the RANDOM LOAD CACHE operation if there's |
|---|
| 371 | | - * more than one iteration, because the LOAD operation might |
|---|
| 372 | | - * reset the cache to 0xff. |
|---|
| 373 | | - */ |
|---|
| 374 | | - if (nbytes) { |
|---|
| 375 | | - column = op.addr.val; |
|---|
| 376 | | - op = *spinand->op_templates.update_cache; |
|---|
| 377 | | - op.addr.val = column; |
|---|
| 378 | | - } |
|---|
| 437 | + nbytes -= ret; |
|---|
| 438 | + column += ret; |
|---|
| 439 | + buf += ret; |
|---|
| 379 | 440 | } |
|---|
| 380 | 441 | |
|---|
| 381 | 442 | return 0; |
|---|
| .. | .. |
|---|
| 431 | 492 | return status & STATUS_BUSY ? -ETIMEDOUT : 0; |
|---|
| 432 | 493 | } |
|---|
| 433 | 494 | |
|---|
| 434 | | -static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf) |
|---|
| 495 | +static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, |
|---|
| 496 | + u8 ndummy, u8 *buf) |
|---|
| 435 | 497 | { |
|---|
| 436 | | - struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf, |
|---|
| 437 | | - SPINAND_MAX_ID_LEN); |
|---|
| 498 | + struct spi_mem_op op = SPINAND_READID_OP( |
|---|
| 499 | + naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN); |
|---|
| 438 | 500 | int ret; |
|---|
| 439 | 501 | |
|---|
| 440 | 502 | ret = spi_mem_exec_op(spinand->spimem, &op); |
|---|
| .. | .. |
|---|
| 459 | 521 | static int spinand_lock_block(struct spinand_device *spinand, u8 lock) |
|---|
| 460 | 522 | { |
|---|
| 461 | 523 | return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock); |
|---|
| 462 | | -} |
|---|
| 463 | | - |
|---|
| 464 | | -static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) |
|---|
| 465 | | -{ |
|---|
| 466 | | - struct nand_device *nand = spinand_to_nand(spinand); |
|---|
| 467 | | - |
|---|
| 468 | | - if (spinand->eccinfo.get_status) |
|---|
| 469 | | - return spinand->eccinfo.get_status(spinand, status); |
|---|
| 470 | | - |
|---|
| 471 | | - switch (status & STATUS_ECC_MASK) { |
|---|
| 472 | | - case STATUS_ECC_NO_BITFLIPS: |
|---|
| 473 | | - return 0; |
|---|
| 474 | | - |
|---|
| 475 | | - case STATUS_ECC_HAS_BITFLIPS: |
|---|
| 476 | | - /* |
|---|
| 477 | | - * We have no way to know exactly how many bitflips have been |
|---|
| 478 | | - * fixed, so let's return the maximum possible value so that |
|---|
| 479 | | - * wear-leveling layers move the data immediately. |
|---|
| 480 | | - */ |
|---|
| 481 | | - return nand->eccreq.strength; |
|---|
| 482 | | - |
|---|
| 483 | | - case STATUS_ECC_UNCOR_ERROR: |
|---|
| 484 | | - return -EBADMSG; |
|---|
| 485 | | - |
|---|
| 486 | | - default: |
|---|
| 487 | | - break; |
|---|
| 488 | | - } |
|---|
| 489 | | - |
|---|
| 490 | | - return -EINVAL; |
|---|
| 491 | 524 | } |
|---|
| 492 | 525 | |
|---|
| 493 | 526 | static int spinand_read_page(struct spinand_device *spinand, |
|---|
| .. | .. |
|---|
| 556 | 589 | |
|---|
| 557 | 590 | mutex_lock(&spinand->lock); |
|---|
| 558 | 591 | |
|---|
| 559 | | - nanddev_io_for_each_page(nand, from, ops, &iter) { |
|---|
| 592 | + nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) { |
|---|
| 560 | 593 | ret = spinand_select_target(spinand, iter.req.pos.target); |
|---|
| 561 | 594 | if (ret) |
|---|
| 562 | 595 | break; |
|---|
| .. | .. |
|---|
| 604 | 637 | |
|---|
| 605 | 638 | mutex_lock(&spinand->lock); |
|---|
| 606 | 639 | |
|---|
| 607 | | - nanddev_io_for_each_page(nand, to, ops, &iter) { |
|---|
| 640 | + nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) { |
|---|
| 608 | 641 | ret = spinand_select_target(spinand, iter.req.pos.target); |
|---|
| 609 | 642 | if (ret) |
|---|
| 610 | 643 | break; |
|---|
| .. | .. |
|---|
| 697 | 730 | ret = nanddev_markbad(nand, &pos); |
|---|
| 698 | 731 | mutex_unlock(&spinand->lock); |
|---|
| 699 | 732 | |
|---|
| 733 | + if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH)) |
|---|
| 734 | + nanddev_bbt_in_flash_update(nand); |
|---|
| 735 | + |
|---|
| 700 | 736 | return ret; |
|---|
| 701 | 737 | } |
|---|
| 702 | 738 | |
|---|
| .. | .. |
|---|
| 753 | 789 | return ret; |
|---|
| 754 | 790 | } |
|---|
| 755 | 791 | |
|---|
| 792 | +static int spinand_create_dirmap(struct spinand_device *spinand, |
|---|
| 793 | + unsigned int plane) |
|---|
| 794 | +{ |
|---|
| 795 | + struct nand_device *nand = spinand_to_nand(spinand); |
|---|
| 796 | + struct spi_mem_dirmap_info info = { |
|---|
| 797 | + .length = nanddev_page_size(nand) + |
|---|
| 798 | + nanddev_per_page_oobsize(nand), |
|---|
| 799 | + }; |
|---|
| 800 | + struct spi_mem_dirmap_desc *desc; |
|---|
| 801 | + |
|---|
| 802 | + /* The plane number is passed in MSB just above the column address */ |
|---|
| 803 | + info.offset = plane << fls(nand->memorg.pagesize); |
|---|
| 804 | + |
|---|
| 805 | + info.op_tmpl = *spinand->op_templates.update_cache; |
|---|
| 806 | + desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, |
|---|
| 807 | + spinand->spimem, &info); |
|---|
| 808 | + if (IS_ERR(desc)) |
|---|
| 809 | + return PTR_ERR(desc); |
|---|
| 810 | + |
|---|
| 811 | + spinand->dirmaps[plane].wdesc = desc; |
|---|
| 812 | + |
|---|
| 813 | + info.op_tmpl = *spinand->op_templates.read_cache; |
|---|
| 814 | + desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, |
|---|
| 815 | + spinand->spimem, &info); |
|---|
| 816 | + if (IS_ERR(desc)) |
|---|
| 817 | + return PTR_ERR(desc); |
|---|
| 818 | + |
|---|
| 819 | + spinand->dirmaps[plane].rdesc = desc; |
|---|
| 820 | + |
|---|
| 821 | + return 0; |
|---|
| 822 | +} |
|---|
| 823 | + |
|---|
| 824 | +static int spinand_create_dirmaps(struct spinand_device *spinand) |
|---|
| 825 | +{ |
|---|
| 826 | + struct nand_device *nand = spinand_to_nand(spinand); |
|---|
| 827 | + int i, ret; |
|---|
| 828 | + |
|---|
| 829 | + spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev, |
|---|
| 830 | + sizeof(*spinand->dirmaps) * |
|---|
| 831 | + nand->memorg.planes_per_lun, |
|---|
| 832 | + GFP_KERNEL); |
|---|
| 833 | + if (!spinand->dirmaps) |
|---|
| 834 | + return -ENOMEM; |
|---|
| 835 | + |
|---|
| 836 | + for (i = 0; i < nand->memorg.planes_per_lun; i++) { |
|---|
| 837 | + ret = spinand_create_dirmap(spinand, i); |
|---|
| 838 | + if (ret) |
|---|
| 839 | + return ret; |
|---|
| 840 | + } |
|---|
| 841 | + |
|---|
| 842 | + return 0; |
|---|
| 843 | +} |
|---|
| 844 | + |
|---|
| 756 | 845 | static const struct nand_ops spinand_ops = { |
|---|
| 757 | 846 | .erase = spinand_erase, |
|---|
| 758 | 847 | .markbad = spinand_markbad, |
|---|
| .. | .. |
|---|
| 760 | 849 | }; |
|---|
| 761 | 850 | |
|---|
| 762 | 851 | static const struct spinand_manufacturer *spinand_manufacturers[] = { |
|---|
| 852 | + &biwin_spinand_manufacturer, |
|---|
| 853 | + &dosilicon_spinand_manufacturer, |
|---|
| 854 | + &esmt_spinand_manufacturer, |
|---|
| 855 | + &etron_spinand_manufacturer, |
|---|
| 856 | + &fmsh_spinand_manufacturer, |
|---|
| 857 | + &foresee_spinand_manufacturer, |
|---|
| 858 | + &gigadevice_spinand_manufacturer, |
|---|
| 859 | + &gsto_spinand_manufacturer, |
|---|
| 860 | + &hyf_spinand_manufacturer, |
|---|
| 861 | + &jsc_spinand_manufacturer, |
|---|
| 763 | 862 | ¯onix_spinand_manufacturer, |
|---|
| 764 | 863 | µn_spinand_manufacturer, |
|---|
| 864 | + ¶gon_spinand_manufacturer, |
|---|
| 865 | + &silicongo_spinand_manufacturer, |
|---|
| 866 | + &skyhigh_spinand_manufacturer, |
|---|
| 867 | + &toshiba_spinand_manufacturer, |
|---|
| 868 | + &unim_spinand_manufacturer, |
|---|
| 765 | 869 | &winbond_spinand_manufacturer, |
|---|
| 870 | + &xincun_spinand_manufacturer, |
|---|
| 871 | + &xtx_spinand_manufacturer, |
|---|
| 766 | 872 | }; |
|---|
| 767 | 873 | |
|---|
| 768 | | -static int spinand_manufacturer_detect(struct spinand_device *spinand) |
|---|
| 874 | +static int spinand_manufacturer_match(struct spinand_device *spinand, |
|---|
| 875 | + enum spinand_readid_method rdid_method) |
|---|
| 769 | 876 | { |
|---|
| 877 | + u8 *id = spinand->id.data; |
|---|
| 770 | 878 | unsigned int i; |
|---|
| 771 | 879 | int ret; |
|---|
| 772 | 880 | |
|---|
| 773 | 881 | for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) { |
|---|
| 774 | | - ret = spinand_manufacturers[i]->ops->detect(spinand); |
|---|
| 775 | | - if (ret > 0) { |
|---|
| 776 | | - spinand->manufacturer = spinand_manufacturers[i]; |
|---|
| 777 | | - return 0; |
|---|
| 778 | | - } else if (ret < 0) { |
|---|
| 779 | | - return ret; |
|---|
| 780 | | - } |
|---|
| 781 | | - } |
|---|
| 882 | + const struct spinand_manufacturer *manufacturer = |
|---|
| 883 | + spinand_manufacturers[i]; |
|---|
| 782 | 884 | |
|---|
| 885 | + if (id[0] != manufacturer->id) |
|---|
| 886 | + continue; |
|---|
| 887 | + |
|---|
| 888 | + ret = spinand_match_and_init(spinand, |
|---|
| 889 | + manufacturer->chips, |
|---|
| 890 | + manufacturer->nchips, |
|---|
| 891 | + rdid_method); |
|---|
| 892 | + if (ret < 0) |
|---|
| 893 | + continue; |
|---|
| 894 | + |
|---|
| 895 | + spinand->manufacturer = manufacturer; |
|---|
| 896 | + return 0; |
|---|
| 897 | + } |
|---|
| 783 | 898 | return -ENOTSUPP; |
|---|
| 899 | +} |
|---|
| 900 | + |
|---|
| 901 | +static int spinand_id_detect(struct spinand_device *spinand) |
|---|
| 902 | +{ |
|---|
| 903 | + u8 *id = spinand->id.data; |
|---|
| 904 | + int ret; |
|---|
| 905 | + |
|---|
| 906 | + ret = spinand_read_id_op(spinand, 0, 0, id); |
|---|
| 907 | + if (ret) |
|---|
| 908 | + return ret; |
|---|
| 909 | + ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE); |
|---|
| 910 | + if (!ret) |
|---|
| 911 | + return 0; |
|---|
| 912 | + |
|---|
| 913 | + ret = spinand_read_id_op(spinand, 1, 0, id); |
|---|
| 914 | + if (ret) |
|---|
| 915 | + return ret; |
|---|
| 916 | + ret = spinand_manufacturer_match(spinand, |
|---|
| 917 | + SPINAND_READID_METHOD_OPCODE_ADDR); |
|---|
| 918 | + if (!ret) |
|---|
| 919 | + return 0; |
|---|
| 920 | + |
|---|
| 921 | + ret = spinand_read_id_op(spinand, 0, 1, id); |
|---|
| 922 | + if (ret) |
|---|
| 923 | + return ret; |
|---|
| 924 | + ret = spinand_manufacturer_match(spinand, |
|---|
| 925 | + SPINAND_READID_METHOD_OPCODE_DUMMY); |
|---|
| 926 | + |
|---|
| 927 | + return ret; |
|---|
| 784 | 928 | } |
|---|
| 785 | 929 | |
|---|
| 786 | 930 | static int spinand_manufacturer_init(struct spinand_device *spinand) |
|---|
| .. | .. |
|---|
| 838 | 982 | * @spinand: SPI NAND object |
|---|
| 839 | 983 | * @table: SPI NAND device description table |
|---|
| 840 | 984 | * @table_size: size of the device description table |
|---|
| 985 | + * @rdid_method: read id method to match |
|---|
| 841 | 986 | * |
|---|
| 842 | | - * Should be used by SPI NAND manufacturer drivers when they want to find a |
|---|
| 843 | | - * match between a device ID retrieved through the READ_ID command and an |
|---|
| 987 | + * Match between a device ID retrieved through the READ_ID command and an |
|---|
| 844 | 988 | * entry in the SPI NAND description table. If a match is found, the spinand |
|---|
| 845 | 989 | * object will be initialized with information provided by the matching |
|---|
| 846 | 990 | * spinand_info entry. |
|---|
| .. | .. |
|---|
| 849 | 993 | */ |
|---|
| 850 | 994 | int spinand_match_and_init(struct spinand_device *spinand, |
|---|
| 851 | 995 | const struct spinand_info *table, |
|---|
| 852 | | - unsigned int table_size, u8 devid) |
|---|
| 996 | + unsigned int table_size, |
|---|
| 997 | + enum spinand_readid_method rdid_method) |
|---|
| 853 | 998 | { |
|---|
| 999 | + u8 *id = spinand->id.data; |
|---|
| 854 | 1000 | struct nand_device *nand = spinand_to_nand(spinand); |
|---|
| 855 | 1001 | unsigned int i; |
|---|
| 856 | 1002 | |
|---|
| .. | .. |
|---|
| 858 | 1004 | const struct spinand_info *info = &table[i]; |
|---|
| 859 | 1005 | const struct spi_mem_op *op; |
|---|
| 860 | 1006 | |
|---|
| 861 | | - if (devid != info->devid) |
|---|
| 1007 | + if (rdid_method != info->devid.method) |
|---|
| 1008 | + continue; |
|---|
| 1009 | + |
|---|
| 1010 | + if (memcmp(id + 1, info->devid.id, info->devid.len)) |
|---|
| 862 | 1011 | continue; |
|---|
| 863 | 1012 | |
|---|
| 864 | 1013 | nand->memorg = table[i].memorg; |
|---|
| 865 | | - nand->eccreq = table[i].eccreq; |
|---|
| 1014 | + nanddev_set_ecc_requirements(nand, &table[i].eccreq); |
|---|
| 866 | 1015 | spinand->eccinfo = table[i].eccinfo; |
|---|
| 867 | 1016 | spinand->flags = table[i].flags; |
|---|
| 1017 | + spinand->id.len = 1 + table[i].devid.len; |
|---|
| 868 | 1018 | spinand->select_target = table[i].select_target; |
|---|
| 869 | 1019 | |
|---|
| 870 | 1020 | op = spinand_select_op_variant(spinand, |
|---|
| .. | .. |
|---|
| 901 | 1051 | if (ret) |
|---|
| 902 | 1052 | return ret; |
|---|
| 903 | 1053 | |
|---|
| 904 | | - ret = spinand_read_id_op(spinand, spinand->id.data); |
|---|
| 905 | | - if (ret) |
|---|
| 906 | | - return ret; |
|---|
| 907 | | - |
|---|
| 908 | | - spinand->id.len = SPINAND_MAX_ID_LEN; |
|---|
| 909 | | - |
|---|
| 910 | | - ret = spinand_manufacturer_detect(spinand); |
|---|
| 1054 | + ret = spinand_id_detect(spinand); |
|---|
| 911 | 1055 | if (ret) { |
|---|
| 912 | 1056 | dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN, |
|---|
| 913 | 1057 | spinand->id.data); |
|---|
| .. | .. |
|---|
| 930 | 1074 | return 0; |
|---|
| 931 | 1075 | } |
|---|
| 932 | 1076 | |
|---|
| 933 | | -static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section, |
|---|
| 934 | | - struct mtd_oob_region *region) |
|---|
| 1077 | +static int spinand_reinit(struct mtd_info *mtd) |
|---|
| 935 | 1078 | { |
|---|
| 936 | | - return -ERANGE; |
|---|
| 1079 | + struct spinand_device *spinand = mtd_to_spinand(mtd); |
|---|
| 1080 | + struct nand_device *nand = mtd_to_nanddev(mtd); |
|---|
| 1081 | + struct device *dev = &spinand->spimem->spi->dev; |
|---|
| 1082 | + int ret, i; |
|---|
| 1083 | + |
|---|
| 1084 | + ret = spinand_init_quad_enable(spinand); |
|---|
| 1085 | + if (ret) |
|---|
| 1086 | + return ret; |
|---|
| 1087 | + |
|---|
| 1088 | + ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0); |
|---|
| 1089 | + if (ret) |
|---|
| 1090 | + return ret; |
|---|
| 1091 | + |
|---|
| 1092 | + ret = spinand_manufacturer_init(spinand); |
|---|
| 1093 | + if (ret) { |
|---|
| 1094 | + dev_err(dev, |
|---|
| 1095 | + "Failed to initialize the SPI NAND chip (err = %d)\n", |
|---|
| 1096 | + ret); |
|---|
| 1097 | + return ret; |
|---|
| 1098 | + } |
|---|
| 1099 | + |
|---|
| 1100 | + ret = spinand_create_dirmaps(spinand); |
|---|
| 1101 | + if (ret) { |
|---|
| 1102 | + dev_err(dev, |
|---|
| 1103 | + "Failed to create direct mappings for read/write operations (err = %d)\n", |
|---|
| 1104 | + ret); |
|---|
| 1105 | + return ret; |
|---|
| 1106 | + } |
|---|
| 1107 | + |
|---|
| 1108 | + /* After power up, all blocks are locked, so unlock them here. */ |
|---|
| 1109 | + for (i = 0; i < nand->memorg.ntargets; i++) { |
|---|
| 1110 | + ret = spinand_select_target(spinand, i); |
|---|
| 1111 | + if (ret) |
|---|
| 1112 | + return ret; |
|---|
| 1113 | + |
|---|
| 1114 | + ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); |
|---|
| 1115 | + if (ret) |
|---|
| 1116 | + return ret; |
|---|
| 1117 | + } |
|---|
| 1118 | + |
|---|
| 1119 | + return ret; |
|---|
| 937 | 1120 | } |
|---|
| 938 | 1121 | |
|---|
| 939 | | -static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section, |
|---|
| 940 | | - struct mtd_oob_region *region) |
|---|
| 1122 | +/** |
|---|
| 1123 | + * spinand_mtd_suspend - [MTD Interface] Suspend the spinand flash |
|---|
| 1124 | + * @mtd: MTD device structure |
|---|
| 1125 | + * |
|---|
| 1126 | + * Returns 0 for success or negative error code otherwise. |
|---|
| 1127 | + */ |
|---|
| 1128 | +static int spinand_mtd_suspend(struct mtd_info *mtd) |
|---|
| 941 | 1129 | { |
|---|
| 942 | | - if (section) |
|---|
| 943 | | - return -ERANGE; |
|---|
| 1130 | + struct spinand_device *spinand = mtd_to_spinand(mtd); |
|---|
| 1131 | + int ret = 0; |
|---|
| 944 | 1132 | |
|---|
| 945 | | - /* Reserve 2 bytes for the BBM. */ |
|---|
| 946 | | - region->offset = 2; |
|---|
| 947 | | - region->length = 62; |
|---|
| 1133 | + mutex_lock(&spinand->lock); |
|---|
| 948 | 1134 | |
|---|
| 949 | | - return 0; |
|---|
| 1135 | + return ret; |
|---|
| 950 | 1136 | } |
|---|
| 951 | 1137 | |
|---|
| 952 | | -static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = { |
|---|
| 953 | | - .ecc = spinand_noecc_ooblayout_ecc, |
|---|
| 954 | | - .free = spinand_noecc_ooblayout_free, |
|---|
| 955 | | -}; |
|---|
| 1138 | +/** |
|---|
| 1139 | + * spinand_mtd_resume - [MTD Interface] Resume the spinand flash |
|---|
| 1140 | + * @mtd: MTD device structure |
|---|
| 1141 | + */ |
|---|
| 1142 | +static void spinand_mtd_resume(struct mtd_info *mtd) |
|---|
| 1143 | +{ |
|---|
| 1144 | + struct spinand_device *spinand = mtd_to_spinand(mtd); |
|---|
| 1145 | + struct device *dev = &spinand->spimem->spi->dev; |
|---|
| 1146 | + int ret; |
|---|
| 1147 | + |
|---|
| 1148 | + ret = spinand_reinit(mtd); |
|---|
| 1149 | + if (ret) |
|---|
| 1150 | + dev_err(dev, "Failed to resume, ret =%d !\n", ret); |
|---|
| 1151 | + mutex_unlock(&spinand->lock); |
|---|
| 1152 | +} |
|---|
| 1153 | + |
|---|
| 1154 | +/** |
|---|
| 1155 | + * spinand_mtd_shutdown - [MTD Interface] Finish the current spinand operation and |
|---|
| 1156 | + * prevent further operations |
|---|
| 1157 | + * @mtd: MTD device structure |
|---|
| 1158 | + */ |
|---|
| 1159 | +static void spinand_mtd_shutdown(struct mtd_info *mtd) |
|---|
| 1160 | +{ |
|---|
| 1161 | + spinand_mtd_suspend(mtd); |
|---|
| 1162 | +} |
|---|
| 956 | 1163 | |
|---|
| 957 | 1164 | static int spinand_init(struct spinand_device *spinand) |
|---|
| 958 | 1165 | { |
|---|
| .. | .. |
|---|
| 1008 | 1215 | goto err_free_bufs; |
|---|
| 1009 | 1216 | } |
|---|
| 1010 | 1217 | |
|---|
| 1218 | + ret = spinand_create_dirmaps(spinand); |
|---|
| 1219 | + if (ret) { |
|---|
| 1220 | + dev_err(dev, |
|---|
| 1221 | + "Failed to create direct mappings for read/write operations (err = %d)\n", |
|---|
| 1222 | + ret); |
|---|
| 1223 | + goto err_manuf_cleanup; |
|---|
| 1224 | + } |
|---|
| 1225 | + |
|---|
| 1011 | 1226 | /* After power up, all blocks are locked, so unlock them here. */ |
|---|
| 1012 | 1227 | for (i = 0; i < nand->memorg.ntargets; i++) { |
|---|
| 1013 | 1228 | ret = spinand_select_target(spinand, i); |
|---|
| .. | .. |
|---|
| 1023 | 1238 | if (ret) |
|---|
| 1024 | 1239 | goto err_manuf_cleanup; |
|---|
| 1025 | 1240 | |
|---|
| 1241 | + /* SPI-NAND default ECC engine is on-die */ |
|---|
| 1242 | + nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; |
|---|
| 1243 | + nand->ecc.ondie_engine = &spinand_ondie_ecc_engine; |
|---|
| 1244 | + |
|---|
| 1026 | 1245 | /* |
|---|
| 1027 | 1246 | 	 * Right now, we don't support ECC, so the whole OOB |
|---|
| 1028 | 1247 | 	 * area is available to the user. |
|---|
| .. | .. |
|---|
| 1033 | 1252 | mtd->_block_markbad = spinand_mtd_block_markbad; |
|---|
| 1034 | 1253 | mtd->_block_isreserved = spinand_mtd_block_isreserved; |
|---|
| 1035 | 1254 | mtd->_erase = spinand_mtd_erase; |
|---|
| 1255 | + mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; |
|---|
| 1256 | + mtd->_suspend = spinand_mtd_suspend; |
|---|
| 1257 | + mtd->_resume = spinand_mtd_resume; |
|---|
| 1258 | + mtd->_reboot = spinand_mtd_shutdown; |
|---|
| 1036 | 1259 | |
|---|
| 1037 | 1260 | if (spinand->eccinfo.ooblayout) |
|---|
| 1038 | 1261 | mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout); |
|---|
| .. | .. |
|---|
| 1046 | 1269 | mtd->oobavail = ret; |
|---|
| 1047 | 1270 | |
|---|
| 1048 | 1271 | /* Propagate ECC information to mtd_info */ |
|---|
| 1049 | | - mtd->ecc_strength = nand->eccreq.strength; |
|---|
| 1050 | | - mtd->ecc_step_size = nand->eccreq.step_size; |
|---|
| 1272 | + mtd->ecc_strength = nanddev_get_ecc_requirements(nand)->strength; |
|---|
| 1273 | + mtd->ecc_step_size = nanddev_get_ecc_requirements(nand)->step_size; |
|---|
| 1274 | + if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_SFC)) |
|---|
| 1275 | + mtd->name = "spi-nand0"; |
|---|
| 1276 | + |
|---|
| 1277 | + if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH)) |
|---|
| 1278 | + nanddev_scan_bbt_in_flash(nand); |
|---|
| 1051 | 1279 | |
|---|
| 1052 | 1280 | return 0; |
|---|
| 1053 | 1281 | |
|---|