.. | .. |
---|
86 | 86 | typedef int (nvm_id_fn)(struct nvm_dev *); |
---|
87 | 87 | typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); |
---|
88 | 88 | typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); |
---|
89 | | -typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *, |
---|
90 | | - sector_t, int); |
---|
91 | | -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); |
---|
92 | | -typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *); |
---|
93 | | -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); |
---|
| 89 | +typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, |
---|
| 90 | + struct nvm_chk_meta *); |
---|
| 91 | +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *); |
---|
| 92 | +typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int); |
---|
94 | 93 | typedef void (nvm_destroy_dma_pool_fn)(void *); |
---|
95 | 94 | typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, |
---|
96 | 95 | dma_addr_t *); |
---|
.. | .. |
---|
104 | 103 | nvm_get_chk_meta_fn *get_chk_meta; |
---|
105 | 104 | |
---|
106 | 105 | nvm_submit_io_fn *submit_io; |
---|
107 | | - nvm_submit_io_sync_fn *submit_io_sync; |
---|
108 | 106 | |
---|
109 | 107 | nvm_create_dma_pool_fn *create_dma_pool; |
---|
110 | 108 | nvm_destroy_dma_pool_fn *destroy_dma_pool; |
---|
.. | .. |
---|
305 | 303 | u64 ppa_status; /* ppa media status */ |
---|
306 | 304 | int error; |
---|
307 | 305 | |
---|
| 306 | + int is_seq; /* Sequential hint flag. 1.2 only */ |
---|
| 307 | + |
---|
308 | 308 | void *private; |
---|
309 | 309 | }; |
---|
310 | 310 | |
---|
.. | .. |
---|
/*
 * Return the target's private per-request data (PDU), which is allocated
 * immediately after the nvm_rq structure itself.
 */
static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
---|
| 320 | + |
---|
/*
 * Return the PPA list for a request: multi-sector requests carry an external
 * ppa_list buffer, while a single-sector request stores its address inline
 * in ppa_addr.
 */
static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
{
	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}
---|
320 | 325 | |
---|
321 | 326 | enum { |
---|
.. | .. |
---|
350 | 355 | u32 clba; /* sectors per chunk */ |
---|
351 | 356 | u16 csecs; /* sector size */ |
---|
352 | 357 | u16 sos; /* out-of-band area size */ |
---|
| 358 | + bool ext; /* metadata in extended data buffer */ |
---|
| 359 | + u32 mdts; /* Max data transfer size*/ |
---|
353 | 360 | |
---|
354 | 361 | /* device write constrains */ |
---|
355 | 362 | u32 ws_min; /* minimum write size */ |
---|
.. | .. |
---|
419 | 426 | char name[DISK_NAME_LEN]; |
---|
420 | 427 | void *private_data; |
---|
421 | 428 | |
---|
| 429 | + struct kref ref; |
---|
422 | 430 | void *rmap; |
---|
423 | 431 | |
---|
424 | 432 | struct mutex mlock; |
---|
.. | .. |
---|
485 | 493 | return l; |
---|
486 | 494 | } |
---|
487 | 495 | |
---|
488 | | -typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); |
---|
| 496 | +static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf, |
---|
| 497 | + struct ppa_addr p) |
---|
| 498 | +{ |
---|
| 499 | + struct nvm_geo *geo = &dev->geo; |
---|
| 500 | + u64 caddr; |
---|
| 501 | + |
---|
| 502 | + if (geo->version == NVM_OCSSD_SPEC_12) { |
---|
| 503 | + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf; |
---|
| 504 | + |
---|
| 505 | + caddr = (u64)p.g.pg << ppaf->pg_offset; |
---|
| 506 | + caddr |= (u64)p.g.pl << ppaf->pln_offset; |
---|
| 507 | + caddr |= (u64)p.g.sec << ppaf->sec_offset; |
---|
| 508 | + } else { |
---|
| 509 | + caddr = p.m.sec; |
---|
| 510 | + } |
---|
| 511 | + |
---|
| 512 | + return caddr; |
---|
| 513 | +} |
---|
| 514 | + |
---|
| 515 | +static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev, |
---|
| 516 | + void *addrf, u32 ppa32) |
---|
| 517 | +{ |
---|
| 518 | + struct ppa_addr ppa64; |
---|
| 519 | + |
---|
| 520 | + ppa64.ppa = 0; |
---|
| 521 | + |
---|
| 522 | + if (ppa32 == -1) { |
---|
| 523 | + ppa64.ppa = ADDR_EMPTY; |
---|
| 524 | + } else if (ppa32 & (1U << 31)) { |
---|
| 525 | + ppa64.c.line = ppa32 & ((~0U) >> 1); |
---|
| 526 | + ppa64.c.is_cached = 1; |
---|
| 527 | + } else { |
---|
| 528 | + struct nvm_geo *geo = &dev->geo; |
---|
| 529 | + |
---|
| 530 | + if (geo->version == NVM_OCSSD_SPEC_12) { |
---|
| 531 | + struct nvm_addrf_12 *ppaf = addrf; |
---|
| 532 | + |
---|
| 533 | + ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> |
---|
| 534 | + ppaf->ch_offset; |
---|
| 535 | + ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> |
---|
| 536 | + ppaf->lun_offset; |
---|
| 537 | + ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> |
---|
| 538 | + ppaf->blk_offset; |
---|
| 539 | + ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> |
---|
| 540 | + ppaf->pg_offset; |
---|
| 541 | + ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> |
---|
| 542 | + ppaf->pln_offset; |
---|
| 543 | + ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> |
---|
| 544 | + ppaf->sec_offset; |
---|
| 545 | + } else { |
---|
| 546 | + struct nvm_addrf *lbaf = addrf; |
---|
| 547 | + |
---|
| 548 | + ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> |
---|
| 549 | + lbaf->ch_offset; |
---|
| 550 | + ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> |
---|
| 551 | + lbaf->lun_offset; |
---|
| 552 | + ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> |
---|
| 553 | + lbaf->chk_offset; |
---|
| 554 | + ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> |
---|
| 555 | + lbaf->sec_offset; |
---|
| 556 | + } |
---|
| 557 | + } |
---|
| 558 | + |
---|
| 559 | + return ppa64; |
---|
| 560 | +} |
---|
| 561 | + |
---|
| 562 | +static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev, |
---|
| 563 | + void *addrf, struct ppa_addr ppa64) |
---|
| 564 | +{ |
---|
| 565 | + u32 ppa32 = 0; |
---|
| 566 | + |
---|
| 567 | + if (ppa64.ppa == ADDR_EMPTY) { |
---|
| 568 | + ppa32 = ~0U; |
---|
| 569 | + } else if (ppa64.c.is_cached) { |
---|
| 570 | + ppa32 |= ppa64.c.line; |
---|
| 571 | + ppa32 |= 1U << 31; |
---|
| 572 | + } else { |
---|
| 573 | + struct nvm_geo *geo = &dev->geo; |
---|
| 574 | + |
---|
| 575 | + if (geo->version == NVM_OCSSD_SPEC_12) { |
---|
| 576 | + struct nvm_addrf_12 *ppaf = addrf; |
---|
| 577 | + |
---|
| 578 | + ppa32 |= ppa64.g.ch << ppaf->ch_offset; |
---|
| 579 | + ppa32 |= ppa64.g.lun << ppaf->lun_offset; |
---|
| 580 | + ppa32 |= ppa64.g.blk << ppaf->blk_offset; |
---|
| 581 | + ppa32 |= ppa64.g.pg << ppaf->pg_offset; |
---|
| 582 | + ppa32 |= ppa64.g.pl << ppaf->pln_offset; |
---|
| 583 | + ppa32 |= ppa64.g.sec << ppaf->sec_offset; |
---|
| 584 | + } else { |
---|
| 585 | + struct nvm_addrf *lbaf = addrf; |
---|
| 586 | + |
---|
| 587 | + ppa32 |= ppa64.m.grp << lbaf->ch_offset; |
---|
| 588 | + ppa32 |= ppa64.m.pu << lbaf->lun_offset; |
---|
| 589 | + ppa32 |= ppa64.m.chk << lbaf->chk_offset; |
---|
| 590 | + ppa32 |= ppa64.m.sec << lbaf->sec_offset; |
---|
| 591 | + } |
---|
| 592 | + } |
---|
| 593 | + |
---|
| 594 | + return ppa32; |
---|
| 595 | +} |
---|
| 596 | + |
---|
/*
 * Advance *ppa to the next sector within its chunk.
 *
 * 1.2 geometries carry: sector wraps at ws_min into the page, page wraps at
 * num_pg into the plane. 2.0 geometries use a flat sector counter up to
 * clba (sectors per chunk).
 *
 * Returns 1 when the increment moved past the last sector of the chunk,
 * 0 otherwise. NOTE(review): in the 1.2 "last" case the plane field is
 * written back as num_pln (one past the valid range) — callers appear
 * expected to stop on last == 1; confirm no caller reuses the address.
 */
static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
				      struct ppa_addr *ppa)
{
	struct nvm_geo *geo = &dev->geo;
	int last = 0;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		int sec = ppa->g.sec;

		sec++;
		if (sec == geo->ws_min) {
			/* sector wrapped: carry into page */
			int pg = ppa->g.pg;

			sec = 0;
			pg++;
			if (pg == geo->num_pg) {
				/* page wrapped: carry into plane */
				int pl = ppa->g.pl;

				pg = 0;
				pl++;
				if (pl == geo->num_pln)
					last = 1;

				ppa->g.pl = pl;
			}
			ppa->g.pg = pg;
		}
		ppa->g.sec = sec;
	} else {
		ppa->m.sec++;
		if (ppa->m.sec == geo->clba)
			last = 1;
	}

	return last;
}
---|
| 633 | + |
---|
489 | 634 | typedef sector_t (nvm_tgt_capacity_fn)(void *); |
---|
490 | 635 | typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, |
---|
491 | 636 | int flags); |
---|
.. | .. |
---|
493 | 638 | typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); |
---|
494 | 639 | typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); |
---|
495 | 640 | |
---|
/*
 * Target flags for struct nvm_tgt_type.flags.
 * NOTE(review): names suggest these select whether the device or the host
 * maintains the L2P (logical-to-physical) table — confirm against the
 * lightnvm core's use of the flags.
 */
enum {
	NVM_TGT_F_DEV_L2P = 0,
	NVM_TGT_F_HOST_L2P = 1 << 0,
};
---|
| 645 | + |
---|
496 | 646 | struct nvm_tgt_type { |
---|
497 | 647 | const char *name; |
---|
498 | 648 | unsigned int version[3]; |
---|
| 649 | + int flags; |
---|
499 | 650 | |
---|
500 | 651 | /* target entry points */ |
---|
501 | | - nvm_tgt_make_rq_fn *make_rq; |
---|
| 652 | + const struct block_device_operations *bops; |
---|
502 | 653 | nvm_tgt_capacity_fn *capacity; |
---|
503 | 654 | |
---|
504 | 655 | /* module-specific init/teardown */ |
---|
.. | .. |
---|
524 | 675 | extern int nvm_register(struct nvm_dev *); |
---|
525 | 676 | extern void nvm_unregister(struct nvm_dev *); |
---|
526 | 677 | |
---|
527 | | - |
---|
528 | | -extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, |
---|
529 | | - struct nvm_chk_meta *meta, struct ppa_addr ppa, |
---|
530 | | - int nchks); |
---|
531 | | - |
---|
532 | | -extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, |
---|
| 678 | +extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr, |
---|
| 679 | + int, struct nvm_chk_meta *); |
---|
| 680 | +extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *, |
---|
533 | 681 | int, int); |
---|
534 | | -extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); |
---|
535 | | -extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); |
---|
| 682 | +extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *); |
---|
| 683 | +extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *); |
---|
536 | 684 | extern void nvm_end_io(struct nvm_rq *); |
---|
537 | | -extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); |
---|
538 | | -extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); |
---|
539 | 685 | |
---|
540 | 686 | #else /* CONFIG_NVM */ |
---|
541 | 687 | struct nvm_dev_ops; |
---|