  ..   ..
        1  +/* SPDX-License-Identifier: GPL-2.0 */
   1    2   /*
   2    3    * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
   3    4    * Copyright (C) 2016 CNEX Labs
  ..   ..
  37   38
  38   39   #define PBLK_SECTOR (512)
  39   40   #define PBLK_EXPOSED_PAGE_SIZE (4096)
  40        -#define PBLK_MAX_REQ_ADDRS (64)
  41        -#define PBLK_MAX_REQ_ADDRS_PW (6)
  42   41
  43   42   #define PBLK_NR_CLOSE_JOBS (4)
  44   43
  45   44   #define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
  46        -
  47        -#define PBLK_COMMAND_TIMEOUT_MS 30000
  48   45
  49   46   /* Max 512 LUNs per device */
  50   47   #define PBLK_MAX_LUNS_BITMAP (4)
  ..   ..
  81   78           PBLK_BLK_ST_CLOSED = 0x2,
  82   79   };
  83   80
       81  +enum {
       82  +        PBLK_CHUNK_RESET_START,
       83  +        PBLK_CHUNK_RESET_DONE,
       84  +        PBLK_CHUNK_RESET_FAILED,
       85  +};
       86  +
  84   87   struct pblk_sec_meta {
  85   88           u64 reserved;
  86   89           __le64 lba;
  ..   ..
  99  102           PBLK_RL_LOW = 4
 100  103   };
 101  104
 102        -#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
 103        -#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)
      105  +#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
 104  106
 105  107   /* write buffer completion context */
 106  108   struct pblk_c_ctx {
  ..   ..
 117  119           void *private;
 118  120           unsigned long start_time;
 119  121           u64 lba;
 120        -};
 121        -
 122        -/* partial read context */
 123        -struct pblk_pr_ctx {
 124        -        struct bio *orig_bio;
 125        -        DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
 126        -        unsigned int orig_nr_secs;
 127        -        unsigned int bio_init_idx;
 128        -        void *ppa_ptr;
 129        -        dma_addr_t dma_ppa_list;
 130  122   };
 131  123
 132  124   /* Pad context */
  ..   ..
 198  190                                    * will be 4KB
 199  191                                    */
 200  192
      193  +        unsigned int back_thres;        /* Threshold that shall be maintained by
      194  +                                         * the backpointer in order to respect
      195  +                                         * geo->mw_cunits on a per chunk basis
      196  +                                         */
      197  +
 201  198           struct list_head pages;         /* List of data pages */
 202  199
 203  200           spinlock_t w_lock;              /* Write lock */
  ..   ..
 218  215   struct pblk_gc_rq {
 219  216           struct pblk_line *line;
 220  217           void *data;
 221        -        u64 paddr_list[PBLK_MAX_REQ_ADDRS];
 222        -        u64 lba_list[PBLK_MAX_REQ_ADDRS];
      218  +        u64 paddr_list[NVM_MAX_VLBA];
      219  +        u64 lba_list[NVM_MAX_VLBA];
 223  220           int nr_secs;
 224  221           int secs_to_gc;
 225  222           struct list_head list;
  ..   ..
 294  291
 295  292           struct timer_list u_timer;
 296  293
 297        -        unsigned long long nr_secs;
 298  294           unsigned long total_blocks;
 299  295
 300  296           atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
  ..   ..
 429  425
 430  426   struct pblk_w_err_gc {
 431  427           int has_write_err;
      428  +        int has_gc_err;
 432  429           __le64 *lba_list;
 433  430   };
 434  431
  ..   ..
 454  451           int meta_line;                  /* Metadata line id */
 455  452           int meta_distance;              /* Distance between data and metadata */
 456  453
 457        -        u64 smeta_ssec;                 /* Sector where smeta starts */
 458  454           u64 emeta_ssec;                 /* Sector where emeta starts */
 459  455
 460  456           unsigned int sec_in_line;       /* Number of usable secs in line */
  ..   ..
 476  472           __le32 *vsc;                    /* Valid sector count in line */
 477  473
 478  474           struct kref ref;                /* Write buffer L2P references */
      475  +        atomic_t sec_to_update;         /* Outstanding L2P updates to ppa */
 479  476
 480  477           struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */
 481  478
  ..   ..
 483  480   };
 484  481
 485  482   #define PBLK_DATA_LINES 4
 486        -
 487        -enum {
 488        -        PBLK_KMALLOC_META = 1,
 489        -        PBLK_VMALLOC_META = 2,
 490        -};
 491  483
 492  484   enum {
 493  485           PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
  ..   ..
 524  516
 525  517           __le32 *vsc_list;               /* Valid sector counts for all lines */
 526  518
 527        -        /* Metadata allocation type: VMALLOC | KMALLOC */
 528        -        int emeta_alloc_type;
 529        -
 530  519           /* Pre-allocated metadata for data lines */
 531  520           struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
 532  521           struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
 533  522           unsigned long meta_bitmap;
      523  +
      524  +        /* Cache and mempool for map/invalid bitmaps */
      525  +        struct kmem_cache *bitmap_cache;
      526  +        mempool_t *bitmap_pool;
 534  527
 535  528           /* Helpers for fast bitmap calculations */
 536  529           unsigned long *bb_template;
  ..   ..
 617  610           int state;                      /* pblk line state */
 618  611
 619  612           int min_write_pgs; /* Minimum amount of pages required by controller */
      613  +        int min_write_pgs_data; /* Minimum amount of payload pages */
 620  614           int max_write_pgs; /* Maximum amount of pages supported by controller */
      615  +        int oob_meta_size; /* Size of OOB sector metadata */
 621  616
 622  617           sector_t capacity; /* Device capacity when bad blocks are subtracted */
 623  618
  ..   ..
 629  624
 630  625           int sec_per_write;
 631  626
 632        -        unsigned char instance_uuid[16];
      627  +        guid_t instance_uuid;
 633  628
 634  629           /* Persistent write amplification counters, 4kb sector I/Os */
 635  630           atomic64_t user_wa;             /* Sectors written by user */
  ..   ..
 725  720   /*
 726  721    * pblk ring buffer operations
 727  722    */
 728        -int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
 729        -                 unsigned int power_size, unsigned int power_seg_sz);
 730        -unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
 731        -void *pblk_rb_entries_ref(struct pblk_rb *rb);
      723  +int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
      724  +                 unsigned int seg_sz);
 732  725   int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 733  726                              unsigned int nr_entries, unsigned int *pos);
 734  727   int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
  ..   ..
 746  739                            unsigned int pos, unsigned int nr_entries,
 747  740                            unsigned int count);
 748  741   int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
 749        -                        struct ppa_addr ppa, int bio_iter, bool advanced_bio);
      742  +                        struct ppa_addr ppa);
 750  743   unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
 751  744
 752  745   unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
 753  746   unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
 754        -struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
 755        -                                              struct ppa_addr *ppa);
      747  +unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
      748  +                              unsigned int nr_entries);
 756  749   void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
 757  750   unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
 758  751
  ..   ..
 762  755
 763  756   int pblk_rb_tear_down_check(struct pblk_rb *rb);
 764  757   int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
 765        -void pblk_rb_data_free(struct pblk_rb *rb);
      758  +void pblk_rb_free(struct pblk_rb *rb);
 766  759   ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
 767  760
 768  761   /*
  ..   ..
 770  763    */
 771  764   struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
 772  765   void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
      766  +int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
      767  +void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
 773  768   void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 774  769   int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 775  770                           struct pblk_c_ctx *c_ctx);
 776  771   void pblk_discard(struct pblk *pblk, struct bio *bio);
 777        -struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk);
      772  +struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
 778  773   struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
 779  774                                           struct nvm_chk_meta *lp,
 780  775                                           struct ppa_addr ppa);
 781  776   void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
 782  777   void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
 783        -int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
 784        -int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
      778  +int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
      779  +int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
 785  780   int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
 786        -struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 787        -                              unsigned int nr_secs, unsigned int len,
 788        -                              int alloc_type, gfp_t gfp_mask);
      781  +void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
 789  782   struct pblk_line *pblk_line_get(struct pblk *pblk);
 790  783   struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
 791  784   struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
      785  +void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
      786  +void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
 792  787   int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
 793  788   void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
 794  789   struct pblk_line *pblk_line_get_data(struct pblk *pblk);
  ..   ..
 806  801                             void (*work)(struct work_struct *), gfp_t gfp_mask,
 807  802                             struct workqueue_struct *wq);
 808  803   u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
 809        -int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
 810        -int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
      804  +int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
      805  +int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
 811  806                            void *emeta_buf);
 812  807   int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
 813  808   void pblk_line_put(struct kref *ref);
  ..   ..
 818  813   u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 819  814   u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 820  815   int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 821        -                   unsigned long secs_to_flush);
 822        -void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
 823        -void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
      816  +                   unsigned long secs_to_flush, bool skip_meta);
      817  +void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
 824  818                     unsigned long *lun_bitmap);
 825        -void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
 826        -void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 827        -                unsigned long *lun_bitmap);
      819  +void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
      820  +void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
      821  +void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
 828  822   int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 829  823                          int nr_pages);
 830  824   void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
  ..   ..
 841  835                             struct pblk_line *gc_line, u64 paddr);
 842  836   void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
 843  837                             u64 *lba_list, int nr_secs);
 844        -void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
 845        -                         sector_t blba, int nr_secs);
      838  +int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
      839  +                        sector_t blba, int nr_secs, bool *from_cache);
      840  +void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
      841  +void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
 846  842
 847  843   /*
 848  844    * pblk user I/O write path
 849  845    */
 850        -int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
      846  +void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
 851  847                           unsigned long flags);
 852  848   int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 853  849
 854  850   /*
 855  851    * pblk map
 856  852    */
 857        -void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
      853  +int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 858  854                          unsigned int sentry, unsigned long *lun_bitmap,
 859  855                          unsigned int valid_secs, struct ppa_addr *erase_ppa);
 860        -void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
      856  +int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
 861  857                    unsigned long *lun_bitmap, unsigned int valid_secs,
 862  858                    unsigned int off);
 863  859
  ..   ..
 873  869    * pblk read path
 874  870    */
 875  871   extern struct bio_set pblk_bio_set;
 876        -int pblk_submit_read(struct pblk *pblk, struct bio *bio);
      872  +void pblk_submit_read(struct pblk *pblk, struct bio *bio);
 877  873   int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 878  874   /*
 879  875    * pblk recovery
  ..   ..
 888  884   #define PBLK_GC_MAX_READERS 8   /* Max number of outstanding GC reader jobs */
 889  885   #define PBLK_GC_RQ_QD 128       /* Queue depth for inflight GC requests */
 890  886   #define PBLK_GC_L_QD 4          /* Queue depth for inflight GC lines */
 891        -#define PBLK_GC_RSV_LINE 1      /* Reserved lines for GC */
 892  887
 893  888   int pblk_gc_init(struct pblk *pblk);
 894  889   void pblk_gc_exit(struct pblk *pblk, bool graceful);
  ..   ..
 899  894   void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
 900  895                                 int *gc_active);
 901  896   int pblk_gc_sysfs_force(struct pblk *pblk, int force);
      897  +void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line);
 902  898
 903  899   /*
 904  900    * pblk rate limiter
 905  901    */
 906        -void pblk_rl_init(struct pblk_rl *rl, int budget);
      902  +void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold);
 907  903   void pblk_rl_free(struct pblk_rl *rl);
 908  904   void pblk_rl_update_rates(struct pblk_rl *rl);
 909  905   int pblk_rl_high_thrs(struct pblk_rl *rl);
  ..   ..
 929  925    */
 930  926   int pblk_sysfs_init(struct gendisk *tdisk);
 931  927   void pblk_sysfs_exit(struct gendisk *tdisk);
 932        -
 933        -static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
 934        -{
 935        -        if (type == PBLK_KMALLOC_META)
 936        -                return kmalloc(size, flags);
 937        -        return vmalloc(size);
 938        -}
 939        -
 940        -static inline void pblk_mfree(void *ptr, int type)
 941        -{
 942        -        if (type == PBLK_KMALLOC_META)
 943        -                kfree(ptr);
 944        -        else
 945        -                vfree(ptr);
 946        -}
 947  928
 948  929   static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
 949  930   {
  ..   ..
 976  957           return le32_to_cpu(*line->vsc);
 977  958   }
 978  959
 979        -static inline int pblk_pad_distance(struct pblk *pblk)
 980        -{
 981        -        struct nvm_tgt_dev *dev = pblk->dev;
 982        -        struct nvm_geo *geo = &dev->geo;
 983        -
 984        -        return geo->mw_cunits * geo->all_luns * geo->ws_opt;
 985        -}
 986        -
 987        -static inline int pblk_ppa_to_line(struct ppa_addr p)
      960  +static inline int pblk_ppa_to_line_id(struct ppa_addr p)
 988  961   {
 989  962           return p.a.blk;
      963  +}
      964  +
      965  +static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
      966  +                                                 struct ppa_addr p)
      967  +{
      968  +        return &pblk->lines[pblk_ppa_to_line_id(p)];
 990  969   }
 991  970
 992  971   static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
  ..   ..
1034 1013           return ppa;
1035 1014   }
1036 1015
     1016  +static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
     1017  +                                                         struct ppa_addr p)
     1018  +{
     1019  +        struct nvm_tgt_dev *dev = pblk->dev;
     1020  +        struct nvm_geo *geo = &dev->geo;
     1021  +        struct pblk_line *line = pblk_ppa_to_line(pblk, p);
     1022  +        int pos = pblk_ppa_to_pos(geo, p);
     1023  +
     1024  +        return &line->chks[pos];
     1025  +}
     1026  +
     1027  +static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
     1028  +                                             struct ppa_addr p)
     1029  +{
     1030  +        struct nvm_tgt_dev *dev = pblk->dev;
     1031  +
     1032  +        return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
     1033  +}
     1034  +
1037 1035   static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
1038 1036                                               struct ppa_addr p)
1039 1037   {
  ..   ..
1067 1065
1068 1066   static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
1069 1067   {
1070        -        struct ppa_addr ppa64;
     1068  +        struct nvm_tgt_dev *dev = pblk->dev;
1071 1069
1072        -        ppa64.ppa = 0;
1073        -
1074        -        if (ppa32 == -1) {
1075        -                ppa64.ppa = ADDR_EMPTY;
1076        -        } else if (ppa32 & (1U << 31)) {
1077        -                ppa64.c.line = ppa32 & ((~0U) >> 1);
1078        -                ppa64.c.is_cached = 1;
1079        -        } else {
1080        -                struct nvm_tgt_dev *dev = pblk->dev;
1081        -                struct nvm_geo *geo = &dev->geo;
1082        -
1083        -                if (geo->version == NVM_OCSSD_SPEC_12) {
1084        -                        struct nvm_addrf_12 *ppaf =
1085        -                                (struct nvm_addrf_12 *)&pblk->addrf;
1086        -
1087        -                        ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
1088        -                                                        ppaf->ch_offset;
1089        -                        ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
1090        -                                                        ppaf->lun_offset;
1091        -                        ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
1092        -                                                        ppaf->blk_offset;
1093        -                        ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
1094        -                                                        ppaf->pg_offset;
1095        -                        ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
1096        -                                                        ppaf->pln_offset;
1097        -                        ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
1098        -                                                        ppaf->sec_offset;
1099        -                } else {
1100        -                        struct nvm_addrf *lbaf = &pblk->addrf;
1101        -
1102        -                        ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
1103        -                                                        lbaf->ch_offset;
1104        -                        ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
1105        -                                                        lbaf->lun_offset;
1106        -                        ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
1107        -                                                        lbaf->chk_offset;
1108        -                        ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
1109        -                                                        lbaf->sec_offset;
1110        -                }
1111        -        }
1112        -
1113        -        return ppa64;
     1070  +        return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
1114 1071   }
1115 1072
1116 1073   static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
1117 1074   {
1118        -        u32 ppa32 = 0;
     1075  +        struct nvm_tgt_dev *dev = pblk->dev;
1119 1076
1120        -        if (ppa64.ppa == ADDR_EMPTY) {
1121        -                ppa32 = ~0U;
1122        -        } else if (ppa64.c.is_cached) {
1123        -                ppa32 |= ppa64.c.line;
1124        -                ppa32 |= 1U << 31;
1125        -        } else {
1126        -                struct nvm_tgt_dev *dev = pblk->dev;
1127        -                struct nvm_geo *geo = &dev->geo;
1128        -
1129        -                if (geo->version == NVM_OCSSD_SPEC_12) {
1130        -                        struct nvm_addrf_12 *ppaf =
1131        -                                (struct nvm_addrf_12 *)&pblk->addrf;
1132        -
1133        -                        ppa32 |= ppa64.g.ch << ppaf->ch_offset;
1134        -                        ppa32 |= ppa64.g.lun << ppaf->lun_offset;
1135        -                        ppa32 |= ppa64.g.blk << ppaf->blk_offset;
1136        -                        ppa32 |= ppa64.g.pg << ppaf->pg_offset;
1137        -                        ppa32 |= ppa64.g.pl << ppaf->pln_offset;
1138        -                        ppa32 |= ppa64.g.sec << ppaf->sec_offset;
1139        -                } else {
1140        -                        struct nvm_addrf *lbaf = &pblk->addrf;
1141        -
1142        -                        ppa32 |= ppa64.m.grp << lbaf->ch_offset;
1143        -                        ppa32 |= ppa64.m.pu << lbaf->lun_offset;
1144        -                        ppa32 |= ppa64.m.chk << lbaf->chk_offset;
1145        -                        ppa32 |= ppa64.m.sec << lbaf->sec_offset;
1146        -                }
1147        -        }
1148        -
1149        -        return ppa32;
     1077  +        return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
1150 1078   }
1151 1079
1152 1080   static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
  ..   ..
1255 1183           return crc;
1256 1184   }
1257 1185
1258        -static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
1259        -{
1260        -        struct nvm_tgt_dev *dev = pblk->dev;
1261        -        struct nvm_geo *geo = &dev->geo;
1262        -        int flags;
1263        -
1264        -        if (geo->version == NVM_OCSSD_SPEC_20)
1265        -                return 0;
1266        -
1267        -        flags = geo->pln_mode >> 1;
1268        -
1269        -        if (type == PBLK_WRITE)
1270        -                flags |= NVM_IO_SCRAMBLE_ENABLE;
1271        -
1272        -        return flags;
1273        -}
1274        -
1275        -enum {
1276        -        PBLK_READ_RANDOM = 0,
1277        -        PBLK_READ_SEQUENTIAL = 1,
1278        -};
1279        -
1280        -static inline int pblk_set_read_mode(struct pblk *pblk, int type)
1281        -{
1282        -        struct nvm_tgt_dev *dev = pblk->dev;
1283        -        struct nvm_geo *geo = &dev->geo;
1284        -        int flags;
1285        -
1286        -        if (geo->version == NVM_OCSSD_SPEC_20)
1287        -                return 0;
1288        -
1289        -        flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
1290        -        if (type == PBLK_READ_SEQUENTIAL)
1291        -                flags |= geo->pln_mode >> 1;
1292        -
1293        -        return flags;
1294        -}
1295        -
1296 1186   static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
1297 1187   {
1298 1188           return !(nr_secs % pblk->min_write_pgs);
  ..   ..
1375 1265   static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
1376 1266   {
1377 1267           struct nvm_tgt_dev *dev = pblk->dev;
1378        -        struct ppa_addr *ppa_list;
1379        -
1380        -        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
     1268  +        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
1381 1269
1382 1270           if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
1383 1271                   WARN_ON(1);
  ..   ..
1386 1274
1387 1275           if (rqd->opcode == NVM_OP_PWRITE) {
1388 1276                   struct pblk_line *line;
1389        -                struct ppa_addr ppa;
1390 1277                   int i;
1391 1278
1392 1279                   for (i = 0; i < rqd->nr_ppas; i++) {
1393        -                        ppa = ppa_list[i];
1394        -                        line = &pblk->lines[pblk_ppa_to_line(ppa)];
     1280  +                        line = pblk_ppa_to_line(pblk, ppa_list[i]);
1395 1281
1396 1282                           spin_lock(&line->lock);
1397 1283                           if (line->state != PBLK_LINESTATE_OPEN) {
  ..   ..
1434 1320           return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
1435 1321   }
1436 1322
1437        -static inline void pblk_setup_uuid(struct pblk *pblk)
     1323  +static inline char *pblk_disk_name(struct pblk *pblk)
1438 1324   {
1439        -        uuid_le uuid;
     1325  +        struct gendisk *disk = pblk->disk;
1440 1326
1441        -        uuid_le_gen(&uuid);
1442        -        memcpy(pblk->instance_uuid, uuid.b, 16);
     1327  +        return disk->disk_name;
     1328  +}
     1329  +
     1330  +static inline unsigned int pblk_get_min_chks(struct pblk *pblk)
     1331  +{
     1332  +        struct pblk_line_meta *lm = &pblk->lm;
     1333  +        /* In a worst-case scenario every line will have OP invalid sectors.
     1334  +         * We will then need a minimum of 1/OP lines to free up a single line
     1335  +         */
     1336  +
     1337  +        return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line;
     1338  +}
     1339  +
     1340  +static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
     1341  +                                                  void *meta, int index)
     1342  +{
     1343  +        return meta +
     1344  +               max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
     1345  +               * index;
     1346  +}
     1347  +
     1348  +static inline int pblk_dma_meta_size(struct pblk *pblk)
     1349  +{
     1350  +        return max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
     1351  +               * NVM_MAX_VLBA;
     1352  +}
     1353  +
     1354  +static inline int pblk_is_oob_meta_supported(struct pblk *pblk)
     1355  +{
     1356  +        return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta);
1443 1357   }
1444 1358   #endif /* PBLK_H_ */
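
Note on the new OOB metadata handling at the end of the patch: pblk_get_meta() now indexes per-sector metadata with a stride of max(sizeof(struct pblk_sec_meta), pblk->oob_meta_size), and pblk_dma_meta_size() sizes the DMA buffer for NVM_MAX_VLBA such slots. The stand-alone userspace sketch below only illustrates that stride arithmetic; the 32-byte OOB size and the MAX_VLBA stand-in are made-up example values, not taken from the patch or the OCSSD spec.

/* stride_demo.c - hypothetical illustration of the pblk_get_meta() stride */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Mirrors struct pblk_sec_meta from the patch: 16 bytes per sector. */
struct sec_meta {
        uint64_t reserved;
        uint64_t lba;
};

#define MAX_VLBA 64                     /* stand-in for NVM_MAX_VLBA */

/* Per-sector stride: at least sizeof(struct sec_meta), more if the
 * device exposes a larger OOB area (oob_meta_size).
 */
static size_t meta_stride(size_t oob_meta_size)
{
        size_t min = sizeof(struct sec_meta);

        return oob_meta_size > min ? oob_meta_size : min;
}

/* Same pointer arithmetic as pblk_get_meta(): base + stride * index. */
static struct sec_meta *get_meta(void *meta, size_t oob_meta_size, int index)
{
        return (struct sec_meta *)((char *)meta +
                                   meta_stride(oob_meta_size) * index);
}

int main(void)
{
        size_t oob = 32;                /* hypothetical per-sector OOB size */
        static uint64_t buf[(32 * MAX_VLBA) / sizeof(uint64_t)];

        /* The lba of sector 3 lives at offset stride * 3, just like
         * pblk_get_meta(pblk, meta_list, 3)->lba in the driver.
         */
        get_meta(buf, oob, 3)->lba = 42;
        printf("stride=%zu dma_meta_size=%zu lba[3]=%llu\n",
               meta_stride(oob), meta_stride(oob) * MAX_VLBA,
               (unsigned long long)get_meta(buf, oob, 3)->lba);
        return 0;
}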