hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/lightnvm/pblk-core.c
....@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0
12 /*
23 * Copyright (C) 2016 CNEX Labs
34 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
....@@ -16,7 +17,10 @@
1617 *
1718 */
1819
20
+#define CREATE_TRACE_POINTS
21
+
1922 #include "pblk.h"
23
+#include "pblk-trace.h"
2024
2125 static void pblk_line_mark_bb(struct work_struct *work)
2226 {
....@@ -27,12 +31,12 @@
2731 struct ppa_addr *ppa = line_ws->priv;
2832 int ret;
2933
30
- ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
34
+ ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
3135 if (ret) {
3236 struct pblk_line *line;
3337 int pos;
3438
35
- line = &pblk->lines[pblk_ppa_to_line(*ppa)];
39
+ line = pblk_ppa_to_line(pblk, *ppa);
3640 pos = pblk_ppa_to_pos(&dev->geo, *ppa);
3741
3842 pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
....@@ -80,18 +84,27 @@
8084 struct pblk_line *line;
8185 int pos;
8286
83
- line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
87
+ line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
8488 pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
8589 chunk = &line->chks[pos];
8690
8791 atomic_dec(&line->left_seblks);
8892
8993 if (rqd->error) {
94
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
95
+ &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
96
+
9097 chunk->state = NVM_CHK_ST_OFFLINE;
9198 pblk_mark_bb(pblk, line, rqd->ppa_addr);
9299 } else {
100
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
101
+ &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
102
+
93103 chunk->state = NVM_CHK_ST_FREE;
94104 }
105
+
106
+ trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
107
+ chunk->state);
95108
96109 atomic_dec(&pblk->inflight_io);
97110 }
....@@ -108,9 +121,9 @@
108121 /*
109122 * Get information for all chunks from the device.
110123 *
111
- * The caller is responsible for freeing the returned structure
124
+ * The caller is responsible for freeing (vmalloc) the returned structure
112125 */
113
-struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
126
+struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
114127 {
115128 struct nvm_tgt_dev *dev = pblk->dev;
116129 struct nvm_geo *geo = &dev->geo;
....@@ -122,13 +135,13 @@
122135 ppa.ppa = 0;
123136
124137 len = geo->all_chunks * sizeof(*meta);
125
- meta = kzalloc(len, GFP_KERNEL);
138
+ meta = vzalloc(len);
126139 if (!meta)
127140 return ERR_PTR(-ENOMEM);
128141
129
- ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
142
+ ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
130143 if (ret) {
131
- kfree(meta);
144
+ vfree(meta);
132145 return ERR_PTR(-EIO);
133146 }
134147
....@@ -192,7 +205,6 @@
192205 {
193206 struct pblk_line *line;
194207 u64 paddr;
195
- int line_id;
196208
197209 #ifdef CONFIG_NVM_PBLK_DEBUG
198210 /* Callers must ensure that the ppa points to a device address */
....@@ -200,8 +212,7 @@
200212 BUG_ON(pblk_ppa_empty(ppa));
201213 #endif
202214
203
- line_id = pblk_ppa_to_line(ppa);
204
- line = &pblk->lines[line_id];
215
+ line = pblk_ppa_to_line(pblk, ppa);
205216 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
206217
207218 __pblk_map_invalidate(pblk, line, paddr);
....@@ -225,6 +236,33 @@
225236 pblk_trans_map_set(pblk, lba, ppa);
226237 }
227238 spin_unlock(&pblk->trans_lock);
239
+}
240
+
241
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
242
+{
243
+ struct nvm_tgt_dev *dev = pblk->dev;
244
+
245
+ rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
246
+ &rqd->dma_meta_list);
247
+ if (!rqd->meta_list)
248
+ return -ENOMEM;
249
+
250
+ if (rqd->nr_ppas == 1)
251
+ return 0;
252
+
253
+ rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
254
+ rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
255
+
256
+ return 0;
257
+}
258
+
259
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
260
+{
261
+ struct nvm_tgt_dev *dev = pblk->dev;
262
+
263
+ if (rqd->meta_list)
264
+ nvm_dev_dma_free(dev->parent, rqd->meta_list,
265
+ rqd->dma_meta_list);
228266 }
229267
230268 /* Caller must guarantee that the request is a valid type */
....@@ -258,13 +296,12 @@
258296 /* Typically used on completion path. Cannot guarantee request consistency */
259297 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
260298 {
261
- struct nvm_tgt_dev *dev = pblk->dev;
262299 mempool_t *pool;
263300
264301 switch (type) {
265302 case PBLK_WRITE:
266303 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
267
- /* fall through */
304
+ fallthrough;
268305 case PBLK_WRITE_INT:
269306 pool = &pblk->w_rq_pool;
270307 break;
....@@ -279,9 +316,7 @@
279316 return;
280317 }
281318
282
- if (rqd->meta_list)
283
- nvm_dev_dma_free(dev->parent, rqd->meta_list,
284
- rqd->dma_meta_list);
319
+ pblk_free_rqd_meta(pblk, rqd);
285320 mempool_free(rqd, pool);
286321 }
287322
....@@ -343,7 +378,7 @@
343378 {
344379 unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
345380
346
- if (secs_avail >= pblk->min_write_pgs)
381
+ if (secs_avail >= pblk->min_write_pgs_data)
347382 pblk_write_kick(pblk);
348383 }
349384
....@@ -374,7 +409,9 @@
374409 struct pblk_line_meta *lm = &pblk->lm;
375410 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
376411 struct list_head *move_list = NULL;
377
- int vsc = le32_to_cpu(*line->vsc);
412
+ int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
413
+ * (pblk->min_write_pgs - pblk->min_write_pgs_data);
414
+ int vsc = le32_to_cpu(*line->vsc) + packed_meta;
378415
379416 lockdep_assert_held(&line->lock);
380417
....@@ -411,6 +448,9 @@
411448 }
412449 } else {
413450 line->state = PBLK_LINESTATE_CORRUPT;
451
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
452
+ line->state);
453
+
414454 line->gc_group = PBLK_LINEGC_NONE;
415455 move_list = &l_mg->corrupt_list;
416456 pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
....@@ -467,7 +507,7 @@
467507 pblk->sec_per_write = sec_per_write;
468508 }
469509
470
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
510
+int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
471511 {
472512 struct nvm_tgt_dev *dev = pblk->dev;
473513
....@@ -478,76 +518,72 @@
478518 return NVM_IO_ERR;
479519 #endif
480520
481
- return nvm_submit_io(dev, rqd);
521
+ return nvm_submit_io(dev, rqd, buf);
482522 }
483523
484
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
524
+void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
485525 {
486
- struct nvm_tgt_dev *dev = pblk->dev;
526
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
487527
488
- atomic_inc(&pblk->inflight_io);
528
+ int i;
489529
490
-#ifdef CONFIG_NVM_PBLK_DEBUG
491
- if (pblk_check_io(pblk, rqd))
492
- return NVM_IO_ERR;
493
-#endif
530
+ for (i = 0; i < rqd->nr_ppas; i++) {
531
+ struct ppa_addr *ppa = &ppa_list[i];
532
+ struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
533
+ u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
494534
495
- return nvm_submit_io_sync(dev, rqd);
496
-}
497
-
498
-static void pblk_bio_map_addr_endio(struct bio *bio)
499
-{
500
- bio_put(bio);
501
-}
502
-
503
-struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
504
- unsigned int nr_secs, unsigned int len,
505
- int alloc_type, gfp_t gfp_mask)
506
-{
507
- struct nvm_tgt_dev *dev = pblk->dev;
508
- void *kaddr = data;
509
- struct page *page;
510
- struct bio *bio;
511
- int i, ret;
512
-
513
- if (alloc_type == PBLK_KMALLOC_META)
514
- return bio_map_kern(dev->q, kaddr, len, gfp_mask);
515
-
516
- bio = bio_kmalloc(gfp_mask, nr_secs);
517
- if (!bio)
518
- return ERR_PTR(-ENOMEM);
519
-
520
- for (i = 0; i < nr_secs; i++) {
521
- page = vmalloc_to_page(kaddr);
522
- if (!page) {
523
- pblk_err(pblk, "could not map vmalloc bio\n");
524
- bio_put(bio);
525
- bio = ERR_PTR(-ENOMEM);
526
- goto out;
527
- }
528
-
529
- ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
530
- if (ret != PAGE_SIZE) {
531
- pblk_err(pblk, "could not add page to bio\n");
532
- bio_put(bio);
533
- bio = ERR_PTR(-ENOMEM);
534
- goto out;
535
- }
536
-
537
- kaddr += PAGE_SIZE;
535
+ if (caddr == 0)
536
+ trace_pblk_chunk_state(pblk_disk_name(pblk),
537
+ ppa, NVM_CHK_ST_OPEN);
538
+ else if (caddr == (chunk->cnlb - 1))
539
+ trace_pblk_chunk_state(pblk_disk_name(pblk),
540
+ ppa, NVM_CHK_ST_CLOSED);
538541 }
542
+}
539543
540
- bio->bi_end_io = pblk_bio_map_addr_endio;
541
-out:
542
- return bio;
544
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
545
+{
546
+ struct nvm_tgt_dev *dev = pblk->dev;
547
+ int ret;
548
+
549
+ atomic_inc(&pblk->inflight_io);
550
+
551
+#ifdef CONFIG_NVM_PBLK_DEBUG
552
+ if (pblk_check_io(pblk, rqd))
553
+ return NVM_IO_ERR;
554
+#endif
555
+
556
+ ret = nvm_submit_io_sync(dev, rqd, buf);
557
+
558
+ if (trace_pblk_chunk_state_enabled() && !ret &&
559
+ rqd->opcode == NVM_OP_PWRITE)
560
+ pblk_check_chunk_state_update(pblk, rqd);
561
+
562
+ return ret;
563
+}
564
+
565
+static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
566
+ void *buf)
567
+{
568
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
569
+ int ret;
570
+
571
+ pblk_down_chunk(pblk, ppa_list[0]);
572
+ ret = pblk_submit_io_sync(pblk, rqd, buf);
573
+ pblk_up_chunk(pblk, ppa_list[0]);
574
+
575
+ return ret;
543576 }
544577
545578 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
546
- unsigned long secs_to_flush)
579
+ unsigned long secs_to_flush, bool skip_meta)
547580 {
548581 int max = pblk->sec_per_write;
549582 int min = pblk->min_write_pgs;
550583 int secs_to_sync = 0;
584
+
585
+ if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
586
+ min = max = pblk->min_write_pgs_data;
551587
552588 if (secs_avail >= max)
553589 secs_to_sync = max;
....@@ -623,147 +659,6 @@
623659 return paddr;
624660 }
625661
626
-/*
627
- * Submit emeta to one LUN in the raid line at the time to avoid a deadlock when
628
- * taking the per LUN semaphore.
629
- */
630
-static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
631
- void *emeta_buf, u64 paddr, int dir)
632
-{
633
- struct nvm_tgt_dev *dev = pblk->dev;
634
- struct nvm_geo *geo = &dev->geo;
635
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
636
- struct pblk_line_meta *lm = &pblk->lm;
637
- void *ppa_list, *meta_list;
638
- struct bio *bio;
639
- struct nvm_rq rqd;
640
- dma_addr_t dma_ppa_list, dma_meta_list;
641
- int min = pblk->min_write_pgs;
642
- int left_ppas = lm->emeta_sec[0];
643
- int id = line->id;
644
- int rq_ppas, rq_len;
645
- int cmd_op, bio_op;
646
- int i, j;
647
- int ret;
648
-
649
- if (dir == PBLK_WRITE) {
650
- bio_op = REQ_OP_WRITE;
651
- cmd_op = NVM_OP_PWRITE;
652
- } else if (dir == PBLK_READ) {
653
- bio_op = REQ_OP_READ;
654
- cmd_op = NVM_OP_PREAD;
655
- } else
656
- return -EINVAL;
657
-
658
- meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
659
- &dma_meta_list);
660
- if (!meta_list)
661
- return -ENOMEM;
662
-
663
- ppa_list = meta_list + pblk_dma_meta_size;
664
- dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
665
-
666
-next_rq:
667
- memset(&rqd, 0, sizeof(struct nvm_rq));
668
-
669
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
670
- rq_len = rq_ppas * geo->csecs;
671
-
672
- bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
673
- l_mg->emeta_alloc_type, GFP_KERNEL);
674
- if (IS_ERR(bio)) {
675
- ret = PTR_ERR(bio);
676
- goto free_rqd_dma;
677
- }
678
-
679
- bio->bi_iter.bi_sector = 0; /* internal bio */
680
- bio_set_op_attrs(bio, bio_op, 0);
681
-
682
- rqd.bio = bio;
683
- rqd.meta_list = meta_list;
684
- rqd.ppa_list = ppa_list;
685
- rqd.dma_meta_list = dma_meta_list;
686
- rqd.dma_ppa_list = dma_ppa_list;
687
- rqd.opcode = cmd_op;
688
- rqd.nr_ppas = rq_ppas;
689
-
690
- if (dir == PBLK_WRITE) {
691
- struct pblk_sec_meta *meta_list = rqd.meta_list;
692
-
693
- rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
694
- for (i = 0; i < rqd.nr_ppas; ) {
695
- spin_lock(&line->lock);
696
- paddr = __pblk_alloc_page(pblk, line, min);
697
- spin_unlock(&line->lock);
698
- for (j = 0; j < min; j++, i++, paddr++) {
699
- meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
700
- rqd.ppa_list[i] =
701
- addr_to_gen_ppa(pblk, paddr, id);
702
- }
703
- }
704
- } else {
705
- for (i = 0; i < rqd.nr_ppas; ) {
706
- struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
707
- int pos = pblk_ppa_to_pos(geo, ppa);
708
- int read_type = PBLK_READ_RANDOM;
709
-
710
- if (pblk_io_aligned(pblk, rq_ppas))
711
- read_type = PBLK_READ_SEQUENTIAL;
712
- rqd.flags = pblk_set_read_mode(pblk, read_type);
713
-
714
- while (test_bit(pos, line->blk_bitmap)) {
715
- paddr += min;
716
- if (pblk_boundary_paddr_checks(pblk, paddr)) {
717
- pblk_err(pblk, "corrupt emeta line:%d\n",
718
- line->id);
719
- bio_put(bio);
720
- ret = -EINTR;
721
- goto free_rqd_dma;
722
- }
723
-
724
- ppa = addr_to_gen_ppa(pblk, paddr, id);
725
- pos = pblk_ppa_to_pos(geo, ppa);
726
- }
727
-
728
- if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
729
- pblk_err(pblk, "corrupt emeta line:%d\n",
730
- line->id);
731
- bio_put(bio);
732
- ret = -EINTR;
733
- goto free_rqd_dma;
734
- }
735
-
736
- for (j = 0; j < min; j++, i++, paddr++)
737
- rqd.ppa_list[i] =
738
- addr_to_gen_ppa(pblk, paddr, line->id);
739
- }
740
- }
741
-
742
- ret = pblk_submit_io_sync(pblk, &rqd);
743
- if (ret) {
744
- pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
745
- bio_put(bio);
746
- goto free_rqd_dma;
747
- }
748
-
749
- atomic_dec(&pblk->inflight_io);
750
-
751
- if (rqd.error) {
752
- if (dir == PBLK_WRITE)
753
- pblk_log_write_err(pblk, &rqd);
754
- else
755
- pblk_log_read_err(pblk, &rqd);
756
- }
757
-
758
- emeta_buf += rq_len;
759
- left_ppas -= rq_ppas;
760
- if (left_ppas)
761
- goto next_rq;
762
-free_rqd_dma:
763
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
764
- return ret;
765
-}
766
-
767662 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
768663 {
769664 struct nvm_tgt_dev *dev = pblk->dev;
....@@ -779,106 +674,182 @@
779674 return bit * geo->ws_opt;
780675 }
781676
782
-static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
783
- u64 paddr, int dir)
677
+int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
784678 {
785
- struct nvm_tgt_dev *dev = pblk->dev;
786679 struct pblk_line_meta *lm = &pblk->lm;
787
- struct bio *bio;
680
+ struct ppa_addr *ppa_list;
788681 struct nvm_rq rqd;
789
- __le64 *lba_list = NULL;
682
+ u64 paddr = pblk_line_smeta_start(pblk, line);
790683 int i, ret;
791
- int cmd_op, bio_op;
792
- int flags;
793
-
794
- if (dir == PBLK_WRITE) {
795
- bio_op = REQ_OP_WRITE;
796
- cmd_op = NVM_OP_PWRITE;
797
- flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
798
- lba_list = emeta_to_lbas(pblk, line->emeta->buf);
799
- } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
800
- bio_op = REQ_OP_READ;
801
- cmd_op = NVM_OP_PREAD;
802
- flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
803
- } else
804
- return -EINVAL;
805684
806685 memset(&rqd, 0, sizeof(struct nvm_rq));
807686
808
- rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
809
- &rqd.dma_meta_list);
810
- if (!rqd.meta_list)
811
- return -ENOMEM;
687
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
688
+ if (ret)
689
+ return ret;
812690
813
- rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
814
- rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
815
-
816
- bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
817
- if (IS_ERR(bio)) {
818
- ret = PTR_ERR(bio);
819
- goto free_ppa_list;
820
- }
821
-
822
- bio->bi_iter.bi_sector = 0; /* internal bio */
823
- bio_set_op_attrs(bio, bio_op, 0);
824
-
825
- rqd.bio = bio;
826
- rqd.opcode = cmd_op;
827
- rqd.flags = flags;
691
+ rqd.opcode = NVM_OP_PREAD;
828692 rqd.nr_ppas = lm->smeta_sec;
693
+ rqd.is_seq = 1;
694
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
829695
830
- for (i = 0; i < lm->smeta_sec; i++, paddr++) {
831
- struct pblk_sec_meta *meta_list = rqd.meta_list;
696
+ for (i = 0; i < lm->smeta_sec; i++, paddr++)
697
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
832698
833
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
834
-
835
- if (dir == PBLK_WRITE) {
836
- __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
837
-
838
- meta_list[i].lba = lba_list[paddr] = addr_empty;
839
- }
840
- }
841
-
842
- /*
843
- * This I/O is sent by the write thread when a line is replace. Since
844
- * the write thread is the only one sending write and erase commands,
845
- * there is no need to take the LUN semaphore.
846
- */
847
- ret = pblk_submit_io_sync(pblk, &rqd);
699
+ ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
848700 if (ret) {
849701 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
850
- bio_put(bio);
851
- goto free_ppa_list;
702
+ goto clear_rqd;
703
+ }
704
+
705
+ atomic_dec(&pblk->inflight_io);
706
+
707
+ if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
708
+ pblk_log_read_err(pblk, &rqd);
709
+ ret = -EIO;
710
+ }
711
+
712
+clear_rqd:
713
+ pblk_free_rqd_meta(pblk, &rqd);
714
+ return ret;
715
+}
716
+
717
+static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
718
+ u64 paddr)
719
+{
720
+ struct pblk_line_meta *lm = &pblk->lm;
721
+ struct ppa_addr *ppa_list;
722
+ struct nvm_rq rqd;
723
+ __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
724
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
725
+ int i, ret;
726
+
727
+ memset(&rqd, 0, sizeof(struct nvm_rq));
728
+
729
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
730
+ if (ret)
731
+ return ret;
732
+
733
+ rqd.opcode = NVM_OP_PWRITE;
734
+ rqd.nr_ppas = lm->smeta_sec;
735
+ rqd.is_seq = 1;
736
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
737
+
738
+ for (i = 0; i < lm->smeta_sec; i++, paddr++) {
739
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk,
740
+ rqd.meta_list, i);
741
+
742
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
743
+ meta->lba = lba_list[paddr] = addr_empty;
744
+ }
745
+
746
+ ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
747
+ if (ret) {
748
+ pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
749
+ goto clear_rqd;
852750 }
853751
854752 atomic_dec(&pblk->inflight_io);
855753
856754 if (rqd.error) {
857
- if (dir == PBLK_WRITE) {
858
- pblk_log_write_err(pblk, &rqd);
859
- ret = 1;
860
- } else if (dir == PBLK_READ)
861
- pblk_log_read_err(pblk, &rqd);
755
+ pblk_log_write_err(pblk, &rqd);
756
+ ret = -EIO;
862757 }
863758
864
-free_ppa_list:
865
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
866
-
759
+clear_rqd:
760
+ pblk_free_rqd_meta(pblk, &rqd);
867761 return ret;
868762 }
869763
870
-int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
871
-{
872
- u64 bpaddr = pblk_line_smeta_start(pblk, line);
873
-
874
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
875
-}
876
-
877
-int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
764
+int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
878765 void *emeta_buf)
879766 {
880
- return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
881
- line->emeta_ssec, PBLK_READ);
767
+ struct nvm_tgt_dev *dev = pblk->dev;
768
+ struct nvm_geo *geo = &dev->geo;
769
+ struct pblk_line_meta *lm = &pblk->lm;
770
+ void *ppa_list_buf, *meta_list;
771
+ struct ppa_addr *ppa_list;
772
+ struct nvm_rq rqd;
773
+ u64 paddr = line->emeta_ssec;
774
+ dma_addr_t dma_ppa_list, dma_meta_list;
775
+ int min = pblk->min_write_pgs;
776
+ int left_ppas = lm->emeta_sec[0];
777
+ int line_id = line->id;
778
+ int rq_ppas, rq_len;
779
+ int i, j;
780
+ int ret;
781
+
782
+ meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
783
+ &dma_meta_list);
784
+ if (!meta_list)
785
+ return -ENOMEM;
786
+
787
+ ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
788
+ dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
789
+
790
+next_rq:
791
+ memset(&rqd, 0, sizeof(struct nvm_rq));
792
+
793
+ rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
794
+ rq_len = rq_ppas * geo->csecs;
795
+
796
+ rqd.meta_list = meta_list;
797
+ rqd.ppa_list = ppa_list_buf;
798
+ rqd.dma_meta_list = dma_meta_list;
799
+ rqd.dma_ppa_list = dma_ppa_list;
800
+ rqd.opcode = NVM_OP_PREAD;
801
+ rqd.nr_ppas = rq_ppas;
802
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
803
+
804
+ for (i = 0; i < rqd.nr_ppas; ) {
805
+ struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
806
+ int pos = pblk_ppa_to_pos(geo, ppa);
807
+
808
+ if (pblk_io_aligned(pblk, rq_ppas))
809
+ rqd.is_seq = 1;
810
+
811
+ while (test_bit(pos, line->blk_bitmap)) {
812
+ paddr += min;
813
+ if (pblk_boundary_paddr_checks(pblk, paddr)) {
814
+ ret = -EINTR;
815
+ goto free_rqd_dma;
816
+ }
817
+
818
+ ppa = addr_to_gen_ppa(pblk, paddr, line_id);
819
+ pos = pblk_ppa_to_pos(geo, ppa);
820
+ }
821
+
822
+ if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
823
+ ret = -EINTR;
824
+ goto free_rqd_dma;
825
+ }
826
+
827
+ for (j = 0; j < min; j++, i++, paddr++)
828
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
829
+ }
830
+
831
+ ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
832
+ if (ret) {
833
+ pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
834
+ goto free_rqd_dma;
835
+ }
836
+
837
+ atomic_dec(&pblk->inflight_io);
838
+
839
+ if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
840
+ pblk_log_read_err(pblk, &rqd);
841
+ ret = -EIO;
842
+ goto free_rqd_dma;
843
+ }
844
+
845
+ emeta_buf += rq_len;
846
+ left_ppas -= rq_ppas;
847
+ if (left_ppas)
848
+ goto next_rq;
849
+
850
+free_rqd_dma:
851
+ nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
852
+ return ret;
882853 }
883854
884855 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
....@@ -887,7 +858,7 @@
887858 rqd->opcode = NVM_OP_ERASE;
888859 rqd->ppa_addr = ppa;
889860 rqd->nr_ppas = 1;
890
- rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
861
+ rqd->is_seq = 1;
891862 rqd->bio = NULL;
892863 }
893864
....@@ -896,12 +867,15 @@
896867 struct nvm_rq rqd = {NULL};
897868 int ret;
898869
870
+ trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
871
+ PBLK_CHUNK_RESET_START);
872
+
899873 pblk_setup_e_rq(pblk, &rqd, ppa);
900874
901875 /* The write thread schedules erases so that it minimizes disturbances
902876 * with writes. Thus, there is no need to take the LUN semaphore.
903877 */
904
- ret = pblk_submit_io_sync(pblk, &rqd);
878
+ ret = pblk_submit_io_sync(pblk, &rqd, NULL);
905879 rqd.private = pblk;
906880 __pblk_end_io_erase(pblk, &rqd);
907881
....@@ -995,6 +969,8 @@
995969 spin_lock(&l_mg->free_lock);
996970 spin_lock(&line->lock);
997971 line->state = PBLK_LINESTATE_BAD;
972
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
973
+ line->state);
998974 spin_unlock(&line->lock);
999975
1000976 list_add_tail(&line->list, &l_mg->bad_list);
....@@ -1012,7 +988,7 @@
1012988 bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
1013989
1014990 smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1015
- memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
991
+ guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
1016992 smeta_buf->header.id = cpu_to_le32(line->id);
1017993 smeta_buf->header.type = cpu_to_le16(line->type);
1018994 smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
....@@ -1058,15 +1034,18 @@
10581034 static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
10591035 {
10601036 struct pblk_line_meta *lm = &pblk->lm;
1037
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
10611038
1062
- line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
1039
+ line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
10631040 if (!line->map_bitmap)
10641041 return -ENOMEM;
10651042
1043
+ memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1044
+
10661045 /* will be initialized using bb info from map_bitmap */
1067
- line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
1046
+ line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
10681047 if (!line->invalid_bitmap) {
1069
- kfree(line->map_bitmap);
1048
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
10701049 line->map_bitmap = NULL;
10711050 return -ENOMEM;
10721051 }
....@@ -1106,10 +1085,9 @@
11061085 off = bit * geo->ws_opt;
11071086 bitmap_set(line->map_bitmap, off, lm->smeta_sec);
11081087 line->sec_in_line -= lm->smeta_sec;
1109
- line->smeta_ssec = off;
11101088 line->cur_sec = off + lm->smeta_sec;
11111089
1112
- if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
1090
+ if (init && pblk_line_smeta_write(pblk, line, off)) {
11131091 pblk_debug(pblk, "line smeta I/O failed. Retry\n");
11141092 return 0;
11151093 }
....@@ -1139,6 +1117,8 @@
11391117 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
11401118 spin_lock(&line->lock);
11411119 line->state = PBLK_LINESTATE_BAD;
1120
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1121
+ line->state);
11421122 spin_unlock(&line->lock);
11431123
11441124 list_add_tail(&line->list, &l_mg->bad_list);
....@@ -1191,6 +1171,8 @@
11911171 if (line->state == PBLK_LINESTATE_NEW) {
11921172 blk_to_erase = pblk_prepare_new_line(pblk, line);
11931173 line->state = PBLK_LINESTATE_FREE;
1174
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1175
+ line->state);
11941176 } else {
11951177 blk_to_erase = blk_in_line;
11961178 }
....@@ -1208,6 +1190,8 @@
12081190 }
12091191
12101192 line->state = PBLK_LINESTATE_OPEN;
1193
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1194
+ line->state);
12111195
12121196 atomic_set(&line->left_eblks, blk_to_erase);
12131197 atomic_set(&line->left_seblks, blk_to_erase);
....@@ -1216,10 +1200,12 @@
12161200 spin_unlock(&line->lock);
12171201
12181202 kref_init(&line->ref);
1203
+ atomic_set(&line->sec_to_update, 0);
12191204
12201205 return 0;
12211206 }
12221207
1208
+/* Line allocations in the recovery path are always single threaded */
12231209 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
12241210 {
12251211 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
....@@ -1259,7 +1245,9 @@
12591245
12601246 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
12611247 {
1262
- kfree(line->map_bitmap);
1248
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1249
+
1250
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
12631251 line->map_bitmap = NULL;
12641252 line->smeta = NULL;
12651253 line->emeta = NULL;
....@@ -1277,8 +1265,11 @@
12771265
12781266 void pblk_line_free(struct pblk_line *line)
12791267 {
1280
- kfree(line->map_bitmap);
1281
- kfree(line->invalid_bitmap);
1268
+ struct pblk *pblk = line->pblk;
1269
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1270
+
1271
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1272
+ mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
12821273
12831274 pblk_line_reinit(line);
12841275 }
....@@ -1306,6 +1297,8 @@
13061297 if (unlikely(bit >= lm->blk_per_line)) {
13071298 spin_lock(&line->lock);
13081299 line->state = PBLK_LINESTATE_BAD;
1300
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1301
+ line->state);
13091302 spin_unlock(&line->lock);
13101303
13111304 list_add_tail(&line->list, &l_mg->bad_list);
....@@ -1440,12 +1433,30 @@
14401433 return line;
14411434 }
14421435
1436
+void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1437
+{
1438
+ struct pblk_line *line;
1439
+
1440
+ line = pblk_ppa_to_line(pblk, ppa);
1441
+ kref_put(&line->ref, pblk_line_put_wq);
1442
+}
1443
+
1444
+void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1445
+{
1446
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
1447
+ int i;
1448
+
1449
+ for (i = 0; i < rqd->nr_ppas; i++)
1450
+ pblk_ppa_to_line_put(pblk, ppa_list[i]);
1451
+}
1452
+
14431453 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
14441454 {
14451455 lockdep_assert_held(&pblk->l_mg.free_lock);
14461456
14471457 pblk_set_space_limit(pblk);
14481458 pblk->state = PBLK_STATE_STOPPING;
1459
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
14491460 }
14501461
14511462 static void pblk_line_close_meta_sync(struct pblk *pblk)
....@@ -1495,6 +1506,7 @@
14951506 return;
14961507 }
14971508 pblk->state = PBLK_STATE_RECOVERING;
1509
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
14981510 spin_unlock(&l_mg->free_lock);
14991511
15001512 pblk_flush_writer(pblk);
....@@ -1516,6 +1528,7 @@
15161528
15171529 spin_lock(&l_mg->free_lock);
15181530 pblk->state = PBLK_STATE_STOPPED;
1531
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
15191532 l_mg->data_line = NULL;
15201533 l_mg->data_next = NULL;
15211534 spin_unlock(&l_mg->free_lock);
....@@ -1606,7 +1619,17 @@
16061619
16071620 spin_lock(&line->lock);
16081621 WARN_ON(line->state != PBLK_LINESTATE_GC);
1622
+ if (line->w_err_gc->has_gc_err) {
1623
+ spin_unlock(&line->lock);
1624
+ pblk_err(pblk, "line %d had errors during GC\n", line->id);
1625
+ pblk_put_line_back(pblk, line);
1626
+ line->w_err_gc->has_gc_err = 0;
1627
+ return;
1628
+ }
1629
+
16091630 line->state = PBLK_LINESTATE_FREE;
1631
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1632
+ line->state);
16101633 line->gc_group = PBLK_LINEGC_NONE;
16111634 pblk_line_free(line);
16121635
....@@ -1675,16 +1698,19 @@
16751698 rqd->end_io = pblk_end_io_erase;
16761699 rqd->private = pblk;
16771700
1701
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
1702
+ &ppa, PBLK_CHUNK_RESET_START);
1703
+
16781704 /* The write thread schedules erases so that it minimizes disturbances
16791705 * with writes. Thus, there is no need to take the LUN semaphore.
16801706 */
1681
- err = pblk_submit_io(pblk, rqd);
1707
+ err = pblk_submit_io(pblk, rqd, NULL);
16821708 if (err) {
16831709 struct nvm_tgt_dev *dev = pblk->dev;
16841710 struct nvm_geo *geo = &dev->geo;
16851711
16861712 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1687
- pblk_ppa_to_line(ppa),
1713
+ pblk_ppa_to_line_id(ppa),
16881714 pblk_ppa_to_pos(geo, ppa));
16891715 }
16901716
....@@ -1736,10 +1762,9 @@
17361762 WARN_ON(line->state != PBLK_LINESTATE_OPEN);
17371763 line->state = PBLK_LINESTATE_CLOSED;
17381764 move_list = pblk_line_gc_list(pblk, line);
1739
-
17401765 list_add_tail(&line->list, move_list);
17411766
1742
- kfree(line->map_bitmap);
1767
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
17431768 line->map_bitmap = NULL;
17441769 line->smeta = NULL;
17451770 line->emeta = NULL;
....@@ -1755,6 +1780,9 @@
17551780
17561781 spin_unlock(&line->lock);
17571782 spin_unlock(&l_mg->gc_lock);
1783
+
1784
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1785
+ line->state);
17581786 }
17591787
17601788 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
....@@ -1775,7 +1803,8 @@
17751803
17761804 if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
17771805 emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1778
- memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
1806
+ guid_copy((guid_t *)&emeta_buf->header.uuid,
1807
+ &pblk->instance_uuid);
17791808 emeta_buf->header.id = cpu_to_le32(line->id);
17801809 emeta_buf->header.type = cpu_to_le16(line->type);
17811810 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
....@@ -1806,13 +1835,11 @@
18061835 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
18071836 {
18081837 struct pblk_line_meta *lm = &pblk->lm;
1809
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
18101838 unsigned int lba_list_size = lm->emeta_len[2];
18111839 struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
18121840 struct pblk_emeta *emeta = line->emeta;
18131841
1814
- w_err_gc->lba_list = pblk_malloc(lba_list_size,
1815
- l_mg->emeta_alloc_type, GFP_KERNEL);
1842
+ w_err_gc->lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
18161843 memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
18171844 lba_list_size);
18181845 }
....@@ -1851,8 +1878,7 @@
18511878 queue_work(wq, &line_ws->ws);
18521879 }
18531880
1854
-static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
1855
- int nr_ppas, int pos)
1881
+static void __pblk_down_chunk(struct pblk *pblk, int pos)
18561882 {
18571883 struct pblk_lun *rlun = &pblk->luns[pos];
18581884 int ret;
....@@ -1861,13 +1887,6 @@
18611887 * Only send one inflight I/O per LUN. Since we map at a page
18621888	 * granularity, all ppas in the I/O will map to the same LUN
18631889 */
1864
-#ifdef CONFIG_NVM_PBLK_DEBUG
1865
- int i;
1866
-
1867
- for (i = 1; i < nr_ppas; i++)
1868
- WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1869
- ppa_list[0].a.ch != ppa_list[i].a.ch);
1870
-#endif
18711890
18721891 ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
18731892 if (ret == -ETIME || ret == -EINTR)
....@@ -1875,21 +1894,21 @@
18751894 -ret);
18761895 }
18771896
1878
-void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1897
+void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
18791898 {
18801899 struct nvm_tgt_dev *dev = pblk->dev;
18811900 struct nvm_geo *geo = &dev->geo;
1882
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1901
+ int pos = pblk_ppa_to_pos(geo, ppa);
18831902
1884
- __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1903
+ __pblk_down_chunk(pblk, pos);
18851904 }
18861905
1887
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1906
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
18881907 unsigned long *lun_bitmap)
18891908 {
18901909 struct nvm_tgt_dev *dev = pblk->dev;
18911910 struct nvm_geo *geo = &dev->geo;
1892
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1911
+ int pos = pblk_ppa_to_pos(geo, ppa);
18931912
18941913	/* If the LUN has been locked for this same request, do not attempt to
18951914 * lock it again
....@@ -1897,30 +1916,21 @@
18971916 if (test_and_set_bit(pos, lun_bitmap))
18981917 return;
18991918
1900
- __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1919
+ __pblk_down_chunk(pblk, pos);
19011920 }
19021921
1903
-void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1922
+void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
19041923 {
19051924 struct nvm_tgt_dev *dev = pblk->dev;
19061925 struct nvm_geo *geo = &dev->geo;
19071926 struct pblk_lun *rlun;
1908
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1909
-
1910
-#ifdef CONFIG_NVM_PBLK_DEBUG
1911
- int i;
1912
-
1913
- for (i = 1; i < nr_ppas; i++)
1914
- WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1915
- ppa_list[0].a.ch != ppa_list[i].a.ch);
1916
-#endif
1927
+ int pos = pblk_ppa_to_pos(geo, ppa);
19171928
19181929 rlun = &pblk->luns[pos];
19191930 up(&rlun->wr_sem);
19201931 }
19211932
1922
-void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1923
- unsigned long *lun_bitmap)
1933
+void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
19241934 {
19251935 struct nvm_tgt_dev *dev = pblk->dev;
19261936 struct nvm_geo *geo = &dev->geo;
....@@ -1939,7 +1949,7 @@
19391949 struct ppa_addr ppa_l2p;
19401950
19411951 /* logic error: lba out-of-bounds. Ignore update */
1942
- if (!(lba < pblk->rl.nr_secs)) {
1952
+ if (!(lba < pblk->capacity)) {
19431953 WARN(1, "pblk: corrupted L2P map request\n");
19441954 return;
19451955 }
....@@ -1979,7 +1989,7 @@
19791989 #endif
19801990
19811991 /* logic error: lba out-of-bounds. Ignore update */
1982
- if (!(lba < pblk->rl.nr_secs)) {
1992
+ if (!(lba < pblk->capacity)) {
19831993 WARN(1, "pblk: corrupted L2P map request\n");
19841994 return 0;
19851995 }
....@@ -2025,7 +2035,7 @@
20252035 }
20262036
20272037 /* logic error: lba out-of-bounds. Ignore update */
2028
- if (!(lba < pblk->rl.nr_secs)) {
2038
+ if (!(lba < pblk->capacity)) {
20292039 WARN(1, "pblk: corrupted L2P map request\n");
20302040 return;
20312041 }
....@@ -2051,8 +2061,8 @@
20512061 spin_unlock(&pblk->trans_lock);
20522062 }
20532063
2054
-void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2055
- sector_t blba, int nr_secs)
2064
+int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2065
+ sector_t blba, int nr_secs, bool *from_cache)
20562066 {
20572067 int i;
20582068
....@@ -2064,13 +2074,21 @@
20642074
20652075 /* If the L2P entry maps to a line, the reference is valid */
20662076 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2067
- int line_id = pblk_ppa_to_line(ppa);
2068
- struct pblk_line *line = &pblk->lines[line_id];
2077
+ struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2078
+
2079
+ if (i > 0 && *from_cache)
2080
+ break;
2081
+ *from_cache = false;
20692082
20702083 kref_get(&line->ref);
2084
+ } else {
2085
+ if (i > 0 && !*from_cache)
2086
+ break;
2087
+ *from_cache = true;
20712088 }
20722089 }
20732090 spin_unlock(&pblk->trans_lock);
2091
+ return i;
20742092 }
20752093
20762094 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
....@@ -2084,7 +2102,7 @@
20842102 lba = lba_list[i];
20852103 if (lba != ADDR_EMPTY) {
20862104 /* logic error: lba out-of-bounds. Ignore update */
2087
- if (!(lba < pblk->rl.nr_secs)) {
2105
+ if (!(lba < pblk->capacity)) {
20882106 WARN(1, "pblk: corrupted L2P map request\n");
20892107 continue;
20902108 }
....@@ -2093,3 +2111,38 @@
20932111 }
20942112 spin_unlock(&pblk->trans_lock);
20952113 }
2114
+
2115
+void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
2116
+{
2117
+ void *buffer;
2118
+
2119
+ if (pblk_is_oob_meta_supported(pblk)) {
2120
+ /* Just use OOB metadata buffer as always */
2121
+ buffer = rqd->meta_list;
2122
+ } else {
2123
+ /* We need to reuse last page of request (packed metadata)
2124
+ * in similar way as traditional oob metadata
2125
+ */
2126
+ buffer = page_to_virt(
2127
+ rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2128
+ }
2129
+
2130
+ return buffer;
2131
+}
2132
+
2133
+void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
2134
+{
2135
+ void *meta_list = rqd->meta_list;
2136
+ void *page;
2137
+ int i = 0;
2138
+
2139
+ if (pblk_is_oob_meta_supported(pblk))
2140
+ return;
2141
+
2142
+ page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2143
+ /* We need to fill oob meta buffer with data from packed metadata */
2144
+ for (; i < rqd->nr_ppas; i++)
2145
+ memcpy(pblk_get_meta(pblk, meta_list, i),
2146
+ page + (i * sizeof(struct pblk_sec_meta)),
2147
+ sizeof(struct pblk_sec_meta));
2148
+}
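/*
 * Illustrative sketch (not part of the patch above): how the new
 * pblk_alloc_rqd_meta()/pblk_free_rqd_meta() helpers and the extra data-buffer
 * argument to pblk_submit_io_sync() are meant to be paired, mirroring the
 * refactored pblk_line_smeta_read(). The function name and the single-sector
 * read setup are hypothetical; the sketch assumes the pblk/lightnvm context
 * shown in the diff.
 */
static int pblk_sync_read_sketch(struct pblk *pblk, struct ppa_addr ppa,
				 void *buf)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	/* Allocates the DMA meta buffer; for nr_ppas > 1 it also carves the
	 * PPA list out of the same buffer at pblk_dma_meta_size(pblk).
	 */
	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = ppa;	/* single-sector requests use ppa_addr */

	/* The data buffer is now passed explicitly instead of a mapped bio. */
	ret = pblk_submit_io_sync(pblk, &rqd, buf);
	if (ret)
		goto clear_rqd;

	/* pblk_submit_io_sync() bumped inflight_io; drop it after completion,
	 * as the smeta/emeta callers in the patch do.
	 */
	atomic_dec(&pblk->inflight_io);

	if (rqd.error)
		ret = -EIO;

clear_rqd:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}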