2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/lightnvm/core.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
  * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
  */
+
+#define pr_fmt(fmt) "nvm: " fmt
 
 #include <linux/list.h>
 #include <linux/types.h>
@@ -44,6 +32,8 @@
 	struct nvm_ch_map *chnls;
 	int num_ch;
 };
+
+static void nvm_free(struct kref *ref);
 
 static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
 {
@@ -86,7 +76,7 @@
 
 	for (i = lun_begin; i <= lun_end; i++) {
 		if (test_and_set_bit(i, dev->lun_map)) {
-			pr_err("nvm: lun %d already allocated\n", i);
+			pr_err("lun %d already allocated\n", i);
 			goto err;
 		}
 	}
@@ -246,10 +236,6 @@
 	return tgt_dev;
 }
 
-static const struct block_device_operations nvm_fops = {
-	.owner		= THIS_MODULE,
-};
-
 static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
 {
 	struct nvm_tgt_type *tt;
@@ -276,7 +262,7 @@
 				 int lun_end)
 {
 	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
-		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
+		pr_err("lun out of bound (%u:%u > %u)\n",
 			lun_begin, lun_end, geo->all_luns - 1);
 		return -EINVAL;
 	}
@@ -309,7 +295,7 @@
 	if (e->op == 0xFFFF) {
 		e->op = NVM_TARGET_DEFAULT_OP;
 	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
-		pr_err("nvm: invalid over provisioning value\n");
+		pr_err("invalid over provisioning value\n");
 		return -EINVAL;
 	}
 
@@ -325,6 +311,7 @@
 	struct nvm_target *t;
 	struct nvm_tgt_dev *tgt_dev;
 	void *targetdata;
+	unsigned int mdts;
 	int ret;
 
 	switch (create->conf.type) {
@@ -345,18 +332,23 @@
 		e = create->conf.e;
 		break;
 	default:
-		pr_err("nvm: config type not valid\n");
+		pr_err("config type not valid\n");
 		return -EINVAL;
 	}
 
 	tt = nvm_find_target_type(create->tgttype);
 	if (!tt) {
-		pr_err("nvm: target type %s not found\n", create->tgttype);
+		pr_err("target type %s not found\n", create->tgttype);
+		return -EINVAL;
+	}
+
+	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
+		pr_err("device is incompatible with target L2P type.\n");
 		return -EINVAL;
 	}
 
 	if (nvm_target_exists(create->tgtname)) {
-		pr_err("nvm: target name already exists (%s)\n",
+		pr_err("target name already exists (%s)\n",
 			create->tgtname);
 		return -EINVAL;
 	}
@@ -373,7 +365,7 @@
 
 	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
 	if (!tgt_dev) {
-		pr_err("nvm: could not create target device\n");
+		pr_err("could not create target device\n");
 		ret = -ENOMEM;
 		goto err_t;
 	}
@@ -384,18 +376,17 @@
 		goto err_dev;
 	}
 
-	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
+	tqueue = blk_alloc_queue(dev->q->node);
 	if (!tqueue) {
 		ret = -ENOMEM;
 		goto err_disk;
 	}
-	blk_queue_make_request(tqueue, tt->make_rq);
 
 	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
 	tdisk->flags = GENHD_FL_EXT_DEVT;
 	tdisk->major = 0;
 	tdisk->first_minor = 0;
-	tdisk->fops = &nvm_fops;
+	tdisk->fops = tt->bops;
 	tdisk->queue = tqueue;
 
 	targetdata = tt->init(tgt_dev, tdisk, create->flags);
@@ -407,8 +398,12 @@
 	tdisk->private_data = targetdata;
 	tqueue->queuedata = targetdata;
 
-	blk_queue_max_hw_sectors(tqueue,
-			(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
+	if (dev->geo.mdts) {
+		mdts = min_t(u32, dev->geo.mdts,
+				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+	}
+	blk_queue_max_hw_sectors(tqueue, mdts);
 
 	set_capacity(tdisk, tt->capacity(targetdata));
 	add_disk(tdisk);
@@ -471,7 +466,6 @@
 
 /**
  * nvm_remove_tgt - Removes a target from the media manager
- * @dev: device
  * @remove: ioctl structure with target name to remove.
  *
  * Returns:
@@ -479,18 +473,31 @@
  * 1: on not found
  * <0: on error
  */
-static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
 {
-	struct nvm_target *t;
+	struct nvm_target *t = NULL;
+	struct nvm_dev *dev;
 
-	mutex_lock(&dev->mlock);
-	t = nvm_find_target(dev, remove->tgtname);
-	if (!t) {
+	down_read(&nvm_lock);
+	list_for_each_entry(dev, &nvm_devices, devices) {
+		mutex_lock(&dev->mlock);
+		t = nvm_find_target(dev, remove->tgtname);
+		if (t) {
+			mutex_unlock(&dev->mlock);
+			break;
+		}
 		mutex_unlock(&dev->mlock);
+	}
+	up_read(&nvm_lock);
+
+	if (!t) {
+		pr_err("failed to remove target %s\n",
+				remove->tgtname);
 		return 1;
 	}
+
 	__nvm_remove_target(t, true);
-	mutex_unlock(&dev->mlock);
+	kref_put(&dev->ref, nvm_free);
 
 	return 0;
 }
@@ -598,22 +605,16 @@
 
 static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
-	if (rqd->nr_ppas == 1) {
-		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
-		return;
-	}
+	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 
-	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
 }
 
 static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
-	if (rqd->nr_ppas == 1) {
-		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
-		return;
-	}
+	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 
-	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
 }
 
 int nvm_register_tgt_type(struct nvm_tgt_type *tt)
@@ -685,7 +686,7 @@
 	rqd->nr_ppas = nr_ppas;
 	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
 	if (!rqd->ppa_list) {
-		pr_err("nvm: failed to allocate dma memory\n");
+		pr_err("failed to allocate dma memory\n");
 		return -ENOMEM;
 	}
 
@@ -712,47 +713,25 @@
 	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 }
 
-int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta,
-		struct ppa_addr ppa, int nchks)
+static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
 {
-	struct nvm_dev *dev = tgt_dev->parent;
+	int flags = 0;
 
-	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
+	if (geo->version == NVM_OCSSD_SPEC_20)
+		return 0;
 
-	return dev->ops->get_chk_meta(tgt_dev->parent, meta,
-						(sector_t)ppa.ppa, nchks);
+	if (rqd->is_seq)
+		flags |= geo->pln_mode >> 1;
+
+	if (rqd->opcode == NVM_OP_PREAD)
+		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
+	else if (rqd->opcode == NVM_OP_PWRITE)
+		flags |= NVM_IO_SCRAMBLE_ENABLE;
+
+	return flags;
 }
-EXPORT_SYMBOL(nvm_get_chunk_meta);
 
-int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
-		       int nr_ppas, int type)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
-	struct nvm_rq rqd;
-	int ret;
-
-	if (nr_ppas > NVM_MAX_VLBA) {
-		pr_err("nvm: unable to update all blocks atomically\n");
-		return -EINVAL;
-	}
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
-	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
-
-	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-	nvm_free_rqd_ppalist(tgt_dev, &rqd);
-	if (ret) {
-		pr_err("nvm: failed bb mark\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
-
-int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 	int ret;
@@ -763,30 +742,56 @@
 	nvm_rq_tgt_to_dev(tgt_dev, rqd);
 
 	rqd->dev = tgt_dev;
+	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 
 	/* In case of error, fail with right address format */
-	ret = dev->ops->submit_io(dev, rqd);
+	ret = dev->ops->submit_io(dev, rqd, buf);
 	if (ret)
 		nvm_rq_dev_to_tgt(tgt_dev, rqd);
 	return ret;
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
-int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+static void nvm_sync_end_io(struct nvm_rq *rqd)
+{
+	struct completion *waiting = rqd->private;
+
+	complete(waiting);
+}
+
+static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
+			      void *buf)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	int ret = 0;
+
+	rqd->end_io = nvm_sync_end_io;
+	rqd->private = &wait;
+
+	ret = dev->ops->submit_io(dev, rqd, buf);
+	if (ret)
+		return ret;
+
+	wait_for_completion_io(&wait);
+
+	return 0;
+}
+
+int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+		       void *buf)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 	int ret;
 
-	if (!dev->ops->submit_io_sync)
+	if (!dev->ops->submit_io)
 		return -ENODEV;
 
 	nvm_rq_tgt_to_dev(tgt_dev, rqd);
 
 	rqd->dev = tgt_dev;
+	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 
-	/* In case of error, fail with right address format */
-	ret = dev->ops->submit_io_sync(dev, rqd);
-	nvm_rq_dev_to_tgt(tgt_dev, rqd);
+	ret = nvm_submit_io_wait(dev, rqd, buf);
 
 	return ret;
 }
@@ -805,27 +810,159 @@
 }
 EXPORT_SYMBOL(nvm_end_io);
 
+static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	if (!dev->ops->submit_io)
+		return -ENODEV;
+
+	rqd->dev = NULL;
+	rqd->flags = nvm_set_flags(&dev->geo, rqd);
+
+	return nvm_submit_io_wait(dev, rqd, NULL);
+}
+
+static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+	struct nvm_rq rqd = { NULL };
+	struct bio bio;
+	struct bio_vec bio_vec;
+	struct page *page;
+	int ret;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	bio_init(&bio, &bio_vec, 1);
+	bio_add_page(&bio, page, PAGE_SIZE, 0);
+	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+
+	rqd.bio = &bio;
+	rqd.opcode = NVM_OP_PREAD;
+	rqd.is_seq = 1;
+	rqd.nr_ppas = 1;
+	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
+
+	ret = nvm_submit_io_sync_raw(dev, &rqd);
+	__free_page(page);
+	if (ret)
+		return ret;
+
+	return rqd.error;
+}
+
 /*
- * folds a bad block list from its plane representation to its virtual
- * block representation. The fold is done in place and reduced size is
- * returned.
- *
- * If any of the planes status are bad or grown bad block, the virtual block
- * is marked bad. If not bad, the first plane state acts as the block state.
+ * Scans a 1.2 chunk first and last page to determine if its state.
+ * If the chunk is found to be open, also scan it to update the write
+ * pointer.
  */
-int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
+			     struct nvm_chk_meta *meta)
 {
 	struct nvm_geo *geo = &dev->geo;
-	int blk, offset, pl, blktype;
+	int ret, pg, pl;
 
-	if (nr_blks != geo->num_chk * geo->pln_mode)
-		return -EINVAL;
+	/* sense first page */
+	ret = nvm_bb_chunk_sense(dev, ppa);
+	if (ret < 0) /* io error */
+		return ret;
+	else if (ret == 0) /* valid data */
+		meta->state = NVM_CHK_ST_OPEN;
+	else if (ret > 0) {
+		/*
+		 * If empty page, the chunk is free, else it is an
+		 * actual io error. In that case, mark it offline.
+		 */
+		switch (ret) {
+		case NVM_RSP_ERR_EMPTYPAGE:
+			meta->state = NVM_CHK_ST_FREE;
+			return 0;
+		case NVM_RSP_ERR_FAILCRC:
+		case NVM_RSP_ERR_FAILECC:
+		case NVM_RSP_WARN_HIGHECC:
+			meta->state = NVM_CHK_ST_OPEN;
+			goto scan;
+		default:
+			return -ret; /* other io error */
+		}
+	}
+
+	/* sense last page */
+	ppa.g.pg = geo->num_pg - 1;
+	ppa.g.pl = geo->num_pln - 1;
+
+	ret = nvm_bb_chunk_sense(dev, ppa);
+	if (ret < 0) /* io error */
+		return ret;
+	else if (ret == 0) { /* Chunk fully written */
+		meta->state = NVM_CHK_ST_CLOSED;
+		meta->wp = geo->clba;
+		return 0;
+	} else if (ret > 0) {
+		switch (ret) {
+		case NVM_RSP_ERR_EMPTYPAGE:
+		case NVM_RSP_ERR_FAILCRC:
+		case NVM_RSP_ERR_FAILECC:
+		case NVM_RSP_WARN_HIGHECC:
+			meta->state = NVM_CHK_ST_OPEN;
+			break;
+		default:
+			return -ret; /* other io error */
+		}
+	}
+
+scan:
+	/*
+	 * chunk is open, we scan sequentially to update the write pointer.
+	 * We make the assumption that targets write data across all planes
+	 * before moving to the next page.
+	 */
+	for (pg = 0; pg < geo->num_pg; pg++) {
+		for (pl = 0; pl < geo->num_pln; pl++) {
+			ppa.g.pg = pg;
+			ppa.g.pl = pl;
+
+			ret = nvm_bb_chunk_sense(dev, ppa);
+			if (ret < 0) /* io error */
+				return ret;
+			else if (ret == 0) {
+				meta->wp += geo->ws_min;
+			} else if (ret > 0) {
+				switch (ret) {
+				case NVM_RSP_ERR_EMPTYPAGE:
+					return 0;
+				case NVM_RSP_ERR_FAILCRC:
+				case NVM_RSP_ERR_FAILECC:
+				case NVM_RSP_WARN_HIGHECC:
+					meta->wp += geo->ws_min;
+					break;
+				default:
+					return -ret; /* other io error */
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * folds a bad block list from its plane representation to its
+ * chunk representation.
+ *
+ * If any of the planes status are bad or grown bad, the chunk is marked
+ * offline. If not bad, the first plane state acts as the chunk state.
+ */
+static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
+			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
+{
+	struct nvm_geo *geo = &dev->geo;
+	int ret, blk, pl, offset, blktype;
 
 	for (blk = 0; blk < geo->num_chk; blk++) {
 		offset = blk * geo->pln_mode;
 		blktype = blks[offset];
 
-		/* Bad blocks on any planes take precedence over other types */
 		for (pl = 0; pl < geo->pln_mode; pl++) {
 			if (blks[offset + pl] &
 					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
@@ -834,23 +971,124 @@
 			}
 		}
 
-		blks[blk] = blktype;
+		ppa.g.blk = blk;
+
+		meta->wp = 0;
+		meta->type = NVM_CHK_TP_W_SEQ;
+		meta->wi = 0;
+		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
+		meta->cnlb = dev->geo.clba;
+
+		if (blktype == NVM_BLK_T_FREE) {
+			ret = nvm_bb_chunk_scan(dev, ppa, meta);
+			if (ret)
+				return ret;
+		} else {
+			meta->state = NVM_CHK_ST_OFFLINE;
+		}
+
+		meta++;
 	}
 
-	return geo->num_chk;
+	return 0;
 }
-EXPORT_SYMBOL(nvm_bb_tbl_fold);
 
-int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
-		       u8 *blks)
+static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
+			   int nchks, struct nvm_chk_meta *meta)
+{
+	struct nvm_geo *geo = &dev->geo;
+	struct ppa_addr ppa;
+	u8 *blks;
+	int ch, lun, nr_blks;
+	int ret = 0;
+
+	ppa.ppa = slba;
+	ppa = dev_to_generic_addr(dev, ppa);
+
+	if (ppa.g.blk != 0)
+		return -EINVAL;
+
+	if ((nchks % geo->num_chk) != 0)
+		return -EINVAL;
+
+	nr_blks = geo->num_chk * geo->pln_mode;
+
+	blks = kmalloc(nr_blks, GFP_KERNEL);
+	if (!blks)
+		return -ENOMEM;
+
+	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
+		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
+			struct ppa_addr ppa_gen, ppa_dev;
+
+			if (!nchks)
+				goto done;
+
+			ppa_gen.ppa = 0;
+			ppa_gen.g.ch = ch;
+			ppa_gen.g.lun = lun;
+			ppa_dev = generic_to_dev_addr(dev, ppa_gen);
+
+			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
+			if (ret)
+				goto done;
+
+			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
+					      meta);
+			if (ret)
+				goto done;
+
+			meta += geo->num_chk;
+			nchks -= geo->num_chk;
+		}
+	}
+done:
+	kfree(blks);
+	return ret;
+}
+
+int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+		       int nchks, struct nvm_chk_meta *meta)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 
 	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
 
-	return dev->ops->get_bb_tbl(dev, ppa, blks);
+	if (dev->geo.version == NVM_OCSSD_SPEC_12)
+		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+
+	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
 }
-EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
+
+int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+		       int nr_ppas, int type)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_rq rqd;
+	int ret;
+
+	if (dev->geo.version == NVM_OCSSD_SPEC_20)
+		return 0;
+
+	if (nr_ppas > NVM_MAX_VLBA) {
+		pr_err("unable to update all blocks atomically\n");
+		return -EINVAL;
+	}
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
+	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+	nvm_free_rqd_ppalist(tgt_dev, &rqd);
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
 
 static int nvm_core_init(struct nvm_dev *dev)
 {
@@ -877,15 +1115,16 @@
 	return ret;
 }
 
-static void nvm_free(struct nvm_dev *dev)
+static void nvm_free(struct kref *ref)
 {
-	if (!dev)
-		return;
+	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);
 
 	if (dev->dma_pool)
 		dev->ops->destroy_dma_pool(dev->dma_pool);
 
-	nvm_unregister_map(dev);
+	if (dev->rmap)
+		nvm_unregister_map(dev);
+
 	kfree(dev->lun_map);
 	kfree(dev);
 }
@@ -896,52 +1135,67 @@
 	int ret = -EINVAL;
 
 	if (dev->ops->identity(dev)) {
-		pr_err("nvm: device could not be identified\n");
+		pr_err("device could not be identified\n");
 		goto err;
 	}
 
-	pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
-			geo->major_ver_id, geo->minor_ver_id,
-			geo->vmnt);
+	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
+			geo->minor_ver_id, geo->vmnt);
 
 	ret = nvm_core_init(dev);
 	if (ret) {
-		pr_err("nvm: could not initialize core structures.\n");
+		pr_err("could not initialize core structures.\n");
 		goto err;
 	}
 
-	pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
+	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
 			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
 			dev->geo.num_chk, dev->geo.all_luns,
 			dev->geo.num_ch);
 	return 0;
 err:
-	pr_err("nvm: failed to initialize nvm\n");
+	pr_err("failed to initialize nvm\n");
 	return ret;
 }
 
 struct nvm_dev *nvm_alloc_dev(int node)
 {
-	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+	struct nvm_dev *dev;
+
+	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+	if (dev)
+		kref_init(&dev->ref);
+
+	return dev;
 }
 EXPORT_SYMBOL(nvm_alloc_dev);
 
 int nvm_register(struct nvm_dev *dev)
 {
-	int ret;
+	int ret, exp_pool_size;
 
-	if (!dev->q || !dev->ops)
+	if (!dev->q || !dev->ops) {
+		kref_put(&dev->ref, nvm_free);
 		return -EINVAL;
-
-	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
-	if (!dev->dma_pool) {
-		pr_err("nvm: could not create dma pool\n");
-		return -ENOMEM;
 	}
 
 	ret = nvm_init(dev);
-	if (ret)
-		goto err_init;
+	if (ret) {
+		kref_put(&dev->ref, nvm_free);
+		return ret;
+	}
+
+	exp_pool_size = max_t(int, PAGE_SIZE,
+			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
+	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);
+
+	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
+						  exp_pool_size);
+	if (!dev->dma_pool) {
+		pr_err("could not create dma pool\n");
+		kref_put(&dev->ref, nvm_free);
+		return -ENOMEM;
+	}
 
 	/* register device with a supported media manager */
 	down_write(&nvm_lock);
@@ -949,9 +1203,6 @@
 	up_write(&nvm_lock);
 
 	return 0;
-err_init:
-	dev->ops->destroy_dma_pool(dev->dma_pool);
-	return ret;
 }
 EXPORT_SYMBOL(nvm_register);
 
@@ -964,6 +1215,7 @@
 		if (t->dev->parent != dev)
 			continue;
 		__nvm_remove_target(t, false);
+		kref_put(&dev->ref, nvm_free);
 	}
 	mutex_unlock(&dev->mlock);
 
@@ -971,24 +1223,30 @@
 	list_del(&dev->devices);
 	up_write(&nvm_lock);
 
-	nvm_free(dev);
+	kref_put(&dev->ref, nvm_free);
 }
 EXPORT_SYMBOL(nvm_unregister);
 
 static int __nvm_configure_create(struct nvm_ioctl_create *create)
 {
 	struct nvm_dev *dev;
+	int ret;
 
 	down_write(&nvm_lock);
 	dev = nvm_find_nvm_dev(create->dev);
 	up_write(&nvm_lock);
 
 	if (!dev) {
-		pr_err("nvm: device not found\n");
+		pr_err("device not found\n");
 		return -EINVAL;
 	}
 
-	return nvm_create_tgt(dev, create);
+	kref_get(&dev->ref);
+	ret = nvm_create_tgt(dev, create);
+	if (ret)
+		kref_put(&dev->ref, nvm_free);
+
+	return ret;
 }
 
 static long nvm_ioctl_info(struct file *file, void __user *arg)
@@ -1052,8 +1310,9 @@
 		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
 		i++;
 
-		if (i > 31) {
-			pr_err("nvm: max 31 devices can be reported.\n");
+		if (i >= ARRAY_SIZE(devices->info)) {
+			pr_err("max %zd devices can be reported.\n",
+					ARRAY_SIZE(devices->info));
 			break;
 		}
 	}
@@ -1080,7 +1339,7 @@
 
 	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
 	    create.conf.e.rsv != 0) {
-		pr_err("nvm: reserved config field in use\n");
+		pr_err("reserved config field in use\n");
 		return -EINVAL;
 	}
 
@@ -1096,7 +1355,7 @@
 		flags &= ~NVM_TARGET_FACTORY;
 
 		if (flags) {
-			pr_err("nvm: flag not supported\n");
+			pr_err("flag not supported\n");
 			return -EINVAL;
 		}
 	}
@@ -1107,8 +1366,6 @@
 static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
 {
 	struct nvm_ioctl_remove remove;
-	struct nvm_dev *dev;
-	int ret = 0;
 
 	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
 		return -EFAULT;
@@ -1116,17 +1373,11 @@
 	remove.tgtname[DISK_NAME_LEN - 1] = '\0';
 
 	if (remove.flags != 0) {
-		pr_err("nvm: no flags supported\n");
+		pr_err("no flags supported\n");
 		return -EINVAL;
 	}
 
-	list_for_each_entry(dev, &nvm_devices, devices) {
-		ret = nvm_remove_tgt(dev, &remove);
-		if (!ret)
-			break;
-	}
-
-	return ret;
+	return nvm_remove_tgt(&remove);
 }
 
 /* kept for compatibility reasons */
@@ -1138,7 +1389,7 @@
 		return -EFAULT;
 
 	if (init.flags != 0) {
-		pr_err("nvm: no flags supported\n");
+		pr_err("no flags supported\n");
 		return -EINVAL;
 	}
 