@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
  * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
 */
+
+#define pr_fmt(fmt) "nvm: " fmt
 
 #include <linux/list.h>
 #include <linux/types.h>
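The pr_fmt() define above is what lets every hunk below drop the hard-coded "nvm: " prefix from its pr_err()/pr_info()/pr_debug() strings: the kernel's pr_*() macros paste pr_fmt() around the format string at compile time, so the prefix is applied once for the whole file. A minimal userspace sketch of the same mechanism (fprintf stands in for printk; the macro expansion works the same way):

    #include <stdio.h>

    /* Must be defined before any pr_*() user, as in the patch. */
    #define pr_fmt(fmt) "nvm: " fmt
    #define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
        /* String-literal concatenation prints "nvm: lun 3 already allocated" */
        pr_err("lun %d already allocated\n", 3);
        return 0;
    }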
@@ -44,6 +32,8 @@
 	struct nvm_ch_map *chnls;
 	int num_ch;
 };
+
+static void nvm_free(struct kref *ref);
 
 static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
 {
@@ -86,7 +76,7 @@
 
 	for (i = lun_begin; i <= lun_end; i++) {
 		if (test_and_set_bit(i, dev->lun_map)) {
-			pr_err("nvm: lun %d already allocated\n", i);
+			pr_err("lun %d already allocated\n", i);
 			goto err;
 		}
 	}
@@ -246,10 +236,6 @@
 	return tgt_dev;
 }
 
-static const struct block_device_operations nvm_fops = {
-	.owner		= THIS_MODULE,
-};
-
 static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
 {
 	struct nvm_tgt_type *tt;
@@ -276,7 +262,7 @@
 			      int lun_end)
 {
 	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
-		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
+		pr_err("lun out of bound (%u:%u > %u)\n",
 			lun_begin, lun_end, geo->all_luns - 1);
 		return -EINVAL;
 	}
@@ -309,7 +295,7 @@
 	if (e->op == 0xFFFF) {
 		e->op = NVM_TARGET_DEFAULT_OP;
 	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
-		pr_err("nvm: invalid over provisioning value\n");
+		pr_err("invalid over provisioning value\n");
 		return -EINVAL;
 	}
 
@@ -325,6 +311,7 @@
 	struct nvm_target *t;
 	struct nvm_tgt_dev *tgt_dev;
 	void *targetdata;
+	unsigned int mdts;
 	int ret;
 
 	switch (create->conf.type) {
@@ -345,18 +332,23 @@
 		e = create->conf.e;
 		break;
 	default:
-		pr_err("nvm: config type not valid\n");
+		pr_err("config type not valid\n");
 		return -EINVAL;
 	}
 
 	tt = nvm_find_target_type(create->tgttype);
 	if (!tt) {
-		pr_err("nvm: target type %s not found\n", create->tgttype);
+		pr_err("target type %s not found\n", create->tgttype);
+		return -EINVAL;
+	}
+
+	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
+		pr_err("device is incompatible with target L2P type.\n");
 		return -EINVAL;
 	}
 
 	if (nvm_target_exists(create->tgtname)) {
-		pr_err("nvm: target name already exists (%s)\n",
+		pr_err("target name already exists (%s)\n",
 			create->tgtname);
 		return -EINVAL;
 	}
@@ -373,7 +365,7 @@
 
 	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
 	if (!tgt_dev) {
-		pr_err("nvm: could not create target device\n");
+		pr_err("could not create target device\n");
 		ret = -ENOMEM;
 		goto err_t;
 	}
@@ -384,18 +376,17 @@
 		goto err_dev;
 	}
 
-	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
+	tqueue = blk_alloc_queue(dev->q->node);
 	if (!tqueue) {
 		ret = -ENOMEM;
 		goto err_disk;
 	}
-	blk_queue_make_request(tqueue, tt->make_rq);
 
 	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
 	tdisk->flags = GENHD_FL_EXT_DEVT;
 	tdisk->major = 0;
 	tdisk->first_minor = 0;
-	tdisk->fops = &nvm_fops;
+	tdisk->fops = tt->bops;
 	tdisk->queue = tqueue;
 
 	targetdata = tt->init(tgt_dev, tdisk, create->flags);
@@ -407,8 +398,12 @@
 	tdisk->private_data = targetdata;
 	tqueue->queuedata = targetdata;
 
-	blk_queue_max_hw_sectors(tqueue,
-			(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
+	if (dev->geo.mdts) {
+		mdts = min_t(u32, dev->geo.mdts,
+				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+	}
+	blk_queue_max_hw_sectors(tqueue, mdts);
 
 	set_capacity(tdisk, tt->capacity(targetdata));
 	add_disk(tdisk);
@@ -471,7 +466,6 @@
 
 /**
  * nvm_remove_tgt - Removes a target from the media manager
- * @dev:	device
  * @remove:	ioctl structure with target name to remove.
  *
  * Returns:
@@ -479,18 +473,31 @@
  * 1: on not found
  * <0: on error
  */
-static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
 {
-	struct nvm_target *t;
+	struct nvm_target *t = NULL;
+	struct nvm_dev *dev;
 
-	mutex_lock(&dev->mlock);
-	t = nvm_find_target(dev, remove->tgtname);
-	if (!t) {
+	down_read(&nvm_lock);
+	list_for_each_entry(dev, &nvm_devices, devices) {
+		mutex_lock(&dev->mlock);
+		t = nvm_find_target(dev, remove->tgtname);
+		if (t) {
+			mutex_unlock(&dev->mlock);
+			break;
+		}
 		mutex_unlock(&dev->mlock);
+	}
+	up_read(&nvm_lock);
+
+	if (!t) {
+		pr_err("failed to remove target %s\n",
+				remove->tgtname);
 		return 1;
 	}
+
 	__nvm_remove_target(t, true);
-	mutex_unlock(&dev->mlock);
+	kref_put(&dev->ref, nvm_free);
 
 	return 0;
 }
@@ -598,22 +605,16 @@
 
 static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
-	if (rqd->nr_ppas == 1) {
-		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
-		return;
-	}
+	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 
-	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
 }
 
 static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
-	if (rqd->nr_ppas == 1) {
-		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
-		return;
-	}
+	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 
-	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
 }
 
 int nvm_register_tgt_type(struct nvm_tgt_type *tt)
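Both translation helpers above lose their single-PPA special case in favor of nvm_rq_to_ppa_list(). As a sketch of what that accessor does (its actual definition lives in the lightnvm header, not in this file): a one-sector request stores its address inline in the request, a vectored request uses a DMA-able list, and the helper picks the right one so callers no longer branch on nr_ppas:

    /* Sketch of the accessor assumed by the two hunks above. */
    static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
    {
        return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
    }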
@@ -685,7 +686,7 @@
 	rqd->nr_ppas = nr_ppas;
 	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
 	if (!rqd->ppa_list) {
-		pr_err("nvm: failed to allocate dma memory\n");
+		pr_err("failed to allocate dma memory\n");
 		return -ENOMEM;
 	}
 
@@ -712,47 +713,25 @@
 	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 }
 
-int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta,
-		       struct ppa_addr ppa, int nchks)
+static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
 {
-	struct nvm_dev *dev = tgt_dev->parent;
+	int flags = 0;
 
-	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
+	if (geo->version == NVM_OCSSD_SPEC_20)
+		return 0;
 
-	return dev->ops->get_chk_meta(tgt_dev->parent, meta,
-						(sector_t)ppa.ppa, nchks);
+	if (rqd->is_seq)
+		flags |= geo->pln_mode >> 1;
+
+	if (rqd->opcode == NVM_OP_PREAD)
+		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
+	else if (rqd->opcode == NVM_OP_PWRITE)
+		flags |= NVM_IO_SCRAMBLE_ENABLE;
+
+	return flags;
 }
-EXPORT_SYMBOL(nvm_get_chunk_meta);
 
-int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
-		       int nr_ppas, int type)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
-	struct nvm_rq rqd;
-	int ret;
-
-	if (nr_ppas > NVM_MAX_VLBA) {
-		pr_err("nvm: unable to update all blocks atomically\n");
-		return -EINVAL;
-	}
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
-	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
-
-	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-	nvm_free_rqd_ppalist(tgt_dev, &rqd);
-	if (ret) {
-		pr_err("nvm: failed bb mark\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
-
-int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 	int ret;
@@ -763,30 +742,56 @@
 	nvm_rq_tgt_to_dev(tgt_dev, rqd);
 
 	rqd->dev = tgt_dev;
+	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 
 	/* In case of error, fail with right address format */
-	ret = dev->ops->submit_io(dev, rqd);
+	ret = dev->ops->submit_io(dev, rqd, buf);
 	if (ret)
 		nvm_rq_dev_to_tgt(tgt_dev, rqd);
 	return ret;
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
-int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+static void nvm_sync_end_io(struct nvm_rq *rqd)
+{
+	struct completion *waiting = rqd->private;
+
+	complete(waiting);
+}
+
+static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
+			      void *buf)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	int ret = 0;
+
+	rqd->end_io = nvm_sync_end_io;
+	rqd->private = &wait;
+
+	ret = dev->ops->submit_io(dev, rqd, buf);
+	if (ret)
+		return ret;
+
+	wait_for_completion_io(&wait);
+
+	return 0;
+}
+
+int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+		       void *buf)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 	int ret;
 
-	if (!dev->ops->submit_io_sync)
+	if (!dev->ops->submit_io)
 		return -ENODEV;
 
 	nvm_rq_tgt_to_dev(tgt_dev, rqd);
 
 	rqd->dev = tgt_dev;
+	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 
-	/* In case of error, fail with right address format */
-	ret = dev->ops->submit_io_sync(dev, rqd);
-	nvm_rq_dev_to_tgt(tgt_dev, rqd);
+	ret = nvm_submit_io_wait(dev, rqd, buf);
 
 	return ret;
 }
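The hunk above removes the driver's dedicated ->submit_io_sync() hook and instead builds a synchronous submit out of the asynchronous one plus an on-stack completion: the end_io callback fires from the device's completion path and wakes the sleeping submitter. A userspace sketch of that pattern, with pthreads standing in for the kernel completion and a hypothetical submit_async()/end_io request shape standing in for struct nvm_rq:

    #include <pthread.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's struct completion. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    /* Hypothetical request: the async layer invokes end_io(private) when done. */
    struct request {
        void (*end_io)(void *private);
        void *private;
    };

    static void sync_end_io(void *private)     /* plays nvm_sync_end_io() */
    {
        complete(private);
    }

    /* Build a sync submit from an async one, as nvm_submit_io_wait() does. */
    static int submit_sync(struct request *rq,
                           int (*submit_async)(struct request *))
    {
        struct completion wait = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };
        int ret;

        rq->end_io = sync_end_io;
        rq->private = &wait;

        ret = submit_async(rq);
        if (ret)
            return ret;

        wait_for_completion(&wait);
        return 0;
    }

    /* Toy async backend: completes the request immediately. */
    static int fake_submit_async(struct request *rq)
    {
        rq->end_io(rq->private);
        return 0;
    }

    int main(void)
    {
        struct request rq;

        printf("submit_sync -> %d\n", submit_sync(&rq, fake_submit_async));
        return 0;
    }

The trade-off is one mechanism instead of two: drivers only need to implement the async path, and the synchronous variant costs one sleeping wait per call.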
@@ -805,27 +810,159 @@
 }
 EXPORT_SYMBOL(nvm_end_io);
 
+static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	if (!dev->ops->submit_io)
+		return -ENODEV;
+
+	rqd->dev = NULL;
+	rqd->flags = nvm_set_flags(&dev->geo, rqd);
+
+	return nvm_submit_io_wait(dev, rqd, NULL);
+}
+
+static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+	struct nvm_rq rqd = { NULL };
+	struct bio bio;
+	struct bio_vec bio_vec;
+	struct page *page;
+	int ret;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	bio_init(&bio, &bio_vec, 1);
+	bio_add_page(&bio, page, PAGE_SIZE, 0);
+	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+
+	rqd.bio = &bio;
+	rqd.opcode = NVM_OP_PREAD;
+	rqd.is_seq = 1;
+	rqd.nr_ppas = 1;
+	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
+
+	ret = nvm_submit_io_sync_raw(dev, &rqd);
+	__free_page(page);
+	if (ret)
+		return ret;
+
+	return rqd.error;
+}
+
 /*
- * folds a bad block list from its plane representation to its virtual
- * block representation. The fold is done in place and reduced size is
- * returned.
- *
- * If any of the planes status are bad or grown bad block, the virtual block
- * is marked bad. If not bad, the first plane state acts as the block state.
+ * Scans a 1.2 chunk's first and last page to determine its state.
+ * If the chunk is found to be open, also scan it to update the write
+ * pointer.
 */
-int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
+			     struct nvm_chk_meta *meta)
 {
 	struct nvm_geo *geo = &dev->geo;
-	int blk, offset, pl, blktype;
+	int ret, pg, pl;
 
-	if (nr_blks != geo->num_chk * geo->pln_mode)
-		return -EINVAL;
+	/* sense first page */
+	ret = nvm_bb_chunk_sense(dev, ppa);
+	if (ret < 0) /* io error */
+		return ret;
+	else if (ret == 0) /* valid data */
+		meta->state = NVM_CHK_ST_OPEN;
+	else if (ret > 0) {
+		/*
+		 * If empty page, the chunk is free, else it is an
+		 * actual io error. In that case, mark it offline.
+		 */
+		switch (ret) {
+		case NVM_RSP_ERR_EMPTYPAGE:
+			meta->state = NVM_CHK_ST_FREE;
+			return 0;
+		case NVM_RSP_ERR_FAILCRC:
+		case NVM_RSP_ERR_FAILECC:
+		case NVM_RSP_WARN_HIGHECC:
+			meta->state = NVM_CHK_ST_OPEN;
+			goto scan;
+		default:
+			return -ret; /* other io error */
+		}
+	}
+
+	/* sense last page */
+	ppa.g.pg = geo->num_pg - 1;
+	ppa.g.pl = geo->num_pln - 1;
+
+	ret = nvm_bb_chunk_sense(dev, ppa);
+	if (ret < 0) /* io error */
+		return ret;
+	else if (ret == 0) { /* chunk fully written */
+		meta->state = NVM_CHK_ST_CLOSED;
+		meta->wp = geo->clba;
+		return 0;
+	} else if (ret > 0) {
+		switch (ret) {
+		case NVM_RSP_ERR_EMPTYPAGE:
+		case NVM_RSP_ERR_FAILCRC:
+		case NVM_RSP_ERR_FAILECC:
+		case NVM_RSP_WARN_HIGHECC:
+			meta->state = NVM_CHK_ST_OPEN;
+			break;
+		default:
+			return -ret; /* other io error */
+		}
+	}
+
+scan:
+	/*
+	 * Chunk is open; scan sequentially to update the write pointer.
+	 * We make the assumption that targets write data across all planes
+	 * before moving to the next page.
+	 */
+	for (pg = 0; pg < geo->num_pg; pg++) {
+		for (pl = 0; pl < geo->num_pln; pl++) {
+			ppa.g.pg = pg;
+			ppa.g.pl = pl;
+
+			ret = nvm_bb_chunk_sense(dev, ppa);
+			if (ret < 0) /* io error */
+				return ret;
+			else if (ret == 0) {
+				meta->wp += geo->ws_min;
+			} else if (ret > 0) {
+				switch (ret) {
+				case NVM_RSP_ERR_EMPTYPAGE:
+					return 0;
+				case NVM_RSP_ERR_FAILCRC:
+				case NVM_RSP_ERR_FAILECC:
+				case NVM_RSP_WARN_HIGHECC:
+					meta->wp += geo->ws_min;
+					break;
+				default:
+					return -ret; /* other io error */
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Folds a bad block list from its plane representation to its
+ * chunk representation.
+ *
+ * If any of the planes is marked bad or grown bad, the chunk is marked
+ * offline. If not, the first plane's state acts as the chunk state.
+ */
+static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
+			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
+{
+	struct nvm_geo *geo = &dev->geo;
+	int ret, blk, pl, offset, blktype;
 
 	for (blk = 0; blk < geo->num_chk; blk++) {
 		offset = blk * geo->pln_mode;
 		blktype = blks[offset];
 
-		/* Bad blocks on any planes take precedence over other types */
 		for (pl = 0; pl < geo->pln_mode; pl++) {
 			if (blks[offset + pl] &
 					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
@@ -834,23 +971,124 @@
 			}
 		}
 
-		blks[blk] = blktype;
+		ppa.g.blk = blk;
+
+		meta->wp = 0;
+		meta->type = NVM_CHK_TP_W_SEQ;
+		meta->wi = 0;
+		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
+		meta->cnlb = dev->geo.clba;
+
+		if (blktype == NVM_BLK_T_FREE) {
+			ret = nvm_bb_chunk_scan(dev, ppa, meta);
+			if (ret)
+				return ret;
+		} else {
+			meta->state = NVM_CHK_ST_OFFLINE;
+		}
+
+		meta++;
 	}
 
-	return geo->num_chk;
+	return 0;
 }
-EXPORT_SYMBOL(nvm_bb_tbl_fold);
 
-int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
-		       u8 *blks)
+static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
+			   int nchks, struct nvm_chk_meta *meta)
+{
+	struct nvm_geo *geo = &dev->geo;
+	struct ppa_addr ppa;
+	u8 *blks;
+	int ch, lun, nr_blks;
+	int ret = 0;
+
+	ppa.ppa = slba;
+	ppa = dev_to_generic_addr(dev, ppa);
+
+	if (ppa.g.blk != 0)
+		return -EINVAL;
+
+	if ((nchks % geo->num_chk) != 0)
+		return -EINVAL;
+
+	nr_blks = geo->num_chk * geo->pln_mode;
+
+	blks = kmalloc(nr_blks, GFP_KERNEL);
+	if (!blks)
+		return -ENOMEM;
+
+	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
+		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
+			struct ppa_addr ppa_gen, ppa_dev;
+
+			if (!nchks)
+				goto done;
+
+			ppa_gen.ppa = 0;
+			ppa_gen.g.ch = ch;
+			ppa_gen.g.lun = lun;
+			ppa_dev = generic_to_dev_addr(dev, ppa_gen);
+
+			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
+			if (ret)
+				goto done;
+
+			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
+					      meta);
+			if (ret)
+				goto done;
+
+			meta += geo->num_chk;
+			nchks -= geo->num_chk;
+		}
+	}
+done:
+	kfree(blks);
+	return ret;
+}
+
+int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+		       int nchks, struct nvm_chk_meta *meta)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 
 	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
 
-	return dev->ops->get_bb_tbl(dev, ppa, blks);
+	if (dev->geo.version == NVM_OCSSD_SPEC_12)
+		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+
+	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
 }
-EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
+
+int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+		       int nr_ppas, int type)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_rq rqd;
+	int ret;
+
+	if (dev->geo.version == NVM_OCSSD_SPEC_20)
+		return 0;
+
+	if (nr_ppas > NVM_MAX_VLBA) {
+		pr_err("unable to update all blocks atomically\n");
+		return -EINVAL;
+	}
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
+	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+	nvm_free_rqd_ppalist(tgt_dev, &rqd);
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
 
 static int nvm_core_init(struct nvm_dev *dev)
 {
@@ -877,15 +1115,16 @@
 	return ret;
 }
 
-static void nvm_free(struct nvm_dev *dev)
+static void nvm_free(struct kref *ref)
 {
-	if (!dev)
-		return;
+	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);
 
 	if (dev->dma_pool)
 		dev->ops->destroy_dma_pool(dev->dma_pool);
 
-	nvm_unregister_map(dev);
+	if (dev->rmap)
+		nvm_unregister_map(dev);
+
 	kfree(dev->lun_map);
 	kfree(dev);
 }
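Several hunks in this patch convert struct nvm_dev from manual freeing to kref-based reference counting: the device starts at one reference (kref_init() in nvm_alloc_dev() below), each created target takes one more (kref_get() in __nvm_configure_create()), and every kref_put() names nvm_free() as the release callback, so whichever of unregister or target removal drops the last reference frees the device. A minimal userspace analogue of that lifetime rule, using C11 atomics for the counter (dev_get/dev_put are illustrative names):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        atomic_int ref;
        /* ... device state ... */
    };

    static void dev_free(struct dev *d)  /* plays the role of nvm_free() */
    {
        printf("last reference dropped, freeing device\n");
        free(d);
    }

    static void dev_get(struct dev *d)   /* kref_get() */
    {
        atomic_fetch_add(&d->ref, 1);
    }

    static void dev_put(struct dev *d)   /* kref_put(&d->ref, dev_free) */
    {
        if (atomic_fetch_sub(&d->ref, 1) == 1)
            dev_free(d);
    }

    int main(void)
    {
        struct dev *d = malloc(sizeof(*d));

        atomic_init(&d->ref, 1);    /* kref_init() in nvm_alloc_dev() */
        dev_get(d);                 /* target creation takes a reference */
        dev_put(d);                 /* target removed */
        dev_put(d);                 /* nvm_unregister(): freed here */
        return 0;
    }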
@@ -896,52 +1135,67 @@
 	int ret = -EINVAL;
 
 	if (dev->ops->identity(dev)) {
-		pr_err("nvm: device could not be identified\n");
+		pr_err("device could not be identified\n");
 		goto err;
 	}
 
-	pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
-			geo->major_ver_id, geo->minor_ver_id,
-			geo->vmnt);
+	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
+			geo->minor_ver_id, geo->vmnt);
 
 	ret = nvm_core_init(dev);
 	if (ret) {
-		pr_err("nvm: could not initialize core structures.\n");
+		pr_err("could not initialize core structures.\n");
 		goto err;
 	}
 
-	pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
+	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
 			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
 			dev->geo.num_chk, dev->geo.all_luns,
 			dev->geo.num_ch);
 	return 0;
 err:
-	pr_err("nvm: failed to initialize nvm\n");
+	pr_err("failed to initialize nvm\n");
 	return ret;
 }
 
 struct nvm_dev *nvm_alloc_dev(int node)
 {
-	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+	struct nvm_dev *dev;
+
+	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+	if (dev)
+		kref_init(&dev->ref);
+
+	return dev;
 }
 EXPORT_SYMBOL(nvm_alloc_dev);
 
 int nvm_register(struct nvm_dev *dev)
 {
-	int ret;
+	int ret, exp_pool_size;
 
-	if (!dev->q || !dev->ops)
+	if (!dev->q || !dev->ops) {
+		kref_put(&dev->ref, nvm_free);
 		return -EINVAL;
-
-	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
-	if (!dev->dma_pool) {
-		pr_err("nvm: could not create dma pool\n");
-		return -ENOMEM;
 	}
 
 	ret = nvm_init(dev);
-	if (ret)
-		goto err_init;
+	if (ret) {
+		kref_put(&dev->ref, nvm_free);
+		return ret;
+	}
+
+	exp_pool_size = max_t(int, PAGE_SIZE,
+			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
+	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);
+
+	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
+						  exp_pool_size);
+	if (!dev->dma_pool) {
+		pr_err("could not create dma pool\n");
+		kref_put(&dev->ref, nvm_free);
+		return -ENOMEM;
	}
 
 	/* register device with a supported media manager */
 	down_write(&nvm_lock);
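The DMA pool is now sized up front for the worst-case vectored request: one PPA entry (a u64) plus the per-sector OOB area (geo.sos) for each of the NVM_MAX_VLBA sectors, floored at one page and rounded up to a page multiple. A quick worked example with assumed values (NVM_MAX_VLBA = 64 as in the lightnvm headers; sos = 16 bytes is a made-up device parameter):

    #include <stdio.h>

    #define PAGE_SIZE     4096
    #define NVM_MAX_VLBA  64    /* max sectors in one vectored command */

    static long round_up_to(long v, long align)
    {
        return (v + align - 1) / align * align;
    }

    int main(void)
    {
        long sos = 16;  /* assumed per-sector OOB size */
        long exp_pool_size = NVM_MAX_VLBA * (sizeof(unsigned long long) + sos);

        if (exp_pool_size < PAGE_SIZE)  /* max_t(int, PAGE_SIZE, ...) */
            exp_pool_size = PAGE_SIZE;
        exp_pool_size = round_up_to(exp_pool_size, PAGE_SIZE);

        /* 64 * (8 + 16) = 1536 -> floored to 4096 -> stays 4096 */
        printf("pool size: %ld bytes\n", exp_pool_size);
        return 0;
    }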
@@ -949,9 +1203,6 @@
 	up_write(&nvm_lock);
 
 	return 0;
-err_init:
-	dev->ops->destroy_dma_pool(dev->dma_pool);
-	return ret;
 }
 EXPORT_SYMBOL(nvm_register);
 
@@ -964,6 +1215,7 @@
 		if (t->dev->parent != dev)
 			continue;
 		__nvm_remove_target(t, false);
+		kref_put(&dev->ref, nvm_free);
 	}
 	mutex_unlock(&dev->mlock);
 
@@ -971,24 +1223,30 @@
 	list_del(&dev->devices);
 	up_write(&nvm_lock);
 
-	nvm_free(dev);
+	kref_put(&dev->ref, nvm_free);
 }
 EXPORT_SYMBOL(nvm_unregister);
 
 static int __nvm_configure_create(struct nvm_ioctl_create *create)
 {
 	struct nvm_dev *dev;
+	int ret;
 
 	down_write(&nvm_lock);
 	dev = nvm_find_nvm_dev(create->dev);
 	up_write(&nvm_lock);
 
 	if (!dev) {
-		pr_err("nvm: device not found\n");
+		pr_err("device not found\n");
 		return -EINVAL;
 	}
 
-	return nvm_create_tgt(dev, create);
+	kref_get(&dev->ref);
+	ret = nvm_create_tgt(dev, create);
+	if (ret)
+		kref_put(&dev->ref, nvm_free);
+
+	return ret;
 }
 
 static long nvm_ioctl_info(struct file *file, void __user *arg)
@@ -1052,8 +1310,9 @@
 		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
 		i++;
 
-		if (i > 31) {
-			pr_err("nvm: max 31 devices can be reported.\n");
+		if (i >= ARRAY_SIZE(devices->info)) {
+			pr_err("max %zd devices can be reported.\n",
+			       ARRAY_SIZE(devices->info));
 			break;
 		}
 	}
@@ -1080,7 +1339,7 @@
 
 	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
 	    create.conf.e.rsv != 0) {
-		pr_err("nvm: reserved config field in use\n");
+		pr_err("reserved config field in use\n");
 		return -EINVAL;
 	}
 
@@ -1096,7 +1355,7 @@
 		flags &= ~NVM_TARGET_FACTORY;
 
 		if (flags) {
-			pr_err("nvm: flag not supported\n");
+			pr_err("flag not supported\n");
 			return -EINVAL;
 		}
 	}
@@ -1107,8 +1366,6 @@
 static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
 {
 	struct nvm_ioctl_remove remove;
-	struct nvm_dev *dev;
-	int ret = 0;
 
 	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
 		return -EFAULT;
@@ -1116,17 +1373,11 @@
 	remove.tgtname[DISK_NAME_LEN - 1] = '\0';
 
 	if (remove.flags != 0) {
-		pr_err("nvm: no flags supported\n");
+		pr_err("no flags supported\n");
 		return -EINVAL;
 	}
 
-	list_for_each_entry(dev, &nvm_devices, devices) {
-		ret = nvm_remove_tgt(dev, &remove);
-		if (!ret)
-			break;
-	}
-
-	return ret;
+	return nvm_remove_tgt(&remove);
 }
 
 /* kept for compatibility reasons */
@@ -1138,7 +1389,7 @@
 		return -EFAULT;
 
 	if (init.flags != 0) {
-		pr_err("nvm: no flags supported\n");
+		pr_err("no flags supported\n");
 		return -EINVAL;
 	}
 