@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/uio.h>
+#include <linux/fiemap.h>
 #include "nilfs.h"
 #include "btnode.h"
 #include "segment.h"
@@ -28,12 +29,16 @@
  * @cno: checkpoint number
  * @root: pointer on NILFS root object (mounted checkpoint)
  * @for_gc: inode for GC flag
+ * @for_btnc: inode for B-tree node cache flag
+ * @for_shadow: inode for shadowed page cache flag
  */
 struct nilfs_iget_args {
         u64 ino;
         __u64 cno;
         struct nilfs_root *root;
-        int for_gc;
+        bool for_gc;
+        bool for_btnc;
+        bool for_shadow;
 };
 
 static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -103,10 +108,10 @@
                                  * However, the page having this block must
                                  * be locked in this case.
                                  */
-                                nilfs_msg(inode->i_sb, KERN_WARNING,
-                                          "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
-                                          __func__, inode->i_ino,
-                                          (unsigned long long)blkoff);
+                                nilfs_warn(inode->i_sb,
+                                           "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
+                                           __func__, inode->i_ino,
+                                           (unsigned long long)blkoff);
                                 err = 0;
                         }
                         nilfs_transaction_abort(inode->i_sb);
@@ -145,18 +150,9 @@
         return mpage_readpage(page, nilfs_get_block);
 }
 
-/**
- * nilfs_readpages() - implement readpages() method of nilfs_aops {}
- * address_space_operations.
- * @file - file struct of the file to be read
- * @mapping - address_space struct used for reading multiple pages
- * @pages - the pages to be read
- * @nr_pages - number of pages to be read
- */
-static int nilfs_readpages(struct file *file, struct address_space *mapping,
-                           struct list_head *pages, unsigned int nr_pages)
+static void nilfs_readahead(struct readahead_control *rac)
 {
-        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
+        mpage_readahead(rac, nilfs_get_block);
 }
 
 static int nilfs_writepages(struct address_space *mapping,
@@ -308,7 +304,7 @@
         .readpage = nilfs_readpage,
         .writepages = nilfs_writepages,
         .set_page_dirty = nilfs_set_page_dirty,
-        .readpages = nilfs_readpages,
+        .readahead = nilfs_readahead,
         .write_begin = nilfs_write_begin,
         .write_end = nilfs_write_end,
         /* .releasepage = nilfs_releasepage, */
@@ -322,7 +318,8 @@
                                      unsigned long ino)
 {
         struct nilfs_iget_args args = {
-                .ino = ino, .root = root, .cno = 0, .for_gc = 0
+                .ino = ino, .root = root, .cno = 0, .for_gc = false,
+                .for_btnc = false, .for_shadow = false
         };
 
         return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -335,6 +332,7 @@
         struct inode *inode;
         struct nilfs_inode_info *ii;
         struct nilfs_root *root;
+        struct buffer_head *bh;
         int err = -ENOMEM;
         ino_t ino;
 
@@ -350,10 +348,24 @@
         ii->i_state = BIT(NILFS_I_NEW);
         ii->i_root = root;
 
-        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
+        err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
         if (unlikely(err))
                 goto failed_ifile_create_inode;
         /* reference count of i_bh inherits from nilfs_mdt_read_block() */
+
+        if (unlikely(ino < NILFS_USER_INO)) {
+                nilfs_warn(sb,
+                           "inode bitmap is inconsistent for reserved inodes");
+                do {
+                        brelse(bh);
+                        err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
+                        if (unlikely(err))
+                                goto failed_ifile_create_inode;
+                } while (ino < NILFS_USER_INO);
+
+                nilfs_info(sb, "repaired inode bitmap for reserved inodes");
+        }
+        ii->i_bh = bh;
 
         atomic64_inc(&root->inodes_count);
         inode_init_owner(inode, dir, mode);
@@ -396,7 +408,8 @@
 
 failed_after_creation:
         clear_nlink(inode);
-        unlock_new_inode(inode);
+        if (inode->i_state & I_NEW)
+                unlock_new_inode(inode);
         iput(inode);  /*
                        * raw_inode will be deleted through
                        * nilfs_evict_inode().
@@ -446,6 +459,8 @@
         inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
         inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
         inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+        if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
+                return -EIO; /* this inode is for metadata and corrupted */
         if (inode->i_nlink == 0)
                 return -ESTALE; /* this inode is deleted */
 
@@ -534,6 +549,19 @@
                 return 0;
 
         ii = NILFS_I(inode);
+        if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
+                if (!args->for_btnc)
+                        return 0;
+        } else if (args->for_btnc) {
+                return 0;
+        }
+        if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
+                if (!args->for_shadow)
+                        return 0;
+        } else if (args->for_shadow) {
+                return 0;
+        }
+
         if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                 return !args->for_gc;
 
@@ -545,15 +573,17 @@
         struct nilfs_iget_args *args = opaque;
 
         inode->i_ino = args->ino;
-        if (args->for_gc) {
+        NILFS_I(inode)->i_cno = args->cno;
+        NILFS_I(inode)->i_root = args->root;
+        if (args->root && args->ino == NILFS_ROOT_INO)
+                nilfs_get_root(args->root);
+
+        if (args->for_gc)
                 NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
-                NILFS_I(inode)->i_cno = args->cno;
-                NILFS_I(inode)->i_root = NULL;
-        } else {
-                if (args->root && args->ino == NILFS_ROOT_INO)
-                        nilfs_get_root(args->root);
-                NILFS_I(inode)->i_root = args->root;
-        }
+        if (args->for_btnc)
+                NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
+        if (args->for_shadow)
+                NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
         return 0;
 }
 
@@ -561,7 +591,8 @@
                             unsigned long ino)
 {
         struct nilfs_iget_args args = {
-                .ino = ino, .root = root, .cno = 0, .for_gc = 0
+                .ino = ino, .root = root, .cno = 0, .for_gc = false,
+                .for_btnc = false, .for_shadow = false
         };
 
         return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -571,7 +602,8 @@
                             unsigned long ino)
 {
         struct nilfs_iget_args args = {
-                .ino = ino, .root = root, .cno = 0, .for_gc = 0
+                .ino = ino, .root = root, .cno = 0, .for_gc = false,
+                .for_btnc = false, .for_shadow = false
         };
 
         return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -602,7 +634,8 @@
                                 __u64 cno)
 {
         struct nilfs_iget_args args = {
-                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
+                .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
+                .for_btnc = false, .for_shadow = false
         };
         struct inode *inode;
         int err;
@@ -620,6 +653,113 @@
         }
         unlock_new_inode(inode);
         return inode;
+}
+
+/**
+ * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
+ * @inode: inode object
+ *
+ * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
+ * or does nothing if the inode already has it. This function allocates
+ * an additional inode to maintain page cache of B-tree nodes one-on-one.
+ *
+ * Return Value: On success, 0 is returned. On errors, one of the following
+ * negative error code is returned.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+int nilfs_attach_btree_node_cache(struct inode *inode)
+{
+        struct nilfs_inode_info *ii = NILFS_I(inode);
+        struct inode *btnc_inode;
+        struct nilfs_iget_args args;
+
+        if (ii->i_assoc_inode)
+                return 0;
+
+        args.ino = inode->i_ino;
+        args.root = ii->i_root;
+        args.cno = ii->i_cno;
+        args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
+        args.for_btnc = true;
+        args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
+
+        btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                                  nilfs_iget_set, &args);
+        if (unlikely(!btnc_inode))
+                return -ENOMEM;
+        if (btnc_inode->i_state & I_NEW) {
+                nilfs_init_btnc_inode(btnc_inode);
+                unlock_new_inode(btnc_inode);
+        }
+        NILFS_I(btnc_inode)->i_assoc_inode = inode;
+        NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
+        ii->i_assoc_inode = btnc_inode;
+
+        return 0;
+}
+
+/**
+ * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
+ * @inode: inode object
+ *
+ * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
+ * holder inode bound to @inode, or does nothing if @inode doesn't have it.
+ */
+void nilfs_detach_btree_node_cache(struct inode *inode)
+{
+        struct nilfs_inode_info *ii = NILFS_I(inode);
+        struct inode *btnc_inode = ii->i_assoc_inode;
+
+        if (btnc_inode) {
+                NILFS_I(btnc_inode)->i_assoc_inode = NULL;
+                ii->i_assoc_inode = NULL;
+                iput(btnc_inode);
+        }
+}
+
+/**
+ * nilfs_iget_for_shadow - obtain inode for shadow mapping
+ * @inode: inode object that uses shadow mapping
+ *
+ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
+ * caches for shadow mapping. The page cache for data pages is set up
+ * in one inode and the one for b-tree node pages is set up in the
+ * other inode, which is attached to the former inode.
+ *
+ * Return Value: On success, a pointer to the inode for data pages is
+ * returned. On errors, one of the following negative error code is returned
+ * in a pointer type.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+struct inode *nilfs_iget_for_shadow(struct inode *inode)
+{
+        struct nilfs_iget_args args = {
+                .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
+                .for_btnc = false, .for_shadow = true
+        };
+        struct inode *s_inode;
+        int err;
+
+        s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                               nilfs_iget_set, &args);
+        if (unlikely(!s_inode))
+                return ERR_PTR(-ENOMEM);
+        if (!(s_inode->i_state & I_NEW))
+                return inode;
+
+        NILFS_I(s_inode)->i_flags = 0;
+        memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
+        mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+
+        err = nilfs_attach_btree_node_cache(s_inode);
+        if (unlikely(err)) {
+                iget_failed(s_inode);
+                return ERR_PTR(err);
+        }
+        unlock_new_inode(s_inode);
+        return s_inode;
 }
 
 void nilfs_write_inode_common(struct inode *inode,
@@ -714,9 +854,8 @@
                 goto repeat;
 
 failed:
-        nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
-                  "error %d truncating bmap (ino=%lu)", ret,
-                  ii->vfs_inode.i_ino);
+        nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
+                   ret, ii->vfs_inode.i_ino);
 }
 
 void nilfs_truncate(struct inode *inode)
@@ -770,7 +909,8 @@
         if (test_bit(NILFS_I_BMAP, &ii->i_state))
                 nilfs_bmap_clear(ii->i_bmap);
 
-        nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+        if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+                nilfs_detach_btree_node_cache(inode);
 
         if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                 nilfs_put_root(ii->i_root);
@@ -781,6 +921,7 @@
         struct nilfs_transaction_info ti;
         struct super_block *sb = inode->i_sb;
         struct nilfs_inode_info *ii = NILFS_I(inode);
+        struct the_nilfs *nilfs;
         int ret;
 
         if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
@@ -792,6 +933,23 @@
         nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 
         truncate_inode_pages_final(&inode->i_data);
+
+        nilfs = sb->s_fs_info;
+        if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
+                /*
+                 * If this inode is about to be disposed after the file system
+                 * has been degraded to read-only due to file system corruption
+                 * or after the writer has been detached, do not make any
+                 * changes that cause writes, just clear it.
+                 * Do this check after read-locking ns_segctor_sem by
+                 * nilfs_transaction_begin() in order to avoid a race with
+                 * the writer detach operation.
+                 */
+                clear_inode(inode);
+                nilfs_clear_inode(inode);
+                nilfs_transaction_abort(sb);
+                return;
+        }
 
         /* TODO: some of the following operations may fail. */
         nilfs_truncate_bmap(ii, 0);
@@ -869,7 +1027,7 @@
         int err;
 
         spin_lock(&nilfs->ns_inode_lock);
-        if (ii->i_bh == NULL) {
+        if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
                 spin_unlock(&nilfs->ns_inode_lock);
                 err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                   inode->i_ino, pbh);
@@ -878,7 +1036,10 @@
                 spin_lock(&nilfs->ns_inode_lock);
                 if (ii->i_bh == NULL)
                         ii->i_bh = *pbh;
-                else {
+                else if (unlikely(!buffer_uptodate(ii->i_bh))) {
+                        __brelse(ii->i_bh);
+                        ii->i_bh = *pbh;
+                } else {
                         brelse(*pbh);
                         *pbh = ii->i_bh;
                 }
@@ -927,9 +1088,9 @@
                          * This will happen when somebody is freeing
                          * this inode.
                          */
-                        nilfs_msg(inode->i_sb, KERN_WARNING,
-                                  "cannot set file dirty (ino=%lu): the file is being freed",
-                                  inode->i_ino);
+                        nilfs_warn(inode->i_sb,
+                                   "cannot set file dirty (ino=%lu): the file is being freed",
+                                   inode->i_ino);
                         spin_unlock(&nilfs->ns_inode_lock);
                         return -EINVAL; /*
                                          * NILFS_I_DIRTY may remain for
@@ -945,14 +1106,22 @@
 
 int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 {
+        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
         struct buffer_head *ibh;
         int err;
 
+        /*
+         * Do not dirty inodes after the log writer has been detached
+         * and its nilfs_root struct has been freed.
+         */
+        if (unlikely(nilfs_purging(nilfs)))
+                return 0;
+
         err = nilfs_load_inode_block(inode, &ibh);
         if (unlikely(err)) {
-                nilfs_msg(inode->i_sb, KERN_WARNING,
-                          "cannot mark inode dirty (ino=%lu): error %d loading inode block",
-                          inode->i_ino, err);
+                nilfs_warn(inode->i_sb,
+                           "cannot mark inode dirty (ino=%lu): error %d loading inode block",
+                           inode->i_ino, err);
                 return err;
         }
         nilfs_update_inode(inode, ibh, flags);
@@ -978,8 +1147,8 @@
         struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
         if (is_bad_inode(inode)) {
-                nilfs_msg(inode->i_sb, KERN_WARNING,
-                          "tried to mark bad_inode dirty. ignored.");
+                nilfs_warn(inode->i_sb,
+                           "tried to mark bad_inode dirty. ignored.");
                 dump_stack();
                 return;
         }
@@ -1005,7 +1174,7 @@
         unsigned int blkbits = inode->i_blkbits;
         int ret, n;
 
-        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+        ret = fiemap_prep(inode, fieinfo, start, &len, 0);
         if (ret)
                 return ret;
 
---|