@@ -45,6 +45,10 @@
 
 static struct kmem_cache *fsync_entry_slab;
 
+#ifdef CONFIG_UNICODE
+extern struct kmem_cache *f2fs_cf_name_slab;
+#endif
+
 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
 {
 	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
@@ -145,7 +149,7 @@
 		f2fs_hash_filename(dir, fname);
 #ifdef CONFIG_UNICODE
 		/* Case-sensitive match is fine for recovery */
-		kfree(fname->cf_name.name);
+		kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
 		fname->cf_name.name = NULL;
 #endif
 	} else {
@@ -447,7 +451,7 @@
 	struct dnode_of_data tdn = *dn;
 	nid_t ino, nid;
 	struct inode *inode;
-	unsigned int offset;
+	unsigned int offset, ofs_in_node, max_addrs;
 	block_t bidx;
 	int i;
 
@@ -458,6 +462,7 @@
 	/* Get the previous summary */
 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
 		struct curseg_info *curseg = CURSEG_I(sbi, i);
+
 		if (curseg->segno == segno) {
 			sum = curseg->sum_blk->entries[blkoff];
 			goto got_it;
@@ -473,15 +478,24 @@
 got_it:
 	/* Use the locked dnode page and inode */
 	nid = le32_to_cpu(sum.nid);
+	ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+
+	max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode);
+	if (ofs_in_node >= max_addrs) {
+		f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u",
+			ofs_in_node, dn->inode->i_ino, nid, max_addrs);
+		return -EFSCORRUPTED;
+	}
+
 	if (dn->inode->i_ino == nid) {
 		tdn.nid = nid;
 		if (!dn->inode_page_locked)
 			lock_page(dn->inode_page);
 		tdn.node_page = dn->inode_page;
-		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+		tdn.ofs_in_node = ofs_in_node;
 		goto truncate_out;
 	} else if (dn->nid == nid) {
-		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+		tdn.ofs_in_node = ofs_in_node;
 		goto truncate_out;
 	}
 
@@ -589,7 +603,7 @@
 
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 
-	err = f2fs_get_node_info(sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
 	if (err)
 		goto err;
 
@@ -668,6 +682,14 @@
 							DEFAULT_IO_TIMEOUT);
 					goto retry_prev;
 				}
+				goto err;
+			}
+
+			if (f2fs_is_valid_blkaddr(sbi, dest,
+					DATA_GENERIC_ENHANCE_UPDATE)) {
+				f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u",
+					dest, inode->i_ino, dn.ofs_in_node);
+				err = -EFSCORRUPTED;
 				goto err;
 			}
 
@@ -758,7 +780,7 @@
 		f2fs_put_page(page, 1);
 	}
 	if (!err)
-		f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE);
+		f2fs_allocate_new_segments(sbi);
 	return err;
 }
 
@@ -770,6 +792,7 @@
 	int ret = 0;
 	unsigned long s_flags = sbi->sb->s_flags;
 	bool need_writecp = false;
+	bool fix_curseg_write_pointer = false;
 #ifdef CONFIG_QUOTA
 	int quota_enabled;
 #endif
@@ -786,19 +809,12 @@
 	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
 #endif
 
-	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
-			sizeof(struct fsync_inode_entry));
-	if (!fsync_entry_slab) {
-		err = -ENOMEM;
-		goto out;
-	}
-
 	INIT_LIST_HEAD(&inode_list);
 	INIT_LIST_HEAD(&tmp_inode_list);
 	INIT_LIST_HEAD(&dir_list);
 
 	/* prevent checkpoint */
-	mutex_lock(&sbi->cp_mutex);
+	f2fs_down_write(&sbi->cp_global_sem);
 
 	/* step #1: find fsynced inode numbers */
 	err = find_fsync_dnodes(sbi, &inode_list, check_only);
@@ -821,6 +837,8 @@
 		sbi->sb->s_flags = s_flags;
 	}
 skip:
+	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);
+
 	destroy_fsync_dnodes(&inode_list, err);
 	destroy_fsync_dnodes(&tmp_inode_list, err);
 
@@ -831,10 +849,23 @@
 	if (err) {
 		truncate_inode_pages_final(NODE_MAPPING(sbi));
 		truncate_inode_pages_final(META_MAPPING(sbi));
-	} else {
-		clear_sbi_flag(sbi, SBI_POR_DOING);
 	}
-	mutex_unlock(&sbi->cp_mutex);
+
+	/*
+	 * If fsync data succeeds or there is no fsync data to recover,
+	 * and the f2fs is not read only, check and fix zoned block devices'
+	 * write pointer consistency.
+	 */
+	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
+			f2fs_sb_has_blkzoned(sbi)) {
+		err = f2fs_fix_curseg_write_pointer(sbi);
+		ret = err;
+	}
+
+	if (!err)
+		clear_sbi_flag(sbi, SBI_POR_DOING);
+
+	f2fs_up_write(&sbi->cp_global_sem);
 
 	/* let's drop all the directory inodes for clean checkpoint */
 	destroy_fsync_dnodes(&dir_list, err);
@@ -850,8 +881,6 @@
 		}
 	}
 
-	kmem_cache_destroy(fsync_entry_slab);
-out:
 #ifdef CONFIG_QUOTA
 	/* Turn quotas off */
 	if (quota_enabled)
@@ -859,5 +888,19 @@
 #endif
 	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 
-	return ret ? ret: err;
+	return ret ? ret : err;
+}
+
+int __init f2fs_create_recovery_cache(void)
+{
+	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
+					sizeof(struct fsync_inode_entry));
+	if (!fsync_entry_slab)
+		return -ENOMEM;
+	return 0;
+}
+
+void f2fs_destroy_recovery_cache(void)
+{
+	kmem_cache_destroy(fsync_entry_slab);
 }