| Old | New | Code |
|---|---|---|
| .. | .. |
|---|
| 17 | 17 | #include "node.h" |
|---|
| 18 | 18 | #include "segment.h" |
|---|
| 19 | 19 | #include "xattr.h" |
|---|
| 20 | | -#include "trace.h" |
|---|
| 21 | 20 | #include <trace/events/f2fs.h> |
|---|
| 22 | 21 | |
|---|
| 23 | 22 | #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock) |
|---|
| .. | .. |
|---|
| 44 | 43 | bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) |
|---|
| 45 | 44 | { |
|---|
| 46 | 45 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
|---|
| 46 | + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; |
|---|
| 47 | 47 | struct sysinfo val; |
|---|
| 48 | 48 | unsigned long avail_ram; |
|---|
| 49 | 49 | unsigned long mem_size = 0; |
|---|
| 50 | 50 | bool res = false; |
|---|
| 51 | + |
|---|
| 52 | + if (!nm_i) |
|---|
| 53 | + return true; |
|---|
| 51 | 54 | |
|---|
| 52 | 55 | si_meminfo(&val); |
|---|
| 53 | 56 | |
|---|
| .. | .. |
|---|
| 55 | 58 | avail_ram = val.totalram - val.totalhigh; |
|---|
| 56 | 59 | |
|---|
| 57 | 60 | /* |
|---|
| 58 | | - * give 25%, 25%, 50%, 50%, 50% memory for each components respectively |
|---|
| 61 | + * give 25%, 25%, 50%, 50%, 25%, 25% memory for each components respectively |
|---|
| 59 | 62 | */ |
|---|
| 60 | 63 | if (type == FREE_NIDS) { |
|---|
| 61 | 64 | mem_size = (nm_i->nid_cnt[FREE_NID] * |
|---|
| 62 | 65 | sizeof(struct free_nid)) >> PAGE_SHIFT; |
|---|
| 63 | 66 | res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); |
|---|
| 64 | 67 | } else if (type == NAT_ENTRIES) { |
|---|
| 65 | | - mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> |
|---|
| 66 | | - PAGE_SHIFT; |
|---|
| 68 | + mem_size = (nm_i->nat_cnt[TOTAL_NAT] * |
|---|
| 69 | + sizeof(struct nat_entry)) >> PAGE_SHIFT; |
|---|
| 67 | 70 | res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); |
|---|
| 68 | 71 | if (excess_cached_nats(sbi)) |
|---|
| 69 | 72 | res = false; |
|---|
| .. | .. |
|---|
| 80 | 83 | sizeof(struct ino_entry); |
|---|
| 81 | 84 | mem_size >>= PAGE_SHIFT; |
|---|
| 82 | 85 | res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); |
|---|
| 83 | | - } else if (type == EXTENT_CACHE) { |
|---|
| 84 | | - mem_size = (atomic_read(&sbi->total_ext_tree) * |
|---|
| 86 | + } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) { |
|---|
| 87 | + enum extent_type etype = type == READ_EXTENT_CACHE ? |
|---|
| 88 | + EX_READ : EX_BLOCK_AGE; |
|---|
| 89 | + struct extent_tree_info *eti = &sbi->extent_tree[etype]; |
|---|
| 90 | + |
|---|
| 91 | + mem_size = (atomic_read(&eti->total_ext_tree) * |
|---|
| 85 | 92 | sizeof(struct extent_tree) + |
|---|
| 86 | | - atomic_read(&sbi->total_ext_node) * |
|---|
| 93 | + atomic_read(&eti->total_ext_node) * |
|---|
| 87 | 94 | sizeof(struct extent_node)) >> PAGE_SHIFT; |
|---|
| 88 | | - res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); |
|---|
| 95 | + res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); |
|---|
| 89 | 96 | } else if (type == INMEM_PAGES) { |
|---|
| 90 | 97 | /* it allows 20% / total_ram for inmemory pages */ |
|---|
| 91 | 98 | mem_size = get_pages(sbi, F2FS_INMEM_PAGES); |
|---|
| 92 | 99 | res = mem_size < (val.totalram / 5); |
|---|
| 100 | + } else if (type == DISCARD_CACHE) { |
|---|
| 101 | + mem_size = (atomic_read(&dcc->discard_cmd_cnt) * |
|---|
| 102 | + sizeof(struct discard_cmd)) >> PAGE_SHIFT; |
|---|
| 103 | + res = mem_size < (avail_ram * nm_i->ram_thresh / 100); |
|---|
| 104 | + } else if (type == COMPRESS_PAGE) { |
|---|
| 105 | +#ifdef CONFIG_F2FS_FS_COMPRESSION |
|---|
| 106 | + unsigned long free_ram = val.freeram; |
|---|
| 107 | + |
|---|
| 108 | + /* |
|---|
| 109 | + * free memory is lower than watermark or cached page count |
|---|
| 110 | + * exceed threshold, deny caching compress page. |
|---|
| 111 | + */ |
|---|
| 112 | + res = (free_ram > avail_ram * sbi->compress_watermark / 100) && |
|---|
| 113 | + (COMPRESS_MAPPING(sbi)->nrpages < |
|---|
| 114 | + free_ram * sbi->compress_percent / 100); |
|---|
| 115 | +#else |
|---|
| 116 | + res = false; |
|---|
| 117 | +#endif |
|---|
| 93 | 118 | } else { |
|---|
| 94 | 119 | if (!sbi->sb->s_bdi->wb.dirty_exceeded) |
|---|
| 95 | 120 | return true; |
|---|
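
The hunk above sizes each in-memory cache against a share of low memory: `avail_ram * nm_i->ram_thresh / 100` is the overall budget, and the trailing shift selects the share a given cache may consume (`>> 2` for a quarter — free nids, NAT entries, and now both extent caches — `>> 1` for a half, and no shift for the new DISCARD_CACHE case), while the new COMPRESS_PAGE case instead gates the compressed-page cache on free memory staying above a `compress_watermark` percentage. Below is a minimal userspace sketch of that arithmetic with invented numbers; the 4 GiB page count and the 10% threshold are illustrative stand-ins, not values read from the driver.

```c
/*
 * Userspace sketch of the budget arithmetic above.  The page count and the
 * ram_thresh value are invented for the example (10 is assumed from the
 * documented sysfs default, not read from the source).
 */
#include <stdio.h>

/* budget in pages: (avail_ram * ram_thresh / 100) >> shift */
static unsigned long budget_pages(unsigned long avail_ram,
                                  unsigned int ram_thresh,
                                  unsigned int shift)
{
    return (avail_ram * ram_thresh / 100) >> shift;
}

int main(void)
{
    unsigned long avail_ram = 1UL << 20;  /* pretend 4 GiB of 4 KiB pages */
    unsigned int ram_thresh = 10;         /* assumed 10% overall cap */

    /* free nids, NAT entries, read/age extent caches: a quarter share */
    printf("quarter share: %lu pages\n", budget_pages(avail_ram, ram_thresh, 2));
    /* ino entries: a half share */
    printf("half share:    %lu pages\n", budget_pages(avail_ram, ram_thresh, 1));
    /* discard commands: the whole ram_thresh budget */
    printf("full share:    %lu pages\n", budget_pages(avail_ram, ram_thresh, 0));
    return 0;
}
```

Tightening the extent-cache branch from `>> 1` to `>> 2` halves the share the read/age extent trees may occupy before `f2fs_available_free_memory()` reports no headroom for them.
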
| .. | .. |
|---|
| 100 | 125 | static void clear_node_page_dirty(struct page *page) |
|---|
| 101 | 126 | { |
|---|
| 102 | 127 | if (PageDirty(page)) { |
|---|
| 103 | | - f2fs_clear_radix_tree_dirty_tag(page); |
|---|
| 128 | + f2fs_clear_page_cache_dirty_tag(page); |
|---|
| 104 | 129 | clear_page_dirty_for_io(page); |
|---|
| 105 | 130 | dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); |
|---|
| 106 | 131 | } |
|---|
| .. | .. |
|---|
| 109 | 134 | |
|---|
| 110 | 135 | static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) |
|---|
| 111 | 136 | { |
|---|
| 112 | | - return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid)); |
|---|
| 137 | + return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid)); |
|---|
| 113 | 138 | } |
|---|
| 114 | 139 | |
|---|
| 115 | 140 | static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) |
|---|
| .. | .. |
|---|
| 177 | 202 | list_add_tail(&ne->list, &nm_i->nat_entries); |
|---|
| 178 | 203 | spin_unlock(&nm_i->nat_list_lock); |
|---|
| 179 | 204 | |
|---|
| 180 | | - nm_i->nat_cnt++; |
|---|
| 205 | + nm_i->nat_cnt[TOTAL_NAT]++; |
|---|
| 206 | + nm_i->nat_cnt[RECLAIMABLE_NAT]++; |
|---|
| 181 | 207 | return ne; |
|---|
| 182 | 208 | } |
|---|
| 183 | 209 | |
|---|
| .. | .. |
|---|
| 207 | 233 | static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) |
|---|
| 208 | 234 | { |
|---|
| 209 | 235 | radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); |
|---|
| 210 | | - nm_i->nat_cnt--; |
|---|
| 236 | + nm_i->nat_cnt[TOTAL_NAT]--; |
|---|
| 237 | + nm_i->nat_cnt[RECLAIMABLE_NAT]--; |
|---|
| 211 | 238 | __free_nat_entry(e); |
|---|
| 212 | 239 | } |
|---|
| 213 | 240 | |
|---|
| .. | .. |
|---|
| 253 | 280 | if (get_nat_flag(ne, IS_DIRTY)) |
|---|
| 254 | 281 | goto refresh_list; |
|---|
| 255 | 282 | |
|---|
| 256 | | - nm_i->dirty_nat_cnt++; |
|---|
| 283 | + nm_i->nat_cnt[DIRTY_NAT]++; |
|---|
| 284 | + nm_i->nat_cnt[RECLAIMABLE_NAT]--; |
|---|
| 257 | 285 | set_nat_flag(ne, IS_DIRTY, true); |
|---|
| 258 | 286 | refresh_list: |
|---|
| 259 | 287 | spin_lock(&nm_i->nat_list_lock); |
|---|
| .. | .. |
|---|
| 273 | 301 | |
|---|
| 274 | 302 | set_nat_flag(ne, IS_DIRTY, false); |
|---|
| 275 | 303 | set->entry_cnt--; |
|---|
| 276 | | - nm_i->dirty_nat_cnt--; |
|---|
| 304 | + nm_i->nat_cnt[DIRTY_NAT]--; |
|---|
| 305 | + nm_i->nat_cnt[RECLAIMABLE_NAT]++; |
|---|
| 277 | 306 | } |
|---|
| 278 | 307 | |
|---|
| 279 | 308 | static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, |
|---|
| .. | .. |
|---|
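
The NAT-cache hunks above replace the old `nat_cnt`/`dirty_nat_cnt` scalars with a single counter array indexed by state, updating TOTAL_NAT, DIRTY_NAT and RECLAIMABLE_NAT together at every transition (`__init_nat_entry()` and `__del_from_nat_cache()` appear above; the dirty set/clear paths mirror them). Here is a self-contained model of that bookkeeping, with the enum reduced to just the index names visible in the diff; the real definition and its locking live in the f2fs headers and are not reproduced.

```c
/*
 * Standalone model of the NAT counter bookkeeping.  The enum mirrors the
 * index names used by the patch; ordering and locking are not the kernel's.
 */
#include <assert.h>
#include <stdio.h>

enum nat_state { TOTAL_NAT, DIRTY_NAT, RECLAIMABLE_NAT, MAX_NAT_STATE };

static unsigned int nat_cnt[MAX_NAT_STATE];

static void cache_nat_entry(void)   /* entry added to the clean cache */
{
    nat_cnt[TOTAL_NAT]++;
    nat_cnt[RECLAIMABLE_NAT]++;
}

static void set_nat_dirty(void)     /* clean entry becomes dirty */
{
    nat_cnt[DIRTY_NAT]++;
    nat_cnt[RECLAIMABLE_NAT]--;
}

static void clear_nat_dirty(void)   /* dirty entry written back */
{
    nat_cnt[DIRTY_NAT]--;
    nat_cnt[RECLAIMABLE_NAT]++;
}

static void drop_nat_entry(void)    /* entry removed from the cache */
{
    nat_cnt[TOTAL_NAT]--;
    nat_cnt[RECLAIMABLE_NAT]--;
}

int main(void)
{
    cache_nat_entry();
    set_nat_dirty();
    clear_nat_dirty();
    drop_nat_entry();

    /* every transition keeps the three counters mutually consistent */
    assert(nat_cnt[TOTAL_NAT] == 0 && nat_cnt[DIRTY_NAT] == 0 &&
           nat_cnt[RECLAIMABLE_NAT] == 0);
    printf("counters balanced\n");
    return 0;
}
```

The assert only demonstrates that the four transitions leave the counters mutually consistent, which is the property the later `f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT])` check in the teardown path relies on.
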
| 355 | 384 | struct nat_entry *e; |
|---|
| 356 | 385 | bool need = false; |
|---|
| 357 | 386 | |
|---|
| 358 | | - down_read(&nm_i->nat_tree_lock); |
|---|
| 387 | + f2fs_down_read(&nm_i->nat_tree_lock); |
|---|
| 359 | 388 | e = __lookup_nat_cache(nm_i, nid); |
|---|
| 360 | 389 | if (e) { |
|---|
| 361 | 390 | if (!get_nat_flag(e, IS_CHECKPOINTED) && |
|---|
| 362 | 391 | !get_nat_flag(e, HAS_FSYNCED_INODE)) |
|---|
| 363 | 392 | need = true; |
|---|
| 364 | 393 | } |
|---|
| 365 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 394 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 366 | 395 | return need; |
|---|
| 367 | 396 | } |
|---|
| 368 | 397 | |
|---|
| .. | .. |
|---|
| 372 | 401 | struct nat_entry *e; |
|---|
| 373 | 402 | bool is_cp = true; |
|---|
| 374 | 403 | |
|---|
| 375 | | - down_read(&nm_i->nat_tree_lock); |
|---|
| 404 | + f2fs_down_read(&nm_i->nat_tree_lock); |
|---|
| 376 | 405 | e = __lookup_nat_cache(nm_i, nid); |
|---|
| 377 | 406 | if (e && !get_nat_flag(e, IS_CHECKPOINTED)) |
|---|
| 378 | 407 | is_cp = false; |
|---|
| 379 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 408 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 380 | 409 | return is_cp; |
|---|
| 381 | 410 | } |
|---|
| 382 | 411 | |
|---|
| .. | .. |
|---|
| 386 | 415 | struct nat_entry *e; |
|---|
| 387 | 416 | bool need_update = true; |
|---|
| 388 | 417 | |
|---|
| 389 | | - down_read(&nm_i->nat_tree_lock); |
|---|
| 418 | + f2fs_down_read(&nm_i->nat_tree_lock); |
|---|
| 390 | 419 | e = __lookup_nat_cache(nm_i, ino); |
|---|
| 391 | 420 | if (e && get_nat_flag(e, HAS_LAST_FSYNC) && |
|---|
| 392 | 421 | (get_nat_flag(e, IS_CHECKPOINTED) || |
|---|
| 393 | 422 | get_nat_flag(e, HAS_FSYNCED_INODE))) |
|---|
| 394 | 423 | need_update = false; |
|---|
| 395 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 424 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 396 | 425 | return need_update; |
|---|
| 397 | 426 | } |
|---|
| 398 | 427 | |
|---|
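
These hunks (and many later ones in this diff) swap bare `down_read`/`up_read`/`down_write` calls on `nat_tree_lock` for `f2fs_`-prefixed wrappers (`f2fs_down_read`, `f2fs_up_write`, `f2fs_down_write_trylock`, `init_f2fs_rwsem`, ...). The sketch below models only the wrapper calling convention, with a pthread rwlock as the stand-in so it compiles on its own; the kernel's `f2fs_rwsem` wraps an `rw_semaphore` and carries extra behaviour (for example the `f2fs_rwsem_is_contended()` helper used further down) that this stand-in does not capture.

```c
/* Userspace stand-in for the f2fs_rwsem wrapper API seen in this diff. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct model_rwsem {
    pthread_rwlock_t lock;
};

static void model_init_rwsem(struct model_rwsem *sem)
{
    pthread_rwlock_init(&sem->lock, NULL);
}

static void model_down_read(struct model_rwsem *sem)
{
    pthread_rwlock_rdlock(&sem->lock);
}

static void model_up_read(struct model_rwsem *sem)
{
    pthread_rwlock_unlock(&sem->lock);
}

static bool model_down_write_trylock(struct model_rwsem *sem)
{
    return pthread_rwlock_trywrlock(&sem->lock) == 0;
}

static void model_up_write(struct model_rwsem *sem)
{
    pthread_rwlock_unlock(&sem->lock);
}

int main(void)
{
    struct model_rwsem nat_tree_lock;

    model_init_rwsem(&nat_tree_lock);

    model_down_read(&nat_tree_lock);    /* lookup path */
    model_up_read(&nat_tree_lock);

    if (model_down_write_trylock(&nat_tree_lock)) { /* shrinker path */
        printf("got write lock without blocking\n");
        model_up_write(&nat_tree_lock);
    }
    return 0;
}
```
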
| .. | .. |
|---|
| 403 | 432 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
|---|
| 404 | 433 | struct nat_entry *new, *e; |
|---|
| 405 | 434 | |
|---|
| 435 | + /* Let's mitigate lock contention of nat_tree_lock during checkpoint */ |
|---|
| 436 | + if (f2fs_rwsem_is_locked(&sbi->cp_global_sem)) |
|---|
| 437 | + return; |
|---|
| 438 | + |
|---|
| 406 | 439 | new = __alloc_nat_entry(nid, false); |
|---|
| 407 | 440 | if (!new) |
|---|
| 408 | 441 | return; |
|---|
| 409 | 442 | |
|---|
| 410 | | - down_write(&nm_i->nat_tree_lock); |
|---|
| 443 | + f2fs_down_write(&nm_i->nat_tree_lock); |
|---|
| 411 | 444 | e = __lookup_nat_cache(nm_i, nid); |
|---|
| 412 | 445 | if (!e) |
|---|
| 413 | 446 | e = __init_nat_entry(nm_i, new, ne, false); |
|---|
| .. | .. |
|---|
| 416 | 449 | nat_get_blkaddr(e) != |
|---|
| 417 | 450 | le32_to_cpu(ne->block_addr) || |
|---|
| 418 | 451 | nat_get_version(e) != ne->version); |
|---|
| 419 | | - up_write(&nm_i->nat_tree_lock); |
|---|
| 452 | + f2fs_up_write(&nm_i->nat_tree_lock); |
|---|
| 420 | 453 | if (e != new) |
|---|
| 421 | 454 | __free_nat_entry(new); |
|---|
| 422 | 455 | } |
|---|
| .. | .. |
|---|
| 428 | 461 | struct nat_entry *e; |
|---|
| 429 | 462 | struct nat_entry *new = __alloc_nat_entry(ni->nid, true); |
|---|
| 430 | 463 | |
|---|
| 431 | | - down_write(&nm_i->nat_tree_lock); |
|---|
| 464 | + f2fs_down_write(&nm_i->nat_tree_lock); |
|---|
| 432 | 465 | e = __lookup_nat_cache(nm_i, ni->nid); |
|---|
| 433 | 466 | if (!e) { |
|---|
| 434 | 467 | e = __init_nat_entry(nm_i, new, NULL, true); |
|---|
| .. | .. |
|---|
| 459 | 492 | /* increment version no as node is removed */ |
|---|
| 460 | 493 | if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { |
|---|
| 461 | 494 | unsigned char version = nat_get_version(e); |
|---|
| 495 | + |
|---|
| 462 | 496 | nat_set_version(e, inc_node_version(version)); |
|---|
| 463 | 497 | } |
|---|
| 464 | 498 | |
|---|
| .. | .. |
|---|
| 476 | 510 | set_nat_flag(e, HAS_FSYNCED_INODE, true); |
|---|
| 477 | 511 | set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); |
|---|
| 478 | 512 | } |
|---|
| 479 | | - up_write(&nm_i->nat_tree_lock); |
|---|
| 513 | + f2fs_up_write(&nm_i->nat_tree_lock); |
|---|
| 480 | 514 | } |
|---|
| 481 | 515 | |
|---|
| 482 | 516 | int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) |
|---|
| .. | .. |
|---|
| 484 | 518 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
|---|
| 485 | 519 | int nr = nr_shrink; |
|---|
| 486 | 520 | |
|---|
| 487 | | - if (!down_write_trylock(&nm_i->nat_tree_lock)) |
|---|
| 521 | + if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock)) |
|---|
| 488 | 522 | return 0; |
|---|
| 489 | 523 | |
|---|
| 490 | 524 | spin_lock(&nm_i->nat_list_lock); |
|---|
| .. | .. |
|---|
| 506 | 540 | } |
|---|
| 507 | 541 | spin_unlock(&nm_i->nat_list_lock); |
|---|
| 508 | 542 | |
|---|
| 509 | | - up_write(&nm_i->nat_tree_lock); |
|---|
| 543 | + f2fs_up_write(&nm_i->nat_tree_lock); |
|---|
| 510 | 544 | return nr - nr_shrink; |
|---|
| 511 | 545 | } |
|---|
| 512 | 546 | |
|---|
| 513 | 547 | int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, |
|---|
| 514 | | - struct node_info *ni) |
|---|
| 548 | + struct node_info *ni, bool checkpoint_context) |
|---|
| 515 | 549 | { |
|---|
| 516 | 550 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
|---|
| 517 | 551 | struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); |
|---|
| .. | .. |
|---|
| 526 | 560 | int i; |
|---|
| 527 | 561 | |
|---|
| 528 | 562 | ni->nid = nid; |
|---|
| 529 | | - |
|---|
| 563 | +retry: |
|---|
| 530 | 564 | /* Check nat cache */ |
|---|
| 531 | | - down_read(&nm_i->nat_tree_lock); |
|---|
| 565 | + f2fs_down_read(&nm_i->nat_tree_lock); |
|---|
| 532 | 566 | e = __lookup_nat_cache(nm_i, nid); |
|---|
| 533 | 567 | if (e) { |
|---|
| 534 | 568 | ni->ino = nat_get_ino(e); |
|---|
| 535 | 569 | ni->blk_addr = nat_get_blkaddr(e); |
|---|
| 536 | 570 | ni->version = nat_get_version(e); |
|---|
| 537 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 571 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 538 | 572 | return 0; |
|---|
| 539 | 573 | } |
|---|
| 540 | 574 | |
|---|
| 541 | | - memset(&ne, 0, sizeof(struct f2fs_nat_entry)); |
|---|
| 575 | + /* |
|---|
| 576 | + * Check current segment summary by trying to grab journal_rwsem first. |
|---|
| 577 | + * This sem is on the critical path on the checkpoint requiring the above |
|---|
| 578 | + * nat_tree_lock. Therefore, we should retry, if we failed to grab here |
|---|
| 579 | + * while not bothering checkpoint. |
|---|
| 580 | + */ |
|---|
| 581 | + if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) { |
|---|
| 582 | + down_read(&curseg->journal_rwsem); |
|---|
| 583 | + } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) || |
|---|
| 584 | + !down_read_trylock(&curseg->journal_rwsem)) { |
|---|
| 585 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 586 | + goto retry; |
|---|
| 587 | + } |
|---|
| 542 | 588 | |
|---|
| 543 | | - /* Check current segment summary */ |
|---|
| 544 | | - down_read(&curseg->journal_rwsem); |
|---|
| 545 | 589 | i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0); |
|---|
| 546 | 590 | if (i >= 0) { |
|---|
| 547 | 591 | ne = nat_in_journal(journal, i); |
|---|
| 548 | 592 | node_info_from_raw_nat(ni, &ne); |
|---|
| 549 | 593 | } |
|---|
| 550 | | - up_read(&curseg->journal_rwsem); |
|---|
| 594 | + up_read(&curseg->journal_rwsem); |
|---|
| 551 | 595 | if (i >= 0) { |
|---|
| 552 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 596 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 553 | 597 | goto cache; |
|---|
| 554 | 598 | } |
|---|
| 555 | 599 | |
|---|
| 556 | 600 | /* Fill node_info from nat page */ |
|---|
| 557 | 601 | index = current_nat_addr(sbi, nid); |
|---|
| 558 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 602 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 559 | 603 | |
|---|
| 560 | 604 | page = f2fs_get_meta_page(sbi, index); |
|---|
| 561 | 605 | if (IS_ERR(page)) |
|---|
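
The rewritten `f2fs_get_node_info()` above avoids stalling a running checkpoint: if `cp_global_sem` is held and the caller is not the checkpoint itself, it only *tries* to take `journal_rwsem`, and on failure (or when `nat_tree_lock` is contended) it releases `nat_tree_lock` and retries from the top rather than sleeping while holding a lock the checkpoint needs. Below is a compilable sketch of that back-off-and-retry shape, with pthread locks standing in for the two f2fs locks and all checkpoint detail omitted.

```c
/*
 * Sketch of "don't block on the inner lock while holding the outer one".
 * The two pthread locks stand in for nat_tree_lock and journal_rwsem.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t outer = PTHREAD_RWLOCK_INITIALIZER; /* nat_tree_lock stand-in */
static pthread_rwlock_t inner = PTHREAD_RWLOCK_INITIALIZER; /* journal_rwsem stand-in */

static int lookup_with_retry(int checkpoint_running)
{
    int attempts = 0;

retry:
    attempts++;
    pthread_rwlock_rdlock(&outer);

    if (!checkpoint_running) {
        /* no checkpoint in flight: blocking on the inner lock is fine */
        pthread_rwlock_rdlock(&inner);
    } else if (pthread_rwlock_tryrdlock(&inner) != 0) {
        /*
         * A checkpoint may need the outer lock soon; back off instead of
         * blocking while we still hold it, then start over.
         */
        pthread_rwlock_unlock(&outer);
        goto retry;
    }

    /* ... journal lookup would happen here ... */

    pthread_rwlock_unlock(&inner);
    pthread_rwlock_unlock(&outer);
    return attempts;
}

int main(void)
{
    printf("attempts: %d\n", lookup_with_retry(0));
    return 0;
}
```

In this single-threaded example the trylock always succeeds, so `attempts` stays at 1; the point is only the ordering rule the patch enforces.
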
| .. | .. |
|---|
| 618 | 662 | switch (dn->max_level) { |
|---|
| 619 | 663 | case 3: |
|---|
| 620 | 664 | base += 2 * indirect_blks; |
|---|
| 665 | + fallthrough; |
|---|
| 621 | 666 | case 2: |
|---|
| 622 | 667 | base += 2 * direct_blks; |
|---|
| 668 | + fallthrough; |
|---|
| 623 | 669 | case 1: |
|---|
| 624 | 670 | base += direct_index; |
|---|
| 625 | 671 | break; |
|---|
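
The two `fallthrough;` statements added above make the intentional cascade in `get_node_path()` explicit: a level-3 path accumulates the level-2 and level-1 contributions as well by falling through the case labels. A standalone example of the same idiom follows; it uses the GCC/Clang attribute spelling because `fallthrough` is a kernel macro, and the block counts passed in `main()` are illustrative rather than the real f2fs geometry.

```c
/* Accumulate-by-fallthrough, annotated so the compiler knows it is intended. */
#include <stdio.h>

static unsigned int base_offset(int max_level,
                                unsigned int direct_index,
                                unsigned int direct_blks,
                                unsigned int indirect_blks)
{
    unsigned int base = 0;

    switch (max_level) {
    case 3:
        base += 2 * indirect_blks;
        __attribute__((fallthrough));
    case 2:
        base += 2 * direct_blks;
        __attribute__((fallthrough));
    case 1:
        base += direct_index;
        break;
    default:
        break;
    }
    return base;
}

int main(void)
{
    /* illustrative block counts only */
    printf("level 3 base: %u\n", base_offset(3, 923, 1018, 1018 * 1018));
    return 0;
}
```
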
| .. | .. |
|---|
| 804 | 850 | dn->ofs_in_node = offset[level]; |
|---|
| 805 | 851 | dn->node_page = npage[level]; |
|---|
| 806 | 852 | dn->data_blkaddr = f2fs_data_blkaddr(dn); |
|---|
| 853 | + |
|---|
| 854 | + if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) && |
|---|
| 855 | + f2fs_sb_has_readonly(sbi)) { |
|---|
| 856 | + unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn); |
|---|
| 857 | + block_t blkaddr; |
|---|
| 858 | + |
|---|
| 859 | + if (!c_len) |
|---|
| 860 | + goto out; |
|---|
| 861 | + |
|---|
| 862 | + blkaddr = f2fs_data_blkaddr(dn); |
|---|
| 863 | + if (blkaddr == COMPRESS_ADDR) |
|---|
| 864 | + blkaddr = data_blkaddr(dn->inode, dn->node_page, |
|---|
| 865 | + dn->ofs_in_node + 1); |
|---|
| 866 | + |
|---|
| 867 | + f2fs_update_read_extent_tree_range_compressed(dn->inode, |
|---|
| 868 | + index, blkaddr, |
|---|
| 869 | + F2FS_I(dn->inode)->i_cluster_size, |
|---|
| 870 | + c_len); |
|---|
| 871 | + } |
|---|
| 872 | +out: |
|---|
| 807 | 873 | return 0; |
|---|
| 808 | 874 | |
|---|
| 809 | 875 | release_pages: |
|---|
| .. | .. |
|---|
| 828 | 894 | int err; |
|---|
| 829 | 895 | pgoff_t index; |
|---|
| 830 | 896 | |
|---|
| 831 | | - err = f2fs_get_node_info(sbi, dn->nid, &ni); |
|---|
| 897 | + err = f2fs_get_node_info(sbi, dn->nid, &ni, false); |
|---|
| 832 | 898 | if (err) |
|---|
| 833 | 899 | return err; |
|---|
| 834 | 900 | |
|---|
| .. | .. |
|---|
| 868 | 934 | |
|---|
| 869 | 935 | /* get direct node */ |
|---|
| 870 | 936 | page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid); |
|---|
| 871 | | - if (IS_ERR(page) && PTR_ERR(page) == -ENOENT) |
|---|
| 937 | + if (PTR_ERR(page) == -ENOENT) |
|---|
| 872 | 938 | return 1; |
|---|
| 873 | 939 | else if (IS_ERR(page)) |
|---|
| 874 | 940 | return PTR_ERR(page); |
|---|
| .. | .. |
|---|
| 878 | 944 | dn->ofs_in_node = 0; |
|---|
| 879 | 945 | f2fs_truncate_data_blocks(dn); |
|---|
| 880 | 946 | err = truncate_node(dn); |
|---|
| 881 | | - if (err) |
|---|
| 947 | + if (err) { |
|---|
| 948 | + f2fs_put_page(page, 1); |
|---|
| 882 | 949 | return err; |
|---|
| 950 | + } |
|---|
| 883 | 951 | |
|---|
| 884 | 952 | return 1; |
|---|
| 885 | 953 | } |
|---|
| .. | .. |
|---|
| 1039 | 1107 | trace_f2fs_truncate_inode_blocks_enter(inode, from); |
|---|
| 1040 | 1108 | |
|---|
| 1041 | 1109 | level = get_node_path(inode, from, offset, noffset); |
|---|
| 1042 | | - if (level < 0) |
|---|
| 1110 | + if (level < 0) { |
|---|
| 1111 | + trace_f2fs_truncate_inode_blocks_exit(inode, level); |
|---|
| 1043 | 1112 | return level; |
|---|
| 1113 | + } |
|---|
| 1044 | 1114 | |
|---|
| 1045 | 1115 | page = f2fs_get_node_page(sbi, inode->i_ino); |
|---|
| 1046 | 1116 | if (IS_ERR(page)) { |
|---|
| .. | .. |
|---|
| 1225 | 1295 | goto fail; |
|---|
| 1226 | 1296 | |
|---|
| 1227 | 1297 | #ifdef CONFIG_F2FS_CHECK_FS |
|---|
| 1228 | | - err = f2fs_get_node_info(sbi, dn->nid, &new_ni); |
|---|
| 1298 | + err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false); |
|---|
| 1229 | 1299 | if (err) { |
|---|
| 1230 | 1300 | dec_valid_node_count(sbi, dn->inode, !ofs); |
|---|
| 1231 | 1301 | goto fail; |
|---|
| 1232 | 1302 | } |
|---|
| 1233 | | - f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR); |
|---|
| 1303 | + if (unlikely(new_ni.blk_addr != NULL_ADDR)) { |
|---|
| 1304 | + err = -EFSCORRUPTED; |
|---|
| 1305 | + set_sbi_flag(sbi, SBI_NEED_FSCK); |
|---|
| 1306 | + goto fail; |
|---|
| 1307 | + } |
|---|
| 1234 | 1308 | #endif |
|---|
| 1235 | 1309 | new_ni.nid = dn->nid; |
|---|
| 1236 | 1310 | new_ni.ino = dn->inode->i_ino; |
|---|
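
Under CONFIG_F2FS_CHECK_FS, the hunk above downgrades what used to be an `f2fs_bug_on()` into a recoverable failure: a non-NULL block address on a node that is about to be allocated now returns `-EFSCORRUPTED` and sets `SBI_NEED_FSCK` instead of tripping an assertion. A toy illustration of that error-instead-of-assert shape; the errno value and the flag variable are stand-ins for the kernel definitions.

```c
/* Flag the filesystem and return an error instead of crashing the kernel. */
#include <stdbool.h>
#include <stdio.h>

#define EFSCORRUPTED 117   /* stand-in; the kernel aliases this to EUCLEAN */

static bool need_fsck;     /* stand-in for SBI_NEED_FSCK */

static int check_new_node(unsigned int blk_addr)
{
    const unsigned int NULL_ADDR = 0;

    if (blk_addr != NULL_ADDR) {
        need_fsck = true;
        return -EFSCORRUPTED;
    }
    return 0;
}

int main(void)
{
    printf("%d (need_fsck=%d)\n", check_new_node(0), need_fsck);
    printf("%d (need_fsck=%d)\n", check_new_node(42), need_fsck);
    return 0;
}
```
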
| .. | .. |
|---|
| 1287 | 1361 | return LOCKED_PAGE; |
|---|
| 1288 | 1362 | } |
|---|
| 1289 | 1363 | |
|---|
| 1290 | | - err = f2fs_get_node_info(sbi, page->index, &ni); |
|---|
| 1364 | + err = f2fs_get_node_info(sbi, page->index, &ni, false); |
|---|
| 1291 | 1365 | if (err) |
|---|
| 1292 | 1366 | return err; |
|---|
| 1293 | 1367 | |
|---|
| 1294 | | - if (unlikely(ni.blk_addr == NULL_ADDR) || |
|---|
| 1295 | | - is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) { |
|---|
| 1368 | + /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */ |
|---|
| 1369 | + if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) { |
|---|
| 1296 | 1370 | ClearPageUptodate(page); |
|---|
| 1297 | 1371 | return -ENOENT; |
|---|
| 1298 | 1372 | } |
|---|
| .. | .. |
|---|
| 1320 | 1394 | if (f2fs_check_nid_range(sbi, nid)) |
|---|
| 1321 | 1395 | return; |
|---|
| 1322 | 1396 | |
|---|
| 1323 | | - rcu_read_lock(); |
|---|
| 1324 | | - apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid); |
|---|
| 1325 | | - rcu_read_unlock(); |
|---|
| 1397 | + apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid); |
|---|
| 1326 | 1398 | if (apage) |
|---|
| 1327 | 1399 | return; |
|---|
| 1328 | 1400 | |
|---|
| .. | .. |
|---|
| 1378 | 1450 | goto out_err; |
|---|
| 1379 | 1451 | } |
|---|
| 1380 | 1452 | page_hit: |
|---|
| 1381 | | - if(unlikely(nid != nid_of_node(page))) { |
|---|
| 1453 | + if (unlikely(nid != nid_of_node(page))) { |
|---|
| 1382 | 1454 | f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", |
|---|
| 1383 | 1455 | nid, nid_of_node(page), ino_of_node(page), |
|---|
| 1384 | 1456 | ofs_of_node(page), cpver_of_node(page), |
|---|
| 1385 | 1457 | next_blkaddr_of_node(page)); |
|---|
| 1458 | + set_sbi_flag(sbi, SBI_NEED_FSCK); |
|---|
| 1386 | 1459 | err = -EINVAL; |
|---|
| 1387 | 1460 | out_err: |
|---|
| 1388 | 1461 | ClearPageUptodate(page); |
|---|
| .. | .. |
|---|
| 1542 | 1615 | nid = nid_of_node(page); |
|---|
| 1543 | 1616 | f2fs_bug_on(sbi, page->index != nid); |
|---|
| 1544 | 1617 | |
|---|
| 1545 | | - if (f2fs_get_node_info(sbi, nid, &ni)) |
|---|
| 1618 | + if (f2fs_get_node_info(sbi, nid, &ni, !do_balance)) |
|---|
| 1546 | 1619 | goto redirty_out; |
|---|
| 1547 | 1620 | |
|---|
| 1548 | 1621 | if (wbc->for_reclaim) { |
|---|
| 1549 | | - if (!down_read_trylock(&sbi->node_write)) |
|---|
| 1622 | + if (!f2fs_down_read_trylock(&sbi->node_write)) |
|---|
| 1550 | 1623 | goto redirty_out; |
|---|
| 1551 | 1624 | } else { |
|---|
| 1552 | | - down_read(&sbi->node_write); |
|---|
| 1625 | + f2fs_down_read(&sbi->node_write); |
|---|
| 1553 | 1626 | } |
|---|
| 1554 | 1627 | |
|---|
| 1555 | 1628 | /* This page is already truncated */ |
|---|
| 1556 | 1629 | if (unlikely(ni.blk_addr == NULL_ADDR)) { |
|---|
| 1557 | 1630 | ClearPageUptodate(page); |
|---|
| 1558 | 1631 | dec_page_count(sbi, F2FS_DIRTY_NODES); |
|---|
| 1559 | | - up_read(&sbi->node_write); |
|---|
| 1632 | + f2fs_up_read(&sbi->node_write); |
|---|
| 1560 | 1633 | unlock_page(page); |
|---|
| 1561 | 1634 | return 0; |
|---|
| 1562 | 1635 | } |
|---|
| .. | .. |
|---|
| 1564 | 1637 | if (__is_valid_data_blkaddr(ni.blk_addr) && |
|---|
| 1565 | 1638 | !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, |
|---|
| 1566 | 1639 | DATA_GENERIC_ENHANCE)) { |
|---|
| 1567 | | - up_read(&sbi->node_write); |
|---|
| 1640 | + f2fs_up_read(&sbi->node_write); |
|---|
| 1568 | 1641 | goto redirty_out; |
|---|
| 1569 | 1642 | } |
|---|
| 1570 | 1643 | |
|---|
| .. | .. |
|---|
| 1585 | 1658 | f2fs_do_write_node_page(nid, &fio); |
|---|
| 1586 | 1659 | set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); |
|---|
| 1587 | 1660 | dec_page_count(sbi, F2FS_DIRTY_NODES); |
|---|
| 1588 | | - up_read(&sbi->node_write); |
|---|
| 1661 | + f2fs_up_read(&sbi->node_write); |
|---|
| 1589 | 1662 | |
|---|
| 1590 | 1663 | if (wbc->for_reclaim) { |
|---|
| 1591 | 1664 | f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE); |
|---|
| .. | .. |
|---|
| 1726 | 1799 | set_dentry_mark(page, |
|---|
| 1727 | 1800 | f2fs_need_dentry_mark(sbi, ino)); |
|---|
| 1728 | 1801 | } |
|---|
| 1729 | | - /* may be written by other thread */ |
|---|
| 1802 | + /* may be written by other thread */ |
|---|
| 1730 | 1803 | if (!PageDirty(page)) |
|---|
| 1731 | 1804 | set_page_dirty(page); |
|---|
| 1732 | 1805 | } |
|---|
| .. | .. |
|---|
| 1770 | 1843 | out: |
|---|
| 1771 | 1844 | if (nwritten) |
|---|
| 1772 | 1845 | f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE); |
|---|
| 1773 | | - return ret ? -EIO: 0; |
|---|
| 1846 | + return ret ? -EIO : 0; |
|---|
| 1774 | 1847 | } |
|---|
| 1775 | 1848 | |
|---|
| 1776 | 1849 | static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data) |
|---|
| .. | .. |
|---|
| 1814 | 1887 | return true; |
|---|
| 1815 | 1888 | } |
|---|
| 1816 | 1889 | |
|---|
| 1817 | | -int f2fs_flush_inline_data(struct f2fs_sb_info *sbi) |
|---|
| 1890 | +void f2fs_flush_inline_data(struct f2fs_sb_info *sbi) |
|---|
| 1818 | 1891 | { |
|---|
| 1819 | 1892 | pgoff_t index = 0; |
|---|
| 1820 | 1893 | struct pagevec pvec; |
|---|
| 1821 | 1894 | int nr_pages; |
|---|
| 1822 | | - int ret = 0; |
|---|
| 1823 | 1895 | |
|---|
| 1824 | 1896 | pagevec_init(&pvec); |
|---|
| 1825 | 1897 | |
|---|
| .. | .. |
|---|
| 1847 | 1919 | } |
|---|
| 1848 | 1920 | |
|---|
| 1849 | 1921 | /* flush inline_data, if it's async context. */ |
|---|
| 1850 | | - if (is_inline_node(page)) { |
|---|
| 1851 | | - clear_inline_node(page); |
|---|
| 1922 | + if (page_private_inline(page)) { |
|---|
| 1923 | + clear_page_private_inline(page); |
|---|
| 1852 | 1924 | unlock_page(page); |
|---|
| 1853 | 1925 | flush_inline_data(sbi, ino_of_node(page)); |
|---|
| 1854 | 1926 | continue; |
|---|
| .. | .. |
|---|
| 1858 | 1930 | pagevec_release(&pvec); |
|---|
| 1859 | 1931 | cond_resched(); |
|---|
| 1860 | 1932 | } |
|---|
| 1861 | | - return ret; |
|---|
| 1862 | 1933 | } |
|---|
| 1863 | 1934 | |
|---|
| 1864 | 1935 | int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, |
|---|
| .. | .. |
|---|
| 1924 | 1995 | goto continue_unlock; |
|---|
| 1925 | 1996 | } |
|---|
| 1926 | 1997 | |
|---|
| 1927 | | - /* flush inline_data, if it's async context. */ |
|---|
| 1928 | | - if (do_balance && is_inline_node(page)) { |
|---|
| 1929 | | - clear_inline_node(page); |
|---|
| 1998 | + /* flush inline_data/inode, if it's async context. */ |
|---|
| 1999 | + if (!do_balance) |
|---|
| 2000 | + goto write_node; |
|---|
| 2001 | + |
|---|
| 2002 | + /* flush inline_data */ |
|---|
| 2003 | + if (page_private_inline(page)) { |
|---|
| 2004 | + clear_page_private_inline(page); |
|---|
| 1930 | 2005 | unlock_page(page); |
|---|
| 1931 | 2006 | flush_inline_data(sbi, ino_of_node(page)); |
|---|
| 1932 | 2007 | goto lock_node; |
|---|
| .. | .. |
|---|
| 1938 | 2013 | if (flush_dirty_inode(page)) |
|---|
| 1939 | 2014 | goto lock_node; |
|---|
| 1940 | 2015 | } |
|---|
| 1941 | | - |
|---|
| 2016 | +write_node: |
|---|
| 1942 | 2017 | f2fs_wait_on_page_writeback(page, NODE, true, true); |
|---|
| 1943 | 2018 | |
|---|
| 1944 | 2019 | if (!clear_page_dirty_for_io(page)) |
|---|
| .. | .. |
|---|
| 2046 | 2121 | |
|---|
| 2047 | 2122 | if (wbc->sync_mode == WB_SYNC_ALL) |
|---|
| 2048 | 2123 | atomic_inc(&sbi->wb_sync_req[NODE]); |
|---|
| 2049 | | - else if (atomic_read(&sbi->wb_sync_req[NODE])) |
|---|
| 2124 | + else if (atomic_read(&sbi->wb_sync_req[NODE])) { |
|---|
| 2125 | + /* to avoid potential deadlock */ |
|---|
| 2126 | + if (current->plug) |
|---|
| 2127 | + blk_finish_plug(current->plug); |
|---|
| 2050 | 2128 | goto skip_write; |
|---|
| 2129 | + } |
|---|
| 2051 | 2130 | |
|---|
| 2052 | 2131 | trace_f2fs_writepages(mapping->host, wbc, NODE); |
|---|
| 2053 | 2132 | |
|---|
| .. | .. |
|---|
| 2080 | 2159 | if (!PageDirty(page)) { |
|---|
| 2081 | 2160 | __set_page_dirty_nobuffers(page); |
|---|
| 2082 | 2161 | inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); |
|---|
| 2083 | | - f2fs_set_page_private(page, 0); |
|---|
| 2084 | | - f2fs_trace_pid(page); |
|---|
| 2162 | + set_page_private_reference(page); |
|---|
| 2085 | 2163 | return 1; |
|---|
| 2086 | 2164 | } |
|---|
| 2087 | 2165 | return 0; |
|---|
| .. | .. |
|---|
| 2097 | 2175 | .invalidatepage = f2fs_invalidate_page, |
|---|
| 2098 | 2176 | .releasepage = f2fs_release_page, |
|---|
| 2099 | 2177 | #ifdef CONFIG_MIGRATION |
|---|
| 2100 | | - .migratepage = f2fs_migrate_page, |
|---|
| 2178 | + .migratepage = f2fs_migrate_page, |
|---|
| 2101 | 2179 | #endif |
|---|
| 2102 | 2180 | }; |
|---|
| 2103 | 2181 | |
|---|
| .. | .. |
|---|
| 2108 | 2186 | } |
|---|
| 2109 | 2187 | |
|---|
| 2110 | 2188 | static int __insert_free_nid(struct f2fs_sb_info *sbi, |
|---|
| 2111 | | - struct free_nid *i, enum nid_state state) |
|---|
| 2189 | + struct free_nid *i) |
|---|
| 2112 | 2190 | { |
|---|
| 2113 | 2191 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
|---|
| 2114 | | - |
|---|
| 2115 | 2192 | int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); |
|---|
| 2193 | + |
|---|
| 2116 | 2194 | if (err) |
|---|
| 2117 | 2195 | return err; |
|---|
| 2118 | 2196 | |
|---|
| 2119 | | - f2fs_bug_on(sbi, state != i->state); |
|---|
| 2120 | | - nm_i->nid_cnt[state]++; |
|---|
| 2121 | | - if (state == FREE_NID) |
|---|
| 2122 | | - list_add_tail(&i->list, &nm_i->free_nid_list); |
|---|
| 2197 | + nm_i->nid_cnt[FREE_NID]++; |
|---|
| 2198 | + list_add_tail(&i->list, &nm_i->free_nid_list); |
|---|
| 2123 | 2199 | return 0; |
|---|
| 2124 | 2200 | } |
|---|
| 2125 | 2201 | |
|---|
| .. | .. |
|---|
| 2241 | 2317 | } |
|---|
| 2242 | 2318 | } |
|---|
| 2243 | 2319 | ret = true; |
|---|
| 2244 | | - err = __insert_free_nid(sbi, i, FREE_NID); |
|---|
| 2320 | + err = __insert_free_nid(sbi, i); |
|---|
| 2245 | 2321 | err_out: |
|---|
| 2246 | 2322 | if (update) { |
|---|
| 2247 | 2323 | update_free_nid_bitmap(sbi, nid, ret, build); |
|---|
| .. | .. |
|---|
| 2335 | 2411 | unsigned int i, idx; |
|---|
| 2336 | 2412 | nid_t nid; |
|---|
| 2337 | 2413 | |
|---|
| 2338 | | - down_read(&nm_i->nat_tree_lock); |
|---|
| 2414 | + f2fs_down_read(&nm_i->nat_tree_lock); |
|---|
| 2339 | 2415 | |
|---|
| 2340 | 2416 | for (i = 0; i < nm_i->nat_blocks; i++) { |
|---|
| 2341 | 2417 | if (!test_bit_le(i, nm_i->nat_block_bitmap)) |
|---|
| .. | .. |
|---|
| 2358 | 2434 | out: |
|---|
| 2359 | 2435 | scan_curseg_cache(sbi); |
|---|
| 2360 | 2436 | |
|---|
| 2361 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 2437 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 2362 | 2438 | } |
|---|
| 2363 | 2439 | |
|---|
| 2364 | 2440 | static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, |
|---|
| .. | .. |
|---|
| 2393 | 2469 | f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, |
|---|
| 2394 | 2470 | META_NAT, true); |
|---|
| 2395 | 2471 | |
|---|
| 2396 | | - down_read(&nm_i->nat_tree_lock); |
|---|
| 2472 | + f2fs_down_read(&nm_i->nat_tree_lock); |
|---|
| 2397 | 2473 | |
|---|
| 2398 | 2474 | while (1) { |
|---|
| 2399 | 2475 | if (!test_bit_le(NAT_BLOCK_OFFSET(nid), |
|---|
| .. | .. |
|---|
| 2408 | 2484 | } |
|---|
| 2409 | 2485 | |
|---|
| 2410 | 2486 | if (ret) { |
|---|
| 2411 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 2487 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 2412 | 2488 | f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); |
|---|
| 2413 | 2489 | return ret; |
|---|
| 2414 | 2490 | } |
|---|
| .. | .. |
|---|
| 2428 | 2504 | /* find free nids from current sum_pages */ |
|---|
| 2429 | 2505 | scan_curseg_cache(sbi); |
|---|
| 2430 | 2506 | |
|---|
| 2431 | | - up_read(&nm_i->nat_tree_lock); |
|---|
| 2507 | + f2fs_up_read(&nm_i->nat_tree_lock); |
|---|
| 2432 | 2508 | |
|---|
| 2433 | 2509 | f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), |
|---|
| 2434 | 2510 | nm_i->ra_nid_pages, META_NAT, false); |
|---|
| .. | .. |
|---|
| 2588 | 2664 | |
|---|
| 2589 | 2665 | ri = F2FS_INODE(page); |
|---|
| 2590 | 2666 | if (ri->i_inline & F2FS_INLINE_XATTR) { |
|---|
| 2591 | | - set_inode_flag(inode, FI_INLINE_XATTR); |
|---|
| 2667 | + if (!f2fs_has_inline_xattr(inode)) { |
|---|
| 2668 | + set_inode_flag(inode, FI_INLINE_XATTR); |
|---|
| 2669 | + stat_inc_inline_xattr(inode); |
|---|
| 2670 | + } |
|---|
| 2592 | 2671 | } else { |
|---|
| 2593 | | - clear_inode_flag(inode, FI_INLINE_XATTR); |
|---|
| 2672 | + if (f2fs_has_inline_xattr(inode)) { |
|---|
| 2673 | + stat_dec_inline_xattr(inode); |
|---|
| 2674 | + clear_inode_flag(inode, FI_INLINE_XATTR); |
|---|
| 2675 | + } |
|---|
| 2594 | 2676 | goto update_inode; |
|---|
| 2595 | 2677 | } |
|---|
| 2596 | 2678 | |
|---|
| .. | .. |
|---|
| 2620 | 2702 | goto recover_xnid; |
|---|
| 2621 | 2703 | |
|---|
| 2622 | 2704 | /* 1: invalidate the previous xattr nid */ |
|---|
| 2623 | | - err = f2fs_get_node_info(sbi, prev_xnid, &ni); |
|---|
| 2705 | + err = f2fs_get_node_info(sbi, prev_xnid, &ni, false); |
|---|
| 2624 | 2706 | if (err) |
|---|
| 2625 | 2707 | return err; |
|---|
| 2626 | 2708 | |
|---|
| .. | .. |
|---|
| 2660 | 2742 | struct page *ipage; |
|---|
| 2661 | 2743 | int err; |
|---|
| 2662 | 2744 | |
|---|
| 2663 | | - err = f2fs_get_node_info(sbi, ino, &old_ni); |
|---|
| 2745 | + err = f2fs_get_node_info(sbi, ino, &old_ni, false); |
|---|
| 2664 | 2746 | if (err) |
|---|
| 2665 | 2747 | return err; |
|---|
| 2666 | 2748 | |
|---|
| .. | .. |
|---|
| 2684 | 2766 | src = F2FS_INODE(page); |
|---|
| 2685 | 2767 | dst = F2FS_INODE(ipage); |
|---|
| 2686 | 2768 | |
|---|
| 2687 | | - memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src); |
|---|
| 2769 | + memcpy(dst, src, offsetof(struct f2fs_inode, i_ext)); |
|---|
| 2688 | 2770 | dst->i_size = 0; |
|---|
| 2689 | 2771 | dst->i_blocks = cpu_to_le64(1); |
|---|
| 2690 | 2772 | dst->i_links = cpu_to_le32(1); |
|---|
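
The `memcpy()` in the recovery path above now uses `offsetof(struct f2fs_inode, i_ext)` instead of subtracting raw pointers; both expressions yield the number of bytes up to `i_ext`, the `offsetof()` form just states it directly. A standalone comparison using a made-up struct (the field layout here is not the real `struct f2fs_inode`):

```c
/* offsetof() versus pointer subtraction for a leading-fields copy. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct example_inode {
    unsigned int i_mode;
    unsigned int i_links;
    unsigned long long i_size;
    unsigned int i_ext[12];     /* copy everything before this field */
    unsigned int i_addr[100];
};

int main(void)
{
    struct example_inode src = { .i_mode = 0644, .i_size = 4096 };
    struct example_inode dst = { 0 };
    size_t old_style = (size_t)((char *)&src.i_ext - (char *)&src);
    size_t new_style = offsetof(struct example_inode, i_ext);

    printf("old=%zu new=%zu equal=%d\n", old_style, new_style,
           old_style == new_style);

    /* copy only the leading fields, as the recovery path does */
    memcpy(&dst, &src, new_style);
    printf("dst.i_size=%llu\n", dst.i_size);
    return 0;
}
```
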
| .. | .. |
|---|
| 2945 | 3027 | LIST_HEAD(sets); |
|---|
| 2946 | 3028 | int err = 0; |
|---|
| 2947 | 3029 | |
|---|
| 2948 | | - /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */ |
|---|
| 3030 | + /* |
|---|
| 3031 | + * during unmount, let's flush nat_bits before checking |
|---|
| 3032 | + * nat_cnt[DIRTY_NAT]. |
|---|
| 3033 | + */ |
|---|
| 2949 | 3034 | if (enabled_nat_bits(sbi, cpc)) { |
|---|
| 2950 | | - down_write(&nm_i->nat_tree_lock); |
|---|
| 3035 | + f2fs_down_write(&nm_i->nat_tree_lock); |
|---|
| 2951 | 3036 | remove_nats_in_journal(sbi); |
|---|
| 2952 | | - up_write(&nm_i->nat_tree_lock); |
|---|
| 3037 | + f2fs_up_write(&nm_i->nat_tree_lock); |
|---|
| 2953 | 3038 | } |
|---|
| 2954 | 3039 | |
|---|
| 2955 | | - if (!nm_i->dirty_nat_cnt) |
|---|
| 3040 | + if (!nm_i->nat_cnt[DIRTY_NAT]) |
|---|
| 2956 | 3041 | return 0; |
|---|
| 2957 | 3042 | |
|---|
| 2958 | | - down_write(&nm_i->nat_tree_lock); |
|---|
| 3043 | + f2fs_down_write(&nm_i->nat_tree_lock); |
|---|
| 2959 | 3044 | |
|---|
| 2960 | 3045 | /* |
|---|
| 2961 | 3046 | * if there are no enough space in journal to store dirty nat |
|---|
| .. | .. |
|---|
| 2963 | 3048 | * into nat entry set. |
|---|
| 2964 | 3049 | */ |
|---|
| 2965 | 3050 | if (enabled_nat_bits(sbi, cpc) || |
|---|
| 2966 | | - !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) |
|---|
| 3051 | + !__has_cursum_space(journal, |
|---|
| 3052 | + nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL)) |
|---|
| 2967 | 3053 | remove_nats_in_journal(sbi); |
|---|
| 2968 | 3054 | |
|---|
| 2969 | 3055 | while ((found = __gang_lookup_nat_set(nm_i, |
|---|
| 2970 | 3056 | set_idx, SETVEC_SIZE, setvec))) { |
|---|
| 2971 | 3057 | unsigned idx; |
|---|
| 3058 | + |
|---|
| 2972 | 3059 | set_idx = setvec[found - 1]->set + 1; |
|---|
| 2973 | 3060 | for (idx = 0; idx < found; idx++) |
|---|
| 2974 | 3061 | __adjust_nat_entry_set(setvec[idx], &sets, |
|---|
| .. | .. |
|---|
| 2982 | 3069 | break; |
|---|
| 2983 | 3070 | } |
|---|
| 2984 | 3071 | |
|---|
| 2985 | | - up_write(&nm_i->nat_tree_lock); |
|---|
| 3072 | + f2fs_up_write(&nm_i->nat_tree_lock); |
|---|
| 2986 | 3073 | /* Allow dirty nats by node block allocation in write_begin */ |
|---|
| 2987 | 3074 | |
|---|
| 2988 | 3075 | return err; |
|---|
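
`f2fs_flush_nat_entries()` above decides, per checkpoint, whether dirty NAT entries can stay in the current-segment journal: if nat_bits is enabled or `__has_cursum_space()` says they no longer fit, the journalled entries are pulled back (`remove_nats_in_journal()`) and written through full NAT blocks instead. A toy model of that decision; the slot counts in `main()` are invented for the example.

```c
/* Journal-or-NAT-block decision modelled in isolation. */
#include <stdbool.h>
#include <stdio.h>

static bool has_cursum_space(unsigned int free_slots, unsigned int want)
{
    return want <= free_slots;
}

static const char *pick_destination(unsigned int journal_free_slots,
                                    unsigned int dirty_nat_cnt,
                                    bool flush_all_to_blocks) /* e.g. nat_bits enabled */
{
    if (flush_all_to_blocks ||
        !has_cursum_space(journal_free_slots, dirty_nat_cnt))
        return "NAT blocks";
    return "curseg journal";
}

int main(void)
{
    printf("%s\n", pick_destination(38, 12, false));   /* fits: journal */
    printf("%s\n", pick_destination(38, 120, false));  /* too many: blocks */
    printf("%s\n", pick_destination(38, 12, true));    /* forced: blocks */
    return 0;
}
```
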
| .. | .. |
|---|
| 3087 | 3174 | F2FS_RESERVED_NODE_NUM; |
|---|
| 3088 | 3175 | nm_i->nid_cnt[FREE_NID] = 0; |
|---|
| 3089 | 3176 | nm_i->nid_cnt[PREALLOC_NID] = 0; |
|---|
| 3090 | | - nm_i->nat_cnt = 0; |
|---|
| 3091 | 3177 | nm_i->ram_thresh = DEF_RAM_THRESHOLD; |
|---|
| 3092 | 3178 | nm_i->ra_nid_pages = DEF_RA_NID_PAGES; |
|---|
| 3093 | 3179 | nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; |
|---|
| .. | .. |
|---|
| 3101 | 3187 | |
|---|
| 3102 | 3188 | mutex_init(&nm_i->build_lock); |
|---|
| 3103 | 3189 | spin_lock_init(&nm_i->nid_list_lock); |
|---|
| 3104 | | - init_rwsem(&nm_i->nat_tree_lock); |
|---|
| 3190 | + init_f2fs_rwsem(&nm_i->nat_tree_lock); |
|---|
| 3105 | 3191 | |
|---|
| 3106 | 3192 | nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); |
|---|
| 3107 | 3193 | nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); |
|---|
| 3108 | 3194 | version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); |
|---|
| 3109 | | - if (!version_bitmap) |
|---|
| 3110 | | - return -EFAULT; |
|---|
| 3111 | | - |
|---|
| 3112 | 3195 | nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, |
|---|
| 3113 | 3196 | GFP_KERNEL); |
|---|
| 3114 | 3197 | if (!nm_i->nat_bitmap) |
|---|
| .. | .. |
|---|
| 3210 | 3293 | spin_unlock(&nm_i->nid_list_lock); |
|---|
| 3211 | 3294 | |
|---|
| 3212 | 3295 | /* destroy nat cache */ |
|---|
| 3213 | | - down_write(&nm_i->nat_tree_lock); |
|---|
| 3296 | + f2fs_down_write(&nm_i->nat_tree_lock); |
|---|
| 3214 | 3297 | while ((found = __gang_lookup_nat_cache(nm_i, |
|---|
| 3215 | 3298 | nid, NATVEC_SIZE, natvec))) { |
|---|
| 3216 | 3299 | unsigned idx; |
|---|
| .. | .. |
|---|
| 3224 | 3307 | __del_from_nat_cache(nm_i, natvec[idx]); |
|---|
| 3225 | 3308 | } |
|---|
| 3226 | 3309 | } |
|---|
| 3227 | | - f2fs_bug_on(sbi, nm_i->nat_cnt); |
|---|
| 3310 | + f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); |
|---|
| 3228 | 3311 | |
|---|
| 3229 | 3312 | /* destroy nat set cache */ |
|---|
| 3230 | 3313 | nid = 0; |
|---|
| .. | .. |
|---|
| 3240 | 3323 | kmem_cache_free(nat_entry_set_slab, setvec[idx]); |
|---|
| 3241 | 3324 | } |
|---|
| 3242 | 3325 | } |
|---|
| 3243 | | - up_write(&nm_i->nat_tree_lock); |
|---|
| 3326 | + f2fs_up_write(&nm_i->nat_tree_lock); |
|---|
| 3244 | 3327 | |
|---|
| 3245 | 3328 | kvfree(nm_i->nat_block_bitmap); |
|---|
| 3246 | 3329 | if (nm_i->free_nid_bitmap) { |
|---|
| .. | .. |
|---|
| 3258 | 3341 | kvfree(nm_i->nat_bitmap_mir); |
|---|
| 3259 | 3342 | #endif |
|---|
| 3260 | 3343 | sbi->nm_info = NULL; |
|---|
| 3261 | | - kvfree(nm_i); |
|---|
| 3344 | + kfree(nm_i); |
|---|
| 3262 | 3345 | } |
|---|
| 3263 | 3346 | |
|---|
| 3264 | 3347 | int __init f2fs_create_node_manager_caches(void) |
|---|