.. | .. |
---|
22 | 22 | #include <linux/bio.h> |
---|
23 | 23 | #include <linux/blkdev.h> |
---|
24 | 24 | #include <linux/quotaops.h> |
---|
| 25 | +#include <linux/part_stat.h> |
---|
25 | 26 | #include <crypto/hash.h> |
---|
26 | 27 | |
---|
27 | 28 | #include <linux/fscrypt.h> |
---|
.. | .. |
---|
32 | 33 | #else |
---|
33 | 34 | #define f2fs_bug_on(sbi, condition) \ |
---|
34 | 35 | do { \ |
---|
35 | | - if (unlikely(condition)) { \ |
---|
36 | | - WARN_ON(1); \ |
---|
| 36 | + if (WARN_ON(condition)) \ |
---|
37 | 37 | set_sbi_flag(sbi, SBI_NEED_FSCK); \ |
---|
38 | | - } \ |
---|
39 | 38 | } while (0) |
---|
40 | 39 | #endif |
---|
41 | 40 | |
---|
.. | .. |
---|
44 | 43 | FAULT_KVMALLOC, |
---|
45 | 44 | FAULT_PAGE_ALLOC, |
---|
46 | 45 | FAULT_PAGE_GET, |
---|
47 | | - FAULT_ALLOC_BIO, |
---|
48 | 46 | FAULT_ALLOC_NID, |
---|
49 | 47 | FAULT_ORPHAN, |
---|
50 | 48 | FAULT_BLOCK, |
---|
.. | .. |
---|
71 | 69 | #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type))) |
---|
72 | 70 | #endif |
---|
73 | 71 | |
---|
74 | | -#define MIN_ROOT_RESERVED_BLOCKS (128 * 1024 * 1024U) |
---|
| 72 | +#define MIN_ROOT_RESERVED_BLOCKS (128 * 1024 * 1024) |
---|
75 | 73 | |
---|
76 | 74 | /* |
---|
77 | 75 | * For mount options |
---|
.. | .. |
---|
88 | 86 | #define F2FS_MOUNT_FLUSH_MERGE 0x00000400 |
---|
89 | 87 | #define F2FS_MOUNT_NOBARRIER 0x00000800 |
---|
90 | 88 | #define F2FS_MOUNT_FASTBOOT 0x00001000 |
---|
91 | | -#define F2FS_MOUNT_EXTENT_CACHE 0x00002000 |
---|
| 89 | +#define F2FS_MOUNT_READ_EXTENT_CACHE 0x00002000 |
---|
92 | 90 | #define F2FS_MOUNT_DATA_FLUSH 0x00008000 |
---|
93 | 91 | #define F2FS_MOUNT_FAULT_INJECTION 0x00010000 |
---|
94 | 92 | #define F2FS_MOUNT_USRQUOTA 0x00080000 |
---|
.. | .. |
---|
99 | 97 | #define F2FS_MOUNT_RESERVE_ROOT 0x01000000 |
---|
100 | 98 | #define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000 |
---|
101 | 99 | #define F2FS_MOUNT_NORECOVERY 0x04000000 |
---|
| 100 | +#define F2FS_MOUNT_ATGC 0x08000000 |
---|
| 101 | +#define F2FS_MOUNT_MERGE_CHECKPOINT 0x10000000 |
---|
| 102 | +#define F2FS_MOUNT_GC_MERGE 0x20000000 |
---|
| 103 | +#define F2FS_MOUNT_COMPRESS_CACHE 0x40000000 |
---|
| 104 | +#define F2FS_MOUNT_AGE_EXTENT_CACHE 0x80000000 |
---|
102 | 105 | |
---|
103 | 106 | #define F2FS_OPTION(sbi) ((sbi)->mount_opt) |
---|
104 | 107 | #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) |
---|
.. | .. |
---|
116 | 119 | typedef u32 nid_t; |
---|
117 | 120 | |
---|
118 | 121 | #define COMPRESS_EXT_NUM 16 |
---|
| 122 | + |
---|
| 123 | +/* |
---|
| 124 | + * An implementation of an rwsem that is explicitly unfair to readers. This |
---|
| 125 | + * prevents priority inversion when a low-priority reader acquires the read lock |
---|
| 126 | + * while sleeping on the write lock but the write lock is needed by |
---|
| 127 | + * higher-priority clients. |
---|
| 128 | + */ |
---|
| 129 | + |
---|
| 130 | +struct f2fs_rwsem { |
---|
| 131 | + struct rw_semaphore internal_rwsem; |
---|
| 132 | + wait_queue_head_t read_waiters; |
---|
| 133 | +}; |
---|
119 | 134 | |
---|
120 | 135 | struct f2fs_mount_info { |
---|
121 | 136 | unsigned int opt; |
---|
.. | .. |
---|
139 | 154 | int fsync_mode; /* fsync policy */ |
---|
140 | 155 | int fs_mode; /* fs mode: LFS or ADAPTIVE */ |
---|
141 | 156 | int bggc_mode; /* bggc mode: off, on or sync */ |
---|
142 | | - struct fscrypt_dummy_context dummy_enc_ctx; /* test dummy encryption */ |
---|
143 | | -#ifdef CONFIG_FS_ENCRYPTION |
---|
144 | | - bool inlinecrypt; /* inline encryption enabled */ |
---|
145 | | -#endif |
---|
| 157 | + int memory_mode; /* memory mode */ |
---|
| 158 | + struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */ |
---|
146 | 159 | block_t unusable_cap_perc; /* percentage for cap */ |
---|
147 | 160 | block_t unusable_cap; /* Amount of space allowed to be |
---|
148 | 161 | * unusable when disabling checkpoint |
---|
.. | .. |
---|
150 | 163 | |
---|
151 | 164 | /* For compression */ |
---|
152 | 165 | unsigned char compress_algorithm; /* algorithm type */ |
---|
153 | | - unsigned compress_log_size; /* cluster log size */ |
---|
| 166 | + unsigned char compress_log_size; /* cluster log size */ |
---|
| 167 | + unsigned char compress_level; /* compress level */ |
---|
| 168 | + bool compress_chksum; /* compressed data chksum */ |
---|
154 | 169 | unsigned char compress_ext_cnt; /* extension count */ |
---|
| 170 | + int compress_mode; /* compression mode */ |
---|
155 | 171 | unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */ |
---|
156 | 172 | }; |
---|
157 | 173 | |
---|
.. | .. |
---|
169 | 185 | #define F2FS_FEATURE_SB_CHKSUM 0x0800 |
---|
170 | 186 | #define F2FS_FEATURE_CASEFOLD 0x1000 |
---|
171 | 187 | #define F2FS_FEATURE_COMPRESSION 0x2000 |
---|
| 188 | +#define F2FS_FEATURE_RO 0x4000 |
---|
172 | 189 | |
---|
173 | 190 | #define __F2FS_HAS_FEATURE(raw_super, mask) \ |
---|
174 | 191 | ((raw_super->feature & cpu_to_le32(mask)) != 0) |
---|
.. | .. |
---|
238 | 255 | * condition of read on truncated area |
---|
239 | 256 | * by extent_cache |
---|
240 | 257 | */ |
---|
| 258 | + DATA_GENERIC_ENHANCE_UPDATE, /* |
---|
| 259 | + * strong check on range and segment |
---|
| 260 | + * bitmap for update case |
---|
| 261 | + */ |
---|
241 | 262 | META_GENERIC, |
---|
242 | 263 | }; |
---|
243 | 264 | |
---|
.. | .. |
---|
267 | 288 | struct list_head list; /* list head */ |
---|
268 | 289 | struct page *page; /* warm node page pointer */ |
---|
269 | 290 | unsigned int seq_id; /* sequence id */ |
---|
| 291 | +}; |
---|
| 292 | + |
---|
| 293 | +struct ckpt_req { |
---|
| 294 | + struct completion wait; /* completion for checkpoint done */ |
---|
| 295 | + struct llist_node llnode; /* llist_node to be linked in wait queue */ |
---|
| 296 | + int ret; /* return code of checkpoint */ |
---|
| 297 | + ktime_t queue_time; /* request queued time */ |
---|
| 298 | +}; |
---|
| 299 | + |
---|
| 300 | +struct ckpt_req_control { |
---|
| 301 | + struct task_struct *f2fs_issue_ckpt; /* checkpoint task */ |
---|
| 302 | + int ckpt_thread_ioprio; /* checkpoint merge thread ioprio */ |
---|
| 303 | + wait_queue_head_t ckpt_wait_queue; /* waiting queue for wake-up */ |
---|
| 304 | + atomic_t issued_ckpt; /* # of actually issued ckpts */ |
---|
| 305 | + atomic_t total_ckpt; /* # of total ckpts */ |
---|
| 306 | + atomic_t queued_ckpt; /* # of queued ckpts */ |
---|
| 307 | + struct llist_head issue_list; /* list for command issue */ |
---|
| 308 | + spinlock_t stat_lock; /* lock for below checkpoint time stats */ |
---|
| 309 | + unsigned int cur_time; /* cur wait time in msec for currently issued checkpoint */ |
---|
| 310 | + unsigned int peak_time; /* peak wait time in msec until now */ |
---|
270 | 311 | }; |
---|
271 | 312 | |
---|
272 | 313 | /* for the bitmap indicate blocks to be discarded */ |
---|
.. | .. |
---|
405 | 446 | return size <= MAX_SIT_JENTRIES(journal); |
---|
406 | 447 | } |
---|
407 | 448 | |
---|
408 | | -/* |
---|
409 | | - * ioctl commands |
---|
410 | | - */ |
---|
411 | | -#define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS |
---|
412 | | -#define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS |
---|
413 | | -#define F2FS_IOC_GETVERSION FS_IOC_GETVERSION |
---|
414 | | - |
---|
415 | | -#define F2FS_IOCTL_MAGIC 0xf5 |
---|
416 | | -#define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1) |
---|
417 | | -#define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2) |
---|
418 | | -#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3) |
---|
419 | | -#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4) |
---|
420 | | -#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5) |
---|
421 | | -#define F2FS_IOC_GARBAGE_COLLECT _IOW(F2FS_IOCTL_MAGIC, 6, __u32) |
---|
422 | | -#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7) |
---|
423 | | -#define F2FS_IOC_DEFRAGMENT _IOWR(F2FS_IOCTL_MAGIC, 8, \ |
---|
424 | | - struct f2fs_defragment) |
---|
425 | | -#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \ |
---|
426 | | - struct f2fs_move_range) |
---|
427 | | -#define F2FS_IOC_FLUSH_DEVICE _IOW(F2FS_IOCTL_MAGIC, 10, \ |
---|
428 | | - struct f2fs_flush_device) |
---|
429 | | -#define F2FS_IOC_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11, \ |
---|
430 | | - struct f2fs_gc_range) |
---|
431 | | -#define F2FS_IOC_GET_FEATURES _IOR(F2FS_IOCTL_MAGIC, 12, __u32) |
---|
432 | | -#define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32) |
---|
433 | | -#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32) |
---|
434 | | -#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15) |
---|
435 | | -#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64) |
---|
436 | | -#define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64) |
---|
437 | | -#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \ |
---|
438 | | - _IOR(F2FS_IOCTL_MAGIC, 18, __u64) |
---|
439 | | -#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \ |
---|
440 | | - _IOR(F2FS_IOCTL_MAGIC, 19, __u64) |
---|
441 | | - |
---|
442 | | -#define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL |
---|
443 | | -#define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL |
---|
444 | | - |
---|
445 | | -#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY |
---|
446 | | -#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY |
---|
447 | | -#define F2FS_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT |
---|
448 | | - |
---|
449 | | -/* |
---|
450 | | - * should be same as XFS_IOC_GOINGDOWN. |
---|
451 | | - * Flags for going down operation used by FS_IOC_GOINGDOWN |
---|
452 | | - */ |
---|
453 | | -#define F2FS_IOC_SHUTDOWN _IOR('X', 125, __u32) /* Shutdown */ |
---|
454 | | -#define F2FS_GOING_DOWN_FULLSYNC 0x0 /* going down with full sync */ |
---|
455 | | -#define F2FS_GOING_DOWN_METASYNC 0x1 /* going down with metadata */ |
---|
456 | | -#define F2FS_GOING_DOWN_NOSYNC 0x2 /* going down */ |
---|
457 | | -#define F2FS_GOING_DOWN_METAFLUSH 0x3 /* going down with meta flush */ |
---|
458 | | -#define F2FS_GOING_DOWN_NEED_FSCK 0x4 /* going down to trigger fsck */ |
---|
459 | | - |
---|
460 | | -#if defined(__KERNEL__) && defined(CONFIG_COMPAT) |
---|
461 | | -/* |
---|
462 | | - * ioctl commands in 32 bit emulation |
---|
463 | | - */ |
---|
464 | | -#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS |
---|
465 | | -#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS |
---|
466 | | -#define F2FS_IOC32_GETVERSION FS_IOC32_GETVERSION |
---|
467 | | -#endif |
---|
468 | | - |
---|
469 | | -#define F2FS_IOC_FSGETXATTR FS_IOC_FSGETXATTR |
---|
470 | | -#define F2FS_IOC_FSSETXATTR FS_IOC_FSSETXATTR |
---|
471 | | - |
---|
472 | | -struct f2fs_gc_range { |
---|
473 | | - u32 sync; |
---|
474 | | - u64 start; |
---|
475 | | - u64 len; |
---|
476 | | -}; |
---|
477 | | - |
---|
478 | | -struct f2fs_defragment { |
---|
479 | | - u64 start; |
---|
480 | | - u64 len; |
---|
481 | | -}; |
---|
482 | | - |
---|
483 | | -struct f2fs_move_range { |
---|
484 | | - u32 dst_fd; /* destination fd */ |
---|
485 | | - u64 pos_in; /* start position in src_fd */ |
---|
486 | | - u64 pos_out; /* start position in dst_fd */ |
---|
487 | | - u64 len; /* size to move */ |
---|
488 | | -}; |
---|
489 | | - |
---|
490 | | -struct f2fs_flush_device { |
---|
491 | | - u32 dev_num; /* device number to flush */ |
---|
492 | | - u32 segments; /* # of segments to flush */ |
---|
493 | | -}; |
---|
494 | | - |
---|
495 | 449 | /* for inline stuff */ |
---|
496 | 450 | #define DEF_INLINE_RESERVED_SIZE 1 |
---|
497 | 451 | static inline int get_extra_isize(struct inode *inode); |
---|
.. | .. |
---|
544 | 498 | #ifdef CONFIG_UNICODE |
---|
545 | 499 | /* |
---|
546 | 500 | * For casefolded directories: the casefolded name, but it's left NULL |
---|
547 | | - * if the original name is not valid Unicode, if the directory is both |
---|
548 | | - * casefolded and encrypted and its encryption key is unavailable, or if |
---|
549 | | - * the filesystem is doing an internal operation where usr_fname is also |
---|
550 | | - * NULL. In all these cases we fall back to treating the name as an |
---|
551 | | - * opaque byte sequence. |
---|
| 501 | + * if the original name is not valid Unicode, if the original name is |
---|
| 502 | + * "." or "..", if the directory is both casefolded and encrypted and |
---|
| 503 | + * its encryption key is unavailable, or if the filesystem is doing an |
---|
| 504 | + * internal operation where usr_fname is also NULL. In all these cases |
---|
| 505 | + * we fall back to treating the name as an opaque byte sequence. |
---|
552 | 506 | */ |
---|
553 | 507 | struct fscrypt_str cf_name; |
---|
554 | 508 | #endif |
---|
.. | .. |
---|
622 | 576 | #define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */ |
---|
623 | 577 | |
---|
624 | 578 | /* number of extent info in extent cache we try to shrink */ |
---|
625 | | -#define EXTENT_CACHE_SHRINK_NUMBER 128 |
---|
| 579 | +#define READ_EXTENT_CACHE_SHRINK_NUMBER 128 |
---|
| 580 | + |
---|
| 581 | +/* number of age extent info in extent cache we try to shrink */ |
---|
| 582 | +#define AGE_EXTENT_CACHE_SHRINK_NUMBER 128 |
---|
| 583 | +#define LAST_AGE_WEIGHT 30 |
---|
| 584 | +#define SAME_AGE_REGION 1024 |
---|
| 585 | + |
---|
| 586 | +/* |
---|
| 587 | + * Define data block with age less than 1GB as hot data |
---|
| 588 | + * define data block with age less than 10GB but more than 1GB as warm data |
---|
| 589 | + */ |
---|
| 590 | +#define DEF_HOT_DATA_AGE_THRESHOLD 262144 |
---|
| 591 | +#define DEF_WARM_DATA_AGE_THRESHOLD 2621440 |
---|
| 592 | + |
---|
| 593 | +/* extent cache type */ |
---|
| 594 | +enum extent_type { |
---|
| 595 | + EX_READ, |
---|
| 596 | + EX_BLOCK_AGE, |
---|
| 597 | + NR_EXTENT_CACHES, |
---|
| 598 | +}; |
---|
626 | 599 | |
---|
627 | 600 | struct rb_entry { |
---|
628 | 601 | struct rb_node rb_node; /* rb node located in rb-tree */ |
---|
629 | | - unsigned int ofs; /* start offset of the entry */ |
---|
630 | | - unsigned int len; /* length of the entry */ |
---|
| 602 | + union { |
---|
| 603 | + struct { |
---|
| 604 | + unsigned int ofs; /* start offset of the entry */ |
---|
| 605 | + unsigned int len; /* length of the entry */ |
---|
| 606 | + }; |
---|
| 607 | + unsigned long long key; /* 64-bits key */ |
---|
| 608 | + } __packed; |
---|
631 | 609 | }; |
---|
632 | 610 | |
---|
633 | 611 | struct extent_info { |
---|
634 | 612 | unsigned int fofs; /* start offset in a file */ |
---|
635 | 613 | unsigned int len; /* length of the extent */ |
---|
636 | | - u32 blk; /* start block address of the extent */ |
---|
| 614 | + union { |
---|
| 615 | + /* read extent_cache */ |
---|
| 616 | + struct { |
---|
| 617 | + /* start block address of the extent */ |
---|
| 618 | + block_t blk; |
---|
| 619 | +#ifdef CONFIG_F2FS_FS_COMPRESSION |
---|
| 620 | + /* physical extent length of compressed blocks */ |
---|
| 621 | + unsigned int c_len; |
---|
| 622 | +#endif |
---|
| 623 | + }; |
---|
| 624 | + /* block age extent_cache */ |
---|
| 625 | + struct { |
---|
| 626 | + /* block age of the extent */ |
---|
| 627 | + unsigned long long age; |
---|
| 628 | + /* last total blocks allocated */ |
---|
| 629 | + unsigned long long last_blocks; |
---|
| 630 | + }; |
---|
| 631 | + }; |
---|
637 | 632 | }; |
---|
638 | 633 | |
---|
639 | 634 | struct extent_node { |
---|
.. | .. |
---|
645 | 640 | |
---|
646 | 641 | struct extent_tree { |
---|
647 | 642 | nid_t ino; /* inode number */ |
---|
| 643 | + enum extent_type type; /* keep the extent tree type */ |
---|
648 | 644 | struct rb_root_cached root; /* root of extent info rb-tree */ |
---|
649 | 645 | struct extent_node *cached_en; /* recently accessed extent node */ |
---|
650 | | - struct extent_info largest; /* largested extent info */ |
---|
651 | 646 | struct list_head list; /* to be used by sbi->zombie_list */ |
---|
652 | 647 | rwlock_t lock; /* protect extent info rb-tree */ |
---|
653 | 648 | atomic_t node_cnt; /* # of extent node in rb-tree*/ |
---|
654 | 649 | bool largest_updated; /* largest extent updated */ |
---|
| 650 | + struct extent_info largest; /* largest cached extent for EX_READ */ |
---|
| 651 | +}; |
---|
| 652 | + |
---|
| 653 | +struct extent_tree_info { |
---|
| 654 | + struct radix_tree_root extent_tree_root;/* cache extent cache entries */ |
---|
| 655 | + struct mutex extent_tree_lock; /* locking extent radix tree */ |
---|
| 656 | + struct list_head extent_list; /* lru list for shrinker */ |
---|
| 657 | + spinlock_t extent_lock; /* locking extent lru list */ |
---|
| 658 | + atomic_t total_ext_tree; /* extent tree count */ |
---|
| 659 | + struct list_head zombie_list; /* extent zombie tree list */ |
---|
| 660 | + atomic_t total_zombie_tree; /* extent zombie tree count */ |
---|
| 661 | + atomic_t total_ext_node; /* extent info count */ |
---|
655 | 662 | }; |
---|
656 | 663 | |
---|
657 | 664 | /* |
---|
.. | .. |
---|
701 | 708 | #define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT) |
---|
702 | 709 | |
---|
703 | 710 | #define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) |
---|
704 | | -#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) |
---|
705 | 711 | #define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT) |
---|
706 | | -#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) |
---|
707 | 712 | #define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT) |
---|
| 713 | + |
---|
| 714 | +#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) |
---|
| 715 | +#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) |
---|
708 | 716 | #define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT) |
---|
| 717 | + |
---|
709 | 718 | #define file_is_encrypt(inode) is_file(inode, FADVISE_ENCRYPT_BIT) |
---|
710 | 719 | #define file_set_encrypt(inode) set_file(inode, FADVISE_ENCRYPT_BIT) |
---|
711 | | -#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT) |
---|
| 720 | + |
---|
712 | 721 | #define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT) |
---|
713 | 722 | #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT) |
---|
| 723 | + |
---|
714 | 724 | #define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT) |
---|
715 | 725 | #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT) |
---|
| 726 | + |
---|
716 | 727 | #define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT) |
---|
717 | 728 | #define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT) |
---|
718 | 729 | #define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT) |
---|
| 730 | + |
---|
719 | 731 | #define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT) |
---|
720 | 732 | #define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT) |
---|
721 | 733 | |
---|
.. | .. |
---|
751 | 763 | FI_DROP_CACHE, /* drop dirty page cache */ |
---|
752 | 764 | FI_DATA_EXIST, /* indicate data exists */ |
---|
753 | 765 | FI_INLINE_DOTS, /* indicate inline dot dentries */ |
---|
754 | | - FI_DO_DEFRAG, /* indicate defragment is running */ |
---|
| 766 | + FI_SKIP_WRITES, /* should skip data page writeback */ |
---|
| 767 | + FI_OPU_WRITE, /* used for opu per file */ |
---|
755 | 768 | FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ |
---|
756 | 769 | FI_NO_PREALLOC, /* indicate skipped preallocated blocks */ |
---|
757 | 770 | FI_HOT_DATA, /* indicate file is hot */ |
---|
.. | .. |
---|
761 | 774 | FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */ |
---|
762 | 775 | FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */ |
---|
763 | 776 | FI_COMPRESSED_FILE, /* indicate file's data can be compressed */ |
---|
| 777 | + FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */ |
---|
764 | 778 | FI_MMAP_FILE, /* indicate file was mmapped */ |
---|
| 779 | + FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */ |
---|
| 780 | + FI_COMPRESS_RELEASED, /* compressed blocks were released */ |
---|
| 781 | + FI_ALIGNED_WRITE, /* enable aligned write */ |
---|
765 | 782 | FI_MAX, /* max flag, never be used */ |
---|
766 | 783 | }; |
---|
767 | 784 | |
---|
.. | .. |
---|
778 | 795 | |
---|
779 | 796 | /* Use below internally in f2fs*/ |
---|
780 | 797 | unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */ |
---|
781 | | - struct rw_semaphore i_sem; /* protect fi info */ |
---|
| 798 | + struct f2fs_rwsem i_sem; /* protect fi info */ |
---|
782 | 799 | atomic_t dirty_pages; /* # of dirty pages */ |
---|
783 | 800 | f2fs_hash_t chash; /* hash value of given file name */ |
---|
784 | 801 | unsigned int clevel; /* maximum level of given file name */ |
---|
785 | 802 | struct task_struct *task; /* lookup and create consistency */ |
---|
786 | 803 | struct task_struct *cp_task; /* separate cp/wb IO stats*/ |
---|
| 804 | + struct task_struct *wb_task; /* indicate inode is in context of writeback */ |
---|
787 | 805 | nid_t i_xattr_nid; /* node id that contains xattrs */ |
---|
788 | 806 | loff_t last_disk_size; /* lastly written file size */ |
---|
789 | 807 | spinlock_t i_size_lock; /* protect last_disk_size */ |
---|
.. | .. |
---|
800 | 818 | struct list_head inmem_pages; /* inmemory pages managed by f2fs */ |
---|
801 | 819 | struct task_struct *inmem_task; /* store inmemory task */ |
---|
802 | 820 | struct mutex inmem_lock; /* lock for inmemory pages */ |
---|
803 | | - struct extent_tree *extent_tree; /* cached extent_tree entry */ |
---|
| 821 | + struct extent_tree *extent_tree[NR_EXTENT_CACHES]; |
---|
| 822 | + /* cached extent_tree entry */ |
---|
804 | 823 | |
---|
805 | 824 | /* avoid racing between foreground op and gc */ |
---|
806 | | - struct rw_semaphore i_gc_rwsem[2]; |
---|
807 | | - struct rw_semaphore i_mmap_sem; |
---|
808 | | - struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */ |
---|
| 825 | + struct f2fs_rwsem i_gc_rwsem[2]; |
---|
| 826 | + struct f2fs_rwsem i_mmap_sem; |
---|
| 827 | + struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */ |
---|
809 | 828 | |
---|
810 | 829 | int i_extra_isize; /* size of extra space located in i_addr */ |
---|
811 | 830 | kprojid_t i_projid; /* id for project quota */ |
---|
.. | .. |
---|
814 | 833 | struct timespec64 i_disk_time[4];/* inode disk times */ |
---|
815 | 834 | |
---|
816 | 835 | /* for file compress */ |
---|
817 | | - u64 i_compr_blocks; /* # of compressed blocks */ |
---|
| 836 | + atomic_t i_compr_blocks; /* # of compressed blocks */ |
---|
818 | 837 | unsigned char i_compress_algorithm; /* algorithm type */ |
---|
819 | 838 | unsigned char i_log_cluster_size; /* log of cluster size */ |
---|
| 839 | + unsigned char i_compress_level; /* compress level (lz4hc,zstd) */ |
---|
| 840 | + unsigned short i_compress_flag; /* compress flag */ |
---|
820 | 841 | unsigned int i_cluster_size; /* cluster size */ |
---|
821 | 842 | }; |
---|
822 | 843 | |
---|
823 | | -static inline void get_extent_info(struct extent_info *ext, |
---|
| 844 | +static inline void get_read_extent_info(struct extent_info *ext, |
---|
824 | 845 | struct f2fs_extent *i_ext) |
---|
825 | 846 | { |
---|
826 | 847 | ext->fofs = le32_to_cpu(i_ext->fofs); |
---|
.. | .. |
---|
828 | 849 | ext->len = le32_to_cpu(i_ext->len); |
---|
829 | 850 | } |
---|
830 | 851 | |
---|
831 | | -static inline void set_raw_extent(struct extent_info *ext, |
---|
| 852 | +static inline void set_raw_read_extent(struct extent_info *ext, |
---|
832 | 853 | struct f2fs_extent *i_ext) |
---|
833 | 854 | { |
---|
834 | 855 | i_ext->fofs = cpu_to_le32(ext->fofs); |
---|
835 | 856 | i_ext->blk = cpu_to_le32(ext->blk); |
---|
836 | 857 | i_ext->len = cpu_to_le32(ext->len); |
---|
837 | | -} |
---|
838 | | - |
---|
839 | | -static inline void set_extent_info(struct extent_info *ei, unsigned int fofs, |
---|
840 | | - u32 blk, unsigned int len) |
---|
841 | | -{ |
---|
842 | | - ei->fofs = fofs; |
---|
843 | | - ei->blk = blk; |
---|
844 | | - ei->len = len; |
---|
845 | 858 | } |
---|
846 | 859 | |
---|
847 | 860 | static inline bool __is_discard_mergeable(struct discard_info *back, |
---|
.. | .. |
---|
863 | 876 | return __is_discard_mergeable(cur, front, max_len); |
---|
864 | 877 | } |
---|
865 | 878 | |
---|
866 | | -static inline bool __is_extent_mergeable(struct extent_info *back, |
---|
867 | | - struct extent_info *front) |
---|
868 | | -{ |
---|
869 | | - return (back->fofs + back->len == front->fofs && |
---|
870 | | - back->blk + back->len == front->blk); |
---|
871 | | -} |
---|
872 | | - |
---|
873 | | -static inline bool __is_back_mergeable(struct extent_info *cur, |
---|
874 | | - struct extent_info *back) |
---|
875 | | -{ |
---|
876 | | - return __is_extent_mergeable(back, cur); |
---|
877 | | -} |
---|
878 | | - |
---|
879 | | -static inline bool __is_front_mergeable(struct extent_info *cur, |
---|
880 | | - struct extent_info *front) |
---|
881 | | -{ |
---|
882 | | - return __is_extent_mergeable(cur, front); |
---|
883 | | -} |
---|
884 | | - |
---|
885 | | -extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync); |
---|
886 | | -static inline void __try_update_largest_extent(struct extent_tree *et, |
---|
887 | | - struct extent_node *en) |
---|
888 | | -{ |
---|
889 | | - if (en->ei.len > et->largest.len) { |
---|
890 | | - et->largest = en->ei; |
---|
891 | | - et->largest_updated = true; |
---|
892 | | - } |
---|
893 | | -} |
---|
894 | | - |
---|
895 | 879 | /* |
---|
896 | 880 | * For free nid management |
---|
897 | 881 | */ |
---|
.. | .. |
---|
899 | 883 | FREE_NID, /* newly added to free nid list */ |
---|
900 | 884 | PREALLOC_NID, /* it is preallocated */ |
---|
901 | 885 | MAX_NID_STATE, |
---|
| 886 | +}; |
---|
| 887 | + |
---|
| 888 | +enum nat_state { |
---|
| 889 | + TOTAL_NAT, |
---|
| 890 | + DIRTY_NAT, |
---|
| 891 | + RECLAIMABLE_NAT, |
---|
| 892 | + MAX_NAT_STATE, |
---|
902 | 893 | }; |
---|
903 | 894 | |
---|
904 | 895 | struct f2fs_nm_info { |
---|
.. | .. |
---|
913 | 904 | /* NAT cache management */ |
---|
914 | 905 | struct radix_tree_root nat_root;/* root of the nat entry cache */ |
---|
915 | 906 | struct radix_tree_root nat_set_root;/* root of the nat set cache */ |
---|
916 | | - struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ |
---|
| 907 | + struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */ |
---|
917 | 908 | struct list_head nat_entries; /* cached nat entry list (clean) */ |
---|
918 | 909 | spinlock_t nat_list_lock; /* protect clean nat entry list */ |
---|
919 | | - unsigned int nat_cnt; /* the # of cached nat entries */ |
---|
920 | | - unsigned int dirty_nat_cnt; /* total num of nat entries in set */ |
---|
| 910 | + unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ |
---|
921 | 911 | unsigned int nat_blocks; /* # of nat blocks */ |
---|
922 | 912 | |
---|
923 | 913 | /* free node ids management */ |
---|
.. | .. |
---|
986 | 976 | */ |
---|
987 | 977 | #define NR_CURSEG_DATA_TYPE (3) |
---|
988 | 978 | #define NR_CURSEG_NODE_TYPE (3) |
---|
989 | | -#define NR_CURSEG_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE) |
---|
| 979 | +#define NR_CURSEG_INMEM_TYPE (2) |
---|
| 980 | +#define NR_CURSEG_RO_TYPE (2) |
---|
| 981 | +#define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE) |
---|
| 982 | +#define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE) |
---|
990 | 983 | |
---|
991 | 984 | enum { |
---|
992 | 985 | CURSEG_HOT_DATA = 0, /* directory entry blocks */ |
---|
.. | .. |
---|
995 | 988 | CURSEG_HOT_NODE, /* direct node blocks of directory files */ |
---|
996 | 989 | CURSEG_WARM_NODE, /* direct node blocks of normal files */ |
---|
997 | 990 | CURSEG_COLD_NODE, /* indirect node blocks */ |
---|
998 | | - NO_CHECK_TYPE, |
---|
999 | | - CURSEG_COLD_DATA_PINNED,/* cold data for pinned file */ |
---|
| 991 | + NR_PERSISTENT_LOG, /* number of persistent log */ |
---|
| 992 | + CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG, |
---|
| 993 | + /* pinned file that needs consecutive block address */ |
---|
| 994 | + CURSEG_ALL_DATA_ATGC, /* SSR alloctor in hot/warm/cold data area */ |
---|
| 995 | + NO_CHECK_TYPE, /* number of persistent & inmem log */ |
---|
1000 | 996 | }; |
---|
1001 | 997 | |
---|
1002 | 998 | struct flush_cmd { |
---|
.. | .. |
---|
1021 | 1017 | struct dirty_seglist_info *dirty_info; /* dirty segment information */ |
---|
1022 | 1018 | struct curseg_info *curseg_array; /* active segment information */ |
---|
1023 | 1019 | |
---|
1024 | | - struct rw_semaphore curseg_lock; /* for preventing curseg change */ |
---|
| 1020 | + struct f2fs_rwsem curseg_lock; /* for preventing curseg change */ |
---|
1025 | 1021 | |
---|
1026 | 1022 | block_t seg0_blkaddr; /* block address of 0'th segment */ |
---|
1027 | 1023 | block_t main_blkaddr; /* start block address of main area */ |
---|
.. | .. |
---|
1030 | 1026 | unsigned int segment_count; /* total # of segments */ |
---|
1031 | 1027 | unsigned int main_segments; /* # of segments in main area */ |
---|
1032 | 1028 | unsigned int reserved_segments; /* # of reserved segments */ |
---|
| 1029 | + unsigned int additional_reserved_segments;/* reserved segs for IO align feature */ |
---|
1033 | 1030 | unsigned int ovp_segments; /* # of overprovision segments */ |
---|
1034 | 1031 | |
---|
1035 | 1032 | /* a threshold to reclaim prefree segments */ |
---|
.. | .. |
---|
1095 | 1092 | */ |
---|
1096 | 1093 | #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type)) |
---|
1097 | 1094 | enum page_type { |
---|
1098 | | - DATA, |
---|
1099 | | - NODE, |
---|
| 1095 | + DATA = 0, |
---|
| 1096 | + NODE = 1, /* should not change this */ |
---|
1100 | 1097 | META, |
---|
1101 | 1098 | NR_PAGE_TYPE, |
---|
1102 | 1099 | META_FLUSH, |
---|
.. | .. |
---|
1186 | 1183 | bool retry; /* need to reallocate block address */ |
---|
1187 | 1184 | int compr_blocks; /* # of compressed block addresses */ |
---|
1188 | 1185 | bool encrypted; /* indicate file is encrypted */ |
---|
| 1186 | + bool post_read; /* require post read */ |
---|
1189 | 1187 | enum iostat_type io_type; /* io type */ |
---|
1190 | 1188 | struct writeback_control *io_wbc; /* writeback control */ |
---|
1191 | 1189 | struct bio **bio; /* bio for ipu */ |
---|
.. | .. |
---|
1204 | 1202 | struct bio *bio; /* bios to merge */ |
---|
1205 | 1203 | sector_t last_block_in_bio; /* last block number */ |
---|
1206 | 1204 | struct f2fs_io_info fio; /* store buffered io info. */ |
---|
1207 | | - struct rw_semaphore io_rwsem; /* blocking op for bio */ |
---|
| 1205 | + struct f2fs_rwsem io_rwsem; /* blocking op for bio */ |
---|
1208 | 1206 | spinlock_t io_lock; /* serialize DATA/NODE IOs */ |
---|
1209 | 1207 | struct list_head io_list; /* track fios */ |
---|
1210 | 1208 | struct list_head bio_list; /* bio entry list head */ |
---|
1211 | | - struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */ |
---|
| 1209 | + struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */ |
---|
1212 | 1210 | }; |
---|
1213 | 1211 | |
---|
1214 | 1212 | #define FDEV(i) (sbi->devs[i]) |
---|
.. | .. |
---|
1241 | 1239 | unsigned long ino_num; /* number of entries */ |
---|
1242 | 1240 | }; |
---|
1243 | 1241 | |
---|
| 1242 | +/* for GC_AT */ |
---|
| 1243 | +struct atgc_management { |
---|
| 1244 | + bool atgc_enabled; /* ATGC is enabled or not */ |
---|
| 1245 | + struct rb_root_cached root; /* root of victim rb-tree */ |
---|
| 1246 | + struct list_head victim_list; /* linked with all victim entries */ |
---|
| 1247 | + unsigned int victim_count; /* victim count in rb-tree */ |
---|
| 1248 | + unsigned int candidate_ratio; /* candidate ratio */ |
---|
| 1249 | + unsigned int max_candidate_count; /* max candidate count */ |
---|
| 1250 | + unsigned int age_weight; /* age weight, vblock_weight = 100 - age_weight */ |
---|
| 1251 | + unsigned long long age_threshold; /* age threshold */ |
---|
| 1252 | +}; |
---|
| 1253 | + |
---|
1244 | 1254 | /* For s_flag in struct f2fs_sb_info */ |
---|
1245 | 1255 | enum { |
---|
1246 | 1256 | SBI_IS_DIRTY, /* dirty flag for checkpoint */ |
---|
.. | .. |
---|
1257 | 1267 | SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */ |
---|
1258 | 1268 | SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */ |
---|
1259 | 1269 | SBI_IS_RESIZEFS, /* resizefs is in process */ |
---|
| 1270 | + SBI_IS_FREEZING, /* freezefs is in process */ |
---|
1260 | 1271 | }; |
---|
1261 | 1272 | |
---|
1262 | 1273 | enum { |
---|
.. | .. |
---|
1273 | 1284 | GC_NORMAL, |
---|
1274 | 1285 | GC_IDLE_CB, |
---|
1275 | 1286 | GC_IDLE_GREEDY, |
---|
1276 | | - GC_URGENT, |
---|
| 1287 | + GC_IDLE_AT, |
---|
| 1288 | + GC_URGENT_HIGH, |
---|
| 1289 | + GC_URGENT_LOW, |
---|
| 1290 | + GC_URGENT_MID, |
---|
| 1291 | + MAX_GC_MODE, |
---|
1277 | 1292 | }; |
---|
1278 | 1293 | |
---|
1279 | 1294 | enum { |
---|
.. | .. |
---|
1307 | 1322 | FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */ |
---|
1308 | 1323 | }; |
---|
1309 | 1324 | |
---|
| 1325 | +enum { |
---|
| 1326 | + COMPR_MODE_FS, /* |
---|
| 1327 | + * automatically compress compression |
---|
| 1328 | + * enabled files |
---|
| 1329 | + */ |
---|
| 1330 | + COMPR_MODE_USER, /* |
---|
| 1331 | + * automatical compression is disabled. |
---|
| 1332 | + * user can control the file compression |
---|
| 1333 | + * using ioctls |
---|
| 1334 | + */ |
---|
| 1335 | +}; |
---|
| 1336 | + |
---|
| 1337 | +enum { |
---|
| 1338 | + MEMORY_MODE_NORMAL, /* memory mode for normal devices */ |
---|
| 1339 | + MEMORY_MODE_LOW, /* memory mode for low memry devices */ |
---|
| 1340 | +}; |
---|
| 1341 | + |
---|
| 1342 | +static inline int f2fs_test_bit(unsigned int nr, char *addr); |
---|
| 1343 | +static inline void f2fs_set_bit(unsigned int nr, char *addr); |
---|
| 1344 | +static inline void f2fs_clear_bit(unsigned int nr, char *addr); |
---|
| 1345 | + |
---|
1310 | 1346 | /* |
---|
1311 | | - * this value is set in page as a private data which indicate that |
---|
1312 | | - * the page is atomically written, and it is in inmem_pages list. |
---|
| 1347 | + * Layout of f2fs page.private: |
---|
| 1348 | + * |
---|
| 1349 | + * Layout A: lowest bit should be 1 |
---|
| 1350 | + * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... | |
---|
| 1351 | + * bit 0 PAGE_PRIVATE_NOT_POINTER |
---|
| 1352 | + * bit 1 PAGE_PRIVATE_ATOMIC_WRITE |
---|
| 1353 | + * bit 2 PAGE_PRIVATE_DUMMY_WRITE |
---|
| 1354 | + * bit 3 PAGE_PRIVATE_ONGOING_MIGRATION |
---|
| 1355 | + * bit 4 PAGE_PRIVATE_INLINE_INODE |
---|
| 1356 | + * bit 5 PAGE_PRIVATE_REF_RESOURCE |
---|
| 1357 | + * bit 6- f2fs private data |
---|
| 1358 | + * |
---|
| 1359 | + * Layout B: lowest bit should be 0 |
---|
| 1360 | + * page.private is a wrapped pointer. |
---|
1313 | 1361 | */ |
---|
1314 | | -#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1) |
---|
1315 | | -#define DUMMY_WRITTEN_PAGE ((unsigned long)-2) |
---|
| 1362 | +enum { |
---|
| 1363 | + PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */ |
---|
| 1364 | + PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */ |
---|
| 1365 | + PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */ |
---|
| 1366 | + PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */ |
---|
| 1367 | + PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */ |
---|
| 1368 | + PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */ |
---|
| 1369 | + PAGE_PRIVATE_MAX |
---|
| 1370 | +}; |
---|
1316 | 1371 | |
---|
1317 | | -#define IS_ATOMIC_WRITTEN_PAGE(page) \ |
---|
1318 | | - (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE) |
---|
1319 | | -#define IS_DUMMY_WRITTEN_PAGE(page) \ |
---|
1320 | | - (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE) |
---|
| 1372 | +#define PAGE_PRIVATE_GET_FUNC(name, flagname) \ |
---|
| 1373 | +static inline bool page_private_##name(struct page *page) \ |
---|
| 1374 | +{ \ |
---|
| 1375 | + return PagePrivate(page) && \ |
---|
| 1376 | + test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \ |
---|
| 1377 | + test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ |
---|
| 1378 | +} |
---|
1321 | 1379 | |
---|
1322 | | -#ifdef CONFIG_FS_ENCRYPTION |
---|
1323 | | -#define DUMMY_ENCRYPTION_ENABLED(sbi) \ |
---|
1324 | | - (unlikely(F2FS_OPTION(sbi).dummy_enc_ctx.ctx != NULL)) |
---|
1325 | | -#else |
---|
1326 | | -#define DUMMY_ENCRYPTION_ENABLED(sbi) (0) |
---|
1327 | | -#endif |
---|
| 1380 | +#define PAGE_PRIVATE_SET_FUNC(name, flagname) \ |
---|
| 1381 | +static inline void set_page_private_##name(struct page *page) \ |
---|
| 1382 | +{ \ |
---|
| 1383 | + if (!PagePrivate(page)) { \ |
---|
| 1384 | + get_page(page); \ |
---|
| 1385 | + SetPagePrivate(page); \ |
---|
| 1386 | + set_page_private(page, 0); \ |
---|
| 1387 | + } \ |
---|
| 1388 | + set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \ |
---|
| 1389 | + set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ |
---|
| 1390 | +} |
---|
| 1391 | + |
---|
| 1392 | +#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \ |
---|
| 1393 | +static inline void clear_page_private_##name(struct page *page) \ |
---|
| 1394 | +{ \ |
---|
| 1395 | + clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ |
---|
| 1396 | + if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \ |
---|
| 1397 | + set_page_private(page, 0); \ |
---|
| 1398 | + if (PagePrivate(page)) { \ |
---|
| 1399 | + ClearPagePrivate(page); \ |
---|
| 1400 | + put_page(page); \ |
---|
| 1401 | + }\ |
---|
| 1402 | + } \ |
---|
| 1403 | +} |
---|
| 1404 | + |
---|
| 1405 | +PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER); |
---|
| 1406 | +PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE); |
---|
| 1407 | +PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE); |
---|
| 1408 | +PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION); |
---|
| 1409 | +PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE); |
---|
| 1410 | +PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE); |
---|
| 1411 | + |
---|
| 1412 | +PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE); |
---|
| 1413 | +PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE); |
---|
| 1414 | +PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION); |
---|
| 1415 | +PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE); |
---|
| 1416 | +PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE); |
---|
| 1417 | + |
---|
| 1418 | +PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE); |
---|
| 1419 | +PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); |
---|
| 1420 | +PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); |
---|
| 1421 | +PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE); |
---|
| 1422 | +PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE); |
---|
| 1423 | + |
---|
| 1424 | +static inline unsigned long get_page_private_data(struct page *page) |
---|
| 1425 | +{ |
---|
| 1426 | + unsigned long data = page_private(page); |
---|
| 1427 | + |
---|
| 1428 | + if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data)) |
---|
| 1429 | + return 0; |
---|
| 1430 | + return data >> PAGE_PRIVATE_MAX; |
---|
| 1431 | +} |
---|
| 1432 | + |
---|
| 1433 | +static inline void set_page_private_data(struct page *page, unsigned long data) |
---|
| 1434 | +{ |
---|
| 1435 | + if (!PagePrivate(page)) { |
---|
| 1436 | + get_page(page); |
---|
| 1437 | + SetPagePrivate(page); |
---|
| 1438 | + set_page_private(page, 0); |
---|
| 1439 | + } |
---|
| 1440 | + set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); |
---|
| 1441 | + page_private(page) |= data << PAGE_PRIVATE_MAX; |
---|
| 1442 | +} |
---|
| 1443 | + |
---|
| 1444 | +static inline void clear_page_private_data(struct page *page) |
---|
| 1445 | +{ |
---|
| 1446 | + page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1; |
---|
| 1447 | + if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { |
---|
| 1448 | + set_page_private(page, 0); |
---|
| 1449 | + if (PagePrivate(page)) { |
---|
| 1450 | + ClearPagePrivate(page); |
---|
| 1451 | + put_page(page); |
---|
| 1452 | + } |
---|
| 1453 | + } |
---|
| 1454 | +} |
---|
1328 | 1455 | |
---|
1329 | 1456 | /* For compression */ |
---|
1330 | 1457 | enum compress_algorithm_type { |
---|
1331 | 1458 | COMPRESS_LZO, |
---|
1332 | 1459 | COMPRESS_LZ4, |
---|
1333 | 1460 | COMPRESS_ZSTD, |
---|
| 1461 | + COMPRESS_LZORLE, |
---|
1334 | 1462 | COMPRESS_MAX, |
---|
1335 | 1463 | }; |
---|
1336 | 1464 | |
---|
1337 | | -#define COMPRESS_DATA_RESERVED_SIZE 5 |
---|
| 1465 | +enum compress_flag { |
---|
| 1466 | + COMPRESS_CHKSUM, |
---|
| 1467 | + COMPRESS_MAX_FLAG, |
---|
| 1468 | +}; |
---|
| 1469 | + |
---|
| 1470 | +#define COMPRESS_WATERMARK 20 |
---|
| 1471 | +#define COMPRESS_PERCENT 20 |
---|
| 1472 | + |
---|
| 1473 | +#define COMPRESS_DATA_RESERVED_SIZE 4 |
---|
1338 | 1474 | struct compress_data { |
---|
1339 | 1475 | __le32 clen; /* compressed data size */ |
---|
| 1476 | + __le32 chksum; /* compressed data chksum */ |
---|
1340 | 1477 | __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */ |
---|
1341 | 1478 | u8 cdata[]; /* compressed data */ |
---|
1342 | 1479 | }; |
---|
.. | .. |
---|
1344 | 1481 | #define COMPRESS_HEADER_SIZE (sizeof(struct compress_data)) |
---|
1345 | 1482 | |
---|
1346 | 1483 | #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000 |
---|
| 1484 | + |
---|
| 1485 | +#define COMPRESS_LEVEL_OFFSET 8 |
---|
1347 | 1486 | |
---|
1348 | 1487 | /* compress context */ |
---|
1349 | 1488 | struct compress_ctx { |
---|
.. | .. |
---|
1369 | 1508 | struct inode *inode; /* inode the context belong to */ |
---|
1370 | 1509 | struct page **rpages; /* pages store raw data in cluster */ |
---|
1371 | 1510 | unsigned int nr_rpages; /* total page number in rpages */ |
---|
1372 | | - refcount_t ref; /* referrence count of raw page */ |
---|
| 1511 | + atomic_t pending_pages; /* in-flight compressed page count */ |
---|
1373 | 1512 | }; |
---|
1374 | 1513 | |
---|
1375 | | -/* decompress io context for read IO path */ |
---|
| 1514 | +/* Context for decompressing one cluster on the read IO path */ |
---|
1376 | 1515 | struct decompress_io_ctx { |
---|
1377 | 1516 | u32 magic; /* magic number to indicate page is compressed */ |
---|
1378 | 1517 | struct inode *inode; /* inode the context belong to */ |
---|
.. | .. |
---|
1388 | 1527 | struct compress_data *cbuf; /* virtual mapped address on cpages */ |
---|
1389 | 1528 | size_t rlen; /* valid data length in rbuf */ |
---|
1390 | 1529 | size_t clen; /* valid data length in cbuf */ |
---|
1391 | | - refcount_t ref; /* referrence count of compressed page */ |
---|
1392 | | - bool failed; /* indicate IO error during decompression */ |
---|
| 1530 | + |
---|
| 1531 | + /* |
---|
| 1532 | + * The number of compressed pages remaining to be read in this cluster. |
---|
| 1533 | + * This is initially nr_cpages. It is decremented by 1 each time a page |
---|
| 1534 | + * has been read (or failed to be read). When it reaches 0, the cluster |
---|
| 1535 | + * is decompressed (or an error is reported). |
---|
| 1536 | + * |
---|
| 1537 | + * If an error occurs before all the pages have been submitted for I/O, |
---|
| 1538 | + * then this will never reach 0. In this case the I/O submitter is |
---|
| 1539 | + * responsible for calling f2fs_decompress_end_io() instead. |
---|
| 1540 | + */ |
---|
| 1541 | + atomic_t remaining_pages; |
---|
| 1542 | + |
---|
| 1543 | + /* |
---|
| 1544 | + * Number of references to this decompress_io_ctx. |
---|
| 1545 | + * |
---|
| 1546 | + * One reference is held for I/O completion. This reference is dropped |
---|
| 1547 | + * after the pagecache pages are updated and unlocked -- either after |
---|
| 1548 | + * decompression (and verity if enabled), or after an error. |
---|
| 1549 | + * |
---|
| 1550 | + * In addition, each compressed page holds a reference while it is in a |
---|
| 1551 | + * bio. These references are necessary prevent compressed pages from |
---|
| 1552 | + * being freed while they are still in a bio. |
---|
| 1553 | + */ |
---|
| 1554 | + refcount_t refcnt; |
---|
| 1555 | + |
---|
| 1556 | + bool failed; /* IO error occurred before decompression? */ |
---|
| 1557 | + bool need_verity; /* need fs-verity verification after decompression? */ |
---|
1393 | 1558 | void *private; /* payload buffer for specified decompression algorithm */ |
---|
1394 | 1559 | void *private2; /* extra payload buffer */ |
---|
| 1560 | + struct work_struct verity_work; /* work to verify the decompressed pages */ |
---|
| 1561 | + struct work_struct free_work; /* work for late free this structure itself */ |
---|
1395 | 1562 | }; |
---|
1396 | 1563 | |
---|
1397 | 1564 | #define NULL_CLUSTER ((unsigned int)(~0)) |
---|
1398 | 1565 | #define MIN_COMPRESS_LOG_SIZE 2 |
---|
1399 | 1566 | #define MAX_COMPRESS_LOG_SIZE 8 |
---|
1400 | | -#define MAX_COMPRESS_WINDOW_SIZE ((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE) |
---|
| 1567 | +#define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size)) |
---|
1401 | 1568 | |
---|
1402 | 1569 | struct f2fs_sb_info { |
---|
1403 | 1570 | struct super_block *sb; /* pointer to VFS super block */ |
---|
1404 | 1571 | struct proc_dir_entry *s_proc; /* proc entry */ |
---|
1405 | 1572 | struct f2fs_super_block *raw_super; /* raw super block pointer */ |
---|
1406 | | - struct rw_semaphore sb_lock; /* lock for raw super block */ |
---|
| 1573 | + struct f2fs_rwsem sb_lock; /* lock for raw super block */ |
---|
1407 | 1574 | int valid_super_block; /* valid super block no */ |
---|
1408 | 1575 | unsigned long s_flag; /* flags for sbi */ |
---|
1409 | 1576 | struct mutex writepages; /* mutex for writepages() */ |
---|
.. | .. |
---|
1423 | 1590 | /* for bio operations */ |
---|
1424 | 1591 | struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */ |
---|
1425 | 1592 | /* keep migration IO order for LFS mode */ |
---|
1426 | | - struct rw_semaphore io_order_lock; |
---|
| 1593 | + struct f2fs_rwsem io_order_lock; |
---|
1427 | 1594 | mempool_t *write_io_dummy; /* Dummy pages */ |
---|
1428 | 1595 | |
---|
1429 | 1596 | /* for checkpoint */ |
---|
.. | .. |
---|
1431 | 1598 | int cur_cp_pack; /* remain current cp pack */ |
---|
1432 | 1599 | spinlock_t cp_lock; /* for flag in ckpt */ |
---|
1433 | 1600 | struct inode *meta_inode; /* cache meta blocks */ |
---|
1434 | | - struct mutex cp_mutex; /* checkpoint procedure lock */ |
---|
1435 | | - struct rw_semaphore cp_rwsem; /* blocking FS operations */ |
---|
1436 | | - struct rw_semaphore node_write; /* locking node writes */ |
---|
1437 | | - struct rw_semaphore node_change; /* locking node change */ |
---|
| 1601 | + struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */ |
---|
| 1602 | + struct f2fs_rwsem cp_rwsem; /* blocking FS operations */ |
---|
| 1603 | + struct f2fs_rwsem node_write; /* locking node writes */ |
---|
| 1604 | + struct f2fs_rwsem node_change; /* locking node change */ |
---|
1438 | 1605 | wait_queue_head_t cp_wait; |
---|
1439 | 1606 | unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ |
---|
1440 | 1607 | long interval_time[MAX_TIME]; /* to store thresholds */ |
---|
| 1608 | + struct ckpt_req_control cprc_info; /* for checkpoint request control */ |
---|
1441 | 1609 | |
---|
1442 | | - struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ |
---|
| 1610 | + struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ |
---|
1443 | 1611 | |
---|
1444 | 1612 | spinlock_t fsync_node_lock; /* for node entry lock */ |
---|
1445 | 1613 | struct list_head fsync_node_list; /* node list head */ |
---|
.. | .. |
---|
1455 | 1623 | struct mutex flush_lock; /* for flush exclusion */ |
---|
1456 | 1624 | |
---|
1457 | 1625 | /* for extent tree cache */ |
---|
1458 | | - struct radix_tree_root extent_tree_root;/* cache extent cache entries */ |
---|
1459 | | - struct mutex extent_tree_lock; /* locking extent radix tree */ |
---|
1460 | | - struct list_head extent_list; /* lru list for shrinker */ |
---|
1461 | | - spinlock_t extent_lock; /* locking extent lru list */ |
---|
1462 | | - atomic_t total_ext_tree; /* extent tree count */ |
---|
1463 | | - struct list_head zombie_list; /* extent zombie tree list */ |
---|
1464 | | - atomic_t total_zombie_tree; /* extent zombie tree count */ |
---|
1465 | | - atomic_t total_ext_node; /* extent info count */ |
---|
| 1626 | + struct extent_tree_info extent_tree[NR_EXTENT_CACHES]; |
---|
| 1627 | + atomic64_t allocated_data_blocks; /* for block age extent_cache */ |
---|
| 1628 | + |
---|
| 1629 | + /* The threshold used for hot and warm data seperation*/ |
---|
| 1630 | + unsigned int hot_data_age_threshold; |
---|
| 1631 | + unsigned int warm_data_age_threshold; |
---|
| 1632 | + unsigned int last_age_weight; |
---|
1466 | 1633 | |
---|
1467 | 1634 | /* basic filesystem units */ |
---|
1468 | 1635 | unsigned int log_sectors_per_block; /* log2 sectors per block */ |
---|
.. | .. |
---|
1473 | 1640 | unsigned int meta_ino_num; /* meta inode number*/ |
---|
1474 | 1641 | unsigned int log_blocks_per_seg; /* log2 blocks per segment */ |
---|
1475 | 1642 | unsigned int blocks_per_seg; /* blocks per segment */ |
---|
| 1643 | + unsigned int unusable_blocks_per_sec; /* unusable blocks per section */ |
---|
1476 | 1644 | unsigned int segs_per_sec; /* segments per section */ |
---|
1477 | 1645 | unsigned int secs_per_zone; /* sections per zone */ |
---|
1478 | 1646 | unsigned int total_sections; /* total section count */ |
---|
1479 | 1647 | unsigned int total_node_count; /* total node block count */ |
---|
1480 | 1648 | unsigned int total_valid_node_count; /* valid node block count */ |
---|
1481 | | - loff_t max_file_blocks; /* max block index of file */ |
---|
1482 | 1649 | int dir_level; /* directory level */ |
---|
1483 | 1650 | int readdir_ra; /* readahead inode in readdir */ |
---|
| 1651 | + u64 max_io_bytes; /* max io bytes to merge IOs */ |
---|
1484 | 1652 | |
---|
1485 | 1653 | block_t user_block_count; /* # of user blocks */ |
---|
1486 | 1654 | block_t total_valid_block_count; /* # of valid blocks */ |
---|
.. | .. |
---|
1493 | 1661 | block_t unusable_block_count; /* # of blocks saved by last cp */ |
---|
1494 | 1662 | |
---|
1495 | 1663 | unsigned int nquota_files; /* # of quota sysfile */ |
---|
1496 | | - struct rw_semaphore quota_sem; /* blocking cp for flags */ |
---|
| 1664 | + struct f2fs_rwsem quota_sem; /* blocking cp for flags */ |
---|
1497 | 1665 | |
---|
1498 | 1666 | /* # of pages, see count_type */ |
---|
1499 | 1667 | atomic_t nr_pages[NR_COUNT_TYPE]; |
---|
.. | .. |
---|
1509 | 1677 | struct f2fs_mount_info mount_opt; /* mount options */ |
---|
1510 | 1678 | |
---|
1511 | 1679 | /* for cleaning operations */ |
---|
1512 | | - struct rw_semaphore gc_lock; /* |
---|
| 1680 | + struct f2fs_rwsem gc_lock; /* |
---|
1513 | 1681 | * semaphore for GC, avoid |
---|
1514 | 1682 | * race between GC and GC or CP |
---|
1515 | 1683 | */ |
---|
1516 | 1684 | struct f2fs_gc_kthread *gc_thread; /* GC thread */ |
---|
| 1685 | + struct atgc_management am; /* atgc management */ |
---|
1517 | 1686 | unsigned int cur_victim_sec; /* current victim section num */ |
---|
1518 | 1687 | unsigned int gc_mode; /* current GC state */ |
---|
1519 | 1688 | unsigned int next_victim_seg[2]; /* next segment in victim section */ |
---|
| 1689 | + |
---|
1520 | 1690 | /* for skip statistic */ |
---|
1521 | | - unsigned int atomic_files; /* # of opened atomic file */ |
---|
| 1691 | + unsigned int atomic_files; /* # of opened atomic file */ |
---|
1522 | 1692 | unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */ |
---|
1523 | 1693 | unsigned long long skipped_gc_rwsem; /* FG_GC only */ |
---|
1524 | 1694 | |
---|
1525 | 1695 | /* threshold for gc trials on pinned files */ |
---|
1526 | 1696 | u64 gc_pin_file_threshold; |
---|
1527 | | - struct rw_semaphore pin_sem; |
---|
| 1697 | + struct f2fs_rwsem pin_sem; |
---|
1528 | 1698 | |
---|
1529 | 1699 | /* maximum # of trials to find a victim segment for SSR and GC */ |
---|
1530 | 1700 | unsigned int max_victim_search; |
---|
.. | .. |
---|
1543 | 1713 | unsigned int segment_count[2]; /* # of allocated segments */ |
---|
1544 | 1714 | unsigned int block_count[2]; /* # of allocated blocks */ |
---|
1545 | 1715 | atomic_t inplace_count; /* # of inplace update */ |
---|
1546 | | - atomic64_t total_hit_ext; /* # of lookup extent cache */ |
---|
1547 | | - atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */ |
---|
1548 | | - atomic64_t read_hit_largest; /* # of hit largest extent node */ |
---|
1549 | | - atomic64_t read_hit_cached; /* # of hit cached extent node */ |
---|
| 1716 | + /* # of lookup extent cache */ |
---|
| 1717 | + atomic64_t total_hit_ext[NR_EXTENT_CACHES]; |
---|
| 1718 | + /* # of hit rbtree extent node */ |
---|
| 1719 | + atomic64_t read_hit_rbtree[NR_EXTENT_CACHES]; |
---|
| 1720 | + /* # of hit cached extent node */ |
---|
| 1721 | + atomic64_t read_hit_cached[NR_EXTENT_CACHES]; |
---|
| 1722 | + /* # of hit largest extent node in read extent cache */ |
---|
| 1723 | + atomic64_t read_hit_largest; |
---|
1550 | 1724 | atomic_t inline_xattr; /* # of inline_xattr inodes */ |
---|
1551 | 1725 | atomic_t inline_inode; /* # of inline_data inodes */ |
---|
1552 | 1726 | atomic_t inline_dir; /* # of inline_dentry inodes */ |
---|
1553 | 1727 | atomic_t compr_inode; /* # of compressed inodes */ |
---|
1554 | | - atomic_t compr_blocks; /* # of compressed blocks */ |
---|
| 1728 | + atomic64_t compr_blocks; /* # of compressed blocks */ |
---|
1555 | 1729 | atomic_t vw_cnt; /* # of volatile writes */ |
---|
1556 | 1730 | atomic_t max_aw_cnt; /* max # of atomic writes */ |
---|
1557 | 1731 | atomic_t max_vw_cnt; /* max # of volatile writes */ |
---|
.. | .. |
---|
1574 | 1748 | unsigned int node_io_flag; |
---|
1575 | 1749 | |
---|
1576 | 1750 | /* For sysfs suppport */ |
---|
1577 | | - struct kobject s_kobj; |
---|
| 1751 | + struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */ |
---|
1578 | 1752 | struct completion s_kobj_unregister; |
---|
| 1753 | + |
---|
| 1754 | + struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */ |
---|
| 1755 | + struct completion s_stat_kobj_unregister; |
---|
| 1756 | + |
---|
| 1757 | + struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */ |
---|
| 1758 | + struct completion s_feature_list_kobj_unregister; |
---|
1579 | 1759 | |
---|
1580 | 1760 | /* For shrinker support */ |
---|
1581 | 1761 | struct list_head s_list; |
---|
.. | .. |
---|
1600 | 1780 | |
---|
1601 | 1781 | struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ |
---|
1602 | 1782 | unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ |
---|
| 1783 | + |
---|
| 1784 | + /* For reclaimed segs statistics per each GC mode */ |
---|
| 1785 | + unsigned int gc_segment_mode; /* GC state for reclaimed segments */ |
---|
| 1786 | + unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */ |
---|
| 1787 | + |
---|
| 1788 | +#ifdef CONFIG_F2FS_FS_COMPRESSION |
---|
| 1789 | + struct kmem_cache *page_array_slab; /* page array entry */ |
---|
| 1790 | + unsigned int page_array_slab_size; /* default page array slab size */ |
---|
| 1791 | + |
---|
| 1792 | + /* For runtime compression statistics */ |
---|
| 1793 | + u64 compr_written_block; |
---|
| 1794 | + u64 compr_saved_block; |
---|
| 1795 | + u32 compr_new_inode; |
---|
| 1796 | + |
---|
| 1797 | + /* For compressed block cache */ |
---|
| 1798 | + struct inode *compress_inode; /* cache compressed blocks */ |
---|
| 1799 | + unsigned int compress_percent; /* cache page percentage */ |
---|
| 1800 | + unsigned int compress_watermark; /* cache page watermark */ |
---|
| 1801 | + atomic_t compress_page_hit; /* cache hit count */ |
---|
| 1802 | +#endif |
---|
1603 | 1803 | }; |
---|
1604 | 1804 | |
---|
1605 | 1805 | struct f2fs_private_dio { |
---|
.. | .. |
---|
1651 | 1851 | return sbi->s_ndevs > 1; |
---|
1652 | 1852 | } |
---|
1653 | 1853 | |
---|
1654 | | -/* For write statistics. Suppose sector size is 512 bytes, |
---|
1655 | | - * and the return value is in kbytes. s is of struct f2fs_sb_info. |
---|
1656 | | - */ |
---|
1657 | | -#define BD_PART_WRITTEN(s) \ |
---|
1658 | | -(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) - \ |
---|
1659 | | - (s)->sectors_written_start) >> 1) |
---|
1660 | | - |
---|
1661 | 1854 | static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) |
---|
1662 | 1855 | { |
---|
1663 | 1856 | unsigned long now = jiffies; |
---|
.. | .. |
---|
1707 | 1900 | BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); |
---|
1708 | 1901 | |
---|
1709 | 1902 | desc.shash.tfm = sbi->s_chksum_driver; |
---|
1710 | | - desc.shash.flags = 0; |
---|
1711 | 1903 | *(u32 *)desc.ctx = crc; |
---|
1712 | 1904 | |
---|
1713 | 1905 | err = crypto_shash_update(&desc.shash, address, length); |
---|
.. | .. |
---|
1925 | 2117 | return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set; |
---|
1926 | 2118 | } |
---|
1927 | 2119 | |
---|
| 2120 | +#define init_f2fs_rwsem(sem) \ |
---|
| 2121 | +do { \ |
---|
| 2122 | + static struct lock_class_key __key; \ |
---|
| 2123 | + \ |
---|
| 2124 | + __init_f2fs_rwsem((sem), #sem, &__key); \ |
---|
| 2125 | +} while (0) |
---|
| 2126 | + |
---|
| 2127 | +static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem, |
---|
| 2128 | + const char *sem_name, struct lock_class_key *key) |
---|
| 2129 | +{ |
---|
| 2130 | + __init_rwsem(&sem->internal_rwsem, sem_name, key); |
---|
| 2131 | + init_waitqueue_head(&sem->read_waiters); |
---|
| 2132 | +} |
---|
| 2133 | + |
---|
| 2134 | +static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem) |
---|
| 2135 | +{ |
---|
| 2136 | + return rwsem_is_locked(&sem->internal_rwsem); |
---|
| 2137 | +} |
---|
| 2138 | + |
---|
| 2139 | +static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem) |
---|
| 2140 | +{ |
---|
| 2141 | + return rwsem_is_contended(&sem->internal_rwsem); |
---|
| 2142 | +} |
---|
| 2143 | + |
---|
| 2144 | +static inline void f2fs_down_read(struct f2fs_rwsem *sem) |
---|
| 2145 | +{ |
---|
| 2146 | + wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); |
---|
| 2147 | +} |
---|
| 2148 | + |
---|
| 2149 | +static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem) |
---|
| 2150 | +{ |
---|
| 2151 | + return down_read_trylock(&sem->internal_rwsem); |
---|
| 2152 | +} |
---|
| 2153 | + |
---|
| 2154 | +#ifdef CONFIG_DEBUG_LOCK_ALLOC |
---|
| 2155 | +static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass) |
---|
| 2156 | +{ |
---|
| 2157 | + down_read_nested(&sem->internal_rwsem, subclass); |
---|
| 2158 | +} |
---|
| 2159 | +#else |
---|
| 2160 | +#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem) |
---|
| 2161 | +#endif |
---|
| 2162 | + |
---|
| 2163 | +static inline void f2fs_up_read(struct f2fs_rwsem *sem) |
---|
| 2164 | +{ |
---|
| 2165 | + up_read(&sem->internal_rwsem); |
---|
| 2166 | +} |
---|
| 2167 | + |
---|
| 2168 | +static inline void f2fs_down_write(struct f2fs_rwsem *sem) |
---|
| 2169 | +{ |
---|
| 2170 | + down_write(&sem->internal_rwsem); |
---|
| 2171 | +} |
---|
| 2172 | + |
---|
| 2173 | +static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem) |
---|
| 2174 | +{ |
---|
| 2175 | + return down_write_trylock(&sem->internal_rwsem); |
---|
| 2176 | +} |
---|
| 2177 | + |
---|
| 2178 | +static inline void f2fs_up_write(struct f2fs_rwsem *sem) |
---|
| 2179 | +{ |
---|
| 2180 | + up_write(&sem->internal_rwsem); |
---|
| 2181 | + wake_up_all(&sem->read_waiters); |
---|
| 2182 | +} |
---|
| 2183 | + |
---|
1928 | 2184 | static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) |
---|
1929 | 2185 | { |
---|
1930 | | - down_read(&sbi->cp_rwsem); |
---|
| 2186 | + f2fs_down_read(&sbi->cp_rwsem); |
---|
1931 | 2187 | } |
---|
1932 | 2188 | |
---|
1933 | 2189 | static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) |
---|
1934 | 2190 | { |
---|
1935 | | - return down_read_trylock(&sbi->cp_rwsem); |
---|
| 2191 | + return f2fs_down_read_trylock(&sbi->cp_rwsem); |
---|
1936 | 2192 | } |
---|
1937 | 2193 | |
---|
1938 | 2194 | static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) |
---|
1939 | 2195 | { |
---|
1940 | | - up_read(&sbi->cp_rwsem); |
---|
| 2196 | + f2fs_up_read(&sbi->cp_rwsem); |
---|
1941 | 2197 | } |
---|
1942 | 2198 | |
---|
1943 | 2199 | static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) |
---|
1944 | 2200 | { |
---|
1945 | | - down_write(&sbi->cp_rwsem); |
---|
| 2201 | + f2fs_down_write(&sbi->cp_rwsem); |
---|
1946 | 2202 | } |
---|
1947 | 2203 | |
---|
1948 | 2204 | static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) |
---|
1949 | 2205 | { |
---|
1950 | | - up_write(&sbi->cp_rwsem); |
---|
| 2206 | + f2fs_up_write(&sbi->cp_rwsem); |
---|
1951 | 2207 | } |
---|
1952 | 2208 | |
---|
1953 | 2209 | static inline int __get_cp_reason(struct f2fs_sb_info *sbi) |
---|
.. | .. |
---|
2037 | 2293 | |
---|
2038 | 2294 | if (!__allow_reserved_blocks(sbi, inode, true)) |
---|
2039 | 2295 | avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; |
---|
| 2296 | + |
---|
| 2297 | + if (F2FS_IO_ALIGNED(sbi)) |
---|
| 2298 | + avail_user_block_count -= sbi->blocks_per_seg * |
---|
| 2299 | + SM_I(sbi)->additional_reserved_segments; |
---|
| 2300 | + |
---|
2040 | 2301 | if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { |
---|
2041 | 2302 | if (avail_user_block_count > sbi->unusable_block_count) |
---|
2042 | 2303 | avail_user_block_count -= sbi->unusable_block_count; |
---|
.. | .. |
---|
2199 | 2460 | static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) |
---|
2200 | 2461 | { |
---|
2201 | 2462 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); |
---|
| 2463 | + void *tmp_ptr = &ckpt->sit_nat_version_bitmap; |
---|
2202 | 2464 | int offset; |
---|
2203 | 2465 | |
---|
2204 | 2466 | if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { |
---|
.. | .. |
---|
2208 | 2470 | * if large_nat_bitmap feature is enabled, leave checksum |
---|
2209 | 2471 | * protection for all nat/sit bitmaps. |
---|
2210 | 2472 | */ |
---|
2211 | | - return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); |
---|
| 2473 | + return tmp_ptr + offset + sizeof(__le32); |
---|
2212 | 2474 | } |
---|
2213 | 2475 | |
---|
2214 | 2476 | if (__cp_payload(sbi) > 0) { |
---|
.. | .. |
---|
2219 | 2481 | } else { |
---|
2220 | 2482 | offset = (flag == NAT_BITMAP) ? |
---|
2221 | 2483 | le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; |
---|
2222 | | - return &ckpt->sit_nat_version_bitmap + offset; |
---|
| 2484 | + return tmp_ptr + offset; |
---|
2223 | 2485 | } |
---|
2224 | 2486 | } |
---|
2225 | 2487 | |
---|
.. | .. |
---|
2251 | 2513 | return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); |
---|
2252 | 2514 | } |
---|
2253 | 2515 | |
---|
| 2516 | +extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync); |
---|
2254 | 2517 | static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, |
---|
2255 | 2518 | struct inode *inode, bool is_inode) |
---|
2256 | 2519 | { |
---|
.. | .. |
---|
2282 | 2545 | |
---|
2283 | 2546 | if (!__allow_reserved_blocks(sbi, inode, false)) |
---|
2284 | 2547 | valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; |
---|
| 2548 | + |
---|
| 2549 | + if (F2FS_IO_ALIGNED(sbi)) |
---|
| 2550 | + valid_block_count += sbi->blocks_per_seg * |
---|
| 2551 | + SM_I(sbi)->additional_reserved_segments; |
---|
| 2552 | + |
---|
2285 | 2553 | user_block_count = sbi->user_block_count; |
---|
2286 | 2554 | if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) |
---|
2287 | 2555 | user_block_count -= sbi->unusable_block_count; |
---|
.. | .. |
---|
2326 | 2594 | { |
---|
2327 | 2595 | spin_lock(&sbi->stat_lock); |
---|
2328 | 2596 | |
---|
2329 | | - f2fs_bug_on(sbi, !sbi->total_valid_block_count); |
---|
2330 | | - f2fs_bug_on(sbi, !sbi->total_valid_node_count); |
---|
| 2597 | + if (unlikely(!sbi->total_valid_block_count || |
---|
| 2598 | + !sbi->total_valid_node_count)) { |
---|
| 2599 | + f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u", |
---|
| 2600 | + sbi->total_valid_block_count, |
---|
| 2601 | + sbi->total_valid_node_count); |
---|
| 2602 | + set_sbi_flag(sbi, SBI_NEED_FSCK); |
---|
| 2603 | + } else { |
---|
| 2604 | + sbi->total_valid_block_count--; |
---|
| 2605 | + sbi->total_valid_node_count--; |
---|
| 2606 | + } |
---|
2331 | 2607 | |
---|
2332 | | - sbi->total_valid_node_count--; |
---|
2333 | | - sbi->total_valid_block_count--; |
---|
2334 | 2608 | if (sbi->reserved_blocks && |
---|
2335 | 2609 | sbi->current_reserved_blocks < sbi->reserved_blocks) |
---|
2336 | 2610 | sbi->current_reserved_blocks++; |
---|
.. | .. |
---|
2458 | 2732 | return entry; |
---|
2459 | 2733 | } |
---|
2460 | 2734 | |
---|
2461 | | -static inline bool is_idle(struct f2fs_sb_info *sbi, int type) |
---|
| 2735 | +static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) |
---|
2462 | 2736 | { |
---|
2463 | | - if (sbi->gc_mode == GC_URGENT) |
---|
2464 | | - return true; |
---|
2465 | | - |
---|
2466 | 2737 | if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || |
---|
2467 | 2738 | get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || |
---|
2468 | 2739 | get_pages(sbi, F2FS_WB_CP_DATA) || |
---|
2469 | 2740 | get_pages(sbi, F2FS_DIO_READ) || |
---|
2470 | 2741 | get_pages(sbi, F2FS_DIO_WRITE)) |
---|
2471 | | - return false; |
---|
| 2742 | + return true; |
---|
2472 | 2743 | |
---|
2473 | 2744 | if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && |
---|
2474 | 2745 | atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) |
---|
2475 | | - return false; |
---|
| 2746 | + return true; |
---|
2476 | 2747 | |
---|
2477 | 2748 | if (SM_I(sbi) && SM_I(sbi)->fcc_info && |
---|
2478 | 2749 | atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) |
---|
| 2750 | + return true; |
---|
| 2751 | + return false; |
---|
| 2752 | +} |
---|
| 2753 | + |
---|
| 2754 | +static inline bool is_idle(struct f2fs_sb_info *sbi, int type) |
---|
| 2755 | +{ |
---|
| 2756 | + if (sbi->gc_mode == GC_URGENT_HIGH) |
---|
| 2757 | + return true; |
---|
| 2758 | + |
---|
| 2759 | + if (is_inflight_io(sbi, type)) |
---|
2479 | 2760 | return false; |
---|
| 2761 | + |
---|
| 2762 | + if (sbi->gc_mode == GC_URGENT_MID) |
---|
| 2763 | + return true; |
---|
| 2764 | + |
---|
| 2765 | + if (sbi->gc_mode == GC_URGENT_LOW && |
---|
| 2766 | + (type == DISCARD_TIME || type == GC_TIME)) |
---|
| 2767 | + return true; |
---|
2480 | 2768 | |
---|
2481 | 2769 | return f2fs_time_over(sbi, type); |
---|
2482 | 2770 | } |
---|
.. | .. |
---|
2643 | 2931 | case FI_NEW_INODE: |
---|
2644 | 2932 | if (set) |
---|
2645 | 2933 | return; |
---|
2646 | | - /* fall through */ |
---|
| 2934 | + fallthrough; |
---|
2647 | 2935 | case FI_DATA_EXIST: |
---|
2648 | 2936 | case FI_INLINE_DOTS: |
---|
2649 | 2937 | case FI_PIN_FILE: |
---|
| 2938 | + case FI_COMPRESS_RELEASED: |
---|
2650 | 2939 | f2fs_mark_inode_dirty_sync(inode, true); |
---|
2651 | 2940 | } |
---|
2652 | 2941 | } |
---|
2653 | 2942 | |
---|
2654 | 2943 | static inline void set_inode_flag(struct inode *inode, int flag) |
---|
2655 | 2944 | { |
---|
2656 | | - test_and_set_bit(flag, F2FS_I(inode)->flags); |
---|
| 2945 | + set_bit(flag, F2FS_I(inode)->flags); |
---|
2657 | 2946 | __mark_inode_dirty_flag(inode, flag, true); |
---|
2658 | 2947 | } |
---|
2659 | 2948 | |
---|
.. | .. |
---|
2664 | 2953 | |
---|
2665 | 2954 | static inline void clear_inode_flag(struct inode *inode, int flag) |
---|
2666 | 2955 | { |
---|
2667 | | - test_and_clear_bit(flag, F2FS_I(inode)->flags); |
---|
| 2956 | + clear_bit(flag, F2FS_I(inode)->flags); |
---|
2668 | 2957 | __mark_inode_dirty_flag(inode, flag, false); |
---|
2669 | 2958 | } |
---|
2670 | 2959 | |
---|
.. | .. |
---|
2768 | 3057 | set_bit(FI_EXTRA_ATTR, fi->flags); |
---|
2769 | 3058 | if (ri->i_inline & F2FS_PIN_FILE) |
---|
2770 | 3059 | set_bit(FI_PIN_FILE, fi->flags); |
---|
| 3060 | + if (ri->i_inline & F2FS_COMPRESS_RELEASED) |
---|
| 3061 | + set_bit(FI_COMPRESS_RELEASED, fi->flags); |
---|
2771 | 3062 | } |
---|
2772 | 3063 | |
---|
2773 | 3064 | static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) |
---|
.. | .. |
---|
2788 | 3079 | ri->i_inline |= F2FS_EXTRA_ATTR; |
---|
2789 | 3080 | if (is_inode_flag_set(inode, FI_PIN_FILE)) |
---|
2790 | 3081 | ri->i_inline |= F2FS_PIN_FILE; |
---|
| 3082 | + if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) |
---|
| 3083 | + ri->i_inline |= F2FS_COMPRESS_RELEASED; |
---|
2791 | 3084 | } |
---|
2792 | 3085 | |
---|
2793 | 3086 | static inline int f2fs_has_extra_attr(struct inode *inode) |
---|
.. | .. |
---|
2804 | 3097 | { |
---|
2805 | 3098 | return S_ISREG(inode->i_mode) && |
---|
2806 | 3099 | is_inode_flag_set(inode, FI_COMPRESSED_FILE); |
---|
| 3100 | +} |
---|
| 3101 | + |
---|
| 3102 | +static inline bool f2fs_need_compress_data(struct inode *inode) |
---|
| 3103 | +{ |
---|
| 3104 | + int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; |
---|
| 3105 | + |
---|
| 3106 | + if (!f2fs_compressed_file(inode)) |
---|
| 3107 | + return false; |
---|
| 3108 | + |
---|
| 3109 | + if (compress_mode == COMPR_MODE_FS) |
---|
| 3110 | + return true; |
---|
| 3111 | + else if (compress_mode == COMPR_MODE_USER && |
---|
| 3112 | + is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) |
---|
| 3113 | + return true; |
---|
| 3114 | + |
---|
| 3115 | + return false; |
---|
2807 | 3116 | } |
---|
2808 | 3117 | |
---|
2809 | 3118 | static inline unsigned int addrs_per_inode(struct inode *inode) |
---|
.. | .. |
---|
2980 | 3289 | return false; |
---|
2981 | 3290 | } |
---|
2982 | 3291 | |
---|
2983 | | -static inline bool f2fs_may_extent_tree(struct inode *inode) |
---|
2984 | | -{ |
---|
2985 | | - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
---|
2986 | | - |
---|
2987 | | - if (!test_opt(sbi, EXTENT_CACHE) || |
---|
2988 | | - is_inode_flag_set(inode, FI_NO_EXTENT) || |
---|
2989 | | - is_inode_flag_set(inode, FI_COMPRESSED_FILE)) |
---|
2990 | | - return false; |
---|
2991 | | - |
---|
2992 | | - /* |
---|
2993 | | - * for recovered files during mount do not create extents |
---|
2994 | | - * if shrinker is not registered. |
---|
2995 | | - */ |
---|
2996 | | - if (list_empty(&sbi->s_list)) |
---|
2997 | | - return false; |
---|
2998 | | - |
---|
2999 | | - return S_ISREG(inode->i_mode); |
---|
3000 | | -} |
---|
3001 | | - |
---|
3002 | 3292 | static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, |
---|
3003 | 3293 | size_t size, gfp_t flags) |
---|
3004 | 3294 | { |
---|
.. | .. |
---|
3122 | 3412 | return true; |
---|
3123 | 3413 | } |
---|
3124 | 3414 | |
---|
3125 | | -static inline void f2fs_set_page_private(struct page *page, |
---|
3126 | | - unsigned long data) |
---|
3127 | | -{ |
---|
3128 | | - if (PagePrivate(page)) |
---|
3129 | | - return; |
---|
3130 | | - |
---|
3131 | | - get_page(page); |
---|
3132 | | - SetPagePrivate(page); |
---|
3133 | | - set_page_private(page, data); |
---|
3134 | | -} |
---|
3135 | | - |
---|
3136 | | -static inline void f2fs_clear_page_private(struct page *page) |
---|
3137 | | -{ |
---|
3138 | | - if (!PagePrivate(page)) |
---|
3139 | | - return; |
---|
3140 | | - |
---|
3141 | | - set_page_private(page, 0); |
---|
3142 | | - ClearPagePrivate(page); |
---|
3143 | | - f2fs_put_page(page, 0); |
---|
3144 | | -} |
---|
3145 | | - |
---|
3146 | 3415 | /* |
---|
3147 | 3416 | * file.c |
---|
3148 | 3417 | */ |
---|
.. | .. |
---|
3248 | 3517 | void f2fs_inode_synced(struct inode *inode); |
---|
3249 | 3518 | int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); |
---|
3250 | 3519 | int f2fs_quota_sync(struct super_block *sb, int type); |
---|
| 3520 | +loff_t max_file_blocks(struct inode *inode); |
---|
3251 | 3521 | void f2fs_quota_off_umount(struct super_block *sb); |
---|
| 3522 | +void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason); |
---|
3252 | 3523 | int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); |
---|
3253 | 3524 | int f2fs_sync_fs(struct super_block *sb, int sync); |
---|
3254 | 3525 | int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); |
---|
.. | .. |
---|
3261 | 3532 | /* |
---|
3262 | 3533 | * node.c |
---|
3263 | 3534 | */ |
---|
3264 | | -struct dnode_of_data; |
---|
3265 | 3535 | struct node_info; |
---|
3266 | 3536 | |
---|
3267 | 3537 | int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); |
---|
.. | .. |
---|
3274 | 3544 | bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); |
---|
3275 | 3545 | bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); |
---|
3276 | 3546 | int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, |
---|
3277 | | - struct node_info *ni); |
---|
| 3547 | + struct node_info *ni, bool checkpoint_context); |
---|
3278 | 3548 | pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); |
---|
3279 | 3549 | int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); |
---|
3280 | 3550 | int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); |
---|
.. | .. |
---|
3288 | 3558 | struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); |
---|
3289 | 3559 | struct page *f2fs_get_node_page_ra(struct page *parent, int start); |
---|
3290 | 3560 | int f2fs_move_node_page(struct page *node_page, int gc_type); |
---|
3291 | | -int f2fs_flush_inline_data(struct f2fs_sb_info *sbi); |
---|
| 3561 | +void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); |
---|
3292 | 3562 | int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, |
---|
3293 | 3563 | struct writeback_control *wbc, bool atomic, |
---|
3294 | 3564 | unsigned int *seq_id); |
---|
.. | .. |
---|
3338 | 3608 | int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); |
---|
3339 | 3609 | void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); |
---|
3340 | 3610 | int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); |
---|
3341 | | -void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, |
---|
| 3611 | +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); |
---|
| 3612 | +void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); |
---|
| 3613 | +void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); |
---|
| 3614 | +void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); |
---|
| 3615 | +void f2fs_get_new_segment(struct f2fs_sb_info *sbi, |
---|
| 3616 | + unsigned int *newseg, bool new_sec, int dir); |
---|
| 3617 | +void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, |
---|
3342 | 3618 | unsigned int start, unsigned int end); |
---|
3343 | | -void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type); |
---|
| 3619 | +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); |
---|
| 3620 | +void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); |
---|
3344 | 3621 | int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); |
---|
3345 | 3622 | bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, |
---|
3346 | 3623 | struct cp_control *cpc); |
---|
.. | .. |
---|
3355 | 3632 | int f2fs_inplace_write_data(struct f2fs_io_info *fio); |
---|
3356 | 3633 | void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, |
---|
3357 | 3634 | block_t old_blkaddr, block_t new_blkaddr, |
---|
3358 | | - bool recover_curseg, bool recover_newaddr); |
---|
| 3635 | + bool recover_curseg, bool recover_newaddr, |
---|
| 3636 | + bool from_gc); |
---|
3359 | 3637 | void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, |
---|
3360 | 3638 | block_t old_addr, block_t new_addr, |
---|
3361 | 3639 | unsigned char version, bool recover_curseg, |
---|
.. | .. |
---|
3363 | 3641 | void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, |
---|
3364 | 3642 | block_t old_blkaddr, block_t *new_blkaddr, |
---|
3365 | 3643 | struct f2fs_summary *sum, int type, |
---|
3366 | | - struct f2fs_io_info *fio, bool add_list); |
---|
| 3644 | + struct f2fs_io_info *fio); |
---|
3367 | 3645 | void f2fs_wait_on_page_writeback(struct page *page, |
---|
3368 | 3646 | enum page_type type, bool ordered, bool locked); |
---|
3369 | 3647 | void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); |
---|
.. | .. |
---|
3374 | 3652 | int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, |
---|
3375 | 3653 | unsigned int val, int alloc); |
---|
3376 | 3654 | void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); |
---|
| 3655 | +int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi); |
---|
| 3656 | +int f2fs_check_write_pointer(struct f2fs_sb_info *sbi); |
---|
3377 | 3657 | int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); |
---|
3378 | 3658 | void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); |
---|
3379 | 3659 | int __init f2fs_create_segment_manager_caches(void); |
---|
.. | .. |
---|
3381 | 3661 | int f2fs_rw_hint_to_seg_type(enum rw_hint hint); |
---|
3382 | 3662 | enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, |
---|
3383 | 3663 | enum page_type type, enum temp_type temp); |
---|
| 3664 | +unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, |
---|
| 3665 | + unsigned int segno); |
---|
| 3666 | +unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, |
---|
| 3667 | + unsigned int segno); |
---|
3384 | 3668 | |
---|
3385 | 3669 | /* |
---|
3386 | 3670 | * checkpoint.c |
---|
3387 | 3671 | */ |
---|
3388 | | -void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); |
---|
| 3672 | +void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, |
---|
| 3673 | + unsigned char reason); |
---|
| 3674 | +void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); |
---|
3389 | 3675 | struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); |
---|
3390 | 3676 | struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); |
---|
3391 | | -struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index); |
---|
| 3677 | +struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); |
---|
3392 | 3678 | struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); |
---|
3393 | 3679 | bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, |
---|
3394 | 3680 | block_t blkaddr, int type); |
---|
.. | .. |
---|
3414 | 3700 | int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); |
---|
3415 | 3701 | void f2fs_update_dirty_page(struct inode *inode, struct page *page); |
---|
3416 | 3702 | void f2fs_remove_dirty_inode(struct inode *inode); |
---|
3417 | | -int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); |
---|
| 3703 | +int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, |
---|
| 3704 | + bool from_cp); |
---|
3418 | 3705 | void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); |
---|
| 3706 | +u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); |
---|
3419 | 3707 | int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); |
---|
3420 | 3708 | void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); |
---|
3421 | 3709 | int __init f2fs_create_checkpoint_caches(void); |
---|
3422 | 3710 | void f2fs_destroy_checkpoint_caches(void); |
---|
| 3711 | +int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi); |
---|
| 3712 | +int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); |
---|
| 3713 | +void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); |
---|
| 3714 | +void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); |
---|
3423 | 3715 | |
---|
3424 | 3716 | /* |
---|
3425 | 3717 | * data.c |
---|
3426 | 3718 | */ |
---|
3427 | 3719 | int __init f2fs_init_bioset(void); |
---|
3428 | 3720 | void f2fs_destroy_bioset(void); |
---|
3429 | | -struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio); |
---|
3430 | 3721 | int f2fs_init_bio_entry_cache(void); |
---|
3431 | 3722 | void f2fs_destroy_bio_entry_cache(void); |
---|
3432 | 3723 | void f2fs_submit_bio(struct f2fs_sb_info *sbi, |
---|
.. | .. |
---|
3451 | 3742 | int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index); |
---|
3452 | 3743 | int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from); |
---|
3453 | 3744 | int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); |
---|
3454 | | -int f2fs_mpage_readpages(struct address_space *mapping, |
---|
3455 | | - struct list_head *pages, struct page *page, |
---|
3456 | | - unsigned nr_pages, bool is_readahead); |
---|
3457 | 3745 | struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, |
---|
3458 | 3746 | int op_flags, bool for_write); |
---|
3459 | 3747 | struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index); |
---|
.. | .. |
---|
3462 | 3750 | struct page *f2fs_get_new_data_page(struct inode *inode, |
---|
3463 | 3751 | struct page *ipage, pgoff_t index, bool new_i_size); |
---|
3464 | 3752 | int f2fs_do_write_data_page(struct f2fs_io_info *fio); |
---|
3465 | | -void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); |
---|
| 3753 | +void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); |
---|
3466 | 3754 | int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, |
---|
3467 | 3755 | int create, int flag); |
---|
3468 | 3756 | int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
---|
.. | .. |
---|
3474 | 3762 | struct bio **bio, sector_t *last_block, |
---|
3475 | 3763 | struct writeback_control *wbc, |
---|
3476 | 3764 | enum iostat_type io_type, |
---|
3477 | | - int compr_blocks); |
---|
| 3765 | + int compr_blocks, bool allow_balance); |
---|
3478 | 3766 | void f2fs_invalidate_page(struct page *page, unsigned int offset, |
---|
3479 | 3767 | unsigned int length); |
---|
3480 | 3768 | int f2fs_release_page(struct page *page, gfp_t wait); |
---|
.. | .. |
---|
3483 | 3771 | struct page *page, enum migrate_mode mode); |
---|
3484 | 3772 | #endif |
---|
3485 | 3773 | bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); |
---|
3486 | | -void f2fs_clear_radix_tree_dirty_tag(struct page *page); |
---|
| 3774 | +void f2fs_clear_page_cache_dirty_tag(struct page *page); |
---|
3487 | 3775 | int f2fs_init_post_read_processing(void); |
---|
3488 | 3776 | void f2fs_destroy_post_read_processing(void); |
---|
3489 | 3777 | int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); |
---|
.. | .. |
---|
3495 | 3783 | int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); |
---|
3496 | 3784 | void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); |
---|
3497 | 3785 | block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); |
---|
3498 | | -int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, |
---|
| 3786 | +int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force, |
---|
3499 | 3787 | unsigned int segno); |
---|
3500 | 3788 | void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); |
---|
3501 | | -int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count); |
---|
| 3789 | +int f2fs_resize_fs(struct file *filp, __u64 block_count); |
---|
| 3790 | +int __init f2fs_create_garbage_collection_cache(void); |
---|
| 3791 | +void f2fs_destroy_garbage_collection_cache(void); |
---|
3502 | 3792 | |
---|
3503 | 3793 | /* |
---|
3504 | 3794 | * recovery.c |
---|
3505 | 3795 | */ |
---|
3506 | 3796 | int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); |
---|
3507 | 3797 | bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi); |
---|
| 3798 | +int __init f2fs_create_recovery_cache(void); |
---|
| 3799 | +void f2fs_destroy_recovery_cache(void); |
---|
3508 | 3800 | |
---|
3509 | 3801 | /* |
---|
3510 | 3802 | * debug.c |
---|
.. | .. |
---|
3515 | 3807 | struct f2fs_sb_info *sbi; |
---|
3516 | 3808 | int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; |
---|
3517 | 3809 | int main_area_segs, main_area_sections, main_area_zones; |
---|
3518 | | - unsigned long long hit_largest, hit_cached, hit_rbtree; |
---|
3519 | | - unsigned long long hit_total, total_ext; |
---|
3520 | | - int ext_tree, zombie_tree, ext_node; |
---|
| 3810 | + unsigned long long hit_cached[NR_EXTENT_CACHES]; |
---|
| 3811 | + unsigned long long hit_rbtree[NR_EXTENT_CACHES]; |
---|
| 3812 | + unsigned long long total_ext[NR_EXTENT_CACHES]; |
---|
| 3813 | + unsigned long long hit_total[NR_EXTENT_CACHES]; |
---|
| 3814 | + int ext_tree[NR_EXTENT_CACHES]; |
---|
| 3815 | + int zombie_tree[NR_EXTENT_CACHES]; |
---|
| 3816 | + int ext_node[NR_EXTENT_CACHES]; |
---|
| 3817 | + /* to count memory footprint */ |
---|
| 3818 | + unsigned long long ext_mem[NR_EXTENT_CACHES]; |
---|
| 3819 | + /* for read extent cache */ |
---|
| 3820 | + unsigned long long hit_largest; |
---|
| 3821 | + /* for block age extent cache */ |
---|
| 3822 | + unsigned long long allocated_data_blocks; |
---|
3521 | 3823 | int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; |
---|
3522 | 3824 | int ndirty_data, ndirty_qdata; |
---|
3523 | 3825 | int inmem_pages; |
---|
.. | .. |
---|
3533 | 3835 | int nr_discarding, nr_discarded; |
---|
3534 | 3836 | int nr_discard_cmd; |
---|
3535 | 3837 | unsigned int undiscard_blks; |
---|
| 3838 | + int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; |
---|
| 3839 | + unsigned int cur_ckpt_time, peak_ckpt_time; |
---|
3536 | 3840 | int inline_xattr, inline_inode, inline_dir, append, update, orphans; |
---|
3537 | | - int compr_inode, compr_blocks; |
---|
| 3841 | + int compr_inode; |
---|
| 3842 | + unsigned long long compr_blocks; |
---|
3538 | 3843 | int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt; |
---|
3539 | 3844 | unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; |
---|
3540 | 3845 | unsigned int bimodal, avg_vblocks; |
---|
3541 | 3846 | int util_free, util_valid, util_invalid; |
---|
3542 | 3847 | int rsvd_segs, overp_segs; |
---|
3543 | | - int dirty_count, node_pages, meta_pages; |
---|
| 3848 | + int dirty_count, node_pages, meta_pages, compress_pages; |
---|
| 3849 | + int compress_page_hit; |
---|
3544 | 3850 | int prefree_count, call_count, cp_count, bg_cp_count; |
---|
3545 | 3851 | int tot_segs, node_segs, data_segs, free_segs, free_secs; |
---|
3546 | 3852 | int bg_node_segs, bg_data_segs; |
---|
.. | .. |
---|
3550 | 3856 | int curseg[NR_CURSEG_TYPE]; |
---|
3551 | 3857 | int cursec[NR_CURSEG_TYPE]; |
---|
3552 | 3858 | int curzone[NR_CURSEG_TYPE]; |
---|
| 3859 | + unsigned int dirty_seg[NR_CURSEG_TYPE]; |
---|
| 3860 | + unsigned int full_seg[NR_CURSEG_TYPE]; |
---|
| 3861 | + unsigned int valid_blks[NR_CURSEG_TYPE]; |
---|
3553 | 3862 | |
---|
3554 | 3863 | unsigned int meta_count[META_MAX]; |
---|
3555 | 3864 | unsigned int segment_count[2]; |
---|
.. | .. |
---|
3571 | 3880 | #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++) |
---|
3572 | 3881 | #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++) |
---|
3573 | 3882 | #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--) |
---|
3574 | | -#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext)) |
---|
3575 | | -#define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree)) |
---|
| 3883 | +#define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type])) |
---|
| 3884 | +#define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type])) |
---|
3576 | 3885 | #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest)) |
---|
3577 | | -#define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached)) |
---|
| 3886 | +#define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type])) |
---|
3578 | 3887 | #define stat_inc_inline_xattr(inode) \ |
---|
3579 | 3888 | do { \ |
---|
3580 | 3889 | if (f2fs_has_inline_xattr(inode)) \ |
---|
.. | .. |
---|
3616 | 3925 | (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \ |
---|
3617 | 3926 | } while (0) |
---|
3618 | 3927 | #define stat_add_compr_blocks(inode, blocks) \ |
---|
3619 | | - (atomic_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) |
---|
| 3928 | + (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) |
---|
3620 | 3929 | #define stat_sub_compr_blocks(inode, blocks) \ |
---|
3621 | | - (atomic_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) |
---|
| 3930 | + (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) |
---|
3622 | 3931 | #define stat_inc_meta_count(sbi, blkaddr) \ |
---|
3623 | 3932 | do { \ |
---|
3624 | 3933 | if (blkaddr < SIT_I(sbi)->sit_base_addr) \ |
---|
.. | .. |
---|
3700 | 4009 | #define stat_other_skip_bggc_count(sbi) do { } while (0) |
---|
3701 | 4010 | #define stat_inc_dirty_inode(sbi, type) do { } while (0) |
---|
3702 | 4011 | #define stat_dec_dirty_inode(sbi, type) do { } while (0) |
---|
3703 | | -#define stat_inc_total_hit(sbi) do { } while (0) |
---|
3704 | | -#define stat_inc_rbtree_node_hit(sbi) do { } while (0) |
---|
| 4012 | +#define stat_inc_total_hit(sbi, type) do { } while (0) |
---|
| 4013 | +#define stat_inc_rbtree_node_hit(sbi, type) do { } while (0) |
---|
3705 | 4014 | #define stat_inc_largest_node_hit(sbi) do { } while (0) |
---|
3706 | | -#define stat_inc_cached_node_hit(sbi) do { } while (0) |
---|
| 4015 | +#define stat_inc_cached_node_hit(sbi, type) do { } while (0) |
---|
3707 | 4016 | #define stat_inc_inline_xattr(inode) do { } while (0) |
---|
3708 | 4017 | #define stat_dec_inline_xattr(inode) do { } while (0) |
---|
3709 | 4018 | #define stat_inc_inline_inode(inode) do { } while (0) |
---|
.. | .. |
---|
3714 | 4023 | #define stat_dec_compr_inode(inode) do { } while (0) |
---|
3715 | 4024 | #define stat_add_compr_blocks(inode, blocks) do { } while (0) |
---|
3716 | 4025 | #define stat_sub_compr_blocks(inode, blocks) do { } while (0) |
---|
3717 | | -#define stat_inc_atomic_write(inode) do { } while (0) |
---|
3718 | | -#define stat_dec_atomic_write(inode) do { } while (0) |
---|
3719 | 4026 | #define stat_update_max_atomic_write(inode) do { } while (0) |
---|
3720 | 4027 | #define stat_inc_volatile_write(inode) do { } while (0) |
---|
3721 | 4028 | #define stat_dec_volatile_write(inode) do { } while (0) |
---|
.. | .. |
---|
3752 | 4059 | * inline.c |
---|
3753 | 4060 | */ |
---|
3754 | 4061 | bool f2fs_may_inline_data(struct inode *inode); |
---|
| 4062 | +bool f2fs_sanity_check_inline_data(struct inode *inode); |
---|
3755 | 4063 | bool f2fs_may_inline_dentry(struct inode *inode); |
---|
3756 | 4064 | void f2fs_do_read_inline_data(struct page *page, struct page *ipage); |
---|
3757 | 4065 | void f2fs_truncate_inline_inode(struct inode *inode, |
---|
.. | .. |
---|
3794 | 4102 | */ |
---|
3795 | 4103 | struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, |
---|
3796 | 4104 | struct rb_entry *cached_re, unsigned int ofs); |
---|
| 4105 | +struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi, |
---|
| 4106 | + struct rb_root_cached *root, |
---|
| 4107 | + struct rb_node **parent, |
---|
| 4108 | + unsigned long long key, bool *left_most); |
---|
3797 | 4109 | struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, |
---|
3798 | 4110 | struct rb_root_cached *root, |
---|
3799 | 4111 | struct rb_node **parent, |
---|
.. | .. |
---|
3804 | 4116 | struct rb_node ***insert_p, struct rb_node **insert_parent, |
---|
3805 | 4117 | bool force, bool *leftmost); |
---|
3806 | 4118 | bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, |
---|
3807 | | - struct rb_root_cached *root); |
---|
3808 | | -unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); |
---|
3809 | | -bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext); |
---|
| 4119 | + struct rb_root_cached *root, bool check_key); |
---|
| 4120 | +void f2fs_init_extent_tree(struct inode *inode); |
---|
3810 | 4121 | void f2fs_drop_extent_tree(struct inode *inode); |
---|
3811 | | -unsigned int f2fs_destroy_extent_node(struct inode *inode); |
---|
| 4122 | +void f2fs_destroy_extent_node(struct inode *inode); |
---|
3812 | 4123 | void f2fs_destroy_extent_tree(struct inode *inode); |
---|
3813 | | -bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs, |
---|
3814 | | - struct extent_info *ei); |
---|
3815 | | -void f2fs_update_extent_cache(struct dnode_of_data *dn); |
---|
3816 | | -void f2fs_update_extent_cache_range(struct dnode_of_data *dn, |
---|
3817 | | - pgoff_t fofs, block_t blkaddr, unsigned int len); |
---|
3818 | 4124 | void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi); |
---|
3819 | 4125 | int __init f2fs_create_extent_cache(void); |
---|
3820 | 4126 | void f2fs_destroy_extent_cache(void); |
---|
| 4127 | + |
---|
| 4128 | +/* read extent cache ops */ |
---|
| 4129 | +void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage); |
---|
| 4130 | +bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs, |
---|
| 4131 | + struct extent_info *ei); |
---|
| 4132 | +void f2fs_update_read_extent_cache(struct dnode_of_data *dn); |
---|
| 4133 | +void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn, |
---|
| 4134 | + pgoff_t fofs, block_t blkaddr, unsigned int len); |
---|
| 4135 | +unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, |
---|
| 4136 | + int nr_shrink); |
---|
| 4137 | + |
---|
| 4138 | +/* block age extent cache ops */ |
---|
| 4139 | +void f2fs_init_age_extent_tree(struct inode *inode); |
---|
| 4140 | +bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs, |
---|
| 4141 | + struct extent_info *ei); |
---|
| 4142 | +void f2fs_update_age_extent_cache(struct dnode_of_data *dn); |
---|
| 4143 | +void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn, |
---|
| 4144 | + pgoff_t fofs, unsigned int len); |
---|
| 4145 | +unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, |
---|
| 4146 | + int nr_shrink); |
---|
3821 | 4147 | |
---|
3822 | 4148 | /* |
---|
3823 | 4149 | * sysfs.c |
---|
.. | .. |
---|
3871 | 4197 | bool f2fs_is_compress_backend_ready(struct inode *inode); |
---|
3872 | 4198 | int f2fs_init_compress_mempool(void); |
---|
3873 | 4199 | void f2fs_destroy_compress_mempool(void); |
---|
3874 | | -void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity); |
---|
| 4200 | +void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task); |
---|
| 4201 | +void f2fs_end_read_compressed_page(struct page *page, bool failed, |
---|
| 4202 | + block_t blkaddr, bool in_task); |
---|
3875 | 4203 | bool f2fs_cluster_is_empty(struct compress_ctx *cc); |
---|
3876 | 4204 | bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); |
---|
3877 | 4205 | void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page); |
---|
.. | .. |
---|
3880 | 4208 | struct writeback_control *wbc, |
---|
3881 | 4209 | enum iostat_type io_type); |
---|
3882 | 4210 | int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); |
---|
| 4211 | +void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, |
---|
| 4212 | + pgoff_t fofs, block_t blkaddr, |
---|
| 4213 | + unsigned int llen, unsigned int c_len); |
---|
3883 | 4214 | int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, |
---|
3884 | 4215 | unsigned nr_pages, sector_t *last_block_in_bio, |
---|
3885 | 4216 | bool is_readahead, bool for_write); |
---|
3886 | 4217 | struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); |
---|
3887 | | -void f2fs_free_dic(struct decompress_io_ctx *dic); |
---|
3888 | | -void f2fs_decompress_end_io(struct page **rpages, |
---|
3889 | | - unsigned int cluster_size, bool err, bool verity); |
---|
| 4218 | +void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, |
---|
| 4219 | + bool in_task); |
---|
| 4220 | +void f2fs_put_page_dic(struct page *page, bool in_task); |
---|
| 4221 | +unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn); |
---|
3890 | 4222 | int f2fs_init_compress_ctx(struct compress_ctx *cc); |
---|
3891 | | -void f2fs_destroy_compress_ctx(struct compress_ctx *cc); |
---|
| 4223 | +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse); |
---|
3892 | 4224 | void f2fs_init_compress_info(struct f2fs_sb_info *sbi); |
---|
| 4225 | +int f2fs_init_compress_inode(struct f2fs_sb_info *sbi); |
---|
| 4226 | +void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi); |
---|
| 4227 | +int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); |
---|
| 4228 | +void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); |
---|
| 4229 | +int __init f2fs_init_compress_cache(void); |
---|
| 4230 | +void f2fs_destroy_compress_cache(void); |
---|
| 4231 | +struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); |
---|
| 4232 | +void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr); |
---|
| 4233 | +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, |
---|
| 4234 | + nid_t ino, block_t blkaddr); |
---|
| 4235 | +bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, |
---|
| 4236 | + block_t blkaddr); |
---|
| 4237 | +void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); |
---|
/*
 * Bump the superblock-wide count of newly created compressed inodes.
 * Statistics only (surfaced via sysfs); not persisted on disk.
 */
#define inc_compr_inode_stat(inode)					\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		sbi->compr_new_inode++;					\
	} while (0)
---|
/*
 * Account a written compressed cluster: @blocks is the number of blocks
 * actually written; the gap to the inode's cluster size is the space
 * saved by compression.
 *
 * NOTE(review): the counter updates are plain (non-atomic) increments —
 * assumed to be best-effort statistics, so concurrent writers merely
 * skew the numbers; confirm no consumer requires exact totals.
 *
 * Fix: parenthesize the @blocks macro argument so callers may pass an
 * expression (e.g. a - b) without changing the arithmetic (CERT PRE01-C).
 */
#define add_compr_block_stat(inode, blocks)				\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		int diff = F2FS_I(inode)->i_cluster_size - (blocks);	\
		sbi->compr_written_block += (blocks);			\
		sbi->compr_saved_block += diff;				\
	} while (0)
---|
3893 | 4250 | #else |
---|
3894 | 4251 | static inline bool f2fs_is_compressed_page(struct page *page) { return false; } |
---|
3895 | 4252 | static inline bool f2fs_is_compress_backend_ready(struct inode *inode) |
---|
.. | .. |
---|
3906 | 4263 | } |
---|
3907 | 4264 | static inline int f2fs_init_compress_mempool(void) { return 0; } |
---|
3908 | 4265 | static inline void f2fs_destroy_compress_mempool(void) { } |
---|
| 4266 | +static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic, |
---|
| 4267 | + bool in_task) { } |
---|
| 4268 | +static inline void f2fs_end_read_compressed_page(struct page *page, |
---|
| 4269 | + bool failed, block_t blkaddr, bool in_task) |
---|
| 4270 | +{ |
---|
| 4271 | + WARN_ON_ONCE(1); |
---|
| 4272 | +} |
---|
| 4273 | +static inline void f2fs_put_page_dic(struct page *page, bool in_task) |
---|
| 4274 | +{ |
---|
| 4275 | + WARN_ON_ONCE(1); |
---|
| 4276 | +} |
---|
| 4277 | +static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; } |
---|
| 4278 | +static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; } |
---|
| 4279 | +static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { } |
---|
| 4280 | +static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; } |
---|
| 4281 | +static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } |
---|
| 4282 | +static inline int __init f2fs_init_compress_cache(void) { return 0; } |
---|
| 4283 | +static inline void f2fs_destroy_compress_cache(void) { } |
---|
| 4284 | +static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, |
---|
| 4285 | + block_t blkaddr) { } |
---|
| 4286 | +static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, |
---|
| 4287 | + struct page *page, nid_t ino, block_t blkaddr) { } |
---|
| 4288 | +static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, |
---|
| 4289 | + struct page *page, block_t blkaddr) { return false; } |
---|
| 4290 | +static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, |
---|
| 4291 | + nid_t ino) { } |
---|
| 4292 | +#define inc_compr_inode_stat(inode) do { } while (0) |
---|
| 4293 | +static inline void f2fs_update_read_extent_tree_range_compressed( |
---|
| 4294 | + struct inode *inode, |
---|
| 4295 | + pgoff_t fofs, block_t blkaddr, |
---|
| 4296 | + unsigned int llen, unsigned int c_len) { } |
---|
3909 | 4297 | #endif |
---|
3910 | 4298 | |
---|
3911 | | -static inline void set_compress_context(struct inode *inode) |
---|
| 4299 | +static inline int set_compress_context(struct inode *inode) |
---|
3912 | 4300 | { |
---|
| 4301 | +#ifdef CONFIG_F2FS_FS_COMPRESSION |
---|
3913 | 4302 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
---|
3914 | 4303 | |
---|
3915 | 4304 | F2FS_I(inode)->i_compress_algorithm = |
---|
3916 | 4305 | F2FS_OPTION(sbi).compress_algorithm; |
---|
3917 | 4306 | F2FS_I(inode)->i_log_cluster_size = |
---|
3918 | 4307 | F2FS_OPTION(sbi).compress_log_size; |
---|
| 4308 | + F2FS_I(inode)->i_compress_flag = |
---|
| 4309 | + F2FS_OPTION(sbi).compress_chksum ? |
---|
| 4310 | + 1 << COMPRESS_CHKSUM : 0; |
---|
3919 | 4311 | F2FS_I(inode)->i_cluster_size = |
---|
3920 | 4312 | 1 << F2FS_I(inode)->i_log_cluster_size; |
---|
| 4313 | + if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 && |
---|
| 4314 | + F2FS_OPTION(sbi).compress_level) |
---|
| 4315 | + F2FS_I(inode)->i_compress_flag |= |
---|
| 4316 | + F2FS_OPTION(sbi).compress_level << |
---|
| 4317 | + COMPRESS_LEVEL_OFFSET; |
---|
3921 | 4318 | F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; |
---|
3922 | 4319 | set_inode_flag(inode, FI_COMPRESSED_FILE); |
---|
3923 | 4320 | stat_inc_compr_inode(inode); |
---|
| 4321 | + inc_compr_inode_stat(inode); |
---|
3924 | 4322 | f2fs_mark_inode_dirty_sync(inode, true); |
---|
| 4323 | + return 0; |
---|
| 4324 | +#else |
---|
| 4325 | + return -EOPNOTSUPP; |
---|
| 4326 | +#endif |
---|
3925 | 4327 | } |
---|
3926 | 4328 | |
---|
3927 | | -static inline u64 f2fs_disable_compressed_file(struct inode *inode) |
---|
| 4329 | +static inline bool f2fs_disable_compressed_file(struct inode *inode) |
---|
3928 | 4330 | { |
---|
3929 | 4331 | struct f2fs_inode_info *fi = F2FS_I(inode); |
---|
3930 | 4332 | |
---|
3931 | 4333 | if (!f2fs_compressed_file(inode)) |
---|
3932 | | - return 0; |
---|
3933 | | - if (S_ISREG(inode->i_mode)) { |
---|
3934 | | - if (get_dirty_pages(inode)) |
---|
3935 | | - return 1; |
---|
3936 | | - if (fi->i_compr_blocks) |
---|
3937 | | - return fi->i_compr_blocks; |
---|
3938 | | - } |
---|
| 4334 | + return true; |
---|
| 4335 | + if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode)) |
---|
| 4336 | + return false; |
---|
3939 | 4337 | |
---|
3940 | 4338 | fi->i_flags &= ~F2FS_COMPR_FL; |
---|
3941 | 4339 | stat_dec_compr_inode(inode); |
---|
3942 | 4340 | clear_inode_flag(inode, FI_COMPRESSED_FILE); |
---|
3943 | 4341 | f2fs_mark_inode_dirty_sync(inode, true); |
---|
3944 | | - return 0; |
---|
| 4342 | + return true; |
---|
3945 | 4343 | } |
---|
3946 | 4344 | |
---|
3947 | 4345 | #define F2FS_FEATURE_FUNCS(name, flagname) \ |
---|
.. | .. |
---|
3963 | 4361 | F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM); |
---|
3964 | 4362 | F2FS_FEATURE_FUNCS(casefold, CASEFOLD); |
---|
3965 | 4363 | F2FS_FEATURE_FUNCS(compression, COMPRESSION); |
---|
| 4364 | +F2FS_FEATURE_FUNCS(readonly, RO); |
---|
3966 | 4365 | |
---|
3967 | 4366 | #ifdef CONFIG_BLK_DEV_ZONED |
---|
3968 | 4367 | static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, |
---|
.. | .. |
---|
4022 | 4421 | return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; |
---|
4023 | 4422 | } |
---|
4024 | 4423 | |
---|
4025 | | -static inline bool f2fs_may_encrypt(struct inode *dir, struct inode *inode) |
---|
| 4424 | +static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi) |
---|
4026 | 4425 | { |
---|
4027 | | -#ifdef CONFIG_FS_ENCRYPTION |
---|
4028 | | - struct f2fs_sb_info *sbi = F2FS_I_SB(dir); |
---|
4029 | | - umode_t mode = inode->i_mode; |
---|
4030 | | - |
---|
4031 | | - /* |
---|
4032 | | - * If the directory encrypted or dummy encryption enabled, |
---|
4033 | | - * then we should encrypt the inode. |
---|
4034 | | - */ |
---|
4035 | | - if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) |
---|
4036 | | - return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)); |
---|
4037 | | -#endif |
---|
4038 | | - return false; |
---|
| 4426 | + return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW; |
---|
4039 | 4427 | } |
---|
4040 | 4428 | |
---|
4041 | 4429 | static inline bool f2fs_may_compress(struct inode *inode) |
---|
.. | .. |
---|
4051 | 4439 | u64 blocks, bool add) |
---|
4052 | 4440 | { |
---|
4053 | 4441 | int diff = F2FS_I(inode)->i_cluster_size - blocks; |
---|
| 4442 | + struct f2fs_inode_info *fi = F2FS_I(inode); |
---|
4054 | 4443 | |
---|
4055 | 4444 | /* don't update i_compr_blocks if saved blocks were released */ |
---|
4056 | | - if (!add && !F2FS_I(inode)->i_compr_blocks) |
---|
| 4445 | + if (!add && !atomic_read(&fi->i_compr_blocks)) |
---|
4057 | 4446 | return; |
---|
4058 | 4447 | |
---|
4059 | 4448 | if (add) { |
---|
4060 | | - F2FS_I(inode)->i_compr_blocks += diff; |
---|
| 4449 | + atomic_add(diff, &fi->i_compr_blocks); |
---|
4061 | 4450 | stat_add_compr_blocks(inode, diff); |
---|
4062 | 4451 | } else { |
---|
4063 | | - F2FS_I(inode)->i_compr_blocks -= diff; |
---|
| 4452 | + atomic_sub(diff, &fi->i_compr_blocks); |
---|
4064 | 4453 | stat_sub_compr_blocks(inode, diff); |
---|
4065 | 4454 | } |
---|
4066 | 4455 | f2fs_mark_inode_dirty_sync(inode, true); |
---|
.. | .. |
---|
4113 | 4502 | if (F2FS_IO_ALIGNED(sbi)) |
---|
4114 | 4503 | return true; |
---|
4115 | 4504 | } |
---|
4116 | | - if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) && |
---|
4117 | | - !IS_SWAPFILE(inode)) |
---|
| 4505 | + if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED)) |
---|
4118 | 4506 | return true; |
---|
4119 | 4507 | |
---|
4120 | 4508 | return false; |
---|
4121 | 4509 | } |
---|
4122 | 4510 | |
---|
| 4511 | +static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) |
---|
| 4512 | +{ |
---|
| 4513 | + return fsverity_active(inode) && |
---|
| 4514 | + idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); |
---|
| 4515 | +} |
---|
| 4516 | + |
---|
4123 | 4517 | #ifdef CONFIG_F2FS_FAULT_INJECTION |
---|
4124 | 4518 | extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, |
---|
4125 | 4519 | unsigned int type); |
---|