...
 #define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */
 
 #define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
+#define F2FS_MIN_META_SEGMENTS	8 /* SB + 2 (CP + SIT + NAT) + SSA */
 
 /* L: Logical segment # in volume, R: Relative segment # in main area */
 #define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
 #define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)
 
 #define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
-#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE)
+#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
+#define SE_PAGETYPE(se)	((IS_NODESEG((se)->type) ? NODE : DATA))
+
+static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
+		unsigned short seg_type)
+{
+	f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
+}
 
 #define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
 #define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
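Side note, not part of the patch: IS_NODESEG() gains an upper bound here because the curseg enum now carries extra persistent logs past CURSEG_COLD_NODE. A minimal user-space sketch of why the old open-ended test would misclassify them; every enum value below is an assumption for illustration, not copied from f2fs:

	enum { HOT_DATA, WARM_DATA, COLD_DATA, HOT_NODE, WARM_NODE, COLD_NODE,
	       COLD_DATA_PINNED, ALL_DATA_ATGC, NR_PERSISTENT_LOG };

	/* an open-ended ">= HOT_NODE" would also match the two data logs above */
	static int is_nodeseg(int t)
	{
		return t >= HOT_NODE && t <= COLD_NODE;
	}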
...
 	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
 	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
 	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
-	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
+	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) ||	\
+	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) ||	\
+	 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))
 
 #define IS_CURSEC(sbi, secno)						\
 	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
...
 	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
 	  (sbi)->segs_per_sec) ||	\
 	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
-	  (sbi)->segs_per_sec))	\
+	  (sbi)->segs_per_sec) ||	\
+	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno /	\
+	  (sbi)->segs_per_sec) ||	\
+	 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno /	\
+	  (sbi)->segs_per_sec))
 
 #define MAIN_BLKADDR(sbi)						\
 	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr :				\
...
 		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
 #define BLKS_PER_SEC(sbi)					\
 	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
+#define CAP_BLKS_PER_SEC(sbi)					\
+	((sbi)->segs_per_sec * (sbi)->blocks_per_seg -		\
+	 (sbi)->unusable_blocks_per_sec)
+#define CAP_SEGS_PER_SEC(sbi)					\
+	((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
+	 (sbi)->log_blocks_per_seg))
 #define GET_SEC_FROM_SEG(sbi, segno)				\
 	(((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
 #define GET_SEG_FROM_SEC(sbi, secno)				\
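Worked example for the two new capacity macros (all numbers assumed): with segs_per_sec = 1024, blocks_per_seg = 512 (so log_blocks_per_seg = 9) and unusable_blocks_per_sec = 1536, CAP_BLKS_PER_SEC() evaluates to 1024 * 512 - 1536 = 522752 and CAP_SEGS_PER_SEC() to 1024 - (1536 >> 9) = 1021.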
...
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
+ * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into
+ * fragmented segment which has similar aging degree.
 */
 enum {
 	LFS = 0,
-	SSR
+	SSR,
+	AT_SSR,
 };
 
 /*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on cost-benefit algorithm.
 * GC_GREEDY is based on greedy algorithm.
+ * GC_AT is based on age-threshold algorithm.
 */
 enum {
 	GC_CB = 0,
 	GC_GREEDY,
+	GC_AT,
 	ALLOC_NEXT,
 	FLUSH_DEVICE,
 	MAX_GC_POLICY,
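Side note, not from the patch: GC_AT and AT_SSR both rank candidates by age, i.e. how long a section has sat unmodified. A rough, self-contained sketch of such a filter; the helper name and the exact comparison are assumptions, not the patch's victim-selection code:

	/* age = time since the candidate was last written */
	static int old_enough(unsigned long long mtime, unsigned long long now,
			      unsigned long long age_threshold)
	{
		unsigned long long age = (now > mtime) ? now - mtime : 0;

		return age >= age_threshold;
	}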
...
 /*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
- * FORCE_FG_GC means on-demand cleaning job in background.
 */
 enum {
 	BG_GC = 0,
 	FG_GC,
-	FORCE_FG_GC,
 };
 
 /* for a function parameter to select a victim segment */
 struct victim_sel_policy {
 	int alloc_mode;			/* LFS or SSR */
 	int gc_mode;			/* GC_CB or GC_GREEDY */
-	unsigned long *dirty_segmap;	/* dirty segment bitmap */
-	unsigned int max_search;	/* maximum # of segments to search */
+	unsigned long *dirty_bitmap;	/* dirty segment/section bitmap */
+	unsigned int max_search;	/*
+					 * maximum # of segments/sections
+					 * to search
+					 */
 	unsigned int offset;		/* last scanned bitmap offset */
 	unsigned int ofs_unit;		/* bitmap search unit */
 	unsigned int min_cost;		/* minimum cost */
+	unsigned long long oldest_age;	/* oldest age of segments having the same min cost */
 	unsigned int min_segno;		/* segment # having min. cost */
+	unsigned long long age;		/* mtime of GCed section*/
+	unsigned long long age_threshold;/* age threshold */
 };
 
 struct seg_entry {
...
 	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
 #endif
 	/*
-	 * # of valid blocks and the validity bitmap stored in the the last
+	 * # of valid blocks and the validity bitmap stored in the last
 	 * checkpoint pack. This information is used by the SSR mode.
 	 */
 	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
...
 	unsigned long long mounted_time;	/* mount time */
 	unsigned long long min_mtime;		/* min. modification time */
 	unsigned long long max_mtime;		/* max. modification time */
+	unsigned long long dirty_min_mtime;	/* rerange candidates in GC_AT */
+	unsigned long long dirty_max_mtime;	/* rerange candidates in GC_AT */
 
 	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
 };
...
 struct dirty_seglist_info {
 	const struct victim_selection *v_ops;	/* victim selction operation */
 	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
+	unsigned long *dirty_secmap;
 	struct mutex seglist_lock;		/* lock for segment bitmaps */
 	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
 	unsigned long *victim_secmap;		/* background GC victims */
+	unsigned long *pinned_secmap;		/* pinned victims from foreground GC */
+	unsigned int pinned_secmap_cnt;		/* count of victims which has pinned data */
+	bool enable_pin_section;		/* enable pinning section */
 };
 
 /* victim selection function for cleaning and SSR */
 struct victim_selection {
 	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
-			int, int, char);
+			int, int, char, unsigned long long);
 };
 
 /* for active log information */
...
 	struct rw_semaphore journal_rwsem;	/* protect journal area */
 	struct f2fs_journal *journal;		/* cached journal info */
 	unsigned char alloc_type;		/* current allocation type */
+	unsigned short seg_type;		/* segment type like CURSEG_XXX_TYPE */
 	unsigned int segno;			/* current segment number */
 	unsigned short next_blkoff;		/* next block offset to write */
 	unsigned int zone;			/* current zone number */
 	unsigned int next_segno;		/* preallocated segment */
+	bool inited;				/* indicate inmem log is inited */
 };
 
 struct sit_entry_set {
...
 */
 static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
 {
-	if (type == CURSEG_COLD_DATA_PINNED)
-		type = CURSEG_COLD_DATA;
 	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
 }
 
...
 }
 
 static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
-					unsigned int segno)
+				unsigned int segno, bool use_section)
 {
+	if (use_section && __is_large_section(sbi)) {
+		unsigned int start_segno = START_SEGNO(segno);
+		unsigned int blocks = 0;
+		int i;
+
+		for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
+			struct seg_entry *se = get_seg_entry(sbi, start_segno);
+
+			blocks += se->ckpt_valid_blocks;
+		}
+		return blocks;
+	}
 	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
 }
 
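Worked example (numbers assumed): in a large section with segs_per_sec = 4 whose segments hold 512, 300, 0 and 128 checkpointed valid blocks, get_ckpt_valid_blocks() with use_section = true returns 512 + 300 + 0 + 128 = 940, while the per-segment call for the first segment still returns 512.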
...
 	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
 	unsigned int next;
+	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
 
 	spin_lock(&free_i->segmap_lock);
 	clear_bit(segno, free_i->free_segmap);
...
 
 	next = find_next_bit(free_i->free_segmap,
 			start_segno + sbi->segs_per_sec, start_segno);
-	if (next >= start_segno + sbi->segs_per_sec) {
+	if (next >= start_segno + usable_segs) {
 		clear_bit(secno, free_i->free_secmap);
 		free_i->free_sections++;
 	}
...
 }
 
 static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
-				unsigned int segno)
+				unsigned int segno, bool inmem)
 {
 	struct free_segmap_info *free_i = FREE_I(sbi);
 	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
 	unsigned int next;
+	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
 
 	spin_lock(&free_i->segmap_lock);
 	if (test_and_clear_bit(segno, free_i->free_segmap)) {
 		free_i->free_segments++;
 
-		if (IS_CURSEC(sbi, secno))
+		if (!inmem && IS_CURSEC(sbi, secno))
 			goto skip_free;
 		next = find_next_bit(free_i->free_segmap,
 				start_segno + sbi->segs_per_sec, start_segno);
-		if (next >= start_segno + sbi->segs_per_sec) {
+		if (next >= start_segno + usable_segs) {
 			if (test_and_clear_bit(secno, free_i->free_secmap))
 				free_i->free_sections++;
 		}
...
 	return FREE_I(sbi)->free_segments;
 }
 
-static inline int reserved_segments(struct f2fs_sb_info *sbi)
+static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
 {
-	return SM_I(sbi)->reserved_segments;
+	return SM_I(sbi)->reserved_segments +
+			SM_I(sbi)->additional_reserved_segments;
 }
 
 static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
...
 
 static inline int reserved_sections(struct f2fs_sb_info *sbi)
 {
-	return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
+	return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
 }
 
-static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
+static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+			unsigned int node_blocks, unsigned int dent_blocks)
 {
-	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
-					get_pages(sbi, F2FS_DIRTY_DENTS);
-	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+
 	unsigned int segno, left_blocks;
 	int i;
 
 	/* check current node segment */
 	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
 		segno = CURSEG_I(sbi, i)->segno;
-		left_blocks = sbi->blocks_per_seg -
-			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+		left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
+				get_seg_entry(sbi, segno)->ckpt_valid_blocks;
 
 		if (node_blocks > left_blocks)
 			return false;
...
 
 	/* check current data segment */
 	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
-	left_blocks = sbi->blocks_per_seg -
+	left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
 			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
 	if (dent_blocks > left_blocks)
 		return false;
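Worked example (numbers assumed): if the current hot-data segment has 512 usable blocks of which 460 were valid at the last checkpoint, left_blocks is 52, so 40 pending dentry blocks fit but 60 do not and has_curseg_enough_space() returns false.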
...
 static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
 					int freed, int needed)
 {
-	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
-	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
-	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
+					get_pages(sbi, F2FS_DIRTY_DENTS) +
+					get_pages(sbi, F2FS_DIRTY_IMETA);
+	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+	unsigned int node_secs = total_node_blocks / BLKS_PER_SEC(sbi);
+	unsigned int dent_secs = total_dent_blocks / BLKS_PER_SEC(sbi);
+	unsigned int node_blocks = total_node_blocks % BLKS_PER_SEC(sbi);
+	unsigned int dent_blocks = total_dent_blocks % BLKS_PER_SEC(sbi);
+	unsigned int free, need_lower, need_upper;
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		return false;
 
-	if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
-			has_curseg_enough_space(sbi))
+	free = free_sections(sbi) + freed;
+	need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed;
+	need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
+
+	if (free > need_upper)
 		return false;
-	return (free_sections(sbi) + freed) <=
-		(node_secs + 2 * dent_secs + imeta_secs +
-		reserved_sections(sbi) + needed);
+	else if (free <= need_lower)
+		return true;
+	return !has_curseg_enough_space(sbi, node_blocks, dent_blocks);
 }
 
 static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
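Side note on the new bounds test in has_not_enough_free_secs() above; this is a user-space sketch with every number assumed, not the kernel code:

	#include <stdbool.h>

	static bool not_enough_free_secs(unsigned int free_secs)
	{
		unsigned int blks_per_sec = 1024, reserved_secs = 10, needed = 0;
		unsigned int total_node_blocks = 3000, total_dent_blocks = 500;
		unsigned int node_secs = total_node_blocks / blks_per_sec;	/* 2   */
		unsigned int dent_secs = total_dent_blocks / blks_per_sec;	/* 0   */
		unsigned int node_blocks = total_node_blocks % blks_per_sec;	/* 952 */
		unsigned int dent_blocks = total_dent_blocks % blks_per_sec;	/* 500 */
		unsigned int need_lower = node_secs + dent_secs + reserved_secs + needed; /* 12 */
		unsigned int need_upper = need_lower + !!node_blocks + !!dent_blocks;	   /* 14 */

		if (free_secs > need_upper)
			return false;	/* clearly enough free sections */
		if (free_secs <= need_lower)
			return true;	/* clearly not enough */
		/* 13 or 14 free: the real code defers to has_curseg_enough_space();
		 * treat the borderline case as "not enough" in this sketch. */
		return true;
	}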
...
 * pages over min_fsync_blocks. (=default option)
 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
 * F2FS_IPU_NOCACHE - disable IPU bio cache.
- * F2FS_IPUT_DISABLE - disable IPU. (=default option in LFS mode)
+ * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
+ *                            FI_OPU_WRITE flag.
+ * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
 */
 #define DEF_MIN_IPU_UTIL	70
 #define DEF_MIN_FSYNC_BLOCKS	8
...
 	F2FS_IPU_FSYNC,
 	F2FS_IPU_ASYNC,
 	F2FS_IPU_NOCACHE,
+	F2FS_IPU_HONOR_OPU_WRITE,
 };
 
 static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
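Side note (an assumption, not something this patch adds): the ipu_policy field is treated here as a bitmask indexed by the F2FS_IPU_* values above, so a policy test looks roughly like this self-contained sketch; the helper name is made up:

	#include <stdbool.h>

	static bool ipu_policy_set(unsigned int ipu_policy, int which)
	{
		return ipu_policy & (1u << which);
	}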
...
 	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
 	int valid_blocks = 0;
 	int cur_pos = 0, next_pos;
+	unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);
 
 	/* check bitmap with valid block count */
 	do {
 		if (is_valid) {
 			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
-					sbi->blocks_per_seg,
+					usable_blks_per_seg,
 					cur_pos);
 			valid_blocks += next_pos - cur_pos;
 		} else
 			next_pos = find_next_bit_le(&raw_sit->valid_map,
-					sbi->blocks_per_seg,
+					usable_blks_per_seg,
 					cur_pos);
 		cur_pos = next_pos;
 		is_valid = !is_valid;
-	} while (cur_pos < sbi->blocks_per_seg);
+	} while (cur_pos < usable_blks_per_seg);
 
 	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
 		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
...
 		return -EFSCORRUPTED;
 	}
 
+	if (usable_blks_per_seg < sbi->blocks_per_seg)
+		f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
+				sbi->blocks_per_seg,
+				usable_blks_per_seg) != sbi->blocks_per_seg);
+
 	/* check segment usage, and check boundary of a given segment number */
-	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
 		|| segno > TOTAL_SEGS(sbi) - 1)) {
 		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
 			 GET_SIT_VBLOCKS(raw_sit), segno);
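Worked example (numbers assumed): on a zoned device where only 400 of a segment's 512 blocks are usable, the reworked checks walk valid_map only up to bit 400, require GET_SIT_VBLOCKS() to be at most 400, and the new f2fs_bug_on() fires if any bit in the 400..511 tail is set.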
---|