@@ -5,18 +5,42 @@
 #define CUTOFF_WRITEBACK	40
 #define CUTOFF_WRITEBACK_SYNC	70
 
+#define CUTOFF_WRITEBACK_MAX		70
+#define CUTOFF_WRITEBACK_SYNC_MAX	90
+
 #define MAX_WRITEBACKS_IN_PASS	5
 #define MAX_WRITESIZE_IN_PASS	5000	/* *512b */
 
 #define WRITEBACK_RATE_UPDATE_SECS_MAX		60
 #define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5
 
+#define BCH_AUTO_GC_DIRTY_THRESHOLD	50
+
+#define BCH_DIRTY_INIT_THRD_MAX	12
 /*
  * 14 (16384ths) is chosen here as something that each backing device
  * should be a reasonable fraction of the share, and not to blow up
  * until individual backing devices are a petabyte.
  */
 #define WRITEBACK_SHARE_SHIFT	14
+
+struct bch_dirty_init_state;
+struct dirty_init_thrd_info {
+	struct bch_dirty_init_state	*state;
+	struct task_struct		*thread;
+};
+
+struct bch_dirty_init_state {
+	struct cache_set		*c;
+	struct bcache_device		*d;
+	int				total_threads;
+	int				key_idx;
+	spinlock_t			idx_lock;
+	atomic_t			started;
+	atomic_t			enough;
+	wait_queue_head_t		wait;
+	struct dirty_init_thrd_info	infos[BCH_DIRTY_INIT_THRD_MAX];
+};
 
 static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
 {
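The new bch_dirty_init_state ties up to BCH_DIRTY_INIT_THRD_MAX worker threads together: key_idx, guarded by idx_lock, hands out the next top-level btree key to scan; enough signals that the keys are exhausted; and started/wait let the caller block until every worker has exited. A minimal sketch of the claim loop a worker might run, where bch_next_dirty_key() is an assumed helper and not a function from this patch:

/* Sketch of a per-thread worker; bch_next_dirty_key() is hypothetical. */
static int dirty_init_thread(void *arg)
{
	struct dirty_init_thrd_info *info = arg;
	struct bch_dirty_init_state *state = info->state;
	int cur_idx;

	while (!atomic_read(&state->enough)) {
		/* Claim the next unscanned key index under the lock. */
		spin_lock(&state->idx_lock);
		cur_idx = state->key_idx++;
		spin_unlock(&state->idx_lock);

		/*
		 * Assumed helper: count dirty sectors under the cur_idx'th
		 * top-level key, returning false once no key remains.
		 */
		if (!bch_next_dirty_key(state->c, state->d, cur_idx))
			atomic_set(&state->enough, 1);
	}

	/* Each exiting thread drops started and pokes the waiter. */
	atomic_dec(&state->started);
	wake_up(&state->wait);
	return 0;
}

The caller would presumably spawn state->total_threads of these via kthread_run() and then wait_event() on state->wait until started drops to zero.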
@@ -68,6 +92,9 @@
 	}
 }
 
+extern unsigned int bch_cutoff_writeback;
+extern unsigned int bch_cutoff_writeback_sync;
+
 static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 				    unsigned int cache_mode, bool would_skip)
 {
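Exposing the cutoffs as extern variables implies they are defined and validated elsewhere, presumably as module parameters wired up with module_param(). A sketch of what load-time validation could look like using the *_MAX caps added above; the function name and exact policy are assumptions, not taken from this hunk:

/* Sketch: clamp user-supplied cutoffs to sane values at module load. */
static void check_writeback_cutoffs(void)
{
	if (!bch_cutoff_writeback)
		bch_cutoff_writeback = CUTOFF_WRITEBACK;
	if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX)
		bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;

	if (!bch_cutoff_writeback_sync)
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
	if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX)
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;

	/* The sync cutoff must not be lower than the regular one. */
	if (bch_cutoff_writeback > bch_cutoff_writeback_sync)
		bch_cutoff_writeback = bch_cutoff_writeback_sync;
}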
@@ -75,7 +102,7 @@
 
 	if (cache_mode != CACHE_MODE_WRITEBACK ||
 	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
-	    in_use > CUTOFF_WRITEBACK_SYNC)
+	    in_use > bch_cutoff_writeback_sync)
 		return false;
 
 	if (bio_op(bio) == REQ_OP_DISCARD)
@@ -91,7 +118,7 @@
 
 	return (op_is_sync(bio->bi_opf) ||
 		bio->bi_opf & (REQ_META|REQ_PRIO) ||
-		in_use <= CUTOFF_WRITEBACK);
+		in_use <= bch_cutoff_writeback);
 }
 
 static inline void bch_writeback_queue(struct cached_dev *dc)
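Read together, the two should_writeback() hunks replace the hard-coded thresholds with the tunables: any write is refused outright once in_use exceeds bch_cutoff_writeback_sync (or while the device is detaching), and, ignoring the elided checks in between, the final test admits sync, REQ_META, and REQ_PRIO bios unconditionally while plain writes must also satisfy in_use <= bch_cutoff_writeback. The new CUTOFF_WRITEBACK_MAX and CUTOFF_WRITEBACK_SYNC_MAX defines bound how far those tunables can be raised above the old fixed 40/70 defaults: to 70 and 90 percent respectively.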