```diff
@@ -145,8 +145,8 @@
                         continue;
                 }

-                io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
-                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+                io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
+                                         DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
                              GFP_KERNEL);
                 if (!io)
                         goto err;
```
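This hunk replaces the open-coded `sizeof(struct moving_io) + sizeof(struct bio_vec) * n` arithmetic with the kernel's `struct_size()` helper from `<linux/overflow.h>`, which computes the size of a structure with a trailing flexible-array member and saturates to `SIZE_MAX` on overflow, so `kzalloc()` fails cleanly instead of returning an undersized buffer. The sketch below is a simplified userspace illustration of that difference, not the kernel macro itself; `struct demo_io`, `struct demo_vec`, and the helper names are hypothetical stand-ins for `struct moving_io` and its inline bio_vec array.

```c
/*
 * Simplified illustration of what the struct_size() conversion buys.
 * This is NOT the kernel macro; the real one lives in <linux/overflow.h>
 * and uses check_mul_overflow()/check_add_overflow(). The types below are
 * made-up stand-ins for struct moving_io and its trailing bio_vec array.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_vec { void *page; unsigned int len, off; };

struct demo_io {
	unsigned long key_sectors;
	struct demo_vec inline_vecs[];	/* flexible array member */
};

/* Open-coded size: wraps silently if nvecs is too large. */
static size_t size_open_coded(size_t nvecs)
{
	return sizeof(struct demo_io) + sizeof(struct demo_vec) * nvecs;
}

/* struct_size()-style: saturate to SIZE_MAX so the allocator returns NULL. */
static size_t size_checked(size_t nvecs)
{
	size_t bytes;

	if (__builtin_mul_overflow(nvecs, sizeof(struct demo_vec), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct demo_io), &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	size_t huge = SIZE_MAX / sizeof(struct demo_vec) + 1;

	printf("open-coded: %zu (wrapped)\n", size_open_coded(huge));
	printf("checked:    %zu (SIZE_MAX)\n", size_checked(huge));
	return 0;
}
```

The overflow builtins used here are available in GCC 5+ and Clang; in the kernel itself the same checks come from the `check_*_overflow()` helpers behind `struct_size()`.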
```diff
@@ -196,50 +196,48 @@

 void bch_moving_gc(struct cache_set *c)
 {
-        struct cache *ca;
+        struct cache *ca = c->cache;
         struct bucket *b;
-        unsigned int i;
+        unsigned long sectors_to_move, reserve_sectors;

         if (!c->copy_gc_enabled)
                 return;

         mutex_lock(&c->bucket_lock);

-        for_each_cache(ca, c, i) {
-                unsigned int sectors_to_move = 0;
-                unsigned int reserve_sectors = ca->sb.bucket_size *
+        sectors_to_move = 0;
+        reserve_sectors = ca->sb.bucket_size *
                         fifo_used(&ca->free[RESERVE_MOVINGGC]);

-                ca->heap.used = 0;
+        ca->heap.used = 0;

-                for_each_bucket(b, ca) {
-                        if (GC_MARK(b) == GC_MARK_METADATA ||
-                            !GC_SECTORS_USED(b) ||
-                            GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-                            atomic_read(&b->pin))
-                                continue;
+        for_each_bucket(b, ca) {
+                if (GC_MARK(b) == GC_MARK_METADATA ||
+                    !GC_SECTORS_USED(b) ||
+                    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
+                    atomic_read(&b->pin))
+                        continue;

-                        if (!heap_full(&ca->heap)) {
-                                sectors_to_move += GC_SECTORS_USED(b);
-                                heap_add(&ca->heap, b, bucket_cmp);
-                        } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-                                sectors_to_move -= bucket_heap_top(ca);
-                                sectors_to_move += GC_SECTORS_USED(b);
+                if (!heap_full(&ca->heap)) {
+                        sectors_to_move += GC_SECTORS_USED(b);
+                        heap_add(&ca->heap, b, bucket_cmp);
+                } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+                        sectors_to_move -= bucket_heap_top(ca);
+                        sectors_to_move += GC_SECTORS_USED(b);

-                                ca->heap.data[0] = b;
-                                heap_sift(&ca->heap, 0, bucket_cmp);
-                        }
+                        ca->heap.data[0] = b;
+                        heap_sift(&ca->heap, 0, bucket_cmp);
                 }
-
-                while (sectors_to_move > reserve_sectors) {
-                        heap_pop(&ca->heap, b, bucket_cmp);
-                        sectors_to_move -= GC_SECTORS_USED(b);
-                }
-
-                while (heap_pop(&ca->heap, b, bucket_cmp))
-                        SET_GC_MOVE(b, 1);
         }

+        while (sectors_to_move > reserve_sectors) {
+                heap_pop(&ca->heap, b, bucket_cmp);
+                sectors_to_move -= GC_SECTORS_USED(b);
+        }
+
+        while (heap_pop(&ca->heap, b, bucket_cmp))
+                SET_GC_MOVE(b, 1);
+
         mutex_unlock(&c->bucket_lock);

         c->moving_gc_keys.last_scanned = ZERO_KEY;
```
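This hunk keeps the existing selection policy but hoists it out of the removed `for_each_cache()` loop, since a cache set now has a single cache at `c->cache`: candidate buckets are collected into `ca->heap` keyed by `GC_SECTORS_USED()`, the most expensive candidates are popped until the remaining live data fits within the space free in the `RESERVE_MOVINGGC` reserve, and the survivors are flagged with `SET_GC_MOVE()`. The sketch below is a generic userspace illustration of that budget-bounded selection; `gc_bucket`, `sort_by_used`, and `pick_buckets_to_move` are made-up names, and a plain sort stands in for bcache's heap macros.

```c
/*
 * Hedged sketch of the selection pattern above: total up candidate buckets,
 * shed the most expensive ones while the total exceeds the moving-GC
 * reserve, then mark whatever remains for copy GC. Illustration only; not
 * the bcache heap API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct gc_bucket {
	unsigned int sectors_used;	/* live sectors still in the bucket */
	bool move;			/* set when chosen for copy GC */
};

/* Order candidates so the cheapest-to-evacuate buckets come first. */
static void sort_by_used(struct gc_bucket **v, size_t n)
{
	for (size_t i = 1; i < n; i++)
		for (size_t j = i; j > 0 &&
		     v[j]->sectors_used < v[j - 1]->sectors_used; j--) {
			struct gc_bucket *tmp = v[j];

			v[j] = v[j - 1];
			v[j - 1] = tmp;
		}
}

static void pick_buckets_to_move(struct gc_bucket **v, size_t n,
				 unsigned long reserve_sectors)
{
	unsigned long sectors_to_move = 0;
	size_t keep = n;

	sort_by_used(v, n);

	for (size_t i = 0; i < n; i++)
		sectors_to_move += v[i]->sectors_used;

	/* Counterpart of: while (sectors_to_move > reserve_sectors) heap_pop(...) */
	while (keep > 0 && sectors_to_move > reserve_sectors)
		sectors_to_move -= v[--keep]->sectors_used;

	/* Counterpart of: while (heap_pop(...)) SET_GC_MOVE(b, 1) */
	for (size_t i = 0; i < keep; i++)
		v[i]->move = true;
}

int main(void)
{
	struct gc_bucket b[] = { { 12, false }, { 3, false }, { 30, false }, { 7, false } };
	struct gc_bucket *v[] = { &b[0], &b[1], &b[2], &b[3] };

	pick_buckets_to_move(v, 4, 20);	/* pretend the reserve is 20 sectors */

	for (size_t i = 0; i < 4; i++)
		printf("bucket %zu: used=%2u move=%d\n", i,
		       b[i].sectors_used, (int)b[i].move);
	return 0;
}
```

With a 20-sector reserve the sketch keeps only the 3- and 7-sector buckets, the same "cheapest data first, bounded by the reserve" behavior the two while loops in the patched `bch_moving_gc()` implement.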