..
         /* Key/pointer for this btree node */
         BKEY_PADDED(key);

-        /* Single bit - set when accessed, cleared by shrinker */
-        unsigned long accessed;
         unsigned long seq;
         struct rw_semaphore lock;
         struct cache_set *c;
..
         struct btree_write writes[2];
         struct bio *bio;
 };
+
+
+

 #define BTREE_FLAG(flag) \
 static inline bool btree_node_ ## flag(struct btree *b) \
..

 static inline void set_gc_sectors(struct cache_set *c)
 {
-        atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
+        atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
 }

 void bkey_put(struct cache_set *c, struct bkey *k);
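
The only functional change in set_gc_sectors() above is where the bucket size is read from: it now comes from the cache's own superblock (c->cache->sb) rather than from the cache_set, while the gc budget is computed exactly as before. As a rough worked example with made-up geometry (these numbers are assumptions, not values from any real device), the threshold amounts to one sixteenth of the cache's capacity in sectors:

    /*
     * Sketch with assumed numbers: a cache of 262144 buckets of
     * 1024 sectors (512-byte sectors) is 128 GiB, so gc is considered
     * once roughly 8 GiB has been written.
     */
    unsigned long bucket_size = 1024;            /* sectors per bucket (assumed) */
    unsigned long nbuckets = 262144;             /* buckets in the cache (assumed) */
    long budget = bucket_size * nbuckets / 16;   /* 16777216 sectors == 8 GiB */
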
..
         short lock;

         unsigned int insert_collision:1;
+};
+
+struct btree_check_state;
+struct btree_check_info {
+        struct btree_check_state *state;
+        struct task_struct *thread;
+        int result;
+};
+
+#define BCH_BTR_CHKTHREAD_MAX 12
+struct btree_check_state {
+        struct cache_set *c;
+        int total_threads;
+        int key_idx;
+        spinlock_t idx_lock;
+        atomic_t started;
+        atomic_t enough;
+        wait_queue_head_t wait;
+        struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX];
 };

 static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
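
The btree_check_state added above is shared state for the parallel btree check threads: each worker (a btree_check_info) claims the next top-level key index through key_idx under idx_lock, while started, enough and wait are presumably used to track worker startup and completion. The following is only an illustrative sketch of that claiming pattern, assuming the definitions above; example_check_thread(), nr_root_keys() and check_subtree() are hypothetical names, not the helpers used in btree.c:

    /*
     * Illustrative sketch only, assuming the struct definitions above.
     * nr_root_keys() and check_subtree() are hypothetical helpers.
     */
    static int example_check_thread(void *arg)
    {
            struct btree_check_info *info = arg;
            struct btree_check_state *state = info->state;
            int idx, ret = 0;

            for (;;) {
                    /* Claim the index of the next unchecked root key. */
                    spin_lock(&state->idx_lock);
                    idx = state->key_idx++;
                    spin_unlock(&state->idx_lock);

                    if (idx >= nr_root_keys(state->c))
                            break;  /* every root key has been claimed */

                    ret = check_subtree(state->c, idx);
                    if (ret)
                            break;
            }

            info->result = ret;
            /* The spawner sleeps on state->wait until all workers report in. */
            wake_up(&state->wait);
            return ret;
    }
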
..
 void bch_moving_gc(struct cache_set *c);
 int bch_btree_check(struct cache_set *c);
 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
+void bch_cannibalize_unlock(struct cache_set *c);

 static inline void wake_up_gc(struct cache_set *c)
 {
         wake_up(&c->gc_wait);
 }
+
+static inline void force_wake_up_gc(struct cache_set *c)
+{
+        /*
+         * The garbage collection thread only runs while sectors_to_gc < 0,
+         * so calling wake_up_gc() will not start the gc thread if
+         * sectors_to_gc is not a negative value.
+         * Therefore sectors_to_gc is set to -1 here before waking up the
+         * gc thread with wake_up_gc(); gc_should_run() then gives the gc
+         * thread a chance to run. "A chance" because c->sectors_to_gc may
+         * still be set to another positive value before gc_should_run()
+         * is evaluated, so this routine cannot guarantee that the gc
+         * thread will actually be woken up to run.
+         */
+        atomic_set(&c->sectors_to_gc, -1);
+        wake_up_gc(c);
+}
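
A caller that wants to request a gc pass explicitly, rather than waiting for the write budget to run out, would therefore use the new helper instead of wake_up_gc(). A minimal sketch, with example_request_gc() as a hypothetical caller:

    /* Sketch: explicitly request a gc pass regardless of recent write traffic. */
    static void example_request_gc(struct cache_set *c)
    {
            /* wake_up_gc(c) alone may be ignored while sectors_to_gc is positive. */
            force_wake_up_gc(c);
    }
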
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define bcache_btree(fn, key, b, op, ...) \
+({ \
+        int _r, l = (b)->level - 1; \
+        bool _w = l <= (op)->lock; \
+        struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
+                                                  _w, b); \
+        if (!IS_ERR(_child)) { \
+                _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
+                rw_unlock(_w, _child); \
+        } else \
+                _r = PTR_ERR(_child); \
+        _r; \
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define bcache_btree_root(fn, c, op, ...) \
+({ \
+        int _r = -EINTR; \
+        do { \
+                struct btree *_b = (c)->root; \
+                bool _w = insert_lock(op, _b); \
+                rw_lock(_w, _b, _b->level); \
+                if (_b == (c)->root && \
+                    _w == insert_lock(op, _b)) { \
+                        _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+                } \
+                rw_unlock(_w, _b); \
+                bch_cannibalize_unlock(c); \
+                if (_r == -EINTR) \
+                        schedule(); \
+        } while (_r == -EINTR); \
+ \
+        finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
+        _r; \
+})

 #define MAP_DONE 0
 #define MAP_CONTINUE 1
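
Because both macros token-paste "bch_btree_" onto their first argument, the recursing function has to be named bch_btree_<fn>. Below is a simplified, illustrative sketch of the usual pattern (modeled on the existing map-nodes walker in btree.c); bch_btree_example_recurse() and bch_btree_example() are hypothetical names, while bch_btree_iter_init(), bch_btree_iter_next_filter() and bch_ptr_bad() are bcache's existing bset helpers:

    /*
     * Sketch of a recursive walker using bcache_btree()/bcache_btree_root().
     * The names bch_btree_example_recurse/bch_btree_example are made up.
     */
    static int bch_btree_example_recurse(struct btree *b, struct btree_op *op,
                                         struct bkey *from)
    {
            int ret = MAP_CONTINUE;

            if (b->level) {
                    struct bkey *k;
                    struct btree_iter iter;

                    bch_btree_iter_init(&b->keys, &iter, from);

                    while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
                                                           bch_ptr_bad))) {
                            /* Lock, fetch and recurse into the child pointed to by k. */
                            ret = bcache_btree(example_recurse, k, b, op, from);
                            from = NULL;
                            if (ret != MAP_CONTINUE)
                                    return ret;
                    }
            }

            /* ... do the per-leaf (or per-node) work here ... */
            return ret;
    }

    /* Entry point: restarts from the root whenever the recursion returns -EINTR. */
    static int bch_btree_example(struct cache_set *c, struct btree_op *op)
    {
            return bcache_btree_root(example_recurse, c, op, NULL);
    }

Returning -EINTR from anywhere in the recursion (after raising op->lock) makes bcache_btree_root() retry the whole traversal with the stronger lock, as the comment block above describes.
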
..
                                struct bkey *k);
 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
                        struct bkey *from, btree_map_keys_fn *fn, int flags);
+int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+                               struct bkey *from, btree_map_keys_fn *fn,
+                               int flags);

 typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
