| .. | .. |
| 121 | 121 | /* Key/pointer for this btree node */ |
| 122 | 122 | BKEY_PADDED(key); |
| 123 | 123 | |
| 124 | | - /* Single bit - set when accessed, cleared by shrinker */ |
| 125 | | - unsigned long accessed; |
| 126 | 124 | unsigned long seq; |
| 127 | 125 | struct rw_semaphore lock; |
| 128 | 126 | struct cache_set *c; |
| .. | .. |
| 146 | 144 | struct btree_write writes[2]; |
| 147 | 145 | struct bio *bio; |
| 148 | 146 | }; |
| | 147 | + |
| | 148 | + |
| | 149 | + |
| 149 | 150 | |
| 150 | 151 | #define BTREE_FLAG(flag) \ |
| 151 | 152 | static inline bool btree_node_ ## flag(struct btree *b) \ |
| .. | .. |
| 193 | 194 | |
| 194 | 195 | static inline void set_gc_sectors(struct cache_set *c) |
| 195 | 196 | { |
| 196 | | - atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16); |
| | 197 | + atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16); |
| 197 | 198 | } |
| 198 | 199 | |
| 199 | 200 | void bkey_put(struct cache_set *c, struct bkey *k); |
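This hunk reflects the move to a single cache per cache set: the superblock is now read through c->cache->sb rather than a copy on the cache set itself. The threshold formula is unchanged: gc is scheduled once writes have consumed one sixteenth of the cache's sectors. A worked example with illustrative numbers (not taken from the source):

    /* Illustrative figures only: a 1 TiB cache with 512 KiB buckets.
     * bucket_size is in 512-byte sectors, so 512 KiB = 1024 sectors. */
    uint64_t bucket_size   = 1024;            /* sectors per bucket */
    uint64_t nbuckets      = 2 * 1024 * 1024; /* 1 TiB / 512 KiB */
    uint64_t sectors_to_gc = bucket_size * nbuckets / 16;
    /* 134217728 sectors, i.e. 64 GiB of writes before gc is scheduled */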
| .. | .. |
| 216 | 217 | short lock; |
| 217 | 218 | |
| 218 | 219 | unsigned int insert_collision:1; |
| | 220 | +}; |
| | 221 | + |
| | 222 | +struct btree_check_state; |
| | 223 | +struct btree_check_info { |
| | 224 | + struct btree_check_state *state; |
| | 225 | + struct task_struct *thread; |
| | 226 | + int result; |
| | 227 | +}; |
| | 228 | + |
| | 229 | +#define BCH_BTR_CHKTHREAD_MAX 12 |
| | 230 | +struct btree_check_state { |
| | 231 | + struct cache_set *c; |
| | 232 | + int total_threads; |
| | 233 | + int key_idx; |
| | 234 | + spinlock_t idx_lock; |
| | 235 | + atomic_t started; |
| | 236 | + atomic_t enough; |
| | 237 | + wait_queue_head_t wait; |
| | 238 | + struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX]; |
| 219 | 239 | }; |
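The two structs added above coordinate a pool of kthreads for the initial btree check: btree_check_state holds the shared cursor (key_idx, protected by idx_lock) plus start/stop bookkeeping (started counts launched workers, enough flags that no more are needed), and each btree_check_info pairs one kthread with its result slot. A minimal sketch of how a worker claims work under these fields (the function name is illustrative, and this is far simpler than the patch's actual checker thread in btree.c):

    /* Illustrative worker sketch: each checker thread claims root-level
     * key indexes one at a time under idx_lock, so no two workers ever
     * walk the same subtree. */
    static int example_check_thread(void *arg)
    {
        struct btree_check_info *info = arg;
        struct btree_check_state *state = info->state;
        int cur_idx;

        spin_lock(&state->idx_lock);
        cur_idx = state->key_idx++;
        spin_unlock(&state->idx_lock);

        /* ... check the subtree under root key cur_idx, repeating the
         * claim until the root node runs out of keys ... */

        info->result = 0;           /* report status back */
        wake_up(&state->wait);      /* the launcher waits on state->wait */
        return 0;
    }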
| 220 | 240 | |
| 221 | 241 | static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) |
| .. | .. |
| 268 | 288 | wake_up(&c->gc_wait); |
| 269 | 289 | } |
| 270 | 290 | |
| | 291 | +static inline void force_wake_up_gc(struct cache_set *c) |
| | 292 | +{ |
| | 293 | + /* |
| | 294 | + * The garbage collection thread only runs when sectors_to_gc < 0; |
| | 295 | + * calling wake_up_gc() won't start the gc thread if sectors_to_gc |
| | 296 | + * is not a negative value. |
| | 297 | + * Therefore sectors_to_gc is set to -1 here, before waking up the |
| | 298 | + * gc thread by calling wake_up_gc(). Then gc_should_run() will |
| | 299 | + * give the gc thread a chance to run. "Give a chance" means that |
| | 300 | + * before reaching gc_should_run(), there is still a possibility |
| | 301 | + * that c->sectors_to_gc gets set back to a positive value, so |
| | 302 | + * this routine cannot 100% guarantee that the gc thread will be |
| | 303 | + * woken up to run. |
| | 304 | + */ |
| | 305 | + atomic_set(&c->sectors_to_gc, -1); |
| | 306 | + wake_up_gc(c); |
| | 307 | +} |
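force_wake_up_gc() exists for callers that want gc now rather than after the next 1/16-of-cache write burst, such as a user-space trigger. A hedged sketch of such a caller (the hook signature is hypothetical; bcache's real sysfs code goes through its own SHOW/STORE macros):

    /* Hypothetical sysfs-style hook: force an immediate gc pass. */
    static ssize_t trigger_gc_store(struct cache_set *c,
                                    const char *buf, size_t size)
    {
        force_wake_up_gc(c);    /* set sectors_to_gc = -1, then wake gc */
        return size;
    }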
| | 308 | + |
| | 309 | +/* |
| | 310 | + * These macros are for recursing down the btree - they handle the details of |
| | 311 | + * locking and looking up nodes in the cache for you. They're best treated as |
| | 312 | + * mere syntax when reading code that uses them. |
| | 313 | + * |
| | 314 | + * op->lock determines whether we take a read or a write lock at a given depth. |
| | 315 | + * If you've got a read lock and find that you need a write lock (i.e. you're |
| | 316 | + * going to have to split), set op->lock and return -EINTR; btree_root() will |
| | 317 | + * call you again and you'll have the correct lock. |
| | 318 | + */ |
| | 319 | + |
| | 320 | +/** |
| | 321 | + * bcache_btree - recurse down the btree on a specified key |
| | 322 | + * @fn: function to call, which will be passed the child node |
| | 323 | + * @key: key to recurse on |
| | 324 | + * @b: parent btree node |
| | 325 | + * @op: pointer to struct btree_op |
| | 326 | + */ |
| | 327 | +#define bcache_btree(fn, key, b, op, ...) \ |
| | 328 | +({ \ |
| | 329 | + int _r, l = (b)->level - 1; \ |
| | 330 | + bool _w = l <= (op)->lock; \ |
| | 331 | + struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ |
| | 332 | + _w, b); \ |
| | 333 | + if (!IS_ERR(_child)) { \ |
| | 334 | + _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ |
| | 335 | + rw_unlock(_w, _child); \ |
| | 336 | + } else \ |
| | 337 | + _r = PTR_ERR(_child); \ |
| | 338 | + _r; \ |
| | 339 | +}) |
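In practice a walker recurses by passing its own name suffix back through the macro: bcache_btree() looks up the child node for key, takes a read or write lock according to op->lock, calls bch_btree_<fn>() on it, then drops the lock. A simplified sketch of the calling pattern, modeled on the map helpers (the "example" suffix is hypothetical, and the leaf-level work is elided):

    /* Simplified recursive walker: on interior nodes, bcache_btree()
     * expands to a locked call of bch_btree_example() on each child. */
    static int bch_btree_example(struct btree *b, struct btree_op *op)
    {
        int ret = MAP_CONTINUE;
        struct bkey *k;
        struct btree_iter iter;

        bch_btree_iter_init(&b->keys, &iter, NULL);

        while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
                                               bch_ptr_bad))) {
            if (b->level) {
                ret = bcache_btree(example, k, b, op);
                if (ret != MAP_CONTINUE)
                    break;
            }
            /* ... work on leaf key k ... */
        }
        return ret;
    }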
| | 340 | + |
| | 341 | +/** |
| | 342 | + * bcache_btree_root - call a function on the root of the btree |
| | 343 | + * @fn: function to call, which will be passed the root node |
| | 344 | + * @c: cache set |
| | 345 | + * @op: pointer to struct btree_op |
| | 346 | + */ |
| | 347 | +#define bcache_btree_root(fn, c, op, ...) \ |
| | 348 | +({ \ |
| | 349 | + int _r = -EINTR; \ |
| | 350 | + do { \ |
| | 351 | + struct btree *_b = (c)->root; \ |
| | 352 | + bool _w = insert_lock(op, _b); \ |
| | 353 | + rw_lock(_w, _b, _b->level); \ |
| | 354 | + if (_b == (c)->root && \ |
| | 355 | + _w == insert_lock(op, _b)) { \ |
| | 356 | + _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ |
| | 357 | + } \ |
| | 358 | + rw_unlock(_w, _b); \ |
| | 359 | + bch_cannibalize_unlock(c); \ |
| | 360 | + if (_r == -EINTR) \ |
| | 361 | + schedule(); \ |
| | 362 | + } while (_r == -EINTR); \ |
| | 363 | + \ |
| | 364 | + finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ |
| | 365 | + _r; \ |
| | 366 | +}) |
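bcache_btree_root() retries the whole descent whenever the callee returns -EINTR, which covers both the lock-upgrade protocol described in the comment block above and the root changing while we waited for its lock. A sketch of how an entry point dispatches through it, consistent with the bch_btree_map_keys_recurse() declaration added later in this diff (not necessarily the exact body in btree.c):

    /* Sketch of an entry point: the fn suffix names
     * bch_btree_map_keys_recurse(); the macro retries the whole
     * top-down walk on -EINTR. */
    int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
                           struct bkey *from, btree_map_keys_fn *fn,
                           int flags)
    {
        return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
    }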
| | 367 | + |
| 271 | 368 | #define MAP_DONE 0 |
| 272 | 369 | #define MAP_CONTINUE 1 |
| 273 | 370 | |
| .. | .. |
| 298 | 395 | struct bkey *k); |
| 299 | 396 | int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, |
| 300 | 397 | struct bkey *from, btree_map_keys_fn *fn, int flags); |
| | 398 | +int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, |
| | 399 | + struct bkey *from, btree_map_keys_fn *fn, |
| | 400 | + int flags); |
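Exporting bch_btree_map_keys_recurse() lets callers outside btree.c (the multithreaded check and dirty-init code) walk one subtree at a time instead of always entering through the root. For reference, a hedged example of a btree_map_keys_fn callback and its invocation, following the wrapper-struct idiom the existing map users rely on (all names here are illustrative):

    /* Illustrative callback: count every key visited. The wrapper
     * embeds struct btree_op so container_of() can recover it. */
    struct count_op {
        struct btree_op op;
        unsigned int    count;
    };

    static int count_keys_fn(struct btree_op *op, struct btree *b,
                             struct bkey *k)
    {
        struct count_op *co = container_of(op, struct count_op, op);

        co->count++;
        return MAP_CONTINUE;    /* MAP_DONE would end the walk early */
    }

    /* Usage: read-lock the tree and walk all keys from the start. */
    struct count_op co = { .count = 0 };

    bch_btree_op_init(&co.op, -1);
    bch_btree_map_keys(&co.op, c, &ZERO_KEY, count_keys_fn, 0);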
| 301 | 401 | |
| 302 | 402 | typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k); |
| 303 | 403 | |
|---|