2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/md/bcache/btree.h
@@ -121,8 +121,6 @@
 	/* Key/pointer for this btree node */
 	BKEY_PADDED(key);
 
-	/* Single bit - set when accessed, cleared by shrinker */
-	unsigned long accessed;
 	unsigned long seq;
 	struct rw_semaphore lock;
 	struct cache_set *c;
@@ -146,6 +144,9 @@
 	struct btree_write writes[2];
 	struct bio *bio;
 };
+
+
+
 
 #define BTREE_FLAG(flag) \
 static inline bool btree_node_ ## flag(struct btree *b) \
@@ -193,7 +194,7 @@
 
 static inline void set_gc_sectors(struct cache_set *c)
 {
-	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
+	atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
 }
 
 void bkey_put(struct cache_set *c, struct bkey *k);
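For scale: the expression above arms the gc trigger at one sixteenth of the cache's capacity in sectors (bucket_size, in sectors, times nbuckets). As a purely illustrative example with assumed values, not numbers from this patch: sb.bucket_size = 1024 sectors (512 KiB buckets) and nbuckets = 262144 give sectors_to_gc = 1024 * 262144 / 16 = 16777216 sectors, i.e. roughly 8 GiB of writes before gc is considered. The change itself only switches the superblock reference from the cache_set copy (c->sb) to the single cache member (c->cache->sb).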
@@ -216,6 +217,25 @@
 	short lock;
 
 	unsigned int insert_collision:1;
+};
+
+struct btree_check_state;
+struct btree_check_info {
+	struct btree_check_state *state;
+	struct task_struct *thread;
+	int result;
+};
+
+#define BCH_BTR_CHKTHREAD_MAX 12
+struct btree_check_state {
+	struct cache_set *c;
+	int total_threads;
+	int key_idx;
+	spinlock_t idx_lock;
+	atomic_t started;
+	atomic_t enough;
+	wait_queue_head_t wait;
+	struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX];
 };
 
 static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
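The two structs added above coordinate a pool of btree-check kthreads. Below is a minimal sketch, under stated assumptions, of how a worker could use them: key_idx acts as a shared cursor protected by idx_lock, started counts live workers, and wait is where the spawning thread sleeps. bch_btree_root_keys() and bch_check_subtree() are hypothetical helpers invented for this illustration; they are not functions from btree.c.

/* Illustrative worker only; see the assumptions in the note above. */
static int example_btree_check_thread(void *arg)
{
	struct btree_check_info *info = arg;
	struct btree_check_state *state = info->state;
	int ret = 0;

	for (;;) {
		int cur_idx;

		/* Claim the next top-level key index under idx_lock. */
		spin_lock(&state->idx_lock);
		cur_idx = state->key_idx++;
		spin_unlock(&state->idx_lock);

		if (cur_idx >= bch_btree_root_keys(state->c))	/* hypothetical */
			break;

		ret = bch_check_subtree(state->c, cur_idx);	/* hypothetical */
		if (ret)
			break;
	}

	info->result = ret;
	/* Last worker to finish wakes whoever sleeps on state->wait. */
	if (atomic_dec_and_test(&state->started))
		wake_up(&state->wait);

	return ret;
}

The enough flag is left out of this sketch; presumably it lets the spawner record that there are already more workers than remaining top-level keys, so surplus threads (up to BCH_BTR_CHKTHREAD_MAX) can exit immediately.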
@@ -262,11 +282,89 @@
 void bch_moving_gc(struct cache_set *c);
 int bch_btree_check(struct cache_set *c);
 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
+void bch_cannibalize_unlock(struct cache_set *c);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
 	wake_up(&c->gc_wait);
 }
+
+static inline void force_wake_up_gc(struct cache_set *c)
+{
+	/*
+	 * The garbage collection thread only works when sectors_to_gc < 0,
+	 * so calling wake_up_gc() won't start the gc thread if sectors_to_gc
+	 * is not a negative value.
+	 * Therefore sectors_to_gc is set to -1 here, before waking up the
+	 * gc thread with wake_up_gc(). Then gc_should_run() will give the
+	 * gc thread a chance to run. "A chance" because, before
+	 * gc_should_run() is evaluated, c->sectors_to_gc may still be set
+	 * to some other positive value, so this routine does not guarantee
+	 * that the gc thread will actually be woken up and run.
+	 */
+	atomic_set(&c->sectors_to_gc, -1);
+	wake_up_gc(c);
+}
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define bcache_btree(fn, key, b, op, ...) \
+({ \
+	int _r, l = (b)->level - 1; \
+	bool _w = l <= (op)->lock; \
+	struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
+						  _w, b); \
+	if (!IS_ERR(_child)) { \
+		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
+		rw_unlock(_w, _child); \
+	} else \
+		_r = PTR_ERR(_child); \
+	_r; \
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define bcache_btree_root(fn, c, op, ...) \
+({ \
+	int _r = -EINTR; \
+	do { \
+		struct btree *_b = (c)->root; \
+		bool _w = insert_lock(op, _b); \
+		rw_lock(_w, _b, _b->level); \
+		if (_b == (c)->root && \
+		    _w == insert_lock(op, _b)) { \
+			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+		} \
+		rw_unlock(_w, _b); \
+		bch_cannibalize_unlock(c); \
+		if (_r == -EINTR) \
+			schedule(); \
+	} while (_r == -EINTR); \
+ \
+	finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
+	_r; \
+})
 
 #define MAP_DONE 0
 #define MAP_CONTINUE 1
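As an illustration of how the two macros are meant to be paired, here is a hedged sketch of a recursive node walker in the style of the map_nodes helpers in btree.c (it paraphrases that pattern, it is not quoted from the patch). The bch_btree_ prefix on the recursing function is mandatory because both macros token-paste it onto fn; example_walk is a hypothetical name, and btree_map_nodes_fn, MAP_ALL_NODES and the bch_btree_iter_* helpers from bset.h are assumed to be available as in the existing header.

/* Sketch only: a recursive walker driven by bcache_btree()/bcache_btree_root(). */
static int bch_btree_example_walk(struct btree *b, struct btree_op *op,
				  struct bkey *from, btree_map_nodes_fn *fn,
				  int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			/* Descend; the macro looks up, locks and unlocks the child. */
			ret = bcache_btree(example_walk, k, b, op,
					   from, fn, flags);
			from = NULL;
			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

static int example_walk_all_nodes(struct btree_op *op, struct cache_set *c,
				  struct bkey *from, btree_map_nodes_fn *fn,
				  int flags)
{
	/* The root macro handles the -EINTR retry loop and the cannibalize unlock. */
	return bcache_btree_root(example_walk, c, op, from, fn, flags);
}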
@@ -298,6 +396,9 @@
 			       struct bkey *k);
 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
 		       struct bkey *from, btree_map_keys_fn *fn, int flags);
+int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+			       struct bkey *from, btree_map_keys_fn *fn,
+			       int flags);
 
 typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
 
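Exporting bch_btree_map_keys_recurse() lets code outside btree.c resume a key walk from a specific child node through the bcache_btree() macro, instead of always starting from the root via bch_btree_map_keys(). A hedged sketch of such a caller follows (hypothetical function name and flags value; the caller is assumed to already hold the appropriate lock on c->root, as the in-tree macro users do).

/* Sketch only: descend from the (already locked) root into one child and map its keys. */
static int example_map_child_keys(struct cache_set *c, struct btree_op *op,
				  struct bkey *child_key, struct bkey *from,
				  btree_map_keys_fn *fn)
{
	/* Expands to bch_btree_map_keys_recurse() on the locked child node. */
	return bcache_btree(map_keys_recurse, child_key, c->root, op,
			    from, fn, 0);
}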