2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/md/bcache/btree.c
@@ -885,7 +885,7 @@
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-static void bch_cannibalize_unlock(struct cache_set *c)
+void bch_cannibalize_unlock(struct cache_set *c)
 {
 	spin_lock(&c->btree_cannibalize_lock);
 	if (c->btree_cache_alloc_lock == current) {
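
Dropping "static" makes bch_cannibalize_unlock() visible outside btree.c, so other bcache code can release the cannibalize lock held by the current task; the bch_btree_check() hunk at the end of this patch relies on the same helper. A hedged caller-side sketch, assuming bcache's internal types and with run_traversal() as a hypothetical stand-in for any traversal that may call mca_cannibalize():

	/* Sketch only: run_traversal() is hypothetical, not from the patch. */
	static int traverse_and_release(struct cache_set *c, struct btree_op *op)
	{
		int ret = run_traversal(c, op);	/* may take the cannibalize lock */

		/* safe even if we never took it: the helper checks for 'current' */
		bch_cannibalize_unlock(c);
		return ret;
	}
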
@@ -1090,10 +1090,12 @@
 					    struct btree *parent)
 {
 	BKEY_PADDED(key) k;
-	struct btree *b = ERR_PTR(-EAGAIN);
+	struct btree *b;
 
 	mutex_lock(&c->bucket_lock);
 retry:
+	/* return ERR_PTR(-EAGAIN) when it fails */
+	b = ERR_PTR(-EAGAIN);
 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
 		goto err;
 
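
The moved assignment is the substantive fix: with the error pointer set only at declaration, a pass through the retry label could leave b holding whatever the previous attempt stored (possibly NULL from a failed node grab), so the err path no longer returned a consistent ERR_PTR(-EAGAIN). A minimal sketch of the idiom, assuming bcache's internal types, with reserve_space() and grab_node() as hypothetical stand-ins for the bucket and node allocation steps:

	#include <linux/err.h>

	static struct btree *alloc_node_sketch(struct cache_set *c, bool wait)
	{
		struct btree *b;

	retry:
		b = ERR_PTR(-EAGAIN);	/* every attempt starts from here */
		if (reserve_space(c, wait))
			goto err;	/* b is a well-defined error pointer */

		b = grab_node(c);	/* hypothetical; may yield NULL */
		if (!b)
			goto retry;	/* the reset above makes this safe */
		return b;
	err:
		return b;
	}

This is also what makes the IS_ERR() conversions below sound: every failure now surfaces as an ERR_PTR, never as NULL.
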
@@ -1138,7 +1140,7 @@
 {
 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
 
-	if (!IS_ERR_OR_NULL(n)) {
+	if (!IS_ERR(n)) {
 		mutex_lock(&n->write_lock);
 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
 		bkey_copy_key(&n->key, &b->key);
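
This hunk and the four that follow are one substitution: since the allocator above now returns either a valid node or an ERR_PTR, never NULL, the NULL half of IS_ERR_OR_NULL() is dead code and IS_ERR() states the actual contract. A sketch caller (use_replacement() is hypothetical, not from the patch):

	#include <linux/err.h>

	static int use_replacement(struct btree *b)
	{
		struct btree *n = btree_node_alloc_replacement(b, NULL);

		if (IS_ERR(n))			/* NULL cannot happen any more */
			return PTR_ERR(n);	/* e.g. -EAGAIN */
		/* ... use n ... */
		return 0;
	}
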
@@ -1340,7 +1342,7 @@
 	memset(new_nodes, 0, sizeof(new_nodes));
 	closure_init_stack(&cl);
 
-	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
 		keys += r[nodes++].keys;
 
 	blocks = btree_default_blocks(b->c) * 2 / 3;
@@ -1352,7 +1354,7 @@
 
 	for (i = 0; i < nodes; i++) {
 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
-		if (IS_ERR_OR_NULL(new_nodes[i]))
+		if (IS_ERR(new_nodes[i]))
 			goto out_nocoalesce;
 	}
 
@@ -1487,7 +1489,7 @@
 	bch_keylist_free(&keylist);
 
 	for (i = 0; i < nodes; i++)
-		if (!IS_ERR_OR_NULL(new_nodes[i])) {
+		if (!IS_ERR(new_nodes[i])) {
 			btree_node_free(new_nodes[i]);
 			rw_unlock(true, new_nodes[i]);
 		}
@@ -1669,7 +1671,7 @@
 	if (should_rewrite) {
 		n = btree_node_alloc_replacement(b, NULL);
 
-		if (!IS_ERR_OR_NULL(n)) {
+		if (!IS_ERR(n)) {
 			bch_btree_node_write_sync(n);
 
 			bch_btree_set_root(n);
@@ -1968,6 +1970,15 @@
 		c->gc_stats.nodes++;
 		bch_btree_op_init(&op, 0);
 		ret = bcache_btree(check_recurse, p, c->root, &op);
+		/*
+		 * The op may have been added to cache_set's
+		 * btree_cache_wait in mca_cannibalize(); make sure it
+		 * is removed from that list and btree_cache_alloc_lock
+		 * is released before the on-stack op is freed,
+		 * otherwise btree_cache_wait will be corrupted.
+		 */
+		bch_cannibalize_unlock(c);
+		finish_wait(&c->btree_cache_wait, &(&op)->wait);
 		if (ret)
 			goto out;
 	}
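
The ordering here is the point of the patch: op lives on bch_btree_check()'s stack, and mca_cannibalize() may have linked op->wait into c->btree_cache_wait with prepare_to_wait() and returned without sleeping. Returning before finish_wait() would leave the wait queue pointing into a dead stack frame, which is the damage the comment describes; bch_cannibalize_unlock() likewise drops the allocator lock recorded for the current task, which is why the first hunk exported it. A sketch of the underlying lifetime rule, using the stock wait-queue API (queue_and_return() is a hypothetical callee that may queue the entry without sleeping):

	#include <linux/sched.h>
	#include <linux/wait.h>

	static int stack_wait_sketch(struct wait_queue_head *wq)
	{
		struct wait_queue_entry wait;
		int ret;

		init_wait(&wait);
		ret = queue_and_return(wq, &wait);	/* may prepare_to_wait() */

		/* must run before 'wait' goes out of scope */
		finish_wait(wq, &wait);
		return ret;
	}

finish_wait() is safe to call whether or not the entry is still queued, which is what makes this unconditional cleanup pattern work.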