@@ -885,7 +885,7 @@
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-static void bch_cannibalize_unlock(struct cache_set *c)
+void bch_cannibalize_unlock(struct cache_set *c)
 {
         spin_lock(&c->btree_cannibalize_lock);
         if (c->btree_cache_alloc_lock == current) {
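For readers who are not deep in the bcache locking scheme: the cannibalize lock is a first-claimant lock whose owner is recorded in c->btree_cache_alloc_lock, and bch_cannibalize_unlock() only clears it when the caller is that owner, which is what makes it safe to call whenever the btree root is unlocked. Below is a minimal userspace sketch of that owner-checked unlock pattern; the toy_* names and the pthread mutex are illustrative stand-ins, not bcache or kernel APIs.

/* build: cc -pthread toy_cannibalize.c */
#include <pthread.h>
#include <stdio.h>

/* Toy model of the cannibalize lock: a mutex-protected owner record. */
struct toy_cache_set {
        pthread_mutex_t cannibalize_lock;   /* stands in for btree_cannibalize_lock */
        pthread_t       alloc_lock_owner;   /* stands in for btree_cache_alloc_lock */
        int             owned;
};

/* Try to become the single task allowed to cannibalize cached nodes. */
static int toy_cannibalize_trylock(struct toy_cache_set *c)
{
        int got = 0;

        pthread_mutex_lock(&c->cannibalize_lock);
        if (!c->owned) {
                c->owned = 1;
                c->alloc_lock_owner = pthread_self();
                got = 1;
        }
        pthread_mutex_unlock(&c->cannibalize_lock);
        return got;
}

/* Release the lock only if the calling task is the recorded owner;
 * this mirrors the `c->btree_cache_alloc_lock == current` test above. */
static void toy_cannibalize_unlock(struct toy_cache_set *c)
{
        pthread_mutex_lock(&c->cannibalize_lock);
        if (c->owned && pthread_equal(c->alloc_lock_owner, pthread_self()))
                c->owned = 0;
        pthread_mutex_unlock(&c->cannibalize_lock);
}

int main(void)
{
        struct toy_cache_set c = { .cannibalize_lock = PTHREAD_MUTEX_INITIALIZER };

        if (toy_cannibalize_trylock(&c))
                printf("cannibalize lock acquired\n");
        toy_cannibalize_unlock(&c);     /* owner: releases */
        toy_cannibalize_unlock(&c);     /* already released: harmless no-op */
        return 0;
}

The final hunk of this diff relies on exactly that owner check: the bch_cannibalize_unlock() call added there is a harmless no-op whenever the current task never took the lock.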
@@ -1090,10 +1090,12 @@
                              struct btree *parent)
 {
         BKEY_PADDED(key) k;
-        struct btree *b = ERR_PTR(-EAGAIN);
+        struct btree *b;
 
         mutex_lock(&c->bucket_lock);
 retry:
+        /* return ERR_PTR(-EAGAIN) when it fails */
+        b = ERR_PTR(-EAGAIN);
         if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
                 goto err;
 
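The effect of this hunk is that __bch_btree_node_alloc() re-arms its failure value at the top of every retry, so a failed attempt returns ERR_PTR(-EAGAIN) rather than NULL or a value left over from a previous attempt. Here is a minimal, self-contained userspace sketch of that convention; err_ptr()/is_err()/ptr_err() are toy stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(), and toy_node_alloc() is an illustrative shape, not the bcache function.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy versions of the kernel's error-pointer helpers: small negative
 * errno values are encoded at the very top of the address space. */
#define MAX_ERRNO 4095

static inline void *err_ptr(long error)    { return (void *)error; }
static inline long ptr_err(const void *p)  { return (long)p; }
static inline int is_err(const void *p)    { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

struct toy_node { int level; };

/* Shape of the allocator after the patch: the error value is (re)assigned
 * at the top of every retry, so every failure path below hands back
 * err_ptr(-EAGAIN) instead of whatever 'n' happened to hold. */
static struct toy_node *toy_node_alloc(int failures_before_success)
{
        struct toy_node *n;
        int tries = 0;

retry:
        n = err_ptr(-EAGAIN);           /* known failure value for this attempt */
        if (tries++ < failures_before_success)
                goto err;               /* simulated bucket-allocation failure */

        n = malloc(sizeof(*n));
        if (!n)
                return err_ptr(-ENOMEM);
        n->level = 0;
        return n;

err:
        if (tries < 3)                  /* pretend the caller asked us to wait and retry */
                goto retry;
        return n;                       /* still err_ptr(-EAGAIN), never NULL */
}

int main(void)
{
        struct toy_node *n = toy_node_alloc(5);

        if (is_err(n))                  /* caller only has to test for error pointers */
                printf("alloc failed: %ld\n", ptr_err(n));
        else {
                printf("alloc ok, level %d\n", n->level);
                free(n);
        }
        return 0;
}

The is_err()-only test in main() is the caller-side half of the same convention; that is what the IS_ERR_OR_NULL() to IS_ERR() conversions in the remaining hunks amount to, since these allocator paths are now expected to return either a valid node or an error pointer.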
@@ -1138,7 +1140,7 @@
 {
         struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
 
-        if (!IS_ERR_OR_NULL(n)) {
+        if (!IS_ERR(n)) {
                 mutex_lock(&n->write_lock);
                 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
                 bkey_copy_key(&n->key, &b->key);
@@ -1340,7 +1342,7 @@
         memset(new_nodes, 0, sizeof(new_nodes));
         closure_init_stack(&cl);
 
-        while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+        while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
                 keys += r[nodes++].keys;
 
         blocks = btree_default_blocks(b->c) * 2 / 3;
@@ -1352,7 +1354,7 @@
 
         for (i = 0; i < nodes; i++) {
                 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
-                if (IS_ERR_OR_NULL(new_nodes[i]))
+                if (IS_ERR(new_nodes[i]))
                         goto out_nocoalesce;
         }
 
@@ -1487,7 +1489,7 @@
         bch_keylist_free(&keylist);
 
         for (i = 0; i < nodes; i++)
-                if (!IS_ERR_OR_NULL(new_nodes[i])) {
+                if (!IS_ERR(new_nodes[i])) {
                         btree_node_free(new_nodes[i]);
                         rw_unlock(true, new_nodes[i]);
                 }
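This error path walks an array in which each attempted slot holds either a real replacement node or an error pointer, and frees only the real ones. Below is a compact userspace sketch of that partial-failure cleanup shape, under the assumption that every slot the allocation loop touched was explicitly assigned; the helpers and names are toy stand-ins, not the bcache code.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
#define N_NODES   4

static inline void *err_ptr(long error)   { return (void *)error; }
static inline int is_err(const void *p)   { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

int main(void)
{
        void *nodes[N_NODES];
        int filled, i;

        /* Allocate replacements until a (simulated) failure partway through;
         * every filled slot holds either a real pointer or an error value. */
        for (filled = 0; filled < N_NODES; filled++) {
                nodes[filled] = (filled < 2) ? malloc(16) : err_ptr(-EAGAIN);
                if (is_err(nodes[filled])) {
                        filled++;       /* count the failed slot as touched */
                        break;
                }
        }

        /* Cleanup in the shape of the out_nocoalesce loop above: skip the
         * error-pointer slots and free the rest. */
        for (i = 0; i < filled; i++)
                if (!is_err(nodes[i]))
                        free(nodes[i]);
        return 0;
}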
@@ -1669,7 +1671,7 @@
         if (should_rewrite) {
                 n = btree_node_alloc_replacement(b, NULL);
 
-                if (!IS_ERR_OR_NULL(n)) {
+                if (!IS_ERR(n)) {
                         bch_btree_node_write_sync(n);
 
                         bch_btree_set_root(n);
@@ -1968,6 +1970,15 @@
                         c->gc_stats.nodes++;
                         bch_btree_op_init(&op, 0);
                         ret = bcache_btree(check_recurse, p, c->root, &op);
+                        /*
+                         * The op may be added to cache_set's btree_cache_wait
+                         * in mca_cannibalize(), so we must make sure it is
+                         * removed from that list and btree_cache_alloc_lock
+                         * is released before the op's memory is freed.
+                         * Otherwise the btree_cache_wait list will be damaged.
+                         */
+                        bch_cannibalize_unlock(c);
+                        finish_wait(&c->btree_cache_wait, &(&op)->wait);
                         if (ret)
                                 goto out;
                 }
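The comment added in this last hunk is the core of the fix: mca_cannibalize() can link the on-stack op.wait entry into c->btree_cache_wait without it ever being woken, so if the op's memory is reused or freed while the entry is still linked, the wait list is left pointing at dead memory, which is the damage being fixed. The sketch below is a minimal userspace model of that invariant, with a hand-rolled intrusive list standing in for the kernel wait-queue machinery; enqueue_waiter(), dequeue_waiter() and do_check() are illustrative names, not kernel APIs.

#include <stdio.h>

/* Minimal intrusive list node, standing in for a wait_queue_entry. */
struct waiter {
        struct waiter *prev, *next;
        const char *name;
};

/* Shared list head, standing in for c->btree_cache_wait. */
static struct waiter wait_list = { &wait_list, &wait_list, "head" };

static void enqueue_waiter(struct waiter *w)    /* roughly prepare_to_wait() */
{
        w->prev = wait_list.prev;
        w->next = &wait_list;
        wait_list.prev->next = w;
        wait_list.prev = w;
}

static void dequeue_waiter(struct waiter *w)    /* roughly finish_wait() */
{
        if (w->next) {                          /* only if still linked */
                w->prev->next = w->next;
                w->next->prev = w->prev;
                w->next = w->prev = NULL;
        }
}

/* Models the check thread: the waiter lives in this stack frame, so it
 * must be unlinked before the function returns, on the error path too.
 * Skipping dequeue_waiter() would leave wait_list pointing into a frame
 * that no longer exists -- the corruption this patch prevents. */
static void do_check(int fail)
{
        struct waiter op_wait = { .name = "op" };

        enqueue_waiter(&op_wait);               /* may happen deep inside the call chain */
        if (fail) {
                dequeue_waiter(&op_wait);       /* clean up before bailing out */
                return;
        }
        dequeue_waiter(&op_wait);
}

int main(void)
{
        do_check(1);
        do_check(0);
        printf("wait_list empty afterwards: %s\n",
               wait_list.next == &wait_list ? "yes" : "no");
        return 0;
}

In the patch, that unlinking is the finish_wait() call, and the preceding bch_cannibalize_unlock() additionally gives up ownership of the cannibalize lock, both performed on every exit from the check recursion before the op's memory can be reused.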