2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/md/bcache/bset.c
@@ -6,7 +6,7 @@
  * Copyright 2012 Google, Inc.
  */
 
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
 
 #include "util.h"
 #include "bset.h"
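
The old pr_fmt() appended "\n" to every bcache message, so call sites left the newline off; the new definition follows the usual printk convention and expects each call site to supply its own "\n". A minimal userspace sketch of what the macro change does (pr_err_old/pr_err_new are hypothetical stand-ins, not kernel code; the ##__VA_ARGS__ form is a GNU extension):

    #include <stdio.h>

    /* old form: the macro itself appends the newline */
    #define pr_fmt_old(fmt) "bcache: %s() " fmt "\n", __func__
    /* new form: the call site carries the newline */
    #define pr_fmt_new(fmt) "bcache: %s() " fmt, __func__

    #define pr_err_old(fmt, ...) printf(pr_fmt_old(fmt), ##__VA_ARGS__)
    #define pr_err_new(fmt, ...) printf(pr_fmt_new(fmt), ##__VA_ARGS__)

    int main(void)
    {
            pr_err_old("bad btree header");   /* newline from the macro */
            pr_err_new("bad btree header\n"); /* newline at the call site */
            return 0;
    }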
@@ -31,7 +31,7 @@
 	if (b->ops->key_dump)
 		b->ops->key_dump(b, k);
 	else
-		pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+		pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
 
 	if (next < bset_bkey_last(i) &&
 	    bkey_cmp(k, b->ops->is_extents ?
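
Unlike pr_err(), pr_cont() continues the most recently printed line without re-applying the pr_fmt() prefix, which is why it fits here: the caller has already started the dump line, and a pr_err() in the middle would inject a fresh "bcache: %s()" prefix. A kernel-style sketch (inode/offset are placeholder values):

    pr_err("dumping key: ");                 /* starts the line, prefixed */
    pr_cont("%llu:%llu\n", inode, offset);   /* appends, no prefix added  */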
@@ -155,6 +155,7 @@
 	return 0;
 }
 
+/* Pop the top key of the keylist by pointing l->top to its previous key */
 struct bkey *bch_keylist_pop(struct keylist *l)
 {
 	struct bkey *k = l->keys;
@@ -168,6 +169,7 @@
 	return l->top = k;
 }
 
+/* Pop the bottom key of the keylist and update l->top_p */
 void bch_keylist_pop_front(struct keylist *l)
 {
 	l->top_p -= bkey_u64s(l->keys);
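
Both new comments describe the same keylist layout: keys are packed back to back as u64 words between l->keys (the bottom) and l->top (one past the last key), and bkey_u64s() reports a key's size in those words. A simplified userspace model of pop_front (the struct and helper here are illustrative assumptions, not the kernel definitions):

    #include <stdint.h>
    #include <string.h>

    struct keylist_model {
            uint64_t *keys;   /* bottom: first (oldest) key     */
            uint64_t *top;    /* one past the last (newest) key */
    };

    /* stand-in for the kernel's bkey_u64s(): in this toy model the
     * first word of a key stores its total size in u64 words */
    static size_t key_u64s(const uint64_t *p) { return (size_t)p[0]; }

    /* model of bch_keylist_pop_front(): drop the bottom key and
     * slide the remaining keys down over it, shrinking 'top' by
     * exactly the popped key's size */
    static void pop_front(struct keylist_model *l)
    {
            size_t n = key_u64s(l->keys);

            l->top -= n;
            memmove(l->keys, l->keys + n,
                    (size_t)(l->top - l->keys) * sizeof(uint64_t));
    }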
@@ -309,7 +311,6 @@
 	t->tree = NULL;
 	t->data = NULL;
 }
-EXPORT_SYMBOL(bch_btree_keys_free);
 
 int bch_btree_keys_alloc(struct btree_keys *b,
 			 unsigned int page_order,
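
This is the first of many EXPORT_SYMBOL() removals in this diff. EXPORT_SYMBOL() publishes a symbol in the kernel's global symbol table so other modules can link against it; the exports are dropped because these helpers are only called from within the bcache module itself. For reference:

    /* exported: any module can call bch_btree_keys_free() */
    EXPORT_SYMBOL(bch_btree_keys_free);

    /* without the export, the function (still non-static) remains
     * usable inside the bcache module, just invisible to others */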
@@ -342,29 +343,24 @@
 		bch_btree_keys_free(b);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(bch_btree_keys_alloc);
 
 void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
 			 bool *expensive_debug_checks)
 {
-	unsigned int i;
-
 	b->ops = ops;
 	b->expensive_debug_checks = expensive_debug_checks;
 	b->nsets = 0;
 	b->last_set_unwritten = 0;
 
-	/* XXX: shouldn't be needed */
-	for (i = 0; i < MAX_BSETS; i++)
-		b->set[i].size = 0;
 	/*
-	 * Second loop starts at 1 because b->keys[0]->data is the memory we
-	 * allocated
+	 * struct btree_keys is embedded in struct btree, and struct
+	 * bset_tree is embedded in struct btree_keys. They are all
+	 * initialized to 0 by kzalloc() in mca_bucket_alloc(), and
+	 * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
+	 * don't have to initialize b->set[].size and b->set[].data here
+	 * any more.
 	 */
-	for (i = 1; i < MAX_BSETS; i++)
-		b->set[i].data = NULL;
 }
-EXPORT_SYMBOL(bch_btree_keys_init);
 
 /* Binary tree stuff for auxiliary search trees */
 
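
The new comment's argument can be checked in miniature: zero-allocating the outermost struct zeroes everything embedded in it, so both loops were redundant. A userspace analogue (structs abbreviated; calloc() stands in for kzalloc()):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_BSETS 4

    struct bset_tree  { unsigned int size; void *data; };
    struct btree_keys { struct bset_tree set[MAX_BSETS]; };
    struct btree      { struct btree_keys keys; };

    int main(void)
    {
            /* calloc() plays the role of kzalloc() in mca_bucket_alloc() */
            struct btree *b = calloc(1, sizeof(*b));

            /* every set[i].size is already 0 and set[i].data already
             * NULL, with no explicit initialization loops */
            for (int i = 0; i < MAX_BSETS; i++)
                    printf("set[%d]: size=%u data=%p\n", i,
                           b->keys.set[i].size, b->keys.set[i].data);

            free(b);
            return 0;
    }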
@@ -681,7 +677,6 @@
 
 	bch_bset_build_unwritten_tree(b);
 }
-EXPORT_SYMBOL(bch_bset_init_next);
 
 /*
  * Build auxiliary binary tree 'struct bset_tree *t', this tree is used to
@@ -735,7 +730,6 @@
 	     j = inorder_next(j, t->size))
 		make_bfloat(t, j);
 }
-EXPORT_SYMBOL(bch_bset_build_written_tree);
 
 /* Insert */
 
@@ -783,7 +777,6 @@
 			j = j * 2 + 1;
 	} while (j < t->size);
 }
-EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
 
 static void bch_bset_fix_lookup_table(struct btree_keys *b,
 				      struct bset_tree *t,
@@ -858,7 +851,6 @@
 
 	return b->ops->key_merge(b, l, r);
 }
-EXPORT_SYMBOL(bch_bkey_try_merge);
 
 void bch_bset_insert(struct btree_keys *b, struct bkey *where,
 		     struct bkey *insert)
@@ -878,7 +870,6 @@
 	bkey_copy(where, insert);
 	bch_bset_fix_lookup_table(b, t, where);
 }
-EXPORT_SYMBOL(bch_bset_insert);
 
 unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
 				  struct bkey *replace_key)
@@ -934,7 +925,6 @@
 merged:
 	return status;
 }
-EXPORT_SYMBOL(bch_btree_insert_key);
 
 /* Lookup */
 
@@ -970,45 +960,25 @@
 	unsigned int inorder, j, n = 1;
 
 	do {
-		/*
-		 * A bit trick here.
-		 * If p < t->size, (int)(p - t->size) is a minus value and
-		 * the most significant bit is set, right shifting 31 bits
-		 * gets 1. If p >= t->size, the most significant bit is
-		 * not set, right shifting 31 bits gets 0.
-		 * So the following 2 lines equals to
-		 *	if (p >= t->size)
-		 *		p = 0;
-		 * but a branch instruction is avoided.
-		 */
 		unsigned int p = n << 4;
 
-		p &= ((int) (p - t->size)) >> 31;
-
-		prefetch(&t->tree[p]);
+		if (p < t->size)
+			prefetch(&t->tree[p]);
 
 		j = n;
 		f = &t->tree[j];
 
-		/*
-		 * Similar bit trick, use subtract operation to avoid a branch
-		 * instruction.
-		 *
-		 * n = (f->mantissa > bfloat_mantissa())
-		 *	? j * 2
-		 *	: j * 2 + 1;
-		 *
-		 * We need to subtract 1 from f->mantissa for the sign bit trick
-		 * to work - that's done in make_bfloat()
-		 */
-		if (likely(f->exponent != 127))
-			n = j * 2 + (((unsigned int)
-				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
-		else
-			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
-				? j * 2
-				: j * 2 + 1;
+		if (likely(f->exponent != 127)) {
+			if (f->mantissa >= bfloat_mantissa(search, f))
+				n = j * 2;
+			else
+				n = j * 2 + 1;
+		} else {
+			if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
+				n = j * 2;
+			else
+				n = j * 2 + 1;
+		}
 	} while (n < t->size);
 
 	inorder = to_inorder(j, t);
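
The removed comments documented two sign-bit tricks; the replacement trades them for ordinary branches. The prefetch clamp is easy to verify in isolation. A small userspace check (assumes 32-bit int and an arithmetic right shift of negative values, as the original comment itself did):

    #include <stdio.h>

    static unsigned int clamp_trick(unsigned int p, unsigned int size)
    {
            /* old branchless form: mask is all-ones iff p < size */
            p &= ((int)(p - size)) >> 31;
            return p;
    }

    static unsigned int clamp_branch(unsigned int p, unsigned int size)
    {
            /* new readable form */
            if (p >= size)
                    p = 0;
            return p;
    }

    int main(void)
    {
            for (unsigned int p = 0; p < 64; p++)
                    for (unsigned int size = 1; size < 64; size++)
                            if (clamp_trick(p, size) != clamp_branch(p, size))
                                    printf("mismatch p=%u size=%u\n", p, size);
            printf("check done\n");
            return 0;
    }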
@@ -1100,7 +1070,6 @@
 
 	return i.l;
 }
-EXPORT_SYMBOL(__bch_bset_search);
 
 /* Btree iterator */
 
@@ -1155,7 +1124,6 @@
 {
 	return __bch_btree_iter_init(b, iter, search, b->set);
 }
-EXPORT_SYMBOL(bch_btree_iter_init);
 
 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
 						 btree_iter_cmp_fn *cmp)
@@ -1188,7 +1156,6 @@
 	return __bch_btree_iter_next(iter, btree_iter_cmp);
 
 }
-EXPORT_SYMBOL(bch_btree_iter_next);
 
 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
 					struct btree_keys *b, ptr_filter_fn fn)
@@ -1219,7 +1186,6 @@
 
 	return mempool_init_page_pool(&state->pool, 1, page_order);
 }
-EXPORT_SYMBOL(bch_bset_sort_state_init);
 
 static void btree_mergesort(struct btree_keys *b, struct bset *out,
 			    struct btree_iter *iter,
@@ -1259,7 +1225,7 @@
 
 	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
 
-	pr_debug("sorted %i keys", out->keys);
+	pr_debug("sorted %i keys\n", out->keys);
 }
 
 static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
@@ -1291,6 +1257,11 @@
 		 * Our temporary buffer is the same size as the btree node's
 		 * buffer, we can just swap buffers instead of doing a big
 		 * memcpy()
+		 *
+		 * Don't worry even if 'out' is allocated from the mempool; it
+		 * can still be swapped here, because state->pool is a page
+		 * mempool created by mempool_init_page_pool(), which really
+		 * allocates pages with alloc_pages().
 		 */
 
 		out->magic = b->set->data->magic;
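
The fixed comment is worth unpacking: mempool_init_page_pool() builds a mempool whose elements come straight from alloc_pages(), so a buffer drawn from state->pool and the btree node's own buffer are both plain page allocations of the same order and can simply trade places. A hedged sketch of the pattern (not the literal __btree_sort() code):

    /* buffer from the page mempool, mapped to a usable address */
    struct bset *out = page_address(mempool_alloc(&state->pool, GFP_NOIO));

    /* ... merge-sort the node's keys into 'out' ... */

    /* swap pointers instead of memcpy(); afterwards the mempool is
     * handed back the ex-node buffer, which it can free like any
     * other page allocation */
    swap(b->set->data, out);
    mempool_free(virt_to_page(out), &state->pool);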
@@ -1336,7 +1307,6 @@
 
 	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
 }
-EXPORT_SYMBOL(bch_btree_sort_partial);
 
 void bch_btree_sort_and_fix_extents(struct btree_keys *b,
 				    struct btree_iter *iter,
@@ -1389,7 +1359,6 @@
 out:
 	bch_bset_build_written_tree(b);
 }
-EXPORT_SYMBOL(bch_btree_sort_lazy);
 
 void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
13951364 {