2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/fs/xfs/scrub/bitmap.c
@@ -10,11 +10,6 @@
 #include "xfs_trans_resv.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
-#include "scrub/xfs_scrub.h"
-#include "scrub/scrub.h"
-#include "scrub/common.h"
-#include "scrub/trace.h"
-#include "scrub/repair.h"
 #include "scrub/bitmap.h"
 
 /*
@@ -23,14 +18,14 @@
  * This is the logical equivalent of bitmap |= mask(start, len).
  */
 int
-xfs_bitmap_set(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set(
+	struct xbitmap		*bitmap,
 	uint64_t		start,
 	uint64_t		len)
 {
-	struct xfs_bitmap_range	*bmr;
+	struct xbitmap_range	*bmr;
 
-	bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
+	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
 	if (!bmr)
 		return -ENOMEM;
 
@@ -44,13 +39,13 @@
 
 /* Free everything related to this bitmap. */
 void
-xfs_bitmap_destroy(
-	struct xfs_bitmap	*bitmap)
+xbitmap_destroy(
+	struct xbitmap		*bitmap)
 {
-	struct xfs_bitmap_range	*bmr;
-	struct xfs_bitmap_range	*n;
+	struct xbitmap_range	*bmr;
+	struct xbitmap_range	*n;
 
-	for_each_xfs_bitmap_extent(bmr, n, bitmap) {
+	for_each_xbitmap_extent(bmr, n, bitmap) {
 		list_del(&bmr->list);
 		kmem_free(bmr);
 	}
@@ -58,24 +53,24 @@
 
 /* Set up a per-AG block bitmap. */
 void
-xfs_bitmap_init(
-	struct xfs_bitmap	*bitmap)
+xbitmap_init(
+	struct xbitmap		*bitmap)
 {
 	INIT_LIST_HEAD(&bitmap->list);
 }
 
 /* Compare two btree extents. */
 static int
-xfs_bitmap_range_cmp(
+xbitmap_range_cmp(
 	void			*priv,
 	struct list_head	*a,
 	struct list_head	*b)
 {
-	struct xfs_bitmap_range	*ap;
-	struct xfs_bitmap_range	*bp;
+	struct xbitmap_range	*ap;
+	struct xbitmap_range	*bp;
 
-	ap = container_of(a, struct xfs_bitmap_range, list);
-	bp = container_of(b, struct xfs_bitmap_range, list);
+	ap = container_of(a, struct xbitmap_range, list);
+	bp = container_of(b, struct xbitmap_range, list);
 
 	if (ap->start > bp->start)
 		return 1;
@@ -101,14 +96,14 @@
 #define LEFT_ALIGNED	(1 << 0)
 #define RIGHT_ALIGNED	(1 << 1)
 int
-xfs_bitmap_disunion(
-	struct xfs_bitmap	*bitmap,
-	struct xfs_bitmap	*sub)
+xbitmap_disunion(
+	struct xbitmap		*bitmap,
+	struct xbitmap		*sub)
 {
 	struct list_head	*lp;
-	struct xfs_bitmap_range	*br;
-	struct xfs_bitmap_range	*new_br;
-	struct xfs_bitmap_range	*sub_br;
+	struct xbitmap_range	*br;
+	struct xbitmap_range	*new_br;
+	struct xbitmap_range	*sub_br;
 	uint64_t		sub_start;
 	uint64_t		sub_len;
 	int			state;
@@ -118,8 +113,8 @@
 		return 0;
 	ASSERT(!list_empty(&sub->list));
 
-	list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
-	list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
+	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
+	list_sort(NULL, &sub->list, xbitmap_range_cmp);
 
 	/*
 	 * Now that we've sorted both lists, we iterate bitmap once, rolling
@@ -129,11 +124,11 @@
 	 * list traversal is similar to merge sort, but we're deleting
 	 * instead. In this manner we avoid O(n^2) operations.
 	 */
-	sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
+	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
 			list);
 	lp = bitmap->list.next;
 	while (lp != &bitmap->list) {
-		br = list_entry(lp, struct xfs_bitmap_range, list);
+		br = list_entry(lp, struct xbitmap_range, list);
 
 		/*
 		 * Advance sub_br and/or br until we find a pair that
@@ -186,7 +181,7 @@
 			 * Deleting from the middle: add the new right extent
 			 * and then shrink the left extent.
 			 */
-			new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
+			new_br = kmem_alloc(sizeof(struct xbitmap_range),
 					KM_MAYFAIL);
 			if (!new_br) {
 				error = -ENOMEM;
@@ -252,8 +247,8 @@
  * blocks going from the leaf towards the root.
  */
 int
-xfs_bitmap_set_btcur_path(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set_btcur_path(
+	struct xbitmap		*bitmap,
 	struct xfs_btree_cur	*cur)
 {
 	struct xfs_buf		*bp;
@@ -266,7 +261,7 @@
 		if (!bp)
 			continue;
 		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
-		error = xfs_bitmap_set(bitmap, fsb, 1);
+		error = xbitmap_set(bitmap, fsb, 1);
 		if (error)
 			return error;
 	}
@@ -276,12 +271,12 @@
 
 /* Collect a btree's block in the bitmap. */
 STATIC int
-xfs_bitmap_collect_btblock(
+xbitmap_collect_btblock(
 	struct xfs_btree_cur	*cur,
 	int			level,
 	void			*priv)
 {
-	struct xfs_bitmap	*bitmap = priv;
+	struct xbitmap		*bitmap = priv;
 	struct xfs_buf		*bp;
 	xfs_fsblock_t		fsbno;
 
@@ -290,14 +285,30 @@
 		return 0;
 
 	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
-	return xfs_bitmap_set(bitmap, fsbno, 1);
+	return xbitmap_set(bitmap, fsbno, 1);
 }
 
 /* Walk the btree and mark the bitmap wherever a btree block is found. */
 int
-xfs_bitmap_set_btblocks(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set_btblocks(
+	struct xbitmap		*bitmap,
 	struct xfs_btree_cur	*cur)
 {
-	return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
+	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
+			XFS_BTREE_VISIT_ALL, bitmap);
+}
+
+/* How many bits are set in this bitmap? */
+uint64_t
+xbitmap_hweight(
+	struct xbitmap		*bitmap)
+{
+	struct xbitmap_range	*bmr;
+	struct xbitmap_range	*n;
+	uint64_t		ret = 0;
+
+	for_each_xbitmap_extent(bmr, n, bitmap)
+		ret += bmr->len;
+
+	return ret;
 }
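
For context, here is a minimal usage sketch of the renamed helpers. It is not part of the patch: only the xbitmap_* calls and types come from bitmap.c above, while the function name (example_count_reapable) and the variable names (old_blocks, keep_blocks, nr_reapable) are hypothetical, and it assumes the kernel-internal headers (notably "scrub/bitmap.h") are in scope.

/*
 * Illustrative sketch only (not from the patch): count how many blocks of an
 * old btree could be reaped, given a bitmap of blocks that must be kept.
 */
static int
example_count_reapable(
	struct xfs_btree_cur	*old_cur,
	struct xbitmap		*keep_blocks,
	uint64_t		*nr_reapable)
{
	struct xbitmap		old_blocks;
	int			error;

	xbitmap_init(&old_blocks);

	/* Record every block in the old btree, from the leaves up. */
	error = xbitmap_set_btblocks(&old_blocks, old_cur);
	if (error)
		goto out;

	/* old_blocks &= ~keep_blocks: drop the ranges we still need. */
	error = xbitmap_disunion(&old_blocks, keep_blocks);
	if (error)
		goto out;

	/* Whatever is left is a candidate for reaping. */
	*nr_reapable = xbitmap_hweight(&old_blocks);
out:
	xbitmap_destroy(&old_blocks);
	return error;
}

As the comments in xbitmap_disunion() note, the subtraction sorts both range lists and then walks them in a single merge-like pass, so the "old minus kept" computation avoids O(n^2) behavior.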