2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/fs/btrfs/extent_map.c
@@ -4,6 +4,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include "ctree.h"
+#include "volumes.h"
 #include "extent_map.h"
 #include "compression.h"
 
@@ -34,7 +35,7 @@
  */
 void extent_map_tree_init(struct extent_map_tree *tree)
 {
-	tree->map = RB_ROOT;
+	tree->map = RB_ROOT_CACHED;
 	INIT_LIST_HEAD(&tree->modified_extents);
 	rwlock_init(&tree->lock);
 }
@@ -90,24 +91,27 @@
 	return start + len;
 }
 
-static int tree_insert(struct rb_root *root, struct extent_map *em)
+static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
 {
-	struct rb_node **p = &root->rb_node;
+	struct rb_node **p = &root->rb_root.rb_node;
 	struct rb_node *parent = NULL;
 	struct extent_map *entry = NULL;
 	struct rb_node *orig_parent = NULL;
 	u64 end = range_end(em->start, em->len);
+	bool leftmost = true;
 
 	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct extent_map, rb_node);
 
-		if (em->start < entry->start)
+		if (em->start < entry->start) {
 			p = &(*p)->rb_left;
-		else if (em->start >= extent_map_end(entry))
+		} else if (em->start >= extent_map_end(entry)) {
 			p = &(*p)->rb_right;
-		else
+			leftmost = false;
+		} else {
 			return -EEXIST;
+		}
 	}
 
 	orig_parent = parent;
@@ -130,7 +134,7 @@
 		return -EEXIST;
 
 	rb_link_node(&em->rb_node, orig_parent, p);
-	rb_insert_color(&em->rb_node, root);
+	rb_insert_color_cached(&em->rb_node, root, leftmost);
 	return 0;
 }
 
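For context on the two hunks above, here is a minimal, illustrative sketch of the rb_root_cached pattern the patch adopts. It is not part of the patch: struct item and item_insert() are invented names; only the <linux/rbtree.h> helpers (rb_link_node, rb_insert_color_cached, rb_erase_cached, rb_first_cached) are real kernel API. The cached root remembers the leftmost node, so the inserter must report whether the new node became the leftmost.

/* Illustrative only -- not part of the patch; assumes kernel headers. */
struct item {
	u64 start;
	struct rb_node rb_node;
};

static int item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, rb_node);

		parent = *p;
		if (new->start < cur->start) {
			p = &(*p)->rb_left;
		} else if (new->start > cur->start) {
			p = &(*p)->rb_right;
			/* Went right at least once: cannot be the new leftmost. */
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(&new->rb_node, parent, p);
	/* Tell the rbtree code whether to update the cached leftmost pointer. */
	rb_insert_color_cached(&new->rb_node, root, leftmost);
	return 0;
}

With this, rb_first_cached() returns the leftmost node in O(1), and rb_erase_cached()/rb_replace_node_cached() keep the cache valid, which is why the plain rb_erase()/rb_replace_node() calls in the hunks below are converted as well.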
@@ -207,15 +211,20 @@
 	if (!list_empty(&prev->list) || !list_empty(&next->list))
 		return 0;
 
+	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
+	       prev->block_start != EXTENT_MAP_DELALLOC);
+
+	if (prev->map_lookup || next->map_lookup)
+		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
+		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
+
 	if (extent_map_end(prev) == next->start &&
 	    prev->flags == next->flags &&
-	    prev->bdev == next->bdev &&
+	    prev->map_lookup == next->map_lookup &&
 	    ((next->block_start == EXTENT_MAP_HOLE &&
 	      prev->block_start == EXTENT_MAP_HOLE) ||
 	     (next->block_start == EXTENT_MAP_INLINE &&
 	      prev->block_start == EXTENT_MAP_INLINE) ||
-	     (next->block_start == EXTENT_MAP_DELALLOC &&
-	      prev->block_start == EXTENT_MAP_DELALLOC) ||
 	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
 	      next->block_start == extent_map_block_end(prev)))) {
 		return 1;
@@ -253,7 +262,7 @@
 			em->mod_start = merge->mod_start;
 			em->generation = max(em->generation, merge->generation);
 
-			rb_erase(&merge->rb_node, &tree->map);
+			rb_erase_cached(&merge->rb_node, &tree->map);
 			RB_CLEAR_NODE(&merge->rb_node);
 			free_extent_map(merge);
 		}
@@ -265,7 +274,7 @@
 	if (rb && mergable_maps(em, merge)) {
 		em->len += merge->len;
 		em->block_len += merge->block_len;
-		rb_erase(&merge->rb_node, &tree->map);
+		rb_erase_cached(&merge->rb_node, &tree->map);
 		RB_CLEAR_NODE(&merge->rb_node);
 		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
 		em->generation = max(em->generation, merge->generation);
@@ -344,6 +353,37 @@
 	try_merge_map(tree, em);
 }
 
+static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
+{
+	struct map_lookup *map = em->map_lookup;
+	u64 stripe_size = em->orig_block_len;
+	int i;
+
+	for (i = 0; i < map->num_stripes; i++) {
+		struct btrfs_bio_stripe *stripe = &map->stripes[i];
+		struct btrfs_device *device = stripe->dev;
+
+		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
+				       stripe->physical + stripe_size - 1, bits);
+	}
+}
+
+static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
+{
+	struct map_lookup *map = em->map_lookup;
+	u64 stripe_size = em->orig_block_len;
+	int i;
+
+	for (i = 0; i < map->num_stripes; i++) {
+		struct btrfs_bio_stripe *stripe = &map->stripes[i];
+		struct btrfs_device *device = stripe->dev;
+
+		__clear_extent_bit(&device->alloc_state, stripe->physical,
+				   stripe->physical + stripe_size - 1, bits,
+				   0, 0, NULL, GFP_NOWAIT, NULL);
+	}
+}
+
 /**
  * add_extent_mapping - add new extent map to the extent tree
  * @tree:	tree to insert new map in
@@ -359,11 +399,17 @@
 {
 	int ret = 0;
 
+	lockdep_assert_held_write(&tree->lock);
+
 	ret = tree_insert(&tree->map, em);
 	if (ret)
 		goto out;
 
 	setup_extent_mapping(tree, em, modified);
+	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
+		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
+		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
+	}
 out:
 	return ret;
 }
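As a hedged illustration of the lockdep_assert_held_write() annotation added above: callers must hold the extent map tree lock for writing around the insertion. The helper below is not part of the patch and the name insert_em_locked is invented; tree->lock is the rwlock initialized in extent_map_tree_init().

/* Illustrative only -- not part of the patch. */
static int insert_em_locked(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	/* add_extent_mapping() now asserts this write lock is held. */
	write_lock(&tree->lock);
	ret = add_extent_mapping(tree, em, 0);	/* 0: not a "modified" extent */
	write_unlock(&tree->lock);

	return ret;
}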
@@ -378,7 +424,7 @@
 	struct rb_node *next = NULL;
 	u64 end = range_end(start, len);
 
-	rb_node = __tree_search(&tree->map, start, &prev, &next);
+	rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
 	if (!rb_node) {
 		if (prev)
 			rb_node = prev;
@@ -439,16 +485,15 @@
  * Removes @em from @tree. No reference counts are dropped, and no checks
  * are done to see if the range is in use
  */
-int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
+void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 {
-	int ret = 0;
-
 	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
-	rb_erase(&em->rb_node, &tree->map);
+	rb_erase_cached(&em->rb_node, &tree->map);
 	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
 		list_del_init(&em->list);
+	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
+		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
 	RB_CLEAR_NODE(&em->rb_node);
-	return ret;
 }
 
 void replace_extent_mapping(struct extent_map_tree *tree,
@@ -460,7 +505,7 @@
 	ASSERT(extent_map_in_tree(cur));
 	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
 		list_del_init(&cur->list);
-	rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map);
+	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
 	RB_CLEAR_NODE(&cur->rb_node);
 
 	setup_extent_mapping(tree, new, modified);
@@ -486,7 +531,8 @@
 		return container_of(prev, struct extent_map, rb_node);
 }
 
-/* helper for btfs_get_extent. Given an existing extent in the tree,
+/*
+ * Helper for btrfs_get_extent. Given an existing extent in the tree,
  * the existing extent is the nearest extent to map_start,
  * and an extent that you want to insert, deal with overlap and insert
  * the best fitted new extent into the tree.