2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/fs/btrfs/delayed-ref.c
@@ -10,6 +10,7 @@
 #include "delayed-ref.h"
 #include "transaction.h"
 #include "qgroup.h"
+#include "space-info.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
 struct kmem_cache *btrfs_delayed_tree_ref_cachep;
@@ -23,6 +24,178 @@
  * us to buffer up frequently modified backrefs in an rb tree instead
  * of hammering updates on the extent allocation tree.
  */
+
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+	bool ret = false;
+	u64 reserved;
+
+	spin_lock(&global_rsv->lock);
+	reserved = global_rsv->reserved;
+	spin_unlock(&global_rsv->lock);
+
+	/*
+	 * Since the global reserve is just kind of magic we don't really want
+	 * to rely on it to save our bacon, so if our size is more than the
+	 * delayed_refs_rsv and the global rsv then it's time to think about
+	 * bailing.
+	 */
+	spin_lock(&delayed_refs_rsv->lock);
+	reserved += delayed_refs_rsv->reserved;
+	if (delayed_refs_rsv->size >= reserved)
+		ret = true;
+	spin_unlock(&delayed_refs_rsv->lock);
+	return ret;
+}
+
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
+{
+	u64 num_entries =
+		atomic_read(&trans->transaction->delayed_refs.num_entries);
+	u64 avg_runtime;
+	u64 val;
+
+	smp_mb();
+	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
+	val = num_entries * avg_runtime;
+	if (val >= NSEC_PER_SEC)
+		return 1;
+	if (val >= NSEC_PER_SEC / 2)
+		return 2;
+
+	return btrfs_check_space_for_delayed_refs(trans->fs_info);
+}
+
+/**
+ * btrfs_delayed_refs_rsv_release - release a ref head's reservation.
+ * @fs_info - the fs_info for our fs.
+ * @nr - the number of items to drop.
+ *
+ * This drops the delayed ref head's count from the delayed refs rsv and frees
+ * any excess reservation we had.
+ */
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
+{
+	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
+	u64 released = 0;
+
+	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
+	if (released)
+		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+					      0, released, 0);
+}
+
+/*
+ * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
+ * @trans - the trans that may have generated delayed refs
+ *
+ * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
+ * it'll calculate the additional size and add it to the delayed_refs_rsv.
+ */
+void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
+{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+	u64 num_bytes;
+
+	if (!trans->delayed_ref_updates)
+		return;
+
+	num_bytes = btrfs_calc_insert_metadata_size(fs_info,
+						    trans->delayed_ref_updates);
+	spin_lock(&delayed_rsv->lock);
+	delayed_rsv->size += num_bytes;
+	delayed_rsv->full = 0;
+	spin_unlock(&delayed_rsv->lock);
+	trans->delayed_ref_updates = 0;
+}
+
+/**
+ * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
+ * @fs_info - the fs info for our fs.
+ * @src - the source block rsv to transfer from.
+ * @num_bytes - the number of bytes to transfer.
+ *
+ * This transfers up to the num_bytes amount from the src rsv to the
+ * delayed_refs_rsv.  Any extra bytes are returned to the space info.
+ */
+void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+				       struct btrfs_block_rsv *src,
+				       u64 num_bytes)
+{
+	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+	u64 to_free = 0;
+
+	spin_lock(&src->lock);
+	src->reserved -= num_bytes;
+	src->size -= num_bytes;
+	spin_unlock(&src->lock);
+
+	spin_lock(&delayed_refs_rsv->lock);
+	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
+		u64 delta = delayed_refs_rsv->size -
+			delayed_refs_rsv->reserved;
+		if (num_bytes > delta) {
+			to_free = num_bytes - delta;
+			num_bytes = delta;
+		}
+	} else {
+		to_free = num_bytes;
+		num_bytes = 0;
+	}
+
+	if (num_bytes)
+		delayed_refs_rsv->reserved += num_bytes;
+	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
+		delayed_refs_rsv->full = 1;
+	spin_unlock(&delayed_refs_rsv->lock);
+
+	if (num_bytes)
+		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+					      0, num_bytes, 1);
+	if (to_free)
+		btrfs_space_info_free_bytes_may_use(fs_info,
+				delayed_refs_rsv->space_info, to_free);
+}
+
+/**
+ * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage.
+ * @fs_info - the fs_info for our fs.
+ * @flush - control how we can flush for this reservation.
+ *
+ * This will refill the delayed block_rsv up to 1 items size worth of space and
+ * will return -ENOSPC if we can't make the reservation.
+ */
+int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+				  enum btrfs_reserve_flush_enum flush)
+{
+	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+	u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
+	u64 num_bytes = 0;
+	int ret = -ENOSPC;
+
+	spin_lock(&block_rsv->lock);
+	if (block_rsv->reserved < block_rsv->size) {
+		num_bytes = block_rsv->size - block_rsv->reserved;
+		num_bytes = min(num_bytes, limit);
+	}
+	spin_unlock(&block_rsv->lock);
+
+	if (!num_bytes)
+		return 0;
+
+	ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
+					   num_bytes, flush);
+	if (ret)
+		return ret;
+	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
+	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+				      0, num_bytes, 1);
+	return 0;
+}
 
 /*
  * compare two delayed tree backrefs with same bytenr and type
@@ -101,14 +274,15 @@
 }
 
 /* insert a new ref to head ref rbtree */
-static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
 						   struct rb_node *node)
 {
-	struct rb_node **p = &root->rb_node;
+	struct rb_node **p = &root->rb_root.rb_node;
 	struct rb_node *parent_node = NULL;
 	struct btrfs_delayed_ref_head *entry;
 	struct btrfs_delayed_ref_head *ins;
 	u64 bytenr;
+	bool leftmost = true;
 
 	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
 	bytenr = ins->bytenr;
@@ -117,26 +291,29 @@
 		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
 				 href_node);
 
-		if (bytenr < entry->bytenr)
+		if (bytenr < entry->bytenr) {
 			p = &(*p)->rb_left;
-		else if (bytenr > entry->bytenr)
+		} else if (bytenr > entry->bytenr) {
 			p = &(*p)->rb_right;
-		else
+			leftmost = false;
+		} else {
 			return entry;
+		}
 	}
 
 	rb_link_node(node, parent_node, p);
-	rb_insert_color(node, root);
+	rb_insert_color_cached(node, root, leftmost);
 	return NULL;
 }
 
-static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
 		struct btrfs_delayed_ref_node *ins)
 {
-	struct rb_node **p = &root->rb_node;
+	struct rb_node **p = &root->rb_root.rb_node;
 	struct rb_node *node = &ins->ref_node;
 	struct rb_node *parent_node = NULL;
 	struct btrfs_delayed_ref_node *entry;
+	bool leftmost = true;
 
 	while (*p) {
 		int comp;
@@ -145,29 +322,46 @@
 		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
 				 ref_node);
 		comp = comp_refs(ins, entry, true);
-		if (comp < 0)
+		if (comp < 0) {
 			p = &(*p)->rb_left;
-		else if (comp > 0)
+		} else if (comp > 0) {
 			p = &(*p)->rb_right;
-		else
+			leftmost = false;
+		} else {
 			return entry;
+		}
 	}
 
 	rb_link_node(node, parent_node, p);
-	rb_insert_color(node, root);
+	rb_insert_color_cached(node, root, leftmost);
 	return NULL;
 }
 
-/*
- * find an head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot.
- * If return_bigger is given, the next bigger entry is returned if no exact
- * match is found.
- */
-static struct btrfs_delayed_ref_head *
-find_ref_head(struct rb_root *root, u64 bytenr,
-	      int return_bigger)
+static struct btrfs_delayed_ref_head *find_first_ref_head(
+		struct btrfs_delayed_ref_root *dr)
 {
+	struct rb_node *n;
+	struct btrfs_delayed_ref_head *entry;
+
+	n = rb_first_cached(&dr->href_root);
+	if (!n)
+		return NULL;
+
+	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
+
+	return entry;
+}
+
+/*
+ * Find a head entry based on bytenr. This returns the delayed ref head if it
+ * was able to find one, or NULL if nothing was in that spot.  If return_bigger
+ * is given, the next bigger entry is returned if no exact match is found.
+ */
+static struct btrfs_delayed_ref_head *find_ref_head(
+		struct btrfs_delayed_ref_root *dr, u64 bytenr,
+		bool return_bigger)
+{
+	struct rb_root *root = &dr->href_root.rb_root;
 	struct rb_node *n;
 	struct btrfs_delayed_ref_head *entry;
 
@@ -187,22 +381,18 @@
 		if (bytenr > entry->bytenr) {
 			n = rb_next(&entry->href_node);
 			if (!n)
-				n = rb_first(root);
+				return NULL;
 			entry = rb_entry(n, struct btrfs_delayed_ref_head,
 					 href_node);
-			return entry;
 		}
 		return entry;
 	}
 	return NULL;
 }
 
-int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
+int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
 			   struct btrfs_delayed_ref_head *head)
 {
-	struct btrfs_delayed_ref_root *delayed_refs;
-
-	delayed_refs = &trans->transaction->delayed_refs;
 	lockdep_assert_held(&delayed_refs->lock);
 	if (mutex_trylock(&head->mutex))
 		return 0;
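Both insert helpers above now track whether the descent ever turned right so they can maintain the rb_root_cached leftmost pointer, which makes rb_first_cached() O(1). A minimal sketch of the same invariant on a plain (unbalanced) binary search tree — user-space C with hypothetical types, not the kernel rbtree API; duplicates, which the real helpers reject, simply go right here:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct node { uint64_t key; struct node *left, *right; };

struct cached_root {
	struct node *root;
	struct node *leftmost;	/* what rb_first_cached() returns in O(1) */
};

static void insert_cached(struct cached_root *t, struct node *n)
{
	struct node **p = &t->root;
	bool leftmost = true;

	while (*p) {
		if (n->key < (*p)->key) {
			p = &(*p)->left;
		} else {
			p = &(*p)->right;
			leftmost = false;	/* no longer the minimum */
		}
	}
	n->left = n->right = NULL;
	*p = n;
	if (leftmost)		/* mirrors rb_insert_color_cached() */
		t->leftmost = n;
}

int main(void)
{
	struct cached_root t = { 0 };
	struct node a = { .key = 5 }, b = { .key = 3 }, c = { .key = 9 };

	insert_cached(&t, &a);
	insert_cached(&t, &b);	/* pure-left descent: cache moves to b */
	insert_cached(&t, &c);	/* went right once: cache untouched */
	assert(t.leftmost->key == 3);
	return 0;
}

The cache may only be updated when the new node was reached purely via left turns, i.e. it becomes the new smallest element; a single step right makes leftmost false for good.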
@@ -227,7 +417,7 @@
 			 struct btrfs_delayed_ref_node *ref)
 {
 	lockdep_assert_held(&head->lock);
-	rb_erase(&ref->ref_node, &head->ref_tree);
+	rb_erase_cached(&ref->ref_node, &head->ref_tree);
 	RB_CLEAR_NODE(&ref->ref_node);
 	if (!list_empty(&ref->add_list))
 		list_del(&ref->add_list);
@@ -294,7 +484,7 @@
 
 	lockdep_assert_held(&head->lock);
 
-	if (RB_EMPTY_ROOT(&head->ref_tree))
+	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
 		return;
 
 	/* We don't have too many refs to merge for data. */
@@ -312,7 +502,8 @@
 	read_unlock(&fs_info->tree_mod_log_lock);
 
 again:
-	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+	for (node = rb_first_cached(&head->ref_tree); node;
+	     node = rb_next(node)) {
 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 		if (seq && ref->seq >= seq)
 			continue;
@@ -343,40 +534,29 @@
 	return ret;
 }
 
-struct btrfs_delayed_ref_head *
-btrfs_select_ref_head(struct btrfs_trans_handle *trans)
+struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+		struct btrfs_delayed_ref_root *delayed_refs)
 {
-	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_delayed_ref_head *head;
-	u64 start;
-	bool loop = false;
-
-	delayed_refs = &trans->transaction->delayed_refs;
 
 again:
-	start = delayed_refs->run_delayed_start;
-	head = find_ref_head(&delayed_refs->href_root, start, 1);
-	if (!head && !loop) {
+	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
+			     true);
+	if (!head && delayed_refs->run_delayed_start != 0) {
 		delayed_refs->run_delayed_start = 0;
-		start = 0;
-		loop = true;
-		head = find_ref_head(&delayed_refs->href_root, start, 1);
-		if (!head)
-			return NULL;
-	} else if (!head && loop) {
-		return NULL;
+		head = find_first_ref_head(delayed_refs);
 	}
+	if (!head)
+		return NULL;
 
 	while (head->processing) {
 		struct rb_node *node;
 
 		node = rb_next(&head->href_node);
		if (!node) {
-			if (loop)
+			if (delayed_refs->run_delayed_start == 0)
 				return NULL;
 			delayed_refs->run_delayed_start = 0;
-			start = 0;
-			loop = true;
 			goto again;
 		}
 		head = rb_entry(node, struct btrfs_delayed_ref_head,
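The rewritten btrfs_select_ref_head() drops the start/loop locals: it searches from run_delayed_start with return_bigger, wraps to the smallest head at most once (only when the cursor was non-zero), and a cursor of zero after wrapping means a second miss is final. A standalone model of that cursor logic, assuming a sorted array in place of the href rbtree and ignoring the processing skip-ahead; the +1 advance stands in for bytenr + num_bytes:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static const uint64_t *select_head(const uint64_t *heads, size_t n,
				   uint64_t *cursor)
{
	const uint64_t *head = NULL;
	size_t i;

	/* find_ref_head(..., return_bigger=true): first entry >= cursor */
	for (i = 0; i < n; i++) {
		if (heads[i] >= *cursor) {
			head = &heads[i];
			break;
		}
	}
	/* ran past the end: wrap around exactly once */
	if (!head && *cursor != 0) {
		*cursor = 0;
		head = n ? &heads[0] : NULL;	/* find_first_ref_head() */
	}
	if (head)
		*cursor = *head + 1;	/* advance past the chosen head */
	return head;
}

int main(void)
{
	const uint64_t heads[] = { 10, 20, 30 };
	uint64_t cursor = 21;
	const uint64_t *h;

	h = select_head(heads, 3, &cursor);	/* first head >= 21 */
	assert(h && *h == 30 && cursor == 31);
	h = select_head(heads, 3, &cursor);	/* past the end: wraps once */
	assert(h && *h == 10);
	return 0;
}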
@@ -389,6 +569,20 @@
 	delayed_refs->run_delayed_start = head->bytenr +
 		head->num_bytes;
 	return head;
+}
+
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+			   struct btrfs_delayed_ref_head *head)
+{
+	lockdep_assert_held(&delayed_refs->lock);
+	lockdep_assert_held(&head->lock);
+
+	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
+	RB_CLEAR_NODE(&head->href_node);
+	atomic_dec(&delayed_refs->num_entries);
+	delayed_refs->num_heads--;
+	if (head->processing == 0)
+		delayed_refs->num_heads_ready--;
 }
 
 /*
@@ -452,12 +646,14 @@
  * helper function to update the accounting in the head ref
  * existing and update must have the same bytenr
  */
-static noinline void
-update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
 			 struct btrfs_delayed_ref_head *existing,
-			 struct btrfs_delayed_ref_head *update,
-			 int *old_ref_mod_ret)
+			 struct btrfs_delayed_ref_head *update)
 {
+	struct btrfs_delayed_ref_root *delayed_refs =
+		&trans->transaction->delayed_refs;
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	u64 flags = btrfs_ref_head_to_space_flags(existing);
 	int old_ref_mod;
 
 	BUG_ON(existing->is_data != update->is_data);
@@ -505,8 +701,6 @@
 	 * currently, for refs we just added we know we're a-ok.
 	 */
 	old_ref_mod = existing->total_ref_mod;
-	if (old_ref_mod_ret)
-		*old_ref_mod_ret = old_ref_mod;
 	existing->ref_mod += update->ref_mod;
 	existing->total_ref_mod += update->ref_mod;
 
@@ -515,11 +709,40 @@
 	 * versa we need to make sure to adjust pending_csums accordingly.
 	 */
 	if (existing->is_data) {
-		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
+		u64 csum_leaves =
+			btrfs_csum_bytes_to_leaves(fs_info,
+						   existing->num_bytes);
+
+		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
 			delayed_refs->pending_csums -= existing->num_bytes;
-		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
+			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
+		}
+		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
 			delayed_refs->pending_csums += existing->num_bytes;
+			trans->delayed_ref_updates += csum_leaves;
+		}
 	}
+
+	/*
+	 * This handles the following conditions:
+	 *
+	 * 1. We had a ref mod of 0 or more and went negative, indicating that
+	 *    we may be freeing space, so add our space to the
+	 *    total_bytes_pinned counter.
+	 * 2. We were negative and went to 0 or positive, so no longer can say
+	 *    that the space would be pinned, decrement our counter from the
+	 *    total_bytes_pinned counter.
+	 * 3. We are now at 0 and have ->must_insert_reserved set, which means
+	 *    this was a new allocation and then we dropped it, and thus must
+	 *    add our space to the total_bytes_pinned counter.
+	 */
+	if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
+		btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
+	else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
+		btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
+	else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
+		btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
+
 	spin_unlock(&existing->lock);
 }
 
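The total_bytes_pinned adjustments above fire only when total_ref_mod crosses zero, or lands on zero for a must_insert_reserved head; staying on one side of zero changes nothing. A standalone model of the three transition rules, with hypothetical names and the same else-if ordering as the hunk:

#include <assert.h>
#include <stdint.h>

static int64_t pinned_delta(int64_t old_mod, int64_t new_mod,
			    int must_insert_reserved, int64_t num_bytes)
{
	if (new_mod < 0 && old_mod >= 0)
		return num_bytes;	/* may free space: count it as pinned */
	if (new_mod >= 0 && old_mod < 0)
		return -num_bytes;	/* back to net-positive: unpin it */
	if (new_mod == 0 && must_insert_reserved)
		return num_bytes;	/* fresh allocation dropped again */
	return 0;
}

int main(void)
{
	assert(pinned_delta(0, -1, 0, 4096) == 4096);	/* crossed below zero */
	assert(pinned_delta(-1, 0, 0, 4096) == -4096);	/* crossed back up */
	assert(pinned_delta(1, 2, 0, 4096) == 0);	/* no crossing: no-op */
	return 0;
}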
@@ -566,22 +789,19 @@
 	head_ref->must_insert_reserved = must_insert_reserved;
 	head_ref->is_data = is_data;
 	head_ref->is_system = is_system;
-	head_ref->ref_tree = RB_ROOT;
+	head_ref->ref_tree = RB_ROOT_CACHED;
 	INIT_LIST_HEAD(&head_ref->ref_add_list);
 	RB_CLEAR_NODE(&head_ref->href_node);
 	head_ref->processing = 0;
 	head_ref->total_ref_mod = count_mod;
-	head_ref->qgroup_reserved = 0;
-	head_ref->qgroup_ref_root = 0;
 	spin_lock_init(&head_ref->lock);
 	mutex_init(&head_ref->mutex);
 
 	if (qrecord) {
 		if (ref_root && reserved) {
-			head_ref->qgroup_ref_root = ref_root;
-			head_ref->qgroup_reserved = reserved;
+			qrecord->data_rsv = reserved;
+			qrecord->data_rsv_refroot = ref_root;
 		}
-
 		qrecord->bytenr = bytenr;
 		qrecord->num_bytes = num_bytes;
 		qrecord->old_roots = NULL;
@@ -597,8 +817,7 @@
 add_delayed_ref_head(struct btrfs_trans_handle *trans,
 		     struct btrfs_delayed_ref_head *head_ref,
 		     struct btrfs_qgroup_extent_record *qrecord,
-		     int action, int *qrecord_inserted_ret,
-		     int *old_ref_mod, int *new_ref_mod)
+		     int action, int *qrecord_inserted_ret)
 {
 	struct btrfs_delayed_ref_head *existing;
 	struct btrfs_delayed_ref_root *delayed_refs;
@@ -620,12 +839,7 @@
 	existing = htree_insert(&delayed_refs->href_root,
 				&head_ref->href_node);
 	if (existing) {
-		WARN_ON(qrecord && head_ref->qgroup_ref_root
-			&& head_ref->qgroup_reserved
-			&& existing->qgroup_ref_root
-			&& existing->qgroup_reserved);
-		update_existing_head_ref(delayed_refs, existing, head_ref,
-					 old_ref_mod);
+		update_existing_head_ref(trans, existing, head_ref);
 		/*
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
@@ -633,10 +847,17 @@
 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 		head_ref = existing;
 	} else {
-		if (old_ref_mod)
-			*old_ref_mod = 0;
-		if (head_ref->is_data && head_ref->ref_mod < 0)
+		u64 flags = btrfs_ref_head_to_space_flags(head_ref);
+
+		if (head_ref->is_data && head_ref->ref_mod < 0) {
 			delayed_refs->pending_csums += head_ref->num_bytes;
+			trans->delayed_ref_updates +=
+				btrfs_csum_bytes_to_leaves(trans->fs_info,
+							   head_ref->num_bytes);
+		}
+		if (head_ref->ref_mod < 0)
+			btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
+						     head_ref->num_bytes);
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
 		atomic_inc(&delayed_refs->num_entries);
@@ -644,8 +865,6 @@
 	}
 	if (qrecord_inserted_ret)
 		*qrecord_inserted_ret = qrecord_inserted;
-	if (new_ref_mod)
-		*new_ref_mod = head_ref->total_ref_mod;
 
 	return head_ref;
 }
@@ -707,10 +926,8 @@
 * transaction commits.
 */
 int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
-			       u64 bytenr, u64 num_bytes, u64 parent,
-			       u64 ref_root, int level, int action,
-			       struct btrfs_delayed_extent_op *extent_op,
-			       int *old_ref_mod, int *new_ref_mod)
+			       struct btrfs_ref *generic_ref,
+			       struct btrfs_delayed_extent_op *extent_op)
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_delayed_tree_ref *ref;
@@ -718,10 +935,18 @@
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_qgroup_extent_record *record = NULL;
 	int qrecord_inserted;
-	bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+	bool is_system;
+	int action = generic_ref->action;
+	int level = generic_ref->tree_ref.level;
 	int ret;
+	u64 bytenr = generic_ref->bytenr;
+	u64 num_bytes = generic_ref->len;
+	u64 parent = generic_ref->parent;
 	u8 ref_type;
 
+	is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);
+
+	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
 	BUG_ON(extent_op && extent_op->is_data);
 	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
 	if (!ref)
@@ -734,8 +959,10 @@
 	}
 
 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
-	    is_fstree(ref_root)) {
-		record = kmalloc(sizeof(*record), GFP_NOFS);
+	    is_fstree(generic_ref->real_root) &&
+	    is_fstree(generic_ref->tree_ref.root) &&
+	    !generic_ref->skip_qgroup) {
+		record = kzalloc(sizeof(*record), GFP_NOFS);
 		if (!record) {
 			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
@@ -749,13 +976,14 @@
 		ref_type = BTRFS_TREE_BLOCK_REF_KEY;
 
 	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
-				ref_root, action, ref_type);
-	ref->root = ref_root;
+				generic_ref->tree_ref.root, action, ref_type);
+	ref->root = generic_ref->tree_ref.root;
 	ref->parent = parent;
 	ref->level = level;
 
 	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
-			      ref_root, 0, action, false, is_system);
+			      generic_ref->tree_ref.root, 0, action, false,
+			      is_system);
 	head_ref->extent_op = extent_op;
 
 	delayed_refs = &trans->transaction->delayed_refs;
@@ -766,11 +994,16 @@
 	 * the spin lock
 	 */
 	head_ref = add_delayed_ref_head(trans, head_ref, record,
-					action, &qrecord_inserted,
-					old_ref_mod, new_ref_mod);
+					action, &qrecord_inserted);
 
 	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
 	spin_unlock(&delayed_refs->lock);
+
+	/*
+	 * Need to update the delayed_refs_rsv with any changes we may have
+	 * made.
+	 */
+	btrfs_update_delayed_refs_rsv(trans);
 
 	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
 				   action == BTRFS_ADD_DELAYED_EXTENT ?
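add_delayed_ref_head() may bump trans->delayed_ref_updates while delayed_refs->lock is held, so the hunk above folds that bump into the rsv only after the spinlock is dropped, keeping the rsv's own lock out of the critical section. A standalone model of one item's trip through the counters (hypothetical item_size; the real value comes from btrfs_calc_insert_metadata_size()):

#include <assert.h>
#include <stdint.h>

struct rsv { uint64_t size, reserved; };

static const uint64_t item_size = 16384;	/* stand-in value */

int main(void)
{
	struct rsv delayed = { 0, 0 };
	uint64_t updates = 0;

	updates += 1;			/* add_delayed_ref_head() queued one head */

	/* btrfs_update_delayed_refs_rsv(): grow size, clear the counter */
	delayed.size += updates * item_size;
	updates = 0;

	/* refill/migrate paths: top reserved up to size */
	delayed.reserved = delayed.size;

	/* running the head: btrfs_delayed_refs_rsv_release(fs_info, 1) */
	delayed.size -= item_size;
	delayed.reserved -= item_size;

	assert(delayed.size == 0 && delayed.reserved == 0);
	return 0;
}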
@@ -788,10 +1021,8 @@
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
 int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
-			       u64 bytenr, u64 num_bytes,
-			       u64 parent, u64 ref_root,
-			       u64 owner, u64 offset, u64 reserved, int action,
-			       int *old_ref_mod, int *new_ref_mod)
+			       struct btrfs_ref *generic_ref,
+			       u64 reserved)
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_delayed_data_ref *ref;
@@ -799,9 +1030,17 @@
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_qgroup_extent_record *record = NULL;
 	int qrecord_inserted;
+	int action = generic_ref->action;
 	int ret;
+	u64 bytenr = generic_ref->bytenr;
+	u64 num_bytes = generic_ref->len;
+	u64 parent = generic_ref->parent;
+	u64 ref_root = generic_ref->data_ref.ref_root;
+	u64 owner = generic_ref->data_ref.ino;
+	u64 offset = generic_ref->data_ref.offset;
 	u8 ref_type;
 
+	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
 	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
 	if (!ref)
 		return -ENOMEM;
@@ -825,8 +1064,10 @@
 	}
 
 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
-	    is_fstree(ref_root)) {
-		record = kmalloc(sizeof(*record), GFP_NOFS);
+	    is_fstree(ref_root) &&
+	    is_fstree(generic_ref->real_root) &&
+	    !generic_ref->skip_qgroup) {
+		record = kzalloc(sizeof(*record), GFP_NOFS);
 		if (!record) {
 			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
 			kmem_cache_free(btrfs_delayed_ref_head_cachep,
@@ -847,11 +1088,16 @@
 	 * the spin lock
 	 */
 	head_ref = add_delayed_ref_head(trans, head_ref, record,
-					action, &qrecord_inserted,
-					old_ref_mod, new_ref_mod);
+					action, &qrecord_inserted);
 
 	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
 	spin_unlock(&delayed_refs->lock);
+
+	/*
+	 * Need to update the delayed_refs_rsv with any changes we may have
+	 * made.
+	 */
+	btrfs_update_delayed_refs_rsv(trans);
 
 	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
 				   action == BTRFS_ADD_DELAYED_EXTENT ?
@@ -865,8 +1111,7 @@
 	return 0;
 }
 
-int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
-				struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
 				u64 bytenr, u64 num_bytes,
 				struct btrfs_delayed_extent_op *extent_op)
 {
@@ -886,21 +1131,28 @@
 	spin_lock(&delayed_refs->lock);
 
 	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
-			     NULL, NULL, NULL);
+			     NULL);
 
 	spin_unlock(&delayed_refs->lock);
+
+	/*
+	 * Need to update the delayed_refs_rsv with any changes we may have
+	 * made.
+	 */
+	btrfs_update_delayed_refs_rsv(trans);
 	return 0;
 }
 
 /*
- * this does a simple search for the head node for a given extent.
- * It must be called with the delayed ref spinlock held, and it returns
- * the head node if any where found, or NULL if not.
+ * This does a simple search for the head node for a given extent. Returns the
+ * head node if found, or NULL if not.
 */
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
 {
-	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
+	lockdep_assert_held(&delayed_refs->lock);
+
+	return find_ref_head(delayed_refs, bytenr, false);
 }
 
 void __cold btrfs_delayed_ref_exit(void)
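With the btrfs_ref conversion, callers build one generic ref and hand it over instead of passing a long scalar argument list. A sketch of what a post-patch call site looks like, assuming the btrfs_init_generic_ref()/btrfs_init_tree_ref() helpers from delayed-ref.h in the same series (their exact signatures are an assumption here, and buf/root/parent are illustrative kernel-context variables, so this fragment is not standalone-compilable):

	struct btrfs_ref generic_ref = { 0 };

	/* describe the extent once: action, bytenr, length, parent */
	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF,
			       buf->start, buf->len, parent);
	/* then the tree-specific half: level and owning root */
	btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
			    root->root_key.objectid);
	ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);

Packing the parameters into struct btrfs_ref is what lets this patch drop the old_ref_mod/new_ref_mod out-parameters and derive real_root/skip_qgroup inside delayed-ref.c instead of at every call site.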