| .. | .. |
|---|
| 10 | 10 | #include "delayed-ref.h" |
|---|
| 11 | 11 | #include "transaction.h" |
|---|
| 12 | 12 | #include "qgroup.h" |
|---|
| 13 | +#include "space-info.h" |
|---|
| 13 | 14 | |
|---|
| 14 | 15 | struct kmem_cache *btrfs_delayed_ref_head_cachep; |
|---|
| 15 | 16 | struct kmem_cache *btrfs_delayed_tree_ref_cachep; |
|---|
| .. | .. |
|---|
| 23 | 24 | * us to buffer up frequently modified backrefs in an rb tree instead |
|---|
| 24 | 25 | * of hammering updates on the extent allocation tree. |
|---|
| 25 | 26 | */ |
|---|
| 27 | + |
|---|
| 28 | +bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info) |
|---|
| 29 | +{ |
|---|
| 30 | + struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; |
|---|
| 31 | + struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; |
|---|
| 32 | + bool ret = false; |
|---|
| 33 | + u64 reserved; |
|---|
| 34 | + |
|---|
| 35 | + spin_lock(&global_rsv->lock); |
|---|
| 36 | + reserved = global_rsv->reserved; |
|---|
| 37 | + spin_unlock(&global_rsv->lock); |
|---|
| 38 | + |
|---|
| 39 | + /* |
|---|
| 40 | + * Since the global reserve is just kind of magic we don't really want |
|---|
| 41 | + * to rely on it to save our bacon, so if our size is more than the |
|---|
| 42 | + * delayed_refs_rsv and the global rsv then it's time to think about |
|---|
| 43 | + * bailing. |
|---|
| 44 | + */ |
|---|
| 45 | + spin_lock(&delayed_refs_rsv->lock); |
|---|
| 46 | + reserved += delayed_refs_rsv->reserved; |
|---|
| 47 | + if (delayed_refs_rsv->size >= reserved) |
|---|
| 48 | + ret = true; |
|---|
| 49 | + spin_unlock(&delayed_refs_rsv->lock); |
|---|
| 50 | + return ret; |
|---|
| 51 | +} |
|---|
| 52 | + |
|---|
| 53 | +int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans) |
|---|
| 54 | +{ |
|---|
| 55 | + u64 num_entries = |
|---|
| 56 | + atomic_read(&trans->transaction->delayed_refs.num_entries); |
|---|
| 57 | + u64 avg_runtime; |
|---|
| 58 | + u64 val; |
|---|
| 59 | + |
|---|
| 60 | + smp_mb(); |
|---|
| 61 | + avg_runtime = trans->fs_info->avg_delayed_ref_runtime; |
|---|
| 62 | + val = num_entries * avg_runtime; |
|---|
| 63 | + if (val >= NSEC_PER_SEC) |
|---|
| 64 | + return 1; |
|---|
| 65 | + if (val >= NSEC_PER_SEC / 2) |
|---|
| 66 | + return 2; |
|---|
| 67 | + |
|---|
| 68 | + return btrfs_check_space_for_delayed_refs(trans->fs_info); |
|---|
| 69 | +} |
|---|
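As a minimal sketch (illustrative only, not part of this patch), a caller could use the helper above to decide whether to stop queueing more delayed refs; the wrapper name below is hypothetical:

```c
/*
 * Hypothetical caller sketch: btrfs_should_throttle_delayed_refs()
 * returns non-zero when the estimated time to run the queued refs
 * (num_entries * avg runtime) crosses the ~0.5s/~1s thresholds, or
 * when the fallback space check says the delayed refs rsv is running
 * behind its size.
 */
static bool example_should_back_off(struct btrfs_trans_handle *trans)
{
        return btrfs_should_throttle_delayed_refs(trans) != 0;
}
```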
| 70 | + |
|---|
| 71 | +/** |
|---|
| 72 | + * btrfs_delayed_refs_rsv_release - release a ref head's reservation. |
|---|
| 73 | + * @fs_info - the fs_info for our fs. |
|---|
| 74 | + * @nr - the number of items to drop. |
|---|
| 75 | + * |
|---|
| 76 | + * This drops the delayed ref head's count from the delayed refs rsv and frees |
|---|
| 77 | + * any excess reservation we had. |
|---|
| 78 | + */ |
|---|
| 79 | +void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr) |
|---|
| 80 | +{ |
|---|
| 81 | + struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; |
|---|
| 82 | + u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr); |
|---|
| 83 | + u64 released = 0; |
|---|
| 84 | + |
|---|
| 85 | + released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL); |
|---|
| 86 | + if (released) |
|---|
| 87 | + trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", |
|---|
| 88 | + 0, released, 0); |
|---|
| 89 | +} |
|---|
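For illustration only, a hedged sketch of releasing the reservation once a ref head has been fully processed; the wrapper is hypothetical, and the 1:1 head-to-item mapping follows how this patch sizes the rsv:

```c
/*
 * Hypothetical sketch: each delayed ref head is accounted as one
 * metadata "item" in the delayed_refs_rsv, i.e.
 * btrfs_calc_insert_metadata_size(fs_info, 1) bytes.  When a head has
 * been run to completion, that one item's worth of space is released
 * (and any excess handed back by the helper above).
 */
static void example_release_one_head(struct btrfs_fs_info *fs_info)
{
        btrfs_delayed_refs_rsv_release(fs_info, 1);
}
```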
| 90 | + |
|---|
| 91 | +/* |
|---|
| 92 | + * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv |
|---|
| 93 | + * @trans - the trans that may have generated delayed refs |
|---|
| 94 | + * |
|---|
| 95 | + * This is to be called any time we may have adjusted trans->delayed_ref_updates; |
|---|
| 96 | + * it will calculate the additional size and add it to the delayed_refs_rsv. |
|---|
| 97 | + */ |
|---|
| 98 | +void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans) |
|---|
| 99 | +{ |
|---|
| 100 | + struct btrfs_fs_info *fs_info = trans->fs_info; |
|---|
| 101 | + struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv; |
|---|
| 102 | + u64 num_bytes; |
|---|
| 103 | + |
|---|
| 104 | + if (!trans->delayed_ref_updates) |
|---|
| 105 | + return; |
|---|
| 106 | + |
|---|
| 107 | + num_bytes = btrfs_calc_insert_metadata_size(fs_info, |
|---|
| 108 | + trans->delayed_ref_updates); |
|---|
| 109 | + spin_lock(&delayed_rsv->lock); |
|---|
| 110 | + delayed_rsv->size += num_bytes; |
|---|
| 111 | + delayed_rsv->full = 0; |
|---|
| 112 | + spin_unlock(&delayed_rsv->lock); |
|---|
| 113 | + trans->delayed_ref_updates = 0; |
|---|
| 114 | +} |
|---|
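A minimal usage sketch, assuming a caller that has just bumped trans->delayed_ref_updates (the field is real per this patch; the surrounding function is hypothetical):

```c
/*
 * Hypothetical sketch: a path that queues @nr new ref heads bumps the
 * per-trans counter and then folds it into the rsv size.  The helper
 * above converts the count to bytes and zeroes the counter again.
 */
static void example_account_new_heads(struct btrfs_trans_handle *trans, int nr)
{
        trans->delayed_ref_updates += nr;
        btrfs_update_delayed_refs_rsv(trans);
}
```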
| 115 | + |
|---|
| 116 | +/** |
|---|
| 117 | + * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv. |
|---|
| 118 | + * @fs_info - the fs info for our fs. |
|---|
| 119 | + * @src - the source block rsv to transfer from. |
|---|
| 120 | + * @num_bytes - the number of bytes to transfer. |
|---|
| 121 | + * |
|---|
| 122 | + * This transfers up to the num_bytes amount from the src rsv to the |
|---|
| 123 | + * delayed_refs_rsv. Any extra bytes are returned to the space info. |
|---|
| 124 | + */ |
|---|
| 125 | +void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, |
|---|
| 126 | + struct btrfs_block_rsv *src, |
|---|
| 127 | + u64 num_bytes) |
|---|
| 128 | +{ |
|---|
| 129 | + struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; |
|---|
| 130 | + u64 to_free = 0; |
|---|
| 131 | + |
|---|
| 132 | + spin_lock(&src->lock); |
|---|
| 133 | + src->reserved -= num_bytes; |
|---|
| 134 | + src->size -= num_bytes; |
|---|
| 135 | + spin_unlock(&src->lock); |
|---|
| 136 | + |
|---|
| 137 | + spin_lock(&delayed_refs_rsv->lock); |
|---|
| 138 | + if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) { |
|---|
| 139 | + u64 delta = delayed_refs_rsv->size - |
|---|
| 140 | + delayed_refs_rsv->reserved; |
|---|
| 141 | + if (num_bytes > delta) { |
|---|
| 142 | + to_free = num_bytes - delta; |
|---|
| 143 | + num_bytes = delta; |
|---|
| 144 | + } |
|---|
| 145 | + } else { |
|---|
| 146 | + to_free = num_bytes; |
|---|
| 147 | + num_bytes = 0; |
|---|
| 148 | + } |
|---|
| 149 | + |
|---|
| 150 | + if (num_bytes) |
|---|
| 151 | + delayed_refs_rsv->reserved += num_bytes; |
|---|
| 152 | + if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size) |
|---|
| 153 | + delayed_refs_rsv->full = 1; |
|---|
| 154 | + spin_unlock(&delayed_refs_rsv->lock); |
|---|
| 155 | + |
|---|
| 156 | + if (num_bytes) |
|---|
| 157 | + trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", |
|---|
| 158 | + 0, num_bytes, 1); |
|---|
| 159 | + if (to_free) |
|---|
| 160 | + btrfs_space_info_free_bytes_may_use(fs_info, |
|---|
| 161 | + delayed_refs_rsv->space_info, to_free); |
|---|
| 162 | +} |
|---|
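To make the split above concrete, a small worked sketch with invented numbers (nothing here comes from the patch itself):

```c
/*
 * Worked example (hypothetical values):
 *
 *   delayed_refs_rsv->size     = 192K
 *   delayed_refs_rsv->reserved = 128K   -> delta = 64K still missing
 *   num_bytes taken from src   = 100K
 *
 * 64K tops up delayed_refs_rsv->reserved (marking it full), and the
 * remaining 36K (to_free) is handed back to the space_info via
 * btrfs_space_info_free_bytes_may_use().
 */
static void example_migrate(struct btrfs_fs_info *fs_info,
                            struct btrfs_block_rsv *src)
{
        btrfs_migrate_to_delayed_refs_rsv(fs_info, src, 100 * 1024);
}
```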
| 163 | + |
|---|
| 164 | +/** |
|---|
| 165 | + * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage. |
|---|
| 166 | + * @fs_info - the fs_info for our fs. |
|---|
| 167 | + * @flush - control how we can flush for this reservation. |
|---|
| 168 | + * |
|---|
| 169 | + * This will refill the delayed block_rsv with up to one item's worth of space and |
|---|
| 170 | + * will return -ENOSPC if we can't make the reservation. |
|---|
| 171 | + */ |
|---|
| 172 | +int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, |
|---|
| 173 | + enum btrfs_reserve_flush_enum flush) |
|---|
| 174 | +{ |
|---|
| 175 | + struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; |
|---|
| 176 | + u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1); |
|---|
| 177 | + u64 num_bytes = 0; |
|---|
| 178 | + int ret = -ENOSPC; |
|---|
| 179 | + |
|---|
| 180 | + spin_lock(&block_rsv->lock); |
|---|
| 181 | + if (block_rsv->reserved < block_rsv->size) { |
|---|
| 182 | + num_bytes = block_rsv->size - block_rsv->reserved; |
|---|
| 183 | + num_bytes = min(num_bytes, limit); |
|---|
| 184 | + } |
|---|
| 185 | + spin_unlock(&block_rsv->lock); |
|---|
| 186 | + |
|---|
| 187 | + if (!num_bytes) |
|---|
| 188 | + return 0; |
|---|
| 189 | + |
|---|
| 190 | + ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv, |
|---|
| 191 | + num_bytes, flush); |
|---|
| 192 | + if (ret) |
|---|
| 193 | + return ret; |
|---|
| 194 | + btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0); |
|---|
| 195 | + trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", |
|---|
| 196 | + 0, num_bytes, 1); |
|---|
| 197 | + return 0; |
|---|
| 198 | +} |
|---|
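A hedged sketch of a refill call site; the wrapper and the choice of flush mode are illustrative, not taken from this patch:

```c
/*
 * Hypothetical sketch: top the delayed refs rsv up by at most one
 * item's worth of metadata space, letting the reservation code flush
 * as needed; -ENOSPC is propagated if nothing can be reclaimed.
 */
static int example_topup_delayed_refs_rsv(struct btrfs_fs_info *fs_info)
{
        return btrfs_delayed_refs_rsv_refill(fs_info, BTRFS_RESERVE_FLUSH_ALL);
}
```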
| 26 | 199 | |
|---|
| 27 | 200 | /* |
|---|
| 28 | 201 | * compare two delayed tree backrefs with same bytenr and type |
|---|
| .. | .. |
|---|
| 101 | 274 | } |
|---|
| 102 | 275 | |
|---|
| 103 | 276 | /* insert a new ref to head ref rbtree */ |
|---|
| 104 | | -static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root, |
|---|
| 277 | +static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root, |
|---|
| 105 | 278 | struct rb_node *node) |
|---|
| 106 | 279 | { |
|---|
| 107 | | - struct rb_node **p = &root->rb_node; |
|---|
| 280 | + struct rb_node **p = &root->rb_root.rb_node; |
|---|
| 108 | 281 | struct rb_node *parent_node = NULL; |
|---|
| 109 | 282 | struct btrfs_delayed_ref_head *entry; |
|---|
| 110 | 283 | struct btrfs_delayed_ref_head *ins; |
|---|
| 111 | 284 | u64 bytenr; |
|---|
| 285 | + bool leftmost = true; |
|---|
| 112 | 286 | |
|---|
| 113 | 287 | ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node); |
|---|
| 114 | 288 | bytenr = ins->bytenr; |
|---|
| .. | .. |
|---|
| 117 | 291 | entry = rb_entry(parent_node, struct btrfs_delayed_ref_head, |
|---|
| 118 | 292 | href_node); |
|---|
| 119 | 293 | |
|---|
| 120 | | - if (bytenr < entry->bytenr) |
|---|
| 294 | + if (bytenr < entry->bytenr) { |
|---|
| 121 | 295 | p = &(*p)->rb_left; |
|---|
| 122 | | - else if (bytenr > entry->bytenr) |
|---|
| 296 | + } else if (bytenr > entry->bytenr) { |
|---|
| 123 | 297 | p = &(*p)->rb_right; |
|---|
| 124 | | - else |
|---|
| 298 | + leftmost = false; |
|---|
| 299 | + } else { |
|---|
| 125 | 300 | return entry; |
|---|
| 301 | + } |
|---|
| 126 | 302 | } |
|---|
| 127 | 303 | |
|---|
| 128 | 304 | rb_link_node(node, parent_node, p); |
|---|
| 129 | | - rb_insert_color(node, root); |
|---|
| 305 | + rb_insert_color_cached(node, root, leftmost); |
|---|
| 130 | 306 | return NULL; |
|---|
| 131 | 307 | } |
|---|
| 132 | 308 | |
|---|
| 133 | | -static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root, |
|---|
| 309 | +static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root, |
|---|
| 134 | 310 | struct btrfs_delayed_ref_node *ins) |
|---|
| 135 | 311 | { |
|---|
| 136 | | - struct rb_node **p = &root->rb_node; |
|---|
| 312 | + struct rb_node **p = &root->rb_root.rb_node; |
|---|
| 137 | 313 | struct rb_node *node = &ins->ref_node; |
|---|
| 138 | 314 | struct rb_node *parent_node = NULL; |
|---|
| 139 | 315 | struct btrfs_delayed_ref_node *entry; |
|---|
| 316 | + bool leftmost = true; |
|---|
| 140 | 317 | |
|---|
| 141 | 318 | while (*p) { |
|---|
| 142 | 319 | int comp; |
|---|
| .. | .. |
|---|
| 145 | 322 | entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, |
|---|
| 146 | 323 | ref_node); |
|---|
| 147 | 324 | comp = comp_refs(ins, entry, true); |
|---|
| 148 | | - if (comp < 0) |
|---|
| 325 | + if (comp < 0) { |
|---|
| 149 | 326 | p = &(*p)->rb_left; |
|---|
| 150 | | - else if (comp > 0) |
|---|
| 327 | + } else if (comp > 0) { |
|---|
| 151 | 328 | p = &(*p)->rb_right; |
|---|
| 152 | | - else |
|---|
| 329 | + leftmost = false; |
|---|
| 330 | + } else { |
|---|
| 153 | 331 | return entry; |
|---|
| 332 | + } |
|---|
| 154 | 333 | } |
|---|
| 155 | 334 | |
|---|
| 156 | 335 | rb_link_node(node, parent_node, p); |
|---|
| 157 | | - rb_insert_color(node, root); |
|---|
| 336 | + rb_insert_color_cached(node, root, leftmost); |
|---|
| 158 | 337 | return NULL; |
|---|
| 159 | 338 | } |
|---|
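The conversions above move both rbtrees to rb_root_cached, which keeps a pointer to the leftmost node so the smallest entry is available in O(1); a generic sketch of the pattern (same kernel rbtree API, simplified and without the duplicate-key handling of the real insert helpers):

```c
struct example_item {
        struct rb_node node;
        u64 key;
};

/* Insert while tracking whether the new node stayed left of everything. */
static void example_insert(struct rb_root_cached *root, struct example_item *ins)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        bool leftmost = true;

        while (*p) {
                struct example_item *cur;

                parent = *p;
                cur = rb_entry(parent, struct example_item, node);
                if (ins->key < cur->key) {
                        p = &(*p)->rb_left;
                } else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }
        rb_link_node(&ins->node, parent, p);
        /* Only a node that never went right becomes the cached leftmost. */
        rb_insert_color_cached(&ins->node, root, leftmost);
}

/* rb_first_cached(root) then returns the smallest key without a tree walk. */
```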
| 160 | 339 | |
|---|
| 161 | | -/* |
|---|
| 162 | | - * find an head entry based on bytenr. This returns the delayed ref |
|---|
| 163 | | - * head if it was able to find one, or NULL if nothing was in that spot. |
|---|
| 164 | | - * If return_bigger is given, the next bigger entry is returned if no exact |
|---|
| 165 | | - * match is found. |
|---|
| 166 | | - */ |
|---|
| 167 | | -static struct btrfs_delayed_ref_head * |
|---|
| 168 | | -find_ref_head(struct rb_root *root, u64 bytenr, |
|---|
| 169 | | - int return_bigger) |
|---|
| 340 | +static struct btrfs_delayed_ref_head *find_first_ref_head( |
|---|
| 341 | + struct btrfs_delayed_ref_root *dr) |
|---|
| 170 | 342 | { |
|---|
| 343 | + struct rb_node *n; |
|---|
| 344 | + struct btrfs_delayed_ref_head *entry; |
|---|
| 345 | + |
|---|
| 346 | + n = rb_first_cached(&dr->href_root); |
|---|
| 347 | + if (!n) |
|---|
| 348 | + return NULL; |
|---|
| 349 | + |
|---|
| 350 | + entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node); |
|---|
| 351 | + |
|---|
| 352 | + return entry; |
|---|
| 353 | +} |
|---|
| 354 | + |
|---|
| 355 | +/* |
|---|
| 356 | + * Find a head entry based on bytenr. This returns the delayed ref head if it |
|---|
| 357 | + * was able to find one, or NULL if nothing was in that spot. If return_bigger |
|---|
| 358 | + * is given, the next bigger entry is returned if no exact match is found. |
|---|
| 359 | + */ |
|---|
| 360 | +static struct btrfs_delayed_ref_head *find_ref_head( |
|---|
| 361 | + struct btrfs_delayed_ref_root *dr, u64 bytenr, |
|---|
| 362 | + bool return_bigger) |
|---|
| 363 | +{ |
|---|
| 364 | + struct rb_root *root = &dr->href_root.rb_root; |
|---|
| 171 | 365 | struct rb_node *n; |
|---|
| 172 | 366 | struct btrfs_delayed_ref_head *entry; |
|---|
| 173 | 367 | |
|---|
| .. | .. |
|---|
| 187 | 381 | if (bytenr > entry->bytenr) { |
|---|
| 188 | 382 | n = rb_next(&entry->href_node); |
|---|
| 189 | 383 | if (!n) |
|---|
| 190 | | - n = rb_first(root); |
|---|
| 384 | + return NULL; |
|---|
| 191 | 385 | entry = rb_entry(n, struct btrfs_delayed_ref_head, |
|---|
| 192 | 386 | href_node); |
|---|
| 193 | | - return entry; |
|---|
| 194 | 387 | } |
|---|
| 195 | 388 | return entry; |
|---|
| 196 | 389 | } |
|---|
| 197 | 390 | return NULL; |
|---|
| 198 | 391 | } |
|---|
| 199 | 392 | |
|---|
| 200 | | -int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, |
|---|
| 393 | +int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs, |
|---|
| 201 | 394 | struct btrfs_delayed_ref_head *head) |
|---|
| 202 | 395 | { |
|---|
| 203 | | - struct btrfs_delayed_ref_root *delayed_refs; |
|---|
| 204 | | - |
|---|
| 205 | | - delayed_refs = &trans->transaction->delayed_refs; |
|---|
| 206 | 396 | lockdep_assert_held(&delayed_refs->lock); |
|---|
| 207 | 397 | if (mutex_trylock(&head->mutex)) |
|---|
| 208 | 398 | return 0; |
|---|
| .. | .. |
|---|
| 227 | 417 | struct btrfs_delayed_ref_node *ref) |
|---|
| 228 | 418 | { |
|---|
| 229 | 419 | lockdep_assert_held(&head->lock); |
|---|
| 230 | | - rb_erase(&ref->ref_node, &head->ref_tree); |
|---|
| 420 | + rb_erase_cached(&ref->ref_node, &head->ref_tree); |
|---|
| 231 | 421 | RB_CLEAR_NODE(&ref->ref_node); |
|---|
| 232 | 422 | if (!list_empty(&ref->add_list)) |
|---|
| 233 | 423 | list_del(&ref->add_list); |
|---|
| .. | .. |
|---|
| 294 | 484 | |
|---|
| 295 | 485 | lockdep_assert_held(&head->lock); |
|---|
| 296 | 486 | |
|---|
| 297 | | - if (RB_EMPTY_ROOT(&head->ref_tree)) |
|---|
| 487 | + if (RB_EMPTY_ROOT(&head->ref_tree.rb_root)) |
|---|
| 298 | 488 | return; |
|---|
| 299 | 489 | |
|---|
| 300 | 490 | /* We don't have too many refs to merge for data. */ |
|---|
| .. | .. |
|---|
| 312 | 502 | read_unlock(&fs_info->tree_mod_log_lock); |
|---|
| 313 | 503 | |
|---|
| 314 | 504 | again: |
|---|
| 315 | | - for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) { |
|---|
| 505 | + for (node = rb_first_cached(&head->ref_tree); node; |
|---|
| 506 | + node = rb_next(node)) { |
|---|
| 316 | 507 | ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); |
|---|
| 317 | 508 | if (seq && ref->seq >= seq) |
|---|
| 318 | 509 | continue; |
|---|
| .. | .. |
|---|
| 343 | 534 | return ret; |
|---|
| 344 | 535 | } |
|---|
| 345 | 536 | |
|---|
| 346 | | -struct btrfs_delayed_ref_head * |
|---|
| 347 | | -btrfs_select_ref_head(struct btrfs_trans_handle *trans) |
|---|
| 537 | +struct btrfs_delayed_ref_head *btrfs_select_ref_head( |
|---|
| 538 | + struct btrfs_delayed_ref_root *delayed_refs) |
|---|
| 348 | 539 | { |
|---|
| 349 | | - struct btrfs_delayed_ref_root *delayed_refs; |
|---|
| 350 | 540 | struct btrfs_delayed_ref_head *head; |
|---|
| 351 | | - u64 start; |
|---|
| 352 | | - bool loop = false; |
|---|
| 353 | | - |
|---|
| 354 | | - delayed_refs = &trans->transaction->delayed_refs; |
|---|
| 355 | 541 | |
|---|
| 356 | 542 | again: |
|---|
| 357 | | - start = delayed_refs->run_delayed_start; |
|---|
| 358 | | - head = find_ref_head(&delayed_refs->href_root, start, 1); |
|---|
| 359 | | - if (!head && !loop) { |
|---|
| 543 | + head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start, |
|---|
| 544 | + true); |
|---|
| 545 | + if (!head && delayed_refs->run_delayed_start != 0) { |
|---|
| 360 | 546 | delayed_refs->run_delayed_start = 0; |
|---|
| 361 | | - start = 0; |
|---|
| 362 | | - loop = true; |
|---|
| 363 | | - head = find_ref_head(&delayed_refs->href_root, start, 1); |
|---|
| 364 | | - if (!head) |
|---|
| 365 | | - return NULL; |
|---|
| 366 | | - } else if (!head && loop) { |
|---|
| 367 | | - return NULL; |
|---|
| 547 | + head = find_first_ref_head(delayed_refs); |
|---|
| 368 | 548 | } |
|---|
| 549 | + if (!head) |
|---|
| 550 | + return NULL; |
|---|
| 369 | 551 | |
|---|
| 370 | 552 | while (head->processing) { |
|---|
| 371 | 553 | struct rb_node *node; |
|---|
| 372 | 554 | |
|---|
| 373 | 555 | node = rb_next(&head->href_node); |
|---|
| 374 | 556 | if (!node) { |
|---|
| 375 | | - if (loop) |
|---|
| 557 | + if (delayed_refs->run_delayed_start == 0) |
|---|
| 376 | 558 | return NULL; |
|---|
| 377 | 559 | delayed_refs->run_delayed_start = 0; |
|---|
| 378 | | - start = 0; |
|---|
| 379 | | - loop = true; |
|---|
| 380 | 560 | goto again; |
|---|
| 381 | 561 | } |
|---|
| 382 | 562 | head = rb_entry(node, struct btrfs_delayed_ref_head, |
|---|
| .. | .. |
|---|
| 389 | 569 | delayed_refs->run_delayed_start = head->bytenr + |
|---|
| 390 | 570 | head->num_bytes; |
|---|
| 391 | 571 | return head; |
|---|
| 572 | +} |
|---|
| 573 | + |
|---|
| 574 | +void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs, |
|---|
| 575 | + struct btrfs_delayed_ref_head *head) |
|---|
| 576 | +{ |
|---|
| 577 | + lockdep_assert_held(&delayed_refs->lock); |
|---|
| 578 | + lockdep_assert_held(&head->lock); |
|---|
| 579 | + |
|---|
| 580 | + rb_erase_cached(&head->href_node, &delayed_refs->href_root); |
|---|
| 581 | + RB_CLEAR_NODE(&head->href_node); |
|---|
| 582 | + atomic_dec(&delayed_refs->num_entries); |
|---|
| 583 | + delayed_refs->num_heads--; |
|---|
| 584 | + if (head->processing == 0) |
|---|
| 585 | + delayed_refs->num_heads_ready--; |
|---|
| 392 | 586 | } |
|---|
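As an illustrative sketch (hypothetical wrapper, heavily simplified locking and error handling compared to the real run path) of how the selection, deletion, and reservation helpers in this patch fit together when one head is retired:

```c
/*
 * Hypothetical sketch of retiring one ref head: pick it, lock it,
 * unlink it from the href tree, then give back its reservation.
 */
static int example_retire_one_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_delayed_ref_head *head;

        spin_lock(&delayed_refs->lock);
        head = btrfs_select_ref_head(delayed_refs);
        if (!head) {
                spin_unlock(&delayed_refs->lock);
                return 0;
        }

        /* Non-zero means the head is busy; a real caller would retry. */
        if (btrfs_delayed_ref_lock(delayed_refs, head)) {
                spin_unlock(&delayed_refs->lock);
                return -EAGAIN;
        }

        /* ... run and empty the refs attached to this head ... */

        spin_lock(&head->lock);
        btrfs_delete_ref_head(delayed_refs, head);
        spin_unlock(&head->lock);
        spin_unlock(&delayed_refs->lock);
        mutex_unlock(&head->mutex);

        /* The head is gone: hand back one item's worth of reservation. */
        btrfs_delayed_refs_rsv_release(trans->fs_info, 1);
        return 0;
}
```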
| 393 | 587 | |
|---|
| 394 | 588 | /* |
|---|
| .. | .. |
|---|
| 452 | 646 | * helper function to update the accounting in the head ref |
|---|
| 453 | 647 | * existing and update must have the same bytenr |
|---|
| 454 | 648 | */ |
|---|
| 455 | | -static noinline void |
|---|
| 456 | | -update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs, |
|---|
| 649 | +static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans, |
|---|
| 457 | 650 | struct btrfs_delayed_ref_head *existing, |
|---|
| 458 | | - struct btrfs_delayed_ref_head *update, |
|---|
| 459 | | - int *old_ref_mod_ret) |
|---|
| 651 | + struct btrfs_delayed_ref_head *update) |
|---|
| 460 | 652 | { |
|---|
| 653 | + struct btrfs_delayed_ref_root *delayed_refs = |
|---|
| 654 | + &trans->transaction->delayed_refs; |
|---|
| 655 | + struct btrfs_fs_info *fs_info = trans->fs_info; |
|---|
| 656 | + u64 flags = btrfs_ref_head_to_space_flags(existing); |
|---|
| 461 | 657 | int old_ref_mod; |
|---|
| 462 | 658 | |
|---|
| 463 | 659 | BUG_ON(existing->is_data != update->is_data); |
|---|
| .. | .. |
|---|
| 505 | 701 | * currently, for refs we just added we know we're a-ok. |
|---|
| 506 | 702 | */ |
|---|
| 507 | 703 | old_ref_mod = existing->total_ref_mod; |
|---|
| 508 | | - if (old_ref_mod_ret) |
|---|
| 509 | | - *old_ref_mod_ret = old_ref_mod; |
|---|
| 510 | 704 | existing->ref_mod += update->ref_mod; |
|---|
| 511 | 705 | existing->total_ref_mod += update->ref_mod; |
|---|
| 512 | 706 | |
|---|
| .. | .. |
|---|
| 515 | 709 | * versa we need to make sure to adjust pending_csums accordingly. |
|---|
| 516 | 710 | */ |
|---|
| 517 | 711 | if (existing->is_data) { |
|---|
| 518 | | - if (existing->total_ref_mod >= 0 && old_ref_mod < 0) |
|---|
| 712 | + u64 csum_leaves = |
|---|
| 713 | + btrfs_csum_bytes_to_leaves(fs_info, |
|---|
| 714 | + existing->num_bytes); |
|---|
| 715 | + |
|---|
| 716 | + if (existing->total_ref_mod >= 0 && old_ref_mod < 0) { |
|---|
| 519 | 717 | delayed_refs->pending_csums -= existing->num_bytes; |
|---|
| 520 | | - if (existing->total_ref_mod < 0 && old_ref_mod >= 0) |
|---|
| 718 | + btrfs_delayed_refs_rsv_release(fs_info, csum_leaves); |
|---|
| 719 | + } |
|---|
| 720 | + if (existing->total_ref_mod < 0 && old_ref_mod >= 0) { |
|---|
| 521 | 721 | delayed_refs->pending_csums += existing->num_bytes; |
|---|
| 722 | + trans->delayed_ref_updates += csum_leaves; |
|---|
| 723 | + } |
|---|
| 522 | 724 | } |
|---|
| 725 | + |
|---|
| 726 | + /* |
|---|
| 727 | + * This handles the following conditions: |
|---|
| 728 | + * |
|---|
| 729 | + * 1. We had a ref mod of 0 or more and went negative, indicating that |
|---|
| 730 | + * we may be freeing space, so add our space to the |
|---|
| 731 | + * total_bytes_pinned counter. |
|---|
| 732 | + * 2. We were negative and went to 0 or positive, so no longer can say |
|---|
| 733 | + * that the space would be pinned, decrement our counter from the |
|---|
| 734 | + * total_bytes_pinned counter. |
|---|
| 735 | + * 3. We are now at 0 and have ->must_insert_reserved set, which means |
|---|
| 736 | + * this was a new allocation and then we dropped it, and thus must |
|---|
| 737 | + * add our space to the total_bytes_pinned counter. |
|---|
| 738 | + */ |
|---|
| 739 | + if (existing->total_ref_mod < 0 && old_ref_mod >= 0) |
|---|
| 740 | + btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes); |
|---|
| 741 | + else if (existing->total_ref_mod >= 0 && old_ref_mod < 0) |
|---|
| 742 | + btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes); |
|---|
| 743 | + else if (existing->total_ref_mod == 0 && existing->must_insert_reserved) |
|---|
| 744 | + btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes); |
|---|
| 745 | + |
|---|
| 523 | 746 | spin_unlock(&existing->lock); |
|---|
| 524 | 747 | } |
|---|
| 525 | 748 | |
|---|
| .. | .. |
|---|
| 566 | 789 | head_ref->must_insert_reserved = must_insert_reserved; |
|---|
| 567 | 790 | head_ref->is_data = is_data; |
|---|
| 568 | 791 | head_ref->is_system = is_system; |
|---|
| 569 | | - head_ref->ref_tree = RB_ROOT; |
|---|
| 792 | + head_ref->ref_tree = RB_ROOT_CACHED; |
|---|
| 570 | 793 | INIT_LIST_HEAD(&head_ref->ref_add_list); |
|---|
| 571 | 794 | RB_CLEAR_NODE(&head_ref->href_node); |
|---|
| 572 | 795 | head_ref->processing = 0; |
|---|
| 573 | 796 | head_ref->total_ref_mod = count_mod; |
|---|
| 574 | | - head_ref->qgroup_reserved = 0; |
|---|
| 575 | | - head_ref->qgroup_ref_root = 0; |
|---|
| 576 | 797 | spin_lock_init(&head_ref->lock); |
|---|
| 577 | 798 | mutex_init(&head_ref->mutex); |
|---|
| 578 | 799 | |
|---|
| 579 | 800 | if (qrecord) { |
|---|
| 580 | 801 | if (ref_root && reserved) { |
|---|
| 581 | | - head_ref->qgroup_ref_root = ref_root; |
|---|
| 582 | | - head_ref->qgroup_reserved = reserved; |
|---|
| 802 | + qrecord->data_rsv = reserved; |
|---|
| 803 | + qrecord->data_rsv_refroot = ref_root; |
|---|
| 583 | 804 | } |
|---|
| 584 | | - |
|---|
| 585 | 805 | qrecord->bytenr = bytenr; |
|---|
| 586 | 806 | qrecord->num_bytes = num_bytes; |
|---|
| 587 | 807 | qrecord->old_roots = NULL; |
|---|
| .. | .. |
|---|
| 597 | 817 | add_delayed_ref_head(struct btrfs_trans_handle *trans, |
|---|
| 598 | 818 | struct btrfs_delayed_ref_head *head_ref, |
|---|
| 599 | 819 | struct btrfs_qgroup_extent_record *qrecord, |
|---|
| 600 | | - int action, int *qrecord_inserted_ret, |
|---|
| 601 | | - int *old_ref_mod, int *new_ref_mod) |
|---|
| 820 | + int action, int *qrecord_inserted_ret) |
|---|
| 602 | 821 | { |
|---|
| 603 | 822 | struct btrfs_delayed_ref_head *existing; |
|---|
| 604 | 823 | struct btrfs_delayed_ref_root *delayed_refs; |
|---|
| .. | .. |
|---|
| 620 | 839 | existing = htree_insert(&delayed_refs->href_root, |
|---|
| 621 | 840 | &head_ref->href_node); |
|---|
| 622 | 841 | if (existing) { |
|---|
| 623 | | - WARN_ON(qrecord && head_ref->qgroup_ref_root |
|---|
| 624 | | - && head_ref->qgroup_reserved |
|---|
| 625 | | - && existing->qgroup_ref_root |
|---|
| 626 | | - && existing->qgroup_reserved); |
|---|
| 627 | | - update_existing_head_ref(delayed_refs, existing, head_ref, |
|---|
| 628 | | - old_ref_mod); |
|---|
| 842 | + update_existing_head_ref(trans, existing, head_ref); |
|---|
| 629 | 843 | /* |
|---|
| 630 | 844 | * we've updated the existing ref, free the newly |
|---|
| 631 | 845 | * allocated ref |
|---|
| .. | .. |
|---|
| 633 | 847 | kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref); |
|---|
| 634 | 848 | head_ref = existing; |
|---|
| 635 | 849 | } else { |
|---|
| 636 | | - if (old_ref_mod) |
|---|
| 637 | | - *old_ref_mod = 0; |
|---|
| 638 | | - if (head_ref->is_data && head_ref->ref_mod < 0) |
|---|
| 850 | + u64 flags = btrfs_ref_head_to_space_flags(head_ref); |
|---|
| 851 | + |
|---|
| 852 | + if (head_ref->is_data && head_ref->ref_mod < 0) { |
|---|
| 639 | 853 | delayed_refs->pending_csums += head_ref->num_bytes; |
|---|
| 854 | + trans->delayed_ref_updates += |
|---|
| 855 | + btrfs_csum_bytes_to_leaves(trans->fs_info, |
|---|
| 856 | + head_ref->num_bytes); |
|---|
| 857 | + } |
|---|
| 858 | + if (head_ref->ref_mod < 0) |
|---|
| 859 | + btrfs_mod_total_bytes_pinned(trans->fs_info, flags, |
|---|
| 860 | + head_ref->num_bytes); |
|---|
| 640 | 861 | delayed_refs->num_heads++; |
|---|
| 641 | 862 | delayed_refs->num_heads_ready++; |
|---|
| 642 | 863 | atomic_inc(&delayed_refs->num_entries); |
|---|
| .. | .. |
|---|
| 644 | 865 | } |
|---|
| 645 | 866 | if (qrecord_inserted_ret) |
|---|
| 646 | 867 | *qrecord_inserted_ret = qrecord_inserted; |
|---|
| 647 | | - if (new_ref_mod) |
|---|
| 648 | | - *new_ref_mod = head_ref->total_ref_mod; |
|---|
| 649 | 868 | |
|---|
| 650 | 869 | return head_ref; |
|---|
| 651 | 870 | } |
|---|
| .. | .. |
|---|
| 707 | 926 | * transaction commits. |
|---|
| 708 | 927 | */ |
|---|
| 709 | 928 | int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans, |
|---|
| 710 | | - u64 bytenr, u64 num_bytes, u64 parent, |
|---|
| 711 | | - u64 ref_root, int level, int action, |
|---|
| 712 | | - struct btrfs_delayed_extent_op *extent_op, |
|---|
| 713 | | - int *old_ref_mod, int *new_ref_mod) |
|---|
| 929 | + struct btrfs_ref *generic_ref, |
|---|
| 930 | + struct btrfs_delayed_extent_op *extent_op) |
|---|
| 714 | 931 | { |
|---|
| 715 | 932 | struct btrfs_fs_info *fs_info = trans->fs_info; |
|---|
| 716 | 933 | struct btrfs_delayed_tree_ref *ref; |
|---|
| .. | .. |
|---|
| 718 | 935 | struct btrfs_delayed_ref_root *delayed_refs; |
|---|
| 719 | 936 | struct btrfs_qgroup_extent_record *record = NULL; |
|---|
| 720 | 937 | int qrecord_inserted; |
|---|
| 721 | | - bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID); |
|---|
| 938 | + bool is_system; |
|---|
| 939 | + int action = generic_ref->action; |
|---|
| 940 | + int level = generic_ref->tree_ref.level; |
|---|
| 722 | 941 | int ret; |
|---|
| 942 | + u64 bytenr = generic_ref->bytenr; |
|---|
| 943 | + u64 num_bytes = generic_ref->len; |
|---|
| 944 | + u64 parent = generic_ref->parent; |
|---|
| 723 | 945 | u8 ref_type; |
|---|
| 724 | 946 | |
|---|
| 947 | + is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID); |
|---|
| 948 | + |
|---|
| 949 | + ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action); |
|---|
| 725 | 950 | BUG_ON(extent_op && extent_op->is_data); |
|---|
| 726 | 951 | ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); |
|---|
| 727 | 952 | if (!ref) |
|---|
| .. | .. |
|---|
| 734 | 959 | } |
|---|
| 735 | 960 | |
|---|
| 736 | 961 | if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) && |
|---|
| 737 | | - is_fstree(ref_root)) { |
|---|
| 738 | | - record = kmalloc(sizeof(*record), GFP_NOFS); |
|---|
| 962 | + is_fstree(generic_ref->real_root) && |
|---|
| 963 | + is_fstree(generic_ref->tree_ref.root) && |
|---|
| 964 | + !generic_ref->skip_qgroup) { |
|---|
| 965 | + record = kzalloc(sizeof(*record), GFP_NOFS); |
|---|
| 739 | 966 | if (!record) { |
|---|
| 740 | 967 | kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); |
|---|
| 741 | 968 | kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref); |
|---|
| .. | .. |
|---|
| 749 | 976 | ref_type = BTRFS_TREE_BLOCK_REF_KEY; |
|---|
| 750 | 977 | |
|---|
| 751 | 978 | init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes, |
|---|
| 752 | | - ref_root, action, ref_type); |
|---|
| 753 | | - ref->root = ref_root; |
|---|
| 979 | + generic_ref->tree_ref.root, action, ref_type); |
|---|
| 980 | + ref->root = generic_ref->tree_ref.root; |
|---|
| 754 | 981 | ref->parent = parent; |
|---|
| 755 | 982 | ref->level = level; |
|---|
| 756 | 983 | |
|---|
| 757 | 984 | init_delayed_ref_head(head_ref, record, bytenr, num_bytes, |
|---|
| 758 | | - ref_root, 0, action, false, is_system); |
|---|
| 985 | + generic_ref->tree_ref.root, 0, action, false, |
|---|
| 986 | + is_system); |
|---|
| 759 | 987 | head_ref->extent_op = extent_op; |
|---|
| 760 | 988 | |
|---|
| 761 | 989 | delayed_refs = &trans->transaction->delayed_refs; |
|---|
| .. | .. |
|---|
| 766 | 994 | * the spin lock |
|---|
| 767 | 995 | */ |
|---|
| 768 | 996 | head_ref = add_delayed_ref_head(trans, head_ref, record, |
|---|
| 769 | | - action, &qrecord_inserted, |
|---|
| 770 | | - old_ref_mod, new_ref_mod); |
|---|
| 997 | + action, &qrecord_inserted); |
|---|
| 771 | 998 | |
|---|
| 772 | 999 | ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node); |
|---|
| 773 | 1000 | spin_unlock(&delayed_refs->lock); |
|---|
| 1001 | + |
|---|
| 1002 | + /* |
|---|
| 1003 | + * Need to update the delayed_refs_rsv with any changes we may have |
|---|
| 1004 | + * made. |
|---|
| 1005 | + */ |
|---|
| 1006 | + btrfs_update_delayed_refs_rsv(trans); |
|---|
| 774 | 1007 | |
|---|
| 775 | 1008 | trace_add_delayed_tree_ref(fs_info, &ref->node, ref, |
|---|
| 776 | 1009 | action == BTRFS_ADD_DELAYED_EXTENT ? |
|---|
| .. | .. |
|---|
| 788 | 1021 | * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref. |
|---|
| 789 | 1022 | */ |
|---|
| 790 | 1023 | int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans, |
|---|
| 791 | | - u64 bytenr, u64 num_bytes, |
|---|
| 792 | | - u64 parent, u64 ref_root, |
|---|
| 793 | | - u64 owner, u64 offset, u64 reserved, int action, |
|---|
| 794 | | - int *old_ref_mod, int *new_ref_mod) |
|---|
| 1024 | + struct btrfs_ref *generic_ref, |
|---|
| 1025 | + u64 reserved) |
|---|
| 795 | 1026 | { |
|---|
| 796 | 1027 | struct btrfs_fs_info *fs_info = trans->fs_info; |
|---|
| 797 | 1028 | struct btrfs_delayed_data_ref *ref; |
|---|
| .. | .. |
|---|
| 799 | 1030 | struct btrfs_delayed_ref_root *delayed_refs; |
|---|
| 800 | 1031 | struct btrfs_qgroup_extent_record *record = NULL; |
|---|
| 801 | 1032 | int qrecord_inserted; |
|---|
| 1033 | + int action = generic_ref->action; |
|---|
| 802 | 1034 | int ret; |
|---|
| 1035 | + u64 bytenr = generic_ref->bytenr; |
|---|
| 1036 | + u64 num_bytes = generic_ref->len; |
|---|
| 1037 | + u64 parent = generic_ref->parent; |
|---|
| 1038 | + u64 ref_root = generic_ref->data_ref.ref_root; |
|---|
| 1039 | + u64 owner = generic_ref->data_ref.ino; |
|---|
| 1040 | + u64 offset = generic_ref->data_ref.offset; |
|---|
| 803 | 1041 | u8 ref_type; |
|---|
| 804 | 1042 | |
|---|
| 1043 | + ASSERT(generic_ref->type == BTRFS_REF_DATA && action); |
|---|
| 805 | 1044 | ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS); |
|---|
| 806 | 1045 | if (!ref) |
|---|
| 807 | 1046 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 825 | 1064 | } |
|---|
| 826 | 1065 | |
|---|
| 827 | 1066 | if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) && |
|---|
| 828 | | - is_fstree(ref_root)) { |
|---|
| 829 | | - record = kmalloc(sizeof(*record), GFP_NOFS); |
|---|
| 1067 | + is_fstree(ref_root) && |
|---|
| 1068 | + is_fstree(generic_ref->real_root) && |
|---|
| 1069 | + !generic_ref->skip_qgroup) { |
|---|
| 1070 | + record = kzalloc(sizeof(*record), GFP_NOFS); |
|---|
| 830 | 1071 | if (!record) { |
|---|
| 831 | 1072 | kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); |
|---|
| 832 | 1073 | kmem_cache_free(btrfs_delayed_ref_head_cachep, |
|---|
| .. | .. |
|---|
| 847 | 1088 | * the spin lock |
|---|
| 848 | 1089 | */ |
|---|
| 849 | 1090 | head_ref = add_delayed_ref_head(trans, head_ref, record, |
|---|
| 850 | | - action, &qrecord_inserted, |
|---|
| 851 | | - old_ref_mod, new_ref_mod); |
|---|
| 1091 | + action, &qrecord_inserted); |
|---|
| 852 | 1092 | |
|---|
| 853 | 1093 | ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node); |
|---|
| 854 | 1094 | spin_unlock(&delayed_refs->lock); |
|---|
| 1095 | + |
|---|
| 1096 | + /* |
|---|
| 1097 | + * Need to update the delayed_refs_rsv with any changes we may have |
|---|
| 1098 | + * made. |
|---|
| 1099 | + */ |
|---|
| 1100 | + btrfs_update_delayed_refs_rsv(trans); |
|---|
| 855 | 1101 | |
|---|
| 856 | 1102 | trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref, |
|---|
| 857 | 1103 | action == BTRFS_ADD_DELAYED_EXTENT ? |
|---|
| .. | .. |
|---|
| 865 | 1111 | return 0; |
|---|
| 866 | 1112 | } |
|---|
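The new btrfs_ref based interface above takes a pre-filled descriptor instead of a long argument list; a hedged sketch of filling one for a metadata ref, using only the fields this hunk dereferences (the wrapper and values are illustrative; data refs would fill data_ref.ref_root/ino/offset instead):

```c
/*
 * Hypothetical sketch: queue an ADD ref for a tree block at @bytenr.
 * Only fields consumed by btrfs_add_delayed_tree_ref() above are set;
 * any helper initialisers that may exist elsewhere are not assumed.
 */
static int example_queue_tree_ref(struct btrfs_trans_handle *trans,
                                  u64 bytenr, u64 num_bytes, u64 root, int level)
{
        struct btrfs_ref ref = { 0 };

        ref.type = BTRFS_REF_METADATA;
        ref.action = BTRFS_ADD_DELAYED_EXTENT;  /* illustrative action */
        ref.bytenr = bytenr;
        ref.len = num_bytes;
        ref.parent = 0;                 /* 0 selects a keyed, non-shared ref */
        ref.real_root = root;
        ref.tree_ref.root = root;
        ref.tree_ref.level = level;
        ref.skip_qgroup = false;

        return btrfs_add_delayed_tree_ref(trans, &ref, NULL);
}
```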
| 867 | 1113 | |
|---|
| 868 | | -int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, |
|---|
| 869 | | - struct btrfs_trans_handle *trans, |
|---|
| 1114 | +int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, |
|---|
| 870 | 1115 | u64 bytenr, u64 num_bytes, |
|---|
| 871 | 1116 | struct btrfs_delayed_extent_op *extent_op) |
|---|
| 872 | 1117 | { |
|---|
| .. | .. |
|---|
| 886 | 1131 | spin_lock(&delayed_refs->lock); |
|---|
| 887 | 1132 | |
|---|
| 888 | 1133 | add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD, |
|---|
| 889 | | - NULL, NULL, NULL); |
|---|
| 1134 | + NULL); |
|---|
| 890 | 1135 | |
|---|
| 891 | 1136 | spin_unlock(&delayed_refs->lock); |
|---|
| 1137 | + |
|---|
| 1138 | + /* |
|---|
| 1139 | + * Need to update the delayed_refs_rsv with any changes we may have |
|---|
| 1140 | + * made. |
|---|
| 1141 | + */ |
|---|
| 1142 | + btrfs_update_delayed_refs_rsv(trans); |
|---|
| 892 | 1143 | return 0; |
|---|
| 893 | 1144 | } |
|---|
| 894 | 1145 | |
|---|
| 895 | 1146 | /* |
|---|
| 896 | | - * this does a simple search for the head node for a given extent. |
|---|
| 897 | | - * It must be called with the delayed ref spinlock held, and it returns |
|---|
| 898 | | - * the head node if any where found, or NULL if not. |
|---|
| 1147 | + * This does a simple search for the head node for a given extent. Returns the |
|---|
| 1148 | + * head node if found, or NULL if not. |
|---|
| 899 | 1149 | */ |
|---|
| 900 | 1150 | struct btrfs_delayed_ref_head * |
|---|
| 901 | 1151 | btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr) |
|---|
| 902 | 1152 | { |
|---|
| 903 | | - return find_ref_head(&delayed_refs->href_root, bytenr, 0); |
|---|
| 1153 | + lockdep_assert_held(&delayed_refs->lock); |
|---|
| 1154 | + |
|---|
| 1155 | + return find_ref_head(delayed_refs, bytenr, false); |
|---|
| 904 | 1156 | } |
|---|
| 905 | 1157 | |
|---|
| 906 | 1158 | void __cold btrfs_delayed_ref_exit(void) |
|---|