.. | .. |
---|
7 | 7 | #include <linux/blkdev.h> |
---|
8 | 8 | #include <linux/radix-tree.h> |
---|
9 | 9 | #include <linux/writeback.h> |
---|
10 | | -#include <linux/buffer_head.h> |
---|
11 | 10 | #include <linux/workqueue.h> |
---|
12 | 11 | #include <linux/kthread.h> |
---|
13 | 12 | #include <linux/slab.h> |
---|
.. | .. |
---|
19 | 18 | #include <linux/crc32c.h> |
---|
20 | 19 | #include <linux/sched/mm.h> |
---|
21 | 20 | #include <asm/unaligned.h> |
---|
| 21 | +#include <crypto/hash.h> |
---|
22 | 22 | #include "ctree.h" |
---|
23 | 23 | #include "disk-io.h" |
---|
24 | 24 | #include "transaction.h" |
---|
.. | .. |
---|
39 | 39 | #include "compression.h" |
---|
40 | 40 | #include "tree-checker.h" |
---|
41 | 41 | #include "ref-verify.h" |
---|
42 | | - |
---|
43 | | -#ifdef CONFIG_X86 |
---|
44 | | -#include <asm/cpufeature.h> |
---|
45 | | -#endif |
---|
| 42 | +#include "block-group.h" |
---|
| 43 | +#include "discard.h" |
---|
| 44 | +#include "space-info.h" |
---|
46 | 45 | |
---|
47 | 46 | #define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\ |
---|
48 | 47 | BTRFS_HEADER_FLAG_RELOC |\ |
---|
.. | .. |
---|
51 | 50 | BTRFS_SUPER_FLAG_METADUMP |\ |
---|
52 | 51 | BTRFS_SUPER_FLAG_METADUMP_V2) |
---|
53 | 52 | |
---|
54 | | -static const struct extent_io_ops btree_extent_io_ops; |
---|
55 | 53 | static void end_workqueue_fn(struct btrfs_work *work); |
---|
56 | 54 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root); |
---|
57 | 55 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
---|
.. | .. |
---|
99 | 97 | kmem_cache_destroy(btrfs_end_io_wq_cache); |
---|
100 | 98 | } |
---|
101 | 99 | |
---|
| 100 | +static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info) |
---|
| 101 | +{ |
---|
| 102 | + if (fs_info->csum_shash) |
---|
| 103 | + crypto_free_shash(fs_info->csum_shash); |
---|
| 104 | +} |
---|
| 105 | + |
---|
102 | 106 | /* |
---|
103 | 107 | * async submit bios are used to offload expensive checksumming |
---|
104 | 108 | * onto the worker threads. They checksum file and metadata bios |
---|
.. | .. |
---|
126 | 130 | * Different roots are used for different purposes and may nest inside each |
---|
127 | 131 | * other and they require separate keysets. As lockdep keys should be |
---|
128 | 132 | * static, assign keysets according to the purpose of the root as indicated |
---|
129 | | - * by btrfs_root->objectid. This ensures that all special purpose roots |
---|
130 | | - * have separate keysets. |
---|
| 133 | + * by btrfs_root->root_key.objectid. This ensures that all special purpose |
---|
| 134 | + * roots have separate keysets. |
---|
131 | 135 | * |
---|
132 | 136 | * Lock-nesting across peer nodes is always done with the immediate parent |
---|
133 | 137 | * node locked thus preventing deadlock. As lockdep doesn't know this, use |
---|
.. | .. |
---|
200 | 204 | #endif |
---|
201 | 205 | |
---|
202 | 206 | /* |
---|
203 | | - * extents on the btree inode are pretty simple, there's one extent |
---|
204 | | - * that covers the entire device |
---|
| 207 | + * Compute the csum of a btree block and store the result to provided buffer. |
---|
205 | 208 | */ |
---|
206 | | -struct extent_map *btree_get_extent(struct btrfs_inode *inode, |
---|
207 | | - struct page *page, size_t pg_offset, u64 start, u64 len, |
---|
208 | | - int create) |
---|
| 209 | +static void csum_tree_block(struct extent_buffer *buf, u8 *result) |
---|
209 | 210 | { |
---|
210 | | - struct btrfs_fs_info *fs_info = inode->root->fs_info; |
---|
211 | | - struct extent_map_tree *em_tree = &inode->extent_tree; |
---|
212 | | - struct extent_map *em; |
---|
213 | | - int ret; |
---|
214 | | - |
---|
215 | | - read_lock(&em_tree->lock); |
---|
216 | | - em = lookup_extent_mapping(em_tree, start, len); |
---|
217 | | - if (em) { |
---|
218 | | - em->bdev = fs_info->fs_devices->latest_bdev; |
---|
219 | | - read_unlock(&em_tree->lock); |
---|
220 | | - goto out; |
---|
221 | | - } |
---|
222 | | - read_unlock(&em_tree->lock); |
---|
223 | | - |
---|
224 | | - em = alloc_extent_map(); |
---|
225 | | - if (!em) { |
---|
226 | | - em = ERR_PTR(-ENOMEM); |
---|
227 | | - goto out; |
---|
228 | | - } |
---|
229 | | - em->start = 0; |
---|
230 | | - em->len = (u64)-1; |
---|
231 | | - em->block_len = (u64)-1; |
---|
232 | | - em->block_start = 0; |
---|
233 | | - em->bdev = fs_info->fs_devices->latest_bdev; |
---|
234 | | - |
---|
235 | | - write_lock(&em_tree->lock); |
---|
236 | | - ret = add_extent_mapping(em_tree, em, 0); |
---|
237 | | - if (ret == -EEXIST) { |
---|
238 | | - free_extent_map(em); |
---|
239 | | - em = lookup_extent_mapping(em_tree, start, len); |
---|
240 | | - if (!em) |
---|
241 | | - em = ERR_PTR(-EIO); |
---|
242 | | - } else if (ret) { |
---|
243 | | - free_extent_map(em); |
---|
244 | | - em = ERR_PTR(ret); |
---|
245 | | - } |
---|
246 | | - write_unlock(&em_tree->lock); |
---|
247 | | - |
---|
248 | | -out: |
---|
249 | | - return em; |
---|
250 | | -} |
---|
251 | | - |
---|
252 | | -u32 btrfs_csum_data(const char *data, u32 seed, size_t len) |
---|
253 | | -{ |
---|
254 | | - return crc32c(seed, data, len); |
---|
255 | | -} |
---|
256 | | - |
---|
257 | | -void btrfs_csum_final(u32 crc, u8 *result) |
---|
258 | | -{ |
---|
259 | | - put_unaligned_le32(~crc, result); |
---|
260 | | -} |
---|
261 | | - |
---|
262 | | -/* |
---|
263 | | - * compute the csum for a btree block, and either verify it or write it |
---|
264 | | - * into the csum field of the block. |
---|
265 | | - */ |
---|
266 | | -static int csum_tree_block(struct btrfs_fs_info *fs_info, |
---|
267 | | - struct extent_buffer *buf, |
---|
268 | | - int verify) |
---|
269 | | -{ |
---|
270 | | - u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); |
---|
271 | | - char result[BTRFS_CSUM_SIZE]; |
---|
272 | | - unsigned long len; |
---|
273 | | - unsigned long cur_len; |
---|
274 | | - unsigned long offset = BTRFS_CSUM_SIZE; |
---|
| 211 | + struct btrfs_fs_info *fs_info = buf->fs_info; |
---|
| 212 | + const int num_pages = fs_info->nodesize >> PAGE_SHIFT; |
---|
| 213 | + SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
---|
275 | 214 | char *kaddr; |
---|
276 | | - unsigned long map_start; |
---|
277 | | - unsigned long map_len; |
---|
278 | | - int err; |
---|
279 | | - u32 crc = ~(u32)0; |
---|
| 215 | + int i; |
---|
280 | 216 | |
---|
281 | | - len = buf->len - offset; |
---|
282 | | - while (len > 0) { |
---|
283 | | - err = map_private_extent_buffer(buf, offset, 32, |
---|
284 | | - &kaddr, &map_start, &map_len); |
---|
285 | | - if (err) |
---|
286 | | - return err; |
---|
287 | | - cur_len = min(len, map_len - (offset - map_start)); |
---|
288 | | - crc = btrfs_csum_data(kaddr + offset - map_start, |
---|
289 | | - crc, cur_len); |
---|
290 | | - len -= cur_len; |
---|
291 | | - offset += cur_len; |
---|
| 217 | + shash->tfm = fs_info->csum_shash; |
---|
| 218 | + crypto_shash_init(shash); |
---|
| 219 | + kaddr = page_address(buf->pages[0]); |
---|
| 220 | + crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE, |
---|
| 221 | + PAGE_SIZE - BTRFS_CSUM_SIZE); |
---|
| 222 | + |
---|
| 223 | + for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) { |
---|
| 224 | + kaddr = page_address(buf->pages[i]); |
---|
| 225 | + crypto_shash_update(shash, kaddr, PAGE_SIZE); |
---|
292 | 226 | } |
---|
293 | 227 | memset(result, 0, BTRFS_CSUM_SIZE); |
---|
294 | | - |
---|
295 | | - btrfs_csum_final(crc, result); |
---|
296 | | - |
---|
297 | | - if (verify) { |
---|
298 | | - if (memcmp_extent_buffer(buf, result, 0, csum_size)) { |
---|
299 | | - u32 val; |
---|
300 | | - u32 found = 0; |
---|
301 | | - memcpy(&found, result, csum_size); |
---|
302 | | - |
---|
303 | | - read_extent_buffer(buf, &val, 0, csum_size); |
---|
304 | | - btrfs_warn_rl(fs_info, |
---|
305 | | - "%s checksum verify failed on %llu wanted %X found %X level %d", |
---|
306 | | - fs_info->sb->s_id, buf->start, |
---|
307 | | - val, found, btrfs_header_level(buf)); |
---|
308 | | - return -EUCLEAN; |
---|
309 | | - } |
---|
310 | | - } else { |
---|
311 | | - write_extent_buffer(buf, result, 0, csum_size); |
---|
312 | | - } |
---|
313 | | - |
---|
314 | | - return 0; |
---|
| 228 | + crypto_shash_final(shash, result); |
---|
315 | 229 | } |
---|
316 | 230 | |
---|
317 | 231 | /* |
---|
.. | .. |
---|
336 | 250 | |
---|
337 | 251 | if (need_lock) { |
---|
338 | 252 | btrfs_tree_read_lock(eb); |
---|
339 | | - btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); |
---|
| 253 | + btrfs_set_lock_blocking_read(eb); |
---|
340 | 254 | } |
---|
341 | 255 | |
---|
342 | 256 | lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, |
---|
.. | .. |
---|
370 | 284 | return ret; |
---|
371 | 285 | } |
---|
372 | 286 | |
---|
| 287 | +static bool btrfs_supported_super_csum(u16 csum_type) |
---|
| 288 | +{ |
---|
| 289 | + switch (csum_type) { |
---|
| 290 | + case BTRFS_CSUM_TYPE_CRC32: |
---|
| 291 | + case BTRFS_CSUM_TYPE_XXHASH: |
---|
| 292 | + case BTRFS_CSUM_TYPE_SHA256: |
---|
| 293 | + case BTRFS_CSUM_TYPE_BLAKE2: |
---|
| 294 | + return true; |
---|
| 295 | + default: |
---|
| 296 | + return false; |
---|
| 297 | + } |
---|
| 298 | +} |
---|
| 299 | + |
---|
373 | 300 | /* |
---|
374 | 301 | * Return 0 if the superblock checksum type matches the checksum value of that |
---|
375 | 302 | * algorithm. Pass the raw disk superblock data. |
---|
.. | .. |
---|
379 | 306 | { |
---|
380 | 307 | struct btrfs_super_block *disk_sb = |
---|
381 | 308 | (struct btrfs_super_block *)raw_disk_sb; |
---|
382 | | - u16 csum_type = btrfs_super_csum_type(disk_sb); |
---|
383 | | - int ret = 0; |
---|
| 309 | + char result[BTRFS_CSUM_SIZE]; |
---|
| 310 | + SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
---|
384 | 311 | |
---|
385 | | - if (csum_type == BTRFS_CSUM_TYPE_CRC32) { |
---|
386 | | - u32 crc = ~(u32)0; |
---|
387 | | - char result[sizeof(crc)]; |
---|
| 312 | + shash->tfm = fs_info->csum_shash; |
---|
388 | 313 | |
---|
389 | | - /* |
---|
390 | | - * The super_block structure does not span the whole |
---|
391 | | - * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space |
---|
392 | | - * is filled with zeros and is included in the checksum. |
---|
393 | | - */ |
---|
394 | | - crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, |
---|
395 | | - crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); |
---|
396 | | - btrfs_csum_final(crc, result); |
---|
| 314 | + /* |
---|
| 315 | + * The super_block structure does not span the whole |
---|
| 316 | + * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is |
---|
| 317 | + * filled with zeros and is included in the checksum. |
---|
| 318 | + */ |
---|
| 319 | + crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE, |
---|
| 320 | + BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result); |
---|
397 | 321 | |
---|
398 | | - if (memcmp(raw_disk_sb, result, sizeof(result))) |
---|
399 | | - ret = 1; |
---|
400 | | - } |
---|
| 322 | + if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb))) |
---|
| 323 | + return 1; |
---|
401 | 324 | |
---|
402 | | - if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) { |
---|
403 | | - btrfs_err(fs_info, "unsupported checksum algorithm %u", |
---|
404 | | - csum_type); |
---|
405 | | - ret = 1; |
---|
406 | | - } |
---|
407 | | - |
---|
408 | | - return ret; |
---|
| 325 | + return 0; |
---|
409 | 326 | } |
---|
410 | 327 | |
---|
411 | | -int btrfs_verify_level_key(struct btrfs_fs_info *fs_info, |
---|
412 | | - struct extent_buffer *eb, int level, |
---|
| 328 | +int btrfs_verify_level_key(struct extent_buffer *eb, int level, |
---|
413 | 329 | struct btrfs_key *first_key, u64 parent_transid) |
---|
414 | 330 | { |
---|
| 331 | + struct btrfs_fs_info *fs_info = eb->fs_info; |
---|
415 | 332 | int found_level; |
---|
416 | 333 | struct btrfs_key found_key; |
---|
417 | 334 | int ret; |
---|
418 | 335 | |
---|
419 | 336 | found_level = btrfs_header_level(eb); |
---|
420 | 337 | if (found_level != level) { |
---|
421 | | -#ifdef CONFIG_BTRFS_DEBUG |
---|
422 | | - WARN_ON(1); |
---|
| 338 | + WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), |
---|
| 339 | + KERN_ERR "BTRFS: tree level check failed\n"); |
---|
423 | 340 | btrfs_err(fs_info, |
---|
424 | 341 | "tree level mismatch detected, bytenr=%llu level expected=%u has=%u", |
---|
425 | 342 | eb->start, level, found_level); |
---|
426 | | -#endif |
---|
427 | 343 | return -EIO; |
---|
428 | 344 | } |
---|
429 | 345 | |
---|
.. | .. |
---|
454 | 370 | btrfs_item_key_to_cpu(eb, &found_key, 0); |
---|
455 | 371 | ret = btrfs_comp_cpu_keys(first_key, &found_key); |
---|
456 | 372 | |
---|
457 | | -#ifdef CONFIG_BTRFS_DEBUG |
---|
458 | 373 | if (ret) { |
---|
459 | | - WARN_ON(1); |
---|
| 374 | + WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), |
---|
| 375 | + KERN_ERR "BTRFS: tree first key check failed\n"); |
---|
460 | 376 | btrfs_err(fs_info, |
---|
461 | 377 | "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)", |
---|
462 | 378 | eb->start, parent_transid, first_key->objectid, |
---|
.. | .. |
---|
464 | 380 | found_key.objectid, found_key.type, |
---|
465 | 381 | found_key.offset); |
---|
466 | 382 | } |
---|
467 | | -#endif |
---|
468 | 383 | return ret; |
---|
469 | 384 | } |
---|
470 | 385 | |
---|
.. | .. |
---|
476 | 391 | * @level: expected level, mandatory check |
---|
477 | 392 | * @first_key: expected key of first slot, skip check if NULL |
---|
478 | 393 | */ |
---|
479 | | -static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info, |
---|
480 | | - struct extent_buffer *eb, |
---|
| 394 | +static int btree_read_extent_buffer_pages(struct extent_buffer *eb, |
---|
481 | 395 | u64 parent_transid, int level, |
---|
482 | 396 | struct btrfs_key *first_key) |
---|
483 | 397 | { |
---|
| 398 | + struct btrfs_fs_info *fs_info = eb->fs_info; |
---|
484 | 399 | struct extent_io_tree *io_tree; |
---|
485 | 400 | int failed = 0; |
---|
486 | 401 | int ret; |
---|
.. | .. |
---|
491 | 406 | io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree; |
---|
492 | 407 | while (1) { |
---|
493 | 408 | clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); |
---|
494 | | - ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE, |
---|
495 | | - mirror_num); |
---|
| 409 | + ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num); |
---|
496 | 410 | if (!ret) { |
---|
497 | 411 | if (verify_parent_transid(io_tree, eb, |
---|
498 | 412 | parent_transid, 0)) |
---|
499 | 413 | ret = -EIO; |
---|
500 | | - else if (btrfs_verify_level_key(fs_info, eb, level, |
---|
| 414 | + else if (btrfs_verify_level_key(eb, level, |
---|
501 | 415 | first_key, parent_transid)) |
---|
502 | 416 | ret = -EUCLEAN; |
---|
503 | 417 | else |
---|
.. | .. |
---|
523 | 437 | } |
---|
524 | 438 | |
---|
525 | 439 | if (failed && !ret && failed_mirror) |
---|
526 | | - repair_eb_io_failure(fs_info, eb, failed_mirror); |
---|
| 440 | + btrfs_repair_eb_io_failure(eb, failed_mirror); |
---|
527 | 441 | |
---|
528 | 442 | return ret; |
---|
529 | 443 | } |
---|
.. | .. |
---|
537 | 451 | { |
---|
538 | 452 | u64 start = page_offset(page); |
---|
539 | 453 | u64 found_start; |
---|
| 454 | + u8 result[BTRFS_CSUM_SIZE]; |
---|
| 455 | + u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); |
---|
540 | 456 | struct extent_buffer *eb; |
---|
| 457 | + int ret; |
---|
541 | 458 | |
---|
542 | 459 | eb = (struct extent_buffer *)page->private; |
---|
543 | 460 | if (page != eb->pages[0]) |
---|
.. | .. |
---|
553 | 470 | if (WARN_ON(!PageUptodate(page))) |
---|
554 | 471 | return -EUCLEAN; |
---|
555 | 472 | |
---|
556 | | - ASSERT(memcmp_extent_buffer(eb, fs_info->fsid, |
---|
557 | | - btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0); |
---|
| 473 | + ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid, |
---|
| 474 | + offsetof(struct btrfs_header, fsid), |
---|
| 475 | + BTRFS_FSID_SIZE) == 0); |
---|
558 | 476 | |
---|
559 | | - return csum_tree_block(fs_info, eb, 0); |
---|
560 | | -} |
---|
| 477 | + csum_tree_block(eb, result); |
---|
561 | 478 | |
---|
562 | | -static int check_tree_block_fsid(struct btrfs_fs_info *fs_info, |
---|
563 | | - struct extent_buffer *eb) |
---|
564 | | -{ |
---|
565 | | - struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; |
---|
566 | | - u8 fsid[BTRFS_FSID_SIZE]; |
---|
567 | | - int ret = 1; |
---|
| 479 | + if (btrfs_header_level(eb)) |
---|
| 480 | + ret = btrfs_check_node(eb); |
---|
| 481 | + else |
---|
| 482 | + ret = btrfs_check_leaf_full(eb); |
---|
568 | 483 | |
---|
569 | | - read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE); |
---|
570 | | - while (fs_devices) { |
---|
571 | | - if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) { |
---|
572 | | - ret = 0; |
---|
573 | | - break; |
---|
574 | | - } |
---|
575 | | - fs_devices = fs_devices->seed; |
---|
| 484 | + if (ret < 0) { |
---|
| 485 | + btrfs_print_tree(eb, 0); |
---|
| 486 | + btrfs_err(fs_info, |
---|
| 487 | + "block=%llu write time tree block corruption detected", |
---|
| 488 | + eb->start); |
---|
| 489 | + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); |
---|
| 490 | + return ret; |
---|
576 | 491 | } |
---|
577 | | - return ret; |
---|
| 492 | + write_extent_buffer(eb, result, 0, csum_size); |
---|
| 493 | + |
---|
| 494 | + return 0; |
---|
578 | 495 | } |
---|
579 | 496 | |
---|
580 | | -static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, |
---|
581 | | - u64 phy_offset, struct page *page, |
---|
582 | | - u64 start, u64 end, int mirror) |
---|
| 497 | +static int check_tree_block_fsid(struct extent_buffer *eb) |
---|
| 498 | +{ |
---|
| 499 | + struct btrfs_fs_info *fs_info = eb->fs_info; |
---|
| 500 | + struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; |
---|
| 501 | + u8 fsid[BTRFS_FSID_SIZE]; |
---|
| 502 | + u8 *metadata_uuid; |
---|
| 503 | + |
---|
| 504 | + read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid), |
---|
| 505 | + BTRFS_FSID_SIZE); |
---|
| 506 | + /* |
---|
| 507 | + * Checking the incompat flag is only valid for the current fs. For |
---|
| 508 | + * seed devices it's forbidden to have their uuid changed so reading |
---|
| 509 | + * ->fsid in this case is fine |
---|
| 510 | + */ |
---|
| 511 | + if (btrfs_fs_incompat(fs_info, METADATA_UUID)) |
---|
| 512 | + metadata_uuid = fs_devices->metadata_uuid; |
---|
| 513 | + else |
---|
| 514 | + metadata_uuid = fs_devices->fsid; |
---|
| 515 | + |
---|
| 516 | + if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) |
---|
| 517 | + return 0; |
---|
| 518 | + |
---|
| 519 | + list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) |
---|
| 520 | + if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE)) |
---|
| 521 | + return 0; |
---|
| 522 | + |
---|
| 523 | + return 1; |
---|
| 524 | +} |
---|
| 525 | + |
---|
| 526 | +int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset, |
---|
| 527 | + struct page *page, u64 start, u64 end, |
---|
| 528 | + int mirror) |
---|
583 | 529 | { |
---|
584 | 530 | u64 found_start; |
---|
585 | 531 | int found_level; |
---|
586 | 532 | struct extent_buffer *eb; |
---|
587 | | - struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; |
---|
588 | | - struct btrfs_fs_info *fs_info = root->fs_info; |
---|
| 533 | + struct btrfs_fs_info *fs_info; |
---|
| 534 | + u16 csum_size; |
---|
589 | 535 | int ret = 0; |
---|
| 536 | + u8 result[BTRFS_CSUM_SIZE]; |
---|
590 | 537 | int reads_done; |
---|
591 | 538 | |
---|
592 | 539 | if (!page->private) |
---|
593 | 540 | goto out; |
---|
594 | 541 | |
---|
595 | 542 | eb = (struct extent_buffer *)page->private; |
---|
| 543 | + fs_info = eb->fs_info; |
---|
| 544 | + csum_size = btrfs_super_csum_size(fs_info->super_copy); |
---|
596 | 545 | |
---|
597 | 546 | /* the pending IO might have been the only thing that kept this buffer |
---|
598 | 547 | * in memory. Make sure we have a ref for all this other checks |
---|
599 | 548 | */ |
---|
600 | | - extent_buffer_get(eb); |
---|
| 549 | + atomic_inc(&eb->refs); |
---|
601 | 550 | |
---|
602 | 551 | reads_done = atomic_dec_and_test(&eb->io_pages); |
---|
603 | 552 | if (!reads_done) |
---|
.. | .. |
---|
616 | 565 | ret = -EIO; |
---|
617 | 566 | goto err; |
---|
618 | 567 | } |
---|
619 | | - if (check_tree_block_fsid(fs_info, eb)) { |
---|
| 568 | + if (check_tree_block_fsid(eb)) { |
---|
620 | 569 | btrfs_err_rl(fs_info, "bad fsid on block %llu", |
---|
621 | 570 | eb->start); |
---|
622 | 571 | ret = -EIO; |
---|
.. | .. |
---|
633 | 582 | btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), |
---|
634 | 583 | eb, found_level); |
---|
635 | 584 | |
---|
636 | | - ret = csum_tree_block(fs_info, eb, 1); |
---|
637 | | - if (ret) |
---|
| 585 | + csum_tree_block(eb, result); |
---|
| 586 | + |
---|
| 587 | + if (memcmp_extent_buffer(eb, result, 0, csum_size)) { |
---|
| 588 | + u8 val[BTRFS_CSUM_SIZE] = { 0 }; |
---|
| 589 | + |
---|
| 590 | + read_extent_buffer(eb, &val, 0, csum_size); |
---|
| 591 | + btrfs_warn_rl(fs_info, |
---|
| 592 | + "%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d", |
---|
| 593 | + fs_info->sb->s_id, eb->start, |
---|
| 594 | + CSUM_FMT_VALUE(csum_size, val), |
---|
| 595 | + CSUM_FMT_VALUE(csum_size, result), |
---|
| 596 | + btrfs_header_level(eb)); |
---|
| 597 | + ret = -EUCLEAN; |
---|
638 | 598 | goto err; |
---|
| 599 | + } |
---|
639 | 600 | |
---|
640 | 601 | /* |
---|
641 | 602 | * If this is a leaf block and it is corrupt, set the corrupt bit so |
---|
642 | 603 | * that we don't try and read the other copies of this block, just |
---|
643 | 604 | * return -EIO. |
---|
644 | 605 | */ |
---|
645 | | - if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) { |
---|
| 606 | + if (found_level == 0 && btrfs_check_leaf_full(eb)) { |
---|
646 | 607 | set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); |
---|
647 | 608 | ret = -EIO; |
---|
648 | 609 | } |
---|
649 | 610 | |
---|
650 | | - if (found_level > 0 && btrfs_check_node(fs_info, eb)) |
---|
| 611 | + if (found_level > 0 && btrfs_check_node(eb)) |
---|
651 | 612 | ret = -EIO; |
---|
652 | 613 | |
---|
653 | 614 | if (!ret) |
---|
654 | 615 | set_extent_buffer_uptodate(eb); |
---|
| 616 | + else |
---|
| 617 | + btrfs_err(fs_info, |
---|
| 618 | + "block=%llu read time tree block corruption detected", |
---|
| 619 | + eb->start); |
---|
655 | 620 | err: |
---|
656 | 621 | if (reads_done && |
---|
657 | 622 | test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) |
---|
.. | .. |
---|
671 | 636 | return ret; |
---|
672 | 637 | } |
---|
673 | 638 | |
---|
674 | | -static int btree_io_failed_hook(struct page *page, int failed_mirror) |
---|
675 | | -{ |
---|
676 | | - struct extent_buffer *eb; |
---|
677 | | - |
---|
678 | | - eb = (struct extent_buffer *)page->private; |
---|
679 | | - set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); |
---|
680 | | - eb->read_mirror = failed_mirror; |
---|
681 | | - atomic_dec(&eb->io_pages); |
---|
682 | | - if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) |
---|
683 | | - btree_readahead_hook(eb, -EIO); |
---|
684 | | - return -EIO; /* we fixed nothing */ |
---|
685 | | -} |
---|
686 | | - |
---|
687 | 639 | static void end_workqueue_bio(struct bio *bio) |
---|
688 | 640 | { |
---|
689 | 641 | struct btrfs_end_io_wq *end_io_wq = bio->bi_private; |
---|
690 | 642 | struct btrfs_fs_info *fs_info; |
---|
691 | 643 | struct btrfs_workqueue *wq; |
---|
692 | | - btrfs_work_func_t func; |
---|
693 | 644 | |
---|
694 | 645 | fs_info = end_io_wq->info; |
---|
695 | 646 | end_io_wq->status = bio->bi_status; |
---|
696 | 647 | |
---|
697 | 648 | if (bio_op(bio) == REQ_OP_WRITE) { |
---|
698 | | - if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { |
---|
| 649 | + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) |
---|
699 | 650 | wq = fs_info->endio_meta_write_workers; |
---|
700 | | - func = btrfs_endio_meta_write_helper; |
---|
701 | | - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { |
---|
| 651 | + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) |
---|
702 | 652 | wq = fs_info->endio_freespace_worker; |
---|
703 | | - func = btrfs_freespace_write_helper; |
---|
704 | | - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { |
---|
| 653 | + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) |
---|
705 | 654 | wq = fs_info->endio_raid56_workers; |
---|
706 | | - func = btrfs_endio_raid56_helper; |
---|
707 | | - } else { |
---|
| 655 | + else |
---|
708 | 656 | wq = fs_info->endio_write_workers; |
---|
709 | | - func = btrfs_endio_write_helper; |
---|
710 | | - } |
---|
711 | 657 | } else { |
---|
712 | | - if (unlikely(end_io_wq->metadata == |
---|
713 | | - BTRFS_WQ_ENDIO_DIO_REPAIR)) { |
---|
714 | | - wq = fs_info->endio_repair_workers; |
---|
715 | | - func = btrfs_endio_repair_helper; |
---|
716 | | - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { |
---|
| 658 | + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) |
---|
717 | 659 | wq = fs_info->endio_raid56_workers; |
---|
718 | | - func = btrfs_endio_raid56_helper; |
---|
719 | | - } else if (end_io_wq->metadata) { |
---|
| 660 | + else if (end_io_wq->metadata) |
---|
720 | 661 | wq = fs_info->endio_meta_workers; |
---|
721 | | - func = btrfs_endio_meta_helper; |
---|
722 | | - } else { |
---|
| 662 | + else |
---|
723 | 663 | wq = fs_info->endio_workers; |
---|
724 | | - func = btrfs_endio_helper; |
---|
725 | | - } |
---|
726 | 664 | } |
---|
727 | 665 | |
---|
728 | | - btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); |
---|
| 666 | + btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); |
---|
729 | 667 | btrfs_queue_work(wq, &end_io_wq->work); |
---|
730 | 668 | } |
---|
731 | 669 | |
---|
.. | .. |
---|
762 | 700 | async->status = ret; |
---|
763 | 701 | } |
---|
764 | 702 | |
---|
| 703 | +/* |
---|
| 704 | + * In order to insert checksums into the metadata in large chunks, we wait |
---|
| 705 | + * until bio submission time. All the pages in the bio are checksummed and |
---|
| 706 | + * sums are attached onto the ordered extent record. |
---|
| 707 | + * |
---|
| 708 | + * At IO completion time the csums attached on the ordered extent record are |
---|
| 709 | + * inserted into the tree. |
---|
| 710 | + */ |
---|
765 | 711 | static void run_one_async_done(struct btrfs_work *work) |
---|
766 | 712 | { |
---|
767 | 713 | struct async_submit_bio *async; |
---|
| 714 | + struct inode *inode; |
---|
| 715 | + blk_status_t ret; |
---|
768 | 716 | |
---|
769 | 717 | async = container_of(work, struct async_submit_bio, work); |
---|
| 718 | + inode = async->private_data; |
---|
770 | 719 | |
---|
771 | 720 | /* If an error occurred we just want to clean up the bio and move on */ |
---|
772 | 721 | if (async->status) { |
---|
.. | .. |
---|
775 | 724 | return; |
---|
776 | 725 | } |
---|
777 | 726 | |
---|
778 | | - btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num); |
---|
| 727 | + /* |
---|
| 728 | + * All of the bios that pass through here are from async helpers. |
---|
| 729 | + * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context. |
---|
| 730 | + * This changes nothing when cgroups aren't in use. |
---|
| 731 | + */ |
---|
| 732 | + async->bio->bi_opf |= REQ_CGROUP_PUNT; |
---|
| 733 | + ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num); |
---|
| 734 | + if (ret) { |
---|
| 735 | + async->bio->bi_status = ret; |
---|
| 736 | + bio_endio(async->bio); |
---|
| 737 | + } |
---|
779 | 738 | } |
---|
780 | 739 | |
---|
781 | 740 | static void run_one_async_free(struct btrfs_work *work) |
---|
.. | .. |
---|
802 | 761 | async->mirror_num = mirror_num; |
---|
803 | 762 | async->submit_bio_start = submit_bio_start; |
---|
804 | 763 | |
---|
805 | | - btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, |
---|
806 | | - run_one_async_done, run_one_async_free); |
---|
| 764 | + btrfs_init_work(&async->work, run_one_async_start, run_one_async_done, |
---|
| 765 | + run_one_async_free); |
---|
807 | 766 | |
---|
808 | 767 | async->bio_offset = bio_offset; |
---|
809 | 768 | |
---|
.. | .. |
---|
820 | 779 | { |
---|
821 | 780 | struct bio_vec *bvec; |
---|
822 | 781 | struct btrfs_root *root; |
---|
823 | | - int i, ret = 0; |
---|
| 782 | + int ret = 0; |
---|
| 783 | + struct bvec_iter_all iter_all; |
---|
824 | 784 | |
---|
825 | 785 | ASSERT(!bio_flagged(bio, BIO_CLONED)); |
---|
826 | | - bio_for_each_segment_all(bvec, bio, i) { |
---|
| 786 | + bio_for_each_segment_all(bvec, bio, iter_all) { |
---|
827 | 787 | root = BTRFS_I(bvec->bv_page->mapping->host)->root; |
---|
828 | 788 | ret = csum_dirty_buffer(root->fs_info, bvec->bv_page); |
---|
829 | 789 | if (ret) |
---|
.. | .. |
---|
843 | 803 | return btree_csum_one_bio(bio); |
---|
844 | 804 | } |
---|
845 | 805 | |
---|
846 | | -static int check_async_write(struct btrfs_inode *bi) |
---|
| 806 | +static int check_async_write(struct btrfs_fs_info *fs_info, |
---|
| 807 | + struct btrfs_inode *bi) |
---|
847 | 808 | { |
---|
848 | 809 | if (atomic_read(&bi->sync_writers)) |
---|
849 | 810 | return 0; |
---|
850 | | -#ifdef CONFIG_X86 |
---|
851 | | - if (static_cpu_has(X86_FEATURE_XMM4_2)) |
---|
| 811 | + if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags)) |
---|
852 | 812 | return 0; |
---|
853 | | -#endif |
---|
854 | 813 | return 1; |
---|
855 | 814 | } |
---|
856 | 815 | |
---|
857 | | -static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio, |
---|
858 | | - int mirror_num, unsigned long bio_flags, |
---|
859 | | - u64 bio_offset) |
---|
| 816 | +blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, |
---|
| 817 | + int mirror_num, unsigned long bio_flags) |
---|
860 | 818 | { |
---|
861 | | - struct inode *inode = private_data; |
---|
862 | 819 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
---|
863 | | - int async = check_async_write(BTRFS_I(inode)); |
---|
| 820 | + int async = check_async_write(fs_info, BTRFS_I(inode)); |
---|
864 | 821 | blk_status_t ret; |
---|
865 | 822 | |
---|
866 | 823 | if (bio_op(bio) != REQ_OP_WRITE) { |
---|
.. | .. |
---|
872 | 829 | BTRFS_WQ_ENDIO_METADATA); |
---|
873 | 830 | if (ret) |
---|
874 | 831 | goto out_w_error; |
---|
875 | | - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); |
---|
| 832 | + ret = btrfs_map_bio(fs_info, bio, mirror_num); |
---|
876 | 833 | } else if (!async) { |
---|
877 | 834 | ret = btree_csum_one_bio(bio); |
---|
878 | 835 | if (ret) |
---|
879 | 836 | goto out_w_error; |
---|
880 | | - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); |
---|
| 837 | + ret = btrfs_map_bio(fs_info, bio, mirror_num); |
---|
881 | 838 | } else { |
---|
882 | 839 | /* |
---|
883 | 840 | * kthread helpers are used to submit writes so that |
---|
884 | 841 | * checksumming can happen in parallel across all CPUs |
---|
885 | 842 | */ |
---|
886 | 843 | ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0, |
---|
887 | | - bio_offset, private_data, |
---|
888 | | - btree_submit_bio_start); |
---|
| 844 | + 0, inode, btree_submit_bio_start); |
---|
889 | 845 | } |
---|
890 | 846 | |
---|
891 | 847 | if (ret) |
---|
.. | .. |
---|
943 | 899 | return btree_write_cache_pages(mapping, wbc); |
---|
944 | 900 | } |
---|
945 | 901 | |
---|
946 | | -static int btree_readpage(struct file *file, struct page *page) |
---|
947 | | -{ |
---|
948 | | - struct extent_io_tree *tree; |
---|
949 | | - tree = &BTRFS_I(page->mapping->host)->io_tree; |
---|
950 | | - return extent_read_full_page(tree, page, btree_get_extent, 0); |
---|
951 | | -} |
---|
952 | | - |
---|
953 | 902 | static int btree_releasepage(struct page *page, gfp_t gfp_flags) |
---|
954 | 903 | { |
---|
955 | 904 | if (PageWriteback(page) || PageDirty(page)) |
---|
.. | .. |
---|
969 | 918 | btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info, |
---|
970 | 919 | "page private not zero on page %llu", |
---|
971 | 920 | (unsigned long long)page_offset(page)); |
---|
972 | | - ClearPagePrivate(page); |
---|
973 | | - set_page_private(page, 0); |
---|
974 | | - put_page(page); |
---|
| 921 | + detach_page_private(page); |
---|
975 | 922 | } |
---|
976 | 923 | } |
---|
977 | 924 | |
---|
.. | .. |
---|
991 | 938 | } |
---|
992 | 939 | |
---|
993 | 940 | static const struct address_space_operations btree_aops = { |
---|
994 | | - .readpage = btree_readpage, |
---|
995 | 941 | .writepages = btree_writepages, |
---|
996 | 942 | .releasepage = btree_releasepage, |
---|
997 | 943 | .invalidatepage = btree_invalidatepage, |
---|
.. | .. |
---|
1004 | 950 | void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr) |
---|
1005 | 951 | { |
---|
1006 | 952 | struct extent_buffer *buf = NULL; |
---|
1007 | | - struct inode *btree_inode = fs_info->btree_inode; |
---|
1008 | 953 | int ret; |
---|
1009 | 954 | |
---|
1010 | 955 | buf = btrfs_find_create_tree_block(fs_info, bytenr); |
---|
1011 | 956 | if (IS_ERR(buf)) |
---|
1012 | 957 | return; |
---|
1013 | 958 | |
---|
1014 | | - ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, |
---|
1015 | | - WAIT_NONE, 0); |
---|
| 959 | + ret = read_extent_buffer_pages(buf, WAIT_NONE, 0); |
---|
1016 | 960 | if (ret < 0) |
---|
1017 | 961 | free_extent_buffer_stale(buf); |
---|
1018 | 962 | else |
---|
1019 | 963 | free_extent_buffer(buf); |
---|
1020 | | -} |
---|
1021 | | - |
---|
1022 | | -int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr, |
---|
1023 | | - int mirror_num, struct extent_buffer **eb) |
---|
1024 | | -{ |
---|
1025 | | - struct extent_buffer *buf = NULL; |
---|
1026 | | - struct inode *btree_inode = fs_info->btree_inode; |
---|
1027 | | - struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree; |
---|
1028 | | - int ret; |
---|
1029 | | - |
---|
1030 | | - buf = btrfs_find_create_tree_block(fs_info, bytenr); |
---|
1031 | | - if (IS_ERR(buf)) |
---|
1032 | | - return 0; |
---|
1033 | | - |
---|
1034 | | - set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); |
---|
1035 | | - |
---|
1036 | | - ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK, |
---|
1037 | | - mirror_num); |
---|
1038 | | - if (ret) { |
---|
1039 | | - free_extent_buffer_stale(buf); |
---|
1040 | | - return ret; |
---|
1041 | | - } |
---|
1042 | | - |
---|
1043 | | - if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { |
---|
1044 | | - free_extent_buffer_stale(buf); |
---|
1045 | | - return -EIO; |
---|
1046 | | - } else if (extent_buffer_uptodate(buf)) { |
---|
1047 | | - *eb = buf; |
---|
1048 | | - } else { |
---|
1049 | | - free_extent_buffer(buf); |
---|
1050 | | - } |
---|
1051 | | - return 0; |
---|
1052 | 964 | } |
---|
1053 | 965 | |
---|
1054 | 966 | struct extent_buffer *btrfs_find_create_tree_block( |
---|
.. | .. |
---|
1058 | 970 | if (btrfs_is_testing(fs_info)) |
---|
1059 | 971 | return alloc_test_extent_buffer(fs_info, bytenr); |
---|
1060 | 972 | return alloc_extent_buffer(fs_info, bytenr); |
---|
1061 | | -} |
---|
1062 | | - |
---|
1063 | | - |
---|
1064 | | -int btrfs_write_tree_block(struct extent_buffer *buf) |
---|
1065 | | -{ |
---|
1066 | | - return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start, |
---|
1067 | | - buf->start + buf->len - 1); |
---|
1068 | | -} |
---|
1069 | | - |
---|
1070 | | -void btrfs_wait_tree_block_writeback(struct extent_buffer *buf) |
---|
1071 | | -{ |
---|
1072 | | - filemap_fdatawait_range(buf->pages[0]->mapping, |
---|
1073 | | - buf->start, buf->start + buf->len - 1); |
---|
1074 | 973 | } |
---|
1075 | 974 | |
---|
1076 | 975 | /* |
---|
.. | .. |
---|
1092 | 991 | if (IS_ERR(buf)) |
---|
1093 | 992 | return buf; |
---|
1094 | 993 | |
---|
1095 | | - ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid, |
---|
| 994 | + ret = btree_read_extent_buffer_pages(buf, parent_transid, |
---|
1096 | 995 | level, first_key); |
---|
1097 | 996 | if (ret) { |
---|
1098 | 997 | free_extent_buffer_stale(buf); |
---|
.. | .. |
---|
1102 | 1001 | |
---|
1103 | 1002 | } |
---|
1104 | 1003 | |
---|
1105 | | -void clean_tree_block(struct btrfs_fs_info *fs_info, |
---|
1106 | | - struct extent_buffer *buf) |
---|
| 1004 | +void btrfs_clean_tree_block(struct extent_buffer *buf) |
---|
1107 | 1005 | { |
---|
| 1006 | + struct btrfs_fs_info *fs_info = buf->fs_info; |
---|
1108 | 1007 | if (btrfs_header_generation(buf) == |
---|
1109 | 1008 | fs_info->running_transaction->transid) { |
---|
1110 | 1009 | btrfs_assert_tree_locked(buf); |
---|
.. | .. |
---|
1114 | 1013 | -buf->len, |
---|
1115 | 1014 | fs_info->dirty_metadata_batch); |
---|
1116 | 1015 | /* ugh, clear_extent_buffer_dirty needs to lock the page */ |
---|
1117 | | - btrfs_set_lock_blocking(buf); |
---|
| 1016 | + btrfs_set_lock_blocking_write(buf); |
---|
1118 | 1017 | clear_extent_buffer_dirty(buf); |
---|
1119 | 1018 | } |
---|
1120 | 1019 | } |
---|
1121 | | -} |
---|
1122 | | - |
---|
1123 | | -static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void) |
---|
1124 | | -{ |
---|
1125 | | - struct btrfs_subvolume_writers *writers; |
---|
1126 | | - int ret; |
---|
1127 | | - |
---|
1128 | | - writers = kmalloc(sizeof(*writers), GFP_NOFS); |
---|
1129 | | - if (!writers) |
---|
1130 | | - return ERR_PTR(-ENOMEM); |
---|
1131 | | - |
---|
1132 | | - ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS); |
---|
1133 | | - if (ret < 0) { |
---|
1134 | | - kfree(writers); |
---|
1135 | | - return ERR_PTR(ret); |
---|
1136 | | - } |
---|
1137 | | - |
---|
1138 | | - init_waitqueue_head(&writers->wait); |
---|
1139 | | - return writers; |
---|
1140 | | -} |
---|
1141 | | - |
---|
1142 | | -static void |
---|
1143 | | -btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers) |
---|
1144 | | -{ |
---|
1145 | | - percpu_counter_destroy(&writers->counter); |
---|
1146 | | - kfree(writers); |
---|
1147 | 1020 | } |
---|
1148 | 1021 | |
---|
1149 | 1022 | static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, |
---|
1150 | 1023 | u64 objectid) |
---|
1151 | 1024 | { |
---|
1152 | 1025 | bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state); |
---|
| 1026 | + root->fs_info = fs_info; |
---|
1153 | 1027 | root->node = NULL; |
---|
1154 | 1028 | root->commit_root = NULL; |
---|
1155 | 1029 | root->state = 0; |
---|
1156 | 1030 | root->orphan_cleanup_state = 0; |
---|
1157 | 1031 | |
---|
1158 | | - root->objectid = objectid; |
---|
1159 | 1032 | root->last_trans = 0; |
---|
1160 | 1033 | root->highest_objectid = 0; |
---|
1161 | 1034 | root->nr_delalloc_inodes = 0; |
---|
.. | .. |
---|
1170 | 1043 | INIT_LIST_HEAD(&root->delalloc_root); |
---|
1171 | 1044 | INIT_LIST_HEAD(&root->ordered_extents); |
---|
1172 | 1045 | INIT_LIST_HEAD(&root->ordered_root); |
---|
| 1046 | + INIT_LIST_HEAD(&root->reloc_dirty_list); |
---|
1173 | 1047 | INIT_LIST_HEAD(&root->logged_list[0]); |
---|
1174 | 1048 | INIT_LIST_HEAD(&root->logged_list[1]); |
---|
1175 | 1049 | spin_lock_init(&root->inode_lock); |
---|
.. | .. |
---|
1183 | 1057 | mutex_init(&root->log_mutex); |
---|
1184 | 1058 | mutex_init(&root->ordered_extent_mutex); |
---|
1185 | 1059 | mutex_init(&root->delalloc_mutex); |
---|
| 1060 | + init_waitqueue_head(&root->qgroup_flush_wait); |
---|
1186 | 1061 | init_waitqueue_head(&root->log_writer_wait); |
---|
1187 | 1062 | init_waitqueue_head(&root->log_commit_wait[0]); |
---|
1188 | 1063 | init_waitqueue_head(&root->log_commit_wait[1]); |
---|
.. | .. |
---|
1193 | 1068 | atomic_set(&root->log_writers, 0); |
---|
1194 | 1069 | atomic_set(&root->log_batch, 0); |
---|
1195 | 1070 | refcount_set(&root->refs, 1); |
---|
1196 | | - atomic_set(&root->will_be_snapshotted, 0); |
---|
1197 | 1071 | atomic_set(&root->snapshot_force_cow, 0); |
---|
| 1072 | + atomic_set(&root->nr_swapfiles, 0); |
---|
1198 | 1073 | root->log_transid = 0; |
---|
1199 | 1074 | root->log_transid_committed = -1; |
---|
1200 | 1075 | root->last_log_commit = 0; |
---|
1201 | | - if (!dummy) |
---|
1202 | | - extent_io_tree_init(&root->dirty_log_pages, NULL); |
---|
| 1076 | + if (!dummy) { |
---|
| 1077 | + extent_io_tree_init(fs_info, &root->dirty_log_pages, |
---|
| 1078 | + IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL); |
---|
| 1079 | + extent_io_tree_init(fs_info, &root->log_csum_range, |
---|
| 1080 | + IO_TREE_LOG_CSUM_RANGE, NULL); |
---|
| 1081 | + } |
---|
1203 | 1082 | |
---|
1204 | 1083 | memset(&root->root_key, 0, sizeof(root->root_key)); |
---|
1205 | 1084 | memset(&root->root_item, 0, sizeof(root->root_item)); |
---|
1206 | 1085 | memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); |
---|
1207 | | - if (!dummy) |
---|
1208 | | - root->defrag_trans_start = fs_info->generation; |
---|
1209 | | - else |
---|
1210 | | - root->defrag_trans_start = 0; |
---|
1211 | 1086 | root->root_key.objectid = objectid; |
---|
1212 | 1087 | root->anon_dev = 0; |
---|
1213 | 1088 | |
---|
1214 | 1089 | spin_lock_init(&root->root_item_lock); |
---|
| 1090 | + btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks); |
---|
| 1091 | +#ifdef CONFIG_BTRFS_DEBUG |
---|
| 1092 | + INIT_LIST_HEAD(&root->leak_list); |
---|
| 1093 | + spin_lock(&fs_info->fs_roots_radix_lock); |
---|
| 1094 | + list_add_tail(&root->leak_list, &fs_info->allocated_roots); |
---|
| 1095 | + spin_unlock(&fs_info->fs_roots_radix_lock); |
---|
| 1096 | +#endif |
---|
1215 | 1097 | } |
---|
1216 | 1098 | |
---|
1217 | 1099 | static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info, |
---|
1218 | | - gfp_t flags) |
---|
| 1100 | + u64 objectid, gfp_t flags) |
---|
1219 | 1101 | { |
---|
1220 | 1102 | struct btrfs_root *root = kzalloc(sizeof(*root), flags); |
---|
1221 | 1103 | if (root) |
---|
1222 | | - root->fs_info = fs_info; |
---|
| 1104 | + __setup_root(root, fs_info, objectid); |
---|
1223 | 1105 | return root; |
---|
1224 | 1106 | } |
---|
1225 | 1107 | |
---|
.. | .. |
---|
1232 | 1114 | if (!fs_info) |
---|
1233 | 1115 | return ERR_PTR(-EINVAL); |
---|
1234 | 1116 | |
---|
1235 | | - root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
---|
| 1117 | + root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL); |
---|
1236 | 1118 | if (!root) |
---|
1237 | 1119 | return ERR_PTR(-ENOMEM); |
---|
1238 | 1120 | |
---|
1239 | 1121 | /* We don't use the stripesize in selftest, set it as sectorsize */ |
---|
1240 | | - __setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID); |
---|
1241 | 1122 | root->alloc_bytenr = 0; |
---|
1242 | 1123 | |
---|
1243 | 1124 | return root; |
---|
.. | .. |
---|
1245 | 1126 | #endif |
---|
1246 | 1127 | |
---|
1247 | 1128 | struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, |
---|
1248 | | - struct btrfs_fs_info *fs_info, |
---|
1249 | 1129 | u64 objectid) |
---|
1250 | 1130 | { |
---|
| 1131 | + struct btrfs_fs_info *fs_info = trans->fs_info; |
---|
1251 | 1132 | struct extent_buffer *leaf; |
---|
1252 | 1133 | struct btrfs_root *tree_root = fs_info->tree_root; |
---|
1253 | 1134 | struct btrfs_root *root; |
---|
1254 | 1135 | struct btrfs_key key; |
---|
1255 | 1136 | unsigned int nofs_flag; |
---|
1256 | 1137 | int ret = 0; |
---|
1257 | | - uuid_le uuid = NULL_UUID_LE; |
---|
1258 | 1138 | |
---|
1259 | 1139 | /* |
---|
1260 | 1140 | * We're holding a transaction handle, so use a NOFS memory allocation |
---|
1261 | 1141 | * context to avoid deadlock if reclaim happens. |
---|
1262 | 1142 | */ |
---|
1263 | 1143 | nofs_flag = memalloc_nofs_save(); |
---|
1264 | | - root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
---|
| 1144 | + root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL); |
---|
1265 | 1145 | memalloc_nofs_restore(nofs_flag); |
---|
1266 | 1146 | if (!root) |
---|
1267 | 1147 | return ERR_PTR(-ENOMEM); |
---|
1268 | 1148 | |
---|
1269 | | - __setup_root(root, fs_info, objectid); |
---|
1270 | 1149 | root->root_key.objectid = objectid; |
---|
1271 | 1150 | root->root_key.type = BTRFS_ROOT_ITEM_KEY; |
---|
1272 | 1151 | root->root_key.offset = 0; |
---|
1273 | 1152 | |
---|
1274 | | - leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0); |
---|
| 1153 | + leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0, |
---|
| 1154 | + BTRFS_NESTING_NORMAL); |
---|
1275 | 1155 | if (IS_ERR(leaf)) { |
---|
1276 | 1156 | ret = PTR_ERR(leaf); |
---|
1277 | 1157 | leaf = NULL; |
---|
.. | .. |
---|
1294 | 1174 | btrfs_set_root_last_snapshot(&root->root_item, 0); |
---|
1295 | 1175 | btrfs_set_root_dirid(&root->root_item, 0); |
---|
1296 | 1176 | if (is_fstree(objectid)) |
---|
1297 | | - uuid_le_gen(&uuid); |
---|
1298 | | - memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE); |
---|
| 1177 | + generate_random_guid(root->root_item.uuid); |
---|
| 1178 | + else |
---|
| 1179 | + export_guid(root->root_item.uuid, &guid_null); |
---|
1299 | 1180 | root->root_item.drop_level = 0; |
---|
1300 | 1181 | |
---|
1301 | 1182 | key.objectid = objectid; |
---|
.. | .. |
---|
1310 | 1191 | return root; |
---|
1311 | 1192 | |
---|
1312 | 1193 | fail: |
---|
1313 | | - if (leaf) { |
---|
| 1194 | + if (leaf) |
---|
1314 | 1195 | btrfs_tree_unlock(leaf); |
---|
1315 | | - free_extent_buffer(root->commit_root); |
---|
1316 | | - free_extent_buffer(leaf); |
---|
1317 | | - } |
---|
1318 | | - kfree(root); |
---|
| 1196 | + btrfs_put_root(root); |
---|
1319 | 1197 | |
---|
1320 | 1198 | return ERR_PTR(ret); |
---|
1321 | 1199 | } |
---|
.. | .. |
---|
1326 | 1204 | struct btrfs_root *root; |
---|
1327 | 1205 | struct extent_buffer *leaf; |
---|
1328 | 1206 | |
---|
1329 | | - root = btrfs_alloc_root(fs_info, GFP_NOFS); |
---|
| 1207 | + root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS); |
---|
1330 | 1208 | if (!root) |
---|
1331 | 1209 | return ERR_PTR(-ENOMEM); |
---|
1332 | | - |
---|
1333 | | - __setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID); |
---|
1334 | 1210 | |
---|
1335 | 1211 | root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID; |
---|
1336 | 1212 | root->root_key.type = BTRFS_ROOT_ITEM_KEY; |
---|
1337 | 1213 | root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; |
---|
1338 | 1214 | |
---|
1339 | 1215 | /* |
---|
1340 | | - * DON'T set REF_COWS for log trees |
---|
| 1216 | + * DON'T set SHAREABLE bit for log trees. |
---|
1341 | 1217 | * |
---|
1342 | | - * log trees do not get reference counted because they go away |
---|
1343 | | - * before a real commit is actually done. They do store pointers |
---|
1344 | | - * to file data extents, and those reference counts still get |
---|
1345 | | - * updated (along with back refs to the log tree). |
---|
| 1218 | + * Log trees are not exposed to user space thus can't be snapshotted, |
---|
| 1219 | + * and they go away before a real commit is actually done. |
---|
| 1220 | + * |
---|
| 1221 | + * They do store pointers to file data extents, and those reference |
---|
| 1222 | + * counts still get updated (along with back refs to the log tree). |
---|
1346 | 1223 | */ |
---|
1347 | 1224 | |
---|
1348 | 1225 | leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID, |
---|
1349 | | - NULL, 0, 0, 0); |
---|
| 1226 | + NULL, 0, 0, 0, BTRFS_NESTING_NORMAL); |
---|
1350 | 1227 | if (IS_ERR(leaf)) { |
---|
1351 | | - kfree(root); |
---|
| 1228 | + btrfs_put_root(root); |
---|
1352 | 1229 | return ERR_CAST(leaf); |
---|
1353 | 1230 | } |
---|
1354 | 1231 | |
---|
.. | .. |
---|
1404 | 1281 | return 0; |
---|
1405 | 1282 | } |
---|
1406 | 1283 | |
---|
1407 | | -static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root, |
---|
1408 | | - struct btrfs_key *key) |
---|
| 1284 | +static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root, |
---|
| 1285 | + struct btrfs_path *path, |
---|
| 1286 | + struct btrfs_key *key) |
---|
1409 | 1287 | { |
---|
1410 | 1288 | struct btrfs_root *root; |
---|
1411 | 1289 | struct btrfs_fs_info *fs_info = tree_root->fs_info; |
---|
1412 | | - struct btrfs_path *path; |
---|
1413 | 1290 | u64 generation; |
---|
1414 | 1291 | int ret; |
---|
1415 | 1292 | int level; |
---|
1416 | 1293 | |
---|
1417 | | - path = btrfs_alloc_path(); |
---|
1418 | | - if (!path) |
---|
| 1294 | + root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS); |
---|
| 1295 | + if (!root) |
---|
1419 | 1296 | return ERR_PTR(-ENOMEM); |
---|
1420 | | - |
---|
1421 | | - root = btrfs_alloc_root(fs_info, GFP_NOFS); |
---|
1422 | | - if (!root) { |
---|
1423 | | - ret = -ENOMEM; |
---|
1424 | | - goto alloc_fail; |
---|
1425 | | - } |
---|
1426 | | - |
---|
1427 | | - __setup_root(root, fs_info, key->objectid); |
---|
1428 | 1297 | |
---|
1429 | 1298 | ret = btrfs_find_root(tree_root, key, path, |
---|
1430 | 1299 | &root->root_item, &root->root_key); |
---|
1431 | 1300 | if (ret) { |
---|
1432 | 1301 | if (ret > 0) |
---|
1433 | 1302 | ret = -ENOENT; |
---|
1434 | | - goto find_fail; |
---|
| 1303 | + goto fail; |
---|
1435 | 1304 | } |
---|
1436 | 1305 | |
---|
1437 | 1306 | generation = btrfs_root_generation(&root->root_item); |
---|
.. | .. |
---|
1441 | 1310 | generation, level, NULL); |
---|
1442 | 1311 | if (IS_ERR(root->node)) { |
---|
1443 | 1312 | ret = PTR_ERR(root->node); |
---|
1444 | | - goto find_fail; |
---|
| 1313 | + root->node = NULL; |
---|
| 1314 | + goto fail; |
---|
1445 | 1315 | } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) { |
---|
1446 | 1316 | ret = -EIO; |
---|
1447 | | - free_extent_buffer(root->node); |
---|
1448 | | - goto find_fail; |
---|
| 1317 | + goto fail; |
---|
1449 | 1318 | } |
---|
1450 | 1319 | root->commit_root = btrfs_root_node(root); |
---|
1451 | | -out: |
---|
1452 | | - btrfs_free_path(path); |
---|
1453 | 1320 | return root; |
---|
1454 | | - |
---|
1455 | | -find_fail: |
---|
1456 | | - kfree(root); |
---|
1457 | | -alloc_fail: |
---|
1458 | | - root = ERR_PTR(ret); |
---|
1459 | | - goto out; |
---|
| 1321 | +fail: |
---|
| 1322 | + btrfs_put_root(root); |
---|
| 1323 | + return ERR_PTR(ret); |
---|
1460 | 1324 | } |
---|
1461 | 1325 | |
---|
1462 | | -struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root, |
---|
1463 | | - struct btrfs_key *location) |
---|
| 1326 | +struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root, |
---|
| 1327 | + struct btrfs_key *key) |
---|
1464 | 1328 | { |
---|
1465 | 1329 | struct btrfs_root *root; |
---|
| 1330 | + struct btrfs_path *path; |
---|
1466 | 1331 | |
---|
1467 | | - root = btrfs_read_tree_root(tree_root, location); |
---|
1468 | | - if (IS_ERR(root)) |
---|
1469 | | - return root; |
---|
1470 | | - |
---|
1471 | | - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { |
---|
1472 | | - set_bit(BTRFS_ROOT_REF_COWS, &root->state); |
---|
1473 | | - btrfs_check_and_init_root_item(&root->root_item); |
---|
1474 | | - } |
---|
| 1332 | + path = btrfs_alloc_path(); |
---|
| 1333 | + if (!path) |
---|
| 1334 | + return ERR_PTR(-ENOMEM); |
---|
| 1335 | + root = read_tree_root_path(tree_root, path, key); |
---|
| 1336 | + btrfs_free_path(path); |
---|
1475 | 1337 | |
---|
1476 | 1338 | return root; |
---|
1477 | 1339 | } |
---|
1478 | 1340 | |
---|
1479 | | -int btrfs_init_fs_root(struct btrfs_root *root) |
---|
| 1341 | +/* |
---|
| 1342 | + * Initialize subvolume root in-memory structure |
---|
| 1343 | + * |
---|
| 1344 | + * @anon_dev: anonymous device to attach to the root, if zero, allocate new |
---|
| 1345 | + */ |
---|
| 1346 | +static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) |
---|
1480 | 1347 | { |
---|
1481 | 1348 | int ret; |
---|
1482 | | - struct btrfs_subvolume_writers *writers; |
---|
| 1349 | + unsigned int nofs_flag; |
---|
1483 | 1350 | |
---|
1484 | 1351 | root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); |
---|
1485 | 1352 | root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), |
---|
.. | .. |
---|
1489 | 1356 | goto fail; |
---|
1490 | 1357 | } |
---|
1491 | 1358 | |
---|
1492 | | - writers = btrfs_alloc_subvolume_writers(); |
---|
1493 | | - if (IS_ERR(writers)) { |
---|
1494 | | - ret = PTR_ERR(writers); |
---|
| 1359 | + /* |
---|
| 1360 | + * We might be called under a transaction (e.g. indirect backref |
---|
| 1361 | + * resolution) which could deadlock if it triggers memory reclaim |
---|
| 1362 | + */ |
---|
| 1363 | + nofs_flag = memalloc_nofs_save(); |
---|
| 1364 | + ret = btrfs_drew_lock_init(&root->snapshot_lock); |
---|
| 1365 | + memalloc_nofs_restore(nofs_flag); |
---|
| 1366 | + if (ret) |
---|
1495 | 1367 | goto fail; |
---|
| 1368 | + |
---|
| 1369 | + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && |
---|
| 1370 | + root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { |
---|
| 1371 | + set_bit(BTRFS_ROOT_SHAREABLE, &root->state); |
---|
| 1372 | + btrfs_check_and_init_root_item(&root->root_item); |
---|
1496 | 1373 | } |
---|
1497 | | - root->subv_writers = writers; |
---|
1498 | 1374 | |
---|
1499 | 1375 | btrfs_init_free_ino_ctl(root); |
---|
1500 | 1376 | spin_lock_init(&root->ino_cache_lock); |
---|
.. | .. |
---|
1506 | 1382 | */ |
---|
1507 | 1383 | if (is_fstree(root->root_key.objectid) && |
---|
1508 | 1384 | btrfs_root_refs(&root->root_item) > 0) { |
---|
1509 | | - ret = get_anon_bdev(&root->anon_dev); |
---|
1510 | | - if (ret) |
---|
1511 | | - goto fail; |
---|
| 1385 | + if (!anon_dev) { |
---|
| 1386 | + ret = get_anon_bdev(&root->anon_dev); |
---|
| 1387 | + if (ret) |
---|
| 1388 | + goto fail; |
---|
| 1389 | + } else { |
---|
| 1390 | + root->anon_dev = anon_dev; |
---|
| 1391 | + } |
---|
1512 | 1392 | } |
---|
1513 | 1393 | |
---|
1514 | 1394 | mutex_lock(&root->objectid_mutex); |
---|
.. | .. |
---|
1529 | 1409 | return ret; |
---|
1530 | 1410 | } |
---|
1531 | 1411 | |
---|
1532 | | -struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, |
---|
1533 | | - u64 root_id) |
---|
| 1412 | +static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, |
---|
| 1413 | + u64 root_id) |
---|
1534 | 1414 | { |
---|
1535 | 1415 | struct btrfs_root *root; |
---|
1536 | 1416 | |
---|
1537 | 1417 | spin_lock(&fs_info->fs_roots_radix_lock); |
---|
1538 | 1418 | root = radix_tree_lookup(&fs_info->fs_roots_radix, |
---|
1539 | 1419 | (unsigned long)root_id); |
---|
| 1420 | + if (root) |
---|
| 1421 | + root = btrfs_grab_root(root); |
---|
1540 | 1422 | spin_unlock(&fs_info->fs_roots_radix_lock); |
---|
1541 | 1423 | return root; |
---|
| 1424 | +} |
---|
| 1425 | + |
---|
| 1426 | +static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info, |
---|
| 1427 | + u64 objectid) |
---|
| 1428 | +{ |
---|
| 1429 | + if (objectid == BTRFS_ROOT_TREE_OBJECTID) |
---|
| 1430 | + return btrfs_grab_root(fs_info->tree_root); |
---|
| 1431 | + if (objectid == BTRFS_EXTENT_TREE_OBJECTID) |
---|
| 1432 | + return btrfs_grab_root(fs_info->extent_root); |
---|
| 1433 | + if (objectid == BTRFS_CHUNK_TREE_OBJECTID) |
---|
| 1434 | + return btrfs_grab_root(fs_info->chunk_root); |
---|
| 1435 | + if (objectid == BTRFS_DEV_TREE_OBJECTID) |
---|
| 1436 | + return btrfs_grab_root(fs_info->dev_root); |
---|
| 1437 | + if (objectid == BTRFS_CSUM_TREE_OBJECTID) |
---|
| 1438 | + return btrfs_grab_root(fs_info->csum_root); |
---|
| 1439 | + if (objectid == BTRFS_QUOTA_TREE_OBJECTID) |
---|
| 1440 | + return btrfs_grab_root(fs_info->quota_root) ? |
---|
| 1441 | + fs_info->quota_root : ERR_PTR(-ENOENT); |
---|
| 1442 | + if (objectid == BTRFS_UUID_TREE_OBJECTID) |
---|
| 1443 | + return btrfs_grab_root(fs_info->uuid_root) ? |
---|
| 1444 | + fs_info->uuid_root : ERR_PTR(-ENOENT); |
---|
| 1445 | + if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) |
---|
| 1446 | + return btrfs_grab_root(fs_info->free_space_root) ? |
---|
| 1447 | + fs_info->free_space_root : ERR_PTR(-ENOENT); |
---|
| 1448 | + return NULL; |
---|
1542 | 1449 | } |
---|
1543 | 1450 | |
---|
1544 | 1451 | int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, |
---|
.. | .. |
---|
1554 | 1461 | ret = radix_tree_insert(&fs_info->fs_roots_radix, |
---|
1555 | 1462 | (unsigned long)root->root_key.objectid, |
---|
1556 | 1463 | root); |
---|
1557 | | - if (ret == 0) |
---|
| 1464 | + if (ret == 0) { |
---|
| 1465 | + btrfs_grab_root(root); |
---|
1558 | 1466 | set_bit(BTRFS_ROOT_IN_RADIX, &root->state); |
---|
| 1467 | + } |
---|
1559 | 1468 | spin_unlock(&fs_info->fs_roots_radix_lock); |
---|
1560 | 1469 | radix_tree_preload_end(); |
---|
1561 | 1470 | |
---|
1562 | 1471 | return ret; |
---|
1563 | 1472 | } |
---|
1564 | 1473 | |
---|
1565 | | -struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, |
---|
1566 | | - struct btrfs_key *location, |
---|
1567 | | - bool check_ref) |
---|
| 1474 | +void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info) |
---|
| 1475 | +{ |
---|
| 1476 | +#ifdef CONFIG_BTRFS_DEBUG |
---|
| 1477 | + struct btrfs_root *root; |
---|
| 1478 | + |
---|
| 1479 | + while (!list_empty(&fs_info->allocated_roots)) { |
---|
| 1480 | + char buf[BTRFS_ROOT_NAME_BUF_LEN]; |
---|
| 1481 | + |
---|
| 1482 | + root = list_first_entry(&fs_info->allocated_roots, |
---|
| 1483 | + struct btrfs_root, leak_list); |
---|
| 1484 | + btrfs_err(fs_info, "leaked root %s refcount %d", |
---|
| 1485 | + btrfs_root_name(&root->root_key, buf), |
---|
| 1486 | + refcount_read(&root->refs)); |
---|
| 1487 | + while (refcount_read(&root->refs) > 1) |
---|
| 1488 | + btrfs_put_root(root); |
---|
| 1489 | + btrfs_put_root(root); |
---|
| 1490 | + } |
---|
| 1491 | +#endif |
---|
| 1492 | +} |
---|
| 1493 | + |
---|
| 1494 | +void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) |
---|
| 1495 | +{ |
---|
| 1496 | + percpu_counter_destroy(&fs_info->dirty_metadata_bytes); |
---|
| 1497 | + percpu_counter_destroy(&fs_info->delalloc_bytes); |
---|
| 1498 | + percpu_counter_destroy(&fs_info->dio_bytes); |
---|
| 1499 | + percpu_counter_destroy(&fs_info->dev_replace.bio_counter); |
---|
| 1500 | + btrfs_free_csum_hash(fs_info); |
---|
| 1501 | + btrfs_free_stripe_hash_table(fs_info); |
---|
| 1502 | + btrfs_free_ref_cache(fs_info); |
---|
| 1503 | + kfree(fs_info->balance_ctl); |
---|
| 1504 | + kfree(fs_info->delayed_root); |
---|
| 1505 | + btrfs_put_root(fs_info->extent_root); |
---|
| 1506 | + btrfs_put_root(fs_info->tree_root); |
---|
| 1507 | + btrfs_put_root(fs_info->chunk_root); |
---|
| 1508 | + btrfs_put_root(fs_info->dev_root); |
---|
| 1509 | + btrfs_put_root(fs_info->csum_root); |
---|
| 1510 | + btrfs_put_root(fs_info->quota_root); |
---|
| 1511 | + btrfs_put_root(fs_info->uuid_root); |
---|
| 1512 | + btrfs_put_root(fs_info->free_space_root); |
---|
| 1513 | + btrfs_put_root(fs_info->fs_root); |
---|
| 1514 | + btrfs_put_root(fs_info->data_reloc_root); |
---|
| 1515 | + btrfs_check_leaked_roots(fs_info); |
---|
| 1516 | + btrfs_extent_buffer_leak_debug_check(fs_info); |
---|
| 1517 | + kfree(fs_info->super_copy); |
---|
| 1518 | + kfree(fs_info->super_for_commit); |
---|
| 1519 | + kvfree(fs_info); |
---|
| 1520 | +} |
---|
| 1521 | + |
---|
| 1522 | + |
---|
| 1523 | +/* |
---|
| 1524 | + * Get an in-memory reference of a root structure. |
---|
| 1525 | + * |
---|
| 1526 | + * For essential trees like root/extent tree, we grab it from fs_info directly. |
---|
| 1527 | + * For subvolume trees, we check the cached filesystem roots first. If not |
---|
| 1528 | + * found, then read it from disk and add it to cached fs roots. |
---|
| 1529 | + * |
---|
| 1530 | + * Caller should release the root by calling btrfs_put_root() after the usage. |
---|
| 1531 | + * |
---|
| 1532 | + * NOTE: Reloc and log trees can't be read by this function as they share the |
---|
| 1533 | + * same root objectid. |
---|
| 1534 | + * |
---|
| 1535 | + * @objectid: root id |
---|
| 1536 | + * @anon_dev: preallocated anonymous block device number for new roots, |
---|
| 1537 | + * pass 0 for new allocation. |
---|
| 1538 | + * @check_ref: whether to check root item references; if true, return -ENOENT |
---|
| 1539 | + * for orphan roots |
---|
| 1540 | + */ |
---|
| 1541 | +static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, |
---|
| 1542 | + u64 objectid, dev_t anon_dev, |
---|
| 1543 | + bool check_ref) |
---|
1568 | 1544 | { |
---|
1569 | 1545 | struct btrfs_root *root; |
---|
1570 | 1546 | struct btrfs_path *path; |
---|
1571 | 1547 | struct btrfs_key key; |
---|
1572 | 1548 | int ret; |
---|
1573 | 1549 | |
---|
1574 | | - if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) |
---|
1575 | | - return fs_info->tree_root; |
---|
1576 | | - if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID) |
---|
1577 | | - return fs_info->extent_root; |
---|
1578 | | - if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID) |
---|
1579 | | - return fs_info->chunk_root; |
---|
1580 | | - if (location->objectid == BTRFS_DEV_TREE_OBJECTID) |
---|
1581 | | - return fs_info->dev_root; |
---|
1582 | | - if (location->objectid == BTRFS_CSUM_TREE_OBJECTID) |
---|
1583 | | - return fs_info->csum_root; |
---|
1584 | | - if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID) |
---|
1585 | | - return fs_info->quota_root ? fs_info->quota_root : |
---|
1586 | | - ERR_PTR(-ENOENT); |
---|
1587 | | - if (location->objectid == BTRFS_UUID_TREE_OBJECTID) |
---|
1588 | | - return fs_info->uuid_root ? fs_info->uuid_root : |
---|
1589 | | - ERR_PTR(-ENOENT); |
---|
1590 | | - if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) |
---|
1591 | | - return fs_info->free_space_root ? fs_info->free_space_root : |
---|
1592 | | - ERR_PTR(-ENOENT); |
---|
| 1550 | + root = btrfs_get_global_root(fs_info, objectid); |
---|
| 1551 | + if (root) |
---|
| 1552 | + return root; |
---|
1593 | 1553 | again: |
---|
1594 | | - root = btrfs_lookup_fs_root(fs_info, location->objectid); |
---|
| 1554 | + root = btrfs_lookup_fs_root(fs_info, objectid); |
---|
1595 | 1555 | if (root) { |
---|
1596 | | - if (check_ref && btrfs_root_refs(&root->root_item) == 0) |
---|
| 1556 | + /* Shouldn't get preallocated anon_dev for cached roots */ |
---|
| 1557 | + ASSERT(!anon_dev); |
---|
| 1558 | + if (check_ref && btrfs_root_refs(&root->root_item) == 0) { |
---|
| 1559 | + btrfs_put_root(root); |
---|
1597 | 1560 | return ERR_PTR(-ENOENT); |
---|
| 1561 | + } |
---|
1598 | 1562 | return root; |
---|
1599 | 1563 | } |
---|
1600 | 1564 | |
---|
1601 | | - root = btrfs_read_fs_root(fs_info->tree_root, location); |
---|
| 1565 | + key.objectid = objectid; |
---|
| 1566 | + key.type = BTRFS_ROOT_ITEM_KEY; |
---|
| 1567 | + key.offset = (u64)-1; |
---|
| 1568 | + root = btrfs_read_tree_root(fs_info->tree_root, &key); |
---|
1602 | 1569 | if (IS_ERR(root)) |
---|
1603 | 1570 | return root; |
---|
1604 | 1571 | |
---|
.. | .. |
---|
1607 | 1574 | goto fail; |
---|
1608 | 1575 | } |
---|
1609 | 1576 | |
---|
1610 | | - ret = btrfs_init_fs_root(root); |
---|
| 1577 | + ret = btrfs_init_fs_root(root, anon_dev); |
---|
1611 | 1578 | if (ret) |
---|
1612 | 1579 | goto fail; |
---|
1613 | 1580 | |
---|
.. | .. |
---|
1618 | 1585 | } |
---|
1619 | 1586 | key.objectid = BTRFS_ORPHAN_OBJECTID; |
---|
1620 | 1587 | key.type = BTRFS_ORPHAN_ITEM_KEY; |
---|
1621 | | - key.offset = location->objectid; |
---|
| 1588 | + key.offset = objectid; |
---|
1622 | 1589 | |
---|
1623 | 1590 | ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); |
---|
1624 | 1591 | btrfs_free_path(path); |
---|
.. | .. |
---|
1630 | 1597 | ret = btrfs_insert_fs_root(fs_info, root); |
---|
1631 | 1598 | if (ret) { |
---|
1632 | 1599 | if (ret == -EEXIST) { |
---|
1633 | | - btrfs_free_fs_root(root); |
---|
| 1600 | + btrfs_put_root(root); |
---|
1634 | 1601 | goto again; |
---|
1635 | 1602 | } |
---|
1636 | 1603 | goto fail; |
---|
1637 | 1604 | } |
---|
1638 | 1605 | return root; |
---|
1639 | 1606 | fail: |
---|
1640 | | - btrfs_free_fs_root(root); |
---|
| 1607 | + /* |
---|
| 1608 | + * If our caller provided us an anonymous device, then it's his |
---|
| 1609 | + * responsibility to free it in case we fail. So we have to set our |
---|
| 1610 | + * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root() |
---|
| 1611 | + * and once again by our caller. |
---|
| 1612 | + */ |
---|
| 1613 | + if (anon_dev) |
---|
| 1614 | + root->anon_dev = 0; |
---|
| 1615 | + btrfs_put_root(root); |
---|
1641 | 1616 | return ERR_PTR(ret); |
---|
1642 | 1617 | } |
---|
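The failure path above hands ownership of a caller-preallocated anon_dev back to the caller by zeroing root->anon_dev before btrfs_put_root(), which would otherwise free it a second time. A tiny userspace sketch of that "ownership only transfers on success" rule, with made-up names and a deliberately failing setup step, might look like this:

```c
#include <stdio.h>
#include <stdlib.h>

struct fake_root {
	int *anon_dev;			/* stand-in for the anonymous bdev number */
};

static void fake_put_root(struct fake_root *r)
{
	free(r->anon_dev);		/* frees whatever the root still owns */
}

/* Returns 0 on success; on failure a caller-provided @prealloc stays owned by the caller. */
static int fake_init_root(struct fake_root *r, int *prealloc, int make_it_fail)
{
	r->anon_dev = prealloc ? prealloc : malloc(sizeof(int));
	if (!r->anon_dev)
		return -1;

	if (make_it_fail) {
		/* Give the preallocated device back so it is not freed twice. */
		if (prealloc)
			r->anon_dev = NULL;
		fake_put_root(r);
		return -1;
	}
	return 0;
}

int main(void)
{
	int *dev = malloc(sizeof(int));	/* caller preallocates, like btrfs_get_new_fs_root() callers */
	struct fake_root r = { NULL };

	if (fake_init_root(&r, dev, 1) < 0)
		free(dev);		/* caller frees its preallocation exactly once */
	return 0;
}
```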
1643 | 1618 | |
---|
1644 | | -static int btrfs_congested_fn(void *congested_data, int bdi_bits) |
---|
| 1619 | +/* |
---|
| 1620 | + * Get in-memory reference of a root structure |
---|
| 1621 | + * |
---|
| 1622 | + * @objectid: tree objectid |
---|
| 1623 | + * @check_ref: if set, verify that the tree exists and the item has at least |
---|
| 1624 | + * one reference |
---|
| 1625 | + */ |
---|
| 1626 | +struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, |
---|
| 1627 | + u64 objectid, bool check_ref) |
---|
1645 | 1628 | { |
---|
1646 | | - struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; |
---|
1647 | | - int ret = 0; |
---|
1648 | | - struct btrfs_device *device; |
---|
1649 | | - struct backing_dev_info *bdi; |
---|
| 1629 | + return btrfs_get_root_ref(fs_info, objectid, 0, check_ref); |
---|
| 1630 | +} |
---|
1650 | 1631 | |
---|
1651 | | - rcu_read_lock(); |
---|
1652 | | - list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { |
---|
1653 | | - if (!device->bdev) |
---|
1654 | | - continue; |
---|
1655 | | - bdi = device->bdev->bd_bdi; |
---|
1656 | | - if (bdi_congested(bdi, bdi_bits)) { |
---|
1657 | | - ret = 1; |
---|
1658 | | - break; |
---|
1659 | | - } |
---|
1660 | | - } |
---|
1661 | | - rcu_read_unlock(); |
---|
1662 | | - return ret; |
---|
| 1632 | +/* |
---|
| 1633 | + * Get in-memory reference of a root structure, created as new, optionally pass |
---|
| 1634 | + * the anonymous block device id |
---|
| 1635 | + * |
---|
| 1636 | + * @objectid: tree objectid |
---|
| 1637 | + * @anon_dev: if zero, allocate a new anonymous block device, otherwise |
---|
| 1638 | + * use the passed-in value |
---|
| 1639 | + */ |
---|
| 1640 | +struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, |
---|
| 1641 | + u64 objectid, dev_t anon_dev) |
---|
| 1642 | +{ |
---|
| 1643 | + return btrfs_get_root_ref(fs_info, objectid, anon_dev, true); |
---|
| 1644 | +} |
---|
| 1645 | + |
---|
| 1646 | +/* |
---|
| 1647 | + * btrfs_get_fs_root_commit_root - return a root for the given objectid |
---|
| 1648 | + * @fs_info: the fs_info |
---|
| 1649 | + * @objectid: the objectid we need to lookup |
---|
| 1650 | + * |
---|
| 1651 | + * This is exclusively used for backref walking, and exists specifically because |
---|
| 1652 | + * of how qgroups does lookups. Qgroups will do a backref lookup at delayed ref |
---|
| 1653 | + * creation time, which means we may have to read the tree_root in order to look |
---|
| 1654 | + * up a fs root that is not in memory. If the root is not in memory we will |
---|
| 1655 | + * read the tree root commit root and look up the fs root from there. This is a |
---|
| 1656 | + * temporary root, it will not be inserted into the radix tree as it doesn't |
---|
| 1657 | + * have the most uptodate information, it'll simply be discarded once the |
---|
| 1658 | + * backref code is finished using the root. |
---|
| 1659 | + */ |
---|
| 1660 | +struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info, |
---|
| 1661 | + struct btrfs_path *path, |
---|
| 1662 | + u64 objectid) |
---|
| 1663 | +{ |
---|
| 1664 | + struct btrfs_root *root; |
---|
| 1665 | + struct btrfs_key key; |
---|
| 1666 | + |
---|
| 1667 | + ASSERT(path->search_commit_root && path->skip_locking); |
---|
| 1668 | + |
---|
| 1669 | + /* |
---|
| 1670 | + * This can return -ENOENT if we ask for a root that doesn't exist, but |
---|
| 1671 | + * since this is called via the backref walking code we won't be looking |
---|
| 1672 | + * up a root that doesn't exist, unless there's corruption. So if root |
---|
| 1673 | + * != NULL just return it. |
---|
| 1674 | + */ |
---|
| 1675 | + root = btrfs_get_global_root(fs_info, objectid); |
---|
| 1676 | + if (root) |
---|
| 1677 | + return root; |
---|
| 1678 | + |
---|
| 1679 | + root = btrfs_lookup_fs_root(fs_info, objectid); |
---|
| 1680 | + if (root) |
---|
| 1681 | + return root; |
---|
| 1682 | + |
---|
| 1683 | + key.objectid = objectid; |
---|
| 1684 | + key.type = BTRFS_ROOT_ITEM_KEY; |
---|
| 1685 | + key.offset = (u64)-1; |
---|
| 1686 | + root = read_tree_root_path(fs_info->tree_root, path, &key); |
---|
| 1687 | + btrfs_release_path(path); |
---|
| 1688 | + |
---|
| 1689 | + return root; |
---|
1663 | 1690 | } |
---|
1664 | 1691 | |
---|
1665 | 1692 | /* |
---|
.. | .. |
---|
1690 | 1717 | while (1) { |
---|
1691 | 1718 | again = 0; |
---|
1692 | 1719 | |
---|
| 1720 | + set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); |
---|
| 1721 | + |
---|
1693 | 1722 | /* Make the cleaner go to sleep early. */ |
---|
1694 | 1723 | if (btrfs_need_cleaner_sleep(fs_info)) |
---|
1695 | 1724 | goto sleep; |
---|
.. | .. |
---|
1713 | 1742 | goto sleep; |
---|
1714 | 1743 | } |
---|
1715 | 1744 | |
---|
1716 | | - mutex_lock(&fs_info->cleaner_delayed_iput_mutex); |
---|
1717 | 1745 | btrfs_run_delayed_iputs(fs_info); |
---|
1718 | | - mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); |
---|
1719 | 1746 | |
---|
1720 | 1747 | again = btrfs_clean_one_deleted_snapshot(root); |
---|
1721 | 1748 | mutex_unlock(&fs_info->cleaner_mutex); |
---|
.. | .. |
---|
1736 | 1763 | */ |
---|
1737 | 1764 | btrfs_delete_unused_bgs(fs_info); |
---|
1738 | 1765 | sleep: |
---|
| 1766 | + clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); |
---|
1739 | 1767 | if (kthread_should_park()) |
---|
1740 | 1768 | kthread_parkme(); |
---|
1741 | 1769 | if (kthread_should_stop()) |
---|
.. | .. |
---|
1772 | 1800 | } |
---|
1773 | 1801 | |
---|
1774 | 1802 | now = ktime_get_seconds(); |
---|
1775 | | - if (cur->state < TRANS_STATE_BLOCKED && |
---|
1776 | | - !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && |
---|
| 1803 | + if (cur->state < TRANS_STATE_COMMIT_START && |
---|
1777 | 1804 | (now < cur->start_time || |
---|
1778 | 1805 | now - cur->start_time < fs_info->commit_interval)) { |
---|
1779 | 1806 | spin_unlock(&fs_info->trans_lock); |
---|
.. | .. |
---|
1811 | 1838 | } |
---|
1812 | 1839 | |
---|
1813 | 1840 | /* |
---|
1814 | | - * this will find the highest generation in the array of |
---|
1815 | | - * root backups. The index of the highest array is returned, |
---|
1816 | | - * or -1 if we can't find anything. |
---|
| 1841 | + * This will find the highest generation in the array of root backups. The |
---|
| 1842 | + * index of the newest entry is returned, or -EINVAL if we can't find |
---|
| 1843 | + * anything. |
---|
1817 | 1844 | * |
---|
1818 | 1845 | * We check to make sure the array is valid by comparing the |
---|
1819 | 1846 | * generation of the latest root in the array with the generation |
---|
1820 | 1847 | * in the super block. If they don't match we pitch it. |
---|
1821 | 1848 | */ |
---|
1822 | | -static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) |
---|
| 1849 | +static int find_newest_super_backup(struct btrfs_fs_info *info) |
---|
1823 | 1850 | { |
---|
| 1851 | + const u64 newest_gen = btrfs_super_generation(info->super_copy); |
---|
1824 | 1852 | u64 cur; |
---|
1825 | | - int newest_index = -1; |
---|
1826 | 1853 | struct btrfs_root_backup *root_backup; |
---|
1827 | 1854 | int i; |
---|
1828 | 1855 | |
---|
.. | .. |
---|
1830 | 1857 | root_backup = info->super_copy->super_roots + i; |
---|
1831 | 1858 | cur = btrfs_backup_tree_root_gen(root_backup); |
---|
1832 | 1859 | if (cur == newest_gen) |
---|
1833 | | - newest_index = i; |
---|
| 1860 | + return i; |
---|
1834 | 1861 | } |
---|
1835 | 1862 | |
---|
1836 | | - /* check to see if we actually wrapped around */ |
---|
1837 | | - if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { |
---|
1838 | | - root_backup = info->super_copy->super_roots; |
---|
1839 | | - cur = btrfs_backup_tree_root_gen(root_backup); |
---|
1840 | | - if (cur == newest_gen) |
---|
1841 | | - newest_index = 0; |
---|
1842 | | - } |
---|
1843 | | - return newest_index; |
---|
1844 | | -} |
---|
1845 | | - |
---|
1846 | | - |
---|
1847 | | -/* |
---|
1848 | | - * find the oldest backup so we know where to store new entries |
---|
1849 | | - * in the backup array. This will set the backup_root_index |
---|
1850 | | - * field in the fs_info struct |
---|
1851 | | - */ |
---|
1852 | | -static void find_oldest_super_backup(struct btrfs_fs_info *info, |
---|
1853 | | - u64 newest_gen) |
---|
1854 | | -{ |
---|
1855 | | - int newest_index = -1; |
---|
1856 | | - |
---|
1857 | | - newest_index = find_newest_super_backup(info, newest_gen); |
---|
1858 | | - /* if there was garbage in there, just move along */ |
---|
1859 | | - if (newest_index == -1) { |
---|
1860 | | - info->backup_root_index = 0; |
---|
1861 | | - } else { |
---|
1862 | | - info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; |
---|
1863 | | - } |
---|
| 1863 | + return -EINVAL; |
---|
1864 | 1864 | } |
---|
1865 | 1865 | |
---|
1866 | 1866 | /* |
---|
.. | .. |
---|
1870 | 1870 | */ |
---|
1871 | 1871 | static void backup_super_roots(struct btrfs_fs_info *info) |
---|
1872 | 1872 | { |
---|
1873 | | - int next_backup; |
---|
| 1873 | + const int next_backup = info->backup_root_index; |
---|
1874 | 1874 | struct btrfs_root_backup *root_backup; |
---|
1875 | | - int last_backup; |
---|
1876 | | - |
---|
1877 | | - next_backup = info->backup_root_index; |
---|
1878 | | - last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % |
---|
1879 | | - BTRFS_NUM_BACKUP_ROOTS; |
---|
1880 | | - |
---|
1881 | | - /* |
---|
1882 | | - * just overwrite the last backup if we're at the same generation |
---|
1883 | | - * this happens only at umount |
---|
1884 | | - */ |
---|
1885 | | - root_backup = info->super_for_commit->super_roots + last_backup; |
---|
1886 | | - if (btrfs_backup_tree_root_gen(root_backup) == |
---|
1887 | | - btrfs_header_generation(info->tree_root->node)) |
---|
1888 | | - next_backup = last_backup; |
---|
1889 | 1875 | |
---|
1890 | 1876 | root_backup = info->super_for_commit->super_roots + next_backup; |
---|
1891 | 1877 | |
---|
.. | .. |
---|
1958 | 1944 | } |
---|
1959 | 1945 | |
---|
1960 | 1946 | /* |
---|
1961 | | - * this copies info out of the root backup array and back into |
---|
1962 | | - * the in-memory super block. It is meant to help iterate through |
---|
1963 | | - * the array, so you send it the number of backups you've already |
---|
1964 | | - * tried and the last backup index you used. |
---|
| 1947 | + * read_backup_root - Reads a backup root based on the passed priority. Prio 0 |
---|
| 1948 | + * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots |
---|
1965 | 1949 | * |
---|
1966 | | - * this returns -1 when it has tried all the backups |
---|
| 1950 | + * fs_info - filesystem whose backup roots need to be read |
---|
| 1951 | + * priority - priority of backup root required |
---|
| 1952 | + * |
---|
| 1953 | + * Returns backup root index on success and -EINVAL otherwise. |
---|
1967 | 1954 | */ |
---|
1968 | | -static noinline int next_root_backup(struct btrfs_fs_info *info, |
---|
1969 | | - struct btrfs_super_block *super, |
---|
1970 | | - int *num_backups_tried, int *backup_index) |
---|
| 1955 | +static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority) |
---|
1971 | 1956 | { |
---|
| 1957 | + int backup_index = find_newest_super_backup(fs_info); |
---|
| 1958 | + struct btrfs_super_block *super = fs_info->super_copy; |
---|
1972 | 1959 | struct btrfs_root_backup *root_backup; |
---|
1973 | | - int newest = *backup_index; |
---|
1974 | 1960 | |
---|
1975 | | - if (*num_backups_tried == 0) { |
---|
1976 | | - u64 gen = btrfs_super_generation(super); |
---|
| 1961 | + if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) { |
---|
| 1962 | + if (priority == 0) |
---|
| 1963 | + return backup_index; |
---|
1977 | 1964 | |
---|
1978 | | - newest = find_newest_super_backup(info, gen); |
---|
1979 | | - if (newest == -1) |
---|
1980 | | - return -1; |
---|
1981 | | - |
---|
1982 | | - *backup_index = newest; |
---|
1983 | | - *num_backups_tried = 1; |
---|
1984 | | - } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { |
---|
1985 | | - /* we've tried all the backups, all done */ |
---|
1986 | | - return -1; |
---|
| 1965 | + backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority; |
---|
| 1966 | + backup_index %= BTRFS_NUM_BACKUP_ROOTS; |
---|
1987 | 1967 | } else { |
---|
1988 | | - /* jump to the next oldest backup */ |
---|
1989 | | - newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % |
---|
1990 | | - BTRFS_NUM_BACKUP_ROOTS; |
---|
1991 | | - *backup_index = newest; |
---|
1992 | | - *num_backups_tried += 1; |
---|
| 1968 | + return -EINVAL; |
---|
1993 | 1969 | } |
---|
1994 | | - root_backup = super->super_roots + newest; |
---|
| 1970 | + |
---|
| 1971 | + root_backup = super->super_roots + backup_index; |
---|
1995 | 1972 | |
---|
1996 | 1973 | btrfs_set_super_generation(super, |
---|
1997 | 1974 | btrfs_backup_tree_root_gen(root_backup)); |
---|
.. | .. |
---|
2001 | 1978 | btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); |
---|
2002 | 1979 | |
---|
2003 | 1980 | /* |
---|
2004 | | - * fixme: the total bytes and num_devices need to match or we should |
---|
| 1981 | + * Fixme: the total bytes and num_devices need to match, otherwise we |
---|
2005 | 1982 | * need a fsck |
---|
2006 | 1983 | */ |
---|
2007 | 1984 | btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); |
---|
2008 | 1985 | btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); |
---|
2009 | | - return 0; |
---|
| 1986 | + |
---|
| 1987 | + return backup_index; |
---|
2010 | 1988 | } |
---|
2011 | 1989 | |
---|
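The priority-to-slot mapping in read_backup_root() above is a backwards walk around the backup ring starting from the newest slot. The standalone snippet below (with a hypothetical newest slot and the same ring size of 4 as BTRFS_NUM_BACKUP_ROOTS) just prints what that modular arithmetic produces:

```c
#include <stdio.h>

#define NUM_BACKUP_ROOTS 4

static int backup_slot(int newest, int priority)
{
	/* priority 0 = newest slot, 1 = the one before it, wrapping around the ring */
	return (newest + NUM_BACKUP_ROOTS - priority) % NUM_BACKUP_ROOTS;
}

int main(void)
{
	int newest = 2;	/* hypothetical: slot 2 holds the newest generation */

	for (int prio = 0; prio < NUM_BACKUP_ROOTS; prio++)
		printf("priority %d -> slot %d\n", prio, backup_slot(newest, prio));
	return 0;
}
```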
2012 | 1990 | /* helper to cleanup workers */ |
---|
.. | .. |
---|
2017 | 1995 | btrfs_destroy_workqueue(fs_info->workers); |
---|
2018 | 1996 | btrfs_destroy_workqueue(fs_info->endio_workers); |
---|
2019 | 1997 | btrfs_destroy_workqueue(fs_info->endio_raid56_workers); |
---|
2020 | | - btrfs_destroy_workqueue(fs_info->endio_repair_workers); |
---|
2021 | 1998 | btrfs_destroy_workqueue(fs_info->rmw_workers); |
---|
2022 | 1999 | btrfs_destroy_workqueue(fs_info->endio_write_workers); |
---|
2023 | 2000 | btrfs_destroy_workqueue(fs_info->endio_freespace_worker); |
---|
2024 | | - btrfs_destroy_workqueue(fs_info->submit_workers); |
---|
2025 | 2001 | btrfs_destroy_workqueue(fs_info->delayed_workers); |
---|
2026 | 2002 | btrfs_destroy_workqueue(fs_info->caching_workers); |
---|
2027 | 2003 | btrfs_destroy_workqueue(fs_info->readahead_workers); |
---|
2028 | 2004 | btrfs_destroy_workqueue(fs_info->flush_workers); |
---|
2029 | 2005 | btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); |
---|
2030 | | - btrfs_destroy_workqueue(fs_info->extent_workers); |
---|
| 2006 | + if (fs_info->discard_ctl.discard_workers) |
---|
| 2007 | + destroy_workqueue(fs_info->discard_ctl.discard_workers); |
---|
2031 | 2008 | /* |
---|
2032 | 2009 | * Now that all other work queues are destroyed, we can safely destroy |
---|
2033 | 2010 | * the queues used for metadata I/O, since tasks from those other work |
---|
.. | .. |
---|
2057 | 2034 | free_root_extent_buffers(info->csum_root); |
---|
2058 | 2035 | free_root_extent_buffers(info->quota_root); |
---|
2059 | 2036 | free_root_extent_buffers(info->uuid_root); |
---|
| 2037 | + free_root_extent_buffers(info->fs_root); |
---|
| 2038 | + free_root_extent_buffers(info->data_reloc_root); |
---|
2060 | 2039 | if (free_chunk_root) |
---|
2061 | 2040 | free_root_extent_buffers(info->chunk_root); |
---|
2062 | 2041 | free_root_extent_buffers(info->free_space_root); |
---|
| 2042 | +} |
---|
| 2043 | + |
---|
| 2044 | +void btrfs_put_root(struct btrfs_root *root) |
---|
| 2045 | +{ |
---|
| 2046 | + if (!root) |
---|
| 2047 | + return; |
---|
| 2048 | + |
---|
| 2049 | + if (refcount_dec_and_test(&root->refs)) { |
---|
| 2050 | + WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); |
---|
| 2051 | + WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)); |
---|
| 2052 | + if (root->anon_dev) |
---|
| 2053 | + free_anon_bdev(root->anon_dev); |
---|
| 2054 | + btrfs_drew_lock_destroy(&root->snapshot_lock); |
---|
| 2055 | + free_root_extent_buffers(root); |
---|
| 2056 | + kfree(root->free_ino_ctl); |
---|
| 2057 | + kfree(root->free_ino_pinned); |
---|
| 2058 | +#ifdef CONFIG_BTRFS_DEBUG |
---|
| 2059 | + spin_lock(&root->fs_info->fs_roots_radix_lock); |
---|
| 2060 | + list_del_init(&root->leak_list); |
---|
| 2061 | + spin_unlock(&root->fs_info->fs_roots_radix_lock); |
---|
| 2062 | +#endif |
---|
| 2063 | + kfree(root); |
---|
| 2064 | + } |
---|
2063 | 2065 | } |
---|
2064 | 2066 | |
---|
2065 | 2067 | void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) |
---|
.. | .. |
---|
2073 | 2075 | struct btrfs_root, root_list); |
---|
2074 | 2076 | list_del(&gang[0]->root_list); |
---|
2075 | 2077 | |
---|
2076 | | - if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) { |
---|
| 2078 | + if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) |
---|
2077 | 2079 | btrfs_drop_and_free_fs_root(fs_info, gang[0]); |
---|
2078 | | - } else { |
---|
2079 | | - free_extent_buffer(gang[0]->node); |
---|
2080 | | - free_extent_buffer(gang[0]->commit_root); |
---|
2081 | | - btrfs_put_fs_root(gang[0]); |
---|
2082 | | - } |
---|
| 2080 | + btrfs_put_root(gang[0]); |
---|
2083 | 2081 | } |
---|
2084 | 2082 | |
---|
2085 | 2083 | while (1) { |
---|
.. | .. |
---|
2091 | 2089 | for (i = 0; i < ret; i++) |
---|
2092 | 2090 | btrfs_drop_and_free_fs_root(fs_info, gang[i]); |
---|
2093 | 2091 | } |
---|
2094 | | - |
---|
2095 | | - if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { |
---|
2096 | | - btrfs_free_log_root_tree(NULL, fs_info); |
---|
2097 | | - btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); |
---|
2098 | | - } |
---|
2099 | 2092 | } |
---|
2100 | 2093 | |
---|
2101 | 2094 | static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) |
---|
.. | .. |
---|
2106 | 2099 | atomic_set(&fs_info->scrubs_paused, 0); |
---|
2107 | 2100 | atomic_set(&fs_info->scrub_cancel_req, 0); |
---|
2108 | 2101 | init_waitqueue_head(&fs_info->scrub_pause_wait); |
---|
2109 | | - fs_info->scrub_workers_refcnt = 0; |
---|
| 2102 | + refcount_set(&fs_info->scrub_workers_refcnt, 0); |
---|
2110 | 2103 | } |
---|
2111 | 2104 | |
---|
2112 | 2105 | static void btrfs_init_balance(struct btrfs_fs_info *fs_info) |
---|
.. | .. |
---|
2134 | 2127 | inode->i_mapping->a_ops = &btree_aops; |
---|
2135 | 2128 | |
---|
2136 | 2129 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
---|
2137 | | - extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode); |
---|
2138 | | - BTRFS_I(inode)->io_tree.track_uptodate = 0; |
---|
| 2130 | + extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree, |
---|
| 2131 | + IO_TREE_BTREE_INODE_IO, inode); |
---|
| 2132 | + BTRFS_I(inode)->io_tree.track_uptodate = false; |
---|
2139 | 2133 | extent_map_tree_init(&BTRFS_I(inode)->extent_tree); |
---|
2140 | 2134 | |
---|
2141 | | - BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops; |
---|
2142 | | - |
---|
2143 | | - BTRFS_I(inode)->root = fs_info->tree_root; |
---|
| 2135 | + BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root); |
---|
2144 | 2136 | memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key)); |
---|
2145 | 2137 | set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); |
---|
2146 | 2138 | btrfs_insert_inode_hash(inode); |
---|
.. | .. |
---|
2149 | 2141 | static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) |
---|
2150 | 2142 | { |
---|
2151 | 2143 | mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); |
---|
2152 | | - rwlock_init(&fs_info->dev_replace.lock); |
---|
2153 | | - atomic_set(&fs_info->dev_replace.read_locks, 0); |
---|
2154 | | - atomic_set(&fs_info->dev_replace.blocking_readers, 0); |
---|
2155 | | - init_waitqueue_head(&fs_info->replace_wait); |
---|
2156 | | - init_waitqueue_head(&fs_info->dev_replace.read_lock_wq); |
---|
| 2144 | + init_rwsem(&fs_info->dev_replace.rwsem); |
---|
| 2145 | + init_waitqueue_head(&fs_info->dev_replace.replace_wait); |
---|
2157 | 2146 | } |
---|
2158 | 2147 | |
---|
2159 | 2148 | static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) |
---|
.. | .. |
---|
2161 | 2150 | spin_lock_init(&fs_info->qgroup_lock); |
---|
2162 | 2151 | mutex_init(&fs_info->qgroup_ioctl_lock); |
---|
2163 | 2152 | fs_info->qgroup_tree = RB_ROOT; |
---|
2164 | | - fs_info->qgroup_op_tree = RB_ROOT; |
---|
2165 | 2153 | INIT_LIST_HEAD(&fs_info->dirty_qgroups); |
---|
2166 | 2154 | fs_info->qgroup_seq = 1; |
---|
2167 | 2155 | fs_info->qgroup_ulist = NULL; |
---|
.. | .. |
---|
2190 | 2178 | fs_info->caching_workers = |
---|
2191 | 2179 | btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); |
---|
2192 | 2180 | |
---|
2193 | | - /* |
---|
2194 | | - * a higher idle thresh on the submit workers makes it much more |
---|
2195 | | - * likely that bios will be send down in a sane order to the |
---|
2196 | | - * devices |
---|
2197 | | - */ |
---|
2198 | | - fs_info->submit_workers = |
---|
2199 | | - btrfs_alloc_workqueue(fs_info, "submit", flags, |
---|
2200 | | - min_t(u64, fs_devices->num_devices, |
---|
2201 | | - max_active), 64); |
---|
2202 | | - |
---|
2203 | 2181 | fs_info->fixup_workers = |
---|
2204 | 2182 | btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); |
---|
2205 | 2183 | |
---|
.. | .. |
---|
2218 | 2196 | fs_info->endio_raid56_workers = |
---|
2219 | 2197 | btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, |
---|
2220 | 2198 | max_active, 4); |
---|
2221 | | - fs_info->endio_repair_workers = |
---|
2222 | | - btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0); |
---|
2223 | 2199 | fs_info->rmw_workers = |
---|
2224 | 2200 | btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); |
---|
2225 | 2201 | fs_info->endio_write_workers = |
---|
.. | .. |
---|
2236 | 2212 | max_active, 2); |
---|
2237 | 2213 | fs_info->qgroup_rescan_workers = |
---|
2238 | 2214 | btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); |
---|
2239 | | - fs_info->extent_workers = |
---|
2240 | | - btrfs_alloc_workqueue(fs_info, "extent-refs", flags, |
---|
2241 | | - min_t(u64, fs_devices->num_devices, |
---|
2242 | | - max_active), 8); |
---|
| 2215 | + fs_info->discard_ctl.discard_workers = |
---|
| 2216 | + alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1); |
---|
2243 | 2217 | |
---|
2244 | 2218 | if (!(fs_info->workers && fs_info->delalloc_workers && |
---|
2245 | | - fs_info->submit_workers && fs_info->flush_workers && |
---|
| 2219 | + fs_info->flush_workers && |
---|
2246 | 2220 | fs_info->endio_workers && fs_info->endio_meta_workers && |
---|
2247 | 2221 | fs_info->endio_meta_write_workers && |
---|
2248 | | - fs_info->endio_repair_workers && |
---|
2249 | 2222 | fs_info->endio_write_workers && fs_info->endio_raid56_workers && |
---|
2250 | 2223 | fs_info->endio_freespace_worker && fs_info->rmw_workers && |
---|
2251 | 2224 | fs_info->caching_workers && fs_info->readahead_workers && |
---|
2252 | 2225 | fs_info->fixup_workers && fs_info->delayed_workers && |
---|
2253 | | - fs_info->extent_workers && |
---|
2254 | | - fs_info->qgroup_rescan_workers)) { |
---|
| 2226 | + fs_info->qgroup_rescan_workers && |
---|
| 2227 | + fs_info->discard_ctl.discard_workers)) { |
---|
2255 | 2228 | return -ENOMEM; |
---|
2256 | 2229 | } |
---|
2257 | 2230 | |
---|
| 2231 | + return 0; |
---|
| 2232 | +} |
---|
| 2233 | + |
---|
| 2234 | +static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type) |
---|
| 2235 | +{ |
---|
| 2236 | + struct crypto_shash *csum_shash; |
---|
| 2237 | + const char *csum_driver = btrfs_super_csum_driver(csum_type); |
---|
| 2238 | + |
---|
| 2239 | + csum_shash = crypto_alloc_shash(csum_driver, 0, 0); |
---|
| 2240 | + |
---|
| 2241 | + if (IS_ERR(csum_shash)) { |
---|
| 2242 | + btrfs_err(fs_info, "error allocating %s hash for checksum", |
---|
| 2243 | + csum_driver); |
---|
| 2244 | + return PTR_ERR(csum_shash); |
---|
| 2245 | + } |
---|
| 2246 | + |
---|
| 2247 | + fs_info->csum_shash = csum_shash; |
---|
| 2248 | + |
---|
| 2249 | + /* |
---|
| 2250 | + * Check if the checksum implementation is a fast accelerated one. |
---|
| 2251 | + * As-is this is a bit of a hack and should be replaced once the csum |
---|
| 2252 | + * implementations provide that information themselves. |
---|
| 2253 | + */ |
---|
| 2254 | + switch (csum_type) { |
---|
| 2255 | + case BTRFS_CSUM_TYPE_CRC32: |
---|
| 2256 | + if (!strstr(crypto_shash_driver_name(csum_shash), "generic")) |
---|
| 2257 | + set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); |
---|
| 2258 | + break; |
---|
| 2259 | + case BTRFS_CSUM_TYPE_XXHASH: |
---|
| 2260 | + set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); |
---|
| 2261 | + break; |
---|
| 2262 | + default: |
---|
| 2263 | + break; |
---|
| 2264 | + } |
---|
| 2265 | + |
---|
| 2266 | + btrfs_info(fs_info, "using %s (%s) checksum algorithm", |
---|
| 2267 | + btrfs_super_csum_name(csum_type), |
---|
| 2268 | + crypto_shash_driver_name(csum_shash)); |
---|
2258 | 2269 | return 0; |
---|
2259 | 2270 | } |
---|
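btrfs_init_csum_hash() above flags the filesystem as having a fast checksum implementation when the crc32c driver is anything other than the "generic" software fallback, and unconditionally for xxhash. A small userspace sketch of just that decision follows; the driver-name strings are illustrative, in the kernel they come from crypto_shash_driver_name().

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum { CSUM_CRC32C, CSUM_XXHASH, CSUM_SHA256 };

static bool csum_impl_is_fast(int type, const char *driver)
{
	switch (type) {
	case CSUM_CRC32C:
		/* Anything but the bit-by-bit "generic" fallback is considered fast. */
		return strstr(driver, "generic") == NULL;
	case CSUM_XXHASH:
		return true;	/* always treated as fast */
	default:
		return false;
	}
}

int main(void)
{
	printf("%d\n", csum_impl_is_fast(CSUM_CRC32C, "crc32c-intel"));   /* 1 */
	printf("%d\n", csum_impl_is_fast(CSUM_CRC32C, "crc32c-generic")); /* 0 */
	printf("%d\n", csum_impl_is_fast(CSUM_SHA256, "sha256-generic")); /* 0 */
	return 0;
}
```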
2260 | 2271 | |
---|
.. | .. |
---|
2272 | 2283 | return -EIO; |
---|
2273 | 2284 | } |
---|
2274 | 2285 | |
---|
2275 | | - log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
---|
| 2286 | + log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, |
---|
| 2287 | + GFP_KERNEL); |
---|
2276 | 2288 | if (!log_tree_root) |
---|
2277 | 2289 | return -ENOMEM; |
---|
2278 | | - |
---|
2279 | | - __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); |
---|
2280 | 2290 | |
---|
2281 | 2291 | log_tree_root->node = read_tree_block(fs_info, bytenr, |
---|
2282 | 2292 | fs_info->generation + 1, |
---|
.. | .. |
---|
2284 | 2294 | if (IS_ERR(log_tree_root->node)) { |
---|
2285 | 2295 | btrfs_warn(fs_info, "failed to read log tree"); |
---|
2286 | 2296 | ret = PTR_ERR(log_tree_root->node); |
---|
2287 | | - kfree(log_tree_root); |
---|
| 2297 | + log_tree_root->node = NULL; |
---|
| 2298 | + btrfs_put_root(log_tree_root); |
---|
2288 | 2299 | return ret; |
---|
2289 | 2300 | } else if (!extent_buffer_uptodate(log_tree_root->node)) { |
---|
2290 | 2301 | btrfs_err(fs_info, "failed to read log tree"); |
---|
2291 | | - free_extent_buffer(log_tree_root->node); |
---|
2292 | | - kfree(log_tree_root); |
---|
| 2302 | + btrfs_put_root(log_tree_root); |
---|
2293 | 2303 | return -EIO; |
---|
2294 | 2304 | } |
---|
2295 | 2305 | /* returns with log_tree_root freed on success */ |
---|
.. | .. |
---|
2297 | 2307 | if (ret) { |
---|
2298 | 2308 | btrfs_handle_fs_error(fs_info, ret, |
---|
2299 | 2309 | "Failed to recover log tree"); |
---|
2300 | | - free_extent_buffer(log_tree_root->node); |
---|
2301 | | - kfree(log_tree_root); |
---|
| 2310 | + btrfs_put_root(log_tree_root); |
---|
2302 | 2311 | return ret; |
---|
2303 | 2312 | } |
---|
2304 | 2313 | |
---|
.. | .. |
---|
2350 | 2359 | } |
---|
2351 | 2360 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
---|
2352 | 2361 | fs_info->csum_root = root; |
---|
| 2362 | + |
---|
| 2363 | + /* |
---|
| 2364 | + * This tree can share blocks with some other fs tree during relocation |
---|
| 2365 | + * and we need a proper setup by btrfs_get_fs_root |
---|
| 2366 | + */ |
---|
| 2367 | + root = btrfs_get_fs_root(tree_root->fs_info, |
---|
| 2368 | + BTRFS_DATA_RELOC_TREE_OBJECTID, true); |
---|
| 2369 | + if (IS_ERR(root)) { |
---|
| 2370 | + ret = PTR_ERR(root); |
---|
| 2371 | + goto out; |
---|
| 2372 | + } |
---|
| 2373 | + set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
---|
| 2374 | + fs_info->data_reloc_root = root; |
---|
2353 | 2375 | |
---|
2354 | 2376 | location.objectid = BTRFS_QUOTA_TREE_OBJECTID; |
---|
2355 | 2377 | root = btrfs_read_tree_root(tree_root, &location); |
---|
.. | .. |
---|
2474 | 2496 | ret = -EINVAL; |
---|
2475 | 2497 | } |
---|
2476 | 2498 | |
---|
2477 | | - if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) { |
---|
| 2499 | + if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) { |
---|
2478 | 2500 | btrfs_err(fs_info, |
---|
2479 | | - "dev_item UUID does not match fsid: %pU != %pU", |
---|
2480 | | - fs_info->fsid, sb->dev_item.fsid); |
---|
| 2501 | + "superblock fsid doesn't match fsid of fs_devices: %pU != %pU", |
---|
| 2502 | + sb->fsid, fs_info->fs_devices->fsid); |
---|
| 2503 | + ret = -EINVAL; |
---|
| 2504 | + } |
---|
| 2505 | + |
---|
| 2506 | + if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb), |
---|
| 2507 | + BTRFS_FSID_SIZE) != 0) { |
---|
| 2508 | + btrfs_err(fs_info, |
---|
| 2509 | +"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU", |
---|
| 2510 | + btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid); |
---|
| 2511 | + ret = -EINVAL; |
---|
| 2512 | + } |
---|
| 2513 | + |
---|
| 2514 | + if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid, |
---|
| 2515 | + BTRFS_FSID_SIZE) != 0) { |
---|
| 2516 | + btrfs_err(fs_info, |
---|
| 2517 | + "dev_item UUID does not match metadata fsid: %pU != %pU", |
---|
| 2518 | + fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid); |
---|
2481 | 2519 | ret = -EINVAL; |
---|
2482 | 2520 | } |
---|
2483 | 2521 | |
---|
.. | .. |
---|
2572 | 2610 | ret = validate_super(fs_info, sb, -1); |
---|
2573 | 2611 | if (ret < 0) |
---|
2574 | 2612 | goto out; |
---|
2575 | | - if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) { |
---|
| 2613 | + if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) { |
---|
2576 | 2614 | ret = -EUCLEAN; |
---|
2577 | 2615 | btrfs_err(fs_info, "invalid csum type, has %u want %u", |
---|
2578 | 2616 | btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32); |
---|
.. | .. |
---|
2593 | 2631 | return ret; |
---|
2594 | 2632 | } |
---|
2595 | 2633 | |
---|
2596 | | -int open_ctree(struct super_block *sb, |
---|
2597 | | - struct btrfs_fs_devices *fs_devices, |
---|
2598 | | - char *options) |
---|
| 2634 | +static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) |
---|
2599 | 2635 | { |
---|
2600 | | - u32 sectorsize; |
---|
2601 | | - u32 nodesize; |
---|
2602 | | - u32 stripesize; |
---|
2603 | | - u64 generation; |
---|
2604 | | - u64 features; |
---|
2605 | | - struct btrfs_key location; |
---|
2606 | | - struct buffer_head *bh; |
---|
2607 | | - struct btrfs_super_block *disk_super; |
---|
2608 | | - struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
---|
2609 | | - struct btrfs_root *tree_root; |
---|
2610 | | - struct btrfs_root *chunk_root; |
---|
2611 | | - int ret; |
---|
2612 | | - int err = -EINVAL; |
---|
2613 | | - int num_backups_tried = 0; |
---|
2614 | | - int backup_index = 0; |
---|
2615 | | - int clear_free_space_tree = 0; |
---|
2616 | | - int level; |
---|
| 2636 | + int backup_index = find_newest_super_backup(fs_info); |
---|
| 2637 | + struct btrfs_super_block *sb = fs_info->super_copy; |
---|
| 2638 | + struct btrfs_root *tree_root = fs_info->tree_root; |
---|
| 2639 | + bool handle_error = false; |
---|
| 2640 | + int ret = 0; |
---|
| 2641 | + int i; |
---|
2617 | 2642 | |
---|
2618 | | - tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
---|
2619 | | - chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
---|
2620 | | - if (!tree_root || !chunk_root) { |
---|
2621 | | - err = -ENOMEM; |
---|
2622 | | - goto fail; |
---|
| 2643 | + for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { |
---|
| 2644 | + u64 generation; |
---|
| 2645 | + int level; |
---|
| 2646 | + |
---|
| 2647 | + if (handle_error) { |
---|
| 2648 | + if (!IS_ERR(tree_root->node)) |
---|
| 2649 | + free_extent_buffer(tree_root->node); |
---|
| 2650 | + tree_root->node = NULL; |
---|
| 2651 | + |
---|
| 2652 | + if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) |
---|
| 2653 | + break; |
---|
| 2654 | + |
---|
| 2655 | + free_root_pointers(fs_info, 0); |
---|
| 2656 | + |
---|
| 2657 | + /* |
---|
| 2658 | + * Don't use the log in recovery mode, it won't be |
---|
| 2659 | + * valid |
---|
| 2660 | + */ |
---|
| 2661 | + btrfs_set_super_log_root(sb, 0); |
---|
| 2662 | + |
---|
| 2663 | + /* We can't trust the free space cache either */ |
---|
| 2664 | + btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); |
---|
| 2665 | + |
---|
| 2666 | + ret = read_backup_root(fs_info, i); |
---|
| 2667 | + backup_index = ret; |
---|
| 2668 | + if (ret < 0) |
---|
| 2669 | + return ret; |
---|
| 2670 | + } |
---|
| 2671 | + generation = btrfs_super_generation(sb); |
---|
| 2672 | + level = btrfs_super_root_level(sb); |
---|
| 2673 | + tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb), |
---|
| 2674 | + generation, level, NULL); |
---|
| 2675 | + if (IS_ERR(tree_root->node)) { |
---|
| 2676 | + handle_error = true; |
---|
| 2677 | + ret = PTR_ERR(tree_root->node); |
---|
| 2678 | + tree_root->node = NULL; |
---|
| 2679 | + btrfs_warn(fs_info, "couldn't read tree root"); |
---|
| 2680 | + continue; |
---|
| 2681 | + |
---|
| 2682 | + } else if (!extent_buffer_uptodate(tree_root->node)) { |
---|
| 2683 | + handle_error = true; |
---|
| 2684 | + ret = -EIO; |
---|
| 2685 | + btrfs_warn(fs_info, "error while reading tree root"); |
---|
| 2686 | + continue; |
---|
| 2687 | + } |
---|
| 2688 | + |
---|
| 2689 | + btrfs_set_root_node(&tree_root->root_item, tree_root->node); |
---|
| 2690 | + tree_root->commit_root = btrfs_root_node(tree_root); |
---|
| 2691 | + btrfs_set_root_refs(&tree_root->root_item, 1); |
---|
| 2692 | + |
---|
| 2693 | + /* |
---|
| 2694 | + * No need to hold btrfs_root::objectid_mutex since the fs |
---|
| 2695 | + * hasn't been fully initialised and we are the only user |
---|
| 2696 | + */ |
---|
| 2697 | + ret = btrfs_find_highest_objectid(tree_root, |
---|
| 2698 | + &tree_root->highest_objectid); |
---|
| 2699 | + if (ret < 0) { |
---|
| 2700 | + handle_error = true; |
---|
| 2701 | + continue; |
---|
| 2702 | + } |
---|
| 2703 | + |
---|
| 2704 | + ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); |
---|
| 2705 | + |
---|
| 2706 | + ret = btrfs_read_roots(fs_info); |
---|
| 2707 | + if (ret < 0) { |
---|
| 2708 | + handle_error = true; |
---|
| 2709 | + continue; |
---|
| 2710 | + } |
---|
| 2711 | + |
---|
| 2712 | + /* All successful */ |
---|
| 2713 | + fs_info->generation = generation; |
---|
| 2714 | + fs_info->last_trans_committed = generation; |
---|
| 2715 | + |
---|
| 2716 | + /* Always begin writing backup roots after the one being used */ |
---|
| 2717 | + if (backup_index < 0) { |
---|
| 2718 | + fs_info->backup_root_index = 0; |
---|
| 2719 | + } else { |
---|
| 2720 | + fs_info->backup_root_index = backup_index + 1; |
---|
| 2721 | + fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS; |
---|
| 2722 | + } |
---|
| 2723 | + break; |
---|
2623 | 2724 | } |
---|
2624 | 2725 | |
---|
2625 | | - ret = init_srcu_struct(&fs_info->subvol_srcu); |
---|
2626 | | - if (ret) { |
---|
2627 | | - err = ret; |
---|
2628 | | - goto fail; |
---|
2629 | | - } |
---|
| 2726 | + return ret; |
---|
| 2727 | +} |
---|
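init_tree_roots() above retries up to BTRFS_NUM_BACKUP_ROOTS times: after a failure it frees the partially read tree root, disables log replay, forces a free-space-cache rebuild, and then loads the next backup. The control flow reduces to a loop like the sketch below, where try_tree_root() is a made-up stand-in that fails for the first two attempts:

```c
#include <stdio.h>

#define NUM_BACKUP_ROOTS 4

/* Hypothetical stand-in: pretend the primary root and the first backup are bad. */
static int try_tree_root(int attempt)
{
	return attempt >= 2 ? 0 : -5;
}

int main(void)
{
	int handle_error = 0;
	int ret = 0;

	for (int i = 0; i < NUM_BACKUP_ROOTS; i++) {
		if (handle_error)
			printf("dropping partial state, trying backup priority %d\n", i);

		ret = try_tree_root(i);
		if (ret < 0) {
			handle_error = 1;
			continue;	/* next iteration falls back further */
		}
		printf("tree root read OK on attempt %d\n", i);
		break;
	}
	return ret < 0;
}
```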
2630 | 2728 | |
---|
2631 | | - ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); |
---|
2632 | | - if (ret) { |
---|
2633 | | - err = ret; |
---|
2634 | | - goto fail_srcu; |
---|
2635 | | - } |
---|
2636 | | - fs_info->dirty_metadata_batch = PAGE_SIZE * |
---|
2637 | | - (1 + ilog2(nr_cpu_ids)); |
---|
2638 | | - |
---|
2639 | | - ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); |
---|
2640 | | - if (ret) { |
---|
2641 | | - err = ret; |
---|
2642 | | - goto fail_dirty_metadata_bytes; |
---|
2643 | | - } |
---|
2644 | | - |
---|
2645 | | - ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL); |
---|
2646 | | - if (ret) { |
---|
2647 | | - err = ret; |
---|
2648 | | - goto fail_delalloc_bytes; |
---|
2649 | | - } |
---|
2650 | | - |
---|
| 2729 | +void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) |
---|
| 2730 | +{ |
---|
2651 | 2731 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); |
---|
2652 | 2732 | INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); |
---|
2653 | 2733 | INIT_LIST_HEAD(&fs_info->trans_list); |
---|
.. | .. |
---|
2655 | 2735 | INIT_LIST_HEAD(&fs_info->delayed_iputs); |
---|
2656 | 2736 | INIT_LIST_HEAD(&fs_info->delalloc_roots); |
---|
2657 | 2737 | INIT_LIST_HEAD(&fs_info->caching_block_groups); |
---|
2658 | | - INIT_LIST_HEAD(&fs_info->pending_raid_kobjs); |
---|
2659 | | - spin_lock_init(&fs_info->pending_raid_kobjs_lock); |
---|
2660 | 2738 | spin_lock_init(&fs_info->delalloc_root_lock); |
---|
2661 | 2739 | spin_lock_init(&fs_info->trans_lock); |
---|
2662 | 2740 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
---|
2663 | 2741 | spin_lock_init(&fs_info->delayed_iput_lock); |
---|
2664 | 2742 | spin_lock_init(&fs_info->defrag_inodes_lock); |
---|
2665 | 2743 | spin_lock_init(&fs_info->super_lock); |
---|
2666 | | - spin_lock_init(&fs_info->qgroup_op_lock); |
---|
2667 | 2744 | spin_lock_init(&fs_info->buffer_lock); |
---|
2668 | 2745 | spin_lock_init(&fs_info->unused_bgs_lock); |
---|
2669 | 2746 | rwlock_init(&fs_info->tree_mod_log_lock); |
---|
.. | .. |
---|
2671 | 2748 | mutex_init(&fs_info->delete_unused_bgs_mutex); |
---|
2672 | 2749 | mutex_init(&fs_info->reloc_mutex); |
---|
2673 | 2750 | mutex_init(&fs_info->delalloc_root_mutex); |
---|
2674 | | - mutex_init(&fs_info->cleaner_delayed_iput_mutex); |
---|
2675 | 2751 | seqlock_init(&fs_info->profiles_lock); |
---|
2676 | 2752 | |
---|
2677 | 2753 | INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); |
---|
2678 | 2754 | INIT_LIST_HEAD(&fs_info->space_info); |
---|
2679 | 2755 | INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); |
---|
2680 | 2756 | INIT_LIST_HEAD(&fs_info->unused_bgs); |
---|
2681 | | - btrfs_mapping_init(&fs_info->mapping_tree); |
---|
| 2757 | +#ifdef CONFIG_BTRFS_DEBUG |
---|
| 2758 | + INIT_LIST_HEAD(&fs_info->allocated_roots); |
---|
| 2759 | + INIT_LIST_HEAD(&fs_info->allocated_ebs); |
---|
| 2760 | + spin_lock_init(&fs_info->eb_leak_lock); |
---|
| 2761 | +#endif |
---|
| 2762 | + extent_map_tree_init(&fs_info->mapping_tree); |
---|
2682 | 2763 | btrfs_init_block_rsv(&fs_info->global_block_rsv, |
---|
2683 | 2764 | BTRFS_BLOCK_RSV_GLOBAL); |
---|
2684 | 2765 | btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); |
---|
.. | .. |
---|
2686 | 2767 | btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); |
---|
2687 | 2768 | btrfs_init_block_rsv(&fs_info->delayed_block_rsv, |
---|
2688 | 2769 | BTRFS_BLOCK_RSV_DELOPS); |
---|
| 2770 | + btrfs_init_block_rsv(&fs_info->delayed_refs_rsv, |
---|
| 2771 | + BTRFS_BLOCK_RSV_DELREFS); |
---|
| 2772 | + |
---|
2689 | 2773 | atomic_set(&fs_info->async_delalloc_pages, 0); |
---|
2690 | 2774 | atomic_set(&fs_info->defrag_running, 0); |
---|
2691 | | - atomic_set(&fs_info->qgroup_op_seq, 0); |
---|
2692 | 2775 | atomic_set(&fs_info->reada_works_cnt, 0); |
---|
| 2776 | + atomic_set(&fs_info->nr_delayed_iputs, 0); |
---|
2693 | 2777 | atomic64_set(&fs_info->tree_mod_seq, 0); |
---|
2694 | | - fs_info->sb = sb; |
---|
2695 | 2778 | fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; |
---|
2696 | 2779 | fs_info->metadata_ratio = 0; |
---|
2697 | 2780 | fs_info->defrag_inodes = RB_ROOT; |
---|
.. | .. |
---|
2710 | 2793 | INIT_LIST_HEAD(&fs_info->ordered_roots); |
---|
2711 | 2794 | spin_lock_init(&fs_info->ordered_root_lock); |
---|
2712 | 2795 | |
---|
2713 | | - fs_info->btree_inode = new_inode(sb); |
---|
2714 | | - if (!fs_info->btree_inode) { |
---|
2715 | | - err = -ENOMEM; |
---|
2716 | | - goto fail_bio_counter; |
---|
2717 | | - } |
---|
2718 | | - mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); |
---|
2719 | | - |
---|
2720 | | - fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), |
---|
2721 | | - GFP_KERNEL); |
---|
2722 | | - if (!fs_info->delayed_root) { |
---|
2723 | | - err = -ENOMEM; |
---|
2724 | | - goto fail_iput; |
---|
2725 | | - } |
---|
2726 | | - btrfs_init_delayed_root(fs_info->delayed_root); |
---|
2727 | | - |
---|
2728 | 2796 | btrfs_init_scrub(fs_info); |
---|
2729 | 2797 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
---|
2730 | 2798 | fs_info->check_integrity_print_mask = 0; |
---|
2731 | 2799 | #endif |
---|
2732 | 2800 | btrfs_init_balance(fs_info); |
---|
2733 | | - btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work); |
---|
2734 | | - |
---|
2735 | | - sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; |
---|
2736 | | - sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); |
---|
2737 | | - |
---|
2738 | | - btrfs_init_btree_inode(fs_info); |
---|
| 2801 | + btrfs_init_async_reclaim_work(fs_info); |
---|
2739 | 2802 | |
---|
2740 | 2803 | spin_lock_init(&fs_info->block_group_cache_lock); |
---|
2741 | 2804 | fs_info->block_group_cache_tree = RB_ROOT; |
---|
2742 | 2805 | fs_info->first_logical_byte = (u64)-1; |
---|
2743 | 2806 | |
---|
2744 | | - extent_io_tree_init(&fs_info->freed_extents[0], NULL); |
---|
2745 | | - extent_io_tree_init(&fs_info->freed_extents[1], NULL); |
---|
2746 | | - fs_info->pinned_extents = &fs_info->freed_extents[0]; |
---|
| 2807 | + extent_io_tree_init(fs_info, &fs_info->excluded_extents, |
---|
| 2808 | + IO_TREE_FS_EXCLUDED_EXTENTS, NULL); |
---|
2747 | 2809 | set_bit(BTRFS_FS_BARRIER, &fs_info->flags); |
---|
2748 | 2810 | |
---|
2749 | 2811 | mutex_init(&fs_info->ordered_operations_mutex); |
---|
.. | .. |
---|
2759 | 2821 | |
---|
2760 | 2822 | btrfs_init_dev_replace_locks(fs_info); |
---|
2761 | 2823 | btrfs_init_qgroup(fs_info); |
---|
| 2824 | + btrfs_discard_init(fs_info); |
---|
2762 | 2825 | |
---|
2763 | 2826 | btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); |
---|
2764 | 2827 | btrfs_init_free_cluster(&fs_info->data_alloc_cluster); |
---|
.. | .. |
---|
2767 | 2830 | init_waitqueue_head(&fs_info->transaction_wait); |
---|
2768 | 2831 | init_waitqueue_head(&fs_info->transaction_blocked_wait); |
---|
2769 | 2832 | init_waitqueue_head(&fs_info->async_submit_wait); |
---|
2770 | | - |
---|
2771 | | - INIT_LIST_HEAD(&fs_info->pinned_chunks); |
---|
| 2833 | + init_waitqueue_head(&fs_info->delayed_iputs_wait); |
---|
2772 | 2834 | |
---|
2773 | 2835 | /* Usable values until the real ones are cached from the superblock */ |
---|
2774 | 2836 | fs_info->nodesize = 4096; |
---|
2775 | 2837 | fs_info->sectorsize = 4096; |
---|
2776 | 2838 | fs_info->stripesize = 4096; |
---|
2777 | 2839 | |
---|
2778 | | - ret = btrfs_alloc_stripe_hash_table(fs_info); |
---|
2779 | | - if (ret) { |
---|
2780 | | - err = ret; |
---|
2781 | | - goto fail_alloc; |
---|
| 2840 | + spin_lock_init(&fs_info->swapfile_pins_lock); |
---|
| 2841 | + fs_info->swapfile_pins = RB_ROOT; |
---|
| 2842 | + |
---|
| 2843 | + fs_info->send_in_progress = 0; |
---|
| 2844 | +} |
---|
| 2845 | + |
---|
| 2846 | +static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb) |
---|
| 2847 | +{ |
---|
| 2848 | + int ret; |
---|
| 2849 | + |
---|
| 2850 | + fs_info->sb = sb; |
---|
| 2851 | + sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; |
---|
| 2852 | + sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); |
---|
| 2853 | + |
---|
| 2854 | + ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL); |
---|
| 2855 | + if (ret) |
---|
| 2856 | + return ret; |
---|
| 2857 | + |
---|
| 2858 | + ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); |
---|
| 2859 | + if (ret) |
---|
| 2860 | + return ret; |
---|
| 2861 | + |
---|
| 2862 | + fs_info->dirty_metadata_batch = PAGE_SIZE * |
---|
| 2863 | + (1 + ilog2(nr_cpu_ids)); |
---|
| 2864 | + |
---|
| 2865 | + ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); |
---|
| 2866 | + if (ret) |
---|
| 2867 | + return ret; |
---|
| 2868 | + |
---|
| 2869 | + ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, |
---|
| 2870 | + GFP_KERNEL); |
---|
| 2871 | + if (ret) |
---|
| 2872 | + return ret; |
---|
| 2873 | + |
---|
| 2874 | + fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), |
---|
| 2875 | + GFP_KERNEL); |
---|
| 2876 | + if (!fs_info->delayed_root) |
---|
| 2877 | + return -ENOMEM; |
---|
| 2878 | + btrfs_init_delayed_root(fs_info->delayed_root); |
---|
| 2879 | + |
---|
| 2880 | + return btrfs_alloc_stripe_hash_table(fs_info); |
---|
| 2881 | +} |
---|
| 2882 | + |
---|
| 2883 | +static int btrfs_uuid_rescan_kthread(void *data) |
---|
| 2884 | +{ |
---|
| 2885 | + struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; |
---|
| 2886 | + int ret; |
---|
| 2887 | + |
---|
| 2888 | + /* |
---|
| 2889 | + * 1st step is to iterate through the existing UUID tree and |
---|
| 2890 | + * to delete all entries that contain outdated data. |
---|
| 2891 | + * 2nd step is to add all missing entries to the UUID tree. |
---|
| 2892 | + */ |
---|
| 2893 | + ret = btrfs_uuid_tree_iterate(fs_info); |
---|
| 2894 | + if (ret < 0) { |
---|
| 2895 | + if (ret != -EINTR) |
---|
| 2896 | + btrfs_warn(fs_info, "iterating uuid_tree failed %d", |
---|
| 2897 | + ret); |
---|
| 2898 | + up(&fs_info->uuid_tree_rescan_sem); |
---|
| 2899 | + return ret; |
---|
| 2900 | + } |
---|
| 2901 | + return btrfs_uuid_scan_kthread(data); |
---|
| 2902 | +} |
---|
| 2903 | + |
---|
| 2904 | +static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) |
---|
| 2905 | +{ |
---|
| 2906 | + struct task_struct *task; |
---|
| 2907 | + |
---|
| 2908 | + down(&fs_info->uuid_tree_rescan_sem); |
---|
| 2909 | + task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); |
---|
| 2910 | + if (IS_ERR(task)) { |
---|
| 2911 | + /* fs_info->update_uuid_tree_gen remains 0 in all error case */ |
---|
| 2912 | + btrfs_warn(fs_info, "failed to start uuid_rescan task"); |
---|
| 2913 | + up(&fs_info->uuid_tree_rescan_sem); |
---|
| 2914 | + return PTR_ERR(task); |
---|
2782 | 2915 | } |
---|
2783 | 2916 | |
---|
2784 | | - __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); |
---|
| 2917 | + return 0; |
---|
| 2918 | +} |
---|
| 2919 | + |
---|
| 2920 | +int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, |
---|
| 2921 | + char *options) |
---|
| 2922 | +{ |
---|
| 2923 | + u32 sectorsize; |
---|
| 2924 | + u32 nodesize; |
---|
| 2925 | + u32 stripesize; |
---|
| 2926 | + u64 generation; |
---|
| 2927 | + u64 features; |
---|
| 2928 | + u16 csum_type; |
---|
| 2929 | + struct btrfs_super_block *disk_super; |
---|
| 2930 | + struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
---|
| 2931 | + struct btrfs_root *tree_root; |
---|
| 2932 | + struct btrfs_root *chunk_root; |
---|
| 2933 | + int ret; |
---|
| 2934 | + int err = -EINVAL; |
---|
| 2935 | + int clear_free_space_tree = 0; |
---|
| 2936 | + int level; |
---|
| 2937 | + |
---|
| 2938 | + ret = init_mount_fs_info(fs_info, sb); |
---|
| 2939 | + if (ret) { |
---|
| 2940 | + err = ret; |
---|
| 2941 | + goto fail; |
---|
| 2942 | + } |
---|
| 2943 | + |
---|
| 2944 | + /* These need to be init'ed before we start creating inodes and such. */ |
---|
| 2945 | + tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, |
---|
| 2946 | + GFP_KERNEL); |
---|
| 2947 | + fs_info->tree_root = tree_root; |
---|
| 2948 | + chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID, |
---|
| 2949 | + GFP_KERNEL); |
---|
| 2950 | + fs_info->chunk_root = chunk_root; |
---|
| 2951 | + if (!tree_root || !chunk_root) { |
---|
| 2952 | + err = -ENOMEM; |
---|
| 2953 | + goto fail; |
---|
| 2954 | + } |
---|
| 2955 | + |
---|
| 2956 | + fs_info->btree_inode = new_inode(sb); |
---|
| 2957 | + if (!fs_info->btree_inode) { |
---|
| 2958 | + err = -ENOMEM; |
---|
| 2959 | + goto fail; |
---|
| 2960 | + } |
---|
| 2961 | + mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); |
---|
| 2962 | + btrfs_init_btree_inode(fs_info); |
---|
2785 | 2963 | |
---|
2786 | 2964 | invalidate_bdev(fs_devices->latest_bdev); |
---|
2787 | 2965 | |
---|
2788 | 2966 | /* |
---|
2789 | 2967 | * Read super block and check the signature bytes only |
---|
2790 | 2968 | */ |
---|
2791 | | - bh = btrfs_read_dev_super(fs_devices->latest_bdev); |
---|
2792 | | - if (IS_ERR(bh)) { |
---|
2793 | | - err = PTR_ERR(bh); |
---|
| 2969 | + disk_super = btrfs_read_dev_super(fs_devices->latest_bdev); |
---|
| 2970 | + if (IS_ERR(disk_super)) { |
---|
| 2971 | + err = PTR_ERR(disk_super); |
---|
| 2972 | + goto fail_alloc; |
---|
| 2973 | + } |
---|
| 2974 | + |
---|
| 2975 | + /* |
---|
| 2976 | + * Verify the type first, if that or the checksum value are |
---|
| 2977 | + * corrupted, we'll find out |
---|
| 2978 | + */ |
---|
| 2979 | + csum_type = btrfs_super_csum_type(disk_super); |
---|
| 2980 | + if (!btrfs_supported_super_csum(csum_type)) { |
---|
| 2981 | + btrfs_err(fs_info, "unsupported checksum algorithm: %u", |
---|
| 2982 | + csum_type); |
---|
| 2983 | + err = -EINVAL; |
---|
| 2984 | + btrfs_release_disk_super(disk_super); |
---|
| 2985 | + goto fail_alloc; |
---|
| 2986 | + } |
---|
| 2987 | + |
---|
| 2988 | + ret = btrfs_init_csum_hash(fs_info, csum_type); |
---|
| 2989 | + if (ret) { |
---|
| 2990 | + err = ret; |
---|
| 2991 | + btrfs_release_disk_super(disk_super); |
---|
2794 | 2992 | goto fail_alloc; |
---|
2795 | 2993 | } |
---|
2796 | 2994 | |
---|
.. | .. |
---|
2798 | 2996 | * We want to check superblock checksum, the type is stored inside. |
---|
2799 | 2997 | * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). |
---|
2800 | 2998 | */ |
---|
2801 | | - if (btrfs_check_super_csum(fs_info, bh->b_data)) { |
---|
| 2999 | + if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) { |
---|
2802 | 3000 | btrfs_err(fs_info, "superblock checksum mismatch"); |
---|
2803 | 3001 | err = -EINVAL; |
---|
2804 | | - brelse(bh); |
---|
| 3002 | + btrfs_release_disk_super(disk_super); |
---|
2805 | 3003 | goto fail_alloc; |
---|
2806 | 3004 | } |
---|
2807 | 3005 | |
---|
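btrfs_check_super_csum() is likewise outside this hunk. Since the write path later in this patch computes the digest with crypto_shash_digest() over everything past the csum field, the verification side presumably mirrors that; a minimal sketch under that assumption:

static int example_check_super_csum(struct btrfs_fs_info *fs_info,
				    const u8 *raw_disk_sb)
{
	const struct btrfs_super_block *disk_sb =
		(const struct btrfs_super_block *)raw_disk_sb;
	u8 result[BTRFS_CSUM_SIZE];
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

	shash->tfm = fs_info->csum_shash;

	/* The stored checksum covers everything after the csum field itself. */
	crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

	/* Nonzero means mismatch, matching how the caller above treats it. */
	return memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)) ? 1 : 0;
}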
.. | .. |
---|
2810 | 3008 | * following bytes up to INFO_SIZE, the checksum is calculated from |
---|
2811 | 3009 | * the whole block of INFO_SIZE |
---|
2812 | 3010 | */ |
---|
2813 | | - memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); |
---|
| 3011 | + memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy)); |
---|
| 3012 | + btrfs_release_disk_super(disk_super); |
---|
| 3013 | + |
---|
| 3014 | + disk_super = fs_info->super_copy; |
---|
| 3015 | + |
---|
| 3016 | + |
---|
| 3017 | + features = btrfs_super_flags(disk_super); |
---|
| 3018 | + if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) { |
---|
| 3019 | + features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2; |
---|
| 3020 | + btrfs_set_super_flags(disk_super, features); |
---|
| 3021 | + btrfs_info(fs_info, |
---|
| 3022 | + "found metadata UUID change in progress flag, clearing"); |
---|
| 3023 | + } |
---|
| 3024 | + |
---|
2814 | 3025 | memcpy(fs_info->super_for_commit, fs_info->super_copy, |
---|
2815 | 3026 | sizeof(*fs_info->super_for_commit)); |
---|
2816 | | - brelse(bh); |
---|
2817 | | - |
---|
2818 | | - memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); |
---|
2819 | 3027 | |
---|
2820 | 3028 | ret = btrfs_validate_mount_super(fs_info); |
---|
2821 | 3029 | if (ret) { |
---|
.. | .. |
---|
2824 | 3032 | goto fail_alloc; |
---|
2825 | 3033 | } |
---|
2826 | 3034 | |
---|
2827 | | - disk_super = fs_info->super_copy; |
---|
2828 | 3035 | if (!btrfs_super_root(disk_super)) |
---|
2829 | 3036 | goto fail_alloc; |
---|
2830 | 3037 | |
---|
.. | .. |
---|
2833 | 3040 | set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); |
---|
2834 | 3041 | |
---|
2835 | 3042 | /* |
---|
2836 | | - * run through our array of backup supers and setup |
---|
2837 | | - * our ring pointer to the oldest one |
---|
2838 | | - */ |
---|
2839 | | - generation = btrfs_super_generation(disk_super); |
---|
2840 | | - find_oldest_super_backup(fs_info, generation); |
---|
2841 | | - |
---|
2842 | | - /* |
---|
2843 | 3043 | * In the long term, we'll store the compression type in the super |
---|
2844 | 3044 | * block, and it'll be used for per file compression control. |
---|
2845 | 3045 | */ |
---|
2846 | 3046 | fs_info->compress_type = BTRFS_COMPRESS_ZLIB; |
---|
| 3047 | + |
---|
| 3048 | + /* |
---|
| 3049 | + * Flag our filesystem as having big metadata blocks if they are bigger |
---|
| 3050 | + * than the page size |
---|
| 3051 | + */ |
---|
| 3052 | + if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { |
---|
| 3053 | + if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) |
---|
| 3054 | + btrfs_info(fs_info, |
---|
| 3055 | + "flagging fs with big metadata feature"); |
---|
| 3056 | + features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
---|
| 3057 | + } |
---|
| 3058 | + |
---|
| 3059 | + /* Set up fs_info before parsing mount options */ |
---|
| 3060 | + nodesize = btrfs_super_nodesize(disk_super); |
---|
| 3061 | + sectorsize = btrfs_super_sectorsize(disk_super); |
---|
| 3062 | + stripesize = sectorsize; |
---|
| 3063 | + fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
---|
| 3064 | + fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
---|
| 3065 | + |
---|
| 3066 | + /* Cache block sizes */ |
---|
| 3067 | + fs_info->nodesize = nodesize; |
---|
| 3068 | + fs_info->sectorsize = sectorsize; |
---|
| 3069 | + fs_info->stripesize = stripesize; |
---|
2847 | 3070 | |
---|
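For a sense of the numbers: with the common 16 KiB nodesize and 4 KiB sectorsize on an 8-CPU machine, ilog2(nr_cpu_ids) = 3, so dirty_metadata_batch = 16384 * (1 + 3) = 64 KiB and delalloc_batch = 4096 * 512 * (1 + 3) = 8 MiB. Scaling the per-cpu counter batch with the logarithm of the CPU count presumably keeps counter updates cheap on large machines while bounding how far the global totals can lag behind.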
2848 | 3071 | ret = btrfs_parse_options(fs_info, options, sb->s_flags); |
---|
2849 | 3072 | if (ret) { |
---|
.. | .. |
---|
2855 | 3078 | ~BTRFS_FEATURE_INCOMPAT_SUPP; |
---|
2856 | 3079 | if (features) { |
---|
2857 | 3080 | btrfs_err(fs_info, |
---|
2858 | | - "cannot mount because of unsupported optional features (%llx)", |
---|
| 3081 | + "cannot mount because of unsupported optional features (0x%llx)", |
---|
2859 | 3082 | features); |
---|
2860 | 3083 | err = -EINVAL; |
---|
2861 | 3084 | goto fail_alloc; |
---|
.. | .. |
---|
2870 | 3093 | |
---|
2871 | 3094 | if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) |
---|
2872 | 3095 | btrfs_info(fs_info, "has skinny extents"); |
---|
2873 | | - |
---|
2874 | | - /* |
---|
2875 | | - * flag our filesystem as having big metadata blocks if |
---|
2876 | | - * they are bigger than the page size |
---|
2877 | | - */ |
---|
2878 | | - if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { |
---|
2879 | | - if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) |
---|
2880 | | - btrfs_info(fs_info, |
---|
2881 | | - "flagging fs with big metadata feature"); |
---|
2882 | | - features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
---|
2883 | | - } |
---|
2884 | | - |
---|
2885 | | - nodesize = btrfs_super_nodesize(disk_super); |
---|
2886 | | - sectorsize = btrfs_super_sectorsize(disk_super); |
---|
2887 | | - stripesize = sectorsize; |
---|
2888 | | - fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
---|
2889 | | - fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
---|
2890 | | - |
---|
2891 | | - /* Cache block sizes */ |
---|
2892 | | - fs_info->nodesize = nodesize; |
---|
2893 | | - fs_info->sectorsize = sectorsize; |
---|
2894 | | - fs_info->stripesize = stripesize; |
---|
2895 | 3096 | |
---|
2896 | 3097 | /* |
---|
2897 | 3098 | * mixed block groups end up with duplicate but slightly offset |
---|
.. | .. |
---|
2915 | 3116 | ~BTRFS_FEATURE_COMPAT_RO_SUPP; |
---|
2916 | 3117 | if (!sb_rdonly(sb) && features) { |
---|
2917 | 3118 | btrfs_err(fs_info, |
---|
2918 | | - "cannot mount read-write because of unsupported optional features (%llx)", |
---|
| 3119 | + "cannot mount read-write because of unsupported optional features (0x%llx)", |
---|
2919 | 3120 | features); |
---|
2920 | 3121 | err = -EINVAL; |
---|
2921 | 3122 | goto fail_alloc; |
---|
2922 | 3123 | } |
---|
| 3124 | + /* |
---|
| 3125 | + * If we have unsupported RO compat features, then even though we are 
---|
| 3126 | + * mounted read-only we must not cause any metadata writes, including 
---|
| 3127 | + * log replay, or we could break whatever the new feature requires. 
---|
| 3128 | + */ |
---|
| 3129 | + if (unlikely(features && btrfs_super_log_root(disk_super) && |
---|
| 3130 | + !btrfs_test_opt(fs_info, NOLOGREPLAY))) { |
---|
| 3131 | + btrfs_err(fs_info, |
---|
| 3132 | +"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay", |
---|
| 3133 | + features); |
---|
| 3134 | + err = -EINVAL; |
---|
| 3135 | + goto fail_alloc; |
---|
| 3136 | + } |
---|
| 3137 | + |
---|
2923 | 3138 | |
---|
2924 | 3139 | ret = btrfs_init_workqueues(fs_info, fs_devices); |
---|
2925 | 3140 | if (ret) { |
---|
.. | .. |
---|
2927 | 3142 | goto fail_sb_buffer; |
---|
2928 | 3143 | } |
---|
2929 | 3144 | |
---|
2930 | | - sb->s_bdi->congested_fn = btrfs_congested_fn; |
---|
2931 | | - sb->s_bdi->congested_data = fs_info; |
---|
2932 | | - sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; |
---|
2933 | | - sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE; |
---|
2934 | 3145 | sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); |
---|
2935 | 3146 | sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); |
---|
2936 | 3147 | |
---|
2937 | 3148 | sb->s_blocksize = sectorsize; |
---|
2938 | 3149 | sb->s_blocksize_bits = blksize_bits(sectorsize); |
---|
2939 | | - memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE); |
---|
| 3150 | + memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); |
---|
2940 | 3151 | |
---|
2941 | 3152 | mutex_lock(&fs_info->chunk_mutex); |
---|
2942 | 3153 | ret = btrfs_read_sys_array(fs_info); |
---|
.. | .. |
---|
2948 | 3159 | |
---|
2949 | 3160 | generation = btrfs_super_chunk_root_generation(disk_super); |
---|
2950 | 3161 | level = btrfs_super_chunk_root_level(disk_super); |
---|
2951 | | - |
---|
2952 | | - __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); |
---|
2953 | 3162 | |
---|
2954 | 3163 | chunk_root->node = read_tree_block(fs_info, |
---|
2955 | 3164 | btrfs_super_chunk_root(disk_super), |
---|
.. | .. |
---|
2966 | 3175 | chunk_root->commit_root = btrfs_root_node(chunk_root); |
---|
2967 | 3176 | |
---|
2968 | 3177 | read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, |
---|
2969 | | - btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); |
---|
| 3178 | + offsetof(struct btrfs_header, chunk_tree_uuid), |
---|
| 3179 | + BTRFS_UUID_SIZE); |
---|
2970 | 3180 | |
---|
2971 | 3181 | ret = btrfs_read_chunk_tree(fs_info); |
---|
2972 | 3182 | if (ret) { |
---|
.. | .. |
---|
2985 | 3195 | goto fail_tree_roots; |
---|
2986 | 3196 | } |
---|
2987 | 3197 | |
---|
2988 | | -retry_root_backup: |
---|
2989 | | - generation = btrfs_super_generation(disk_super); |
---|
2990 | | - level = btrfs_super_root_level(disk_super); |
---|
2991 | | - |
---|
2992 | | - tree_root->node = read_tree_block(fs_info, |
---|
2993 | | - btrfs_super_root(disk_super), |
---|
2994 | | - generation, level, NULL); |
---|
2995 | | - if (IS_ERR(tree_root->node) || |
---|
2996 | | - !extent_buffer_uptodate(tree_root->node)) { |
---|
2997 | | - btrfs_warn(fs_info, "failed to read tree root"); |
---|
2998 | | - if (!IS_ERR(tree_root->node)) |
---|
2999 | | - free_extent_buffer(tree_root->node); |
---|
3000 | | - tree_root->node = NULL; |
---|
3001 | | - goto recovery_tree_root; |
---|
3002 | | - } |
---|
3003 | | - |
---|
3004 | | - btrfs_set_root_node(&tree_root->root_item, tree_root->node); |
---|
3005 | | - tree_root->commit_root = btrfs_root_node(tree_root); |
---|
3006 | | - btrfs_set_root_refs(&tree_root->root_item, 1); |
---|
3007 | | - |
---|
3008 | | - mutex_lock(&tree_root->objectid_mutex); |
---|
3009 | | - ret = btrfs_find_highest_objectid(tree_root, |
---|
3010 | | - &tree_root->highest_objectid); |
---|
3011 | | - if (ret) { |
---|
3012 | | - mutex_unlock(&tree_root->objectid_mutex); |
---|
3013 | | - goto recovery_tree_root; |
---|
3014 | | - } |
---|
3015 | | - |
---|
3016 | | - ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); |
---|
3017 | | - |
---|
3018 | | - mutex_unlock(&tree_root->objectid_mutex); |
---|
3019 | | - |
---|
3020 | | - ret = btrfs_read_roots(fs_info); |
---|
| 3198 | + ret = init_tree_roots(fs_info); |
---|
3021 | 3199 | if (ret) |
---|
3022 | | - goto recovery_tree_root; |
---|
3023 | | - |
---|
3024 | | - fs_info->generation = generation; |
---|
3025 | | - fs_info->last_trans_committed = generation; |
---|
| 3200 | + goto fail_tree_roots; |
---|
3026 | 3201 | |
---|
3027 | 3202 | /* |
---|
3028 | 3203 | * If we have a uuid root and we're not being told to rescan we need to |
---|
.. | .. |
---|
3063 | 3238 | |
---|
3064 | 3239 | btrfs_free_extra_devids(fs_devices, 1); |
---|
3065 | 3240 | |
---|
3066 | | - ret = btrfs_sysfs_add_fsid(fs_devices, NULL); |
---|
| 3241 | + ret = btrfs_sysfs_add_fsid(fs_devices); |
---|
3067 | 3242 | if (ret) { |
---|
3068 | 3243 | btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", |
---|
3069 | 3244 | ret); |
---|
3070 | 3245 | goto fail_block_groups; |
---|
3071 | | - } |
---|
3072 | | - |
---|
3073 | | - ret = btrfs_sysfs_add_device(fs_devices); |
---|
3074 | | - if (ret) { |
---|
3075 | | - btrfs_err(fs_info, "failed to init sysfs device interface: %d", |
---|
3076 | | - ret); |
---|
3077 | | - goto fail_fsdev_sysfs; |
---|
3078 | 3246 | } |
---|
3079 | 3247 | |
---|
3080 | 3248 | ret = btrfs_sysfs_add_mounted(fs_info); |
---|
.. | .. |
---|
3098 | 3266 | if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices && |
---|
3099 | 3267 | !btrfs_check_rw_degradable(fs_info, NULL)) { |
---|
3100 | 3268 | btrfs_warn(fs_info, |
---|
3101 | | - "writeable mount is not allowed due to too many missing devices"); |
---|
| 3269 | + "writable mount is not allowed due to too many missing devices"); |
---|
3102 | 3270 | goto fail_sysfs; |
---|
3103 | 3271 | } |
---|
3104 | 3272 | |
---|
.. | .. |
---|
3175 | 3343 | } |
---|
3176 | 3344 | } |
---|
3177 | 3345 | |
---|
3178 | | - location.objectid = BTRFS_FS_TREE_OBJECTID; |
---|
3179 | | - location.type = BTRFS_ROOT_ITEM_KEY; |
---|
3180 | | - location.offset = 0; |
---|
3181 | | - |
---|
3182 | | - fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); |
---|
| 3346 | + fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true); |
---|
3183 | 3347 | if (IS_ERR(fs_info->fs_root)) { |
---|
3184 | 3348 | err = PTR_ERR(fs_info->fs_root); |
---|
3185 | 3349 | btrfs_warn(fs_info, "failed to read fs tree: %d", err); |
---|
.. | .. |
---|
3246 | 3410 | } |
---|
3247 | 3411 | |
---|
3248 | 3412 | btrfs_qgroup_rescan_resume(fs_info); |
---|
| 3413 | + btrfs_discard_resume(fs_info); |
---|
3249 | 3414 | |
---|
3250 | 3415 | if (!fs_info->uuid_root) { |
---|
3251 | 3416 | btrfs_info(fs_info, "creating UUID tree"); |
---|
.. | .. |
---|
3303 | 3468 | btrfs_put_block_group_cache(fs_info); |
---|
3304 | 3469 | |
---|
3305 | 3470 | fail_tree_roots: |
---|
| 3471 | + if (fs_info->data_reloc_root) |
---|
| 3472 | + btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root); |
---|
3306 | 3473 | free_root_pointers(fs_info, true); |
---|
3307 | 3474 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); |
---|
3308 | 3475 | |
---|
.. | .. |
---|
3310 | 3477 | btrfs_stop_all_workers(fs_info); |
---|
3311 | 3478 | btrfs_free_block_groups(fs_info); |
---|
3312 | 3479 | fail_alloc: |
---|
3313 | | -fail_iput: |
---|
3314 | 3480 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
---|
3315 | 3481 | |
---|
3316 | 3482 | iput(fs_info->btree_inode); |
---|
3317 | | -fail_bio_counter: |
---|
3318 | | - percpu_counter_destroy(&fs_info->bio_counter); |
---|
3319 | | -fail_delalloc_bytes: |
---|
3320 | | - percpu_counter_destroy(&fs_info->delalloc_bytes); |
---|
3321 | | -fail_dirty_metadata_bytes: |
---|
3322 | | - percpu_counter_destroy(&fs_info->dirty_metadata_bytes); |
---|
3323 | | -fail_srcu: |
---|
3324 | | - cleanup_srcu_struct(&fs_info->subvol_srcu); |
---|
3325 | 3483 | fail: |
---|
3326 | | - btrfs_free_stripe_hash_table(fs_info); |
---|
3327 | 3484 | btrfs_close_devices(fs_info->fs_devices); |
---|
3328 | 3485 | return err; |
---|
3329 | | - |
---|
3330 | | -recovery_tree_root: |
---|
3331 | | - if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) |
---|
3332 | | - goto fail_tree_roots; |
---|
3333 | | - |
---|
3334 | | - free_root_pointers(fs_info, false); |
---|
3335 | | - |
---|
3336 | | - /* don't use the log in recovery mode, it won't be valid */ |
---|
3337 | | - btrfs_set_super_log_root(disk_super, 0); |
---|
3338 | | - |
---|
3339 | | - /* we can't trust the free space cache either */ |
---|
3340 | | - btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); |
---|
3341 | | - |
---|
3342 | | - ret = next_root_backup(fs_info, fs_info->super_copy, |
---|
3343 | | - &num_backups_tried, &backup_index); |
---|
3344 | | - if (ret == -1) |
---|
3345 | | - goto fail_block_groups; |
---|
3346 | | - goto retry_root_backup; |
---|
3347 | 3486 | } |
---|
3348 | 3487 | ALLOW_ERROR_INJECTION(open_ctree, ERRNO); |
---|
3349 | 3488 | |
---|
3350 | | -static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) |
---|
| 3489 | +static void btrfs_end_super_write(struct bio *bio) |
---|
3351 | 3490 | { |
---|
3352 | | - if (uptodate) { |
---|
3353 | | - set_buffer_uptodate(bh); |
---|
3354 | | - } else { |
---|
3355 | | - struct btrfs_device *device = (struct btrfs_device *) |
---|
3356 | | - bh->b_private; |
---|
| 3491 | + struct btrfs_device *device = bio->bi_private; |
---|
| 3492 | + struct bio_vec *bvec; |
---|
| 3493 | + struct bvec_iter_all iter_all; |
---|
| 3494 | + struct page *page; |
---|
3357 | 3495 | |
---|
3358 | | - btrfs_warn_rl_in_rcu(device->fs_info, |
---|
3359 | | - "lost page write due to IO error on %s", |
---|
3360 | | - rcu_str_deref(device->name)); |
---|
3361 | | - /* note, we don't set_buffer_write_io_error because we have |
---|
3362 | | - * our own ways of dealing with the IO errors |
---|
3363 | | - */ |
---|
3364 | | - clear_buffer_uptodate(bh); |
---|
3365 | | - btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); |
---|
| 3496 | + bio_for_each_segment_all(bvec, bio, iter_all) { |
---|
| 3497 | + page = bvec->bv_page; |
---|
| 3498 | + |
---|
| 3499 | + if (bio->bi_status) { |
---|
| 3500 | + btrfs_warn_rl_in_rcu(device->fs_info, |
---|
| 3501 | + "lost page write due to IO error on %s (%d)", |
---|
| 3502 | + rcu_str_deref(device->name), |
---|
| 3503 | + blk_status_to_errno(bio->bi_status)); |
---|
| 3504 | + ClearPageUptodate(page); |
---|
| 3505 | + SetPageError(page); |
---|
| 3506 | + btrfs_dev_stat_inc_and_print(device, |
---|
| 3507 | + BTRFS_DEV_STAT_WRITE_ERRS); |
---|
| 3508 | + } else { |
---|
| 3509 | + SetPageUptodate(page); |
---|
| 3510 | + } |
---|
| 3511 | + |
---|
| 3512 | + put_page(page); |
---|
| 3513 | + unlock_page(page); |
---|
3366 | 3514 | } |
---|
3367 | | - unlock_buffer(bh); |
---|
3368 | | - put_bh(bh); |
---|
| 3515 | + |
---|
| 3516 | + bio_put(bio); |
---|
3369 | 3517 | } |
---|
3370 | 3518 | |
---|
3371 | | -int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, |
---|
3372 | | - struct buffer_head **bh_ret) |
---|
| 3519 | +struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, |
---|
| 3520 | + int copy_num) |
---|
3373 | 3521 | { |
---|
3374 | | - struct buffer_head *bh; |
---|
3375 | 3522 | struct btrfs_super_block *super; |
---|
| 3523 | + struct page *page; |
---|
3376 | 3524 | u64 bytenr; |
---|
| 3525 | + struct address_space *mapping = bdev->bd_inode->i_mapping; |
---|
3377 | 3526 | |
---|
3378 | 3527 | bytenr = btrfs_sb_offset(copy_num); |
---|
3379 | 3528 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode)) |
---|
3380 | | - return -EINVAL; |
---|
| 3529 | + return ERR_PTR(-EINVAL); |
---|
3381 | 3530 | |
---|
3382 | | - bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE); |
---|
3383 | | - /* |
---|
3384 | | - * If we fail to read from the underlying devices, as of now |
---|
3385 | | - * the best option we have is to mark it EIO. |
---|
3386 | | - */ |
---|
3387 | | - if (!bh) |
---|
3388 | | - return -EIO; |
---|
| 3531 | + page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS); |
---|
| 3532 | + if (IS_ERR(page)) |
---|
| 3533 | + return ERR_CAST(page); |
---|
3389 | 3534 | |
---|
3390 | | - super = (struct btrfs_super_block *)bh->b_data; |
---|
3391 | | - if (btrfs_super_bytenr(super) != bytenr || |
---|
3392 | | - btrfs_super_magic(super) != BTRFS_MAGIC) { |
---|
3393 | | - brelse(bh); |
---|
3394 | | - return -EINVAL; |
---|
| 3535 | + super = page_address(page); |
---|
| 3536 | + if (btrfs_super_magic(super) != BTRFS_MAGIC) { |
---|
| 3537 | + btrfs_release_disk_super(super); |
---|
| 3538 | + return ERR_PTR(-ENODATA); |
---|
3395 | 3539 | } |
---|
3396 | 3540 | |
---|
3397 | | - *bh_ret = bh; |
---|
3398 | | - return 0; |
---|
| 3541 | + if (btrfs_super_bytenr(super) != bytenr) { |
---|
| 3542 | + btrfs_release_disk_super(super); |
---|
| 3543 | + return ERR_PTR(-EINVAL); |
---|
| 3544 | + } |
---|
| 3545 | + |
---|
| 3546 | + return super; |
---|
3399 | 3547 | } |
---|
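btrfs_release_disk_super() is called throughout this patch but is not defined in this hunk. Given that the super block now comes straight out of the block device's page cache via read_cache_page_gfp() and page_address(), releasing it presumably just drops that page reference; a sketch under that assumption:

static void example_release_disk_super(struct btrfs_super_block *super)
{
	/* super is the page_address() of a page-cache page; drop its reference. */
	put_page(virt_to_page(super));
}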
3400 | 3548 | |
---|
3401 | 3549 | |
---|
3402 | | -struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) |
---|
| 3550 | +struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev) |
---|
3403 | 3551 | { |
---|
3404 | | - struct buffer_head *bh; |
---|
3405 | | - struct buffer_head *latest = NULL; |
---|
3406 | | - struct btrfs_super_block *super; |
---|
| 3552 | + struct btrfs_super_block *super, *latest = NULL; |
---|
3407 | 3553 | int i; |
---|
3408 | 3554 | u64 transid = 0; |
---|
3409 | | - int ret = -EINVAL; |
---|
3410 | 3555 | |
---|
3411 | 3556 | /* we would like to check all the supers, but that would make |
---|
3412 | 3557 | * a btrfs mount succeed after a mkfs from a different FS. |
---|
.. | .. |
---|
3414 | 3559 | * later supers, using BTRFS_SUPER_MIRROR_MAX instead |
---|
3415 | 3560 | */ |
---|
3416 | 3561 | for (i = 0; i < 1; i++) { |
---|
3417 | | - ret = btrfs_read_dev_one_super(bdev, i, &bh); |
---|
3418 | | - if (ret) |
---|
| 3562 | + super = btrfs_read_dev_one_super(bdev, i); |
---|
| 3563 | + if (IS_ERR(super)) |
---|
3419 | 3564 | continue; |
---|
3420 | 3565 | |
---|
3421 | | - super = (struct btrfs_super_block *)bh->b_data; |
---|
3422 | | - |
---|
3423 | 3566 | if (!latest || btrfs_super_generation(super) > transid) { |
---|
3424 | | - brelse(latest); |
---|
3425 | | - latest = bh; |
---|
| 3567 | + if (latest) |
---|
| 3568 | + btrfs_release_disk_super(super); |
---|
| 3569 | + |
---|
| 3570 | + latest = super; |
---|
3426 | 3571 | transid = btrfs_super_generation(super); |
---|
3427 | | - } else { |
---|
3428 | | - brelse(bh); |
---|
3429 | 3572 | } |
---|
3430 | 3573 | } |
---|
3431 | 3574 | |
---|
3432 | | - if (!latest) |
---|
3433 | | - return ERR_PTR(ret); |
---|
3434 | | - |
---|
3435 | | - return latest; |
---|
| 3575 | + return super; |
---|
3436 | 3576 | } |
---|
3437 | 3577 | |
---|
3438 | 3578 | /* |
---|
3439 | 3579 | * Write superblock @sb to the @device. Do not wait for completion, all the |
---|
3440 | | - * buffer heads we write are pinned. |
---|
| 3580 | + * pages we use for writing are locked. |
---|
3441 | 3581 | * |
---|
3442 | 3582 | * Write @max_mirrors copies of the superblock, where 0 means default that fit |
---|
3443 | 3583 | * the expected device size at commit time. Note that max_mirrors must be |
---|
3444 | 3584 | * same for write and wait phases. |
---|
3445 | 3585 | * |
---|
3446 | | - * Return number of errors when buffer head is not found or submission fails. |
---|
| 3586 | + * Return number of errors when page is not found or submission fails. |
---|
3447 | 3587 | */ |
---|
3448 | 3588 | static int write_dev_supers(struct btrfs_device *device, |
---|
3449 | 3589 | struct btrfs_super_block *sb, int max_mirrors) |
---|
3450 | 3590 | { |
---|
3451 | | - struct buffer_head *bh; |
---|
| 3591 | + struct btrfs_fs_info *fs_info = device->fs_info; |
---|
| 3592 | + struct address_space *mapping = device->bdev->bd_inode->i_mapping; |
---|
| 3593 | + SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
---|
3452 | 3594 | int i; |
---|
3453 | | - int ret; |
---|
3454 | 3595 | int errors = 0; |
---|
3455 | | - u32 crc; |
---|
3456 | 3596 | u64 bytenr; |
---|
3457 | | - int op_flags; |
---|
3458 | 3597 | |
---|
3459 | 3598 | if (max_mirrors == 0) |
---|
3460 | 3599 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; |
---|
3461 | 3600 | |
---|
| 3601 | + shash->tfm = fs_info->csum_shash; |
---|
| 3602 | + |
---|
3462 | 3603 | for (i = 0; i < max_mirrors; i++) { |
---|
| 3604 | + struct page *page; |
---|
| 3605 | + struct bio *bio; |
---|
| 3606 | + struct btrfs_super_block *disk_super; |
---|
| 3607 | + |
---|
3463 | 3608 | bytenr = btrfs_sb_offset(i); |
---|
3464 | 3609 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= |
---|
3465 | 3610 | device->commit_total_bytes) |
---|
.. | .. |
---|
3467 | 3612 | |
---|
3468 | 3613 | btrfs_set_super_bytenr(sb, bytenr); |
---|
3469 | 3614 | |
---|
3470 | | - crc = ~(u32)0; |
---|
3471 | | - crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, |
---|
3472 | | - BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); |
---|
3473 | | - btrfs_csum_final(crc, sb->csum); |
---|
| 3615 | + crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE, |
---|
| 3616 | + BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, |
---|
| 3617 | + sb->csum); |
---|
3474 | 3618 | |
---|
3475 | | - /* One reference for us, and we leave it for the caller */ |
---|
3476 | | - bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, |
---|
3477 | | - BTRFS_SUPER_INFO_SIZE); |
---|
3478 | | - if (!bh) { |
---|
| 3619 | + page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT, |
---|
| 3620 | + GFP_NOFS); |
---|
| 3621 | + if (!page) { |
---|
3479 | 3622 | btrfs_err(device->fs_info, |
---|
3480 | | - "couldn't get super buffer head for bytenr %llu", |
---|
| 3623 | + "couldn't get super block page for bytenr %llu", |
---|
3481 | 3624 | bytenr); |
---|
3482 | 3625 | errors++; |
---|
3483 | 3626 | continue; |
---|
3484 | 3627 | } |
---|
3485 | 3628 | |
---|
3486 | | - memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); |
---|
| 3629 | + /* Bump the refcount for wait_dev_supers() */ |
---|
| 3630 | + get_page(page); |
---|
3487 | 3631 | |
---|
3488 | | - /* one reference for submit_bh */ |
---|
3489 | | - get_bh(bh); |
---|
3490 | | - |
---|
3491 | | - set_buffer_uptodate(bh); |
---|
3492 | | - lock_buffer(bh); |
---|
3493 | | - bh->b_end_io = btrfs_end_buffer_write_sync; |
---|
3494 | | - bh->b_private = device; |
---|
| 3632 | + disk_super = page_address(page); |
---|
| 3633 | + memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE); |
---|
3495 | 3634 | |
---|
3496 | 3635 | /* |
---|
3497 | | - * we fua the first super. The others we allow |
---|
3498 | | - * to go down lazy. |
---|
| 3636 | + * Directly use bios here instead of relying on the page cache |
---|
| 3637 | + * to do I/O, so we don't lose the ability to do integrity |
---|
| 3638 | + * checking. |
---|
3499 | 3639 | */ |
---|
3500 | | - op_flags = REQ_SYNC | REQ_META | REQ_PRIO; |
---|
| 3640 | + bio = bio_alloc(GFP_NOFS, 1); |
---|
| 3641 | + bio_set_dev(bio, device->bdev); |
---|
| 3642 | + bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT; |
---|
| 3643 | + bio->bi_private = device; |
---|
| 3644 | + bio->bi_end_io = btrfs_end_super_write; |
---|
| 3645 | + __bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE, |
---|
| 3646 | + offset_in_page(bytenr)); |
---|
| 3647 | + |
---|
| 3648 | + /* |
---|
| 3649 | + * We FUA only the first super block. The others we allow to |
---|
| 3650 | + * go down lazily, and there's a short window where the on-disk 
---|
| 3651 | + * copies might still contain the older version. |
---|
| 3652 | + */ |
---|
| 3653 | + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO; |
---|
3501 | 3654 | if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER)) |
---|
3502 | | - op_flags |= REQ_FUA; |
---|
3503 | | - ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh); |
---|
3504 | | - if (ret) |
---|
3505 | | - errors++; |
---|
| 3655 | + bio->bi_opf |= REQ_FUA; |
---|
| 3656 | + |
---|
| 3657 | + btrfsic_submit_bio(bio); |
---|
3506 | 3658 | } |
---|
3507 | 3659 | return errors < i ? 0 : -1; |
---|
3508 | 3660 | } |
---|
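Both write_dev_supers() and wait_dev_supers() walk the mirrors purely by byte offset and skip any copy that would land past the device size. btrfs_sb_offset() itself is not part of this hunk; per the on-disk format the primary super block sits at 64 KiB and the mirrors at 64 MiB and 256 GiB, roughly:

static u64 example_sb_offset(int mirror)
{
	u64 start = SZ_16K;

	if (mirror)	/* BTRFS_SUPER_MIRROR_SHIFT == 12: 64 MiB, then 256 GiB */
		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
	return BTRFS_SUPER_INFO_OFFSET;	/* 64 KiB */
}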
.. | .. |
---|
3511 | 3663 | * Wait for write completion of superblocks done by write_dev_supers, |
---|
3512 | 3664 | * @max_mirrors same for write and wait phases. |
---|
3513 | 3665 | * |
---|
3514 | | - * Return number of errors when buffer head is not found or not marked up to |
---|
| 3666 | + * Return number of errors when page is not found or not marked up to |
---|
3515 | 3667 | * date. |
---|
3516 | 3668 | */ |
---|
3517 | 3669 | static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) |
---|
3518 | 3670 | { |
---|
3519 | | - struct buffer_head *bh; |
---|
3520 | 3671 | int i; |
---|
3521 | 3672 | int errors = 0; |
---|
3522 | 3673 | bool primary_failed = false; |
---|
.. | .. |
---|
3526 | 3677 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; |
---|
3527 | 3678 | |
---|
3528 | 3679 | for (i = 0; i < max_mirrors; i++) { |
---|
| 3680 | + struct page *page; |
---|
| 3681 | + |
---|
3529 | 3682 | bytenr = btrfs_sb_offset(i); |
---|
3530 | 3683 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= |
---|
3531 | 3684 | device->commit_total_bytes) |
---|
3532 | 3685 | break; |
---|
3533 | 3686 | |
---|
3534 | | - bh = __find_get_block(device->bdev, |
---|
3535 | | - bytenr / BTRFS_BDEV_BLOCKSIZE, |
---|
3536 | | - BTRFS_SUPER_INFO_SIZE); |
---|
3537 | | - if (!bh) { |
---|
| 3687 | + page = find_get_page(device->bdev->bd_inode->i_mapping, |
---|
| 3688 | + bytenr >> PAGE_SHIFT); |
---|
| 3689 | + if (!page) { |
---|
3538 | 3690 | errors++; |
---|
3539 | 3691 | if (i == 0) |
---|
3540 | 3692 | primary_failed = true; |
---|
3541 | 3693 | continue; |
---|
3542 | 3694 | } |
---|
3543 | | - wait_on_buffer(bh); |
---|
3544 | | - if (!buffer_uptodate(bh)) { |
---|
| 3695 | + /* Page is submitted locked and unlocked once the IO completes */ |
---|
| 3696 | + wait_on_page_locked(page); |
---|
| 3697 | + if (PageError(page)) { |
---|
3545 | 3698 | errors++; |
---|
3546 | 3699 | if (i == 0) |
---|
3547 | 3700 | primary_failed = true; |
---|
3548 | 3701 | } |
---|
3549 | 3702 | |
---|
3550 | | - /* drop our reference */ |
---|
3551 | | - brelse(bh); |
---|
| 3703 | + /* Drop our reference */ |
---|
| 3704 | + put_page(page); |
---|
3552 | 3705 | |
---|
3553 | | - /* drop the reference from the writing run */ |
---|
3554 | | - brelse(bh); |
---|
| 3706 | + /* Drop the reference from the writing run */ |
---|
| 3707 | + put_page(page); |
---|
3555 | 3708 | } |
---|
3556 | 3709 | |
---|
3557 | 3710 | /* log error, force error return */ |
---|
.. | .. |
---|
3697 | 3850 | |
---|
3698 | 3851 | if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || |
---|
3699 | 3852 | (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) |
---|
3700 | | - min_tolerated = min(min_tolerated, |
---|
| 3853 | + min_tolerated = min_t(int, min_tolerated, |
---|
3701 | 3854 | btrfs_raid_array[BTRFS_RAID_SINGLE]. |
---|
3702 | 3855 | tolerated_failures); |
---|
3703 | 3856 | |
---|
.. | .. |
---|
3706 | 3859 | continue; |
---|
3707 | 3860 | if (!(flags & btrfs_raid_array[raid_type].bg_flag)) |
---|
3708 | 3861 | continue; |
---|
3709 | | - min_tolerated = min(min_tolerated, |
---|
| 3862 | + min_tolerated = min_t(int, min_tolerated, |
---|
3710 | 3863 | btrfs_raid_array[raid_type]. |
---|
3711 | 3864 | tolerated_failures); |
---|
3712 | 3865 | } |
---|
.. | .. |
---|
3779 | 3932 | btrfs_set_stack_device_io_width(dev_item, dev->io_width); |
---|
3780 | 3933 | btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); |
---|
3781 | 3934 | memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); |
---|
3782 | | - memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE); |
---|
| 3935 | + memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid, |
---|
| 3936 | + BTRFS_FSID_SIZE); |
---|
3783 | 3937 | |
---|
3784 | 3938 | flags = btrfs_super_flags(sb); |
---|
3785 | 3939 | btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); |
---|
.. | .. |
---|
3834 | 3988 | void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, |
---|
3835 | 3989 | struct btrfs_root *root) |
---|
3836 | 3990 | { |
---|
| 3991 | + bool drop_ref = false; |
---|
| 3992 | + |
---|
3837 | 3993 | spin_lock(&fs_info->fs_roots_radix_lock); |
---|
3838 | 3994 | radix_tree_delete(&fs_info->fs_roots_radix, |
---|
3839 | 3995 | (unsigned long)root->root_key.objectid); |
---|
| 3996 | + if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state)) |
---|
| 3997 | + drop_ref = true; |
---|
3840 | 3998 | spin_unlock(&fs_info->fs_roots_radix_lock); |
---|
3841 | 3999 | |
---|
3842 | | - if (btrfs_root_refs(&root->root_item) == 0) |
---|
3843 | | - synchronize_srcu(&fs_info->subvol_srcu); |
---|
3844 | | - |
---|
3845 | 4000 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { |
---|
3846 | | - btrfs_free_log(NULL, root); |
---|
| 4001 | + ASSERT(root->log_root == NULL); |
---|
3847 | 4002 | if (root->reloc_root) { |
---|
3848 | | - free_extent_buffer(root->reloc_root->node); |
---|
3849 | | - free_extent_buffer(root->reloc_root->commit_root); |
---|
3850 | | - btrfs_put_fs_root(root->reloc_root); |
---|
| 4003 | + btrfs_put_root(root->reloc_root); |
---|
3851 | 4004 | root->reloc_root = NULL; |
---|
3852 | 4005 | } |
---|
3853 | 4006 | } |
---|
.. | .. |
---|
3856 | 4009 | __btrfs_remove_free_space_cache(root->free_ino_pinned); |
---|
3857 | 4010 | if (root->free_ino_ctl) |
---|
3858 | 4011 | __btrfs_remove_free_space_cache(root->free_ino_ctl); |
---|
3859 | | - btrfs_free_fs_root(root); |
---|
3860 | | -} |
---|
3861 | | - |
---|
3862 | | -void btrfs_free_fs_root(struct btrfs_root *root) |
---|
3863 | | -{ |
---|
3864 | | - iput(root->ino_cache_inode); |
---|
3865 | | - WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); |
---|
3866 | | - if (root->anon_dev) |
---|
3867 | | - free_anon_bdev(root->anon_dev); |
---|
3868 | | - if (root->subv_writers) |
---|
3869 | | - btrfs_free_subvolume_writers(root->subv_writers); |
---|
3870 | | - free_extent_buffer(root->node); |
---|
3871 | | - free_extent_buffer(root->commit_root); |
---|
3872 | | - kfree(root->free_ino_ctl); |
---|
3873 | | - kfree(root->free_ino_pinned); |
---|
3874 | | - btrfs_put_fs_root(root); |
---|
| 4012 | + if (root->ino_cache_inode) { |
---|
| 4013 | + iput(root->ino_cache_inode); |
---|
| 4014 | + root->ino_cache_inode = NULL; |
---|
| 4015 | + } |
---|
| 4016 | + if (drop_ref) |
---|
| 4017 | + btrfs_put_root(root); |
---|
3875 | 4018 | } |
---|
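The btrfs_put_root() above is now gated on the BTRFS_ROOT_IN_RADIX bit, so the reference being dropped must have been taken when the root was inserted into fs_roots_radix. That insertion helper is outside this hunk; a sketch of what the matching side presumably looks like, using radix_tree_preload() so the insert under the spinlock cannot fail on allocation (function name here is illustrative):

static int example_insert_fs_root(struct btrfs_fs_info *fs_info,
				  struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid, root);
	if (ret == 0) {
		btrfs_grab_root(root);	/* reference owned by the radix tree */
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}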
3876 | 4019 | |
---|
3877 | 4020 | int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) |
---|
.. | .. |
---|
3881 | 4024 | int i = 0; |
---|
3882 | 4025 | int err = 0; |
---|
3883 | 4026 | unsigned int ret = 0; |
---|
3884 | | - int index; |
---|
3885 | 4027 | |
---|
3886 | 4028 | while (1) { |
---|
3887 | | - index = srcu_read_lock(&fs_info->subvol_srcu); |
---|
| 4029 | + spin_lock(&fs_info->fs_roots_radix_lock); |
---|
3888 | 4030 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
---|
3889 | 4031 | (void **)gang, root_objectid, |
---|
3890 | 4032 | ARRAY_SIZE(gang)); |
---|
3891 | 4033 | if (!ret) { |
---|
3892 | | - srcu_read_unlock(&fs_info->subvol_srcu, index); |
---|
| 4034 | + spin_unlock(&fs_info->fs_roots_radix_lock); |
---|
3893 | 4035 | break; |
---|
3894 | 4036 | } |
---|
3895 | 4037 | root_objectid = gang[ret - 1]->root_key.objectid + 1; |
---|
.. | .. |
---|
3901 | 4043 | continue; |
---|
3902 | 4044 | } |
---|
3903 | 4045 | /* grab all the search result for later use */ |
---|
3904 | | - gang[i] = btrfs_grab_fs_root(gang[i]); |
---|
| 4046 | + gang[i] = btrfs_grab_root(gang[i]); |
---|
3905 | 4047 | } |
---|
3906 | | - srcu_read_unlock(&fs_info->subvol_srcu, index); |
---|
| 4048 | + spin_unlock(&fs_info->fs_roots_radix_lock); |
---|
3907 | 4049 | |
---|
3908 | 4050 | for (i = 0; i < ret; i++) { |
---|
3909 | 4051 | if (!gang[i]) |
---|
.. | .. |
---|
3912 | 4054 | err = btrfs_orphan_cleanup(gang[i]); |
---|
3913 | 4055 | if (err) |
---|
3914 | 4056 | break; |
---|
3915 | | - btrfs_put_fs_root(gang[i]); |
---|
| 4057 | + btrfs_put_root(gang[i]); |
---|
3916 | 4058 | } |
---|
3917 | 4059 | root_objectid++; |
---|
3918 | 4060 | } |
---|
.. | .. |
---|
3920 | 4062 | /* release the uncleaned roots due to error */ |
---|
3921 | 4063 | for (; i < ret; i++) { |
---|
3922 | 4064 | if (gang[i]) |
---|
3923 | | - btrfs_put_fs_root(gang[i]); |
---|
| 4065 | + btrfs_put_root(gang[i]); |
---|
3924 | 4066 | } |
---|
3925 | 4067 | return err; |
---|
3926 | 4068 | } |
---|
.. | .. |
---|
3945 | 4087 | return btrfs_commit_transaction(trans); |
---|
3946 | 4088 | } |
---|
3947 | 4089 | |
---|
3948 | | -void close_ctree(struct btrfs_fs_info *fs_info) |
---|
| 4090 | +void __cold close_ctree(struct btrfs_fs_info *fs_info) |
---|
3949 | 4091 | { |
---|
3950 | 4092 | int ret; |
---|
3951 | 4093 | |
---|
.. | .. |
---|
3980 | 4122 | /* clear out the rbtree of defraggable inodes */ |
---|
3981 | 4123 | btrfs_cleanup_defrag_inodes(fs_info); |
---|
3982 | 4124 | |
---|
| 4125 | + /* |
---|
| 4126 | + * After we parked the cleaner kthread, ordered extents may have |
---|
| 4127 | + * completed and created new delayed iputs. If one of the async reclaim |
---|
| 4128 | + * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we |
---|
| 4129 | + * can hang forever trying to stop it, because if a delayed iput is |
---|
| 4130 | + * added after it ran btrfs_run_delayed_iputs() and before it called |
---|
| 4131 | + * btrfs_wait_on_delayed_iputs(), it will hang forever since there is |
---|
| 4132 | + * no one else to run iputs. |
---|
| 4133 | + * |
---|
| 4134 | + * So wait for all ongoing ordered extents to complete and then run |
---|
| 4135 | + * delayed iputs. This works because once we reach this point no one |
---|
| 4136 | + * can either create new ordered extents nor create delayed iputs |
---|
| 4137 | + * through some other means. |
---|
| 4138 | + * |
---|
| 4139 | + * Also note that btrfs_wait_ordered_roots() is not safe here, because |
---|
| 4140 | + * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent, |
---|
| 4141 | + * but the delayed iput for the respective inode is made only when doing |
---|
| 4142 | + * the final btrfs_put_ordered_extent() (which must happen at |
---|
| 4143 | + * btrfs_finish_ordered_io() when we are unmounting). |
---|
| 4144 | + */ |
---|
| 4145 | + btrfs_flush_workqueue(fs_info->endio_write_workers); |
---|
| 4146 | + /* Ordered extents for free space inodes. */ |
---|
| 4147 | + btrfs_flush_workqueue(fs_info->endio_freespace_worker); |
---|
| 4148 | + btrfs_run_delayed_iputs(fs_info); |
---|
| 4149 | + |
---|
3983 | 4150 | cancel_work_sync(&fs_info->async_reclaim_work); |
---|
| 4151 | + cancel_work_sync(&fs_info->async_data_reclaim_work); |
---|
| 4152 | + |
---|
| 4153 | + /* Cancel or finish ongoing discard work */ |
---|
| 4154 | + btrfs_discard_cleanup(fs_info); |
---|
3984 | 4155 | |
---|
3985 | 4156 | if (!sb_rdonly(fs_info->sb)) { |
---|
3986 | 4157 | /* |
---|
.. | .. |
---|
4014 | 4185 | kthread_stop(fs_info->transaction_kthread); |
---|
4015 | 4186 | kthread_stop(fs_info->cleaner_kthread); |
---|
4016 | 4187 | |
---|
| 4188 | + ASSERT(list_empty(&fs_info->delayed_iputs)); |
---|
4017 | 4189 | set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); |
---|
| 4190 | + |
---|
| 4191 | + if (btrfs_check_quota_leak(fs_info)) { |
---|
| 4192 | + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); |
---|
| 4193 | + btrfs_err(fs_info, "qgroup reserved space leaked"); |
---|
| 4194 | + } |
---|
4018 | 4195 | |
---|
4019 | 4196 | btrfs_free_qgroup_config(fs_info); |
---|
4020 | 4197 | ASSERT(list_empty(&fs_info->delalloc_roots)); |
---|
.. | .. |
---|
4024 | 4201 | percpu_counter_sum(&fs_info->delalloc_bytes)); |
---|
4025 | 4202 | } |
---|
4026 | 4203 | |
---|
| 4204 | + if (percpu_counter_sum(&fs_info->dio_bytes)) |
---|
| 4205 | + btrfs_info(fs_info, "at unmount dio bytes count %lld", |
---|
| 4206 | + percpu_counter_sum(&fs_info->dio_bytes)); |
---|
| 4207 | + |
---|
4027 | 4208 | btrfs_sysfs_remove_mounted(fs_info); |
---|
4028 | 4209 | btrfs_sysfs_remove_fsid(fs_info->fs_devices); |
---|
4029 | | - |
---|
4030 | | - btrfs_free_fs_roots(fs_info); |
---|
4031 | 4210 | |
---|
4032 | 4211 | btrfs_put_block_group_cache(fs_info); |
---|
4033 | 4212 | |
---|
.. | .. |
---|
4040 | 4219 | |
---|
4041 | 4220 | clear_bit(BTRFS_FS_OPEN, &fs_info->flags); |
---|
4042 | 4221 | free_root_pointers(fs_info, true); |
---|
| 4222 | + btrfs_free_fs_roots(fs_info); |
---|
4043 | 4223 | |
---|
4044 | 4224 | /* |
---|
4045 | 4225 | * We must free the block groups after dropping the fs_roots as we could |
---|
.. | .. |
---|
4057 | 4237 | btrfsic_unmount(fs_info->fs_devices); |
---|
4058 | 4238 | #endif |
---|
4059 | 4239 | |
---|
4060 | | - btrfs_close_devices(fs_info->fs_devices); |
---|
4061 | 4240 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
---|
4062 | | - |
---|
4063 | | - percpu_counter_destroy(&fs_info->dirty_metadata_bytes); |
---|
4064 | | - percpu_counter_destroy(&fs_info->delalloc_bytes); |
---|
4065 | | - percpu_counter_destroy(&fs_info->bio_counter); |
---|
4066 | | - cleanup_srcu_struct(&fs_info->subvol_srcu); |
---|
4067 | | - |
---|
4068 | | - btrfs_free_stripe_hash_table(fs_info); |
---|
4069 | | - btrfs_free_ref_cache(fs_info); |
---|
4070 | | - |
---|
4071 | | - while (!list_empty(&fs_info->pinned_chunks)) { |
---|
4072 | | - struct extent_map *em; |
---|
4073 | | - |
---|
4074 | | - em = list_first_entry(&fs_info->pinned_chunks, |
---|
4075 | | - struct extent_map, list); |
---|
4076 | | - list_del_init(&em->list); |
---|
4077 | | - free_extent_map(em); |
---|
4078 | | - } |
---|
| 4241 | + btrfs_close_devices(fs_info->fs_devices); |
---|
4079 | 4242 | } |
---|
4080 | 4243 | |
---|
4081 | 4244 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, |
---|
.. | .. |
---|
4105 | 4268 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
---|
4106 | 4269 | /* |
---|
4107 | 4270 | * This is a fast path so only do this check if we have sanity tests |
---|
4108 | | - * enabled. Normal people shouldn't be using umapped buffers as dirty |
---|
| 4271 | + * enabled. Normal people shouldn't be using unmapped buffers as dirty |
---|
4109 | 4272 | * outside of the sanity tests. |
---|
4110 | 4273 | */ |
---|
4111 | 4274 | if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags))) |
---|
.. | .. |
---|
4129 | 4292 | * So here we should only check item pointers, not item data. |
---|
4130 | 4293 | */ |
---|
4131 | 4294 | if (btrfs_header_level(buf) == 0 && |
---|
4132 | | - btrfs_check_leaf_relaxed(fs_info, buf)) { |
---|
| 4295 | + btrfs_check_leaf_relaxed(buf)) { |
---|
4133 | 4296 | btrfs_print_leaf(buf); |
---|
4134 | 4297 | ASSERT(0); |
---|
4135 | 4298 | } |
---|
.. | .. |
---|
4172 | 4335 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level, |
---|
4173 | 4336 | struct btrfs_key *first_key) |
---|
4174 | 4337 | { |
---|
4175 | | - struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root; |
---|
4176 | | - struct btrfs_fs_info *fs_info = root->fs_info; |
---|
4177 | | - |
---|
4178 | | - return btree_read_extent_buffer_pages(fs_info, buf, parent_transid, |
---|
| 4338 | + return btree_read_extent_buffer_pages(buf, parent_transid, |
---|
4179 | 4339 | level, first_key); |
---|
4180 | 4340 | } |
---|
4181 | 4341 | |
---|
.. | .. |
---|
4190 | 4350 | |
---|
4191 | 4351 | down_write(&fs_info->cleanup_work_sem); |
---|
4192 | 4352 | up_write(&fs_info->cleanup_work_sem); |
---|
| 4353 | +} |
---|
| 4354 | + |
---|
| 4355 | +static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info) |
---|
| 4356 | +{ |
---|
| 4357 | + struct btrfs_root *gang[8]; |
---|
| 4358 | + u64 root_objectid = 0; |
---|
| 4359 | + int ret; |
---|
| 4360 | + |
---|
| 4361 | + spin_lock(&fs_info->fs_roots_radix_lock); |
---|
| 4362 | + while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
---|
| 4363 | + (void **)gang, root_objectid, |
---|
| 4364 | + ARRAY_SIZE(gang))) != 0) { |
---|
| 4365 | + int i; |
---|
| 4366 | + |
---|
| 4367 | + for (i = 0; i < ret; i++) |
---|
| 4368 | + gang[i] = btrfs_grab_root(gang[i]); |
---|
| 4369 | + spin_unlock(&fs_info->fs_roots_radix_lock); |
---|
| 4370 | + |
---|
| 4371 | + for (i = 0; i < ret; i++) { |
---|
| 4372 | + if (!gang[i]) |
---|
| 4373 | + continue; |
---|
| 4374 | + root_objectid = gang[i]->root_key.objectid; |
---|
| 4375 | + btrfs_free_log(NULL, gang[i]); |
---|
| 4376 | + btrfs_put_root(gang[i]); |
---|
| 4377 | + } |
---|
| 4378 | + root_objectid++; |
---|
| 4379 | + spin_lock(&fs_info->fs_roots_radix_lock); |
---|
| 4380 | + } |
---|
| 4381 | + spin_unlock(&fs_info->fs_roots_radix_lock); |
---|
| 4382 | + btrfs_free_log_root_tree(NULL, fs_info); |
---|
4193 | 4383 | } |
---|
4194 | 4384 | |
---|
4195 | 4385 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
---|
.. | .. |
---|
4252 | 4442 | spin_lock(&delayed_refs->lock); |
---|
4253 | 4443 | if (atomic_read(&delayed_refs->num_entries) == 0) { |
---|
4254 | 4444 | spin_unlock(&delayed_refs->lock); |
---|
4255 | | - btrfs_info(fs_info, "delayed_refs has NO entry"); |
---|
| 4445 | + btrfs_debug(fs_info, "delayed_refs has NO entry"); |
---|
4256 | 4446 | return ret; |
---|
4257 | 4447 | } |
---|
4258 | 4448 | |
---|
4259 | | - while ((node = rb_first(&delayed_refs->href_root)) != NULL) { |
---|
| 4449 | + while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) { |
---|
4260 | 4450 | struct btrfs_delayed_ref_head *head; |
---|
4261 | 4451 | struct rb_node *n; |
---|
4262 | 4452 | bool pin_bytes = false; |
---|
4263 | 4453 | |
---|
4264 | 4454 | head = rb_entry(node, struct btrfs_delayed_ref_head, |
---|
4265 | 4455 | href_node); |
---|
4266 | | - if (!mutex_trylock(&head->mutex)) { |
---|
4267 | | - refcount_inc(&head->refs); |
---|
4268 | | - spin_unlock(&delayed_refs->lock); |
---|
4269 | | - |
---|
4270 | | - mutex_lock(&head->mutex); |
---|
4271 | | - mutex_unlock(&head->mutex); |
---|
4272 | | - btrfs_put_delayed_ref_head(head); |
---|
4273 | | - spin_lock(&delayed_refs->lock); |
---|
| 4456 | + if (btrfs_delayed_ref_lock(delayed_refs, head)) |
---|
4274 | 4457 | continue; |
---|
4275 | | - } |
---|
| 4458 | + |
---|
4276 | 4459 | spin_lock(&head->lock); |
---|
4277 | | - while ((n = rb_first(&head->ref_tree)) != NULL) { |
---|
| 4460 | + while ((n = rb_first_cached(&head->ref_tree)) != NULL) { |
---|
4278 | 4461 | ref = rb_entry(n, struct btrfs_delayed_ref_node, |
---|
4279 | 4462 | ref_node); |
---|
4280 | 4463 | ref->in_tree = 0; |
---|
4281 | | - rb_erase(&ref->ref_node, &head->ref_tree); |
---|
| 4464 | + rb_erase_cached(&ref->ref_node, &head->ref_tree); |
---|
4282 | 4465 | RB_CLEAR_NODE(&ref->ref_node); |
---|
4283 | 4466 | if (!list_empty(&ref->add_list)) |
---|
4284 | 4467 | list_del(&ref->add_list); |
---|
.. | .. |
---|
4288 | 4471 | if (head->must_insert_reserved) |
---|
4289 | 4472 | pin_bytes = true; |
---|
4290 | 4473 | btrfs_free_delayed_extent_op(head->extent_op); |
---|
4291 | | - delayed_refs->num_heads--; |
---|
4292 | | - if (head->processing == 0) |
---|
4293 | | - delayed_refs->num_heads_ready--; |
---|
4294 | | - atomic_dec(&delayed_refs->num_entries); |
---|
4295 | | - rb_erase(&head->href_node, &delayed_refs->href_root); |
---|
4296 | | - RB_CLEAR_NODE(&head->href_node); |
---|
| 4474 | + btrfs_delete_ref_head(delayed_refs, head); |
---|
4297 | 4475 | spin_unlock(&head->lock); |
---|
4298 | 4476 | spin_unlock(&delayed_refs->lock); |
---|
4299 | 4477 | mutex_unlock(&head->mutex); |
---|
4300 | 4478 | |
---|
4301 | | - if (pin_bytes) |
---|
4302 | | - btrfs_pin_extent(fs_info, head->bytenr, |
---|
4303 | | - head->num_bytes, 1); |
---|
| 4479 | + if (pin_bytes) { |
---|
| 4480 | + struct btrfs_block_group *cache; |
---|
| 4481 | + |
---|
| 4482 | + cache = btrfs_lookup_block_group(fs_info, head->bytenr); |
---|
| 4483 | + BUG_ON(!cache); |
---|
| 4484 | + |
---|
| 4485 | + spin_lock(&cache->space_info->lock); |
---|
| 4486 | + spin_lock(&cache->lock); |
---|
| 4487 | + cache->pinned += head->num_bytes; |
---|
| 4488 | + btrfs_space_info_update_bytes_pinned(fs_info, |
---|
| 4489 | + cache->space_info, head->num_bytes); |
---|
| 4490 | + cache->reserved -= head->num_bytes; |
---|
| 4491 | + cache->space_info->bytes_reserved -= head->num_bytes; |
---|
| 4492 | + spin_unlock(&cache->lock); |
---|
| 4493 | + spin_unlock(&cache->space_info->lock); |
---|
| 4494 | + percpu_counter_add_batch( |
---|
| 4495 | + &cache->space_info->total_bytes_pinned, |
---|
| 4496 | + head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH); |
---|
| 4497 | + |
---|
| 4498 | + btrfs_put_block_group(cache); |
---|
| 4499 | + |
---|
| 4500 | + btrfs_error_unpin_extent_range(fs_info, head->bytenr, |
---|
| 4501 | + head->bytenr + head->num_bytes - 1); |
---|
| 4502 | + } |
---|
| 4503 | + btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
---|
4304 | 4504 | btrfs_put_delayed_ref_head(head); |
---|
4305 | 4505 | cond_resched(); |
---|
4306 | 4506 | spin_lock(&delayed_refs->lock); |
---|
4307 | 4507 | } |
---|
| 4508 | + btrfs_qgroup_destroy_extent_records(trans); |
---|
4308 | 4509 | |
---|
4309 | 4510 | spin_unlock(&delayed_refs->lock); |
---|
4310 | 4511 | |
---|
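The destruction loop above moved from rb_first()/rb_erase() to the _cached variants. An rb_root_cached keeps a pointer to the leftmost node next to the root, so repeatedly popping the smallest entry while draining a tree avoids a full descent per iteration. A self-contained sketch of that API (generic illustration, not btrfs code):

#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
	struct rb_node node;
	u64 key;
};

static struct rb_root_cached example_tree = RB_ROOT_CACHED;

static void example_insert(struct item *new)
{
	struct rb_node **p = &example_tree.rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, node);

		parent = *p;
		if (new->key < cur->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* not the new minimum */
		}
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color_cached(&new->node, &example_tree, leftmost);
}

static void example_drain(void)
{
	struct rb_node *node;

	/* rb_first_cached() is O(1): it returns the cached leftmost node. */
	while ((node = rb_first_cached(&example_tree)) != NULL) {
		struct item *cur = rb_entry(node, struct item, node);

		rb_erase_cached(&cur->node, &example_tree);
		kfree(cur);
	}
}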
.. | .. |
---|
4334 | 4535 | */ |
---|
4335 | 4536 | inode = igrab(&btrfs_inode->vfs_inode); |
---|
4336 | 4537 | if (inode) { |
---|
| 4538 | + unsigned int nofs_flag; |
---|
| 4539 | + |
---|
| 4540 | + nofs_flag = memalloc_nofs_save(); |
---|
4337 | 4541 | invalidate_inode_pages2(inode->i_mapping); |
---|
| 4542 | + memalloc_nofs_restore(nofs_flag); |
---|
4338 | 4543 | iput(inode); |
---|
4339 | 4544 | } |
---|
4340 | 4545 | spin_lock(&root->delalloc_lock); |
---|
.. | .. |
---|
4354 | 4559 | while (!list_empty(&splice)) { |
---|
4355 | 4560 | root = list_first_entry(&splice, struct btrfs_root, |
---|
4356 | 4561 | delalloc_root); |
---|
4357 | | - root = btrfs_grab_fs_root(root); |
---|
| 4562 | + root = btrfs_grab_root(root); |
---|
4358 | 4563 | BUG_ON(!root); |
---|
4359 | 4564 | spin_unlock(&fs_info->delalloc_root_lock); |
---|
4360 | 4565 | |
---|
4361 | 4566 | btrfs_destroy_delalloc_inodes(root); |
---|
4362 | | - btrfs_put_fs_root(root); |
---|
| 4567 | + btrfs_put_root(root); |
---|
4363 | 4568 | |
---|
4364 | 4569 | spin_lock(&fs_info->delalloc_root_lock); |
---|
4365 | 4570 | } |
---|
.. | .. |
---|
4400 | 4605 | } |
---|
4401 | 4606 | |
---|
4402 | 4607 | static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, |
---|
4403 | | - struct extent_io_tree *pinned_extents) |
---|
| 4608 | + struct extent_io_tree *unpin) |
---|
4404 | 4609 | { |
---|
4405 | | - struct extent_io_tree *unpin; |
---|
4406 | 4610 | u64 start; |
---|
4407 | 4611 | u64 end; |
---|
4408 | 4612 | int ret; |
---|
4409 | | - bool loop = true; |
---|
4410 | 4613 | |
---|
4411 | | - unpin = pinned_extents; |
---|
4412 | | -again: |
---|
4413 | 4614 | while (1) { |
---|
4414 | 4615 | struct extent_state *cached_state = NULL; |
---|
4415 | 4616 | |
---|
.. | .. |
---|
4434 | 4635 | cond_resched(); |
---|
4435 | 4636 | } |
---|
4436 | 4637 | |
---|
4437 | | - if (loop) { |
---|
4438 | | - if (unpin == &fs_info->freed_extents[0]) |
---|
4439 | | - unpin = &fs_info->freed_extents[1]; |
---|
4440 | | - else |
---|
4441 | | - unpin = &fs_info->freed_extents[0]; |
---|
4442 | | - loop = false; |
---|
4443 | | - goto again; |
---|
4444 | | - } |
---|
4445 | | - |
---|
4446 | 4638 | return 0; |
---|
4447 | 4639 | } |
---|
4448 | 4640 | |
---|
4449 | | -static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) |
---|
| 4641 | +static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache) |
---|
4450 | 4642 | { |
---|
4451 | 4643 | struct inode *inode; |
---|
4452 | 4644 | |
---|
4453 | 4645 | inode = cache->io_ctl.inode; |
---|
4454 | 4646 | if (inode) { |
---|
| 4647 | + unsigned int nofs_flag; |
---|
| 4648 | + |
---|
| 4649 | + nofs_flag = memalloc_nofs_save(); |
---|
4455 | 4650 | invalidate_inode_pages2(inode->i_mapping); |
---|
| 4651 | + memalloc_nofs_restore(nofs_flag); |
---|
| 4652 | + |
---|
4456 | 4653 | BTRFS_I(inode)->generation = 0; |
---|
4457 | 4654 | cache->io_ctl.inode = NULL; |
---|
4458 | 4655 | iput(inode); |
---|
.. | .. |
---|
4464 | 4661 | void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, |
---|
4465 | 4662 | struct btrfs_fs_info *fs_info) |
---|
4466 | 4663 | { |
---|
4467 | | - struct btrfs_block_group_cache *cache; |
---|
| 4664 | + struct btrfs_block_group *cache; |
---|
4468 | 4665 | |
---|
4469 | 4666 | spin_lock(&cur_trans->dirty_bgs_lock); |
---|
4470 | 4667 | while (!list_empty(&cur_trans->dirty_bgs)) { |
---|
4471 | 4668 | cache = list_first_entry(&cur_trans->dirty_bgs, |
---|
4472 | | - struct btrfs_block_group_cache, |
---|
| 4669 | + struct btrfs_block_group, |
---|
4473 | 4670 | dirty_list); |
---|
4474 | 4671 | |
---|
4475 | 4672 | if (!list_empty(&cache->io_list)) { |
---|
.. | .. |
---|
4486 | 4683 | |
---|
4487 | 4684 | spin_unlock(&cur_trans->dirty_bgs_lock); |
---|
4488 | 4685 | btrfs_put_block_group(cache); |
---|
| 4686 | + btrfs_delayed_refs_rsv_release(fs_info, 1); |
---|
4489 | 4687 | spin_lock(&cur_trans->dirty_bgs_lock); |
---|
4490 | 4688 | } |
---|
4491 | 4689 | spin_unlock(&cur_trans->dirty_bgs_lock); |
---|
.. | .. |
---|
4496 | 4694 | */ |
---|
4497 | 4695 | while (!list_empty(&cur_trans->io_bgs)) { |
---|
4498 | 4696 | cache = list_first_entry(&cur_trans->io_bgs, |
---|
4499 | | - struct btrfs_block_group_cache, |
---|
| 4697 | + struct btrfs_block_group, |
---|
4500 | 4698 | io_list); |
---|
4501 | 4699 | |
---|
4502 | 4700 | list_del_init(&cache->io_list); |
---|
.. | .. |
---|
4510 | 4708 | void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, |
---|
4511 | 4709 | struct btrfs_fs_info *fs_info) |
---|
4512 | 4710 | { |
---|
| 4711 | + struct btrfs_device *dev, *tmp; |
---|
| 4712 | + |
---|
4513 | 4713 | btrfs_cleanup_dirty_bgs(cur_trans, fs_info); |
---|
4514 | 4714 | ASSERT(list_empty(&cur_trans->dirty_bgs)); |
---|
4515 | 4715 | ASSERT(list_empty(&cur_trans->io_bgs)); |
---|
| 4716 | + |
---|
| 4717 | + list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list, |
---|
| 4718 | + post_commit_list) { |
---|
| 4719 | + list_del_init(&dev->post_commit_list); |
---|
| 4720 | + } |
---|
4516 | 4721 | |
---|
4517 | 4722 | btrfs_destroy_delayed_refs(cur_trans, fs_info); |
---|
4518 | 4723 | |
---|
.. | .. |
---|
4526 | 4731 | |
---|
4527 | 4732 | btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages, |
---|
4528 | 4733 | EXTENT_DIRTY); |
---|
4529 | | - btrfs_destroy_pinned_extent(fs_info, |
---|
4530 | | - fs_info->pinned_extents); |
---|
| 4734 | + btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents); |
---|
4531 | 4735 | |
---|
4532 | 4736 | cur_trans->state =TRANS_STATE_COMPLETED; |
---|
4533 | 4737 | wake_up(&cur_trans->commit_wait); |
---|
.. | .. |
---|
4579 | 4783 | btrfs_destroy_all_ordered_extents(fs_info); |
---|
4580 | 4784 | btrfs_destroy_delayed_inodes(fs_info); |
---|
4581 | 4785 | btrfs_assert_delayed_root_empty(fs_info); |
---|
4582 | | - btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); |
---|
4583 | 4786 | btrfs_destroy_all_delalloc_inodes(fs_info); |
---|
| 4787 | + btrfs_drop_all_logs(fs_info); |
---|
4584 | 4788 | mutex_unlock(&fs_info->transaction_kthread_mutex); |
---|
4585 | 4789 | |
---|
4586 | 4790 | return 0; |
---|
4587 | 4791 | } |
---|
4588 | 4792 | |
---|
4589 | | -static const struct extent_io_ops btree_extent_io_ops = { |
---|
4590 | | - /* mandatory callbacks */ |
---|
4591 | | - .submit_bio_hook = btree_submit_bio_hook, |
---|
4592 | | - .readpage_end_io_hook = btree_readpage_end_io_hook, |
---|
4593 | | - .readpage_io_failed_hook = btree_io_failed_hook, |
---|
| 4793 | +int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) |
---|
| 4794 | +{ |
---|
| 4795 | + struct btrfs_path *path; |
---|
| 4796 | + int ret; |
---|
| 4797 | + struct extent_buffer *l; |
---|
| 4798 | + struct btrfs_key search_key; |
---|
| 4799 | + struct btrfs_key found_key; |
---|
| 4800 | + int slot; |
---|
4594 | 4801 | |
---|
4595 | | - /* optional callbacks */ |
---|
4596 | | -}; |
---|
| 4802 | + path = btrfs_alloc_path(); |
---|
| 4803 | + if (!path) |
---|
| 4804 | + return -ENOMEM; |
---|
| 4805 | + |
---|
| 4806 | + search_key.objectid = BTRFS_LAST_FREE_OBJECTID; |
---|
| 4807 | + search_key.type = -1; |
---|
| 4808 | + search_key.offset = (u64)-1; |
---|
| 4809 | + ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); |
---|
| 4810 | + if (ret < 0) |
---|
| 4811 | + goto error; |
---|
| 4812 | + BUG_ON(ret == 0); /* Corruption */ |
---|
| 4813 | + if (path->slots[0] > 0) { |
---|
| 4814 | + slot = path->slots[0] - 1; |
---|
| 4815 | + l = path->nodes[0]; |
---|
| 4816 | + btrfs_item_key_to_cpu(l, &found_key, slot); |
---|
| 4817 | + *objectid = max_t(u64, found_key.objectid, |
---|
| 4818 | + BTRFS_FIRST_FREE_OBJECTID - 1); |
---|
| 4819 | + } else { |
---|
| 4820 | + *objectid = BTRFS_FIRST_FREE_OBJECTID - 1; |
---|
| 4821 | + } |
---|
| 4822 | + ret = 0; |
---|
| 4823 | +error: |
---|
| 4824 | + btrfs_free_path(path); |
---|
| 4825 | + return ret; |
---|
| 4826 | +} |
---|
| 4827 | + |
---|
| 4828 | +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) |
---|
| 4829 | +{ |
---|
| 4830 | + int ret; |
---|
| 4831 | + mutex_lock(&root->objectid_mutex); |
---|
| 4832 | + |
---|
| 4833 | + if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) { |
---|
| 4834 | + btrfs_warn(root->fs_info, |
---|
| 4835 | + "the objectid of root %llu reaches its highest value", |
---|
| 4836 | + root->root_key.objectid); |
---|
| 4837 | + ret = -ENOSPC; |
---|
| 4838 | + goto out; |
---|
| 4839 | + } |
---|
| 4840 | + |
---|
| 4841 | + *objectid = ++root->highest_objectid; |
---|
| 4842 | + ret = 0; |
---|
| 4843 | +out: |
---|
| 4844 | + mutex_unlock(&root->objectid_mutex); |
---|
| 4845 | + return ret; |
---|
| 4846 | +} |
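As the removed open_ctree() code earlier in this section shows, btrfs_find_highest_objectid() is what seeds root->highest_objectid under objectid_mutex when a root is set up; btrfs_find_free_objectid() then hands out objectids one at a time. A hypothetical caller, for illustration only:

static int example_create_object(struct btrfs_root *root)
{
	u64 objectid;
	int ret;

	ret = btrfs_find_free_objectid(root, &objectid);
	if (ret)	/* -ENOSPC once BTRFS_LAST_FREE_OBJECTID is reached */
		return ret;

	/* ... create the new inode or root item keyed by objectid ... */
	return 0;
}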
---|