.. | ..
17 | 17 | #include <linux/slab.h>
18 | 18 | #include <linux/sched/mm.h>
19 | 19 | #include <linux/log2.h>
 | 20 | +#include <crypto/hash.h>
 | 21 | +#include "misc.h"
20 | 22 | #include "ctree.h"
21 | 23 | #include "disk-io.h"
22 | 24 | #include "transaction.h"
.. | ..
37 | 39 | 	case BTRFS_COMPRESS_ZSTD:
38 | 40 | 	case BTRFS_COMPRESS_NONE:
39 | 41 | 		return btrfs_compress_types[type];
 | 42 | +	default:
 | 43 | +		break;
40 | 44 | 	}
41 | 45 |
42 | 46 | 	return NULL;
.. | ..
58 | 62 | 	return false;
59 | 63 | }
60 | 64 |
 | 65 | +static int compression_compress_pages(int type, struct list_head *ws,
 | 66 | +	struct address_space *mapping, u64 start, struct page **pages,
 | 67 | +	unsigned long *out_pages, unsigned long *total_in,
 | 68 | +	unsigned long *total_out)
 | 69 | +{
 | 70 | +	switch (type) {
 | 71 | +	case BTRFS_COMPRESS_ZLIB:
 | 72 | +		return zlib_compress_pages(ws, mapping, start, pages,
 | 73 | +				out_pages, total_in, total_out);
 | 74 | +	case BTRFS_COMPRESS_LZO:
 | 75 | +		return lzo_compress_pages(ws, mapping, start, pages,
 | 76 | +				out_pages, total_in, total_out);
 | 77 | +	case BTRFS_COMPRESS_ZSTD:
 | 78 | +		return zstd_compress_pages(ws, mapping, start, pages,
 | 79 | +				out_pages, total_in, total_out);
 | 80 | +	case BTRFS_COMPRESS_NONE:
 | 81 | +	default:
 | 82 | +		/*
 | 83 | +		 * This can happen when compression races with remount setting
 | 84 | +		 * it to 'no compress', while caller doesn't call
 | 85 | +		 * inode_need_compress() to check if we really need to
 | 86 | +		 * compress.
 | 87 | +		 *
 | 88 | +		 * Not a big deal, just need to inform caller that we
 | 89 | +		 * haven't allocated any pages yet.
 | 90 | +		 */
 | 91 | +		*out_pages = 0;
 | 92 | +		return -E2BIG;
 | 93 | +	}
 | 94 | +}
 | 95 | +
 | 96 | +static int compression_decompress_bio(int type, struct list_head *ws,
 | 97 | +		struct compressed_bio *cb)
 | 98 | +{
 | 99 | +	switch (type) {
 | 100 | +	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
 | 101 | +	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
 | 102 | +	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
 | 103 | +	case BTRFS_COMPRESS_NONE:
 | 104 | +	default:
 | 105 | +		/*
 | 106 | +		 * This can't happen, the type is validated several times
 | 107 | +		 * before we get here.
 | 108 | +		 */
 | 109 | +		BUG();
 | 110 | +	}
 | 111 | +}
 | 112 | +
 | 113 | +static int compression_decompress(int type, struct list_head *ws,
 | 114 | +		unsigned char *data_in, struct page *dest_page,
 | 115 | +		unsigned long start_byte, size_t srclen, size_t destlen)
 | 116 | +{
 | 117 | +	switch (type) {
 | 118 | +	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
 | 119 | +						start_byte, srclen, destlen);
 | 120 | +	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
 | 121 | +						start_byte, srclen, destlen);
 | 122 | +	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
 | 123 | +						start_byte, srclen, destlen);
 | 124 | +	case BTRFS_COMPRESS_NONE:
 | 125 | +	default:
 | 126 | +		/*
 | 127 | +		 * This can't happen, the type is validated several times
 | 128 | +		 * before we get here.
 | 129 | +		 */
 | 130 | +		BUG();
 | 131 | +	}
 | 132 | +}
 | 133 | +
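The BTRFS_COMPRESS_NONE/default arm above encodes a small contract: when the remount race hits, the helper returns -E2BIG and guarantees *out_pages == 0, and callers are expected to fall back to writing the range uncompressed rather than treating it as a hard failure. A hypothetical caller sketch of that contract (the variable names here are illustrative, not from this patch):

	ret = btrfs_compress_pages(type_level, mapping, start, pages,
				   &nr_pages, &total_in, &total_out);
	if (ret == -E2BIG) {
		/* no pages were allocated: write the extent uncompressed */
	}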
61 | 134 | static int btrfs_decompress_bio(struct compressed_bio *cb);
62 | 135 |
63 | 136 | static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
.. | ..
69 | 142 | 		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
70 | 143 | }
71 | 144 |
72 | | -static int check_compressed_csum(struct btrfs_inode *inode,
73 | | -				 struct compressed_bio *cb,
 | 145 | +static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
74 | 146 | 				 u64 disk_start)
75 | 147 | {
76 | | -	int ret;
 | 148 | +	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 | 149 | +	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 | 150 | +	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
77 | 151 | 	struct page *page;
78 | 152 | 	unsigned long i;
79 | 153 | 	char *kaddr;
80 | | -	u32 csum;
81 | | -	u32 *cb_sum = &cb->sums;
 | 154 | +	u8 csum[BTRFS_CSUM_SIZE];
 | 155 | +	struct compressed_bio *cb = bio->bi_private;
 | 156 | +	u8 *cb_sum = cb->sums;
82 | 157 |
83 | 158 | 	if (inode->flags & BTRFS_INODE_NODATASUM)
84 | 159 | 		return 0;
85 | 160 |
 | 161 | +	shash->tfm = fs_info->csum_shash;
 | 162 | +
86 | 163 | 	for (i = 0; i < cb->nr_pages; i++) {
87 | 164 | 		page = cb->compressed_pages[i];
88 | | -		csum = ~(u32)0;
89 | 165 |
90 | 166 | 		kaddr = kmap_atomic(page);
91 | | -		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
92 | | -		btrfs_csum_final(csum, (u8 *)&csum);
 | 167 | +		crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
93 | 168 | 		kunmap_atomic(kaddr);
94 | 169 |
95 | | -		if (csum != *cb_sum) {
96 | | -			btrfs_print_data_csum_error(inode, disk_start, csum,
97 | | -					*cb_sum, cb->mirror_num);
98 | | -			ret = -EIO;
99 | | -			goto fail;
 | 170 | +		if (memcmp(&csum, cb_sum, csum_size)) {
 | 171 | +			btrfs_print_data_csum_error(inode, disk_start,
 | 172 | +					csum, cb_sum, cb->mirror_num);
 | 173 | +			if (btrfs_io_bio(bio)->device)
 | 174 | +				btrfs_dev_stat_inc_and_print(
 | 175 | +					btrfs_io_bio(bio)->device,
 | 176 | +					BTRFS_DEV_STAT_CORRUPTION_ERRS);
 | 177 | +			return -EIO;
100 | 178 | 		}
101 | | -		cb_sum++;
102 | | -
 | 179 | +		cb_sum += csum_size;
103 | 180 | 	}
104 | | -	ret = 0;
105 | | -fail:
106 | | -	return ret;
 | 181 | +	return 0;
107 | 182 | }
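check_compressed_csum() now goes through the generic crypto shash API instead of the btrfs-private CRC32C helpers, which lets it verify whichever checksum algorithm the filesystem was formatted with. The one-shot digest pattern used above, in isolation (a sketch; tfm stands for any already-allocated shash transform such as fs_info->csum_shash):

	SHASH_DESC_ON_STACK(shash, tfm);	/* descriptor sized for tfm, on the stack */
	shash->tfm = tfm;
	crypto_shash_digest(shash, data, len, out);	/* init + update + final in one call */

Note also that cb->sums is now a byte array holding csum_size bytes per sector, so the comparison cursor advances by csum_size instead of by one u32.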
108 | 183 |
109 | 184 | /* when we finish reading compressed pages from the disk, we
.. | ..
138 | 213 | 	 * Record the correct mirror_num in cb->orig_bio so that
139 | 214 | 	 * read-repair can work properly.
140 | 215 | 	 */
141 | | -	ASSERT(btrfs_io_bio(cb->orig_bio));
142 | 216 | 	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
143 | 217 | 	cb->mirror_num = mirror;
144 | 218 |
.. | ..
150 | 224 | 		goto csum_failed;
151 | 225 |
152 | 226 | 	inode = cb->inode;
153 | | -	ret = check_compressed_csum(BTRFS_I(inode), cb,
 | 227 | +	ret = check_compressed_csum(BTRFS_I(inode), bio,
154 | 228 | 				    (u64)bio->bi_iter.bi_sector << 9);
155 | 229 | 	if (ret)
156 | 230 | 		goto csum_failed;
.. | ..
176 | 250 | 	if (cb->errors) {
177 | 251 | 		bio_io_error(cb->orig_bio);
178 | 252 | 	} else {
179 | | -		int i;
180 | 253 | 		struct bio_vec *bvec;
 | 254 | +		struct bvec_iter_all iter_all;
181 | 255 |
182 | 256 | 		/*
183 | 257 | 		 * we have verified the checksum already, set page
184 | 258 | 		 * checked so the end_io handlers know about it
185 | 259 | 		 */
186 | 260 | 		ASSERT(!bio_flagged(bio, BIO_CLONED));
187 | | -		bio_for_each_segment_all(bvec, cb->orig_bio, i)
 | 261 | +		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
188 | 262 | 			SetPageChecked(bvec->bv_page);
189 | 263 |
190 | 264 | 		bio_endio(cb->orig_bio);
.. | ..
245 | 319 |  */
246 | 320 | static void end_compressed_bio_write(struct bio *bio)
247 | 321 | {
248 | | -	struct extent_io_tree *tree;
249 | 322 | 	struct compressed_bio *cb = bio->bi_private;
250 | 323 | 	struct inode *inode;
251 | 324 | 	struct page *page;
.. | ..
264 | 337 | 	 * call back into the FS and do all the end_io operations
265 | 338 | 	 */
266 | 339 | 	inode = cb->inode;
267 | | -	tree = &BTRFS_I(inode)->io_tree;
268 | 340 | 	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
269 | | -	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
270 | | -					 cb->start,
271 | | -					 cb->start + cb->len - 1,
272 | | -					 NULL,
273 | | -					 !cb->errors);
 | 341 | +	btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
 | 342 | +			cb->start, cb->start + cb->len - 1,
 | 343 | +			!cb->errors);
274 | 344 | 	cb->compressed_pages[0]->mapping = NULL;
275 | 345 |
276 | 346 | 	end_compressed_writeback(inode, cb);
.. | ..
303 | 373 |  * This also checksums the file bytes and gets things ready for
304 | 374 |  * the end io hooks.
305 | 375 |  */
306 | | -blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 | 376 | +blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
307 | 377 | 				 unsigned long len, u64 disk_start,
308 | 378 | 				 unsigned long compressed_len,
309 | 379 | 				 struct page **compressed_pages,
310 | 380 | 				 unsigned long nr_pages,
311 | | -				 unsigned int write_flags)
 | 381 | +				 unsigned int write_flags,
 | 382 | +				 struct cgroup_subsys_state *blkcg_css)
312 | 383 | {
313 | | -	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 | 384 | +	struct btrfs_fs_info *fs_info = inode->root->fs_info;
314 | 385 | 	struct bio *bio = NULL;
315 | 386 | 	struct compressed_bio *cb;
316 | 387 | 	unsigned long bytes_left;
317 | 388 | 	int pg_index = 0;
318 | 389 | 	struct page *page;
319 | 390 | 	u64 first_byte = disk_start;
320 | | -	struct block_device *bdev;
321 | 391 | 	blk_status_t ret;
322 | | -	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 | 392 | +	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
323 | 393 |
324 | | -	WARN_ON(start & ((u64)PAGE_SIZE - 1));
 | 394 | +	WARN_ON(!PAGE_ALIGNED(start));
325 | 395 | 	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
326 | 396 | 	if (!cb)
327 | 397 | 		return BLK_STS_RESOURCE;
328 | 398 | 	refcount_set(&cb->pending_bios, 0);
329 | 399 | 	cb->errors = 0;
330 | | -	cb->inode = inode;
 | 400 | +	cb->inode = &inode->vfs_inode;
331 | 401 | 	cb->start = start;
332 | 402 | 	cb->len = len;
333 | 403 | 	cb->mirror_num = 0;
.. | ..
336 | 406 | 	cb->orig_bio = NULL;
337 | 407 | 	cb->nr_pages = nr_pages;
338 | 408 |
339 | | -	bdev = fs_info->fs_devices->latest_bdev;
340 | | -
341 | | -	bio = btrfs_bio_alloc(bdev, first_byte);
 | 409 | +	bio = btrfs_bio_alloc(first_byte);
342 | 410 | 	bio->bi_opf = REQ_OP_WRITE | write_flags;
343 | 411 | 	bio->bi_private = cb;
344 | 412 | 	bio->bi_end_io = end_compressed_bio_write;
 | 413 | +
 | 414 | +	if (blkcg_css) {
 | 415 | +		bio->bi_opf |= REQ_CGROUP_PUNT;
 | 416 | +		kthread_associate_blkcg(blkcg_css);
 | 417 | +	}
345 | 418 | 	refcount_set(&cb->pending_bios, 1);
346 | 419 |
347 | 420 | 	/* create and submit bios for the compressed pages */
.. | ..
350 | 423 | 		int submit = 0;
351 | 424 |
352 | 425 | 		page = compressed_pages[pg_index];
353 | | -		page->mapping = inode->i_mapping;
 | 426 | +		page->mapping = inode->vfs_inode.i_mapping;
354 | 427 | 		if (bio->bi_iter.bi_size)
355 | | -			submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0);
 | 428 | +			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
 | 429 | +							  0);
356 | 430 |
357 | 431 | 		page->mapping = NULL;
358 | 432 | 		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
.. | ..
373 | 447 | 			BUG_ON(ret); /* -ENOMEM */
374 | 448 | 			}
375 | 449 |
376 | | -			ret = btrfs_map_bio(fs_info, bio, 0, 1);
 | 450 | +			ret = btrfs_map_bio(fs_info, bio, 0);
377 | 451 | 			if (ret) {
378 | 452 | 				bio->bi_status = ret;
379 | 453 | 				bio_endio(bio);
380 | 454 | 			}
381 | 455 |
382 | | -			bio = btrfs_bio_alloc(bdev, first_byte);
 | 456 | +			bio = btrfs_bio_alloc(first_byte);
383 | 457 | 			bio->bi_opf = REQ_OP_WRITE | write_flags;
384 | 458 | 			bio->bi_private = cb;
385 | 459 | 			bio->bi_end_io = end_compressed_bio_write;
 | 460 | +			if (blkcg_css)
 | 461 | +				bio->bi_opf |= REQ_CGROUP_PUNT;
386 | 462 | 			bio_add_page(bio, page, PAGE_SIZE, 0);
387 | 463 | 		}
388 | 464 | 		if (bytes_left < PAGE_SIZE) {
.. | ..
403 | 479 | 		BUG_ON(ret); /* -ENOMEM */
404 | 480 | 	}
405 | 481 |
406 | | -	ret = btrfs_map_bio(fs_info, bio, 0, 1);
 | 482 | +	ret = btrfs_map_bio(fs_info, bio, 0);
407 | 483 | 	if (ret) {
408 | 484 | 		bio->bi_status = ret;
409 | 485 | 		bio_endio(bio);
410 | 486 | 	}
 | 487 | +
 | 488 | +	if (blkcg_css)
 | 489 | +		kthread_associate_blkcg(NULL);
411 | 490 |
412 | 491 | 	return 0;
413 | 492 | }
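The new blkcg_css argument threads cgroup writeback attribution through compressed writes. The pairing is visible in the hunks above; the semantics, as far as I can hedge them, are that kthread_associate_blkcg() makes IO issued from this worker thread accountable to the given cgroup, while REQ_CGROUP_PUNT lets the block layer punt the actual submission to a per-cgroup helper when direct submission would block in the wrong context. The pattern reduces to:

	if (blkcg_css)
		kthread_associate_blkcg(blkcg_css);	/* begin attributing IO */
	/* ... build and submit bios with REQ_CGROUP_PUNT set ... */
	if (blkcg_css)
		kthread_associate_blkcg(NULL);		/* stop attributing IO */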
.. | ..
452 | 531 | 		if (pg_index > end_index)
453 | 532 | 			break;
454 | 533 |
455 | | -		rcu_read_lock();
456 | | -		page = radix_tree_lookup(&mapping->i_pages, pg_index);
457 | | -		rcu_read_unlock();
458 | | -		if (page && !radix_tree_exceptional_entry(page)) {
 | 534 | +		page = xa_load(&mapping->i_pages, pg_index);
 | 535 | +		if (page && !xa_is_value(page)) {
459 | 536 | 			misses++;
460 | 537 | 			if (misses > 4)
461 | 538 | 				break;
.. | ..
498 | 575 |
499 | 576 | 		if (page->index == end_index) {
500 | 577 | 			char *userpage;
501 | | -			size_t zero_offset = isize & (PAGE_SIZE - 1);
 | 578 | +			size_t zero_offset = offset_in_page(isize);
502 | 579 |
503 | 580 | 			if (zero_offset) {
504 | 581 | 				int zeros;
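offset_in_page() here, and PAGE_ALIGNED() in btrfs_submit_compressed_write() above, are the stock helpers from include/linux/mm.h; they expand to the same masks as the open-coded versions they replace:

	#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
	#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)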
.. | ..
543 | 620 | 				 int mirror_num, unsigned long bio_flags)
544 | 621 | {
545 | 622 | 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
546 | | -	struct extent_io_tree *tree;
547 | 623 | 	struct extent_map_tree *em_tree;
548 | 624 | 	struct compressed_bio *cb;
549 | 625 | 	unsigned long compressed_len;
550 | 626 | 	unsigned long nr_pages;
551 | 627 | 	unsigned long pg_index;
552 | 628 | 	struct page *page;
553 | | -	struct block_device *bdev;
554 | 629 | 	struct bio *comp_bio;
555 | 630 | 	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
556 | 631 | 	u64 em_len;
.. | ..
558 | 633 | 	struct extent_map *em;
559 | 634 | 	blk_status_t ret = BLK_STS_RESOURCE;
560 | 635 | 	int faili = 0;
561 | | -	u32 *sums;
 | 636 | +	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 | 637 | +	u8 *sums;
562 | 638 |
563 | | -	tree = &BTRFS_I(inode)->io_tree;
564 | 639 | 	em_tree = &BTRFS_I(inode)->extent_tree;
565 | 640 |
566 | 641 | 	/* we need the actual starting offset of this extent in the file */
.. | ..
581 | 656 | 	cb->errors = 0;
582 | 657 | 	cb->inode = inode;
583 | 658 | 	cb->mirror_num = mirror_num;
584 | | -	sums = &cb->sums;
 | 659 | +	sums = cb->sums;
585 | 660 |
586 | 661 | 	cb->start = em->orig_start;
587 | 662 | 	em_len = em->len;
.. | ..
601 | 676 | 	if (!cb->compressed_pages)
602 | 677 | 		goto fail1;
603 | 678 |
604 | | -	bdev = fs_info->fs_devices->latest_bdev;
605 | | -
606 | 679 | 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
607 | 680 | 		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
608 | 681 | 							      __GFP_HIGHMEM);
.. | ..
620 | 693 | 	/* include any pages we added in add_ra-bio_pages */
621 | 694 | 	cb->len = bio->bi_iter.bi_size;
622 | 695 |
623 | | -	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 | 696 | +	comp_bio = btrfs_bio_alloc(cur_disk_byte);
624 | 697 | 	comp_bio->bi_opf = REQ_OP_READ;
625 | 698 | 	comp_bio->bi_private = cb;
626 | 699 | 	comp_bio->bi_end_io = end_compressed_bio_read;
.. | ..
634 | 707 | 		page->index = em_start >> PAGE_SHIFT;
635 | 708 |
636 | 709 | 		if (comp_bio->bi_iter.bi_size)
637 | | -			submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE,
638 | | -					comp_bio, 0);
 | 710 | +			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
 | 711 | +							  comp_bio, 0);
639 | 712 |
640 | 713 | 		page->mapping = NULL;
641 | 714 | 		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
642 | 715 | 		    PAGE_SIZE) {
 | 716 | +			unsigned int nr_sectors;
 | 717 | +
643 | 718 | 			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
644 | 719 | 						  BTRFS_WQ_ENDIO_DATA);
645 | 720 | 			BUG_ON(ret); /* -ENOMEM */
.. | ..
654 | 729 |
655 | 730 | 			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
656 | 731 | 				ret = btrfs_lookup_bio_sums(inode, comp_bio,
657 | | -							    sums);
 | 732 | +							    (u64)-1, sums);
658 | 733 | 				BUG_ON(ret); /* -ENOMEM */
659 | 734 | 			}
660 | | -			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
661 | | -					     fs_info->sectorsize);
662 | 735 |
663 | | -			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 | 736 | +			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
 | 737 | +						  fs_info->sectorsize);
 | 738 | +			sums += csum_size * nr_sectors;
 | 739 | +
 | 740 | +			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
664 | 741 | 			if (ret) {
665 | 742 | 				comp_bio->bi_status = ret;
666 | 743 | 				bio_endio(comp_bio);
667 | 744 | 			}
668 | 745 |
669 | | -			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 | 746 | +			comp_bio = btrfs_bio_alloc(cur_disk_byte);
670 | 747 | 			comp_bio->bi_opf = REQ_OP_READ;
671 | 748 | 			comp_bio->bi_private = cb;
672 | 749 | 			comp_bio->bi_end_io = end_compressed_bio_read;
.. | ..
680 | 757 | 	BUG_ON(ret); /* -ENOMEM */
681 | 758 |
682 | 759 | 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
683 | | -		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 | 760 | +		ret = btrfs_lookup_bio_sums(inode, comp_bio, (u64)-1, sums);
684 | 761 | 		BUG_ON(ret); /* -ENOMEM */
685 | 762 | 	}
686 | 763 |
687 | | -	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 | 764 | +	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
688 | 765 | 	if (ret) {
689 | 766 | 		comp_bio->bi_status = ret;
690 | 767 | 		bio_endio(comp_bio);
.. | ..
753 | 830 | 	struct list_head list;
754 | 831 | };
755 | 832 |
 | 833 | +static struct workspace_manager heuristic_wsm;
 | 834 | +
756 | 835 | static void free_heuristic_ws(struct list_head *ws)
757 | 836 | {
758 | 837 | 	struct heuristic_ws *workspace;
.. | ..
765 | 844 | 	kfree(workspace);
766 | 845 | }
767 | 846 |
768 | | -static struct list_head *alloc_heuristic_ws(void)
 | 847 | +static struct list_head *alloc_heuristic_ws(unsigned int level)
769 | 848 | {
770 | 849 | 	struct heuristic_ws *ws;
771 | 850 |
.. | ..
792 | 871 | 	return ERR_PTR(-ENOMEM);
793 | 872 | }
794 | 873 |
795 | | -struct workspaces_list {
796 | | -	struct list_head idle_ws;
797 | | -	spinlock_t ws_lock;
798 | | -	/* Number of free workspaces */
799 | | -	int free_ws;
800 | | -	/* Total number of allocated workspaces */
801 | | -	atomic_t total_ws;
802 | | -	/* Waiters for a free workspace */
803 | | -	wait_queue_head_t ws_wait;
 | 874 | +const struct btrfs_compress_op btrfs_heuristic_compress = {
 | 875 | +	.workspace_manager = &heuristic_wsm,
804 | 876 | };
805 | 877 |
806 | | -static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
807 | | -
808 | | -static struct workspaces_list btrfs_heuristic_ws;
809 | | -
810 | 878 | static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 | 879 | +	/* The heuristic is represented as compression type 0 */
 | 880 | +	&btrfs_heuristic_compress,
811 | 881 | 	&btrfs_zlib_compress,
812 | 882 | 	&btrfs_lzo_compress,
813 | 883 | 	&btrfs_zstd_compress,
814 | 884 | };
815 | 885 |
816 | | -void __init btrfs_init_compress(void)
 | 886 | +static struct list_head *alloc_workspace(int type, unsigned int level)
817 | 887 | {
 | 888 | +	switch (type) {
 | 889 | +	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
 | 890 | +	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
 | 891 | +	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
 | 892 | +	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
 | 893 | +	default:
 | 894 | +		/*
 | 895 | +		 * This can't happen, the type is validated several times
 | 896 | +		 * before we get here.
 | 897 | +		 */
 | 898 | +		BUG();
 | 899 | +	}
 | 900 | +}
 | 901 | +
 | 902 | +static void free_workspace(int type, struct list_head *ws)
 | 903 | +{
 | 904 | +	switch (type) {
 | 905 | +	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
 | 906 | +	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
 | 907 | +	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
 | 908 | +	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
 | 909 | +	default:
 | 910 | +		/*
 | 911 | +		 * This can't happen, the type is validated several times
 | 912 | +		 * before we get here.
 | 913 | +		 */
 | 914 | +		BUG();
 | 915 | +	}
 | 916 | +}
 | 917 | +
 | 918 | +static void btrfs_init_workspace_manager(int type)
 | 919 | +{
 | 920 | +	struct workspace_manager *wsm;
818 | 921 | 	struct list_head *workspace;
819 | | -	int i;
820 | 922 |
821 | | -	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
822 | | -	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
823 | | -	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
824 | | -	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
 | 923 | +	wsm = btrfs_compress_op[type]->workspace_manager;
 | 924 | +	INIT_LIST_HEAD(&wsm->idle_ws);
 | 925 | +	spin_lock_init(&wsm->ws_lock);
 | 926 | +	atomic_set(&wsm->total_ws, 0);
 | 927 | +	init_waitqueue_head(&wsm->ws_wait);
825 | 928 |
826 | | -	workspace = alloc_heuristic_ws();
 | 929 | +	/*
 | 930 | +	 * Preallocate one workspace for each compression type so we can
 | 931 | +	 * guarantee forward progress in the worst case
 | 932 | +	 */
 | 933 | +	workspace = alloc_workspace(type, 0);
827 | 934 | 	if (IS_ERR(workspace)) {
828 | 935 | 		pr_warn(
829 | | -	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
 | 936 | +	"BTRFS: cannot preallocate compression workspace, will try later\n");
830 | 937 | 	} else {
831 | | -		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
832 | | -		btrfs_heuristic_ws.free_ws = 1;
833 | | -		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
 | 938 | +		atomic_set(&wsm->total_ws, 1);
 | 939 | +		wsm->free_ws = 1;
 | 940 | +		list_add(workspace, &wsm->idle_ws);
834 | 941 | 	}
 | 942 | +}
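struct workspace_manager itself is not shown in this patch; judging from the removed struct workspaces_list, it carries the same five fields and presumably moved to compression.h so that each btrfs_compress_op can point at its own instance. A reconstruction under that assumption:

	struct workspace_manager {
		struct list_head idle_ws;
		spinlock_t ws_lock;
		/* Number of free workspaces */
		int free_ws;
		/* Total number of allocated workspaces */
		atomic_t total_ws;
		/* Waiters for a free workspace */
		wait_queue_head_t ws_wait;
	};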
835 | 943 |
836 | | -	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
837 | | -		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
838 | | -		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
839 | | -		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
840 | | -		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
 | 944 | +static void btrfs_cleanup_workspace_manager(int type)
 | 945 | +{
 | 946 | +	struct workspace_manager *wsman;
 | 947 | +	struct list_head *ws;
841 | 948 |
842 | | -		/*
843 | | -		 * Preallocate one workspace for each compression type so
844 | | -		 * we can guarantee forward progress in the worst case
845 | | -		 */
846 | | -		workspace = btrfs_compress_op[i]->alloc_workspace();
847 | | -		if (IS_ERR(workspace)) {
848 | | -			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
849 | | -		} else {
850 | | -			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
851 | | -			btrfs_comp_ws[i].free_ws = 1;
852 | | -			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
853 | | -		}
 | 949 | +	wsman = btrfs_compress_op[type]->workspace_manager;
 | 950 | +	while (!list_empty(&wsman->idle_ws)) {
 | 951 | +		ws = wsman->idle_ws.next;
 | 952 | +		list_del(ws);
 | 953 | +		free_workspace(type, ws);
 | 954 | +		atomic_dec(&wsman->total_ws);
854 | 955 | 	}
855 | 956 | }
856 | 957 |
.. | ..
860 | 961 |  * Preallocation makes a forward progress guarantees and we do not return
861 | 962 |  * errors.
862 | 963 |  */
863 | | -static struct list_head *__find_workspace(int type, bool heuristic)
 | 964 | +struct list_head *btrfs_get_workspace(int type, unsigned int level)
864 | 965 | {
 | 966 | +	struct workspace_manager *wsm;
865 | 967 | 	struct list_head *workspace;
866 | 968 | 	int cpus = num_online_cpus();
867 | | -	int idx = type - 1;
868 | 969 | 	unsigned nofs_flag;
869 | 970 | 	struct list_head *idle_ws;
870 | 971 | 	spinlock_t *ws_lock;
.. | ..
872 | 973 | 	wait_queue_head_t *ws_wait;
873 | 974 | 	int *free_ws;
874 | 975 |
875 | | -	if (heuristic) {
876 | | -		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
877 | | -		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
878 | | -		total_ws = &btrfs_heuristic_ws.total_ws;
879 | | -		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
880 | | -		free_ws	 = &btrfs_heuristic_ws.free_ws;
881 | | -	} else {
882 | | -		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
883 | | -		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
884 | | -		total_ws = &btrfs_comp_ws[idx].total_ws;
885 | | -		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
886 | | -		free_ws	 = &btrfs_comp_ws[idx].free_ws;
887 | | -	}
 | 976 | +	wsm = btrfs_compress_op[type]->workspace_manager;
 | 977 | +	idle_ws	 = &wsm->idle_ws;
 | 978 | +	ws_lock	 = &wsm->ws_lock;
 | 979 | +	total_ws = &wsm->total_ws;
 | 980 | +	ws_wait	 = &wsm->ws_wait;
 | 981 | +	free_ws	 = &wsm->free_ws;
888 | 982 |
889 | 983 | again:
890 | 984 | 	spin_lock(ws_lock);
.. | ..
915 | 1009 | 	 * context of btrfs_compress_bio/btrfs_compress_pages
916 | 1010 | 	 */
917 | 1011 | 	nofs_flag = memalloc_nofs_save();
918 | | -	if (heuristic)
919 | | -		workspace = alloc_heuristic_ws();
920 | | -	else
921 | | -		workspace = btrfs_compress_op[idx]->alloc_workspace();
 | 1012 | +	workspace = alloc_workspace(type, level);
922 | 1013 | 	memalloc_nofs_restore(nofs_flag);
923 | 1014 |
924 | 1015 | 	if (IS_ERR(workspace)) {
.. | ..
949 | 1040 | 	return workspace;
950 | 1041 | }
951 | 1042 |
952 | | -static struct list_head *find_workspace(int type)
 | 1043 | +static struct list_head *get_workspace(int type, int level)
953 | 1044 | {
954 | | -	return __find_workspace(type, false);
 | 1045 | +	switch (type) {
 | 1046 | +	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
 | 1047 | +	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
 | 1048 | +	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
 | 1049 | +	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
 | 1050 | +	default:
 | 1051 | +		/*
 | 1052 | +		 * This can't happen, the type is validated several times
 | 1053 | +		 * before we get here.
 | 1054 | +		 */
 | 1055 | +		BUG();
 | 1056 | +	}
955 | 1057 | }
956 | 1058 |
957 | 1059 | /*
958 | 1060 |  * put a workspace struct back on the list or free it if we have enough
959 | 1061 |  * idle ones sitting around
960 | 1062 |  */
961 | | -static void __free_workspace(int type, struct list_head *workspace,
962 | | -			     bool heuristic)
 | 1063 | +void btrfs_put_workspace(int type, struct list_head *ws)
963 | 1064 | {
964 | | -	int idx = type - 1;
 | 1065 | +	struct workspace_manager *wsm;
965 | 1066 | 	struct list_head *idle_ws;
966 | 1067 | 	spinlock_t *ws_lock;
967 | 1068 | 	atomic_t *total_ws;
968 | 1069 | 	wait_queue_head_t *ws_wait;
969 | 1070 | 	int *free_ws;
970 | 1071 |
971 | | -	if (heuristic) {
972 | | -		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
973 | | -		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
974 | | -		total_ws = &btrfs_heuristic_ws.total_ws;
975 | | -		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
976 | | -		free_ws	 = &btrfs_heuristic_ws.free_ws;
977 | | -	} else {
978 | | -		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
979 | | -		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
980 | | -		total_ws = &btrfs_comp_ws[idx].total_ws;
981 | | -		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
982 | | -		free_ws	 = &btrfs_comp_ws[idx].free_ws;
983 | | -	}
 | 1072 | +	wsm = btrfs_compress_op[type]->workspace_manager;
 | 1073 | +	idle_ws	 = &wsm->idle_ws;
 | 1074 | +	ws_lock	 = &wsm->ws_lock;
 | 1075 | +	total_ws = &wsm->total_ws;
 | 1076 | +	ws_wait	 = &wsm->ws_wait;
 | 1077 | +	free_ws	 = &wsm->free_ws;
984 | 1078 |
985 | 1079 | 	spin_lock(ws_lock);
986 | 1080 | 	if (*free_ws <= num_online_cpus()) {
987 | | -		list_add(workspace, idle_ws);
 | 1081 | +		list_add(ws, idle_ws);
988 | 1082 | 		(*free_ws)++;
989 | 1083 | 		spin_unlock(ws_lock);
990 | 1084 | 		goto wake;
991 | 1085 | 	}
992 | 1086 | 	spin_unlock(ws_lock);
993 | 1087 |
994 | | -	if (heuristic)
995 | | -		free_heuristic_ws(workspace);
996 | | -	else
997 | | -		btrfs_compress_op[idx]->free_workspace(workspace);
 | 1088 | +	free_workspace(type, ws);
998 | 1089 | 	atomic_dec(total_ws);
999 | 1090 | wake:
1000 | 1091 | 	cond_wake_up(ws_wait);
1001 | 1092 | }
1002 | 1093 |
1003 | | -static void free_workspace(int type, struct list_head *ws)
 | 1094 | +static void put_workspace(int type, struct list_head *ws)
1004 | 1095 | {
1005 | | -	return __free_workspace(type, ws, false);
 | 1096 | +	switch (type) {
 | 1097 | +	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
 | 1098 | +	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
 | 1099 | +	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
 | 1100 | +	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
 | 1101 | +	default:
 | 1102 | +		/*
 | 1103 | +		 * This can't happen, the type is validated several times
 | 1104 | +		 * before we get here.
 | 1105 | +		 */
 | 1106 | +		BUG();
 | 1107 | +	}
1006 | 1108 | }
1007 | 1109 |
1008 | 1110 | /*
1009 | | - * cleanup function for module exit
 | 1111 | + * Adjust @level according to the limits of the compression algorithm or
 | 1112 | + * fallback to default
1010 | 1113 |  */
1011 | | -static void free_workspaces(void)
 | 1114 | +static unsigned int btrfs_compress_set_level(int type, unsigned level)
1012 | 1115 | {
1013 | | -	struct list_head *workspace;
1014 | | -	int i;
 | 1116 | +	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1015 | 1117 |
1016 | | -	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
1017 | | -		workspace = btrfs_heuristic_ws.idle_ws.next;
1018 | | -		list_del(workspace);
1019 | | -		free_heuristic_ws(workspace);
1020 | | -		atomic_dec(&btrfs_heuristic_ws.total_ws);
1021 | | -	}
 | 1118 | +	if (level == 0)
 | 1119 | +		level = ops->default_level;
 | 1120 | +	else
 | 1121 | +		level = min(level, ops->max_level);
1022 | 1122 |
1023 | | -	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
1024 | | -		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
1025 | | -			workspace = btrfs_comp_ws[i].idle_ws.next;
1026 | | -			list_del(workspace);
1027 | | -			btrfs_compress_op[i]->free_workspace(workspace);
1028 | | -			atomic_dec(&btrfs_comp_ws[i].total_ws);
1029 | | -		}
1030 | | -	}
 | 1123 | +	return level;
1031 | 1124 | }
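Concretely, assuming the zlib ops declare default_level = 3 (the old BTRFS_ZLIB_DEFAULT_LEVEL fallback) and max_level = 9:

	btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 0);	/* -> 3, the default */
	btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 5);	/* -> 5, already in range */
	btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 42);	/* -> 9, clamped to max */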
1032 | 1125 |
1033 | 1126 | /*
.. | ..
1059 | 1152 | 			 unsigned long *total_in,
1060 | 1153 | 			 unsigned long *total_out)
1061 | 1154 | {
 | 1155 | +	int type = btrfs_compress_type(type_level);
 | 1156 | +	int level = btrfs_compress_level(type_level);
1062 | 1157 | 	struct list_head *workspace;
1063 | 1158 | 	int ret;
1064 | | -	int type = type_level & 0xF;
1065 | 1159 |
1066 | | -	workspace = find_workspace(type);
1067 | | -
1068 | | -	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
1069 | | -	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
1070 | | -						      start, pages,
1071 | | -						      out_pages,
1072 | | -						      total_in, total_out);
1073 | | -	free_workspace(type, workspace);
 | 1160 | +	level = btrfs_compress_set_level(type, level);
 | 1161 | +	workspace = get_workspace(type, level);
 | 1162 | +	ret = compression_compress_pages(type, workspace, mapping, start, pages,
 | 1163 | +					 out_pages, total_in, total_out);
 | 1164 | +	put_workspace(type, workspace);
1074 | 1165 | 	return ret;
1075 | 1166 | }
1076 | 1167 |
.. | ..
1094 | 1185 | 	int ret;
1095 | 1186 | 	int type = cb->compress_type;
1096 | 1187 |
1097 | | -	workspace = find_workspace(type);
1098 | | -	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
1099 | | -	free_workspace(type, workspace);
 | 1188 | +	workspace = get_workspace(type, 0);
 | 1189 | +	ret = compression_decompress_bio(type, workspace, cb);
 | 1190 | +	put_workspace(type, workspace);
1100 | 1191 |
1101 | 1192 | 	return ret;
1102 | 1193 | }
.. | ..
1112 | 1203 | 	struct list_head *workspace;
1113 | 1204 | 	int ret;
1114 | 1205 |
1115 | | -	workspace = find_workspace(type);
 | 1206 | +	workspace = get_workspace(type, 0);
 | 1207 | +	ret = compression_decompress(type, workspace, data_in, dest_page,
 | 1208 | +				     start_byte, srclen, destlen);
 | 1209 | +	put_workspace(type, workspace);
1116 | 1210 |
1117 | | -	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
1118 | | -						  dest_page, start_byte,
1119 | | -						  srclen, destlen);
1120 | | -
1121 | | -	free_workspace(type, workspace);
1122 | 1211 | 	return ret;
 | 1212 | +}
 | 1213 | +
 | 1214 | +void __init btrfs_init_compress(void)
 | 1215 | +{
 | 1216 | +	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
 | 1217 | +	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
 | 1218 | +	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
 | 1219 | +	zstd_init_workspace_manager();
1123 | 1220 | }
1124 | 1221 |
1125 | 1222 | void __cold btrfs_exit_compress(void)
1126 | 1223 | {
1127 | | -	free_workspaces();
 | 1224 | +	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
 | 1225 | +	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
 | 1226 | +	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
 | 1227 | +	zstd_cleanup_workspace_manager();
1128 | 1228 | }
1129 | 1229 |
1130 | 1230 | /*
.. | ..
1172 | 1272 | 	/* copy bytes from the working buffer into the pages */
1173 | 1273 | 	while (working_bytes > 0) {
1174 | 1274 | 		bytes = min_t(unsigned long, bvec.bv_len,
1175 | | -				PAGE_SIZE - buf_offset);
 | 1275 | +				PAGE_SIZE - (buf_offset % PAGE_SIZE));
1176 | 1276 | 		bytes = min(bytes, working_bytes);
1177 | 1277 |
1178 | 1278 | 		kaddr = kmap_atomic(bvec.bv_page);
.. | ..
1226 | 1326 | /*
1227 | 1327 |  * Shannon Entropy calculation
1228 | 1328 |  *
1229 | | - * Pure byte distribution analysis fails to determine compressiability of data.
 | 1329 | + * Pure byte distribution analysis fails to determine compressibility of data.
1230 | 1330 |  * Try calculating entropy to estimate the average minimum number of bits
1231 | 1331 |  * needed to encode the sampled data.
1232 | 1332 |  *
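The estimate referenced in this comment is the standard Shannon entropy of the sampled byte histogram: with c_i occurrences of byte value i out of N sampled bytes,

	H = -sum_i (c_i / N) * log2(c_i / N)	/* bits per byte, in the range 0..8 */

A result near 8 bits per byte means the sample is essentially incompressible; low values suggest compression is worthwhile.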
.. | ..
1290 | 1390 |
1291 | 1391 | /*
1292 | 1392 |  * Use 4 bits as radix base
1293 | | - * Use 16 u32 counters for calculating new possition in buf array
 | 1393 | + * Use 16 u32 counters for calculating new position in buf array
1294 | 1394 |  *
1295 | 1395 |  * @array     - array that will be sorted
1296 | 1396 |  * @array_buf - buffer array to store sorting results
.. | ..
1535 | 1635 |  */
1536 | 1636 | int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1537 | 1637 | {
1538 | | -	struct list_head *ws_list = __find_workspace(0, true);
 | 1638 | +	struct list_head *ws_list = get_workspace(0, 0);
1539 | 1639 | 	struct heuristic_ws *ws;
1540 | 1640 | 	u32 i;
1541 | 1641 | 	u8 byte;
.. | ..
1604 | 1704 | 	}
1605 | 1705 |
1606 | 1706 | out:
1607 | | -	__free_workspace(0, ws_list, true);
 | 1707 | +	put_workspace(0, ws_list);
1608 | 1708 | 	return ret;
1609 | 1709 | }
1610 | 1710 |
1611 | | -unsigned int btrfs_compress_str2level(const char *str)
 | 1711 | +/*
 | 1712 | + * Convert the compression suffix (eg. after "zlib" starting with ":") to
 | 1713 | + * level, unrecognized string will set the default level
 | 1714 | + */
 | 1715 | +unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1612 | 1716 | {
1613 | | -	if (strncmp(str, "zlib", 4) != 0)
 | 1717 | +	unsigned int level = 0;
 | 1718 | +	int ret;
 | 1719 | +
 | 1720 | +	if (!type)
1614 | 1721 | 		return 0;
1615 | 1722 |
1616 | | -	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
1617 | | -	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
1618 | | -		return str[5] - '0';
 | 1723 | +	if (str[0] == ':') {
 | 1724 | +		ret = kstrtouint(str + 1, 10, &level);
 | 1725 | +		if (ret)
 | 1726 | +			level = 0;
 | 1727 | +	}
1619 | 1728 |
1620 | | -	return BTRFS_ZLIB_DEFAULT_LEVEL;
 | 1729 | +	level = btrfs_compress_set_level(type, level);
 | 1730 | +
 | 1731 | +	return level;
1621 | 1732 | }
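With the new parser, and again assuming zlib's default level 3 and maximum 9, the mapping looks like this (str is the suffix that follows the algorithm name in the mount option):

	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":7");	/* -> 7 */
	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":42");	/* -> 9, clamped */
	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, "");	/* -> 3, the default */
	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":bad");	/* -> 3, kstrtouint() fails */
	btrfs_compress_str2level(0, ":7");			/* -> 0, no compression */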