From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] btrfs: compression: backport workspace-manager, checksum and
 bio interface updates to compression.c

---
 kernel/fs/btrfs/compression.c | 511 ++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 311 insertions(+), 200 deletions(-)

diff --git a/kernel/fs/btrfs/compression.c b/kernel/fs/btrfs/compression.c
index 919c033..646416b 100644
--- a/kernel/fs/btrfs/compression.c
+++ b/kernel/fs/btrfs/compression.c
@@ -17,6 +17,8 @@
 #include <linux/slab.h>
 #include <linux/sched/mm.h>
 #include <linux/log2.h>
+#include <crypto/hash.h>
+#include "misc.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -37,6 +39,8 @@
     case BTRFS_COMPRESS_ZSTD:
     case BTRFS_COMPRESS_NONE:
         return btrfs_compress_types[type];
+    default:
+        break;
     }
 
     return NULL;
@@ -58,6 +62,75 @@
     return false;
 }
 
+static int compression_compress_pages(int type, struct list_head *ws,
+        struct address_space *mapping, u64 start, struct page **pages,
+        unsigned long *out_pages, unsigned long *total_in,
+        unsigned long *total_out)
+{
+    switch (type) {
+    case BTRFS_COMPRESS_ZLIB:
+        return zlib_compress_pages(ws, mapping, start, pages,
+                out_pages, total_in, total_out);
+    case BTRFS_COMPRESS_LZO:
+        return lzo_compress_pages(ws, mapping, start, pages,
+                out_pages, total_in, total_out);
+    case BTRFS_COMPRESS_ZSTD:
+        return zstd_compress_pages(ws, mapping, start, pages,
+                out_pages, total_in, total_out);
+    case BTRFS_COMPRESS_NONE:
+    default:
+        /*
+         * This can happen when compression races with remount setting
+         * it to 'no compress', while caller doesn't call
+         * inode_need_compress() to check if we really need to
+         * compress.
+         *
+         * Not a big deal, just need to inform caller that we
+         * haven't allocated any pages yet.
+         */
+        *out_pages = 0;
+        return -E2BIG;
+    }
+}
+
+static int compression_decompress_bio(int type, struct list_head *ws,
+        struct compressed_bio *cb)
+{
+    switch (type) {
+    case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
+    case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
+    case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
+    case BTRFS_COMPRESS_NONE:
+    default:
+        /*
+         * This can't happen, the type is validated several times
+         * before we get here.
+         */
+        BUG();
+    }
+}
+
+static int compression_decompress(int type, struct list_head *ws,
+        unsigned char *data_in, struct page *dest_page,
+        unsigned long start_byte, size_t srclen, size_t destlen)
+{
+    switch (type) {
+    case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
+                        start_byte, srclen, destlen);
+    case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
+                        start_byte, srclen, destlen);
+    case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
+                        start_byte, srclen, destlen);
+    case BTRFS_COMPRESS_NONE:
+    default:
+        /*
+         * This can't happen, the type is validated several times
+         * before we get here.
+         */
+        BUG();
+    }
+}
+
 static int btrfs_decompress_bio(struct compressed_bio *cb);
 
 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
@@ -69,41 +142,43 @@
     (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
 }
 
-static int check_compressed_csum(struct btrfs_inode *inode,
-                 struct compressed_bio *cb,
+static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
                  u64 disk_start)
 {
-    int ret;
+    struct btrfs_fs_info *fs_info = inode->root->fs_info;
+    SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
+    const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
     struct page *page;
     unsigned long i;
     char *kaddr;
-    u32 csum;
-    u32 *cb_sum = &cb->sums;
+    u8 csum[BTRFS_CSUM_SIZE];
+    struct compressed_bio *cb = bio->bi_private;
+    u8 *cb_sum = cb->sums;
 
     if (inode->flags & BTRFS_INODE_NODATASUM)
         return 0;
 
+    shash->tfm = fs_info->csum_shash;
+
     for (i = 0; i < cb->nr_pages; i++) {
         page = cb->compressed_pages[i];
 
-        csum = ~(u32)0;
         kaddr = kmap_atomic(page);
-        csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
-        btrfs_csum_final(csum, (u8 *)&csum);
+        crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
         kunmap_atomic(kaddr);
 
-        if (csum != *cb_sum) {
-            btrfs_print_data_csum_error(inode, disk_start, csum,
-                    *cb_sum, cb->mirror_num);
-            ret = -EIO;
-            goto fail;
+        if (memcmp(&csum, cb_sum, csum_size)) {
+            btrfs_print_data_csum_error(inode, disk_start,
+                    csum, cb_sum, cb->mirror_num);
+            if (btrfs_io_bio(bio)->device)
+                btrfs_dev_stat_inc_and_print(
+                    btrfs_io_bio(bio)->device,
+                    BTRFS_DEV_STAT_CORRUPTION_ERRS);
+            return -EIO;
         }
-        cb_sum++;
-
+        cb_sum += csum_size;
     }
-    ret = 0;
-fail:
-    return ret;
+    return 0;
 }
 
 /* when we finish reading compressed pages from the disk, we
@@ -138,7 +213,6 @@
      * Record the correct mirror_num in cb->orig_bio so that
      * read-repair can work properly.
      */
-    ASSERT(btrfs_io_bio(cb->orig_bio));
     btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
     cb->mirror_num = mirror;
 
@@ -150,7 +224,7 @@
         goto csum_failed;
 
     inode = cb->inode;
-    ret = check_compressed_csum(BTRFS_I(inode), cb,
+    ret = check_compressed_csum(BTRFS_I(inode), bio,
                     (u64)bio->bi_iter.bi_sector << 9);
     if (ret)
         goto csum_failed;
@@ -176,15 +250,15 @@
     if (cb->errors) {
         bio_io_error(cb->orig_bio);
     } else {
-        int i;
         struct bio_vec *bvec;
+        struct bvec_iter_all iter_all;
 
         /*
         * we have verified the checksum already, set page
         * checked so the end_io handlers know about it
         */
        ASSERT(!bio_flagged(bio, BIO_CLONED));
-        bio_for_each_segment_all(bvec, cb->orig_bio, i)
+        bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
             SetPageChecked(bvec->bv_page);
 
         bio_endio(cb->orig_bio);
@@ -245,7 +319,6 @@
  */
 static void end_compressed_bio_write(struct bio *bio)
 {
-    struct extent_io_tree *tree;
     struct compressed_bio *cb = bio->bi_private;
     struct inode *inode;
     struct page *page;
@@ -264,13 +337,10 @@
      * call back into the FS and do all the end_io operations
      */
     inode = cb->inode;
-    tree = &BTRFS_I(inode)->io_tree;
     cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
-    tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
-                     cb->start,
-                     cb->start + cb->len - 1,
-                     NULL,
-                     !cb->errors);
+    btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
+            cb->start, cb->start + cb->len - 1,
+            !cb->errors);
 
     cb->compressed_pages[0]->mapping = NULL;
     end_compressed_writeback(inode, cb);
@@ -303,31 +373,31 @@
  * This also checksums the file bytes and gets things ready for
  * the end io hooks.
  */
-blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
+blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                  unsigned long len, u64 disk_start,
                  unsigned long compressed_len,
                  struct page **compressed_pages,
                  unsigned long nr_pages,
-                 unsigned int write_flags)
+                 unsigned int write_flags,
+                 struct cgroup_subsys_state *blkcg_css)
 {
-    struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+    struct btrfs_fs_info *fs_info = inode->root->fs_info;
     struct bio *bio = NULL;
     struct compressed_bio *cb;
     unsigned long bytes_left;
     int pg_index = 0;
     struct page *page;
     u64 first_byte = disk_start;
-    struct block_device *bdev;
     blk_status_t ret;
-    int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+    int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
 
-    WARN_ON(start & ((u64)PAGE_SIZE - 1));
+    WARN_ON(!PAGE_ALIGNED(start));
     cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
     if (!cb)
         return BLK_STS_RESOURCE;
     refcount_set(&cb->pending_bios, 0);
     cb->errors = 0;
-    cb->inode = inode;
+    cb->inode = &inode->vfs_inode;
     cb->start = start;
     cb->len = len;
     cb->mirror_num = 0;
@@ -336,12 +406,15 @@
     cb->orig_bio = NULL;
     cb->nr_pages = nr_pages;
 
-    bdev = fs_info->fs_devices->latest_bdev;
-
-    bio = btrfs_bio_alloc(bdev, first_byte);
+    bio = btrfs_bio_alloc(first_byte);
     bio->bi_opf = REQ_OP_WRITE | write_flags;
     bio->bi_private = cb;
     bio->bi_end_io = end_compressed_bio_write;
+
+    if (blkcg_css) {
+        bio->bi_opf |= REQ_CGROUP_PUNT;
+        kthread_associate_blkcg(blkcg_css);
+    }
     refcount_set(&cb->pending_bios, 1);
 
     /* create and submit bios for the compressed pages */
@@ -350,9 +423,10 @@
         int submit = 0;
 
         page = compressed_pages[pg_index];
-        page->mapping = inode->i_mapping;
+        page->mapping = inode->vfs_inode.i_mapping;
         if (bio->bi_iter.bi_size)
-            submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0);
+            submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
+                              0);
 
         page->mapping = NULL;
         if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
@@ -373,16 +447,18 @@
             BUG_ON(ret); /* -ENOMEM */
         }
 
-        ret = btrfs_map_bio(fs_info, bio, 0, 1);
+        ret = btrfs_map_bio(fs_info, bio, 0);
         if (ret) {
             bio->bi_status = ret;
             bio_endio(bio);
         }
 
-        bio = btrfs_bio_alloc(bdev, first_byte);
+        bio = btrfs_bio_alloc(first_byte);
         bio->bi_opf = REQ_OP_WRITE | write_flags;
         bio->bi_private = cb;
         bio->bi_end_io = end_compressed_bio_write;
+        if (blkcg_css)
+            bio->bi_opf |= REQ_CGROUP_PUNT;
         bio_add_page(bio, page, PAGE_SIZE, 0);
     }
     if (bytes_left < PAGE_SIZE) {
@@ -403,11 +479,14 @@
         BUG_ON(ret); /* -ENOMEM */
     }
 
-    ret = btrfs_map_bio(fs_info, bio, 0, 1);
+    ret = btrfs_map_bio(fs_info, bio, 0);
     if (ret) {
         bio->bi_status = ret;
         bio_endio(bio);
     }
+
+    if (blkcg_css)
+        kthread_associate_blkcg(NULL);
 
     return 0;
 }
 
@@ -452,10 +531,8 @@
         if (pg_index > end_index)
             break;
 
-        rcu_read_lock();
-        page = radix_tree_lookup(&mapping->i_pages, pg_index);
-        rcu_read_unlock();
-        if (page && !radix_tree_exceptional_entry(page)) {
+        page = xa_load(&mapping->i_pages, pg_index);
+        if (page && !xa_is_value(page)) {
             misses++;
             if (misses > 4)
                 break;
@@ -498,7 +575,7 @@
         if (page->index == end_index) {
             char *userpage;
-            size_t zero_offset = isize & (PAGE_SIZE - 1);
+            size_t zero_offset = offset_in_page(isize);
 
             if (zero_offset) {
                 int zeros;
@@ -543,14 +620,12 @@
                  int mirror_num, unsigned long bio_flags)
 {
     struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-    struct extent_io_tree *tree;
     struct extent_map_tree *em_tree;
     struct compressed_bio *cb;
     unsigned long compressed_len;
     unsigned long nr_pages;
     unsigned long pg_index;
     struct page *page;
-    struct block_device *bdev;
     struct bio *comp_bio;
     u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
     u64 em_len;
@@ -558,9 +633,9 @@
     struct extent_map *em;
     blk_status_t ret = BLK_STS_RESOURCE;
     int faili = 0;
-    u32 *sums;
+    const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+    u8 *sums;
 
-    tree = &BTRFS_I(inode)->io_tree;
     em_tree = &BTRFS_I(inode)->extent_tree;
 
     /* we need the actual starting offset of this extent in the file */
@@ -581,7 +656,7 @@
     cb->errors = 0;
     cb->inode = inode;
     cb->mirror_num = mirror_num;
-    sums = &cb->sums;
+    sums = cb->sums;
 
     cb->start = em->orig_start;
     em_len = em->len;
@@ -601,8 +676,6 @@
     if (!cb->compressed_pages)
         goto fail1;
 
-    bdev = fs_info->fs_devices->latest_bdev;
-
     for (pg_index = 0; pg_index < nr_pages; pg_index++) {
         cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
@@ -620,7 +693,7 @@
     /* include any pages we added in add_ra-bio_pages */
     cb->len = bio->bi_iter.bi_size;
 
-    comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
+    comp_bio = btrfs_bio_alloc(cur_disk_byte);
     comp_bio->bi_opf = REQ_OP_READ;
     comp_bio->bi_private = cb;
     comp_bio->bi_end_io = end_compressed_bio_read;
@@ -634,12 +707,14 @@
         page->index = em_start >> PAGE_SHIFT;
 
         if (comp_bio->bi_iter.bi_size)
-            submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE,
-                          comp_bio, 0);
+            submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
+                          comp_bio, 0);
 
         page->mapping = NULL;
         if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
             PAGE_SIZE) {
+            unsigned int nr_sectors;
+
             ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
                           BTRFS_WQ_ENDIO_DATA);
             BUG_ON(ret); /* -ENOMEM */
@@ -654,19 +729,21 @@
             if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                 ret = btrfs_lookup_bio_sums(inode, comp_bio,
-                            sums);
+                            (u64)-1, sums);
                 BUG_ON(ret); /* -ENOMEM */
             }
-            sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
-                     fs_info->sectorsize);
-            ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
+            nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
+                          fs_info->sectorsize);
+            sums += csum_size * nr_sectors;
+
+            ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
             if (ret) {
                 comp_bio->bi_status = ret;
                 bio_endio(comp_bio);
             }
 
-            comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
+            comp_bio = btrfs_bio_alloc(cur_disk_byte);
             comp_bio->bi_opf = REQ_OP_READ;
             comp_bio->bi_private = cb;
             comp_bio->bi_end_io = end_compressed_bio_read;
@@ -680,11 +757,11 @@
     BUG_ON(ret); /* -ENOMEM */
 
     if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
-        ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
+        ret = btrfs_lookup_bio_sums(inode, comp_bio, (u64)-1, sums);
         BUG_ON(ret); /* -ENOMEM */
     }
 
-    ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
+    ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
     if (ret) {
         comp_bio->bi_status = ret;
         bio_endio(comp_bio);
@@ -753,6 +830,8 @@
     struct list_head list;
 };
 
+static struct workspace_manager heuristic_wsm;
+
 static void free_heuristic_ws(struct list_head *ws)
 {
     struct heuristic_ws *workspace;
@@ -765,7 +844,7 @@
     kfree(workspace);
 }
 
-static struct list_head *alloc_heuristic_ws(void)
+static struct list_head *alloc_heuristic_ws(unsigned int level)
 {
     struct heuristic_ws *ws;
 
@@ -792,65 +871,87 @@
     return ERR_PTR(-ENOMEM);
 }
 
-struct workspaces_list {
-    struct list_head idle_ws;
-    spinlock_t ws_lock;
-    /* Number of free workspaces */
-    int free_ws;
-    /* Total number of allocated workspaces */
-    atomic_t total_ws;
-    /* Waiters for a free workspace */
-    wait_queue_head_t ws_wait;
+const struct btrfs_compress_op btrfs_heuristic_compress = {
+    .workspace_manager = &heuristic_wsm,
 };
 
-static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
-
-static struct workspaces_list btrfs_heuristic_ws;
-
 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
+    /* The heuristic is represented as compression type 0 */
+    &btrfs_heuristic_compress,
     &btrfs_zlib_compress,
     &btrfs_lzo_compress,
     &btrfs_zstd_compress,
 };
 
-void __init btrfs_init_compress(void)
+static struct list_head *alloc_workspace(int type, unsigned int level)
 {
+    switch (type) {
+    case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
+    case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
+    case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
+    case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
+    default:
+        /*
+         * This can't happen, the type is validated several times
+         * before we get here.
+         */
+        BUG();
+    }
+}
+
+static void free_workspace(int type, struct list_head *ws)
+{
+    switch (type) {
+    case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
+    case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
+    case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
+    case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
+    default:
+        /*
+         * This can't happen, the type is validated several times
+         * before we get here.
+         */
+        BUG();
+    }
+}
+
+static void btrfs_init_workspace_manager(int type)
+{
+    struct workspace_manager *wsm;
     struct list_head *workspace;
-    int i;
 
-    INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
-    spin_lock_init(&btrfs_heuristic_ws.ws_lock);
-    atomic_set(&btrfs_heuristic_ws.total_ws, 0);
-    init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
+    wsm = btrfs_compress_op[type]->workspace_manager;
+    INIT_LIST_HEAD(&wsm->idle_ws);
+    spin_lock_init(&wsm->ws_lock);
+    atomic_set(&wsm->total_ws, 0);
+    init_waitqueue_head(&wsm->ws_wait);
 
-    workspace = alloc_heuristic_ws();
+    /*
+     * Preallocate one workspace for each compression type so we can
+     * guarantee forward progress in the worst case
+     */
+    workspace = alloc_workspace(type, 0);
     if (IS_ERR(workspace)) {
         pr_warn(
-    "BTRFS: cannot preallocate heuristic workspace, will try later\n");
+    "BTRFS: cannot preallocate compression workspace, will try later\n");
     } else {
-        atomic_set(&btrfs_heuristic_ws.total_ws, 1);
-        btrfs_heuristic_ws.free_ws = 1;
-        list_add(workspace, &btrfs_heuristic_ws.idle_ws);
+        atomic_set(&wsm->total_ws, 1);
+        wsm->free_ws = 1;
+        list_add(workspace, &wsm->idle_ws);
     }
+}
 
-    for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
-        INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
-        spin_lock_init(&btrfs_comp_ws[i].ws_lock);
-        atomic_set(&btrfs_comp_ws[i].total_ws, 0);
-        init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
+static void btrfs_cleanup_workspace_manager(int type)
+{
+    struct workspace_manager *wsman;
+    struct list_head *ws;
 
-        /*
-         * Preallocate one workspace for each compression type so
-         * we can guarantee forward progress in the worst case
-         */
-        workspace = btrfs_compress_op[i]->alloc_workspace();
-        if (IS_ERR(workspace)) {
-            pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
-        } else {
-            atomic_set(&btrfs_comp_ws[i].total_ws, 1);
-            btrfs_comp_ws[i].free_ws = 1;
-            list_add(workspace, &btrfs_comp_ws[i].idle_ws);
-        }
+    wsman = btrfs_compress_op[type]->workspace_manager;
+    while (!list_empty(&wsman->idle_ws)) {
+        ws = wsman->idle_ws.next;
+        list_del(ws);
+        free_workspace(type, ws);
+        atomic_dec(&wsman->total_ws);
     }
 }
 
@@ -860,11 +961,11 @@
  * Preallocation makes a forward progress guarantees and we do not return
  * errors.
  */
-static struct list_head *__find_workspace(int type, bool heuristic)
+struct list_head *btrfs_get_workspace(int type, unsigned int level)
 {
+    struct workspace_manager *wsm;
     struct list_head *workspace;
     int cpus = num_online_cpus();
-    int idx = type - 1;
     unsigned nofs_flag;
     struct list_head *idle_ws;
     spinlock_t *ws_lock;
@@ -872,19 +973,12 @@
     wait_queue_head_t *ws_wait;
     int *free_ws;
 
-    if (heuristic) {
-        idle_ws = &btrfs_heuristic_ws.idle_ws;
-        ws_lock = &btrfs_heuristic_ws.ws_lock;
-        total_ws = &btrfs_heuristic_ws.total_ws;
-        ws_wait = &btrfs_heuristic_ws.ws_wait;
-        free_ws = &btrfs_heuristic_ws.free_ws;
-    } else {
-        idle_ws = &btrfs_comp_ws[idx].idle_ws;
-        ws_lock = &btrfs_comp_ws[idx].ws_lock;
-        total_ws = &btrfs_comp_ws[idx].total_ws;
-        ws_wait = &btrfs_comp_ws[idx].ws_wait;
-        free_ws = &btrfs_comp_ws[idx].free_ws;
-    }
+    wsm = btrfs_compress_op[type]->workspace_manager;
+    idle_ws = &wsm->idle_ws;
+    ws_lock = &wsm->ws_lock;
+    total_ws = &wsm->total_ws;
+    ws_wait = &wsm->ws_wait;
+    free_ws = &wsm->free_ws;
 
 again:
     spin_lock(ws_lock);
@@ -915,10 +1009,7 @@
          * context of btrfs_compress_bio/btrfs_compress_pages
          */
         nofs_flag = memalloc_nofs_save();
-        if (heuristic)
-            workspace = alloc_heuristic_ws();
-        else
-            workspace = btrfs_compress_op[idx]->alloc_workspace();
+        workspace = alloc_workspace(type, level);
         memalloc_nofs_restore(nofs_flag);
 
         if (IS_ERR(workspace)) {
@@ -949,85 +1040,87 @@
     return workspace;
 }
 
-static struct list_head *find_workspace(int type)
+static struct list_head *get_workspace(int type, int level)
 {
-    return __find_workspace(type, false);
+    switch (type) {
+    case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
+    case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
+    case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
+    case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
+    default:
+        /*
+         * This can't happen, the type is validated several times
+         * before we get here.
+         */
+        BUG();
+    }
 }
 
 /*
  * put a workspace struct back on the list or free it if we have enough
  * idle ones sitting around
  */
-static void __free_workspace(int type, struct list_head *workspace,
-                 bool heuristic)
+void btrfs_put_workspace(int type, struct list_head *ws)
 {
-    int idx = type - 1;
+    struct workspace_manager *wsm;
     struct list_head *idle_ws;
     spinlock_t *ws_lock;
    atomic_t *total_ws;
    wait_queue_head_t *ws_wait;
    int *free_ws;
 
-    if (heuristic) {
-        idle_ws = &btrfs_heuristic_ws.idle_ws;
-        ws_lock = &btrfs_heuristic_ws.ws_lock;
-        total_ws = &btrfs_heuristic_ws.total_ws;
-        ws_wait = &btrfs_heuristic_ws.ws_wait;
-        free_ws = &btrfs_heuristic_ws.free_ws;
-    } else {
-        idle_ws = &btrfs_comp_ws[idx].idle_ws;
-        ws_lock = &btrfs_comp_ws[idx].ws_lock;
-        total_ws = &btrfs_comp_ws[idx].total_ws;
-        ws_wait = &btrfs_comp_ws[idx].ws_wait;
-        free_ws = &btrfs_comp_ws[idx].free_ws;
-    }
+    wsm = btrfs_compress_op[type]->workspace_manager;
+    idle_ws = &wsm->idle_ws;
+    ws_lock = &wsm->ws_lock;
+    total_ws = &wsm->total_ws;
+    ws_wait = &wsm->ws_wait;
+    free_ws = &wsm->free_ws;
 
     spin_lock(ws_lock);
     if (*free_ws <= num_online_cpus()) {
-        list_add(workspace, idle_ws);
+        list_add(ws, idle_ws);
         (*free_ws)++;
         spin_unlock(ws_lock);
         goto wake;
     }
     spin_unlock(ws_lock);
 
-    if (heuristic)
-        free_heuristic_ws(workspace);
-    else
-        btrfs_compress_op[idx]->free_workspace(workspace);
+    free_workspace(type, ws);
     atomic_dec(total_ws);
 wake:
     cond_wake_up(ws_wait);
 }
 
-static void free_workspace(int type, struct list_head *ws)
+static void put_workspace(int type, struct list_head *ws)
 {
-    return __free_workspace(type, ws, false);
+    switch (type) {
+    case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
+    case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
+    case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
+    case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
+    default:
+        /*
+         * This can't happen, the type is validated several times
+         * before we get here.
+         */
+        BUG();
+    }
 }
 
 /*
- * cleanup function for module exit
+ * Adjust @level according to the limits of the compression algorithm or
+ * fallback to default
  */
-static void free_workspaces(void)
+static unsigned int btrfs_compress_set_level(int type, unsigned level)
 {
-    struct list_head *workspace;
-    int i;
+    const struct btrfs_compress_op *ops = btrfs_compress_op[type];
 
-    while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
-        workspace = btrfs_heuristic_ws.idle_ws.next;
-        list_del(workspace);
-        free_heuristic_ws(workspace);
-        atomic_dec(&btrfs_heuristic_ws.total_ws);
-    }
+    if (level == 0)
+        level = ops->default_level;
+    else
+        level = min(level, ops->max_level);
 
-    for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
-        while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
-            workspace = btrfs_comp_ws[i].idle_ws.next;
-            list_del(workspace);
-            btrfs_compress_op[i]->free_workspace(workspace);
-            atomic_dec(&btrfs_comp_ws[i].total_ws);
-        }
-    }
+    return level;
 }
 
 /*
@@ -1059,18 +1152,16 @@
              unsigned long *total_in,
              unsigned long *total_out)
 {
+    int type = btrfs_compress_type(type_level);
+    int level = btrfs_compress_level(type_level);
     struct list_head *workspace;
     int ret;
-    int type = type_level & 0xF;
 
-    workspace = find_workspace(type);
-
-    btrfs_compress_op[type - 1]->set_level(workspace, type_level);
-    ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
-                          start, pages,
-                          out_pages,
-                          total_in, total_out);
-    free_workspace(type, workspace);
+    level = btrfs_compress_set_level(type, level);
+    workspace = get_workspace(type, level);
+    ret = compression_compress_pages(type, workspace, mapping, start, pages,
+             out_pages, total_in, total_out);
+    put_workspace(type, workspace);
     return ret;
 }
 
@@ -1094,9 +1185,9 @@
     int ret;
     int type = cb->compress_type;
 
-    workspace = find_workspace(type);
-    ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
-    free_workspace(type, workspace);
+    workspace = get_workspace(type, 0);
+    ret = compression_decompress_bio(type, workspace, cb);
+    put_workspace(type, workspace);
 
     return ret;
 }
@@ -1112,19 +1203,28 @@
     struct list_head *workspace;
     int ret;
 
-    workspace = find_workspace(type);
+    workspace = get_workspace(type, 0);
+    ret = compression_decompress(type, workspace, data_in, dest_page,
+            start_byte, srclen, destlen);
+    put_workspace(type, workspace);
 
-    ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
-                          dest_page, start_byte,
-                          srclen, destlen);
-
-    free_workspace(type, workspace);
     return ret;
+}
+
+void __init btrfs_init_compress(void)
+{
+    btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
+    btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
+    btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
+    zstd_init_workspace_manager();
 }
 
 void __cold btrfs_exit_compress(void)
 {
-    free_workspaces();
+    btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
+    btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
+    btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
+    zstd_cleanup_workspace_manager();
 }
 
 /*
@@ -1172,7 +1272,7 @@
     /* copy bytes from the working buffer into the pages */
     while (working_bytes > 0) {
         bytes = min_t(unsigned long, bvec.bv_len,
-                PAGE_SIZE - buf_offset);
+                PAGE_SIZE - (buf_offset % PAGE_SIZE));
         bytes = min(bytes, working_bytes);
 
         kaddr = kmap_atomic(bvec.bv_page);
@@ -1226,7 +1326,7 @@
 /*
  * Shannon Entropy calculation
  *
- * Pure byte distribution analysis fails to determine compressiability of data.
+ * Pure byte distribution analysis fails to determine compressibility of data.
  * Try calculating entropy to estimate the average minimum number of bits
  * needed to encode the sampled data.
  *
@@ -1290,7 +1390,7 @@
 /*
  * Use 4 bits as radix base
- * Use 16 u32 counters for calculating new possition in buf array
+ * Use 16 u32 counters for calculating new position in buf array
  *
  * @array - array that will be sorted
 * @array_buf - buffer array to store sorting results
@@ -1535,7 +1635,7 @@
  */
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
 {
-    struct list_head *ws_list = __find_workspace(0, true);
+    struct list_head *ws_list = get_workspace(0, 0);
     struct heuristic_ws *ws;
     u32 i;
     u8 byte;
@@ -1604,18 +1704,29 @@
     }
 
 out:
-    __free_workspace(0, ws_list, true);
+    put_workspace(0, ws_list);
     return ret;
 }
 
-unsigned int btrfs_compress_str2level(const char *str)
+/*
+ * Convert the compression suffix (eg. after "zlib" starting with ":") to
+ * level, unrecognized string will set the default level
+ */
+unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
 {
-    if (strncmp(str, "zlib", 4) != 0)
+    unsigned int level = 0;
+    int ret;
+
+    if (!type)
         return 0;
 
-    /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
-    if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
-        return str[5] - '0';
+    if (str[0] == ':') {
+        ret = kstrtouint(str + 1, 10, &level);
+        if (ret)
+            level = 0;
+    }
 
-    return BTRFS_ZLIB_DEFAULT_LEVEL;
+    level = btrfs_compress_set_level(type, level);
+
+    return level;
 }
--
Gitblit v1.6.2