| .. | .. |
|---|
| 33 | 33 | #include <linux/sysfs.h> |
|---|
| 34 | 34 | #include <linux/debugfs.h> |
|---|
| 35 | 35 | #include <linux/cpuhotplug.h> |
|---|
| 36 | +#include <linux/part_stat.h> |
|---|
| 36 | 37 | |
|---|
| 37 | 38 | #include "zram_drv.h" |
|---|
| 38 | 39 | |
|---|
| .. | .. |
|---|
| 41 | 42 | static DEFINE_MUTEX(zram_index_mutex); |
|---|
| 42 | 43 | |
|---|
| 43 | 44 | static int zram_major; |
|---|
| 44 | | -static const char *default_compressor = "lzo"; |
|---|
| 45 | +static const char *default_compressor = "lzo-rle"; |
|---|
| 45 | 46 | |
|---|
| 46 | 47 | /* Module params (documentation at end) */ |
|---|
| 47 | 48 | static unsigned int num_devices = 1; |
|---|
| .. | .. |
|---|
| 51 | 52 | */ |
|---|
| 52 | 53 | static size_t huge_class_size; |
|---|
| 53 | 54 | |
|---|
| 55 | +static const struct block_device_operations zram_devops; |
|---|
| 56 | +static const struct block_device_operations zram_wb_devops; |
|---|
| 57 | + |
|---|
| 54 | 58 | static void zram_free_page(struct zram *zram, size_t index); |
|---|
| 55 | 59 | static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, |
|---|
| 56 | 60 | u32 index, int offset, struct bio *bio); |
|---|
| 57 | 61 | |
|---|
| 58 | | - |
|---|
| 59 | | -#ifdef CONFIG_PREEMPT_RT_BASE |
|---|
| 62 | +#ifdef CONFIG_PREEMPT_RT |
|---|
| 60 | 63 | static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) |
|---|
| 61 | 64 | { |
|---|
| 62 | 65 | size_t index; |
|---|
| .. | .. |
|---|
| 71 | 74 | |
|---|
| 72 | 75 | ret = spin_trylock(&zram->table[index].lock); |
|---|
| 73 | 76 | if (ret) |
|---|
| 74 | | - __set_bit(ZRAM_LOCK, &zram->table[index].value); |
|---|
| 77 | + __set_bit(ZRAM_LOCK, &zram->table[index].flags); |
|---|
| 75 | 78 | return ret; |
|---|
| 76 | 79 | } |
|---|
| 77 | 80 | |
|---|
| 78 | 81 | static void zram_slot_lock(struct zram *zram, u32 index) |
|---|
| 79 | 82 | { |
|---|
| 80 | 83 | spin_lock(&zram->table[index].lock); |
|---|
| 81 | | - __set_bit(ZRAM_LOCK, &zram->table[index].value); |
|---|
| 84 | + __set_bit(ZRAM_LOCK, &zram->table[index].flags); |
|---|
| 82 | 85 | } |
|---|
| 83 | 86 | |
|---|
| 84 | 87 | static void zram_slot_unlock(struct zram *zram, u32 index) |
|---|
| 85 | 88 | { |
|---|
| 86 | | - __clear_bit(ZRAM_LOCK, &zram->table[index].value); |
|---|
| 89 | + __clear_bit(ZRAM_LOCK, &zram->table[index].flags); |
|---|
| 87 | 90 | spin_unlock(&zram->table[index].lock); |
|---|
| 88 | 91 | } |
|---|
| 89 | 92 | |
|---|
| 90 | 93 | #else |
|---|
| 94 | + |
|---|
| 91 | 95 | static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { } |
|---|
| 92 | 96 | |
|---|
| 93 | 97 | static int zram_slot_trylock(struct zram *zram, u32 index) |
|---|
| .. | .. |
|---|
| 242 | 246 | |
|---|
| 243 | 247 | static bool page_same_filled(void *ptr, unsigned long *element) |
|---|
| 244 | 248 | { |
|---|
| 245 | | - unsigned int pos; |
|---|
| 246 | 249 | unsigned long *page; |
|---|
| 247 | 250 | unsigned long val; |
|---|
| 251 | + unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; |
|---|
| 248 | 252 | |
|---|
| 249 | 253 | page = (unsigned long *)ptr; |
|---|
| 250 | 254 | val = page[0]; |
|---|
| 251 | 255 | |
|---|
| 252 | | - for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) { |
|---|
| 256 | + if (val != page[last_pos]) |
|---|
| 257 | + return false; |
|---|
| 258 | + |
|---|
| 259 | + for (pos = 1; pos < last_pos; pos++) { |
|---|
| 253 | 260 | if (val != page[pos]) |
|---|
| 254 | 261 | return false; |
|---|
| 255 | 262 | } |
|---|
| .. | .. |
|---|
| 325 | 332 | struct zram *zram = dev_to_zram(dev); |
|---|
| 326 | 333 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
|---|
| 327 | 334 | int index; |
|---|
| 328 | | - char mode_buf[8]; |
|---|
| 329 | | - ssize_t sz; |
|---|
| 330 | 335 | |
|---|
| 331 | | - sz = strscpy(mode_buf, buf, sizeof(mode_buf)); |
|---|
| 332 | | - if (sz <= 0) |
|---|
| 333 | | - return -EINVAL; |
|---|
| 334 | | - |
|---|
| 335 | | - /* ignore trailing new line */ |
|---|
| 336 | | - if (mode_buf[sz - 1] == '\n') |
|---|
| 337 | | - mode_buf[sz - 1] = 0x00; |
|---|
| 338 | | - |
|---|
| 339 | | - if (strcmp(mode_buf, "all")) |
|---|
| 336 | + if (!sysfs_streq(buf, "all")) |
|---|
| 340 | 337 | return -EINVAL; |
|---|
| 341 | 338 | |
|---|
| 342 | 339 | down_read(&zram->init_lock); |
|---|
| .. | .. |
|---|
| 449 | 446 | zram->backing_dev = NULL; |
|---|
| 450 | 447 | zram->old_block_size = 0; |
|---|
| 451 | 448 | zram->bdev = NULL; |
|---|
| 452 | | - zram->disk->queue->backing_dev_info->capabilities |= |
|---|
| 453 | | - BDI_CAP_SYNCHRONOUS_IO; |
|---|
| 449 | + zram->disk->fops = &zram_devops; |
|---|
| 454 | 450 | kvfree(zram->bitmap); |
|---|
| 455 | 451 | zram->bitmap = NULL; |
|---|
| 456 | 452 | } |
|---|
| .. | .. |
|---|
| 516 | 512 | if (sz > 0 && file_name[sz - 1] == '\n') |
|---|
| 517 | 513 | file_name[sz - 1] = 0x00; |
|---|
| 518 | 514 | |
|---|
| 519 | | - backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0); |
|---|
| 515 | + backing_dev = filp_open_block(file_name, O_RDWR|O_LARGEFILE, 0); |
|---|
| 520 | 516 | if (IS_ERR(backing_dev)) { |
|---|
| 521 | 517 | err = PTR_ERR(backing_dev); |
|---|
| 522 | 518 | backing_dev = NULL; |
|---|
| .. | .. |
|---|
| 532 | 528 | goto out; |
|---|
| 533 | 529 | } |
|---|
| 534 | 530 | |
|---|
| 535 | | - bdev = bdgrab(I_BDEV(inode)); |
|---|
| 536 | | - err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); |
|---|
| 537 | | - if (err < 0) { |
|---|
| 531 | + bdev = blkdev_get_by_dev(inode->i_rdev, |
|---|
| 532 | + FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); |
|---|
| 533 | + if (IS_ERR(bdev)) { |
|---|
| 534 | + err = PTR_ERR(bdev); |
|---|
| 538 | 535 | bdev = NULL; |
|---|
| 539 | 536 | goto out; |
|---|
| 540 | 537 | } |
|---|
| .. | .. |
|---|
| 569 | 566 | * freely but in fact, IO is going on so finally could cause |
|---|
| 570 | 567 | * use-after-free when the IO is really done. |
|---|
| 571 | 568 | */ |
|---|
| 572 | | - zram->disk->queue->backing_dev_info->capabilities &= |
|---|
| 573 | | - ~BDI_CAP_SYNCHRONOUS_IO; |
|---|
| 569 | + zram->disk->fops = &zram_wb_devops; |
|---|
| 574 | 570 | up_write(&zram->init_lock); |
|---|
| 575 | 571 | |
|---|
| 576 | 572 | pr_info("setup backing device %s\n", file_name); |
|---|
| .. | .. |
|---|
| 659 | 655 | return 1; |
|---|
| 660 | 656 | } |
|---|
| 661 | 657 | |
|---|
| 658 | +#define PAGE_WB_SIG "page_index=" |
|---|
| 659 | + |
|---|
| 660 | +#define PAGE_WRITEBACK 0 |
|---|
| 662 | 661 | #define HUGE_WRITEBACK 1 |
|---|
| 663 | 662 | #define IDLE_WRITEBACK 2 |
|---|
| 663 | + |
|---|
| 664 | 664 | |
|---|
| 665 | 665 | static ssize_t writeback_store(struct device *dev, |
|---|
| 666 | 666 | struct device_attribute *attr, const char *buf, size_t len) |
|---|
| 667 | 667 | { |
|---|
| 668 | 668 | struct zram *zram = dev_to_zram(dev); |
|---|
| 669 | 669 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
|---|
| 670 | | - unsigned long index; |
|---|
| 670 | + unsigned long index = 0; |
|---|
| 671 | 671 | struct bio bio; |
|---|
| 672 | 672 | struct bio_vec bio_vec; |
|---|
| 673 | 673 | struct page *page; |
|---|
| 674 | | - ssize_t ret, sz; |
|---|
| 675 | | - char mode_buf[8]; |
|---|
| 676 | | - int mode = -1; |
|---|
| 674 | + ssize_t ret = len; |
|---|
| 675 | + int mode, err; |
|---|
| 677 | 676 | unsigned long blk_idx = 0; |
|---|
| 678 | 677 | |
|---|
| 679 | | - sz = strscpy(mode_buf, buf, sizeof(mode_buf)); |
|---|
| 680 | | - if (sz <= 0) |
|---|
| 681 | | - return -EINVAL; |
|---|
| 682 | | - |
|---|
| 683 | | - /* ignore trailing newline */ |
|---|
| 684 | | - if (mode_buf[sz - 1] == '\n') |
|---|
| 685 | | - mode_buf[sz - 1] = 0x00; |
|---|
| 686 | | - |
|---|
| 687 | | - if (!strcmp(mode_buf, "idle")) |
|---|
| 678 | + if (sysfs_streq(buf, "idle")) |
|---|
| 688 | 679 | mode = IDLE_WRITEBACK; |
|---|
| 689 | | - else if (!strcmp(mode_buf, "huge")) |
|---|
| 680 | + else if (sysfs_streq(buf, "huge")) |
|---|
| 690 | 681 | mode = HUGE_WRITEBACK; |
|---|
| 682 | + else { |
|---|
| 683 | + if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1)) |
|---|
| 684 | + return -EINVAL; |
|---|
| 691 | 685 | |
|---|
| 692 | | - if (mode == -1) |
|---|
| 693 | | - return -EINVAL; |
|---|
| 686 | + if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) || |
|---|
| 687 | + index >= nr_pages) |
|---|
| 688 | + return -EINVAL; |
|---|
| 689 | + |
|---|
| 690 | + nr_pages = 1; |
|---|
| 691 | + mode = PAGE_WRITEBACK; |
|---|
| 692 | + } |
|---|
| 694 | 693 | |
|---|
| 695 | 694 | down_read(&zram->init_lock); |
|---|
| 696 | 695 | if (!init_done(zram)) { |
|---|
| .. | .. |
|---|
| 709 | 708 | goto release_init_lock; |
|---|
| 710 | 709 | } |
|---|
| 711 | 710 | |
|---|
| 712 | | - for (index = 0; index < nr_pages; index++) { |
|---|
| 711 | + for (; nr_pages != 0; index++, nr_pages--) { |
|---|
| 713 | 712 | struct bio_vec bvec; |
|---|
| 714 | 713 | |
|---|
| 715 | 714 | bvec.bv_page = page; |
|---|
| .. | .. |
|---|
| 774 | 773 | * XXX: A single page IO would be inefficient for write |
|---|
| 775 | 774 | * but it would be not bad as starter. |
|---|
| 776 | 775 | */ |
|---|
| 777 | | - ret = submit_bio_wait(&bio); |
|---|
| 778 | | - if (ret) { |
|---|
| 776 | + err = submit_bio_wait(&bio); |
|---|
| 777 | + if (err) { |
|---|
| 779 | 778 | zram_slot_lock(zram, index); |
|---|
| 780 | 779 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); |
|---|
| 781 | 780 | zram_clear_flag(zram, index, ZRAM_IDLE); |
|---|
| 782 | 781 | zram_slot_unlock(zram, index); |
|---|
| 782 | + /* |
|---|
| 783 | + * Return the last IO error unless every IO |
|---|
| 784 | + * succeeded. |
|---|
| 785 | + */ |
|---|
| 786 | + ret = err; |
|---|
| 783 | 787 | continue; |
|---|
| 784 | 788 | } |
|---|
| 785 | 789 | |
|---|
| .. | .. |
|---|
| 817 | 821 | |
|---|
| 818 | 822 | if (blk_idx) |
|---|
| 819 | 823 | free_block_bdev(zram, blk_idx); |
|---|
| 820 | | - ret = len; |
|---|
| 821 | 824 | __free_page(page); |
|---|
| 822 | 825 | release_init_lock: |
|---|
| 823 | 826 | up_read(&zram->init_lock); |
|---|
| .. | .. |
|---|
| 845 | 848 | } |
|---|
| 846 | 849 | |
|---|
| 847 | 850 | /* |
|---|
| 848 | | - * Block layer want one ->make_request_fn to be active at a time |
|---|
| 849 | | - * so if we use chained IO with parent IO in same context, |
|---|
| 850 | | - * it's a deadlock. To avoid, it, it uses worker thread context. |
|---|
| 851 | + * Block layer wants one ->submit_bio to be active at a time, so if we use |
|---|
| 852 | + * chained IO with parent IO in same context, it's a deadlock. To avoid that, |
|---|
| 853 | + * use a worker thread context. |
|---|
| 851 | 854 | */ |
|---|
| 852 | 855 | static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, |
|---|
| 853 | 856 | unsigned long entry, struct bio *bio) |
|---|
| .. | .. |
|---|
| 1180 | 1183 | #endif |
|---|
| 1181 | 1184 | static DEVICE_ATTR_RO(debug_stat); |
|---|
| 1182 | 1185 | |
|---|
| 1183 | | - |
|---|
| 1184 | | - |
|---|
| 1185 | 1186 | static void zram_meta_free(struct zram *zram, u64 disksize) |
|---|
| 1186 | 1187 | { |
|---|
| 1187 | 1188 | size_t num_pages = disksize >> PAGE_SHIFT; |
|---|
| .. | .. |
|---|
| 1271 | 1272 | static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, |
|---|
| 1272 | 1273 | struct bio *bio, bool partial_io) |
|---|
| 1273 | 1274 | { |
|---|
| 1274 | | - int ret; |
|---|
| 1275 | + struct zcomp_strm *zstrm; |
|---|
| 1275 | 1276 | unsigned long handle; |
|---|
| 1276 | 1277 | unsigned int size; |
|---|
| 1277 | 1278 | void *src, *dst; |
|---|
| 1278 | | - struct zcomp_strm *zstrm; |
|---|
| 1279 | + int ret; |
|---|
| 1279 | 1280 | |
|---|
| 1280 | 1281 | zram_slot_lock(zram, index); |
|---|
| 1281 | 1282 | if (zram_test_flag(zram, index, ZRAM_WB)) { |
|---|
| .. | .. |
|---|
| 1306 | 1307 | |
|---|
| 1307 | 1308 | size = zram_get_obj_size(zram, index); |
|---|
| 1308 | 1309 | |
|---|
| 1309 | | - zstrm = zcomp_stream_get(zram->comp); |
|---|
| 1310 | + if (size != PAGE_SIZE) |
|---|
| 1311 | + zstrm = zcomp_stream_get(zram->comp); |
|---|
| 1312 | + |
|---|
| 1310 | 1313 | src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); |
|---|
| 1311 | 1314 | if (size == PAGE_SIZE) { |
|---|
| 1312 | 1315 | dst = kmap_atomic(page); |
|---|
| .. | .. |
|---|
| 1314 | 1317 | kunmap_atomic(dst); |
|---|
| 1315 | 1318 | ret = 0; |
|---|
| 1316 | 1319 | } else { |
|---|
| 1317 | | - |
|---|
| 1318 | 1320 | dst = kmap_atomic(page); |
|---|
| 1319 | 1321 | ret = zcomp_decompress(zstrm, src, size, dst); |
|---|
| 1320 | 1322 | kunmap_atomic(dst); |
|---|
| 1323 | + zcomp_stream_put(zram->comp); |
|---|
| 1321 | 1324 | } |
|---|
| 1322 | 1325 | zs_unmap_object(zram->mem_pool, handle); |
|---|
| 1323 | | - zcomp_stream_put(zram->comp); |
|---|
| 1324 | 1326 | zram_slot_unlock(zram, index); |
|---|
| 1325 | 1327 | |
|---|
| 1326 | 1328 | /* Should NEVER happen. Return bio error if it does. */ |
|---|
| 1327 | | - if (unlikely(ret)) |
|---|
| 1329 | + if (WARN_ON(ret)) |
|---|
| 1328 | 1330 | pr_err("Decompression failed! err=%d, page=%u\n", ret, index); |
|---|
| 1329 | 1331 | |
|---|
| 1330 | 1332 | return ret; |
|---|
| .. | .. |
|---|
| 1419 | 1421 | __GFP_KSWAPD_RECLAIM | |
|---|
| 1420 | 1422 | __GFP_NOWARN | |
|---|
| 1421 | 1423 | __GFP_HIGHMEM | |
|---|
| 1422 | | - __GFP_MOVABLE); |
|---|
| 1424 | + __GFP_MOVABLE | |
|---|
| 1425 | + __GFP_CMA); |
|---|
| 1423 | 1426 | if (!handle) { |
|---|
| 1424 | 1427 | zcomp_stream_put(zram->comp); |
|---|
| 1425 | 1428 | atomic64_inc(&zram->stats.writestall); |
|---|
| 1426 | 1429 | handle = zs_malloc(zram->mem_pool, comp_len, |
|---|
| 1427 | 1430 | GFP_NOIO | __GFP_HIGHMEM | |
|---|
| 1428 | | - __GFP_MOVABLE); |
|---|
| 1431 | + __GFP_MOVABLE | __GFP_CMA); |
|---|
| 1429 | 1432 | if (handle) |
|---|
| 1430 | 1433 | goto compress_again; |
|---|
| 1431 | 1434 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 1566 | 1569 | static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, |
|---|
| 1567 | 1570 | int offset, unsigned int op, struct bio *bio) |
|---|
| 1568 | 1571 | { |
|---|
| 1569 | | - unsigned long start_time = jiffies; |
|---|
| 1570 | | - struct request_queue *q = zram->disk->queue; |
|---|
| 1571 | 1572 | int ret; |
|---|
| 1572 | | - |
|---|
| 1573 | | - generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT, |
|---|
| 1574 | | - &zram->disk->part0); |
|---|
| 1575 | 1573 | |
|---|
| 1576 | 1574 | if (!op_is_write(op)) { |
|---|
| 1577 | 1575 | atomic64_inc(&zram->stats.num_reads); |
|---|
| .. | .. |
|---|
| 1581 | 1579 | atomic64_inc(&zram->stats.num_writes); |
|---|
| 1582 | 1580 | ret = zram_bvec_write(zram, bvec, index, offset, bio); |
|---|
| 1583 | 1581 | } |
|---|
| 1584 | | - |
|---|
| 1585 | | - generic_end_io_acct(q, op, &zram->disk->part0, start_time); |
|---|
| 1586 | 1582 | |
|---|
| 1587 | 1583 | zram_slot_lock(zram, index); |
|---|
| 1588 | 1584 | zram_accessed(zram, index); |
|---|
| .. | .. |
|---|
| 1604 | 1600 | u32 index; |
|---|
| 1605 | 1601 | struct bio_vec bvec; |
|---|
| 1606 | 1602 | struct bvec_iter iter; |
|---|
| 1603 | + unsigned long start_time; |
|---|
| 1607 | 1604 | |
|---|
| 1608 | 1605 | index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
|---|
| 1609 | 1606 | offset = (bio->bi_iter.bi_sector & |
|---|
| .. | .. |
|---|
| 1619 | 1616 | break; |
|---|
| 1620 | 1617 | } |
|---|
| 1621 | 1618 | |
|---|
| 1619 | + start_time = bio_start_io_acct(bio); |
|---|
| 1622 | 1620 | bio_for_each_segment(bvec, bio, iter) { |
|---|
| 1623 | 1621 | struct bio_vec bv = bvec; |
|---|
| 1624 | 1622 | unsigned int unwritten = bvec.bv_len; |
|---|
| .. | .. |
|---|
| 1627 | 1625 | bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset, |
|---|
| 1628 | 1626 | unwritten); |
|---|
| 1629 | 1627 | if (zram_bvec_rw(zram, &bv, index, offset, |
|---|
| 1630 | | - bio_op(bio), bio) < 0) |
|---|
| 1631 | | - goto out; |
|---|
| 1628 | + bio_op(bio), bio) < 0) { |
|---|
| 1629 | + bio->bi_status = BLK_STS_IOERR; |
|---|
| 1630 | + break; |
|---|
| 1631 | + } |
|---|
| 1632 | 1632 | |
|---|
| 1633 | 1633 | bv.bv_offset += bv.bv_len; |
|---|
| 1634 | 1634 | unwritten -= bv.bv_len; |
|---|
| .. | .. |
|---|
| 1636 | 1636 | update_position(&index, &offset, &bv); |
|---|
| 1637 | 1637 | } while (unwritten); |
|---|
| 1638 | 1638 | } |
|---|
| 1639 | | - |
|---|
| 1639 | + bio_end_io_acct(bio, start_time); |
|---|
| 1640 | 1640 | bio_endio(bio); |
|---|
| 1641 | | - return; |
|---|
| 1642 | | - |
|---|
| 1643 | | -out: |
|---|
| 1644 | | - bio_io_error(bio); |
|---|
| 1645 | 1641 | } |
|---|
| 1646 | 1642 | |
|---|
| 1647 | 1643 | /* |
|---|
| 1648 | 1644 | * Handler function for all zram I/O requests. |
|---|
| 1649 | 1645 | */ |
|---|
| 1650 | | -static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) |
|---|
| 1646 | +static blk_qc_t zram_submit_bio(struct bio *bio) |
|---|
| 1651 | 1647 | { |
|---|
| 1652 | | - struct zram *zram = queue->queuedata; |
|---|
| 1648 | + struct zram *zram = bio->bi_disk->private_data; |
|---|
| 1653 | 1649 | |
|---|
| 1654 | 1650 | if (!valid_io_request(zram, bio->bi_iter.bi_sector, |
|---|
| 1655 | 1651 | bio->bi_iter.bi_size)) { |
|---|
| .. | .. |
|---|
| 1689 | 1685 | u32 index; |
|---|
| 1690 | 1686 | struct zram *zram; |
|---|
| 1691 | 1687 | struct bio_vec bv; |
|---|
| 1688 | + unsigned long start_time; |
|---|
| 1692 | 1689 | |
|---|
| 1693 | 1690 | if (PageTransHuge(page)) |
|---|
| 1694 | 1691 | return -ENOTSUPP; |
|---|
| .. | .. |
|---|
| 1707 | 1704 | bv.bv_len = PAGE_SIZE; |
|---|
| 1708 | 1705 | bv.bv_offset = 0; |
|---|
| 1709 | 1706 | |
|---|
| 1707 | + start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op); |
|---|
| 1710 | 1708 | ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL); |
|---|
| 1709 | + disk_end_io_acct(bdev->bd_disk, op, start_time); |
|---|
| 1711 | 1710 | out: |
|---|
| 1712 | 1711 | /* |
|---|
| 1713 | 1712 | * If I/O fails, just return error(ie, non-zero) without |
|---|
| .. | .. |
|---|
| 1799 | 1798 | zram->disksize = disksize; |
|---|
| 1800 | 1799 | set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); |
|---|
| 1801 | 1800 | |
|---|
| 1802 | | - revalidate_disk(zram->disk); |
|---|
| 1801 | + revalidate_disk_size(zram->disk, true); |
|---|
| 1803 | 1802 | up_write(&zram->init_lock); |
|---|
| 1804 | 1803 | |
|---|
| 1805 | 1804 | return len; |
|---|
| .. | .. |
|---|
| 1846 | 1845 | /* Make sure all the pending I/O are finished */ |
|---|
| 1847 | 1846 | fsync_bdev(bdev); |
|---|
| 1848 | 1847 | zram_reset_device(zram); |
|---|
| 1849 | | - revalidate_disk(zram->disk); |
|---|
| 1848 | + revalidate_disk_size(zram->disk, true); |
|---|
| 1850 | 1849 | bdput(bdev); |
|---|
| 1851 | 1850 | |
|---|
| 1852 | 1851 | mutex_lock(&bdev->bd_mutex); |
|---|
| .. | .. |
|---|
| 1873 | 1872 | |
|---|
| 1874 | 1873 | static const struct block_device_operations zram_devops = { |
|---|
| 1875 | 1874 | .open = zram_open, |
|---|
| 1875 | + .submit_bio = zram_submit_bio, |
|---|
| 1876 | 1876 | .swap_slot_free_notify = zram_slot_free_notify, |
|---|
| 1877 | 1877 | .rw_page = zram_rw_page, |
|---|
| 1878 | + .owner = THIS_MODULE |
|---|
| 1879 | +}; |
|---|
| 1880 | + |
|---|
| 1881 | +static const struct block_device_operations zram_wb_devops = { |
|---|
| 1882 | + .open = zram_open, |
|---|
| 1883 | + .submit_bio = zram_submit_bio, |
|---|
| 1884 | + .swap_slot_free_notify = zram_slot_free_notify, |
|---|
| 1878 | 1885 | .owner = THIS_MODULE |
|---|
| 1879 | 1886 | }; |
|---|
| 1880 | 1887 | |
|---|
| .. | .. |
|---|
| 1951 | 1958 | #ifdef CONFIG_ZRAM_WRITEBACK |
|---|
| 1952 | 1959 | spin_lock_init(&zram->wb_limit_lock); |
|---|
| 1953 | 1960 | #endif |
|---|
| 1954 | | - queue = blk_alloc_queue(GFP_KERNEL); |
|---|
| 1961 | + queue = blk_alloc_queue(NUMA_NO_NODE); |
|---|
| 1955 | 1962 | if (!queue) { |
|---|
| 1956 | 1963 | pr_err("Error allocating disk queue for device %d\n", |
|---|
| 1957 | 1964 | device_id); |
|---|
| 1958 | 1965 | ret = -ENOMEM; |
|---|
| 1959 | 1966 | goto out_free_idr; |
|---|
| 1960 | 1967 | } |
|---|
| 1961 | | - |
|---|
| 1962 | | - blk_queue_make_request(queue, zram_make_request); |
|---|
| 1963 | 1968 | |
|---|
| 1964 | 1969 | /* gendisk structure */ |
|---|
| 1965 | 1970 | zram->disk = alloc_disk(1); |
|---|
| .. | .. |
|---|
| 1974 | 1979 | zram->disk->first_minor = device_id; |
|---|
| 1975 | 1980 | zram->disk->fops = &zram_devops; |
|---|
| 1976 | 1981 | zram->disk->queue = queue; |
|---|
| 1977 | | - zram->disk->queue->queuedata = zram; |
|---|
| 1978 | 1982 | zram->disk->private_data = zram; |
|---|
| 1979 | 1983 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); |
|---|
| 1980 | 1984 | |
|---|
| .. | .. |
|---|
| 2008 | 2012 | if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE) |
|---|
| 2009 | 2013 | blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX); |
|---|
| 2010 | 2014 | |
|---|
| 2011 | | - zram->disk->queue->backing_dev_info->capabilities |= |
|---|
| 2012 | | - (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO); |
|---|
| 2013 | | - disk_to_dev(zram->disk)->groups = zram_disk_attr_groups; |
|---|
| 2014 | | - add_disk(zram->disk); |
|---|
| 2015 | + blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue); |
|---|
| 2016 | + device_add_disk(NULL, zram->disk, zram_disk_attr_groups); |
|---|
| 2015 | 2017 | |
|---|
| 2016 | 2018 | strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); |
|---|
| 2017 | 2019 | |
|---|
| .. | .. |
|---|
| 2047 | 2049 | mutex_unlock(&bdev->bd_mutex); |
|---|
| 2048 | 2050 | |
|---|
| 2049 | 2051 | zram_debugfs_unregister(zram); |
|---|
| 2052 | + |
|---|
| 2050 | 2053 | /* Make sure all the pending I/O are finished */ |
|---|
| 2051 | 2054 | fsync_bdev(bdev); |
|---|
| 2052 | 2055 | zram_reset_device(zram); |
|---|