.. | ..
33 | 33 | #include <linux/sysfs.h>
34 | 34 | #include <linux/debugfs.h>
35 | 35 | #include <linux/cpuhotplug.h>
| 36 | +#include <linux/part_stat.h>
36 | 37 |
37 | 38 | #include "zram_drv.h"
38 | 39 |
.. | ..
41 | 42 | static DEFINE_MUTEX(zram_index_mutex);
42 | 43 |
43 | 44 | static int zram_major;
44 | | -static const char *default_compressor = "lzo";
| 45 | +static const char *default_compressor = "lzo-rle";
45 | 46 |
46 | 47 | /* Module params (documentation at end) */
47 | 48 | static unsigned int num_devices = 1;
.. | ..
51 | 52 |  */
52 | 53 | static size_t huge_class_size;
53 | 54 |
| 55 | +static const struct block_device_operations zram_devops;
| 56 | +static const struct block_device_operations zram_wb_devops;
| 57 | +
54 | 58 | static void zram_free_page(struct zram *zram, size_t index);
55 | 59 | static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
56 | 60 | 		u32 index, int offset, struct bio *bio);
57 | 61 |
58 | | -
59 | | -#ifdef CONFIG_PREEMPT_RT_BASE
60 | | -static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
61 | | -{
62 | | -	size_t index;
63 | | -
64 | | -	for (index = 0; index < num_pages; index++)
65 | | -		spin_lock_init(&zram->table[index].lock);
66 | | -}
67 | | -
68 | | -static int zram_slot_trylock(struct zram *zram, u32 index)
69 | | -{
70 | | -	int ret;
71 | | -
72 | | -	ret = spin_trylock(&zram->table[index].lock);
73 | | -	if (ret)
74 | | -		__set_bit(ZRAM_LOCK, &zram->table[index].value);
75 | | -	return ret;
76 | | -}
77 | | -
78 | | -static void zram_slot_lock(struct zram *zram, u32 index)
79 | | -{
80 | | -	spin_lock(&zram->table[index].lock);
81 | | -	__set_bit(ZRAM_LOCK, &zram->table[index].value);
82 | | -}
83 | | -
84 | | -static void zram_slot_unlock(struct zram *zram, u32 index)
85 | | -{
86 | | -	__clear_bit(ZRAM_LOCK, &zram->table[index].value);
87 | | -	spin_unlock(&zram->table[index].lock);
88 | | -}
89 | | -
90 | | -#else
91 | | -static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
92 | 62 |
93 | 63 | static int zram_slot_trylock(struct zram *zram, u32 index)
94 | 64 | {
.. | ..
104 | 74 | {
105 | 75 | 	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
106 | 76 | }
107 | | -#endif
108 | 77 |
109 | 78 | static inline bool init_done(struct zram *zram)
110 | 79 | {
.. | ..
242 | 211 |
243 | 212 | static bool page_same_filled(void *ptr, unsigned long *element)
244 | 213 | {
245 | | -	unsigned int pos;
246 | 214 | 	unsigned long *page;
247 | 215 | 	unsigned long val;
| 216 | +	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
248 | 217 |
249 | 218 | 	page = (unsigned long *)ptr;
250 | 219 | 	val = page[0];
251 | 220 |
252 | | -	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
| 221 | +	if (val != page[last_pos])
| 222 | +		return false;
| 223 | +
| 224 | +	for (pos = 1; pos < last_pos; pos++) {
253 | 225 | 		if (val != page[pos])
254 | 226 | 			return false;
255 | 227 | 	}
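The rewritten page_same_filled() adds a cheap early exit: compare the first and last words before scanning the body, since a same-filled page that was partially overwritten typically differs at the tail. A runnable userspace sketch of the same check (DEMO_PAGE_SIZE and the main() harness are illustrative, not part of the patch):

```c
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096	/* stand-in for PAGE_SIZE */

static bool page_same_filled_demo(void *ptr, unsigned long *element)
{
	unsigned long *page = ptr;
	unsigned long val = page[0];
	unsigned int pos, last_pos = DEMO_PAGE_SIZE / sizeof(*page) - 1;

	if (val != page[last_pos])	/* early exit on a dirty tail */
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}
	*element = val;			/* remember the fill pattern */
	return true;
}

int main(void)
{
	static unsigned long buf[DEMO_PAGE_SIZE / sizeof(unsigned long)];
	unsigned long element;

	printf("zero page:  %d\n", page_same_filled_demo(buf, &element));
	buf[DEMO_PAGE_SIZE / sizeof(unsigned long) - 1] = 1;
	printf("dirty tail: %d\n", page_same_filled_demo(buf, &element));
	return 0;
}
```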
.. | ..
325 | 297 | 	struct zram *zram = dev_to_zram(dev);
326 | 298 | 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
327 | 299 | 	int index;
328 | | -	char mode_buf[8];
329 | | -	ssize_t sz;
330 | 300 |
331 | | -	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
332 | | -	if (sz <= 0)
333 | | -		return -EINVAL;
334 | | -
335 | | -	/* ignore trailing new line */
336 | | -	if (mode_buf[sz - 1] == '\n')
337 | | -		mode_buf[sz - 1] = 0x00;
338 | | -
339 | | -	if (strcmp(mode_buf, "all"))
| 301 | +	if (!sysfs_streq(buf, "all"))
340 | 302 | 		return -EINVAL;
341 | 303 |
342 | 304 | 	down_read(&zram->init_lock);
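The strscpy()-plus-newline-trimming boilerplate disappears because sysfs_streq() already treats a single trailing newline as the end of the string, which matches what `echo` writes into a sysfs attribute. A userspace model of its documented behaviour (the real helper lives in lib/string.c):

```c
#include <stdbool.h>
#include <stdio.h>

/* mirrors sysfs_streq(): equal, modulo one trailing '\n' on either side */
static bool sysfs_streq_demo(const char *s1, const char *s2)
{
	while (*s1 && *s1 == *s2) {
		s1++;
		s2++;
	}

	if (*s1 == *s2)
		return true;
	if (!*s1 && *s2 == '\n' && !s2[1])
		return true;
	if (*s1 == '\n' && !s1[1] && !*s2)
		return true;
	return false;
}

int main(void)
{
	printf("\"all\"   vs \"all\": %d\n", sysfs_streq_demo("all", "all"));
	printf("\"all\\n\" vs \"all\": %d\n", sysfs_streq_demo("all\n", "all"));
	printf("\"allx\"  vs \"all\": %d\n", sysfs_streq_demo("allx", "all"));
	return 0;
}
```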
.. | ..
449 | 411 | 	zram->backing_dev = NULL;
450 | 412 | 	zram->old_block_size = 0;
451 | 413 | 	zram->bdev = NULL;
452 | | -	zram->disk->queue->backing_dev_info->capabilities |=
453 | | -		BDI_CAP_SYNCHRONOUS_IO;
| 414 | +	zram->disk->fops = &zram_devops;
454 | 415 | 	kvfree(zram->bitmap);
455 | 416 | 	zram->bitmap = NULL;
456 | 417 | }
.. | ..
516 | 477 | 	if (sz > 0 && file_name[sz - 1] == '\n')
517 | 478 | 		file_name[sz - 1] = 0x00;
518 | 479 |
519 | | -	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
| 480 | +	backing_dev = filp_open_block(file_name, O_RDWR|O_LARGEFILE, 0);
520 | 481 | 	if (IS_ERR(backing_dev)) {
521 | 482 | 		err = PTR_ERR(backing_dev);
522 | 483 | 		backing_dev = NULL;
.. | ..
532 | 493 | 		goto out;
533 | 494 | 	}
534 | 495 |
535 | | -	bdev = bdgrab(I_BDEV(inode));
536 | | -	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
537 | | -	if (err < 0) {
| 496 | +	bdev = blkdev_get_by_dev(inode->i_rdev,
| 497 | +			FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
| 498 | +	if (IS_ERR(bdev)) {
| 499 | +		err = PTR_ERR(bdev);
538 | 500 | 		bdev = NULL;
539 | 501 | 		goto out;
540 | 502 | 	}
.. | ..
569 | 531 | 	 * freely but in fact, IO is going on so finally could cause
570 | 532 | 	 * use-after-free when the IO is really done.
571 | 533 | 	 */
572 | | -	zram->disk->queue->backing_dev_info->capabilities &=
573 | | -		~BDI_CAP_SYNCHRONOUS_IO;
| 534 | +	zram->disk->fops = &zram_wb_devops;
574 | 535 | 	up_write(&zram->init_lock);
575 | 536 |
576 | 537 | 	pr_info("setup backing device %s\n", file_name);
.. | ..
659 | 620 | 		return 1;
660 | 621 | }
661 | 622 |
| 623 | +#define PAGE_WB_SIG "page_index="
| 624 | +
| 625 | +#define PAGE_WRITEBACK 0
662 | 626 | #define HUGE_WRITEBACK 1
663 | 627 | #define IDLE_WRITEBACK 2
| 628 | +
664 | 629 |
665 | 630 | static ssize_t writeback_store(struct device *dev,
666 | 631 | 		struct device_attribute *attr, const char *buf, size_t len)
667 | 632 | {
668 | 633 | 	struct zram *zram = dev_to_zram(dev);
669 | 634 | 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
670 | | -	unsigned long index;
| 635 | +	unsigned long index = 0;
671 | 636 | 	struct bio bio;
672 | 637 | 	struct bio_vec bio_vec;
673 | 638 | 	struct page *page;
674 | | -	ssize_t ret, sz;
675 | | -	char mode_buf[8];
676 | | -	int mode = -1;
| 639 | +	ssize_t ret = len;
| 640 | +	int mode, err;
677 | 641 | 	unsigned long blk_idx = 0;
678 | 642 |
679 | | -	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
680 | | -	if (sz <= 0)
681 | | -		return -EINVAL;
682 | | -
683 | | -	/* ignore trailing newline */
684 | | -	if (mode_buf[sz - 1] == '\n')
685 | | -		mode_buf[sz - 1] = 0x00;
686 | | -
687 | | -	if (!strcmp(mode_buf, "idle"))
| 643 | +	if (sysfs_streq(buf, "idle"))
688 | 644 | 		mode = IDLE_WRITEBACK;
689 | | -	else if (!strcmp(mode_buf, "huge"))
| 645 | +	else if (sysfs_streq(buf, "huge"))
690 | 646 | 		mode = HUGE_WRITEBACK;
| 647 | +	else {
| 648 | +		if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
| 649 | +			return -EINVAL;
691 | 650 |
692 | | -	if (mode == -1)
693 | | -		return -EINVAL;
| 651 | +		if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
| 652 | +				index >= nr_pages)
| 653 | +			return -EINVAL;
| 654 | +
| 655 | +		nr_pages = 1;
| 656 | +		mode = PAGE_WRITEBACK;
| 657 | +	}
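The new else branch lets userspace write back one specific slot: writeback_store() now accepts `page_index=<n>`, validates the literal prefix with strncmp(), parses the number, and narrows the loop to a single page (nr_pages = 1). A runnable userspace sketch of that parsing, with kstrtol() approximated by strtoul() plus full-token validation (parse_page_index() is an invented name):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_WB_SIG "page_index="

static int parse_page_index(const char *buf, unsigned long nr_pages,
			    unsigned long *index)
{
	const char *arg = buf + sizeof(PAGE_WB_SIG) - 1;
	char *end;
	unsigned long val;

	if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
		return -EINVAL;

	errno = 0;
	val = strtoul(arg, &end, 10);
	if (errno || end == arg)	/* overflow or no digits */
		return -EINVAL;
	if (*end == '\n')		/* tolerate echo's newline */
		end++;
	if (*end || val >= nr_pages)
		return -EINVAL;

	*index = val;
	return 0;
}

int main(void)
{
	unsigned long index;

	printf("%d\n", parse_page_index("page_index=42\n", 100, &index));
	printf("index=%lu\n", index);
	printf("%d\n", parse_page_index("page_index=1000", 100, &index));
	return 0;
}
```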
694 | 658 |
695 | 659 | 	down_read(&zram->init_lock);
696 | 660 | 	if (!init_done(zram)) {
.. | ..
709 | 673 | 		goto release_init_lock;
710 | 674 | 	}
711 | 675 |
712 | | -	for (index = 0; index < nr_pages; index++) {
| 676 | +	for (; nr_pages != 0; index++, nr_pages--) {
713 | 677 | 		struct bio_vec bvec;
714 | 678 |
715 | 679 | 		bvec.bv_page = page;
.. | ..
774 | 738 | 		 * XXX: A single page IO would be inefficient for write
775 | 739 | 		 * but it would be not bad as starter.
776 | 740 | 		 */
777 | | -		ret = submit_bio_wait(&bio);
778 | | -		if (ret) {
| 741 | +		err = submit_bio_wait(&bio);
| 742 | +		if (err) {
779 | 743 | 			zram_slot_lock(zram, index);
780 | 744 | 			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
781 | 745 | 			zram_clear_flag(zram, index, ZRAM_IDLE);
782 | 746 | 			zram_slot_unlock(zram, index);
| 747 | +			/*
| 748 | +			 * Return the last IO error unless every IO
| 749 | +			 * succeeded.
| 750 | +			 */
| 751 | +			ret = err;
783 | 752 | 			continue;
784 | 753 | 		}
785 | 754 |
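Because `ret` is now preseeded to `len` and only overwritten when submit_bio_wait() fails, the loop implements a last-error-wins policy: one bad page no longer aborts the pass, but the store still reports failure unless every IO succeeded. The shape of that policy as a runnable sketch (writeback_one() is invented; here odd-numbered pages fail):

```c
#include <errno.h>
#include <stdio.h>

static int writeback_one(unsigned long index)
{
	return (index & 1) ? -EIO : 0;	/* invented failure pattern */
}

static int writeback_all(unsigned long nr_pages)
{
	int ret = 0, err;
	unsigned long index;

	for (index = 0; index < nr_pages; index++) {
		err = writeback_one(index);
		if (err) {
			ret = err;	/* remember the last error... */
			continue;	/* ...but keep writing back */
		}
	}
	return ret;	/* 0 only if every IO succeeded */
}

int main(void)
{
	printf("one even page:  %d\n", writeback_all(1));
	printf("incl. odd page: %d\n", writeback_all(4));
	return 0;
}
```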
.. | ..
817 | 786 |
818 | 787 | 	if (blk_idx)
819 | 788 | 		free_block_bdev(zram, blk_idx);
820 | | -	ret = len;
821 | 789 | 	__free_page(page);
822 | 790 | release_init_lock:
823 | 791 | 	up_read(&zram->init_lock);
.. | ..
845 | 813 | }
846 | 814 |
847 | 815 | /*
848 | | - * Block layer want one ->make_request_fn to be active at a time
849 | | - * so if we use chained IO with parent IO in same context,
850 | | - * it's a deadlock. To avoid, it, it uses worker thread context.
| 816 | + * The block layer wants one ->submit_bio to be active at a time, so if we
| 817 | + * use chained IO with the parent IO in the same context, it's a deadlock.
| 818 | + * To avoid that, use a worker thread context.
851 | 819 |  */
852 | 820 | static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
853 | 821 | 		unsigned long entry, struct bio *bio)
.. | ..
1180 | 1148 | #endif
1181 | 1149 | static DEVICE_ATTR_RO(debug_stat);
1182 | 1150 |
1183 | | -
1184 | | -
1185 | 1151 | static void zram_meta_free(struct zram *zram, u64 disksize)
1186 | 1152 | {
1187 | 1153 | 	size_t num_pages = disksize >> PAGE_SHIFT;
.. | ..
1212 | 1178 |
1213 | 1179 | 	if (!huge_class_size)
1214 | 1180 | 		huge_class_size = zs_huge_class_size(zram->mem_pool);
1215 | | -	zram_meta_init_table_locks(zram, num_pages);
1216 | 1181 | 	return true;
1217 | 1182 | }
1218 | 1183 |
.. | ..
1271 | 1236 | static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
1272 | 1237 | 		struct bio *bio, bool partial_io)
1273 | 1238 | {
1274 | | -	int ret;
| 1239 | +	struct zcomp_strm *zstrm;
1275 | 1240 | 	unsigned long handle;
1276 | 1241 | 	unsigned int size;
1277 | 1242 | 	void *src, *dst;
1278 | | -	struct zcomp_strm *zstrm;
| 1243 | +	int ret;
1279 | 1244 |
1280 | 1245 | 	zram_slot_lock(zram, index);
1281 | 1246 | 	if (zram_test_flag(zram, index, ZRAM_WB)) {
.. | ..
1306 | 1271 |
1307 | 1272 | 	size = zram_get_obj_size(zram, index);
1308 | 1273 |
1309 | | -	zstrm = zcomp_stream_get(zram->comp);
| 1274 | +	if (size != PAGE_SIZE)
| 1275 | +		zstrm = zcomp_stream_get(zram->comp);
| 1276 | +
1310 | 1277 | 	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
1311 | 1278 | 	if (size == PAGE_SIZE) {
1312 | 1279 | 		dst = kmap_atomic(page);
.. | ..
1314 | 1281 | 		kunmap_atomic(dst);
1315 | 1282 | 		ret = 0;
1316 | 1283 | 	} else {
1317 | | -
1318 | 1284 | 		dst = kmap_atomic(page);
1319 | 1285 | 		ret = zcomp_decompress(zstrm, src, size, dst);
1320 | 1286 | 		kunmap_atomic(dst);
| 1287 | +		zcomp_stream_put(zram->comp);
1321 | 1288 | 	}
1322 | 1289 | 	zs_unmap_object(zram->mem_pool, handle);
1323 | | -	zcomp_stream_put(zram->comp);
1324 | 1290 | 	zram_slot_unlock(zram, index);
1325 | 1291 |
1326 | 1292 | 	/* Should NEVER happen. Return bio error if it does. */
1327 | | -	if (unlikely(ret))
| 1293 | +	if (WARN_ON(ret))
1328 | 1294 | 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
1329 | 1295 |
1330 | 1296 | 	return ret;
.. | ..
1419 | 1385 | 			__GFP_KSWAPD_RECLAIM |
1420 | 1386 | 			__GFP_NOWARN |
1421 | 1387 | 			__GFP_HIGHMEM |
1422 | | -			__GFP_MOVABLE);
| 1388 | +			__GFP_MOVABLE |
| 1389 | +			__GFP_CMA);
1423 | 1390 | 	if (!handle) {
1424 | 1391 | 		zcomp_stream_put(zram->comp);
1425 | 1392 | 		atomic64_inc(&zram->stats.writestall);
1426 | 1393 | 		handle = zs_malloc(zram->mem_pool, comp_len,
1427 | 1394 | 				GFP_NOIO | __GFP_HIGHMEM |
1428 | | -				__GFP_MOVABLE);
| 1395 | +				__GFP_MOVABLE | __GFP_CMA);
1429 | 1396 | 		if (handle)
1430 | 1397 | 			goto compress_again;
1431 | 1398 | 		return -ENOMEM;
.. | ..
1566 | 1533 | static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
1567 | 1534 | 		int offset, unsigned int op, struct bio *bio)
1568 | 1535 | {
1569 | | -	unsigned long start_time = jiffies;
1570 | | -	struct request_queue *q = zram->disk->queue;
1571 | 1536 | 	int ret;
1572 | | -
1573 | | -	generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
1574 | | -			&zram->disk->part0);
1575 | 1537 |
1576 | 1538 | 	if (!op_is_write(op)) {
1577 | 1539 | 		atomic64_inc(&zram->stats.num_reads);
.. | ..
1581 | 1543 | 		atomic64_inc(&zram->stats.num_writes);
1582 | 1544 | 		ret = zram_bvec_write(zram, bvec, index, offset, bio);
1583 | 1545 | 	}
1584 | | -
1585 | | -	generic_end_io_acct(q, op, &zram->disk->part0, start_time);
1586 | 1546 |
1587 | 1547 | 	zram_slot_lock(zram, index);
1588 | 1548 | 	zram_accessed(zram, index);
.. | ..
1604 | 1564 | 	u32 index;
1605 | 1565 | 	struct bio_vec bvec;
1606 | 1566 | 	struct bvec_iter iter;
| 1567 | +	unsigned long start_time;
1607 | 1568 |
1608 | 1569 | 	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1609 | 1570 | 	offset = (bio->bi_iter.bi_sector &
.. | ..
1619 | 1580 | 		break;
1620 | 1581 | 	}
1621 | 1582 |
| 1583 | +	start_time = bio_start_io_acct(bio);
1622 | 1584 | 	bio_for_each_segment(bvec, bio, iter) {
1623 | 1585 | 		struct bio_vec bv = bvec;
1624 | 1586 | 		unsigned int unwritten = bvec.bv_len;
.. | ..
1627 | 1589 | 			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
1628 | 1590 | 					unwritten);
1629 | 1591 | 			if (zram_bvec_rw(zram, &bv, index, offset,
1630 | | -					bio_op(bio), bio) < 0)
1631 | | -				goto out;
| 1592 | +					bio_op(bio), bio) < 0) {
| 1593 | +				bio->bi_status = BLK_STS_IOERR;
| 1594 | +				break;
| 1595 | +			}
1632 | 1596 |
1633 | 1597 | 			bv.bv_offset += bv.bv_len;
1634 | 1598 | 			unwritten -= bv.bv_len;
.. | ..
1636 | 1600 | 			update_position(&index, &offset, &bv);
1637 | 1601 | 		} while (unwritten);
1638 | 1602 | 	}
1639 | | -
| 1603 | +	bio_end_io_acct(bio, start_time);
1640 | 1604 | 	bio_endio(bio);
1641 | | -	return;
1642 | | -
1643 | | -out:
1644 | | -	bio_io_error(bio);
1645 | 1605 | }
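IO accounting moves out of zram_bvec_rw() and into the request path: one bio_start_io_acct()/bio_end_io_acct() pair brackets the whole bio, and the rw_page path gets its own disk_start_io_acct()/disk_end_io_acct() pair below. A hedged sketch of the resulting ->submit_bio shape under the ~5.9-era block API (demo_dev and demo_handle_bio are placeholders, not driver code):

```c
/* Sketch only -- not the actual zram function. */
static blk_qc_t demo_submit_bio(struct bio *bio)
{
	struct demo_dev *dev = bio->bi_disk->private_data; /* placeholder */
	unsigned long start_time;

	start_time = bio_start_io_acct(bio);	/* one pair per bio */
	demo_handle_bio(dev, bio);		/* placeholder worker */
	bio_end_io_acct(bio, start_time);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
```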
1646 | 1606 |
1647 | 1607 | /*
1648 | 1608 |  * Handler function for all zram I/O requests.
1649 | 1609 |  */
1650 | | -static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
| 1610 | +static blk_qc_t zram_submit_bio(struct bio *bio)
1651 | 1611 | {
1652 | | -	struct zram *zram = queue->queuedata;
| 1612 | +	struct zram *zram = bio->bi_disk->private_data;
1653 | 1613 |
1654 | 1614 | 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
1655 | 1615 | 			bio->bi_iter.bi_size)) {
.. | ..
1689 | 1649 | 	u32 index;
1690 | 1650 | 	struct zram *zram;
1691 | 1651 | 	struct bio_vec bv;
| 1652 | +	unsigned long start_time;
1692 | 1653 |
1693 | 1654 | 	if (PageTransHuge(page))
1694 | 1655 | 		return -ENOTSUPP;
.. | ..
1707 | 1668 | 	bv.bv_len = PAGE_SIZE;
1708 | 1669 | 	bv.bv_offset = 0;
1709 | 1670 |
| 1671 | +	start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op);
1710 | 1672 | 	ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
| 1673 | +	disk_end_io_acct(bdev->bd_disk, op, start_time);
1711 | 1674 | out:
1712 | 1675 | 	/*
1713 | 1676 | 	 * If I/O fails, just return error(ie, non-zero) without
.. | ..
1799 | 1762 | 	zram->disksize = disksize;
1800 | 1763 | 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
1801 | 1764 |
1802 | | -	revalidate_disk(zram->disk);
| 1765 | +	revalidate_disk_size(zram->disk, true);
1803 | 1766 | 	up_write(&zram->init_lock);
1804 | 1767 |
1805 | 1768 | 	return len;
.. | ..
1846 | 1809 | 	/* Make sure all the pending I/O are finished */
1847 | 1810 | 	fsync_bdev(bdev);
1848 | 1811 | 	zram_reset_device(zram);
1849 | | -	revalidate_disk(zram->disk);
| 1812 | +	revalidate_disk_size(zram->disk, true);
1850 | 1813 | 	bdput(bdev);
1851 | 1814 |
1852 | 1815 | 	mutex_lock(&bdev->bd_mutex);
.. | ..
1873 | 1836 |
1874 | 1837 | static const struct block_device_operations zram_devops = {
1875 | 1838 | 	.open = zram_open,
| 1839 | +	.submit_bio = zram_submit_bio,
1876 | 1840 | 	.swap_slot_free_notify = zram_slot_free_notify,
1877 | 1841 | 	.rw_page = zram_rw_page,
| 1842 | +	.owner = THIS_MODULE
| 1843 | +};
| 1844 | +
| 1845 | +static const struct block_device_operations zram_wb_devops = {
| 1846 | +	.open = zram_open,
| 1847 | +	.submit_bio = zram_submit_bio,
| 1848 | +	.swap_slot_free_notify = zram_slot_free_notify,
1878 | 1849 | 	.owner = THIS_MODULE
1879 | 1850 | };
1880 | 1851 |
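The second fops table is what replaces the BDI_CAP_SYNCHRONOUS_IO toggling: zram_wb_devops simply omits .rw_page, and in this kernel series a missing ->rw_page makes bdev_read_page()/bdev_write_page() (and the swap code's synchronous-IO detection) fall back to regular bios, which is required once pages may detour through a slower backing device. A condensed, hypothetical sketch of the switch that reset_bdev() and backing_dev_store() perform above:

```c
/* hypothetical helper; callers hold zram->init_lock */
static void demo_set_backing_mode(struct zram *zram, bool wb_enabled)
{
	/*
	 * Without .rw_page, bdev_read_page()/bdev_write_page() return
	 * -EOPNOTSUPP and the caller issues a normal bio instead, so IO
	 * can safely be redirected to the backing device.
	 */
	zram->disk->fops = wb_enabled ? &zram_wb_devops : &zram_devops;
}
```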
.. | ..
1951 | 1922 | #ifdef CONFIG_ZRAM_WRITEBACK
1952 | 1923 | 	spin_lock_init(&zram->wb_limit_lock);
1953 | 1924 | #endif
1954 | | -	queue = blk_alloc_queue(GFP_KERNEL);
| 1925 | +	queue = blk_alloc_queue(NUMA_NO_NODE);
1955 | 1926 | 	if (!queue) {
1956 | 1927 | 		pr_err("Error allocating disk queue for device %d\n",
1957 | 1928 | 				device_id);
1958 | 1929 | 		ret = -ENOMEM;
1959 | 1930 | 		goto out_free_idr;
1960 | 1931 | 	}
1961 | | -
1962 | | -	blk_queue_make_request(queue, zram_make_request);
1963 | 1932 |
1964 | 1933 | 	/* gendisk structure */
1965 | 1934 | 	zram->disk = alloc_disk(1);
.. | ..
1974 | 1943 | 	zram->disk->first_minor = device_id;
1975 | 1944 | 	zram->disk->fops = &zram_devops;
1976 | 1945 | 	zram->disk->queue = queue;
1977 | | -	zram->disk->queue->queuedata = zram;
1978 | 1946 | 	zram->disk->private_data = zram;
1979 | 1947 | 	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
1980 | 1948 |
.. | ..
2008 | 1976 | 	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
2009 | 1977 | 		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
2010 | 1978 |
2011 | | -	zram->disk->queue->backing_dev_info->capabilities |=
2012 | | -		(BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
2013 | | -	disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
2014 | | -	add_disk(zram->disk);
| 1979 | +	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
| 1980 | +	device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
2015 | 1981 |
2016 | 1982 | 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
2017 | 1983 |
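Device bring-up follows the ~5.9-era pattern: blk_alloc_queue() now takes a NUMA node (bio-based drivers register ->submit_bio in their fops instead of calling blk_queue_make_request()), QUEUE_FLAG_STABLE_WRITES supersedes BDI_CAP_STABLE_WRITES, and passing the attribute groups to device_add_disk() creates them before the disk is announced, instead of racing userspace after add_disk(). A hedged sketch of that sequence (demo_* names are placeholders):

```c
/* Sketch of the registration order used above, with placeholder names. */
static int demo_add_device(struct demo_dev *dev, int id)
{
	struct request_queue *queue;

	queue = blk_alloc_queue(NUMA_NO_NODE);	/* bio-based queue */
	if (!queue)
		return -ENOMEM;

	dev->disk = alloc_disk(1);
	if (!dev->disk) {
		blk_cleanup_queue(queue);
		return -ENOMEM;
	}

	dev->disk->queue = queue;
	dev->disk->private_data = dev;	/* replaces queue->queuedata */
	snprintf(dev->disk->disk_name, 16, "demo%d", id);

	/* pages must not change while compression/writeback runs */
	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, queue);
	/* sysfs groups registered before the disk goes live */
	device_add_disk(NULL, dev->disk, demo_attr_groups);
	return 0;
}
```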
.. | ..
2047 | 2013 | 	mutex_unlock(&bdev->bd_mutex);
2048 | 2014 |
2049 | 2015 | 	zram_debugfs_unregister(zram);
| 2016 | +
2050 | 2017 | 	/* Make sure all the pending I/O are finished */
2051 | 2018 | 	fsync_bdev(bdev);
2052 | 2019 | 	zram_reset_device(zram);
---|