forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/block/zram/zram_drv.c
....@@ -33,6 +33,7 @@
3333 #include <linux/sysfs.h>
3434 #include <linux/debugfs.h>
3535 #include <linux/cpuhotplug.h>
36
+#include <linux/part_stat.h>
3637
3738 #include "zram_drv.h"
3839
....@@ -41,7 +42,7 @@
4142 static DEFINE_MUTEX(zram_index_mutex);
4243
4344 static int zram_major;
44
-static const char *default_compressor = "lzo";
45
+static const char *default_compressor = "lzo-rle";
4546
4647 /* Module params (documentation at end) */
4748 static unsigned int num_devices = 1;
....@@ -51,12 +52,14 @@
5152 */
5253 static size_t huge_class_size;
5354
55
+static const struct block_device_operations zram_devops;
56
+static const struct block_device_operations zram_wb_devops;
57
+
5458 static void zram_free_page(struct zram *zram, size_t index);
5559 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
5660 u32 index, int offset, struct bio *bio);
5761
58
-
59
-#ifdef CONFIG_PREEMPT_RT_BASE
62
+#ifdef CONFIG_PREEMPT_RT
6063 static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
6164 {
6265 size_t index;
....@@ -71,23 +74,24 @@
7174
7275 ret = spin_trylock(&zram->table[index].lock);
7376 if (ret)
74
- __set_bit(ZRAM_LOCK, &zram->table[index].value);
77
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
7578 return ret;
7679 }
7780
7881 static void zram_slot_lock(struct zram *zram, u32 index)
7982 {
8083 spin_lock(&zram->table[index].lock);
81
- __set_bit(ZRAM_LOCK, &zram->table[index].value);
84
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
8285 }
8386
8487 static void zram_slot_unlock(struct zram *zram, u32 index)
8588 {
86
- __clear_bit(ZRAM_LOCK, &zram->table[index].value);
89
+ __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
8790 spin_unlock(&zram->table[index].lock);
8891 }
8992
9093 #else
94
+
9195 static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
9296
9397 static int zram_slot_trylock(struct zram *zram, u32 index)
....@@ -242,14 +246,17 @@
242246
243247 static bool page_same_filled(void *ptr, unsigned long *element)
244248 {
245
- unsigned int pos;
246249 unsigned long *page;
247250 unsigned long val;
251
+ unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
248252
249253 page = (unsigned long *)ptr;
250254 val = page[0];
251255
252
- for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
256
+ if (val != page[last_pos])
257
+ return false;
258
+
259
+ for (pos = 1; pos < last_pos; pos++) {
253260 if (val != page[pos])
254261 return false;
255262 }
....@@ -325,18 +332,8 @@
325332 struct zram *zram = dev_to_zram(dev);
326333 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
327334 int index;
328
- char mode_buf[8];
329
- ssize_t sz;
330335
331
- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
332
- if (sz <= 0)
333
- return -EINVAL;
334
-
335
- /* ignore trailing new line */
336
- if (mode_buf[sz - 1] == '\n')
337
- mode_buf[sz - 1] = 0x00;
338
-
339
- if (strcmp(mode_buf, "all"))
336
+ if (!sysfs_streq(buf, "all"))
340337 return -EINVAL;
341338
342339 down_read(&zram->init_lock);
....@@ -449,8 +446,7 @@
449446 zram->backing_dev = NULL;
450447 zram->old_block_size = 0;
451448 zram->bdev = NULL;
452
- zram->disk->queue->backing_dev_info->capabilities |=
453
- BDI_CAP_SYNCHRONOUS_IO;
449
+ zram->disk->fops = &zram_devops;
454450 kvfree(zram->bitmap);
455451 zram->bitmap = NULL;
456452 }
....@@ -516,7 +512,7 @@
516512 if (sz > 0 && file_name[sz - 1] == '\n')
517513 file_name[sz - 1] = 0x00;
518514
519
- backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
515
+ backing_dev = filp_open_block(file_name, O_RDWR|O_LARGEFILE, 0);
520516 if (IS_ERR(backing_dev)) {
521517 err = PTR_ERR(backing_dev);
522518 backing_dev = NULL;
....@@ -532,9 +528,10 @@
532528 goto out;
533529 }
534530
535
- bdev = bdgrab(I_BDEV(inode));
536
- err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
537
- if (err < 0) {
531
+ bdev = blkdev_get_by_dev(inode->i_rdev,
532
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
533
+ if (IS_ERR(bdev)) {
534
+ err = PTR_ERR(bdev);
538535 bdev = NULL;
539536 goto out;
540537 }
....@@ -569,8 +566,7 @@
569566 * freely but in fact, IO is going on so finally could cause
570567 * use-after-free when the IO is really done.
571568 */
572
- zram->disk->queue->backing_dev_info->capabilities &=
573
- ~BDI_CAP_SYNCHRONOUS_IO;
569
+ zram->disk->fops = &zram_wb_devops;
574570 up_write(&zram->init_lock);
575571
576572 pr_info("setup backing device %s\n", file_name);
....@@ -659,38 +655,41 @@
659655 return 1;
660656 }
661657
658
+#define PAGE_WB_SIG "page_index="
659
+
660
+#define PAGE_WRITEBACK 0
662661 #define HUGE_WRITEBACK 1
663662 #define IDLE_WRITEBACK 2
663
+
664664
665665 static ssize_t writeback_store(struct device *dev,
666666 struct device_attribute *attr, const char *buf, size_t len)
667667 {
668668 struct zram *zram = dev_to_zram(dev);
669669 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
670
- unsigned long index;
670
+ unsigned long index = 0;
671671 struct bio bio;
672672 struct bio_vec bio_vec;
673673 struct page *page;
674
- ssize_t ret, sz;
675
- char mode_buf[8];
676
- int mode = -1;
674
+ ssize_t ret = len;
675
+ int mode, err;
677676 unsigned long blk_idx = 0;
678677
679
- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
680
- if (sz <= 0)
681
- return -EINVAL;
682
-
683
- /* ignore trailing newline */
684
- if (mode_buf[sz - 1] == '\n')
685
- mode_buf[sz - 1] = 0x00;
686
-
687
- if (!strcmp(mode_buf, "idle"))
678
+ if (sysfs_streq(buf, "idle"))
688679 mode = IDLE_WRITEBACK;
689
- else if (!strcmp(mode_buf, "huge"))
680
+ else if (sysfs_streq(buf, "huge"))
690681 mode = HUGE_WRITEBACK;
682
+ else {
683
+ if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
684
+ return -EINVAL;
691685
692
- if (mode == -1)
693
- return -EINVAL;
686
+ if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
687
+ index >= nr_pages)
688
+ return -EINVAL;
689
+
690
+ nr_pages = 1;
691
+ mode = PAGE_WRITEBACK;
692
+ }
694693
695694 down_read(&zram->init_lock);
696695 if (!init_done(zram)) {
....@@ -709,7 +708,7 @@
709708 goto release_init_lock;
710709 }
711710
712
- for (index = 0; index < nr_pages; index++) {
711
+ for (; nr_pages != 0; index++, nr_pages--) {
713712 struct bio_vec bvec;
714713
715714 bvec.bv_page = page;
....@@ -774,12 +773,17 @@
774773 * XXX: A single page IO would be inefficient for write
775774 * but it would be not bad as starter.
776775 */
777
- ret = submit_bio_wait(&bio);
778
- if (ret) {
776
+ err = submit_bio_wait(&bio);
777
+ if (err) {
779778 zram_slot_lock(zram, index);
780779 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
781780 zram_clear_flag(zram, index, ZRAM_IDLE);
782781 zram_slot_unlock(zram, index);
782
+ /*
783
+ * Return the last IO error unless every IO
784
+ * succeeded.
785
+ */
786
+ ret = err;
783787 continue;
784788 }
785789
....@@ -817,7 +821,6 @@
817821
818822 if (blk_idx)
819823 free_block_bdev(zram, blk_idx);
820
- ret = len;
821824 __free_page(page);
822825 release_init_lock:
823826 up_read(&zram->init_lock);
....@@ -845,9 +848,9 @@
845848 }
846849
847850 /*
848
- * Block layer want one ->make_request_fn to be active at a time
849
- * so if we use chained IO with parent IO in same context,
850
- * it's a deadlock. To avoid, it, it uses worker thread context.
851
+ * The block layer wants one ->submit_bio to be active at a time, so if we use
852
+ * chained IO with the parent IO in the same context, it's a deadlock. To avoid
853
+ * that, use a worker thread context.
851854 */
852855 static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
853856 unsigned long entry, struct bio *bio)
....@@ -1180,8 +1183,6 @@
11801183 #endif
11811184 static DEVICE_ATTR_RO(debug_stat);
11821185
1183
-
1184
-
11851186 static void zram_meta_free(struct zram *zram, u64 disksize)
11861187 {
11871188 size_t num_pages = disksize >> PAGE_SHIFT;
....@@ -1271,11 +1272,11 @@
12711272 static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
12721273 struct bio *bio, bool partial_io)
12731274 {
1274
- int ret;
1275
+ struct zcomp_strm *zstrm;
12751276 unsigned long handle;
12761277 unsigned int size;
12771278 void *src, *dst;
1278
- struct zcomp_strm *zstrm;
1279
+ int ret;
12791280
12801281 zram_slot_lock(zram, index);
12811282 if (zram_test_flag(zram, index, ZRAM_WB)) {
....@@ -1306,7 +1307,9 @@
13061307
13071308 size = zram_get_obj_size(zram, index);
13081309
1309
- zstrm = zcomp_stream_get(zram->comp);
1310
+ if (size != PAGE_SIZE)
1311
+ zstrm = zcomp_stream_get(zram->comp);
1312
+
13101313 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
13111314 if (size == PAGE_SIZE) {
13121315 dst = kmap_atomic(page);
....@@ -1314,17 +1317,16 @@
13141317 kunmap_atomic(dst);
13151318 ret = 0;
13161319 } else {
1317
-
13181320 dst = kmap_atomic(page);
13191321 ret = zcomp_decompress(zstrm, src, size, dst);
13201322 kunmap_atomic(dst);
1323
+ zcomp_stream_put(zram->comp);
13211324 }
13221325 zs_unmap_object(zram->mem_pool, handle);
1323
- zcomp_stream_put(zram->comp);
13241326 zram_slot_unlock(zram, index);
13251327
13261328 /* Should NEVER happen. Return bio error if it does. */
1327
- if (unlikely(ret))
1329
+ if (WARN_ON(ret))
13281330 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
13291331
13301332 return ret;
....@@ -1419,13 +1421,14 @@
14191421 __GFP_KSWAPD_RECLAIM |
14201422 __GFP_NOWARN |
14211423 __GFP_HIGHMEM |
1422
- __GFP_MOVABLE);
1424
+ __GFP_MOVABLE |
1425
+ __GFP_CMA);
14231426 if (!handle) {
14241427 zcomp_stream_put(zram->comp);
14251428 atomic64_inc(&zram->stats.writestall);
14261429 handle = zs_malloc(zram->mem_pool, comp_len,
14271430 GFP_NOIO | __GFP_HIGHMEM |
1428
- __GFP_MOVABLE);
1431
+ __GFP_MOVABLE | __GFP_CMA);
14291432 if (handle)
14301433 goto compress_again;
14311434 return -ENOMEM;
....@@ -1566,12 +1569,7 @@
15661569 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
15671570 int offset, unsigned int op, struct bio *bio)
15681571 {
1569
- unsigned long start_time = jiffies;
1570
- struct request_queue *q = zram->disk->queue;
15711572 int ret;
1572
-
1573
- generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
1574
- &zram->disk->part0);
15751573
15761574 if (!op_is_write(op)) {
15771575 atomic64_inc(&zram->stats.num_reads);
....@@ -1581,8 +1579,6 @@
15811579 atomic64_inc(&zram->stats.num_writes);
15821580 ret = zram_bvec_write(zram, bvec, index, offset, bio);
15831581 }
1584
-
1585
- generic_end_io_acct(q, op, &zram->disk->part0, start_time);
15861582
15871583 zram_slot_lock(zram, index);
15881584 zram_accessed(zram, index);
....@@ -1604,6 +1600,7 @@
16041600 u32 index;
16051601 struct bio_vec bvec;
16061602 struct bvec_iter iter;
1603
+ unsigned long start_time;
16071604
16081605 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
16091606 offset = (bio->bi_iter.bi_sector &
....@@ -1619,6 +1616,7 @@
16191616 break;
16201617 }
16211618
1619
+ start_time = bio_start_io_acct(bio);
16221620 bio_for_each_segment(bvec, bio, iter) {
16231621 struct bio_vec bv = bvec;
16241622 unsigned int unwritten = bvec.bv_len;
....@@ -1627,8 +1625,10 @@
16271625 bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
16281626 unwritten);
16291627 if (zram_bvec_rw(zram, &bv, index, offset,
1630
- bio_op(bio), bio) < 0)
1631
- goto out;
1628
+ bio_op(bio), bio) < 0) {
1629
+ bio->bi_status = BLK_STS_IOERR;
1630
+ break;
1631
+ }
16321632
16331633 bv.bv_offset += bv.bv_len;
16341634 unwritten -= bv.bv_len;
....@@ -1636,20 +1636,16 @@
16361636 update_position(&index, &offset, &bv);
16371637 } while (unwritten);
16381638 }
1639
-
1639
+ bio_end_io_acct(bio, start_time);
16401640 bio_endio(bio);
1641
- return;
1642
-
1643
-out:
1644
- bio_io_error(bio);
16451641 }
16461642
16471643 /*
16481644 * Handler function for all zram I/O requests.
16491645 */
1650
-static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
1646
+static blk_qc_t zram_submit_bio(struct bio *bio)
16511647 {
1652
- struct zram *zram = queue->queuedata;
1648
+ struct zram *zram = bio->bi_disk->private_data;
16531649
16541650 if (!valid_io_request(zram, bio->bi_iter.bi_sector,
16551651 bio->bi_iter.bi_size)) {
....@@ -1689,6 +1685,7 @@
16891685 u32 index;
16901686 struct zram *zram;
16911687 struct bio_vec bv;
1688
+ unsigned long start_time;
16921689
16931690 if (PageTransHuge(page))
16941691 return -ENOTSUPP;
....@@ -1707,7 +1704,9 @@
17071704 bv.bv_len = PAGE_SIZE;
17081705 bv.bv_offset = 0;
17091706
1707
+ start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op);
17101708 ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
1709
+ disk_end_io_acct(bdev->bd_disk, op, start_time);
17111710 out:
17121711 /*
17131712 * If I/O fails, just return error(ie, non-zero) without
....@@ -1799,7 +1798,7 @@
17991798 zram->disksize = disksize;
18001799 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
18011800
1802
- revalidate_disk(zram->disk);
1801
+ revalidate_disk_size(zram->disk, true);
18031802 up_write(&zram->init_lock);
18041803
18051804 return len;
....@@ -1846,7 +1845,7 @@
18461845 /* Make sure all the pending I/O are finished */
18471846 fsync_bdev(bdev);
18481847 zram_reset_device(zram);
1849
- revalidate_disk(zram->disk);
1848
+ revalidate_disk_size(zram->disk, true);
18501849 bdput(bdev);
18511850
18521851 mutex_lock(&bdev->bd_mutex);
....@@ -1873,8 +1872,16 @@
18731872
18741873 static const struct block_device_operations zram_devops = {
18751874 .open = zram_open,
1875
+ .submit_bio = zram_submit_bio,
18761876 .swap_slot_free_notify = zram_slot_free_notify,
18771877 .rw_page = zram_rw_page,
1878
+ .owner = THIS_MODULE
1879
+};
1880
+
1881
+static const struct block_device_operations zram_wb_devops = {
1882
+ .open = zram_open,
1883
+ .submit_bio = zram_submit_bio,
1884
+ .swap_slot_free_notify = zram_slot_free_notify,
18781885 .owner = THIS_MODULE
18791886 };
18801887
....@@ -1951,15 +1958,13 @@
19511958 #ifdef CONFIG_ZRAM_WRITEBACK
19521959 spin_lock_init(&zram->wb_limit_lock);
19531960 #endif
1954
- queue = blk_alloc_queue(GFP_KERNEL);
1961
+ queue = blk_alloc_queue(NUMA_NO_NODE);
19551962 if (!queue) {
19561963 pr_err("Error allocating disk queue for device %d\n",
19571964 device_id);
19581965 ret = -ENOMEM;
19591966 goto out_free_idr;
19601967 }
1961
-
1962
- blk_queue_make_request(queue, zram_make_request);
19631968
19641969 /* gendisk structure */
19651970 zram->disk = alloc_disk(1);
....@@ -1974,7 +1979,6 @@
19741979 zram->disk->first_minor = device_id;
19751980 zram->disk->fops = &zram_devops;
19761981 zram->disk->queue = queue;
1977
- zram->disk->queue->queuedata = zram;
19781982 zram->disk->private_data = zram;
19791983 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
19801984
....@@ -2008,10 +2012,8 @@
20082012 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
20092013 blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
20102014
2011
- zram->disk->queue->backing_dev_info->capabilities |=
2012
- (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
2013
- disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
2014
- add_disk(zram->disk);
2015
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
2016
+ device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
20152017
20162018 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
20172019
....@@ -2047,6 +2049,7 @@
20472049 mutex_unlock(&bdev->bd_mutex);
20482050
20492051 zram_debugfs_unregister(zram);
2052
+
20502053 /* Make sure all the pending I/O are finished */
20512054 fsync_bdev(bdev);
20522055 zram_reset_device(zram);