hc
2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/drivers/md/dm-kcopyd.c
@@ -17,6 +17,8 @@
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -28,10 +30,126 @@
 
 #include "dm-core.h"
 
-#define SUB_JOB_SIZE 128
 #define SPLIT_COUNT 8
 #define MIN_JOBS 8
-#define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
+#define DEFAULT_SUB_JOB_SIZE_KB 512
+#define MAX_SUB_JOB_SIZE_KB 1024
+
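+/* Sub-job size in KiB; clamped at each use and sampled once per client. */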
+static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
+
+module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
+
+static bool rsm_enabled;
+static phys_addr_t rsm_mem_base, rsm_mem_size;
+
+#ifndef MODULE
+static DEFINE_SPINLOCK(rsm_lock);
+static int *rsm_mem;
+static int rsm_page_cnt;
+static int rsm_tbl_idx;
+static struct reserved_mem *rmem;
+
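+/*
+ * Find the "mediatek,dm_ota" reserved-memory region and build a
+ * one-slot-per-page allocation table for it.
+ */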
+static void __init kcopyd_rsm_init(void)
+{
+	struct device_node *rsm_node;
+	int ret = 0;
+
+	if (!rsm_enabled)
+		return;
+
+	rsm_node = of_find_compatible_node(NULL, NULL, "mediatek,dm_ota");
+	if (!rsm_node) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	rmem = of_reserved_mem_lookup(rsm_node);
+	if (!rmem) {
+		ret = -EINVAL;
+		goto out_put_node;
+	}
+
+	rsm_mem_base = rmem->base;
+	rsm_mem_size = rmem->size;
+	rsm_page_cnt = rsm_mem_size / PAGE_SIZE;
+	rsm_mem = kcalloc(rsm_page_cnt, sizeof(int), GFP_KERNEL);
+	if (!rsm_mem)
+		ret = -ENOMEM;
+
+out_put_node:
+	of_node_put(rsm_node);
+out:
+	if (ret)
+		pr_warn("kcopyd: failed to init rsm: %d\n", ret);
+}
+
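+/* Enabled with the "mtk_kcopyd_quirk" kernel command line parameter. */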
+static int __init kcopyd_rsm_enable(char *str)
+{
+	rsm_enabled = true;
+
+	return 0;
+}
+early_param("mtk_kcopyd_quirk", kcopyd_rsm_enable);
+
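+/*
+ * Hand out one free page from the reserved region, scanning the table
+ * round-robin from the last slot used; *p is left NULL if none is free.
+ */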
+static void kcopyd_rsm_get_page(struct page **p)
+{
+	int i;
+	unsigned long flags;
+
+	*p = NULL;
+	spin_lock_irqsave(&rsm_lock, flags);
+	for (i = 0; i < rsm_page_cnt; i++) {
+		rsm_tbl_idx = (rsm_tbl_idx + 1 == rsm_page_cnt) ? 0 : rsm_tbl_idx + 1;
+
+		if (rsm_mem[rsm_tbl_idx] == 0) {
+			rsm_mem[rsm_tbl_idx] = 1;
+			*p = virt_to_page(phys_to_virt(rsm_mem_base +
+						       PAGE_SIZE * rsm_tbl_idx));
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&rsm_lock, flags);
+}
+
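+/*
+ * Return a page to the reserved region by clearing the slot derived
+ * from its physical offset. Tolerates a NULL *p.
+ */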
+static void kcopyd_rsm_drop_page(struct page **p)
+{
+	u64 off;
+	unsigned long flags;
+
+	if (*p) {
+		off = page_to_phys(*p) - rsm_mem_base;
+		spin_lock_irqsave(&rsm_lock, flags);
+		rsm_mem[off >> PAGE_SHIFT] = 0;
+		spin_unlock_irqrestore(&rsm_lock, flags);
+		*p = NULL;
+	}
+}
+
+static void kcopyd_rsm_destroy(void)
+{
+	if (rsm_enabled)
+		kfree(rsm_mem);
+}
+
+#else
+#define kcopyd_rsm_destroy(...)
+#define kcopyd_rsm_drop_page(...)
+#define kcopyd_rsm_get_page(...)
+#define kcopyd_rsm_init(...)
+#endif
+
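+/* Clamp the module parameter and convert KiB to 512-byte sectors. */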
+static unsigned dm_get_kcopyd_subjob_size(void)
+{
+	unsigned sub_job_size_kb;
+
+	sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
+						DEFAULT_SUB_JOB_SIZE_KB,
+						MAX_SUB_JOB_SIZE_KB);
+
+	return sub_job_size_kb << 1;
+}
 
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
@@ -41,6 +159,7 @@
 	struct page_list *pages;
 	unsigned nr_reserved_pages;
 	unsigned nr_free_pages;
+	unsigned sub_job_size;
 
 	struct dm_io_client *io_client;
 
@@ -193,7 +312,7 @@
 /*
  * Obtain one page for the use of kcopyd.
  */
-static struct page_list *alloc_pl(gfp_t gfp)
+static struct page_list *alloc_pl(gfp_t gfp, unsigned long job_flags)
 {
 	struct page_list *pl;
 
@@ -201,7 +320,12 @@
 	if (!pl)
 		return NULL;
 
-	pl->page = alloc_page(gfp);
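+	/* Snapshot-merge jobs draw their pages from the reserved region. */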
+	if (rsm_enabled && test_bit(DM_KCOPYD_SNAP_MERGE, &job_flags))
+		kcopyd_rsm_get_page(&pl->page);
+	else
+		pl->page = alloc_page(gfp);
+
 	if (!pl->page) {
 		kfree(pl);
 		return NULL;
@@ -212,7 +336,14 @@
 
 static void free_pl(struct page_list *pl)
 {
-	__free_page(pl->page);
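+	/* Pages from the reserved range go back to its table, not the allocator. */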
+	struct page *p = pl->page;
+	phys_addr_t pa = page_to_phys(p);
+
+	if (rsm_enabled && pa >= rsm_mem_base && pa < rsm_mem_base + rsm_mem_size)
+		kcopyd_rsm_drop_page(&pl->page);
+	else
+		__free_page(pl->page);
+
 	kfree(pl);
 }
 
@@ -240,14 +371,15 @@
 }
 
 static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
-			    unsigned int nr, struct page_list **pages)
+			    unsigned int nr, struct page_list **pages,
+			    unsigned long job_flags)
 {
 	struct page_list *pl;
 
 	*pages = NULL;
 
 	do {
-		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
+		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM, job_flags);
 		if (unlikely(!pl)) {
 			/* Use reserved pages */
 			pl = kc->pages;
@@ -291,7 +423,7 @@
 	struct page_list *pl = NULL, *next;
 
 	for (i = 0; i < nr_pages; i++) {
-		next = alloc_pl(GFP_KERNEL);
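+		/* job_flags of 0 keeps the client's reserve on normal pages. */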
+		next = alloc_pl(GFP_KERNEL, 0);
 		if (!next) {
 			if (pl)
 				drop_pages(pl);
@@ -377,6 +509,8 @@
 	zero_page_list.next = &zero_page_list;
 	zero_page_list.page = ZERO_PAGE(0);
 
+	kcopyd_rsm_init();
+
 	return 0;
 }
 
@@ -384,6 +518,7 @@
 {
 	kmem_cache_destroy(_job_cache);
 	_job_cache = NULL;
+	kcopyd_rsm_destroy();
 }
 
 /*
@@ -568,7 +703,7 @@
 	int r;
 	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
 
-	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
+	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
 	if (!r) {
 		/* this job is ready for io */
 		push(&job->kc->io_jobs, job);
@@ -696,8 +831,8 @@
 	progress = job->progress;
 	count = job->source.count - progress;
 	if (count) {
-		if (count > SUB_JOB_SIZE)
-			count = SUB_JOB_SIZE;
+		if (count > kc->sub_job_size)
+			count = kc->sub_job_size;
 
 		job->progress += count;
 	}
@@ -824,7 +959,7 @@
 	job->master_job = job;
 	job->write_offset = 0;
 
-	if (job->source.count <= SUB_JOB_SIZE)
+	if (job->source.count <= kc->sub_job_size)
 		dispatch_job(job);
 	else {
 		job->progress = 0;
@@ -891,6 +1026,7 @@
 struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
 {
 	int r;
+	unsigned reserve_pages;
 	struct dm_kcopyd_client *kc;
 
 	kc = kzalloc(sizeof(*kc), GFP_KERNEL);
@@ -915,9 +1051,12 @@
 		goto bad_workqueue;
 	}
 
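+	/* Size the reserved pool to cover one full sub-job. */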
+	kc->sub_job_size = dm_get_kcopyd_subjob_size();
+	reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);
+
 	kc->pages = NULL;
 	kc->nr_reserved_pages = kc->nr_free_pages = 0;
-	r = client_reserve_pages(kc, RESERVE_PAGES);
+	r = client_reserve_pages(kc, reserve_pages);
 	if (r)
 		goto bad_client_pages;
 