.. | .. |
---|
17 | 17 | #include <linux/list.h> |
---|
18 | 18 | #include <linux/mempool.h> |
---|
19 | 19 | #include <linux/module.h> |
---|
| 20 | +#include <linux/of_platform.h> |
---|
| 21 | +#include <linux/of_reserved_mem.h> |
---|
20 | 22 | #include <linux/pagemap.h> |
---|
21 | 23 | #include <linux/slab.h> |
---|
22 | 24 | #include <linux/vmalloc.h> |
---|
.. | .. |
---|
28 | 30 | |
---|
29 | 31 | #include "dm-core.h" |
---|
30 | 32 | |
---|
31 | | -#define SUB_JOB_SIZE 128 |
---|
32 | 33 | #define SPLIT_COUNT 8 |
---|
33 | 34 | #define MIN_JOBS 8 |
---|
34 | | -#define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE)) |
---|

#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB 1024

/*
 * Sub-job granularity in KiB used when splitting large copies.
 * Runtime-writable; clamped by dm_get_kcopyd_subjob_size() before use.
 */
static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;

module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");

/*
 * Reserved-memory ("rsm") page pool, opted in via the
 * "mtk_kcopyd_quirk" early param and populated from the
 * "mediatek,dm_ota" reserved-memory node in kcopyd_rsm_init().
 * base/size live outside the #ifndef so free_pl() can range-check
 * pages in all configurations.
 */
static bool rsm_enabled;
static phys_addr_t rsm_mem_base, rsm_mem_size;

#ifndef MODULE
static DEFINE_SPINLOCK(rsm_lock);	/* protects rsm_mem[] and rsm_tbl_idx */
static int *rsm_mem;			/* per-page in-use flags (0 = free) */
static int rsm_page_cnt;		/* number of pages in the pool */
static int rsm_tbl_idx;			/* round-robin scan cursor */
static struct reserved_mem *rmem;	/* looked-up reserved-memory region */

---|
| 54 | +static void __init kcopyd_rsm_init(void) |
---|
| 55 | +{ |
---|
| 56 | + static struct device_node *rsm_node; |
---|
| 57 | + int ret = 0; |
---|
| 58 | + |
---|
| 59 | + if (!rsm_enabled) |
---|
| 60 | + return; |
---|
| 61 | + |
---|
| 62 | + rsm_node = of_find_compatible_node(NULL, NULL, "mediatek,dm_ota"); |
---|
| 63 | + if (!rsm_node) { |
---|
| 64 | + ret = -ENODEV; |
---|
| 65 | + goto out; |
---|
| 66 | + } |
---|
| 67 | + |
---|
| 68 | + rmem = of_reserved_mem_lookup(rsm_node); |
---|
| 69 | + if (!rmem) { |
---|
| 70 | + ret = -EINVAL; |
---|
| 71 | + goto out_put_node; |
---|
| 72 | + } |
---|
| 73 | + |
---|
| 74 | + rsm_mem_base = rmem->base; |
---|
| 75 | + rsm_mem_size = rmem->size; |
---|
| 76 | + rsm_page_cnt = rsm_mem_size / PAGE_SIZE; |
---|
| 77 | + rsm_mem = kcalloc(rsm_page_cnt, sizeof(int), GFP_KERNEL); |
---|
| 78 | + if (!rsm_mem) |
---|
| 79 | + ret = -ENOMEM; |
---|
| 80 | + |
---|
| 81 | +out_put_node: |
---|
| 82 | + of_node_put(rsm_node); |
---|
| 83 | +out: |
---|
| 84 | + if (ret) |
---|
| 85 | + pr_warn("kcopyd: failed to init rsm: %d", ret); |
---|
| 86 | +} |
---|
| 87 | + |
---|
/*
 * "mtk_kcopyd_quirk" on the kernel command line opts in to the reserved
 * memory pool.  Flag-style early_param handler: @str is unused.
 */
static int __init kcopyd_rsm_enable(char *str)
{
	rsm_enabled = true;

	return 0;
}
early_param("mtk_kcopyd_quirk", kcopyd_rsm_enable);
---|
| 95 | + |
---|
| 96 | +static void kcopyd_rsm_get_page(struct page **p) |
---|
| 97 | +{ |
---|
| 98 | + int i; |
---|
| 99 | + unsigned long flags; |
---|
| 100 | + |
---|
| 101 | + *p = NULL; |
---|
| 102 | + spin_lock_irqsave(&rsm_lock, flags); |
---|
| 103 | + for (i = 0 ; i < rsm_page_cnt ; i++) { |
---|
| 104 | + rsm_tbl_idx = (rsm_tbl_idx + 1 == rsm_page_cnt) ? 0 : rsm_tbl_idx + 1; |
---|
| 105 | + |
---|
| 106 | + if (rsm_mem[rsm_tbl_idx] == 0) { |
---|
| 107 | + rsm_mem[rsm_tbl_idx] = 1; |
---|
| 108 | + *p = virt_to_page(phys_to_virt(rsm_mem_base + PAGE_SIZE |
---|
| 109 | + * rsm_tbl_idx)); |
---|
| 110 | + break; |
---|
| 111 | + } |
---|
| 112 | + } |
---|
| 113 | + spin_unlock_irqrestore(&rsm_lock, flags); |
---|
| 114 | +} |
---|
| 115 | + |
---|
| 116 | +static void kcopyd_rsm_drop_page(struct page **p) |
---|
| 117 | +{ |
---|
| 118 | + u64 off; |
---|
| 119 | + unsigned long flags; |
---|
| 120 | + |
---|
| 121 | + if (*p) { |
---|
| 122 | + off = page_to_phys(*p) - rsm_mem_base; |
---|
| 123 | + spin_lock_irqsave(&rsm_lock, flags); |
---|
| 124 | + rsm_mem[off >> PAGE_SHIFT] = 0; |
---|
| 125 | + spin_unlock_irqrestore(&rsm_lock, flags); |
---|
| 126 | + *p = NULL; |
---|
| 127 | + } |
---|
| 128 | +} |
---|
| 129 | + |
---|
| 130 | +static void kcopyd_rsm_destroy(void) |
---|
| 131 | +{ |
---|
| 132 | + if (rsm_enabled) |
---|
| 133 | + kfree(rsm_mem); |
---|
| 134 | +} |
---|
| 135 | + |
---|
| 136 | +#else |
---|
| 137 | +#define kcopyd_rsm_destroy(...) |
---|
| 138 | +#define kcopyd_rsm_drop_page(...) |
---|
| 139 | +#define kcopyd_rsm_get_page(...) |
---|
| 140 | +#define kcopyd_rsm_init(...) |
---|
| 141 | +#endif |
---|
| 142 | + |
---|
/*
 * Read the (runtime-writable) sub-job size parameter and convert it
 * from KiB to 512-byte sectors: 1 KiB == 2 sectors, hence << 1.
 * __dm_get_module_param() presumably substitutes the default for 0 and
 * caps at the max, per dm-core convention — confirm against dm-core.h.
 */
static unsigned dm_get_kcopyd_subjob_size(void)
{
	unsigned sub_job_size_kb;

	sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
						DEFAULT_SUB_JOB_SIZE_KB,
						MAX_SUB_JOB_SIZE_KB);

	return sub_job_size_kb << 1;
}
---|
35 | 153 | |
---|
36 | 154 | /*----------------------------------------------------------------- |
---|
37 | 155 | * Each kcopyd client has its own little pool of preallocated |
---|
.. | .. |
---|
41 | 159 | struct page_list *pages; |
---|
42 | 160 | unsigned nr_reserved_pages; |
---|
43 | 161 | unsigned nr_free_pages; |
---|
| 162 | + unsigned sub_job_size; |
---|
44 | 163 | |
---|
45 | 164 | struct dm_io_client *io_client; |
---|
46 | 165 | |
---|
.. | .. |
---|
193 | 312 | /* |
---|
194 | 313 | * Obtain one page for the use of kcopyd. |
---|
195 | 314 | */ |
---|
196 | | -static struct page_list *alloc_pl(gfp_t gfp) |
---|
| 315 | +static struct page_list *alloc_pl(gfp_t gfp, unsigned long job_flags) |
---|
197 | 316 | { |
---|
198 | 317 | struct page_list *pl; |
---|
199 | 318 | |
---|
.. | .. |
---|
201 | 320 | if (!pl) |
---|
202 | 321 | return NULL; |
---|
203 | 322 | |
---|
204 | | - pl->page = alloc_page(gfp); |
---|
| 323 | + if (rsm_enabled && test_bit(DM_KCOPYD_SNAP_MERGE, &job_flags)) { |
---|
| 324 | + kcopyd_rsm_get_page(&pl->page); |
---|
| 325 | + } else { |
---|
| 326 | + pl->page = alloc_page(gfp); |
---|
| 327 | + } |
---|
| 328 | + |
---|
205 | 329 | if (!pl->page) { |
---|
206 | 330 | kfree(pl); |
---|
207 | 331 | return NULL; |
---|
.. | .. |
---|
212 | 336 | |
---|
213 | 337 | static void free_pl(struct page_list *pl) |
---|
214 | 338 | { |
---|
215 | | - __free_page(pl->page); |
---|
| 339 | + struct page *p = pl->page; |
---|
| 340 | + phys_addr_t pa = page_to_phys(p); |
---|
| 341 | + |
---|
| 342 | + if (rsm_enabled && pa >= rsm_mem_base && pa < rsm_mem_base + rsm_mem_size) |
---|
| 343 | + kcopyd_rsm_drop_page(&pl->page); |
---|
| 344 | + else |
---|
| 345 | + __free_page(pl->page); |
---|
| 346 | + |
---|
216 | 347 | kfree(pl); |
---|
217 | 348 | } |
---|
218 | 349 | |
---|
.. | .. |
---|
240 | 371 | } |
---|
241 | 372 | |
---|
242 | 373 | static int kcopyd_get_pages(struct dm_kcopyd_client *kc, |
---|
243 | | - unsigned int nr, struct page_list **pages) |
---|
| 374 | + unsigned int nr, struct page_list **pages, |
---|
| 375 | + unsigned long job_flags) |
---|
244 | 376 | { |
---|
245 | 377 | struct page_list *pl; |
---|
246 | 378 | |
---|
247 | 379 | *pages = NULL; |
---|
248 | 380 | |
---|
249 | 381 | do { |
---|
250 | | - pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM); |
---|
| 382 | + pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM, job_flags); |
---|
251 | 383 | if (unlikely(!pl)) { |
---|
252 | 384 | /* Use reserved pages */ |
---|
253 | 385 | pl = kc->pages; |
---|
.. | .. |
---|
291 | 423 | struct page_list *pl = NULL, *next; |
---|
292 | 424 | |
---|
293 | 425 | for (i = 0; i < nr_pages; i++) { |
---|
294 | | - next = alloc_pl(GFP_KERNEL); |
---|
| 426 | + next = alloc_pl(GFP_KERNEL, 0); |
---|
295 | 427 | if (!next) { |
---|
296 | 428 | if (pl) |
---|
297 | 429 | drop_pages(pl); |
---|
.. | .. |
---|
377 | 509 | zero_page_list.next = &zero_page_list; |
---|
378 | 510 | zero_page_list.page = ZERO_PAGE(0); |
---|
379 | 511 | |
---|
| 512 | + kcopyd_rsm_init(); |
---|
| 513 | + |
---|
380 | 514 | return 0; |
---|
381 | 515 | } |
---|
382 | 516 | |
---|
.. | .. |
---|
384 | 518 | { |
---|
385 | 519 | kmem_cache_destroy(_job_cache); |
---|
386 | 520 | _job_cache = NULL; |
---|
| 521 | + kcopyd_rsm_destroy(); |
---|
387 | 522 | } |
---|
388 | 523 | |
---|
389 | 524 | /* |
---|
.. | .. |
---|
568 | 703 | int r; |
---|
569 | 704 | unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9); |
---|
570 | 705 | |
---|
571 | | - r = kcopyd_get_pages(job->kc, nr_pages, &job->pages); |
---|
| 706 | + r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags); |
---|
572 | 707 | if (!r) { |
---|
573 | 708 | /* this job is ready for io */ |
---|
574 | 709 | push(&job->kc->io_jobs, job); |
---|
.. | .. |
---|
696 | 831 | progress = job->progress; |
---|
697 | 832 | count = job->source.count - progress; |
---|
698 | 833 | if (count) { |
---|
699 | | - if (count > SUB_JOB_SIZE) |
---|
700 | | - count = SUB_JOB_SIZE; |
---|
| 834 | + if (count > kc->sub_job_size) |
---|
| 835 | + count = kc->sub_job_size; |
---|
701 | 836 | |
---|
702 | 837 | job->progress += count; |
---|
703 | 838 | } |
---|
.. | .. |
---|
824 | 959 | job->master_job = job; |
---|
825 | 960 | job->write_offset = 0; |
---|
826 | 961 | |
---|
827 | | - if (job->source.count <= SUB_JOB_SIZE) |
---|
| 962 | + if (job->source.count <= kc->sub_job_size) |
---|
828 | 963 | dispatch_job(job); |
---|
829 | 964 | else { |
---|
830 | 965 | job->progress = 0; |
---|
.. | .. |
---|
891 | 1026 | struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle) |
---|
892 | 1027 | { |
---|
893 | 1028 | int r; |
---|
| 1029 | + unsigned reserve_pages; |
---|
894 | 1030 | struct dm_kcopyd_client *kc; |
---|
895 | 1031 | |
---|
896 | 1032 | kc = kzalloc(sizeof(*kc), GFP_KERNEL); |
---|
.. | .. |
---|
915 | 1051 | goto bad_workqueue; |
---|
916 | 1052 | } |
---|
917 | 1053 | |
---|
| 1054 | + kc->sub_job_size = dm_get_kcopyd_subjob_size(); |
---|
| 1055 | + reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE); |
---|
| 1056 | + |
---|
918 | 1057 | kc->pages = NULL; |
---|
919 | 1058 | kc->nr_reserved_pages = kc->nr_free_pages = 0; |
---|
920 | | - r = client_reserve_pages(kc, RESERVE_PAGES); |
---|
| 1059 | + r = client_reserve_pages(kc, reserve_pages); |
---|
921 | 1060 | if (r) |
---|
922 | 1061 | goto bad_client_pages; |
---|
923 | 1062 | |
---|