2024-05-16 8d2a02b24d66aa359e83eebc1ed3c0f85367a1cb
kernel/drivers/gpu/arm/bifrost/mali_kbase_mem_pool.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2015-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -21,12 +21,18 @@
 
 #include <mali_kbase.h>
 #include <linux/mm.h>
+#include <linux/migrate.h>
 #include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 #include <linux/shrinker.h>
 #include <linux/atomic.h>
 #include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>
+#else
+#include <linux/signal.h>
+#endif
 
 #define pool_dbg(pool, format, ...) \
 	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
@@ -37,6 +43,47 @@
 
 #define NOT_DIRTY false
 #define NOT_RECLAIMED false
+
+/**
+ * can_alloc_page() - Check if the current thread can allocate a physical page
+ *
+ * @pool:                Pointer to the memory pool.
+ * @page_owner:          Pointer to the task/process that created the Kbase context
+ *                       for which a page needs to be allocated. It can be NULL if
+ *                       the page won't be associated with a Kbase context.
+ * @alloc_from_kthread:  Flag indicating that the current thread is a kernel thread.
+ *
+ * This function checks if the current thread is a kernel thread and can make a
+ * request to the kernel to allocate a physical page. If the kernel thread is allocating
+ * a page for the Kbase context and the process that created the context is exiting
+ * or is being killed, then there is no point in doing a page allocation.
+ *
+ * The check done by the function is particularly helpful when the system is running
+ * low on memory. When a page is allocated from the context of a kernel thread, the OoM
+ * killer doesn't consider the kernel thread for killing and the kernel keeps retrying
+ * to allocate the page as long as the OoM killer is able to kill processes.
+ * The check allows the kernel thread to quickly exit the page allocation loop once the OoM
+ * killer has initiated the killing of @page_owner, thereby unblocking the context
+ * termination for @page_owner and the freeing of GPU memory allocated by it. This helps
+ * in preventing a kernel panic and also limits the number of innocent processes
+ * that get killed.
+ *
+ * Return: true if the page can be allocated, otherwise false.
+ */
+static inline bool can_alloc_page(struct kbase_mem_pool *pool, struct task_struct *page_owner,
+				  const bool alloc_from_kthread)
+{
+	if (likely(!alloc_from_kthread || !page_owner))
+		return true;
+
+	if ((page_owner->flags & PF_EXITING) || fatal_signal_pending(page_owner)) {
+		dev_info(pool->kbdev->dev, "%s : Process %s/%d exiting",
			 __func__, page_owner->comm, task_pid_nr(page_owner));
+		return false;
+	}
+
+	return true;
+}
 
 static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
 {
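The helper above is wired into the allocation paths further down in this patch (kbase_mem_pool_grow() and kbase_mem_pool_alloc_pages()). As a rough illustration of the caller pattern only, not the driver's exact code, an allocation loop running in a kernel worker bails out as soon as the owning process starts dying; alloc_one_page() is a hypothetical stand-in for kbase_mem_alloc_page():

/* Sketch: early exit from a kthread allocation loop once the page owner is dying. */
static int grow_pool_sketch(struct kbase_mem_pool *pool, size_t nr_pages,
			    struct task_struct *page_owner)
{
	const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
	size_t i;

	for (i = 0; i < nr_pages; i++) {
		/* Stop retrying: the OoM killer has already chosen the owner. */
		if (unlikely(!can_alloc_page(pool, page_owner, alloc_from_kthread)))
			return -ENOMEM;

		if (!alloc_one_page(pool))	/* hypothetical helper */
			return -ENOMEM;
	}
	return 0;
}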
@@ -56,13 +103,58 @@
 	return kbase_mem_pool_size(pool) == 0;
 }
 
+static bool set_pool_new_page_metadata(struct kbase_mem_pool *pool, struct page *p,
+				       struct list_head *page_list, size_t *list_size)
+{
+	struct kbase_page_metadata *page_md = kbase_page_private(p);
+	bool not_movable = false;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	/* Free the page instead of adding it to the pool if it's not movable.
+	 * Only update page status and add the page to the memory pool if
+	 * it is not isolated.
+	 */
+	spin_lock(&page_md->migrate_lock);
+	if (PAGE_STATUS_GET(page_md->status) == (u8)NOT_MOVABLE) {
+		not_movable = true;
+	} else if (!WARN_ON_ONCE(IS_PAGE_ISOLATED(page_md->status))) {
+		page_md->status = PAGE_STATUS_SET(page_md->status, (u8)MEM_POOL);
+		page_md->data.mem_pool.pool = pool;
+		page_md->data.mem_pool.kbdev = pool->kbdev;
+		list_add(&p->lru, page_list);
+		(*list_size)++;
+	}
+	spin_unlock(&page_md->migrate_lock);
+
+	if (not_movable) {
+		kbase_free_page_later(pool->kbdev, p);
+		pool_dbg(pool, "skipping a not movable page\n");
+	}
+
+	return not_movable;
+}
+
 static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
 		struct page *p)
 {
+	bool queue_work_to_free = false;
+
 	lockdep_assert_held(&pool->pool_lock);
 
-	list_add(&p->lru, &pool->page_list);
-	pool->cur_size++;
+	if (!pool->order && kbase_page_migration_enabled) {
+		if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
+			queue_work_to_free = true;
+	} else {
+		list_add(&p->lru, &pool->page_list);
+		pool->cur_size++;
+	}
+
+	if (queue_work_to_free) {
+		struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
+
+		queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
+	}
 
 	pool_dbg(pool, "added page\n");
 }
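set_pool_new_page_metadata() never frees a NOT_MOVABLE page directly while pool_lock is held; it hands the page to kbase_free_page_later() and the caller then kicks mem_migrate->free_pages_work so the release happens in workqueue context. The generic shape of that deferred-free pattern, sketched with purely illustrative names (the driver's real list and work item live in its kbase_mem_migrate code), is:

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Sketch only: every identifier below is illustrative, not the driver's. */
static LIST_HEAD(deferred_pages);
static DEFINE_SPINLOCK(deferred_lock);

static void deferred_free_fn(struct work_struct *work)
{
	struct page *p, *tmp;
	LIST_HEAD(local);

	/* Drain the list under the lock, then free outside it. */
	spin_lock(&deferred_lock);
	list_splice_init(&deferred_pages, &local);
	spin_unlock(&deferred_lock);

	list_for_each_entry_safe(p, tmp, &local, lru) {
		list_del_init(&p->lru);
		__free_page(p);
	}
}
static DECLARE_WORK(deferred_work, deferred_free_fn);

static void free_page_later_sketch(struct page *p)
{
	spin_lock(&deferred_lock);
	list_add(&p->lru, &deferred_pages);
	spin_unlock(&deferred_lock);
	schedule_work(&deferred_work);	/* the driver queues onto its own workqueue instead */
}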
@@ -77,10 +169,28 @@
 static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
 		struct list_head *page_list, size_t nr_pages)
 {
+	bool queue_work_to_free = false;
+
 	lockdep_assert_held(&pool->pool_lock);
 
-	list_splice(page_list, &pool->page_list);
-	pool->cur_size += nr_pages;
+	if (!pool->order && kbase_page_migration_enabled) {
+		struct page *p, *tmp;
+
+		list_for_each_entry_safe(p, tmp, page_list, lru) {
+			list_del_init(&p->lru);
+			if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
+				queue_work_to_free = true;
+		}
+	} else {
+		list_splice(page_list, &pool->page_list);
+		pool->cur_size += nr_pages;
+	}
+
+	if (queue_work_to_free) {
+		struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
+
+		queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
+	}
 
 	pool_dbg(pool, "added %zu pages\n", nr_pages);
 }
@@ -93,7 +203,8 @@
 	kbase_mem_pool_unlock(pool);
 }
 
-static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
+static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool,
+						 enum kbase_page_status status)
 {
 	struct page *p;
 
@@ -103,6 +214,16 @@
 		return NULL;
 
 	p = list_first_entry(&pool->page_list, struct page, lru);
+
+	if (!pool->order && kbase_page_migration_enabled) {
+		struct kbase_page_metadata *page_md = kbase_page_private(p);
+
+		spin_lock(&page_md->migrate_lock);
+		WARN_ON(PAGE_STATUS_GET(page_md->status) != (u8)MEM_POOL);
+		page_md->status = PAGE_STATUS_SET(page_md->status, (u8)status);
+		spin_unlock(&page_md->migrate_lock);
+	}
+
 	list_del_init(&p->lru);
 	pool->cur_size--;
 
@@ -111,12 +232,13 @@
 	return p;
 }
 
-static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
+static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool,
+					  enum kbase_page_status status)
 {
 	struct page *p;
 
 	kbase_mem_pool_lock(pool);
-	p = kbase_mem_pool_remove_locked(pool);
+	p = kbase_mem_pool_remove_locked(pool, status);
 	kbase_mem_pool_unlock(pool);
 
 	return p;
@@ -126,8 +248,9 @@
 		struct page *p)
 {
 	struct device *dev = pool->kbdev->dev;
-	dma_sync_single_for_device(dev, kbase_dma_addr(p),
-			(PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
+	dma_addr_t dma_addr = pool->order ? kbase_dma_addr_as_priv(p) : kbase_dma_addr(p);
+
+	dma_sync_single_for_device(dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
 }
 
 static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
@@ -153,7 +276,7 @@
 struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
 {
 	struct page *p;
-	gfp_t gfp = GFP_HIGHUSER | __GFP_ZERO;
+	gfp_t gfp = __GFP_ZERO;
 	struct kbase_device *const kbdev = pool->kbdev;
 	struct device *const dev = kbdev->dev;
 	dma_addr_t dma_addr;
@@ -161,7 +284,9 @@
 
 	/* don't warn on higher order failures */
 	if (pool->order)
-		gfp |= __GFP_NOWARN;
+		gfp |= GFP_HIGHUSER | __GFP_NOWARN;
+	else
+		gfp |= kbase_page_migration_enabled ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
 
 	p = kbdev->mgm_dev->ops.mgm_alloc_page(kbdev->mgm_dev,
 			pool->group_id, gfp, pool->order);
@@ -177,30 +302,59 @@
 		return NULL;
 	}
 
-	WARN_ON(dma_addr != page_to_phys(p));
-	for (i = 0; i < (1u << pool->order); i++)
-		kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i);
+	/* Setup page metadata for 4KB pages when page migration is enabled */
+	if (!pool->order && kbase_page_migration_enabled) {
+		INIT_LIST_HEAD(&p->lru);
+		if (!kbase_alloc_page_metadata(kbdev, p, dma_addr, pool->group_id)) {
+			dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+			kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p,
+							  pool->order);
+			return NULL;
+		}
+	} else {
+		WARN_ON(dma_addr != page_to_phys(p));
+		for (i = 0; i < (1u << pool->order); i++)
+			kbase_set_dma_addr_as_priv(p + i, dma_addr + PAGE_SIZE * i);
+	}
 
 	return p;
 }
 
-static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
-		struct page *p)
+static void enqueue_free_pool_pages_work(struct kbase_mem_pool *pool)
 {
-	struct kbase_device *const kbdev = pool->kbdev;
-	struct device *const dev = kbdev->dev;
-	dma_addr_t dma_addr = kbase_dma_addr(p);
-	int i;
+	struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
 
-	dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order),
-		       DMA_BIDIRECTIONAL);
-	for (i = 0; i < (1u << pool->order); i++)
-		kbase_clear_dma_addr(p+i);
+	if (!pool->order && kbase_page_migration_enabled)
+		queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
+}
 
-	kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev,
-		pool->group_id, p, pool->order);
+void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p)
+{
+	struct kbase_device *kbdev;
 
-	pool_dbg(pool, "freed page to kernel\n");
+	if (WARN_ON(!pool))
+		return;
+	if (WARN_ON(!p))
+		return;
+
+	kbdev = pool->kbdev;
+
+	if (!pool->order && kbase_page_migration_enabled) {
+		kbase_free_page_later(kbdev, p);
+		pool_dbg(pool, "page to be freed to kernel later\n");
+	} else {
+		int i;
+		dma_addr_t dma_addr = kbase_dma_addr_as_priv(p);
+
+		for (i = 0; i < (1u << pool->order); i++)
+			kbase_clear_dma_addr_as_priv(p + i);
+
+		dma_unmap_page(kbdev->dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
+
+		kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p, pool->order);
+
+		pool_dbg(pool, "freed page to kernel\n");
+	}
 }
 
 static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
@@ -212,9 +366,12 @@
 	lockdep_assert_held(&pool->pool_lock);
 
 	for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
-		p = kbase_mem_pool_remove_locked(pool);
+		p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
 		kbase_mem_pool_free_page(pool, p);
 	}
+
+	/* Freeing of pages will be deferred when page migration is enabled. */
+	enqueue_free_pool_pages_work(pool);
 
 	return i;
 }
@@ -231,11 +388,12 @@
 	return nr_freed;
 }
 
-int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
-		size_t nr_to_grow)
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
+			struct task_struct *page_owner)
 {
 	struct page *p;
 	size_t i;
+	const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
 
 	kbase_mem_pool_lock(pool);
 
@@ -249,6 +407,9 @@
 			return -ENOMEM;
 		}
 		kbase_mem_pool_unlock(pool);
+
+		if (unlikely(!can_alloc_page(pool, page_owner, alloc_from_kthread)))
+			return -ENOMEM;
 
 		p = kbase_mem_alloc_page(pool);
 		if (!p) {
@@ -267,6 +428,7 @@
 
 	return 0;
 }
+KBASE_EXPORT_TEST_API(kbase_mem_pool_grow);
 
 void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
 {
@@ -281,7 +443,7 @@
 	if (new_size < cur_size)
 		kbase_mem_pool_shrink(pool, cur_size - new_size);
 	else if (new_size > cur_size)
-		err = kbase_mem_pool_grow(pool, new_size - cur_size);
+		err = kbase_mem_pool_grow(pool, new_size - cur_size, NULL);
 
 	if (err) {
 		size_t grown_size = kbase_mem_pool_size(pool);
@@ -322,6 +484,9 @@
 	kbase_mem_pool_lock(pool);
 	if (pool->dont_reclaim && !pool->dying) {
 		kbase_mem_pool_unlock(pool);
+		/* Tell shrinker to skip reclaim
+		 * even though freeable pages are available
+		 */
 		return 0;
 	}
 	pool_size = kbase_mem_pool_size(pool);
@@ -341,7 +506,10 @@
 	kbase_mem_pool_lock(pool);
 	if (pool->dont_reclaim && !pool->dying) {
 		kbase_mem_pool_unlock(pool);
-		return 0;
+		/* Tell shrinker that reclaim can't be made and
+		 * do not attempt again for this reclaim context.
+		 */
+		return SHRINK_STOP;
 	}
 
 	pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
@@ -355,12 +523,9 @@
 	return freed;
 }
 
-int kbase_mem_pool_init(struct kbase_mem_pool *pool,
-		const struct kbase_mem_pool_config *config,
-		unsigned int order,
-		int group_id,
-		struct kbase_device *kbdev,
-		struct kbase_mem_pool *next_pool)
+int kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config,
+			unsigned int order, int group_id, struct kbase_device *kbdev,
+			struct kbase_mem_pool *next_pool)
 {
 	if (WARN_ON(group_id < 0) ||
 		WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
@@ -374,6 +539,7 @@
 	pool->kbdev = kbdev;
 	pool->next_pool = next_pool;
 	pool->dying = false;
+	atomic_set(&pool->isolation_in_progress_cnt, 0);
 
 	spin_lock_init(&pool->pool_lock);
 	INIT_LIST_HEAD(&pool->page_list);
@@ -385,12 +551,17 @@
 	 * struct shrinker does not define batch
 	 */
 	pool->reclaim.batch = 0;
+#if KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE
 	register_shrinker(&pool->reclaim);
+#else
+	register_shrinker(&pool->reclaim, "mali-mem-pool");
+#endif
 
 	pool_dbg(pool, "initialized\n");
 
 	return 0;
 }
+KBASE_EXPORT_TEST_API(kbase_mem_pool_init);
 
 void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool)
 {
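The #if around register_shrinker() above reflects an API change in Linux 6.0, where register_shrinker() gained a printf-style name argument used to identify the shrinker in debugfs. If the same split were needed in more than one place it could be wrapped once; a small sketch with a hypothetical macro name:

#include <linux/shrinker.h>
#include <linux/version.h>

/* Sketch only: the wrapper name is hypothetical. */
#if KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE
#define mali_register_shrinker(shrinker, name)	register_shrinker(shrinker)
#else
#define mali_register_shrinker(shrinker, name)	register_shrinker(shrinker, name)
#endif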
@@ -422,15 +593,17 @@
 
 		/* Zero pages first without holding the next_pool lock */
 		for (i = 0; i < nr_to_spill; i++) {
-			p = kbase_mem_pool_remove_locked(pool);
-			list_add(&p->lru, &spill_list);
+			p = kbase_mem_pool_remove_locked(pool, SPILL_IN_PROGRESS);
+			if (p)
+				list_add(&p->lru, &spill_list);
 		}
 	}
 
 	while (!kbase_mem_pool_is_empty(pool)) {
 		/* Free remaining pages to kernel */
-		p = kbase_mem_pool_remove_locked(pool);
-		list_add(&p->lru, &free_list);
+		p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
+		if (p)
+			list_add(&p->lru, &free_list);
 	}
 
 	kbase_mem_pool_unlock(pool);
@@ -450,8 +623,18 @@
 		kbase_mem_pool_free_page(pool, p);
 	}
 
+	/* Freeing of pages will be deferred when page migration is enabled. */
+	enqueue_free_pool_pages_work(pool);
+
+	/* Before returning wait to make sure there are no pages undergoing page isolation
+	 * which will require reference to this pool.
+	 */
+	while (atomic_read(&pool->isolation_in_progress_cnt))
+		cpu_relax();
+
 	pool_dbg(pool, "terminated\n");
 }
+KBASE_EXPORT_TEST_API(kbase_mem_pool_term);
 
 struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
 {
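kbase_mem_pool_term() now spins on isolation_in_progress_cnt (initialised to 0 in kbase_mem_pool_init() above) so the pool cannot be torn down while the page-migration code still holds a reference to it. The counter is presumably raised and dropped around page isolation; a sketch of that handshake, with hypothetical function names standing in for the driver's isolate/putback hooks:

/* Sketch only: function names are illustrative. */
static void pool_isolation_begin_sketch(struct kbase_mem_pool *pool)
{
	/* Held while a page belonging to this pool is being isolated for migration. */
	atomic_inc(&pool->isolation_in_progress_cnt);
}

static void pool_isolation_end_sketch(struct kbase_mem_pool *pool)
{
	/* Dropping the count lets a concurrent kbase_mem_pool_term() leave its
	 * cpu_relax() loop and finish tearing the pool down.
	 */
	atomic_dec(&pool->isolation_in_progress_cnt);
}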
@@ -459,7 +642,7 @@
 
 	do {
 		pool_dbg(pool, "alloc()\n");
-		p = kbase_mem_pool_remove(pool);
+		p = kbase_mem_pool_remove(pool, ALLOCATE_IN_PROGRESS);
 
 		if (p)
 			return p;
@@ -472,17 +655,10 @@
 
 struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool)
 {
-	struct page *p;
-
 	lockdep_assert_held(&pool->pool_lock);
 
 	pool_dbg(pool, "alloc_locked()\n");
-	p = kbase_mem_pool_remove_locked(pool);
-
-	if (p)
-		return p;
-
-	return NULL;
+	return kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
 }
 
 void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
@@ -504,6 +680,8 @@
 	} else {
 		/* Free page */
 		kbase_mem_pool_free_page(pool, p);
+		/* Freeing of pages will be deferred when page migration is enabled. */
+		enqueue_free_pool_pages_work(pool);
 	}
 }
 
@@ -523,17 +701,21 @@
 	} else {
 		/* Free page */
 		kbase_mem_pool_free_page(pool, p);
+		/* Freeing of pages will be deferred when page migration is enabled. */
+		enqueue_free_pool_pages_work(pool);
 	}
 }
 
 int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
-		struct tagged_addr *pages, bool partial_allowed)
+			       struct tagged_addr *pages, bool partial_allowed,
+			       struct task_struct *page_owner)
 {
 	struct page *p;
 	size_t nr_from_pool;
 	size_t i = 0;
 	int err = -ENOMEM;
 	size_t nr_pages_internal;
+	const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
 
 	nr_pages_internal = nr_4k_pages / (1u << (pool->order));
 
@@ -546,9 +728,12 @@
 	/* Get pages from this pool */
 	kbase_mem_pool_lock(pool);
 	nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool));
+
 	while (nr_from_pool--) {
 		int j;
-		p = kbase_mem_pool_remove_locked(pool);
+
+		p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
+
 		if (pool->order) {
 			pages[i++] = as_tagged_tag(page_to_phys(p),
 						   HUGE_HEAD | HUGE_PAGE);
@@ -564,8 +749,8 @@
 
 	if (i != nr_4k_pages && pool->next_pool) {
 		/* Allocate via next pool */
-		err = kbase_mem_pool_alloc_pages(pool->next_pool,
-				nr_4k_pages - i, pages + i, partial_allowed);
+		err = kbase_mem_pool_alloc_pages(pool->next_pool, nr_4k_pages - i, pages + i,
+						 partial_allowed, page_owner);
 
 		if (err < 0)
 			goto err_rollback;
@@ -574,6 +759,9 @@
 	} else {
 		/* Get any remaining pages from kernel */
 		while (i != nr_4k_pages) {
+			if (unlikely(!can_alloc_page(pool, page_owner, alloc_from_kthread)))
+				goto err_rollback;
+
 			p = kbase_mem_alloc_page(pool);
 			if (!p) {
 				if (partial_allowed)
@@ -636,7 +824,7 @@
 	for (i = 0; i < nr_pages_internal; i++) {
 		int j;
 
-		p = kbase_mem_pool_remove_locked(pool);
+		p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
 		if (pool->order) {
 			*pages++ = as_tagged_tag(page_to_phys(p),
 						 HUGE_HEAD | HUGE_PAGE);
@@ -743,6 +931,7 @@
 	size_t nr_to_pool;
 	LIST_HEAD(to_pool_list);
 	size_t i = 0;
+	bool pages_released = false;
 
 	pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
 
@@ -775,12 +964,16 @@
 			pages[i] = as_tagged(0);
 			continue;
 		}
-
 		p = as_page(pages[i]);
 
 		kbase_mem_pool_free_page(pool, p);
 		pages[i] = as_tagged(0);
+		pages_released = true;
 	}
+
+	/* Freeing of pages will be deferred when page migration is enabled. */
+	if (pages_released)
+		enqueue_free_pool_pages_work(pool);
 
 	pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
 }
@@ -794,6 +987,7 @@
 	size_t nr_to_pool;
 	LIST_HEAD(to_pool_list);
 	size_t i = 0;
+	bool pages_released = false;
 
 	lockdep_assert_held(&pool->pool_lock);
 
@@ -824,7 +1018,12 @@
 
 		kbase_mem_pool_free_page(pool, p);
 		pages[i] = as_tagged(0);
+		pages_released = true;
 	}
 
+	/* Freeing of pages will be deferred when page migration is enabled. */
+	if (pages_released)
+		enqueue_free_pool_pages_work(pool);
+
 	pool_dbg(pool, "free_pages_locked(%zu) done\n", nr_pages);
 }