1 | 1 | // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 | 2 | /*
3 | 3 | *
4 | | - * (C) COPYRIGHT 2015-2021 ARM Limited. All rights reserved.
| 4 | + * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
5 | 5 | *
6 | 6 | * This program is free software and is provided to you under the terms of the
7 | 7 | * GNU General Public License version 2 as published by the Free Software
.. | ..
21 | 21 |
22 | 22 | #include <mali_kbase.h>
23 | 23 | #include <linux/mm.h>
| 24 | +#include <linux/migrate.h>
24 | 25 | #include <linux/dma-mapping.h>
25 | 26 | #include <linux/highmem.h>
26 | 27 | #include <linux/spinlock.h>
27 | 28 | #include <linux/shrinker.h>
28 | 29 | #include <linux/atomic.h>
29 | 30 | #include <linux/version.h>
| 31 | +#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
| 32 | +#include <linux/sched/signal.h>
| 33 | +#else
| 34 | +#include <linux/signal.h>
| 35 | +#endif
30 | 36 |
31 | 37 | #define pool_dbg(pool, format, ...) \
32 | 38 | dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
.. | ..
37 | 43 |
38 | 44 | #define NOT_DIRTY false
39 | 45 | #define NOT_RECLAIMED false
| 46 | +
| 47 | +/**
| 48 | + * can_alloc_page() - Check if the current thread can allocate a physical page
| 49 | + *
| 50 | + * @pool: Pointer to the memory pool.
| 51 | + * @page_owner: Pointer to the task/process that created the Kbase context
| 52 | + * for which a page needs to be allocated. It can be NULL if
| 53 | + * the page won't be associated with a Kbase context.
| 54 | + * @alloc_from_kthread: Flag indicating that the current thread is a kernel thread.
| 55 | + *
| 56 | + * This function checks whether the current thread is a kernel thread that may
| 57 | + * ask the kernel to allocate a physical page. If a kernel thread is allocating
| 58 | + * a page for a Kbase context and the process that created that context is
| 59 | + * exiting or being killed, then there is no point in doing the allocation.
| 60 | + *
| 61 | + * The check is particularly helpful when the system is running low on memory.
| 62 | + * When a page is allocated from the context of a kernel thread, the OoM killer
| 63 | + * does not consider that kernel thread for killing and the kernel keeps retrying
| 64 | + * the allocation as long as the OoM killer is able to kill processes.
| 65 | + * The check allows a kernel thread to exit the page allocation loop quickly once
| 66 | + * the OoM killer has initiated the killing of @page_owner, thereby unblocking
| 67 | + * context termination for @page_owner and the freeing of the GPU memory it
| 68 | + * allocated. This helps prevent a kernel panic and limits the number of
| 69 | + * innocent processes that get killed.
| 70 | + *
| 71 | + * Return: true if the page can be allocated, otherwise false.
| 72 | + */
| 73 | +static inline bool can_alloc_page(struct kbase_mem_pool *pool, struct task_struct *page_owner,
| 74 | + const bool alloc_from_kthread)
| 75 | +{
| 76 | + if (likely(!alloc_from_kthread || !page_owner))
| 77 | + return true;
| 78 | +
| 79 | + if ((page_owner->flags & PF_EXITING) || fatal_signal_pending(page_owner)) {
| 80 | + dev_info(pool->kbdev->dev, "%s : Process %s/%d exiting",
| 81 | + __func__, page_owner->comm, task_pid_nr(page_owner));
| 82 | + return false;
| 83 | + }
| 84 | +
| 85 | + return true;
| 86 | +}
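
For reference, the call sites added further down in this patch (kbase_mem_pool_grow() and kbase_mem_pool_alloc_pages()) use the helper in an allocation loop of roughly the following shape. The sketch is illustrative only; the loop condition and the error handling are placeholders:

        const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);

        while (more_pages_needed) {     /* hypothetical loop condition */
                if (unlikely(!can_alloc_page(pool, page_owner, alloc_from_kthread)))
                        return -ENOMEM; /* owner is exiting or being killed: stop retrying */

                p = kbase_mem_alloc_page(pool);
                if (!p)
                        break;
                /* ... hand the page to the pool or to the caller ... */
        }
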
40 | 87 |
41 | 88 | static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
42 | 89 | {
.. | ..
56 | 103 | return kbase_mem_pool_size(pool) == 0;
57 | 104 | }
58 | 105 |
| 106 | +static bool set_pool_new_page_metadata(struct kbase_mem_pool *pool, struct page *p,
| 107 | + struct list_head *page_list, size_t *list_size)
| 108 | +{
| 109 | + struct kbase_page_metadata *page_md = kbase_page_private(p);
| 110 | + bool not_movable = false;
| 111 | +
| 112 | + lockdep_assert_held(&pool->pool_lock);
| 113 | +
| 114 | + /* Free the page instead of adding it to the pool if it's not movable.
| 115 | + * Only update page status and add the page to the memory pool if
| 116 | + * it is not isolated.
| 117 | + */
| 118 | + spin_lock(&page_md->migrate_lock);
| 119 | + if (PAGE_STATUS_GET(page_md->status) == (u8)NOT_MOVABLE) {
| 120 | + not_movable = true;
| 121 | + } else if (!WARN_ON_ONCE(IS_PAGE_ISOLATED(page_md->status))) {
| 122 | + page_md->status = PAGE_STATUS_SET(page_md->status, (u8)MEM_POOL);
| 123 | + page_md->data.mem_pool.pool = pool;
| 124 | + page_md->data.mem_pool.kbdev = pool->kbdev;
| 125 | + list_add(&p->lru, page_list);
| 126 | + (*list_size)++;
| 127 | + }
| 128 | + spin_unlock(&page_md->migrate_lock);
| 129 | +
| 130 | + if (not_movable) {
| 131 | + kbase_free_page_later(pool->kbdev, p);
| 132 | + pool_dbg(pool, "skipping a not movable page\n");
| 133 | + }
| 134 | +
| 135 | + return not_movable;
| 136 | +}
| 137 | +
59 | 138 | static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
60 | 139 | struct page *p)
61 | 140 | {
| 141 | + bool queue_work_to_free = false;
| 142 | +
62 | 143 | lockdep_assert_held(&pool->pool_lock);
63 | 144 |
64 | | - list_add(&p->lru, &pool->page_list);
65 | | - pool->cur_size++;
| 145 | + if (!pool->order && kbase_page_migration_enabled) {
| 146 | + if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
| 147 | + queue_work_to_free = true;
| 148 | + } else {
| 149 | + list_add(&p->lru, &pool->page_list);
| 150 | + pool->cur_size++;
| 151 | + }
| 152 | +
| 153 | + if (queue_work_to_free) {
| 154 | + struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
| 155 | +
| 156 | + queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
| 157 | + }
66 | 158 |
67 | 159 | pool_dbg(pool, "added page\n");
68 | 160 | }
.. | ..
77 | 169 | static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
78 | 170 | struct list_head *page_list, size_t nr_pages)
79 | 171 | {
| 172 | + bool queue_work_to_free = false;
| 173 | +
80 | 174 | lockdep_assert_held(&pool->pool_lock);
81 | 175 |
82 | | - list_splice(page_list, &pool->page_list);
83 | | - pool->cur_size += nr_pages;
| 176 | + if (!pool->order && kbase_page_migration_enabled) {
| 177 | + struct page *p, *tmp;
| 178 | +
| 179 | + list_for_each_entry_safe(p, tmp, page_list, lru) {
| 180 | + list_del_init(&p->lru);
| 181 | + if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
| 182 | + queue_work_to_free = true;
| 183 | + }
| 184 | + } else {
| 185 | + list_splice(page_list, &pool->page_list);
| 186 | + pool->cur_size += nr_pages;
| 187 | + }
| 188 | +
| 189 | + if (queue_work_to_free) {
| 190 | + struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
| 191 | +
| 192 | + queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
| 193 | + }
84 | 194 |
85 | 195 | pool_dbg(pool, "added %zu pages\n", nr_pages);
86 | 196 | }
.. | ..
93 | 203 | kbase_mem_pool_unlock(pool);
94 | 204 | }
95 | 205 |
96 | | -static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
| 206 | +static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool,
| 207 | + enum kbase_page_status status)
97 | 208 | {
98 | 209 | struct page *p;
99 | 210 |
.. | ..
103 | 214 | return NULL;
104 | 215 |
105 | 216 | p = list_first_entry(&pool->page_list, struct page, lru);
| 217 | +
| 218 | + if (!pool->order && kbase_page_migration_enabled) {
| 219 | + struct kbase_page_metadata *page_md = kbase_page_private(p);
| 220 | +
| 221 | + spin_lock(&page_md->migrate_lock);
| 222 | + WARN_ON(PAGE_STATUS_GET(page_md->status) != (u8)MEM_POOL);
| 223 | + page_md->status = PAGE_STATUS_SET(page_md->status, (u8)status);
| 224 | + spin_unlock(&page_md->migrate_lock);
| 225 | + }
| 226 | +
106 | 227 | list_del_init(&p->lru);
107 | 228 | pool->cur_size--;
108 | 229 |
.. | ..
111 | 232 | return p;
112 | 233 | }
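
Taken together with the hunks below, the kbase_page_status values passed to kbase_mem_pool_remove_locked() describe a small lifecycle for order-0 pages while page migration is enabled. A rough summary, based only on what is visible in this patch:

        /*
         * MEM_POOL             - page is sitting in a kbase_mem_pool
         *                        (set in set_pool_new_page_metadata()).
         * ALLOCATE_IN_PROGRESS - page is being handed out to a caller.
         * SPILL_IN_PROGRESS    - page is being moved to the next pool while
         *                        this pool is terminated.
         * FREE_IN_PROGRESS     - page is on its way back to the kernel; the
         *                        actual free is deferred to free_pages_work.
         * NOT_MOVABLE          - page must not be pooled and is freed instead
         *                        (see set_pool_new_page_metadata()).
         */
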
113 | 234 |
114 | | -static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
| 235 | +static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool,
| 236 | + enum kbase_page_status status)
115 | 237 | {
116 | 238 | struct page *p;
117 | 239 |
118 | 240 | kbase_mem_pool_lock(pool);
119 | | - p = kbase_mem_pool_remove_locked(pool);
| 241 | + p = kbase_mem_pool_remove_locked(pool, status);
120 | 242 | kbase_mem_pool_unlock(pool);
121 | 243 |
122 | 244 | return p;
.. | ..
126 | 248 | struct page *p)
127 | 249 | {
128 | 250 | struct device *dev = pool->kbdev->dev;
129 | | - dma_sync_single_for_device(dev, kbase_dma_addr(p),
130 | | - (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
| 251 | + dma_addr_t dma_addr = pool->order ? kbase_dma_addr_as_priv(p) : kbase_dma_addr(p);
| 252 | +
| 253 | + dma_sync_single_for_device(dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
131 | 254 | }
132 | 255 |
133 | 256 | static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
.. | ..
153 | 276 | struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
154 | 277 | {
155 | 278 | struct page *p;
156 | | - gfp_t gfp = GFP_HIGHUSER | __GFP_ZERO;
| 279 | + gfp_t gfp = __GFP_ZERO;
157 | 280 | struct kbase_device *const kbdev = pool->kbdev;
158 | 281 | struct device *const dev = kbdev->dev;
159 | 282 | dma_addr_t dma_addr;
.. | ..
161 | 284 |
162 | 285 | /* don't warn on higher order failures */
163 | 286 | if (pool->order)
164 | | - gfp |= __GFP_NOWARN;
| 287 | + gfp |= GFP_HIGHUSER | __GFP_NOWARN;
| 288 | + else
| 289 | + gfp |= kbase_page_migration_enabled ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
165 | 290 |
166 | 291 | p = kbdev->mgm_dev->ops.mgm_alloc_page(kbdev->mgm_dev,
167 | 292 | pool->group_id, gfp, pool->order);
.. | ..
177 | 302 | return NULL;
178 | 303 | }
179 | 304 |
180 | | - WARN_ON(dma_addr != page_to_phys(p));
181 | | - for (i = 0; i < (1u << pool->order); i++)
182 | | - kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i);
| 305 | + /* Set up page metadata for 4KB pages when page migration is enabled */
| 306 | + if (!pool->order && kbase_page_migration_enabled) {
| 307 | + INIT_LIST_HEAD(&p->lru);
| 308 | + if (!kbase_alloc_page_metadata(kbdev, p, dma_addr, pool->group_id)) {
| 309 | + dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
| 310 | + kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p,
| 311 | + pool->order);
| 312 | + return NULL;
| 313 | + }
| 314 | + } else {
| 315 | + WARN_ON(dma_addr != page_to_phys(p));
| 316 | + for (i = 0; i < (1u << pool->order); i++)
| 317 | + kbase_set_dma_addr_as_priv(p + i, dma_addr + PAGE_SIZE * i);
| 318 | + }
183 | 319 |
184 | 320 | return p;
185 | 321 | }
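
The GFP selection above boils down to three cases. Written out as a helper purely for illustration (no such function exists in the driver), the intent is that order-0 pages are allocated movable when migration is enabled, so they can later be isolated and migrated, while higher-order requests keep GFP_HIGHUSER and add __GFP_NOWARN because their failure is tolerated:

        static gfp_t kbase_pool_gfp(const struct kbase_mem_pool *pool)
        {
                gfp_t gfp = __GFP_ZERO;

                if (pool->order)        /* large pages: failure is tolerated, don't warn */
                        gfp |= GFP_HIGHUSER | __GFP_NOWARN;
                else if (kbase_page_migration_enabled)
                        gfp |= GFP_HIGHUSER_MOVABLE;    /* candidate for migration/compaction */
                else
                        gfp |= GFP_HIGHUSER;

                return gfp;
        }
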
186 | 322 |
187 | | -static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
188 | | - struct page *p)
| 323 | +static void enqueue_free_pool_pages_work(struct kbase_mem_pool *pool)
189 | 324 | {
190 | | - struct kbase_device *const kbdev = pool->kbdev;
191 | | - struct device *const dev = kbdev->dev;
192 | | - dma_addr_t dma_addr = kbase_dma_addr(p);
193 | | - int i;
| 325 | + struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
194 | 326 |
195 | | - dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order),
196 | | - DMA_BIDIRECTIONAL);
197 | | - for (i = 0; i < (1u << pool->order); i++)
198 | | - kbase_clear_dma_addr(p+i);
| 327 | + if (!pool->order && kbase_page_migration_enabled)
| 328 | + queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
| 329 | +}
199 | 330 |
200 | | - kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev,
201 | | - pool->group_id, p, pool->order);
| 331 | +void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p)
| 332 | +{
| 333 | + struct kbase_device *kbdev;
202 | 334 |
203 | | - pool_dbg(pool, "freed page to kernel\n");
| 335 | + if (WARN_ON(!pool))
| 336 | + return;
| 337 | + if (WARN_ON(!p))
| 338 | + return;
| 339 | +
| 340 | + kbdev = pool->kbdev;
| 341 | +
| 342 | + if (!pool->order && kbase_page_migration_enabled) {
| 343 | + kbase_free_page_later(kbdev, p);
| 344 | + pool_dbg(pool, "page to be freed to kernel later\n");
| 345 | + } else {
| 346 | + int i;
| 347 | + dma_addr_t dma_addr = kbase_dma_addr_as_priv(p);
| 348 | +
| 349 | + for (i = 0; i < (1u << pool->order); i++)
| 350 | + kbase_clear_dma_addr_as_priv(p + i);
| 351 | +
| 352 | + dma_unmap_page(kbdev->dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
| 353 | +
| 354 | + kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p, pool->order);
| 355 | +
| 356 | + pool_dbg(pool, "freed page to kernel\n");
| 357 | + }
204 | 358 | }
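
With page migration enabled, kbase_mem_pool_free_page() no longer returns order-0 pages to the kernel synchronously: it only queues them through kbase_free_page_later(), and the caller is expected to kick the free_pages_work worker afterwards. The rest of the patch follows the calling convention sketched below (illustrative; the worker itself lives in the page-migration code, not in this file):

        kbase_mem_pool_lock(pool);
        while ((p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS)))
                kbase_mem_pool_free_page(pool, p);      /* may only queue the page */
        kbase_mem_pool_unlock(pool);

        /* No-op unless this is an order-0 pool with migration enabled. */
        enqueue_free_pool_pages_work(pool);
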
205 | 359 |
206 | 360 | static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
.. | ..
212 | 366 | lockdep_assert_held(&pool->pool_lock);
213 | 367 |
214 | 368 | for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
215 | | - p = kbase_mem_pool_remove_locked(pool);
| 369 | + p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
216 | 370 | kbase_mem_pool_free_page(pool, p);
217 | 371 | }
| 372 | +
| 373 | + /* Freeing of pages will be deferred when page migration is enabled. */
| 374 | + enqueue_free_pool_pages_work(pool);
218 | 375 |
219 | 376 | return i;
220 | 377 | }
.. | ..
231 | 388 | return nr_freed;
232 | 389 | }
233 | 390 |
234 | | -int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
235 | | - size_t nr_to_grow)
| 391 | +int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
| 392 | + struct task_struct *page_owner)
236 | 393 | {
237 | 394 | struct page *p;
238 | 395 | size_t i;
| 396 | + const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
239 | 397 |
240 | 398 | kbase_mem_pool_lock(pool);
241 | 399 |
.. | ..
249 | 407 | return -ENOMEM;
250 | 408 | }
251 | 409 | kbase_mem_pool_unlock(pool);
| 410 | +
| 411 | + if (unlikely(!can_alloc_page(pool, page_owner, alloc_from_kthread)))
| 412 | + return -ENOMEM;
252 | 413 |
253 | 414 | p = kbase_mem_alloc_page(pool);
254 | 415 | if (!p) {
.. | ..
267 | 428 |
268 | 429 | return 0;
269 | 430 | }
| 431 | +KBASE_EXPORT_TEST_API(kbase_mem_pool_grow);
270 | 432 |
271 | 433 | void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
272 | 434 | {
.. | ..
281 | 443 | if (new_size < cur_size)
282 | 444 | kbase_mem_pool_shrink(pool, cur_size - new_size);
283 | 445 | else if (new_size > cur_size)
284 | | - err = kbase_mem_pool_grow(pool, new_size - cur_size);
| 446 | + err = kbase_mem_pool_grow(pool, new_size - cur_size, NULL);
285 | 447 |
286 | 448 | if (err) {
287 | 449 | size_t grown_size = kbase_mem_pool_size(pool);
.. | ..
322 | 484 | kbase_mem_pool_lock(pool);
323 | 485 | if (pool->dont_reclaim && !pool->dying) {
324 | 486 | kbase_mem_pool_unlock(pool);
| 487 | + /* Tell the shrinker to skip reclaim
| 488 | + * even though freeable pages are available
| 489 | + */
325 | 490 | return 0;
326 | 491 | }
327 | 492 | pool_size = kbase_mem_pool_size(pool);
.. | ..
341 | 506 | kbase_mem_pool_lock(pool);
342 | 507 | if (pool->dont_reclaim && !pool->dying) {
343 | 508 | kbase_mem_pool_unlock(pool);
344 | | - return 0;
| 509 | + /* Tell the shrinker that no pages can be reclaimed and that
| 510 | + * it should not attempt again for this reclaim context.
| 511 | + */
| 512 | + return SHRINK_STOP;
345 | 513 | }
346 | 514 |
347 | 515 | pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
.. | ..
355 | 523 | return freed;
356 | 524 | }
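
The two reclaim hunks above treat the shrinker's count and scan callbacks differently when dont_reclaim is set: the count callback keeps returning 0, which merely reports that nothing is freeable right now, while the scan callback now returns SHRINK_STOP so the reclaim core stops calling this shrinker for the rest of the current reclaim pass instead of reading the result as "zero objects freed, keep trying". A generic skeleton of such a callback pair, for reference only (not kbase code; the conditions are hypothetical):

        static unsigned long pool_count_objects(struct shrinker *s, struct shrink_control *sc)
        {
                if (nothing_reclaimable)        /* hypothetical condition */
                        return 0;               /* nothing to scan; scan won't be called */
                return freeable_pages;
        }

        static unsigned long pool_scan_objects(struct shrinker *s, struct shrink_control *sc)
        {
                if (cannot_reclaim_now)         /* hypothetical condition */
                        return SHRINK_STOP;     /* give up for this reclaim context */
                return pages_freed;
        }
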
357 | 525 |
358 | | -int kbase_mem_pool_init(struct kbase_mem_pool *pool,
359 | | - const struct kbase_mem_pool_config *config,
360 | | - unsigned int order,
361 | | - int group_id,
362 | | - struct kbase_device *kbdev,
363 | | - struct kbase_mem_pool *next_pool)
| 526 | +int kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config,
| 527 | + unsigned int order, int group_id, struct kbase_device *kbdev,
| 528 | + struct kbase_mem_pool *next_pool)
364 | 529 | {
365 | 530 | if (WARN_ON(group_id < 0) ||
366 | 531 | WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
.. | ..
374 | 539 | pool->kbdev = kbdev;
375 | 540 | pool->next_pool = next_pool;
376 | 541 | pool->dying = false;
| 542 | + atomic_set(&pool->isolation_in_progress_cnt, 0);
377 | 543 |
378 | 544 | spin_lock_init(&pool->pool_lock);
379 | 545 | INIT_LIST_HEAD(&pool->page_list);
.. | ..
385 | 551 | * struct shrinker does not define batch
386 | 552 | */
387 | 553 | pool->reclaim.batch = 0;
| 554 | +#if KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE
388 | 555 | register_shrinker(&pool->reclaim);
| 556 | +#else
| 557 | + register_shrinker(&pool->reclaim, "mali-mem-pool");
| 558 | +#endif
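
The version check is needed because register_shrinker() gained a printf-style name argument in Linux 6.0, used to identify the shrinker in debugfs. If the same check were ever needed in more than one place, it could be wrapped once, as in this illustrative sketch (the patch simply open-codes the #if):

        static inline int kbase_register_shrinker(struct shrinker *s, const char *name)
        {
        #if KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE
                return register_shrinker(s);
        #else
                return register_shrinker(s, "%s", name);
        #endif
        }
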
389 | 559 |
390 | 560 | pool_dbg(pool, "initialized\n");
391 | 561 |
392 | 562 | return 0;
393 | 563 | }
| 564 | +KBASE_EXPORT_TEST_API(kbase_mem_pool_init);
394 | 565 |
395 | 566 | void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool)
396 | 567 | {
.. | ..
422 | 593 |
423 | 594 | /* Zero pages first without holding the next_pool lock */
424 | 595 | for (i = 0; i < nr_to_spill; i++) {
425 | | - p = kbase_mem_pool_remove_locked(pool);
426 | | - list_add(&p->lru, &spill_list);
| 596 | + p = kbase_mem_pool_remove_locked(pool, SPILL_IN_PROGRESS);
| 597 | + if (p)
| 598 | + list_add(&p->lru, &spill_list);
427 | 599 | }
428 | 600 | }
429 | 601 |
430 | 602 | while (!kbase_mem_pool_is_empty(pool)) {
431 | 603 | /* Free remaining pages to kernel */
432 | | - p = kbase_mem_pool_remove_locked(pool);
433 | | - list_add(&p->lru, &free_list);
| 604 | + p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
| 605 | + if (p)
| 606 | + list_add(&p->lru, &free_list);
434 | 607 | }
435 | 608 |
436 | 609 | kbase_mem_pool_unlock(pool);
.. | ..
450 | 623 | kbase_mem_pool_free_page(pool, p);
451 | 624 | }
452 | 625 |
| 626 | + /* Freeing of pages will be deferred when page migration is enabled. */
| 627 | + enqueue_free_pool_pages_work(pool);
| 628 | +
| 629 | + /* Before returning, wait to make sure there are no pages undergoing page isolation,
| 630 | + * since isolation still requires a reference to this pool.
| 631 | + */
| 632 | + while (atomic_read(&pool->isolation_in_progress_cnt))
| 633 | + cpu_relax();
| 634 | +
453 | 635 | pool_dbg(pool, "terminated\n");
454 | 636 | }
| 637 | +KBASE_EXPORT_TEST_API(kbase_mem_pool_term);
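
The busy-wait on isolation_in_progress_cnt at the end of kbase_mem_pool_term() exists because a page that is currently being isolated for migration still refers to this pool through its metadata (page_md->data.mem_pool.pool), so termination must not complete while such an isolation is in flight. The increment and decrement sites are in the page-migration code rather than in this file; the pairing the wait relies on is presumably of this shape (hypothetical sketch):

        atomic_inc(&pool->isolation_in_progress_cnt);
        /* ... isolate the page; may dereference page_md->data.mem_pool.pool ... */
        atomic_dec(&pool->isolation_in_progress_cnt);
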
455 | 638 |
456 | 639 | struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
457 | 640 | {
.. | ..
459 | 642 |
460 | 643 | do {
461 | 644 | pool_dbg(pool, "alloc()\n");
462 | | - p = kbase_mem_pool_remove(pool);
| 645 | + p = kbase_mem_pool_remove(pool, ALLOCATE_IN_PROGRESS);
463 | 646 |
464 | 647 | if (p)
465 | 648 | return p;
.. | ..
472 | 655 |
473 | 656 | struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool)
474 | 657 | {
475 | | - struct page *p;
476 | | -
477 | 658 | lockdep_assert_held(&pool->pool_lock);
478 | 659 |
479 | 660 | pool_dbg(pool, "alloc_locked()\n");
480 | | - p = kbase_mem_pool_remove_locked(pool);
481 | | -
482 | | - if (p)
483 | | - return p;
484 | | -
485 | | - return NULL;
| 661 | + return kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
486 | 662 | }
487 | 663 |
488 | 664 | void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
.. | ..
504 | 680 | } else {
505 | 681 | /* Free page */
506 | 682 | kbase_mem_pool_free_page(pool, p);
| 683 | + /* Freeing of pages will be deferred when page migration is enabled. */
| 684 | + enqueue_free_pool_pages_work(pool);
507 | 685 | }
508 | 686 | }
509 | 687 |
.. | ..
523 | 701 | } else {
524 | 702 | /* Free page */
525 | 703 | kbase_mem_pool_free_page(pool, p);
| 704 | + /* Freeing of pages will be deferred when page migration is enabled. */
| 705 | + enqueue_free_pool_pages_work(pool);
526 | 706 | }
527 | 707 | }
528 | 708 |
529 | 709 | int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
530 | | - struct tagged_addr *pages, bool partial_allowed)
| 710 | + struct tagged_addr *pages, bool partial_allowed,
| 711 | + struct task_struct *page_owner)
531 | 712 | {
532 | 713 | struct page *p;
533 | 714 | size_t nr_from_pool;
534 | 715 | size_t i = 0;
535 | 716 | int err = -ENOMEM;
536 | 717 | size_t nr_pages_internal;
| 718 | + const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
537 | 719 |
538 | 720 | nr_pages_internal = nr_4k_pages / (1u << (pool->order));
539 | 721 |
.. | ..
546 | 728 | /* Get pages from this pool */
547 | 729 | kbase_mem_pool_lock(pool);
548 | 730 | nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool));
| 731 | +
549 | 732 | while (nr_from_pool--) {
550 | 733 | int j;
551 | | - p = kbase_mem_pool_remove_locked(pool);
| 734 | +
| 735 | + p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
| 736 | +
552 | 737 | if (pool->order) {
553 | 738 | pages[i++] = as_tagged_tag(page_to_phys(p),
554 | 739 | HUGE_HEAD | HUGE_PAGE);
.. | ..
564 | 749 |
565 | 750 | if (i != nr_4k_pages && pool->next_pool) {
566 | 751 | /* Allocate via next pool */
567 | | - err = kbase_mem_pool_alloc_pages(pool->next_pool,
568 | | - nr_4k_pages - i, pages + i, partial_allowed);
| 752 | + err = kbase_mem_pool_alloc_pages(pool->next_pool, nr_4k_pages - i, pages + i,
| 753 | + partial_allowed, page_owner);
569 | 754 |
570 | 755 | if (err < 0)
571 | 756 | goto err_rollback;
.. | ..
574 | 759 | } else {
575 | 760 | /* Get any remaining pages from kernel */
576 | 761 | while (i != nr_4k_pages) {
| 762 | + if (unlikely(!can_alloc_page(pool, page_owner, alloc_from_kthread)))
| 763 | + goto err_rollback;
| 764 | +
577 | 765 | p = kbase_mem_alloc_page(pool);
578 | 766 | if (!p) {
579 | 767 | if (partial_allowed)
.. | ..
636 | 824 | for (i = 0; i < nr_pages_internal; i++) {
637 | 825 | int j;
638 | 826 |
639 | | - p = kbase_mem_pool_remove_locked(pool);
| 827 | + p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
640 | 828 | if (pool->order) {
641 | 829 | *pages++ = as_tagged_tag(page_to_phys(p),
642 | 830 | HUGE_HEAD | HUGE_PAGE);
.. | ..
743 | 931 | size_t nr_to_pool;
744 | 932 | LIST_HEAD(to_pool_list);
745 | 933 | size_t i = 0;
| 934 | + bool pages_released = false;
746 | 935 |
747 | 936 | pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
748 | 937 |
.. | ..
775 | 964 | pages[i] = as_tagged(0);
776 | 965 | continue;
777 | 966 | }
778 | | -
779 | 967 | p = as_page(pages[i]);
780 | 968 |
781 | 969 | kbase_mem_pool_free_page(pool, p);
782 | 970 | pages[i] = as_tagged(0);
| 971 | + pages_released = true;
783 | 972 | }
| 973 | +
| 974 | + /* Freeing of pages will be deferred when page migration is enabled. */
| 975 | + if (pages_released)
| 976 | + enqueue_free_pool_pages_work(pool);
784 | 977 |
785 | 978 | pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
786 | 979 | }
.. | ..
794 | 987 | size_t nr_to_pool;
795 | 988 | LIST_HEAD(to_pool_list);
796 | 989 | size_t i = 0;
| 990 | + bool pages_released = false;
797 | 991 |
798 | 992 | lockdep_assert_held(&pool->pool_lock);
799 | 993 |
.. | ..
824 | 1018 |
825 | 1019 | kbase_mem_pool_free_page(pool, p);
826 | 1020 | pages[i] = as_tagged(0);
| 1021 | + pages_released = true;
827 | 1022 | }
828 | 1023 |
| 1024 | + /* Freeing of pages will be deferred when page migration is enabled. */
| 1025 | + if (pages_released)
| 1026 | + enqueue_free_pool_pages_work(pool);
| 1027 | +
829 | 1028 | pool_dbg(pool, "free_pages_locked(%zu) done\n", nr_pages);
830 | 1029 | }