2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/dma-buf/heaps/page_pool.c
@@ -8,13 +8,18 @@
  * Copyright (C) 2011 Google, Inc.
  */
 
-#include <linux/module.h>
 #include <linux/freezer.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/swap.h>
 #include <linux/sched/signal.h>
 #include "page_pool.h"
+
+struct dmabuf_page_pool_with_spinlock {
+	struct dmabuf_page_pool pool;
+	struct spinlock spinlock;
+};
 
 static LIST_HEAD(pool_list);
 static DEFINE_MUTEX(pool_list_lock);
@@ -36,34 +41,41 @@
 static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
 {
 	int index;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
 
 	if (PageHighMem(page))
 		index = POOL_HIGHPAGE;
 	else
 		index = POOL_LOWPAGE;
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&container_pool->spinlock);
 	list_add_tail(&page->lru, &pool->items[index]);
 	pool->count[index]++;
+	spin_unlock(&container_pool->spinlock);
 	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 			    1 << pool->order);
-	mutex_unlock(&pool->mutex);
 }
 
 static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
 {
 	struct page *page;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&container_pool->spinlock);
 	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
 	if (page) {
 		pool->count[index]--;
		list_del(&page->lru);
+		spin_unlock(&container_pool->spinlock);
 		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 				    -(1 << pool->order));
+		goto out;
 	}
-	mutex_unlock(&pool->mutex);
+	spin_unlock(&container_pool->spinlock);
 
+out:
 	return page;
 }
 
@@ -114,11 +126,16 @@
 
 struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
 {
-	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	struct dmabuf_page_pool *pool;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		kmalloc(sizeof(*container_pool), GFP_KERNEL);
 	int i;
 
-	if (!pool)
+	if (!container_pool)
 		return NULL;
+
+	spin_lock_init(&container_pool->spinlock);
+	pool = &container_pool->pool;
 
 	for (i = 0; i < POOL_TYPE_SIZE; i++) {
 		pool->count[i] = 0;
@@ -126,7 +143,8 @@
 	}
 	pool->gfp_mask = gfp_mask | __GFP_COMP;
 	pool->order = order;
-	mutex_init(&pool->mutex);
+	mutex_init(&pool->mutex); /* No longer used! */
+	mutex_lock(&pool->mutex); /* Make sure anyone who attempts to acquire this hangs */
 
 	mutex_lock(&pool_list_lock);
 	list_add(&pool->list, &pool_list);
@@ -139,6 +157,7 @@
 void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
 {
 	struct page *page;
+	struct dmabuf_page_pool_with_spinlock *container_pool;
 	int i;
 
 	/* Remove us from the pool list */
@@ -152,7 +171,8 @@
 		dmabuf_page_pool_free_pages(pool, page);
 	}
 
-	kfree(pool);
+	container_pool = container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
+	kfree(container_pool);
 }
 EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
 
@@ -245,4 +265,4 @@
 	return register_shrinker(&pool_shrinker);
 }
 module_init(dmabuf_page_pool_init_shrinker);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
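
The core trick in this patch is that struct dmabuf_page_pool itself is left untouched: the new lock lives only in a wrapper struct, and every function that is handed a pointer to the embedded pool recovers the wrapper with container_of(). Below is a minimal, self-contained userspace sketch of that embedding pattern; the struct names mirror the patch, but the member layout, the local container_of() macro, and main() are illustrative stand-ins rather than the real kernel definitions.

/*
 * Sketch of the container_of embedding pattern used in the patch above.
 * The members here are simplified placeholders, not the kernel's fields.
 */
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct dmabuf_page_pool. */
struct dmabuf_page_pool {
	int count;
	unsigned int order;
};

/* Wrapper that carries the extra lock without changing the inner layout. */
struct dmabuf_page_pool_with_spinlock {
	struct dmabuf_page_pool pool;
	int spinlock;	/* placeholder for the kernel's struct spinlock */
};

/* Same idea as the kernel's container_of(): recover the enclosing struct
 * from a pointer to one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct dmabuf_page_pool_with_spinlock wrapper = { { 0, 0 }, 0 };
	struct dmabuf_page_pool *pool = &wrapper.pool;	/* what callers see */

	/* A helper that only receives *pool can still reach the lock. */
	struct dmabuf_page_pool_with_spinlock *container_pool =
		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);

	printf("wrapper at %p, recovered at %p\n",
	       (void *)&wrapper, (void *)container_pool);
	return 0;
}

Two side effects of the change are visible in the hunks themselves: the add/remove paths now serialize on a spinlock held only around the list and counter updates, and the old pool->mutex is still initialized but then immediately locked at creation, so any leftover user of it would hang rather than race, as the in-line comments in the dmabuf_page_pool_create() hunk state.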