@@ -58,11 +58,10 @@
 static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
 		__check_element(pool, element, ksize(element));
-
-	/* Mempools backed by page allocator */
-	if (pool->free == mempool_free_pages) {
+	} else if (pool->free == mempool_free_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
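The two allocator checks used to be independent `if` statements even though a pool is backed by exactly one allocator, fixed at creation time, so at most one branch can ever match; the `else if` makes that explicit and saves a pointless second comparison. Which branch applies is decided by how the pool was created. A minimal sketch of the three standard backings, using the stock helpers from include/linux/mempool.h (cache name, sizes, and reserve counts here are illustrative only):

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *io_cache;
static mempool_t *slab_pool, *kmalloc_pool, *page_pool;

static int __init pools_init(void)
{
	/* Slab-backed: the pool's free callback is mempool_free_slab. */
	io_cache = kmem_cache_create("io_cache", 256, 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	if (!io_cache)
		return -ENOMEM;
	slab_pool = mempool_create_slab_pool(4, io_cache);

	/* kmalloc-backed: the free callback is mempool_kfree. */
	kmalloc_pool = mempool_create_kmalloc_pool(4, 256);

	/* Page-backed: the free callback is mempool_free_pages and
	 * pool_data carries the allocation order (0 == single pages). */
	page_pool = mempool_create_page_pool(4, 0);

	if (!slab_pool || !kmalloc_pool || !page_pool)
		return -ENOMEM;	/* real code would unwind each step */
	return 0;
}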
@@ -82,11 +81,10 @@
 static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
 		__poison_element(element, ksize(element));
-
-	/* Mempools backed by page allocator */
-	if (pool->alloc == mempool_alloc_pages) {
+	} else if (pool->alloc == mempool_alloc_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
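For reference, __poison_element() fills a parked element with POISON_FREE bytes terminated by POISON_END, and __check_element() verifies them when the element is handed back out; any mismatch means something wrote to memory it no longer owned. A self-contained userspace sketch of that cycle, reusing the kernel's byte values from include/linux/poison.h (everything else is invented for the demo):

#include <stdio.h>
#include <string.h>

#define POISON_FREE 0x6b	/* same values as include/linux/poison.h */
#define POISON_END  0xa5

static void poison(unsigned char *obj, size_t size)
{
	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static int check(const unsigned char *obj, size_t size)
{
	for (size_t i = 0; i < size - 1; i++)
		if (obj[i] != POISON_FREE)
			return 0;	/* modified while "free": a bug */
	return obj[size - 1] == POISON_END;
}

int main(void)
{
	unsigned char elem[32];

	poison(elem, sizeof(elem));	/* element parked in the pool */
	elem[5] = 0;			/* simulated use-after-free write */
	printf("element intact: %s\n",
	       check(elem, sizeof(elem)) ? "yes" : "no");
	return 0;
}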
@@ -106,17 +104,19 @@
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element, _RET_IP_);
-	if (pool->alloc == mempool_alloc_pages)
-		kasan_free_pages(element, (unsigned long)pool->pool_data);
+		kasan_slab_free_mempool(element);
+	else if (pool->alloc == mempool_alloc_pages)
+		kasan_poison_pages(element, (unsigned long)pool->pool_data,
+				   false);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_unpoison_slab(element);
-	if (pool->alloc == mempool_alloc_pages)
-		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+		kasan_unpoison_range(element, __ksize(element));
+	else if (pool->alloc == mempool_alloc_pages)
+		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
+				     false);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
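The KASAN side mirrors the byte-pattern poisoning above but works through shadow memory instead: kasan_slab_free_mempool() poisons a slab element as if it were freed while the pool keeps holding it, and the page variants now take an explicit init flag (false here, since mempool elements must keep their contents). The net effect is that touching a parked element triggers a KASAN report. A hedged sketch of what that buys, assuming a slab-backed pool whose reserve is not yet full so the freed element is actually parked:

#include <linux/mempool.h>
#include <linux/slab.h>

static void kasan_mempool_demo(mempool_t *pool)
{
	char *elem = mempool_alloc(pool, GFP_KERNEL);

	if (!elem)
		return;
	elem[0] = 1;			/* fine: element is unpoisoned */
	mempool_free(elem, pool);
	/*
	 * If the element was parked in the reserve, its shadow is now
	 * poisoned; uncommenting the next line should produce a KASAN
	 * use-after-free report:
	 *
	 * elem[0] = 2;
	 */
}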
@@ -222,6 +222,8 @@
  *
  * Like mempool_create(), but initializes the pool in (i.e. embedded in another
  * structure).
+ *
+ * Return: %0 on success, negative error code otherwise.
  */
 int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
 		 mempool_free_t *free_fn, void *pool_data)
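The added Return: line follows kernel-doc convention and matters here because mempool_init() is the variant for pools embedded in a larger structure, where the caller checks an errno rather than a pointer. A minimal sketch (struct, cache, and reserve size are illustrative):

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_dev {
	mempool_t rq_pool;	/* embedded, hence mempool_init() */
};

static int my_dev_setup(struct my_dev *dev, struct kmem_cache *rq_cache)
{
	/* Guarantee at least 8 elements even under memory pressure;
	 * returns 0 or a negative errno, per the new Return: line. */
	return mempool_init(&dev->rq_pool, 8, mempool_alloc_slab,
			    mempool_free_slab, rq_cache);
}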
@@ -245,6 +247,8 @@
  * functions. This function might sleep. Both the alloc_fn() and the free_fn()
  * functions might sleep - as long as the mempool_alloc() function is not called
  * from IRQ contexts.
+ *
+ * Return: pointer to the created memory pool object or %NULL on error.
  */
 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 			  mempool_free_t *free_fn, void *pool_data)
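In contrast to mempool_init(), failure is signalled by a bare NULL rather than an ERR_PTR(), which is exactly what the new Return: line pins down. For instance (pool name and parameters illustrative):

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *rq_pool;

static int my_pool_create(struct kmem_cache *cache)
{
	rq_pool = mempool_create(16, mempool_alloc_slab,
				 mempool_free_slab, cache);
	if (!rq_pool)
		return -ENOMEM;	/* bare NULL on failure, never ERR_PTR() */
	return 0;
}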
@@ -289,6 +293,8 @@
  * Note, the caller must guarantee that no mempool_destroy is called
  * while this function is running. mempool_alloc() & mempool_free()
  * might be called (eg. from IRQ contexts) while this function executes.
+ *
+ * Return: %0 on success, negative error code otherwise.
  */
 int mempool_resize(mempool_t *pool, int new_min_nr)
 {
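mempool_resize() reallocates the element pointer array and grows or trims the reserve, which is why the surrounding text warns that it may sleep and must not race with mempool_destroy(). A sketch of a caller honouring the new Return: contract (function name illustrative):

#include <linux/mempool.h>
#include <linux/printk.h>

static int my_pool_grow(mempool_t *pool, int new_min)
{
	/* May sleep: resizes the element array and refills the reserve. */
	int ret = mempool_resize(pool, new_min);

	if (ret)
		pr_warn("mempool resize to %d failed: %d\n", new_min, ret);
	return ret;
}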
@@ -363,6 +369,8 @@
  * *never* fails when called from process contexts. (it might
  * fail if called from an IRQ context.)
  * Note: using __GFP_ZERO is not supported.
+ *
+ * Return: pointer to the allocated element or %NULL on error.
  */
 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 {
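The never-fails guarantee only holds for a sleepable gfp_mask from process context, where mempool_alloc() waits on the pool's waitqueue for someone to free an element instead of returning NULL. A short illustrative caller:

#include <linux/gfp.h>
#include <linux/mempool.h>

static void *my_get_element(mempool_t *pool)
{
	/* Sleepable mask from process context: waits rather than fails.
	 * __GFP_ZERO is not supported, per the note above. */
	return mempool_alloc(pool, GFP_NOIO);
}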
@@ -481,7 +489,7 @@
 	 * ensures that there will be frees which return elements to the
 	 * pool waking up the waiters.
 	 */
-	if (unlikely(pool->curr_nr < pool->min_nr)) {
+	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
 		spin_lock_irqsave(&pool->lock, flags);
 		if (likely(pool->curr_nr < pool->min_nr)) {
 			add_element(pool, element);
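The change here is subtle: pool->curr_nr is read outside pool->lock as a fast-path hint, and without READ_ONCE() the compiler is free to tear or refetch that racy load (the kind of access KCSAN reports as a data race). Correctness still comes from the recheck under the lock; the unlocked test only has to be approximately right. The general lockless-check/locked-recheck shape, in standalone form (struct pool, stash() and free_obj() are hypothetical):

#include <linux/spinlock.h>

struct pool {
	spinlock_t lock;
	int curr, min;
};

static void stash(struct pool *p, void *obj);	/* hypothetical helpers */
static void free_obj(void *obj);

static void maybe_stash(struct pool *p, void *obj)
{
	unsigned long flags;

	/* Racy hint: READ_ONCE() stops the compiler tearing or
	 * refetching the unlocked load. */
	if (unlikely(READ_ONCE(p->curr) < p->min)) {
		spin_lock_irqsave(&p->lock, flags);
		/* Authoritative recheck now that the lock is held. */
		if (likely(p->curr < p->min)) {
			stash(p, obj);
			spin_unlock_irqrestore(&p->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&p->lock, flags);
	}
	free_obj(obj);	/* reserve already full: really free it */
}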