2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/mm/dmapool.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * DMA Pool allocator
  *
  * Copyright 2001 David Brownell
  * Copyright 2007 Intel Corporation
  * Author: Matthew Wilcox <willy@linux.intel.com>
- *
- * This software may be redistributed and/or modified under the terms of
- * the GNU General Public License ("GPL") version 2 as published by the
- * Free Software Foundation.
  *
  * This allocator returns small blocks of a given size which are DMA-able by
  * the given device. It uses the dma_alloc_coherent page allocator to get
@@ -114,10 +111,9 @@
  * @size: size of the blocks in this pool.
  * @align: alignment requirement for blocks; must be a power of two
  * @boundary: returned blocks won't cross this power of two boundary
- * Context: !in_interrupt()
+ * Context: not in_interrupt()
  *
- * Returns a dma allocation pool with the requested characteristics, or
- * null if one can't be created. Given one of these pools, dma_pool_alloc()
+ * Given one of these pools, dma_pool_alloc()
  * may be used to allocate memory. Such memory will all have "consistent"
  * DMA mappings, accessible by the device and its driver without using
  * cache flushing primitives. The actual size of blocks allocated may be
@@ -127,6 +123,9 @@
  * cross that size boundary. This is useful for devices which have
  * addressing restrictions on individual DMA transfers, such as not crossing
  * boundaries of 4KBytes.
+ *
+ * Return: a dma allocation pool with the requested characteristics, or
+ * %NULL if one can't be created.
  */
 struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 				 size_t size, size_t align, size_t boundary)
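
For reference, a minimal usage sketch of the API documented above (the device pointer, pool name, and sizes are invented for illustration, not taken from this patch):

	struct dma_pool *pool;

	/* 64-byte blocks, 16-byte aligned, never crossing a 4 KB boundary */
	pool = dma_pool_create("mydev_descs", dev, 64, 16, 4096);
	if (!pool)
		return -ENOMEM;	/* the new Return: line documents NULL on failure */
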
@@ -145,9 +144,7 @@
 	else if (size < 4)
 		size = 4;
 
-	if ((size % align) != 0)
-		size = ALIGN(size, align);
-
+	size = ALIGN(size, align);
 	allocation = max_t(size_t, size, PAGE_SIZE);
 
 	if (!boundary)
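
The dropped `if ((size % align) != 0)` guard was redundant: ALIGN() returns its argument unchanged when it is already a multiple of the alignment, so calling it unconditionally is equivalent. A standalone userspace sketch of the same round-up arithmetic (align_up() is a stand-in for the kernel's ALIGN() macro):

	#include <assert.h>
	#include <stddef.h>

	/* Round x up to the next multiple of a; a must be a power of two. */
	static size_t align_up(size_t x, size_t a)
	{
		return (x + a - 1) & ~(a - 1);
	}

	int main(void)
	{
		assert(align_up(100, 64) == 128);	/* rounds up */
		assert(align_up(128, 64) == 128);	/* aligned input is unchanged */
		return 0;
	}
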
@@ -269,6 +266,7 @@
  */
 void dma_pool_destroy(struct dma_pool *pool)
 {
+	struct dma_page *page, *tmp;
 	bool empty = false;
 
 	if (unlikely(!pool))
@@ -284,17 +282,13 @@
 	device_remove_file(pool->dev, &dev_attr_pools);
 	mutex_unlock(&pools_reg_lock);
 
-	while (!list_empty(&pool->page_list)) {
-		struct dma_page *page;
-		page = list_entry(pool->page_list.next,
-				  struct dma_page, page_list);
+	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
 		if (is_page_busy(page)) {
 			if (pool->dev)
-				dev_err(pool->dev,
-					"dma_pool_destroy %s, %p busy\n",
+				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
 					pool->name, page->vaddr);
 			else
-				pr_err("dma_pool_destroy %s, %p busy\n",
+				pr_err("%s %s, %p busy\n", __func__,
 				       pool->name, page->vaddr);
 			/* leak the still-in-use consistent memory */
 			list_del(&page->page_list);
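
The conversion is safe because list_for_each_entry_safe() caches the next entry (tmp) before the loop body runs, so the list_del() above may unlink the current page; the open-coded while/list_entry loop achieved the same by re-reading the list head each iteration. A standalone userspace sketch of the same safe-iteration idiom, with an invented singly linked node type:

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int val;
		struct node *next;
	};

	/* Save the successor in n before the body may free pos. */
	#define for_each_node_safe(pos, n, head) \
		for (pos = (head); pos && (n = pos->next, 1); pos = n)

	int main(void)
	{
		struct node *head = NULL, *pos, *n;
		int i;

		for (i = 0; i < 3; i++) {	/* build list 2 -> 1 -> 0 */
			struct node *new = malloc(sizeof(*new));
			new->val = i;
			new->next = head;
			head = new;
		}
		for_each_node_safe(pos, n, head) {
			printf("%d\n", pos->val);
			free(pos);		/* safe: n was saved first */
		}
		return 0;
	}
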
@@ -313,7 +307,7 @@
  * @mem_flags: GFP_* bitmask
  * @handle: pointer to dma address of block
  *
- * This returns the kernel virtual address of a currently unused block,
+ * Return: the kernel virtual address of a currently unused block,
  * and reports its dma address through the handle.
  * If such a memory block can't be allocated, %NULL is returned.
  */
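
A hedged sketch of the alloc/free pairing this comment describes ("pool" would come from a successful dma_pool_create(); variable names are illustrative):

	dma_addr_t dma;
	void *vaddr;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!vaddr)
		return -ENOMEM;		/* %NULL is documented on failure */
	/* ... hand the dma handle to the device, use vaddr from the CPU ... */
	dma_pool_free(pool, vaddr, dma);
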
@@ -358,12 +352,11 @@
 		if (data[i] == POOL_POISON_FREED)
 			continue;
 		if (pool->dev)
-			dev_err(pool->dev,
-				"dma_pool_alloc %s, %p (corrupted)\n",
-				pool->name, retval);
+			dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+				__func__, pool->name, retval);
 		else
-			pr_err("dma_pool_alloc %s, %p (corrupted)\n",
-				pool->name, retval);
+			pr_err("%s %s, %p (corrupted)\n",
+				__func__, pool->name, retval);
 
 		/*
 		 * Dump the first 4 bytes even if they are not
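
Switching the hard-coded "dma_pool_alloc"/"dma_pool_free" prefixes to __func__ keeps the messages accurate across renames and lets identical format strings be merged. A tiny standalone demonstration (the function name is invented):

	#include <stdio.h>

	static void dma_pool_alloc_demo(void)
	{
		int block;

		/* __func__ expands to "dma_pool_alloc_demo" at compile time */
		fprintf(stderr, "%s %s, %p (corrupted)\n",
			__func__, "pool0", (void *)&block);
	}

	int main(void)
	{
		dma_pool_alloc_demo();
		return 0;
	}
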
@@ -419,12 +412,11 @@
 	if (!page) {
 		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
-			dev_err(pool->dev,
-				"dma_pool_free %s, %p/%lx (bad dma)\n",
-				pool->name, vaddr, (unsigned long)dma);
+			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
+				__func__, pool->name, vaddr, &dma);
 		else
-			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
-			       pool->name, vaddr, (unsigned long)dma);
+			pr_err("%s %s, %p/%pad (bad dma)\n",
+			       __func__, pool->name, vaddr, &dma);
 		return;
 	}
 
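
The old "%lx" plus (unsigned long) cast could truncate a dma_addr_t on 32-bit kernels where DMA addresses are 64-bit (CONFIG_ARCH_DMA_ADDR_T_64BIT); the kernel's %pad specifier instead takes a pointer to the dma_addr_t and prints its full width. A userspace illustration of the truncation hazard (the typedef stands in for the kernel's):

	#include <inttypes.h>
	#include <stdio.h>

	typedef uint64_t dma_addr_t;	/* as when dma_addr_t is 64-bit */

	int main(void)
	{
		dma_addr_t dma = 0x1ffffffffULL;	/* needs more than 32 bits */

		/* loses the top bits wherever unsigned long is 32-bit: */
		printf("cast: %lx\n", (unsigned long)dma);
		/* full width, which is what %pad guarantees in the kernel: */
		printf("full: %" PRIx64 "\n", dma);
		return 0;
	}
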
@@ -435,12 +427,11 @@
 	if ((dma - page->dma) != offset) {
 		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
-			dev_err(pool->dev,
-				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
-				pool->name, vaddr, &dma);
+			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+				__func__, pool->name, vaddr, &dma);
 		else
-			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
-			       pool->name, vaddr, &dma);
+			pr_err("%s %s, %p (bad vaddr)/%pad\n",
+			       __func__, pool->name, vaddr, &dma);
 		return;
 	}
 	{
@@ -452,11 +443,11 @@
 			}
 			spin_unlock_irqrestore(&pool->lock, flags);
 			if (pool->dev)
-				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
-					pool->name, &dma);
+				dev_err(pool->dev, "%s %s, dma %pad already free\n",
+					__func__, pool->name, &dma);
 			else
-				pr_err("dma_pool_free %s, dma %pad already free\n",
-				       pool->name, &dma);
+				pr_err("%s %s, dma %pad already free\n",
+				       __func__, pool->name, &dma);
 			return;
 		}
 	}
@@ -500,6 +491,9 @@
  *
  * Managed dma_pool_create(). DMA pool created with this function is
  * automatically destroyed on driver detach.
+ *
+ * Return: a managed dma allocation pool with the requested
+ * characteristics, or %NULL if one can't be created.
  */
 struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
 				  size_t size, size_t align, size_t allocation)
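
Because dmam_pool_create() registers the pool with devres, a probe path needs no matching destroy on error or detach. A hedged sketch (the driver, pool name, and sizes are invented for illustration):

	#include <linux/device.h>
	#include <linux/dmapool.h>

	static int mydev_probe(struct device *dev)
	{
		struct dma_pool *pool;

		pool = dmam_pool_create("mydev_bufs", dev, 64, 16, 0);
		if (!pool)
			return -ENOMEM;	/* %NULL documented on failure */

		/* no dma_pool_destroy() needed in remove() or on later errors */
		return 0;
	}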