@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * DMA Pool allocator
  *
  * Copyright 2001 David Brownell
  * Copyright 2007 Intel Corporation
  * Author: Matthew Wilcox <willy@linux.intel.com>
- *
- * This software may be redistributed and/or modified under the terms of
- * the GNU General Public License ("GPL") version 2 as published by the
- * Free Software Foundation.
  *
  * This allocator returns small blocks of a given size which are DMA-able by
  * the given device. It uses the dma_alloc_coherent page allocator to get
@@ -114,10 +111,9 @@
  * @size: size of the blocks in this pool.
  * @align: alignment requirement for blocks; must be a power of two
  * @boundary: returned blocks won't cross this power of two boundary
- * Context: !in_interrupt()
+ * Context: not in_interrupt()
  *
- * Returns a dma allocation pool with the requested characteristics, or
- * null if one can't be created. Given one of these pools, dma_pool_alloc()
+ * Given one of these pools, dma_pool_alloc()
  * may be used to allocate memory. Such memory will all have "consistent"
  * DMA mappings, accessible by the device and its driver without using
  * cache flushing primitives. The actual size of blocks allocated may be
@@ -127,6 +123,9 @@
  * cross that size boundary. This is useful for devices which have
  * addressing restrictions on individual DMA transfers, such as not crossing
  * boundaries of 4KBytes.
+ *
+ * Return: a dma allocation pool with the requested characteristics, or
+ * %NULL if one can't be created.
  */
 struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 				 size_t size, size_t align, size_t boundary)
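
For readers new to this API, a minimal usage sketch (not part of the patch; the device pointer and the sizes are hypothetical): a driver typically creates one pool per fixed-size DMA object at probe time.

	/* Sketch only: "dev" is assumed to be a valid struct device from
	 * the driver's probe path. 64-byte blocks, 64-byte aligned, with
	 * no block crossing a 4096-byte boundary.
	 */
	struct dma_pool *pool;

	pool = dma_pool_create("example-pool", dev, 64, 64, 4096);
	if (!pool)
		return -ENOMEM;	/* pool creation failed */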
@@ -145,9 +144,7 @@
 	else if (size < 4)
 		size = 4;
 
-	if ((size % align) != 0)
-		size = ALIGN(size, align);
-
+	size = ALIGN(size, align);
 	allocation = max_t(size_t, size, PAGE_SIZE);
 
 	if (!boundary)
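
The dropped "(size % align) != 0" test was redundant rather than wrong: ALIGN() rounds up to the next multiple of a power-of-two alignment and is a no-op on an already-aligned value, so the unconditional call behaves identically. A sketch of the identity:

	/* ALIGN(x, a) == ((x + a - 1) & ~(a - 1)) for power-of-two "a",
	 * so ALIGN(24, 8) == 24 (unchanged) and ALIGN(20, 8) == 24.
	 */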
@@ -269,6 +266,7 @@
  */
 void dma_pool_destroy(struct dma_pool *pool)
 {
+	struct dma_page *page, *tmp;
 	bool empty = false;
 
 	if (unlikely(!pool))
@@ -284,17 +282,13 @@
 	device_remove_file(pool->dev, &dev_attr_pools);
 	mutex_unlock(&pools_reg_lock);
 
-	while (!list_empty(&pool->page_list)) {
-		struct dma_page *page;
-		page = list_entry(pool->page_list.next,
-				  struct dma_page, page_list);
+	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
 		if (is_page_busy(page)) {
 			if (pool->dev)
-				dev_err(pool->dev,
-					"dma_pool_destroy %s, %p busy\n",
+				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
 					pool->name, page->vaddr);
 			else
-				pr_err("dma_pool_destroy %s, %p busy\n",
+				pr_err("%s %s, %p busy\n", __func__,
 				       pool->name, page->vaddr);
 			/* leak the still-in-use consistent memory */
 			list_del(&page->page_list);
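
The loop rewrite uses the list_for_each_entry_safe() idiom, which is required here because the body calls list_del() on the current entry: the extra "tmp" cursor is loaded before the body runs, so deleting (and later freeing) "page" cannot break the traversal. The bare pattern, for reference:

	struct dma_page *page, *tmp;

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		list_del(&page->page_list);	/* safe: tmp already holds
						 * the next entry */
		kfree(page);
	}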
@@ -313,7 +307,7 @@
  * @mem_flags: GFP_* bitmask
  * @handle: pointer to dma address of block
  *
- * This returns the kernel virtual address of a currently unused block,
+ * Return: the kernel virtual address of a currently unused block,
  * and reports its dma address through the handle.
  * If such a memory block can't be allocated, %NULL is returned.
  */
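
Pairing with the create example above, a hedged allocation sketch against the documented API (error paths trimmed; "pool" is the hypothetical pool from earlier):

	dma_addr_t handle;
	void *vaddr;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!vaddr)
		return -ENOMEM;
	/* hand "handle" to the device, touch the block via "vaddr" */
	dma_pool_free(pool, vaddr, handle);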
@@ -358,12 +352,11 @@
 			if (data[i] == POOL_POISON_FREED)
 				continue;
 			if (pool->dev)
-				dev_err(pool->dev,
-					"dma_pool_alloc %s, %p (corrupted)\n",
-					pool->name, retval);
+				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+					__func__, pool->name, retval);
 			else
-				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
-				       pool->name, retval);
+				pr_err("%s %s, %p (corrupted)\n",
+				       __func__, pool->name, retval);
 
 			/*
 			 * Dump the first 4 bytes even if they are not
@@ -419,12 +412,11 @@
 	if (!page) {
 		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
-			dev_err(pool->dev,
-				"dma_pool_free %s, %p/%lx (bad dma)\n",
-				pool->name, vaddr, (unsigned long)dma);
+			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
+				__func__, pool->name, vaddr, &dma);
 		else
-			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
-			       pool->name, vaddr, (unsigned long)dma);
+			pr_err("%s %s, %p/%pad (bad dma)\n",
+			       __func__, pool->name, vaddr, &dma);
 		return;
 	}
 
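
Beyond the __func__ cleanup, switching from "%lx" plus a cast to "%pad" fixes a real reporting problem: dma_addr_t may be wider than unsigned long (64-bit DMA addresses on a 32-bit kernel with CONFIG_ARCH_DMA_ADDR_T_64BIT), so the old cast could truncate the printed address. %pad dereferences a pointer to the dma_addr_t, hence the &dma at the call sites. An illustrative sketch (the value is made up):

	dma_addr_t dma = (dma_addr_t)0x1ffffffffULL;	/* needs 33 bits */

	pr_err("dma %pad\n", &dma);			/* full value */
	pr_err("dma %lx\n", (unsigned long)dma);	/* truncated on 32-bit */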
@@ -435,12 +427,11 @@
 	if ((dma - page->dma) != offset) {
 		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
-			dev_err(pool->dev,
-				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
-				pool->name, vaddr, &dma);
+			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+				__func__, pool->name, vaddr, &dma);
 		else
-			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
-			       pool->name, vaddr, &dma);
+			pr_err("%s %s, %p (bad vaddr)/%pad\n",
+			       __func__, pool->name, vaddr, &dma);
 		return;
 	}
 	{
@@ -452,11 +443,11 @@
 			}
 			spin_unlock_irqrestore(&pool->lock, flags);
 			if (pool->dev)
-				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
-					pool->name, &dma);
+				dev_err(pool->dev, "%s %s, dma %pad already free\n",
+					__func__, pool->name, &dma);
 			else
-				pr_err("dma_pool_free %s, dma %pad already free\n",
-				       pool->name, &dma);
+				pr_err("%s %s, dma %pad already free\n",
+				       __func__, pool->name, &dma);
 			return;
 		}
 	}
@@ -500,6 +491,9 @@
  *
  * Managed dma_pool_create(). DMA pool created with this function is
  * automatically destroyed on driver detach.
+ *
+ * Return: a managed dma allocation pool with the requested
+ * characteristics, or %NULL if one can't be created.
  */
 struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
 				  size_t size, size_t align, size_t allocation)
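
A short sketch of the managed variant, assuming a probe() context (the function and its names are hypothetical): no dma_pool_destroy() call is needed, since devres tears the pool down when the driver detaches.

	static int example_probe(struct device *dev)
	{
		struct dma_pool *pool;

		/* boundary of 0 means no boundary restriction */
		pool = dmam_pool_create("example-pool", dev, 64, 64, 0);
		if (!pool)
			return -ENOMEM;
		/* use the pool; devres destroys it on driver detach */
		return 0;
	}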