@@ -3,7 +3,7 @@
 ** PARISC 1.1 Dynamic DMA mapping support.
 ** This implementation is for PA-RISC platforms that do not support
 ** I/O TLBs (aka DMA address translation hardware).
-** See Documentation/DMA-API-HOWTO.txt for interface definitions.
+** See Documentation/core-api/dma-api-howto.rst for interface definitions.
 **
 ** (c) Copyright 1999,2000 Hewlett-Packard Company
 ** (c) Copyright 2000 Grant Grundler
@@ -26,12 +26,11 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 
 #include <asm/cacheflush.h>
 #include <asm/dma.h>	/* for DMA_CHUNK_SIZE */
 #include <asm/io.h>
 #include <asm/page.h>	/* get_order */
-#include <asm/pgalloc.h>
 #include <linux/uaccess.h>
 #include <asm/tlbflush.h>	/* for purge_tlb_*() macros */
 
@@ -133,9 +132,14 @@
 
 	dir = pgd_offset_k(vaddr);
 	do {
+		p4d_t *p4d;
+		pud_t *pud;
 		pmd_t *pmd;
-
-		pmd = pmd_alloc(NULL, dir, vaddr);
+
+		p4d = p4d_offset(dir, vaddr);
+		pud = pud_offset(p4d, vaddr);
+		pmd = pmd_alloc(NULL, pud, vaddr);
+
 		if (!pmd)
			return -ENOMEM;
 		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
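The hunk above swaps the old shortcut (pgd straight to pmd) for the generic five-level page-table walk. A minimal sketch of the pattern, not part of this patch; the helper name walk_to_pmd is hypothetical:

```c
/*
 * Hypothetical helper, illustration only: the five-level walk adopted
 * above.  On two- or three-level page tables the p4d/pud levels are
 * folded, so p4d_offset()/pud_offset() just pass the pointer through
 * and the extra calls cost nothing.
 */
static pmd_t *walk_to_pmd(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);	/* kernel address space */
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);

	/* pmd_alloc() allocates the PMD page if none is present yet */
	return pmd_alloc(NULL, pud, vaddr);
}
```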
@@ -196,7 +200,7 @@
 		pgd_clear(dir);
 		return;
 	}
-	pmd = pmd_offset(dir, vaddr);
+	pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
 	vaddr &= ~PGDIR_MASK;
 	end = vaddr + size;
 	if (end > PGDIR_SIZE)
@@ -394,17 +398,20 @@
 
 __initcall(pcxl_dma_init);
 
-static void *pcxl_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	unsigned long vaddr;
 	unsigned long paddr;
 	int order;
 
+	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
+		return NULL;
+
 	order = get_order(size);
 	size = 1 << (order + PAGE_SHIFT);
 	vaddr = pcxl_alloc_range(size);
-	paddr = __get_free_pages(flag, order);
+	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
 	flush_kernel_dcache_range(paddr, size);
 	paddr = __pa(paddr);
 	map_uncached_pages(vaddr, size, paddr);
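Drivers never call arch_dma_alloc() directly; the dma-mapping core dispatches to it. A caller-side sketch, illustrative only and not from this patch (the function name example_coherent_alloc is hypothetical):

```c
#include <linux/dma-mapping.h>

/*
 * Illustrative only: dma_alloc_coherent() is what lands in
 * arch_dma_alloc() above.  On machines other than pcxl/pcxl2 the new
 * NULL return makes the coherent allocation fail cleanly here.
 */
static int example_coherent_alloc(struct device *dev)	/* hypothetical */
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program dma_handle into the device, use cpu_addr from the CPU ... */
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}
```

The added __GFP_ZERO matches the dma_alloc_coherent() contract, which guarantees zeroed memory; the old code did not provide that.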
@@ -421,60 +428,45 @@
 	return (void *)vaddr;
 }
 
-static void *pcx_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
-	void *addr;
-
-	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
-		return NULL;
-
-	addr = (void *)__get_free_pages(flag, get_order(size));
-	if (addr)
-		*dma_handle = (dma_addr_t)virt_to_phys(addr);
-
-	return addr;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
-		return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
-	else
-		return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
-}
-
 void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	int order = get_order(size);
 
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
-		size = 1 << (order + PAGE_SHIFT);
-		unmap_uncached_pages((unsigned long)vaddr, size);
-		pcxl_free_range((unsigned long)vaddr, size);
+	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
+		     boot_cpu_data.cpu_type != pcxl);
 
-		vaddr = __va(dma_handle);
+	size = 1 << (order + PAGE_SHIFT);
+	unmap_uncached_pages((unsigned long)vaddr, size);
+	pcxl_free_range((unsigned long)vaddr, size);
+
+	free_pages((unsigned long)__va(dma_handle), order);
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	/*
+	 * fdc: The data cache line is written back to memory, if and only if
+	 * it is dirty, and then invalidated from the data cache.
+	 */
+	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	unsigned long addr = (unsigned long) phys_to_virt(paddr);
+
+	switch (dir) {
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		flush_kernel_dcache_range(addr, size);
+		return;
+	case DMA_FROM_DEVICE:
+		purge_kernel_dcache_range_asm(addr, addr + size);
+		return;
+	default:
+		BUG();
 	}
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
-{
-	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
-}
-
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
-{
-	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
-}
-
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	flush_kernel_dcache_range((unsigned long)vaddr, size);
-}
 }
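These two hooks back the streaming DMA API on non-coherent parisc. A hedged sketch of a receive-path sequence that exercises them (function and variable names are illustrative, not from this patch):

```c
#include <linux/dma-mapping.h>

/*
 * Illustrative only.  dma_map_single() reaches
 * arch_sync_dma_for_device(); dma_sync_single_for_cpu() and
 * dma_unmap_single() reach arch_sync_dma_for_cpu().  For
 * DMA_FROM_DEVICE the new code purges (invalidates without
 * writeback) so stale dirty cachelines cannot overwrite data the
 * device just wrote.
 */
static int example_rx(struct device *dev, void *buf, size_t len)	/* hypothetical */
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... device DMAs into buf via 'dma' ... */

	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	/* the CPU may now safely read buf */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}
```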
---|