@@ -13,18 +13,16 @@
 #include <linux/export.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/spinlock.h>
 #include <linux/gfp.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <asm/mipsregs.h>
 #include <asm/jazz.h>
 #include <asm/io.h>
 #include <linux/uaccess.h>
 #include <asm/dma.h>
 #include <asm/jazzdma.h>
-#include <asm/pgtable.h>
 
 /*
  * Set this to one to enable additional vdma debug code.
@@ -105,12 +103,12 @@
 		if (vdma_debug)
 			printk("vdma_alloc: Invalid physical address: %08lx\n",
 			       paddr);
-		return VDMA_ERROR;	/* invalid physical address */
+		return DMA_MAPPING_ERROR;	/* invalid physical address */
 	}
 	if (size > 0x400000 || size == 0) {
 		if (vdma_debug)
 			printk("vdma_alloc: Invalid size: %08lx\n", size);
-		return VDMA_ERROR;	/* invalid physical address */
+		return DMA_MAPPING_ERROR;	/* invalid size */
 	}
 
 	spin_lock_irqsave(&vdma_lock, flags);
@@ -124,7 +122,7 @@
 	       first < VDMA_PGTBL_ENTRIES) first++;
 	if (first + pages > VDMA_PGTBL_ENTRIES) {	/* nothing free */
 		spin_unlock_irqrestore(&vdma_lock, flags);
-		return VDMA_ERROR;
+		return DMA_MAPPING_ERROR;
 	}
 
 	last = first + 1;
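
With vdma_alloc() now failing with the generic DMA_MAPPING_ERROR sentinel
instead of the private VDMA_ERROR, callers go through the common
dma_mapping_error() helper rather than comparing against a driver-specific
value. A minimal caller-side sketch (the function and names below are
hypothetical, not part of this patch):

	#include <linux/dma-mapping.h>

	static int example_map(struct device *dev, struct page *page,
			       size_t len)
	{
		dma_addr_t handle;

		/* Dispatches to jazz_dma_map_page() via jazz_dma_ops. */
		handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;	/* handle == DMA_MAPPING_ERROR */
		/* ... start the transfer ... */
		dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}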
@@ -209,76 +207,6 @@
 }
 
 EXPORT_SYMBOL(vdma_free);
-
-/*
- * Map certain page(s) to another physical address.
- * Caller must have allocated the page(s) before.
- */
-int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
-{
-	int first, pages;
-
-	if (laddr > 0xffffff) {
-		if (vdma_debug)
-			printk
-			    ("vdma_map: Invalid logical address: %08lx\n",
-			     laddr);
-		return -EINVAL;	/* invalid logical address */
-	}
-	if (paddr > 0x1fffffff) {
-		if (vdma_debug)
-			printk
-			    ("vdma_map: Invalid physical address: %08lx\n",
-			     paddr);
-		return -EINVAL;	/* invalid physical address */
-	}
-
-	pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
-	first = laddr >> 12;
-	if (vdma_debug)
-		printk("vdma_remap: first=%x, pages=%x\n", first, pages);
-	if (first + pages > VDMA_PGTBL_ENTRIES) {
-		if (vdma_debug)
-			printk("vdma_alloc: Invalid size: %08lx\n", size);
-		return -EINVAL;
-	}
-
-	paddr &= ~(VDMA_PAGESIZE - 1);
-	while (pages > 0 && first < VDMA_PGTBL_ENTRIES) {
-		if (pgtbl[first].owner != laddr) {
-			if (vdma_debug)
-				printk("Trying to remap other's pages.\n");
-			return -EPERM;	/* not owner */
-		}
-		pgtbl[first].frame = paddr;
-		paddr += VDMA_PAGESIZE;
-		first++;
-		pages--;
-	}
-
-	/*
-	 * Update translation table
-	 */
-	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
-
-	if (vdma_debug > 2) {
-		int i;
-		pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
-		first = laddr >> 12;
-		printk("LADDR: ");
-		for (i = first; i < first + pages; i++)
-			printk("%08x ", i << 12);
-		printk("\nPADDR: ");
-		for (i = first; i < first + pages; i++)
-			printk("%08x ", pgtbl[i].frame);
-		printk("\nOWNER: ");
-		for (i = first; i < first + pages; i++)
-			printk("%08x ", pgtbl[i].owner);
-		printk("\n");
-	}
-
-	return 0;
-}
 
 /*
  * Translate a physical address to a logical address.
@@ -563,32 +491,34 @@
 static void *jazz_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+	struct page *page;
 	void *ret;
 
-	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-	if (!ret)
-		return NULL;
+	if (attrs & DMA_ATTR_NO_WARN)
+		gfp |= __GFP_NOWARN;
 
+	size = PAGE_ALIGN(size);
+	page = alloc_pages(gfp, get_order(size));
+	if (!page)
+		return NULL;
+	ret = page_address(page);
+	memset(ret, 0, size);
 	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
-	if (*dma_handle == VDMA_ERROR) {
-		dma_direct_free(dev, size, ret, *dma_handle, attrs);
-		return NULL;
-	}
+	if (*dma_handle == DMA_MAPPING_ERROR)
+		goto out_free_pages;
+	arch_dma_prep_coherent(page, size);
+	return (void *)(UNCAC_BASE + __pa(ret));
 
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
-		dma_cache_wback_inv((unsigned long)ret, size);
-		ret = (void *)UNCAC_ADDR(ret);
-	}
-	return ret;
+out_free_pages:
+	__free_pages(page, get_order(size));
+	return NULL;
 }
 
 static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	vdma_free(dma_handle);
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
-	return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	__free_pages(virt_to_page(vaddr), get_order(size));
 }
 
 static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
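
The rewritten allocator open-codes what dma_direct_alloc() used to provide:
allocate pages, zero them, reserve a VDMA slot, write the cache lines back
with arch_dma_prep_coherent(), and hand the caller an uncached alias of the
buffer (UNCAC_BASE plus the physical address, i.e. the KSEG1-style uncached
window). A hypothetical consumer reaches it through the generic API; this is
a sketch, not code from the patch:

	#include <linux/dma-mapping.h>

	static int example_setup_ring(struct device *dev, void **ring,
				      dma_addr_t *ring_dma)
	{
		/* Ends up in jazz_dma_alloc(): *ring is an uncached CPU
		 * alias, *ring_dma the VDMA logical address that gets
		 * programmed into the device. */
		*ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dma,
					   GFP_KERNEL);
		return *ring ? 0 : -ENOMEM;
	}

The matching dma_free_coherent() call lands in jazz_dma_free(), which releases
the VDMA slot before returning the pages.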
@@ -598,7 +528,7 @@
 	phys_addr_t phys = page_to_phys(page) + offset;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(dev, phys, size, dir);
+		arch_sync_dma_for_device(phys, size, dir);
 	return vdma_alloc(phys, size);
 }
 
@@ -606,7 +536,7 @@
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+		arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
 	vdma_free(dma_addr);
 }
 
@@ -618,10 +548,10 @@
 
 	for_each_sg(sglist, sg, nents, i) {
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+			arch_sync_dma_for_device(sg_phys(sg), sg->length,
 					dir);
 		sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
-		if (sg->dma_address == VDMA_ERROR)
+		if (sg->dma_address == DMA_MAPPING_ERROR)
 			return 0;
 		sg_dma_len(sg) = sg->length;
 	}
@@ -637,8 +567,7 @@
 
 	for_each_sg(sglist, sg, nents, i) {
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
-					dir);
+			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 		vdma_free(sg->dma_address);
 	}
 }
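
jazz_dma_map_sg() keeps the convention of this era: it returns 0 when it
cannot map an entry (here, when vdma_alloc() reports a full VDMA page table),
so a scatterlist consumer checks the returned count rather than the individual
addresses. A sketch with hypothetical names:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int example_map_sg(struct device *dev,
				  struct scatterlist *sgl, int nents)
	{
		int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

		if (!n)
			return -ENOMEM;	/* jazz_dma_map_sg() returned 0 */
		/* ... program the device via sg_dma_address()/sg_dma_len() ... */
		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return 0;
	}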
@@ -646,13 +575,13 @@
 static void jazz_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-	arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+	arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
 }
 
 static void jazz_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-	arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+	arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
 }
 
 static void jazz_dma_sync_sg_for_device(struct device *dev,
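
These hooks translate the VDMA logical address back to a physical one with
vdma_log2phys() before doing cache maintenance; the only change in this hunk
is that arch_sync_dma_for_cpu()/arch_sync_dma_for_device() no longer take a
struct device argument. A driver reusing a streaming buffer triggers them
like this (sketch, names hypothetical):

	#include <linux/dma-mapping.h>

	static void example_recv(struct device *dev, dma_addr_t handle,
				 void *buf, size_t len)
	{
		/* Device finished writing; make the data visible to the CPU. */
		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
		/* ... read buf ... */
		/* Hand the buffer back to the device for the next transfer. */
		dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	}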
@@ -662,7 +591,7 @@
 	int i;
 
 	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
 static void jazz_dma_sync_sg_for_cpu(struct device *dev,
@@ -672,18 +601,12 @@
 	int i;
 
 	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-}
-
-static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == VDMA_ERROR;
+		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
 const struct dma_map_ops jazz_dma_ops = {
 	.alloc			= jazz_dma_alloc,
 	.free			= jazz_dma_free,
-	.mmap			= arch_dma_mmap,
 	.map_page		= jazz_dma_map_page,
 	.unmap_page		= jazz_dma_unmap_page,
 	.map_sg			= jazz_dma_map_sg,
@@ -692,8 +615,9 @@
 	.sync_single_for_device	= jazz_dma_sync_single_for_device,
 	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
-	.dma_supported		= dma_direct_supported,
-	.cache_sync		= arch_dma_cache_sync,
-	.mapping_error		= jazz_dma_mapping_error,
+	.mmap			= dma_common_mmap,
+	.get_sgtable		= dma_common_get_sgtable,
+	.alloc_pages		= dma_common_alloc_pages,
+	.free_pages		= dma_common_free_pages,
 };
 EXPORT_SYMBOL(jazz_dma_ops);
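
Nothing registers these ops per device; the generic DMA code picks them up
through the MIPS get_arch_dma_ops() hook. A sketch of that wiring, roughly as
it appears in the arch's asm/dma-mapping.h of this era (this file is not part
of the diff above):

	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
	{
	#ifdef CONFIG_MACH_JAZZ
		return &jazz_dma_ops;
	#else
		return NULL;	/* fall back to dma-direct */
	#endif
	}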