forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 10ebd8556b7990499c896a550e3d416b444211e6
kernel/arch/parisc/kernel/pci-dma.c
@@ -3,7 +3,7 @@
 ** PARISC 1.1 Dynamic DMA mapping support.
 ** This implementation is for PA-RISC platforms that do not support
 ** I/O TLBs (aka DMA address translation hardware).
-** See Documentation/DMA-API-HOWTO.txt for interface definitions.
+** See Documentation/core-api/dma-api-howto.rst for interface definitions.
 **
 ** (c) Copyright 1999,2000 Hewlett-Packard Company
 ** (c) Copyright 2000 Grant Grundler
@@ -26,13 +26,12 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 
 #include <asm/cacheflush.h>
 #include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
 #include <asm/io.h>
 #include <asm/page.h>	/* get_order */
-#include <asm/pgalloc.h>
 #include <linux/uaccess.h>
 #include <asm/tlbflush.h>	/* for purge_tlb_*() macros */
 
@@ -133,9 +132,14 @@
 
 	dir = pgd_offset_k(vaddr);
 	do {
+		p4d_t *p4d;
+		pud_t *pud;
 		pmd_t *pmd;
-
-		pmd = pmd_alloc(NULL, dir, vaddr);
+
+		p4d = p4d_offset(dir, vaddr);
+		pud = pud_offset(p4d, vaddr);
+		pmd = pmd_alloc(NULL, pud, vaddr);
+
 		if (!pmd)
 			return -ENOMEM;
 		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
@@ -196,7 +200,7 @@
 		pgd_clear(dir);
 		return;
 	}
-	pmd = pmd_offset(dir, vaddr);
+	pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
 	vaddr &= ~PGDIR_MASK;
 	end = vaddr + size;
 	if (end > PGDIR_SIZE)
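
Note on the two hunks above: the added p4d/pud steps follow the kernel's generic five-level page-table walk, where levels a configuration does not use (p4d and pud here) are folded and simply pass the entry through. A minimal sketch of that walk pattern, using a hypothetical helper name walk_to_pte() that is not part of this file:

/* Illustrative only: generic descent from the kernel pgd to a pte.
 * Folded levels (p4d/pud on 32-bit parisc) just hand the entry downward. */
#include <linux/mm.h>
#include <linux/pgtable.h>

static pte_t *walk_to_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);	/* top level for kernel addresses */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, vaddr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, vaddr);	/* final level */
}
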
@@ -394,17 +398,20 @@
 
 __initcall(pcxl_dma_init);
 
-static void *pcxl_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	unsigned long vaddr;
 	unsigned long paddr;
 	int order;
 
+	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
+		return NULL;
+
 	order = get_order(size);
 	size = 1 << (order + PAGE_SHIFT);
 	vaddr = pcxl_alloc_range(size);
-	paddr = __get_free_pages(flag, order);
+	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
 	flush_kernel_dcache_range(paddr, size);
 	paddr = __pa(paddr);
 	map_uncached_pages(vaddr, size, paddr);
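
For context, arch_dma_alloc() above is where a coherent allocation made through the generic DMA API lands on the non-coherent PCX-L/PCX-L2 machines; the NULL return on other CPU types simply makes such allocations fail where uncached mappings are not possible. An illustrative driver-side sketch (dev and BUF_SIZE are placeholders, not part of this patch):

/* Illustrative usage only; not part of this file. */
#include <linux/dma-mapping.h>

#define BUF_SIZE PAGE_SIZE

static int example_alloc(struct device *dev, void **buf, dma_addr_t *buf_dma)
{
	/* On PCX-L/PCX-L2 this can reach arch_dma_alloc(), which returns
	 * an uncached kernel mapping of the zeroed pages. */
	*buf = dma_alloc_coherent(dev, BUF_SIZE, buf_dma, GFP_KERNEL);
	return *buf ? 0 : -ENOMEM;
}

static void example_free(struct device *dev, void *buf, dma_addr_t buf_dma)
{
	/* Releases the uncached mapping and the pages via arch_dma_free(). */
	dma_free_coherent(dev, BUF_SIZE, buf, buf_dma);
}
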
@@ -421,60 +428,45 @@
 	return (void *)vaddr;
 }
 
-static void *pcx_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
-	void *addr;
-
-	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
-		return NULL;
-
-	addr = (void *)__get_free_pages(flag, get_order(size));
-	if (addr)
-		*dma_handle = (dma_addr_t)virt_to_phys(addr);
-
-	return addr;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
-		return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
-	else
-		return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
-}
-
 void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	int order = get_order(size);
 
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
-		size = 1 << (order + PAGE_SHIFT);
-		unmap_uncached_pages((unsigned long)vaddr, size);
-		pcxl_free_range((unsigned long)vaddr, size);
+	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
+		     boot_cpu_data.cpu_type != pcxl);
 
-		vaddr = __va(dma_handle);
+	size = 1 << (order + PAGE_SHIFT);
+	unmap_uncached_pages((unsigned long)vaddr, size);
+	pcxl_free_range((unsigned long)vaddr, size);
+
+	free_pages((unsigned long)__va(dma_handle), order);
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	/*
+	 * fdc: The data cache line is written back to memory, if and only if
+	 * it is dirty, and then invalidated from the data cache.
+	 */
+	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	unsigned long addr = (unsigned long) phys_to_virt(paddr);
+
+	switch (dir) {
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		flush_kernel_dcache_range(addr, size);
+		return;
+	case DMA_FROM_DEVICE:
+		purge_kernel_dcache_range_asm(addr, addr + size);
+		return;
+	default:
+		BUG();
 	}
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
-{
-	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
-}
-
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
-{
-	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
-}
-
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	flush_kernel_dcache_range((unsigned long)vaddr, size);
 }
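
The arch_sync_dma_for_device()/arch_sync_dma_for_cpu() hooks added above provide the cache maintenance behind the streaming DMA API on non-coherent parisc machines: dirty lines are written back (fdc) before the device touches a buffer, and stale lines are purged before the CPU reads data the device wrote. An illustrative sketch of a receive path that exercises both hooks (dev, rx_buf and len are placeholders, not part of this patch):

/* Illustrative usage only; not part of this file. */
#include <linux/dma-mapping.h>

static int example_rx(struct device *dev, void *rx_buf, size_t len)
{
	dma_addr_t dma;

	/* Maps the buffer for the device; on non-coherent parisc this ends
	 * up in arch_sync_dma_for_device() to write back/invalidate lines. */
	dma = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... device DMAs into the buffer ... */

	/* Hands the buffer back to the CPU; arch_sync_dma_for_cpu() purges
	 * the stale cache lines before the CPU reads the new data. */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}
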