@@ -5,34 +5,13 @@
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
 #include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/highmem.h>
 
 #include <asm/cache.h>
 #include <asm/cpu-type.h>
 #include <asm/dma-coherence.h>
 #include <asm/io.h>
-
-#ifdef CONFIG_DMA_PERDEV_COHERENT
-static inline int dev_is_coherent(struct device *dev)
-{
-	return dev->archdata.dma_coherent;
-}
-#else
-static inline int dev_is_coherent(struct device *dev)
-{
-	switch (coherentio) {
-	default:
-	case IO_COHERENCE_DEFAULT:
-		return hw_coherentio;
-	case IO_COHERENCE_ENABLED:
-		return 1;
-	case IO_COHERENCE_DISABLED:
-		return 0;
-	}
-}
-#endif /* CONFIG_DMA_PERDEV_COHERENT */
 
 /*
  * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
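
The dev_is_coherent() helper deleted above is not lost: with <linux/dma-map-ops.h> the same decision is made by the generic dev_is_dma_coherent(), which tests a flag on struct device instead of MIPS-private archdata or the coherentio globals. A minimal sketch of the assumed shape follows; sketch_dev_is_dma_coherent() is a hypothetical stand-in name, not the kernel's exact helper.

#include <linux/device.h>

/*
 * Hedged sketch: the generic helper is assumed to reduce to a flag test.
 * The flag is populated once at device setup time, see the
 * arch_setup_dma_ops() hook added at the end of this patch.
 */
static inline bool sketch_dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}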
@@ -47,15 +26,13 @@
  * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
  * SGI IP32 aka O2.
  */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
 {
-	if (dev_is_coherent(dev))
-		return false;
-
 	switch (boot_cpu_type()) {
 	case CPU_R10000:
 	case CPU_R12000:
 	case CPU_BMIPS5000:
+	case CPU_LOONGSON2EF:
 		return true;
 	default:
 		/*
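
cpu_needs_post_dma_flush() loses its dev argument because per-device coherence is now checked by the generic caller before the arch hook runs, and Loongson-2EF joins the list of cores that speculate into DMA buffers. To make the hazard concrete, here is a hedged driver-side sketch; start_rx_dma() and wait_rx_done() are hypothetical helpers, not part of this patch.

#include <linux/dma-mapping.h>

static void sketch_rx_once(struct device *dev, void *buf, size_t len)
{
	/* the streaming map invalidates the caches covering buf up front */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	start_rx_dma(dev, handle, len);	/* hypothetical: device fills buf */
	wait_rx_done(dev);		/* hypothetical: wait for completion */

	/*
	 * While the DMA was in flight, an R10000/R12000/BMIPS5000 (or now
	 * a Loongson-2EF) may have speculatively refilled cache lines over
	 * buf, so the unmap must invalidate a second time; that is the
	 * arch_sync_dma_for_cpu() -> dma_sync_phys(..., false) path below.
	 */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}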
@@ -67,81 +44,44 @@
 	}
 }
 
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	void *ret;
-
-	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-	if (!ret)
-		return NULL;
-
-	if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
-		dma_cache_wback_inv((unsigned long) ret, size);
-		ret = (void *)UNCAC_ADDR(ret);
-	}
-
-	return ret;
+	dma_cache_wback_inv((unsigned long)page_address(page), size);
 }
 
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_addr, unsigned long attrs)
+void *arch_dma_set_uncached(void *addr, size_t size)
 {
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
-		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
-	dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+	return (void *)(__pa(addr) + UNCAC_BASE);
 }
 
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long addr = (unsigned long)cpu_addr;
-	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
-	int ret = -ENXIO;
-
-	if (!dev_is_coherent(dev))
-		addr = CAC_ADDR(addr);
-
-	pfn = page_to_pfn(virt_to_page((void *)addr));
-
-	if (attrs & DMA_ATTR_WRITE_COMBINE)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	else
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
-	if (off < count && user_count <= (count - off)) {
-		ret = remap_pfn_range(vma, vma->vm_start,
-				      pfn + off,
-				      user_count << PAGE_SHIFT,
-				      vma->vm_page_prot);
-	}
-
-	return ret;
-}
-
-static inline void dma_sync_virt(void *addr, size_t size,
+static inline void dma_sync_virt_for_device(void *addr, size_t size,
 		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		dma_cache_wback((unsigned long)addr, size);
 		break;
-
 	case DMA_FROM_DEVICE:
 		dma_cache_inv((unsigned long)addr, size);
 		break;
-
 	case DMA_BIDIRECTIONAL:
 		dma_cache_wback_inv((unsigned long)addr, size);
 		break;
+	default:
+		BUG();
+	}
+}
 
+static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
+		enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		dma_cache_inv((unsigned long)addr, size);
+		break;
 	default:
 		BUG();
 	}
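
The open-coded arch_dma_alloc()/arch_dma_free() pair collapses into two narrow hooks because the generic dma-direct allocator now does the orchestration: flush the new buffer once, then remap it uncached. A hedged sketch of the assumed generic flow; sketch_noncoherent_alloc() is illustrative, not the kernel's actual dma_direct_alloc().

#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *sketch_noncoherent_alloc(size_t size, gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;
	/* write back and invalidate any dirty lines aliasing the buffer */
	arch_dma_prep_coherent(page, size);
	/* then hand out an uncached alias (KSEG1/XKPHYS on MIPS) */
	return arch_dma_set_uncached(page_address(page), size);
}

The dma_sync_virt() split above reflects the same asymmetry: the for_device flush must write dirty data out ahead of the transfer, while the for_cpu side only ever needs to invalidate.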
@@ -153,7 +93,7 @@
  * configured then the bulk of this loop gets optimized out.
  */
 static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+		enum dma_data_direction dir, bool for_device)
 {
 	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
 	unsigned long offset = paddr & ~PAGE_MASK;
@@ -161,48 +101,45 @@
 
 	do {
 		size_t len = left;
+		void *addr;
 
 		if (PageHighMem(page)) {
-			void *addr;
-
-			if (offset + len > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset >> PAGE_SHIFT;
-					offset &= ~PAGE_MASK;
-				}
+			if (offset + len > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
+		}
 
-			addr = kmap_atomic(page);
-			dma_sync_virt(addr + offset, len, dir);
-			kunmap_atomic(addr);
-		} else
-			dma_sync_virt(page_address(page) + offset, size, dir);
+		addr = kmap_atomic(page);
+		if (for_device)
+			dma_sync_virt_for_device(addr + offset, len, dir);
+		else
+			dma_sync_virt_for_cpu(addr + offset, len, dir);
+		kunmap_atomic(addr);
+
 		offset = 0;
 		page++;
 		left -= len;
 	} while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	if (!dev_is_coherent(dev))
-		dma_sync_phys(paddr, size, dir);
+	dma_sync_phys(paddr, size, dir, true);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	if (cpu_needs_post_dma_flush(dev))
-		dma_sync_phys(paddr, size, dir);
+	if (cpu_needs_post_dma_flush())
+		dma_sync_phys(paddr, size, dir, false);
 }
+#endif
 
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+		const struct iommu_ops *iommu, bool coherent)
 {
-	BUG_ON(direction == DMA_NONE);
-
-	if (!dev_is_coherent(dev))
-		dma_sync_virt(vaddr, size, direction);
+	dev->dma_coherent = coherent;
 }
+#endif
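
arch_dma_cache_sync() disappears along with the DMA_ATTR_NON_CONSISTENT checks dropped earlier in this patch; what replaces the per-device plumbing is the arch_setup_dma_ops() stub above, which records coherence in the generic dev->dma_coherent flag for dev_is_dma_coherent() to test. A hedged sketch of the assumed caller side, modelled on devicetree configuration; sketch_configure_dma() is illustrative, not of_dma_configure().

#include <linux/dma-map-ops.h>
#include <linux/of_address.h>

static void sketch_configure_dma(struct device *dev)
{
	/* firmware states whether the device snoops the CPU caches */
	bool coherent = of_dma_is_coherent(dev->of_node);

	/* the range arguments are unused by the MIPS stub above */
	arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
}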