@@ -5,14 +5,16 @@
  * Copyright (c) 2006 SUSE Linux Products GmbH
  * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
  */
-
+#include <linux/memblock.h> /* for max_pfn */
 #include <linux/acpi.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include "debug.h"
+#include "direct.h"

 /*
  * Managed DMA API
@@ -43,45 +45,6 @@
 	}
 	return 0;
 }
-
-/**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent(). Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp)
-{
-	struct dma_devres *dr;
-	void *vaddr;
-
-	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-	if (!dr)
-		return NULL;
-
-	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
-	if (!vaddr) {
-		devres_free(dr);
-		return NULL;
-	}
-
-	dr->vaddr = vaddr;
-	dr->dma_handle = *dma_handle;
-	dr->size = size;
-
-	devres_add(dev, dr);
-
-	return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);

 /**
  * dmam_free_coherent - Managed dma_free_coherent()
@@ -143,206 +106,542 @@
 }
 EXPORT_SYMBOL(dmam_alloc_attrs);

-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
+static bool dma_go_direct(struct device *dev, dma_addr_t mask,
+		const struct dma_map_ops *ops)
 {
-	dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @phys_addr: Physical address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags)
-{
-	void *res;
-	int rc;
-
-	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
-			flags);
-	if (!rc)
-		devres_add(dev, res);
-	else
-		devres_free(res);
-
-	return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
-	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
+	if (likely(!ops))
+		return true;
+#ifdef CONFIG_DMA_OPS_BYPASS
+	if (dev->dma_ops_bypass)
+		return min_not_zero(mask, dev->bus_dma_limit) >=
+			    dma_direct_get_required_mask(dev);
 #endif
+	return false;
+}
+

 /*
- * Create scatter-list for the already allocated DMA buffer.
+ * Check if the device uses a direct mapping for streaming DMA operations.
+ * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
+ * enough.
  */
-int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-		void *cpu_addr, dma_addr_t handle, size_t size)
+static inline bool dma_alloc_direct(struct device *dev,
+		const struct dma_map_ops *ops)
 {
-	struct page *page = virt_to_page(cpu_addr);
-	int ret;
-
-	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-	if (unlikely(ret))
-		return ret;
-
-	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-	return 0;
+	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
 }
-EXPORT_SYMBOL(dma_common_get_sgtable);
+
+static inline bool dma_map_direct(struct device *dev,
+		const struct dma_map_ops *ops)
+{
+	return dma_go_direct(dev, *dev->dma_mask, ops);
+}
+
+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+		size_t offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (WARN_ON_ONCE(!dev->dma_mask))
+		return DMA_MAPPING_ERROR;
+
+	if (dma_map_direct(dev, ops))
+		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	else
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	debug_dma_map_page(dev, page, offset, size, dir, addr);
+
+	return addr;
+}
+EXPORT_SYMBOL(dma_map_page_attrs);
+
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_unmap_page(dev, addr, size, dir, attrs);
+	else if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, dir, attrs);
+	debug_dma_unmap_page(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unmap_page_attrs);
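
(Aside, not part of the patch: a minimal, hypothetical driver-side sketch of how the streaming page-mapping pair above is typically used. The device and page arguments are assumed to come from the caller; the error check with dma_mapping_error() is mandatory.)

#include <linux/dma-mapping.h>

static int example_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t dma;

	/* Map the whole page for device reads (CPU -> device). */
	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;		/* mapping failed, e.g. no IOMMU space */

	/* ... hand "dma" to the hardware and wait for completion ... */

	dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_TO_DEVICE, 0);
	return 0;
}
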

 /*
- * Create userspace mapping for the DMA-coherent memory.
+ * dma_map_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
  */
-int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
 {
-	int ret = -ENXIO;
-#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long off = vma->vm_pgoff;
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	int ents;

-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	BUG_ON(!valid_dma_direction(dir));

-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
+	if (WARN_ON_ONCE(!dev->dma_mask))
+		return 0;

-	if (off < count && user_count <= (count - off))
-		ret = remap_pfn_range(vma, vma->vm_start,
-				page_to_pfn(virt_to_page(cpu_addr)) + off,
-				user_count << PAGE_SHIFT,
-				vma->vm_page_prot);
-#endif	/* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+	if (dma_map_direct(dev, ops))
+		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+	else
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	BUG_ON(ents < 0);
+	debug_dma_map_sg(dev, sg, nents, ents, dir);

-	return ret;
+	return ents;
 }
-EXPORT_SYMBOL(dma_common_mmap);
+EXPORT_SYMBOL(dma_map_sg_attrs);
+
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+	if (dma_map_direct(dev, ops))
+		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+	else if (ops->unmap_sg)
+		ops->unmap_sg(dev, sg, nents, dir, attrs);
+}
+EXPORT_SYMBOL(dma_unmap_sg_attrs);
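
(Aside, not part of the patch: as the comment above dma_map_sg_attrs() notes, a zero return is the only error indication. A minimal, hypothetical caller sketch, assuming an already-initialised scatterlist "sgl" with "nents" entries:)

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (mapped == 0)		/* 0 means failure, never a negative value */
		return -ENOMEM;

	/* ... program the device with the "mapped" (possibly coalesced) entries ... */

	/* unmap with the original nents, not the returned count */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}
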
+
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr = DMA_MAPPING_ERROR;
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (WARN_ON_ONCE(!dev->dma_mask))
+		return DMA_MAPPING_ERROR;
+
+	/* Don't allow RAM to be mapped */
+	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
+		return DMA_MAPPING_ERROR;
+
+	if (dma_map_direct(dev, ops))
+		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+	else if (ops->map_resource)
+		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+
+	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+	return addr;
+}
+EXPORT_SYMBOL(dma_map_resource);
+
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
+		ops->unmap_resource(dev, addr, size, dir, attrs);
+	debug_dma_unmap_resource(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unmap_resource);
+
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+	else if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(dev, addr, size, dir);
+	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_single_for_device(dev, addr, size, dir);
+	else if (ops->sync_single_for_device)
+		ops->sync_single_for_device(dev, addr, size, dir);
+	debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(dev, sg, nelems, dir);
+	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable. This presents a couple of problems:
+ *  1. Not all memory allocated via the coherent DMA APIs is backed by
+ *     a struct page
+ *  2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *     as we will try to flush the memory through a different alias to that
+ *     actually being used (and the flushes are redundant.)
+ */
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_alloc_direct(dev, ops))
+		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+				size, attrs);
+	if (!ops->get_sgtable)
+		return -ENXIO;
+	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
+}
+EXPORT_SYMBOL(dma_get_sgtable_attrs);

 #ifdef CONFIG_MMU
-static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-		size_t size, unsigned long vm_flags, pgprot_t prot,
-		const void *caller)
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 {
-	struct vm_struct *area;
+	if (force_dma_unencrypted(dev))
+		prot = pgprot_decrypted(prot);
+	if (dev_is_dma_coherent(dev))
+		return prot;
+#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
+	if (attrs & DMA_ATTR_WRITE_COMBINE)
+		return pgprot_writecombine(prot);
+#endif
+	if (attrs & DMA_ATTR_SYS_CACHE_ONLY ||
+	    attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
+		return pgprot_syscached(prot);
+	return pgprot_dmacoherent(prot);
+}
+#endif /* CONFIG_MMU */

-	area = get_vm_area_caller(size, vm_flags, caller);
-	if (!area)
+/**
+ * dma_can_mmap - check if a given device supports dma_mmap_*
+ * @dev: device to check
+ *
+ * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
+ * map DMA allocations to userspace.
+ */
+bool dma_can_mmap(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_alloc_direct(dev, ops))
+		return dma_direct_can_mmap(dev);
+	return ops->mmap != NULL;
+}
+EXPORT_SYMBOL_GPL(dma_can_mmap);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @dma_addr: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
+ * space. The coherent DMA buffer must not be freed by the driver until the
+ * user space mapping has been released.
+ */
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_alloc_direct(dev, ops))
+		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
+				attrs);
+	if (!ops->mmap)
+		return -ENXIO;
+	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+EXPORT_SYMBOL(dma_mmap_attrs);
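
(Aside, not part of the patch: a minimal, hypothetical sketch of a driver mmap file operation exporting a previously allocated coherent buffer through the path above. The "example_state" structure and its fields are assumptions made for the illustration.)

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct example_state {			/* hypothetical driver state */
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_addr;
	size_t buf_size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_state *st = file->private_data;

	if (!dma_can_mmap(st->dev))
		return -ENXIO;

	/* dma_mmap_coherent() is dma_mmap_attrs() with attrs == 0 */
	return dma_mmap_coherent(st->dev, vma, st->cpu_addr, st->dma_addr,
				 st->buf_size);
}
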
+
+u64 dma_get_required_mask(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_alloc_direct(dev, ops))
+		return dma_direct_get_required_mask(dev);
+	if (ops->get_required_mask)
+		return ops->get_required_mask(dev);
+
+	/*
+	 * We require every DMA ops implementation to at least support a 32-bit
+	 * DMA mask (and use bounce buffering if that isn't supported in
+	 * hardware). As the direct mapping code has its own routine to
+	 * actually report an optimal mask we default to 32-bit here as that
+	 * is the right thing for most IOMMUs, and at least not actively
+	 * harmful in general.
+	 */
+	return DMA_BIT_MASK(32);
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
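
(Aside, not part of the patch: a brief, hypothetical illustration of why a driver might consult dma_get_required_mask(), enabling a more expensive 64-bit descriptor format only when installed memory actually needs it.)

#include <linux/dma-mapping.h>

static int example_pick_addressing(struct device *dev)
{
	/*
	 * If all RAM is reachable with 32-bit addresses, stay with the
	 * cheaper 32-bit descriptor layout; otherwise ask for 64-bit DMA.
	 */
	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32))
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
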
+
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t flag, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
+
+	WARN_ON_ONCE(!dev->coherent_dma_mask);
+
+	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+		return cpu_addr;
+
+	/* let the implementation decide on the zone to allocate from: */
+	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+	if (dma_alloc_direct(dev, ops))
+		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+	else if (ops->alloc)
+		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+	else
 		return NULL;

-	if (map_vm_area(area, prot, pages)) {
-		vunmap(area->addr);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
+}
+EXPORT_SYMBOL(dma_alloc_attrs);
+
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
+		return;
+	/*
+	 * On non-coherent platforms which implement DMA-coherent buffers via
+	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
+	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
+	 * sleep on some machines, and b) an indication that the driver is
+	 * probably misusing the coherent API anyway.
+	 */
+	WARN_ON(irqs_disabled());
+
+	if (!cpu_addr)
+		return;
+
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	if (dma_alloc_direct(dev, ops))
+		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+	else if (ops->free)
+		ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+EXPORT_SYMBOL(dma_free_attrs);
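
(Aside, not part of the patch: a minimal, hypothetical consumer of the coherent API above, allocating a DMA-visible descriptor ring at probe time and releasing it on teardown. The ring size is an arbitrary illustrative value.)

#include <linux/dma-mapping.h>

#define EXAMPLE_RING_BYTES	4096	/* illustrative size */

static void *example_alloc_ring(struct device *dev, dma_addr_t *ring_dma)
{
	/* Returns a CPU pointer and fills *ring_dma with the device address. */
	return dma_alloc_coherent(dev, EXAMPLE_RING_BYTES, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, void *ring, dma_addr_t ring_dma)
{
	/* Must be called from sleepable context, per the WARN_ON(irqs_disabled()) above. */
	dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring, ring_dma);
}
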
+
+struct page *dma_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	struct page *page;
+
+	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
 		return NULL;
+	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
+		return NULL;
+
+	size = PAGE_ALIGN(size);
+	if (dma_alloc_direct(dev, ops))
+		page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
+	else if (ops->alloc_pages)
+		page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
+	else
+		return NULL;
+
+	debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+
+	return page;
+}
+EXPORT_SYMBOL_GPL(dma_alloc_pages);
+
+void dma_free_pages(struct device *dev, size_t size, struct page *page,
+		dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	size = PAGE_ALIGN(size);
+	debug_dma_unmap_page(dev, dma_handle, size, dir);
+
+	if (dma_alloc_direct(dev, ops))
+		dma_direct_free_pages(dev, size, page, dma_handle, dir);
+	else if (ops->free_pages)
+		ops->free_pages(dev, size, page, dma_handle, dir);
+}
+EXPORT_SYMBOL_GPL(dma_free_pages);
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	void *vaddr;
+
+	if (!ops || !ops->alloc_noncoherent) {
+		struct page *page;
+
+		page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+		if (!page)
+			return NULL;
+		return page_address(page);
 	}

-	return area;
+	size = PAGE_ALIGN(size);
+	vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp);
+	if (vaddr)
+		debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir,
+				*dma_handle);
+	return vaddr;
 }
+EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);

-/*
- * remaps an array of PAGE_SIZE pages into another vm_area
- * Cannot be used in non-sleeping contexts
- */
-void *dma_common_pages_remap(struct page **pages, size_t size,
-		unsigned long vm_flags, pgprot_t prot,
-		const void *caller)
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, enum dma_data_direction dir)
 {
-	struct vm_struct *area;
+	const struct dma_map_ops *ops = get_dma_ops(dev);

-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-	if (!area)
-		return NULL;
-
-	area->pages = pages;
-
-	return area->addr;
-}
-
-/*
- * remaps an allocated contiguous region into another vm_area.
- * Cannot be used in non-sleeping contexts
- */
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-		unsigned long vm_flags,
-		pgprot_t prot, const void *caller)
-{
-	unsigned long i;
-	struct page **pages;
-	struct vm_struct *area;
-
-	pages = kvmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
-
-	if (!pages)
-		return NULL;
-
-	for (i = 0; i < (size >> PAGE_SHIFT); i++)
-		pages[i] = nth_page(page, i);
-
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-
-	kvfree(pages);
-
-	if (!area)
-		return NULL;
-	return area->addr;
-}
-
-/*
- * unmaps a range previously mapped by dma_common_*_remap
- */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
-		bool no_warn)
-{
-	struct vm_struct *area = find_vm_area(cpu_addr);
-
-	if (!area || (area->flags & vm_flags) != vm_flags) {
-		WARN(!no_warn, "trying to free invalid coherent area: %p\n",
-			cpu_addr);
+	if (!ops || !ops->free_noncoherent) {
+		dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
 		return;
 	}

-	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
-	vunmap(cpu_addr);
+	size = PAGE_ALIGN(size);
+	debug_dma_unmap_page(dev, dma_handle, size, dir);
+	ops->free_noncoherent(dev, size, vaddr, dma_handle, dir);
 }
+EXPORT_SYMBOL_GPL(dma_free_noncoherent);
+
+int dma_supported(struct device *dev, u64 mask)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	/*
+	 * ->dma_supported sets the bypass flag, so we must always call
+	 * into the method here unless the device is truly direct mapped.
+	 */
+	if (!ops)
+		return dma_direct_supported(dev, mask);
+	if (!ops->dma_supported)
+		return 1;
+	return ops->dma_supported(dev, mask);
+}
+EXPORT_SYMBOL(dma_supported);
+
+#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
+void arch_dma_set_mask(struct device *dev, u64 mask);
+#else
+#define arch_dma_set_mask(dev, mask)	do { } while (0)
 #endif

-/*
- * enables DMA API use for a device
- */
-int dma_configure(struct device *dev)
+int dma_set_mask(struct device *dev, u64 mask)
 {
-	if (dev->bus->dma_configure)
-		return dev->bus->dma_configure(dev);
+	/*
+	 * Truncate the mask to the actually supported dma_addr_t width to
+	 * avoid generating unsupportable addresses.
+	 */
+	mask = (dma_addr_t)mask;
+
+	if (!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	arch_dma_set_mask(dev, mask);
+	*dev->dma_mask = mask;
 	return 0;
 }
+EXPORT_SYMBOL(dma_set_mask);

-void dma_deconfigure(struct device *dev)
+#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
-	of_dma_deconfigure(dev);
-	acpi_dma_deconfigure(dev);
+	/*
+	 * Truncate the mask to the actually supported dma_addr_t width to
+	 * avoid generating unsupportable addresses.
+	 */
+	mask = (dma_addr_t)mask;
+
+	if (!dma_supported(dev, mask))
+		return -EIO;
+
+	dev->coherent_dma_mask = mask;
+	return 0;
 }
+EXPORT_SYMBOL(dma_set_coherent_mask);
+#endif
+
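(Aside, not part of the patch: a short, hypothetical probe-time sketch of how the mask setters above are normally used, preferring 64-bit DMA and falling back to 32-bit. dma_set_mask_and_coherent() is the existing helper that sets both masks through the functions shown here.)

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_setup_dma_masks(struct device *dev)
{
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_err(dev, "no usable DMA addressing mode\n");
	return ret;
}
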
+size_t dma_max_mapping_size(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	size_t size = SIZE_MAX;
+
+	if (dma_map_direct(dev, ops))
+		size = dma_direct_max_mapping_size(dev);
+	else if (ops && ops->max_mapping_size)
+		size = ops->max_mapping_size(dev);
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(dma_max_mapping_size);
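
(Aside, not part of the patch: dma_max_mapping_size() matters mostly when bounce buffering, e.g. swiotlb, caps the size of a single mapping. A hypothetical sketch of a driver clamping its per-transfer size accordingly.)

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

static size_t example_max_transfer(struct device *dev, size_t requested)
{
	/* Never ask for a single mapping larger than the DMA layer allows. */
	return min_t(size_t, requested, dma_max_mapping_size(dev));
}
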
+
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_map_direct(dev, ops))
+		return dma_direct_need_sync(dev, dma_addr);
+	return ops->sync_single_for_cpu || ops->sync_single_for_device;
+}
+EXPORT_SYMBOL_GPL(dma_need_sync);
+
+unsigned long dma_get_merge_boundary(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops || !ops->get_merge_boundary)
+		return 0;	/* can't merge */
+
+	return ops->get_merge_boundary(dev);
+}
+EXPORT_SYMBOL_GPL(dma_get_merge_boundary);