@@ ... @@
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/err.h>
-#include <linux/dma-debug.h>
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/bug.h>
 #include <linux/mem_encrypt.h>
-#include <linux/android_kabi.h>

 /**
  * List of possible attributes associated with a DMA mapping. The semantics
- * of each attribute should be defined in Documentation/DMA-attributes.txt.
- *
- * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
- * forces all pending DMA writes to complete.
+ * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
  */
-#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+
 /*
  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
  * may be weakly ordered, that is that reads and writes may pass each other.
@@ ... @@
  * buffered to improve performance.
  */
 #define DMA_ATTR_WRITE_COMBINE (1UL << 2)
-/*
- * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
- * consistent or non-consistent memory as it sees fit.
- */
-#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
 /*
  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel
  * virtual mapping for the allocated buffer.
@@ ... @@
 #define DMA_ATTR_PRIVILEGED (1UL << 9)

 /*
- * DMA_ATTR_STRONGLY_ORDERED: Specifies that accesses to the mapping must
- * not be buffered, reordered, merged with other accesses, or unaligned.
- * No speculative access may occur in this mapping.
+ * DMA_ATTR_SYS_CACHE_ONLY: used to indicate that the buffer should be mapped
+ * with the correct memory attributes so that it can be cached in the system
+ * or last level cache. This is useful for buffers that are being mapped for
+ * devices that are non-coherent, but can use the system cache.
  */
-#define DMA_ATTR_STRONGLY_ORDERED (1UL << 10)
-/*
- * DMA_ATTR_SKIP_ZEROING: Do not zero mapping.
- */
-#define DMA_ATTR_SKIP_ZEROING (1UL << 11)
-/*
- * DMA_ATTR_NO_DELAYED_UNMAP: Used by msm specific lazy mapping to indicate
- * that the mapping can be freed on unmap, rather than when the ion_buffer
- * is freed.
- */
-#define DMA_ATTR_NO_DELAYED_UNMAP (1UL << 12)
-/*
- * DMA_ATTR_EXEC_MAPPING: The mapping has executable permissions.
- */
-#define DMA_ATTR_EXEC_MAPPING (1UL << 13)
-/*
- * DMA_ATTR_IOMMU_USE_UPSTREAM_HINT: Normally an smmu will override any bus
- * attributes (i.e cacheablilty) provided by the client device. Some hardware
- * may be designed to use the original attributes instead.
- */
-#define DMA_ATTR_IOMMU_USE_UPSTREAM_HINT (1UL << 14)
-/*
- * When passed to a DMA map call the DMA_ATTR_FORCE_COHERENT DMA
- * attribute can be used to force a buffer to be mapped as IO coherent.
- */
-#define DMA_ATTR_FORCE_COHERENT (1UL << 15)
-/*
- * When passed to a DMA map call the DMA_ATTR_FORCE_NON_COHERENT DMA
- * attribute can be used to force a buffer to not be mapped as IO
- * coherent.
- */
-#define DMA_ATTR_FORCE_NON_COHERENT (1UL << 16)
-/*
- * DMA_ATTR_DELAYED_UNMAP: Used by ION, it will ensure that mappings are not
- * removed on unmap but instead are removed when the ion_buffer is freed.
- */
-#define DMA_ATTR_DELAYED_UNMAP (1UL << 17)
+#define DMA_ATTR_SYS_CACHE_ONLY (1UL << 14)

 /*
- * DMA_ATTR_IOMMU_USE_LLC_NWA: Overrides the bus attributes to use the System
- * Cache(LLC) with allocation policy as Inner Non-Cacheable, Outer Cacheable:
- * Write-Back, Read-Allocate, No Write-Allocate policy.
+ * DMA_ATTR_SYS_CACHE_ONLY_NWA: used to indicate that the buffer should be
+ * mapped with the correct memory attributes so that it can be cached in the
+ * system or last level cache, with a no write allocate cache policy. This is
+ * useful for buffers that are being mapped for devices that are non-coherent,
+ * but can use the system cache.
  */
-#define DMA_ATTR_IOMMU_USE_LLC_NWA (1UL << 18)
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0)
+#define DMA_ATTR_SYS_CACHE_ONLY_NWA (1UL << 15)

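For reference, a minimal usage sketch (not part of the diff): the attribute bits above are passed as the `attrs` argument of the allocation and mapping calls declared later in this header. The helper name and driver context below are hypothetical.

```c
#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate a write-combined buffer for a device. */
static void *alloc_wc_buffer(struct device *dev, size_t size,
			     dma_addr_t *dma_handle)
{
	/* DMA_ATTR_WRITE_COMBINE: CPU writes to the buffer may be buffered. */
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}
```

The buffer would later be released with dma_free_attrs() using the same attrs value.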
---|
 /*
- * A dma_addr_t can hold any valid DMA or bus address for the platform.
- * It can be given to a device to use as a DMA source or target. A CPU cannot
- * reference a dma_addr_t directly because there may be translation between
- * its physical address space and the bus address space.
+ * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
+ * be given to a device to use as a DMA source or target. It is specific to a
+ * given device and there may be a translation between the CPU physical address
+ * space and the bus address space.
+ *
+ * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
+ * be used directly in drivers, but checked for using dma_mapping_error()
+ * instead.
  */
-struct dma_map_ops {
-        void* (*alloc)(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t gfp,
-                        unsigned long attrs);
-        void (*free)(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle,
-                        unsigned long attrs);
-        int (*mmap)(struct device *, struct vm_area_struct *,
-                        void *, dma_addr_t, size_t,
-                        unsigned long attrs);
-
-        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
-                        dma_addr_t, size_t, unsigned long attrs);
-
-        dma_addr_t (*map_page)(struct device *dev, struct page *page,
-                        unsigned long offset, size_t size,
-                        enum dma_data_direction dir,
-                        unsigned long attrs);
-        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
-                        size_t size, enum dma_data_direction dir,
-                        unsigned long attrs);
-        /*
-         * map_sg returns 0 on error and a value > 0 on success.
-         * It should never return a value < 0.
-         */
-        int (*map_sg)(struct device *dev, struct scatterlist *sg,
-                        int nents, enum dma_data_direction dir,
-                        unsigned long attrs);
-        void (*unmap_sg)(struct device *dev,
-                        struct scatterlist *sg, int nents,
-                        enum dma_data_direction dir,
-                        unsigned long attrs);
-        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
-                        size_t size, enum dma_data_direction dir,
-                        unsigned long attrs);
-        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
-                        size_t size, enum dma_data_direction dir,
-                        unsigned long attrs);
-        void (*sync_single_for_cpu)(struct device *dev,
-                        dma_addr_t dma_handle, size_t size,
-                        enum dma_data_direction dir);
-        void (*sync_single_for_device)(struct device *dev,
-                        dma_addr_t dma_handle, size_t size,
-                        enum dma_data_direction dir);
-        void (*sync_sg_for_cpu)(struct device *dev,
-                        struct scatterlist *sg, int nents,
-                        enum dma_data_direction dir);
-        void (*sync_sg_for_device)(struct device *dev,
-                        struct scatterlist *sg, int nents,
-                        enum dma_data_direction dir);
-        void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
-                        enum dma_data_direction direction);
-        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
-        int (*dma_supported)(struct device *dev, u64 mask);
-        int (*set_dma_mask)(struct device *dev, u64 mask);
-        void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
-                        size_t size, unsigned long attrs);
-        void (*unremap)(struct device *dev, void *remapped_address,
-                        size_t size);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
-        u64 (*get_required_mask)(struct device *dev);
-#endif
-
-        ANDROID_KABI_RESERVE(1);
-        ANDROID_KABI_RESERVE(2);
-        ANDROID_KABI_RESERVE(3);
-        ANDROID_KABI_RESERVE(4);
-};
-
-extern const struct dma_map_ops dma_direct_ops;
-extern const struct dma_map_ops dma_noncoherent_ops;
-extern const struct dma_map_ops dma_virt_ops;
+#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

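For reference, a minimal usage sketch (not part of the diff): DMA_BIT_MASK() is typically combined with dma_set_mask_and_coherent(), defined further down in this header, to declare how many address bits a device can drive. The probe function below is hypothetical.

```c
#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
	/* Claim 36-bit DMA addressing; fall back to 32-bit if unsupported. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}
```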
---|
-#define DMA_MASK_NONE 0x0ULL
-
-static inline int valid_dma_direction(int dma_direction)
-{
-        return ((dma_direction == DMA_BIDIRECTIONAL) ||
-                (dma_direction == DMA_TO_DEVICE) ||
-                (dma_direction == DMA_FROM_DEVICE));
-}
-
-static inline int is_device_dma_capable(struct device *dev)
-{
-        return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
-}
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-/*
- * These three functions are only for dma allocator.
- * Don't use them in device drivers.
- */
-int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
-                dma_addr_t *dma_handle, void **ret);
-int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
-
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
-                void *cpu_addr, size_t size, int *ret);
-
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
-                size_t size, int *ret);
-
+#ifdef CONFIG_DMA_API_DEBUG
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+void debug_dma_map_single(struct device *dev, const void *addr,
+                unsigned long len);
 #else
-#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
-
-static inline void *dma_alloc_from_global_coherent(ssize_t size,
-                dma_addr_t *dma_handle)
+static inline void debug_dma_mapping_error(struct device *dev,
+                dma_addr_t dma_addr)
 {
-        return NULL;
 }
-
-static inline int dma_release_from_global_coherent(int order, void *vaddr)
+static inline void debug_dma_map_single(struct device *dev, const void *addr,
+                unsigned long len)
 {
-        return 0;
 }
-
-static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
-                void *cpu_addr, size_t size,
-                int *ret)
-{
-        return 0;
-}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+#endif /* CONFIG_DMA_API_DEBUG */

---|
 #ifdef CONFIG_HAS_DMA
-#include <asm/dma-mapping.h>
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-        if (dev && dev->dma_ops)
-                return dev->dma_ops;
-        return get_arch_dma_ops(dev ? dev->bus : NULL);
+        debug_dma_mapping_error(dev, dma_addr);
+
+        if (dma_addr == DMA_MAPPING_ERROR)
+                return -ENOMEM;
+        return 0;
 }

-static inline void set_dma_ops(struct device *dev,
-                const struct dma_map_ops *dma_ops)
+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+                size_t offset, size_t size, enum dma_data_direction dir,
+                unsigned long attrs);
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+                enum dma_data_direction dir, unsigned long attrs);
+int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+                enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+                int nents, enum dma_data_direction dir,
+                unsigned long attrs);
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+                size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+                enum dma_data_direction dir, unsigned long attrs);
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+                enum dma_data_direction dir);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+                size_t size, enum dma_data_direction dir);
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                int nelems, enum dma_data_direction dir);
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                int nelems, enum dma_data_direction dir);
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+                gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+                dma_addr_t dma_handle, unsigned long attrs);
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+                gfp_t gfp, unsigned long attrs);
+void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+                dma_addr_t dma_handle);
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                unsigned long attrs);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                unsigned long attrs);
+bool dma_can_mmap(struct device *dev);
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+u64 dma_get_required_mask(struct device *dev);
+size_t dma_max_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+unsigned long dma_get_merge_boundary(struct device *dev);
+#else /* CONFIG_HAS_DMA */
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+                struct page *page, size_t offset, size_t size,
+                enum dma_data_direction dir, unsigned long attrs)
 {
-        dev->dma_ops = dma_ops;
+        return DMA_MAPPING_ERROR;
 }
-#else
-/*
- * Define the dma api to allow compilation of dma dependent code.
- * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
- * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
- * where <something> guarantuees the availability of the dma-mapping API.
- */
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+                size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+                int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+        return 0;
+}
+static inline void dma_unmap_sg_attrs(struct device *dev,
+                struct scatterlist *sg, int nents, enum dma_data_direction dir,
+                unsigned long attrs)
+{
+}
+static inline dma_addr_t dma_map_resource(struct device *dev,
+                phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
+                unsigned long attrs)
+{
+        return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+                size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+                size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+                dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+        return -ENOMEM;
+}
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
 {
         return NULL;
 }
-#endif
+static void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+                dma_addr_t dma_handle, unsigned long attrs)
+{
+}
+static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
+                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+        return NULL;
+}
+static inline void dmam_free_coherent(struct device *dev, size_t size,
+                void *vaddr, dma_addr_t dma_handle)
+{
+}
+static inline int dma_get_sgtable_attrs(struct device *dev,
+                struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
+                size_t size, unsigned long attrs)
+{
+        return -ENXIO;
+}
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                unsigned long attrs)
+{
+        return -ENXIO;
+}
+static inline bool dma_can_mmap(struct device *dev)
+{
+        return false;
+}
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+        return 0;
+}
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+        return -EIO;
+}
+static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+        return -EIO;
+}
+static inline u64 dma_get_required_mask(struct device *dev)
+{
+        return 0;
+}
+static inline size_t dma_max_mapping_size(struct device *dev)
+{
+        return 0;
+}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+        return false;
+}
+static inline unsigned long dma_get_merge_boundary(struct device *dev)
+{
+        return 0;
+}
+#endif /* CONFIG_HAS_DMA */
+
+struct page *dma_alloc_pages(struct device *dev, size_t size,
+                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_free_pages(struct device *dev, size_t size, struct page *page,
+                dma_addr_t dma_handle, enum dma_data_direction dir);
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+                dma_addr_t dma_handle, enum dma_data_direction dir);

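For reference, a minimal usage sketch (not part of the diff): dma_alloc_noncoherent() returns memory the CPU may cache, so ownership must be transferred explicitly with the dma_sync_single_for_{device,cpu}() calls declared above. The device and command-buffer scenario below is hypothetical.

```c
#include <linux/dma-mapping.h>

static int send_command(struct device *dev, const void *cmd, size_t len)
{
	dma_addr_t dma;
	void *buf;

	buf = dma_alloc_noncoherent(dev, len, &dma, DMA_TO_DEVICE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, cmd, len);
	/* Hand the buffer to the device: flush the CPU writes. */
	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);

	/* ... start the transfer and wait for completion ... */

	dma_free_noncoherent(dev, len, buf, dma, DMA_TO_DEVICE);
	return 0;
}
```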
---|
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
-                size_t size,
-                enum dma_data_direction dir,
-                unsigned long attrs)
+                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-        dma_addr_t addr;
-
-        BUG_ON(!valid_dma_direction(dir));
-        addr = ops->map_page(dev, virt_to_page(ptr),
-                        offset_in_page(ptr), size,
-                        dir, attrs);
-        debug_dma_map_page(dev, virt_to_page(ptr),
-                        offset_in_page(ptr), size,
-                        dir, addr, true);
-        return addr;
+        /* DMA must never operate on areas that might be remapped. */
+        if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
+                        "rejecting DMA map of vmalloc memory\n"))
+                return DMA_MAPPING_ERROR;
+        debug_dma_map_single(dev, ptr, size);
+        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
+                        size, dir, attrs);
 }

 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
-                size_t size,
-                enum dma_data_direction dir,
-                unsigned long attrs)
+                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->unmap_page)
-                ops->unmap_page(dev, addr, size, dir, attrs);
-        debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-                int nents, enum dma_data_direction dir,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-        int ents;
-
-        BUG_ON(!valid_dma_direction(dir));
-        ents = ops->map_sg(dev, sg, nents, dir, attrs);
-        BUG_ON(ents < 0);
-        debug_dma_map_sg(dev, sg, nents, ents, dir);
-
-        return ents;
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
-                int nents, enum dma_data_direction dir,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        debug_dma_unmap_sg(dev, sg, nents, dir);
-        if (ops->unmap_sg)
-                ops->unmap_sg(dev, sg, nents, dir, attrs);
-}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-                struct page *page,
-                size_t offset, size_t size,
-                enum dma_data_direction dir,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-        dma_addr_t addr;
-
-        BUG_ON(!valid_dma_direction(dir));
-        addr = ops->map_page(dev, page, offset, size, dir, attrs);
-        debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-        return addr;
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev,
-                dma_addr_t addr, size_t size,
-                enum dma_data_direction dir,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->unmap_page)
-                ops->unmap_page(dev, addr, size, dir, attrs);
-        debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
-static inline dma_addr_t dma_map_resource(struct device *dev,
-                phys_addr_t phys_addr,
-                size_t size,
-                enum dma_data_direction dir,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-        dma_addr_t addr;
-
-        BUG_ON(!valid_dma_direction(dir));
-
-        /* Don't allow RAM to be mapped */
-        BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
-
-        addr = phys_addr;
-        if (ops->map_resource)
-                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-
-        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
-
-        return addr;
-}
-
-static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
-                size_t size, enum dma_data_direction dir,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->unmap_resource)
-                ops->unmap_resource(dev, addr, size, dir, attrs);
-        debug_dma_unmap_resource(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-                size_t size,
-                enum dma_data_direction dir)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->sync_single_for_cpu)
-                ops->sync_single_for_cpu(dev, addr, size, dir);
-        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-                dma_addr_t addr, size_t size,
-                enum dma_data_direction dir)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->sync_single_for_device)
-                ops->sync_single_for_device(dev, addr, size, dir);
-        debug_dma_sync_single_for_device(dev, addr, size, dir);
+        return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
 }

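For reference, a minimal usage sketch (not part of the diff): the wrapper above rejects vmalloc addresses and signals failure through DMA_MAPPING_ERROR, so callers must test the result with dma_mapping_error() before using the address. The function names below are hypothetical.

```c
#include <linux/dma-mapping.h>

static int map_for_device(struct device *dev, void *buf, size_t len,
			  dma_addr_t *out)
{
	dma_addr_t addr;

	addr = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* mapping failed, do not use addr */

	*out = addr;
	return 0;
}

static void unmap_for_device(struct device *dev, dma_addr_t addr, size_t len)
{
	dma_unmap_single_attrs(dev, addr, len, DMA_TO_DEVICE, 0);
}
```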
---|
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
-                dma_addr_t addr,
-                unsigned long offset,
-                size_t size,
-                enum dma_data_direction dir)
+                dma_addr_t addr, unsigned long offset, size_t size,
+                enum dma_data_direction dir)
 {
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->sync_single_for_cpu)
-                ops->sync_single_for_cpu(dev, addr + offset, size, dir);
-        debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+        return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
 }

 static inline void dma_sync_single_range_for_device(struct device *dev,
-                dma_addr_t addr,
-                unsigned long offset,
-                size_t size,
-                enum dma_data_direction dir)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->sync_single_for_device)
-                ops->sync_single_for_device(dev, addr + offset, size, dir);
-        debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-                int nelems, enum dma_data_direction dir)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->sync_sg_for_cpu)
-                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
-        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                int nelems, enum dma_data_direction dir)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->sync_sg_for_device)
-                ops->sync_sg_for_device(dev, sg, nelems, dir);
-        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
-}
-
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
-#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
-#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+                dma_addr_t addr, unsigned long offset, size_t size,
                 enum dma_data_direction dir)
 {
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!valid_dma_direction(dir));
-        if (ops->cache_sync)
-                ops->cache_sync(dev, vaddr, size, dir);
+        return dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
-
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-                unsigned long vm_flags,
-                pgprot_t prot, const void *caller);
-
-void *dma_common_pages_remap(struct page **pages, size_t size,
-                unsigned long vm_flags, pgprot_t prot,
-                const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
-                bool nowarn);
-
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
-                dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-        BUG_ON(!ops);
-        if (ops->mmap)
-                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
-                dma_addr_t dma_addr, size_t size,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-        BUG_ON(!ops);
-        if (ops->get_sgtable)
-                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
-                                attrs);
-        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
-}
-
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev) (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, gfp_t flag,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-        void *cpu_addr;
-
-        BUG_ON(!ops);
-        WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
-
-        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
-                return cpu_addr;
-
-        /* let the implementation decide on the zone to allocate from: */
-        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-        if (!arch_dma_alloc_attrs(&dev))
-                return NULL;
-        if (!ops->alloc)
-                return NULL;
-
-        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-        return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-                void *cpu_addr, dma_addr_t dma_handle,
-                unsigned long attrs)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        BUG_ON(!ops);
-
-        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
-                return;
-        /*
-         * On non-coherent platforms which implement DMA-coherent buffers via
-         * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
-         * this far in IRQ context is a) at risk of a BUG_ON() or trying to
-         * sleep on some machines, and b) an indication that the driver is
-         * probably misusing the coherent API anyway.
-         */
-        WARN_ON(irqs_disabled());
-
-        if (!ops->free || !cpu_addr)
-                return;
-
-        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-        ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, gfp_t flag)
-{
-        return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-                void *cpu_addr, dma_addr_t dma_handle)
-{
-        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        debug_dma_mapping_error(dev, dma_addr);
-        if (ops->mapping_error)
-                return ops->mapping_error(dev, dma_addr);
-        return 0;
-}
-
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
-        if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
-                dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        if (!ops)
-                return 0;
-        if (!ops->dma_supported)
-                return 1;
-        return ops->dma_supported(dev, mask);
-}
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        if (ops->set_dma_mask)
-                return ops->set_dma_mask(dev, mask);
-
-        if (!dev->dma_mask || !dma_supported(dev, mask))
-                return -EIO;
-
-        dma_check_mask(dev, mask);
-
-        *dev->dma_mask = mask;
-        return 0;
-}
-#endif

 /**
  * dma_map_sgtable - Map the given buffer for DMA
@@ ... @@
         dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
 }

---|
-static inline void *dma_remap(struct device *dev, void *cpu_addr,
-                dma_addr_t dma_handle, size_t size, unsigned long attrs)
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
+#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
+#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+                dma_addr_t *dma_handle, gfp_t gfp)
 {
-        const struct dma_map_ops *ops = get_dma_ops(dev);

-        if (!ops->remap) {
-                WARN_ONCE(1, "Remap function not implemented for %pS\n",
-                                ops->remap);
-                return NULL;
-        }
-
-        return ops->remap(dev, cpu_addr, dma_handle, size, attrs);
+        return dma_alloc_attrs(dev, size, dma_handle, gfp,
+                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
 }

-
-static inline void dma_unremap(struct device *dev, void *remapped_addr,
-                size_t size)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                void *cpu_addr, dma_addr_t dma_handle)
 {
-        const struct dma_map_ops *ops = get_dma_ops(dev);
-
-        if (!ops->unremap) {
-                WARN_ONCE(1, "unremap function not implemented for %pS\n",
-                                ops->unremap);
-                return;
-        }
-
-        return ops->unremap(dev, remapped_addr, size);
+        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
 }

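For reference, a minimal usage sketch (not part of the diff): the zero-attrs convenience macros above are what most drivers call directly. For scatter-gather I/O, dma_map_sg() returns the number of mapped segments and 0 on failure, while the unmap must use the original entry count. The sg_table and descriptor-ring context below are hypothetical.

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_sg_example(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i, nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
	if (!nents)
		return -EIO;	/* 0 means the mapping failed */

	/* Walk the mapped segments and program the device with each one. */
	for_each_sg(sgt->sgl, sg, nents, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* ... write addr/len into the hypothetical descriptor ring ... */
		(void)addr;
		(void)len;
	}

	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
	return 0;
}
```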
---|

 static inline u64 dma_get_mask(struct device *dev)
 {
-        if (dev && dev->dma_mask && *dev->dma_mask)
+        if (dev->dma_mask && *dev->dma_mask)
                 return *dev->dma_mask;
         return DMA_BIT_MASK(32);
 }
-
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-#else
-static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
-        if (!dma_supported(dev, mask))
-                return -EIO;
-
-        dma_check_mask(dev, mask);
-
-        dev->coherent_dma_mask = mask;
-        return 0;
-}
-#endif

---|
 /*
  * Set both the DMA mask and the coherent DMA mask to the same thing.
@@ ... @@
         return dma_set_mask_and_coherent(dev, mask);
 }

-extern u64 dma_get_required_mask(struct device *dev);
-
-#ifndef arch_setup_dma_ops
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-                u64 size, const struct iommu_ops *iommu,
-                bool coherent) { }
-#endif
-
-#ifndef arch_teardown_dma_ops
-static inline void arch_teardown_dma_ops(struct device *dev) { }
-#endif
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the devices DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+static inline bool dma_addressing_limited(struct device *dev)
+{
+        return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+                        dma_get_required_mask(dev);
+}

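For reference, a minimal usage sketch (not part of the diff): a driver can consult dma_addressing_limited() after setting its masks to note that bounce buffering is likely. The helper below is hypothetical.

```c
#include <linux/dma-mapping.h>

static void check_addressing(struct device *dev)
{
	if (dma_addressing_limited(dev))
		dev_info(dev, "DMA mask covers only part of memory, expect bounce buffering\n");
}
```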
---|
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
 {
@@ ... @@
 {
         if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                 return dev->dma_parms->segment_boundary_mask;
-        return DMA_BIT_MASK(32);
+        return ULONG_MAX;
+}
+
+/**
+ * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
+ * @dev: device to query the boundary for
+ * @page_shift: ilog() of the IOMMU page size
+ *
+ * Return the segment boundary in IOMMU page units (which may be different from
+ * the CPU page size) for the passed in device.
+ *
+ * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
+ * non-DMA API callers.
+ */
+static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
+                unsigned int page_shift)
+{
+        if (!dev)
+                return (U32_MAX >> page_shift) + 1;
+        return (dma_get_seg_boundary(dev) >> page_shift) + 1;
 }

 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
@@ ... @@
         return -EIO;
 }

-#ifndef dma_max_pfn
-static inline unsigned long dma_max_pfn(struct device *dev)
+static inline unsigned int dma_get_min_align_mask(struct device *dev)
 {
-        return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
+        if (dev->dma_parms)
+                return dev->dma_parms->min_align_mask;
+        return 0;
 }
-#endif

-static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, gfp_t flag)
+static inline int dma_set_min_align_mask(struct device *dev,
+                unsigned int min_align_mask)
 {
-        void *ret = dma_alloc_coherent(dev, size, dma_handle,
-                        flag | __GFP_ZERO);
-        return ret;
+        if (WARN_ON_ONCE(!dev->dma_parms))
+                return -EIO;
+        dev->dma_parms->min_align_mask = min_align_mask;
+        return 0;
 }

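For reference, a minimal usage sketch (not part of the diff): dma_set_min_align_mask() records an alignment that bounce buffers must preserve for this device, and it requires dev->dma_parms to already be set up. The 4KiB granule below is an assumed example value.

```c
#include <linux/dma-mapping.h>

static int setup_alignment(struct device *dev)
{
	/* Keep bounce buffers aligned to the device's assumed 4K block size. */
	return dma_set_min_align_mask(dev, 4096 - 1);
}
```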
---|
 static inline int dma_get_cache_alignment(void)
@@ ... @@
         return 1;
 }

-/* flags for the coherent memory api */
-#define DMA_MEMORY_EXCLUSIVE 0x01
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-                dma_addr_t device_addr, size_t size, int flags);
-void dma_release_declared_memory(struct device *dev);
-void *dma_mark_declared_memory_occupied(struct device *dev,
-                dma_addr_t device_addr, size_t size);
-dma_addr_t dma_get_device_base(struct device *dev,
-                struct dma_coherent_mem *mem);
-unsigned long dma_get_size(struct dma_coherent_mem *mem);
-
-#else
-static inline int
-dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-                dma_addr_t device_addr, size_t size, int flags)
-{
-        return -ENOSYS;
-}
-
-static inline void
-dma_release_declared_memory(struct device *dev)
-{
-}
-
-static inline void *
-dma_mark_declared_memory_occupied(struct device *dev,
-                dma_addr_t device_addr, size_t size)
-{
-        return ERR_PTR(-EBUSY);
-}
-static inline dma_addr_t
-dma_get_device_base(struct device *dev, struct dma_coherent_mem *mem)
-{
-        return 0;
-}
-
-static inline unsigned long dma_get_size(struct dma_coherent_mem *mem)
-{
-        return 0;
-}
-
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-
-#ifdef CONFIG_HAS_DMA
-int dma_configure(struct device *dev);
-void dma_deconfigure(struct device *dev);
-#else
-static inline int dma_configure(struct device *dev)
-{
-        return 0;
-}
-
-static inline void dma_deconfigure(struct device *dev) {}
-#endif
-
-/*
- * Managed DMA API
- */
-#ifdef CONFIG_HAS_DMA
-extern void *dmam_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, gfp_t gfp);
-extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
-                dma_addr_t dma_handle);
-#else /* !CONFIG_HAS_DMA */
 static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, gfp_t gfp)
-{ return NULL; }
-static inline void dmam_free_coherent(struct device *dev, size_t size,
-                void *vaddr, dma_addr_t dma_handle) { }
-#endif /* !CONFIG_HAS_DMA */
-
-extern void *dmam_alloc_attrs(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, gfp_t gfp,
-                unsigned long attrs);
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-extern int dmam_declare_coherent_memory(struct device *dev,
-                phys_addr_t phys_addr,
-                dma_addr_t device_addr, size_t size,
-                int flags);
-extern void dmam_release_declared_memory(struct device *dev);
-#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline int dmam_declare_coherent_memory(struct device *dev,
-                phys_addr_t phys_addr, dma_addr_t device_addr,
-                size_t size, gfp_t gfp)
+                dma_addr_t *dma_handle, gfp_t gfp)
 {
-        return 0;
+        return dmam_alloc_attrs(dev, size, dma_handle, gfp,
+                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
 }
-
-static inline void dmam_release_declared_memory(struct device *dev)
-{
-}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

 static inline void *dma_alloc_wc(struct device *dev, size_t size,
                 dma_addr_t *dma_addr, gfp_t gfp)
 {
-        return dma_alloc_attrs(dev, size, dma_addr, gfp,
-                        DMA_ATTR_WRITE_COMBINE);
+        unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+        if (gfp & __GFP_NOWARN)
+                attrs |= DMA_ATTR_NO_WARN;
+
+        return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
 }
-#ifndef dma_alloc_writecombine
-#define dma_alloc_writecombine dma_alloc_wc
-#endif

 static inline void dma_free_wc(struct device *dev, size_t size,
                 void *cpu_addr, dma_addr_t dma_addr)
@@ ... @@
         return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                         DMA_ATTR_WRITE_COMBINE);
 }
-#ifndef dma_free_writecombine
-#define dma_free_writecombine dma_free_wc
-#endif

 static inline int dma_mmap_wc(struct device *dev,
                 struct vm_area_struct *vma,
@@ ... @@
         return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                         DMA_ATTR_WRITE_COMBINE);
 }
-#ifndef dma_mmap_writecombine
-#define dma_mmap_writecombine dma_mmap_wc
-#endif

 #ifdef CONFIG_NEED_DMA_MAP_STATE
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
@@ ... @@
 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
 #endif
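For reference, a minimal usage sketch (not part of the diff): the DMA unmap-state macros let a driver store unmap bookkeeping at zero cost when CONFIG_NEED_DMA_MAP_STATE is not set. The companion helpers dma_unmap_addr()/dma_unmap_addr_set() and dma_unmap_len()/dma_unmap_len_set() sit in the elided portion of this hunk; assuming those, a typical pattern with hypothetical names looks like:

```c
#include <linux/dma-mapping.h>

/* Hypothetical per-buffer bookkeeping in a driver's private struct. */
struct example_buf {
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

static int example_map(struct device *dev, struct example_buf *eb,
		       void *data, size_t size)
{
	dma_addr_t addr = dma_map_single(dev, data, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	/* Remember the mapping only where CONFIG_NEED_DMA_MAP_STATE needs it. */
	dma_unmap_addr_set(eb, mapping, addr);
	dma_unmap_len_set(eb, len, size);
	return 0;
}

static void example_unmap(struct device *dev, struct example_buf *eb)
{
	dma_unmap_single(dev, dma_unmap_addr(eb, mapping),
			 dma_unmap_len(eb, len), DMA_TO_DEVICE);
}
```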
---|

-#endif
+/*
+ * Legacy interface to set up the dma offset map. Drivers really should not
+ * actually use it, but we have a few legacy cases left.
+ */
+int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
+                dma_addr_t dma_start, u64 size);
+
+extern const struct dma_map_ops dma_virt_ops;
+
+#endif /* _LINUX_DMA_MAPPING_H */
---|