@@ -46,16 +46,16 @@
 
 	buf->size = size;
 	buf->vaddr = vmalloc_user(buf->size);
-	buf->dma_dir = dma_dir;
-	buf->handler.refcount = &buf->refcount;
-	buf->handler.put = vb2_vmalloc_put;
-	buf->handler.arg = buf;
-
 	if (!buf->vaddr) {
 		pr_debug("vmalloc of size %ld failed\n", buf->size);
 		kfree(buf);
 		return ERR_PTR(-ENOMEM);
 	}
+
+	buf->dma_dir = dma_dir;
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_vmalloc_put;
+	buf->handler.arg = buf;
 
 	refcount_set(&buf->refcount, 1);
 	return buf;
@@ -87,8 +87,7 @@
 	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
-	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
-				  dma_dir == DMA_BIDIRECTIONAL);
+	vec = vb2_create_framevec(vaddr, size);
 	if (IS_ERR(vec)) {
 		ret = PTR_ERR(vec);
 		goto fail_pfnvec_create;
@@ -106,10 +105,9 @@
 			if (nums[i-1] + 1 != nums[i])
 				goto fail_map;
 		buf->vaddr = (__force void *)
-			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
+			ioremap(__pfn_to_phys(nums[0]), size + offset);
 	} else {
-		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
-					PAGE_KERNEL);
+		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
 	}
 
 	if (!buf->vaddr)
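(Context note, not part of the patch: vm_map_ram() no longer takes a page-protection argument, as the mapping is always created with PAGE_KERNEL, and ioremap_nocache() has been folded into plain ioremap(). A minimal sketch of the updated vm_map_ram() call, with pages and n_pages as hypothetical caller-provided inputs:)

	#include <linux/vmalloc.h>

	/* Sketch only: pages and n_pages are assumed to come from the caller. */
	static void *map_pages_sketch(struct page **pages, unsigned int n_pages)
	{
		/*
		 * Third argument is the preferred NUMA node; -1 means no
		 * preference. The former PAGE_KERNEL argument is gone.
		 */
		return vm_map_ram(pages, n_pages, -1);
	}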
@@ -231,7 +229,7 @@
 		kfree(attach);
 		return ret;
 	}
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+	for_each_sgtable_sg(sgt, sg, i) {
 		struct page *page = vmalloc_to_page(vaddr);
 
 		if (!page) {
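(Context note, not part of the patch: for_each_sgtable_sg() walks every entry allocated in the sg_table, i.e. sgt->orig_nents of them, instead of relying on a caller-maintained count as for_each_sg() did. A rough usage sketch, assuming a hypothetical, already-populated sgt:)

	#include <linux/printk.h>
	#include <linux/scatterlist.h>

	/* Sketch only: sgt is assumed to have been allocated and filled. */
	static void dump_sgtable_sketch(struct sg_table *sgt)
	{
		struct scatterlist *sg;
		unsigned int i;

		/* Iterates over all allocated entries, mapped or not. */
		for_each_sgtable_sg(sgt, sg, i)
			pr_debug("entry %u: length %u bytes\n", i, sg->length);
	}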
@@ -261,8 +259,7 @@
 
 	/* release the scatterlist cache */
 	if (attach->dma_dir != DMA_NONE)
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
@@ -287,15 +284,12 @@
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 		attach->dma_dir = DMA_NONE;
 	}
 
 	/* mapping to the client with new direction */
-	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				dma_dir);
-	if (!sgt->nents) {
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
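(Context note, not part of the patch: dma_map_sgtable() and dma_unmap_sgtable() operate on the whole sg_table, so the mapped-entry count is tracked inside the table rather than assigned to sgt->nents by the caller, and failure is reported as a negative errno instead of a zero count. A hedged sketch of that pattern, with dev, sgt and dir as placeholder inputs:)

	#include <linux/dma-mapping.h>

	/* Sketch only: dev, sgt and dir are assumed to come from the caller. */
	static int map_unmap_sketch(struct device *dev, struct sg_table *sgt,
				    enum dma_data_direction dir)
	{
		int ret;

		ret = dma_map_sgtable(dev, sgt, dir, 0);
		if (ret)
			return ret;	/* negative errno, unlike dma_map_sg() */

		/* ... device DMA would use the mapping here ... */

		dma_unmap_sgtable(dev, sgt, dir, 0);
		return 0;
	}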
@@ -320,13 +314,6 @@
 	vb2_vmalloc_put(dbuf->priv);
 }
 
-static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
-{
-	struct vb2_vmalloc_buf *buf = dbuf->priv;
-
-	return buf->vaddr + pgnum * PAGE_SIZE;
-}
-
 static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
 	struct vb2_vmalloc_buf *buf = dbuf->priv;
@@ -345,7 +332,6 @@
 	.detach = vb2_vmalloc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
-	.map = vb2_vmalloc_dmabuf_ops_kmap,
 	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
 	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
 	.release = vb2_vmalloc_dmabuf_ops_release,