@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2010 Samsung Electronics
  *
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -67,7 +67,7 @@
         int i;
 
         order = get_order(size);
-        /* Dont over allocate*/
+        /* Don't over allocate*/
         if ((PAGE_SIZE << order) > size)
                 order--;
 
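get_order() rounds up to the next power-of-two allocation order, so for a size that is not an exact power of two the guard steps the order back down rather than over-allocating; the surrounding allocation loop (not visible in this hunk) then covers the remainder with smaller chunks. A worked example of the arithmetic:

        /*
         * Example: size = 3 * PAGE_SIZE.
         * get_order(size) = 2, i.e. 4 pages, but PAGE_SIZE << 2 > size,
         * so order is stepped down to 1 (2 pages); the remaining page is
         * satisfied by a later, smaller allocation instead of being wasted.
         */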
@@ -120,6 +120,11 @@
         buf->num_pages = size >> PAGE_SHIFT;
         buf->dma_sgt = &buf->sg_table;
 
+        /*
+         * NOTE: dma-sg allocates memory using the page allocator directly, so
+         * there is no memory consistency guarantee, hence dma-sg ignores DMA
+         * attributes passed from the upper layer.
+         */
         buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
                                     GFP_KERNEL | __GFP_ZERO);
         if (!buf->pages)
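kvmalloc_array() tries a physically contiguous kmalloc first and falls back to vmalloc when the page-pointer array is large; __GFP_ZERO returns it zeroed, and the _array form adds an overflow check on the multiplication. A roughly equivalent call, as a sketch:

        /*
         * Roughly equivalent, minus the built-in multiplication overflow
         * check that kvmalloc_array() performs:
         */
        buf->pages = kvzalloc(buf->num_pages * sizeof(struct page *),
                              GFP_KERNEL);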
@@ -142,9 +147,8 @@
          * No need to sync to the device, this will happen later when the
          * prepare() memop is called.
          */
-        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-        if (!sgt->nents)
+        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+                            DMA_ATTR_SKIP_CPU_SYNC))
                 goto fail_map;
 
         buf->handler.refcount = &buf->refcount;
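This hunk (and the matching userptr hunk below) switches the error convention: dma_map_sg_attrs() returns the number of mapped entries, with 0 meaning failure and no error code, and the caller has to store the result in sgt->nents itself; dma_map_sgtable() returns 0 or a negative errno and fills in sgt->nents internally. A minimal sketch of the two conventions, where dev, sgt and dir stand in for the driver's fields:

        /* Old: count-style return, manual nents bookkeeping. */
        sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                      dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (!sgt->nents)        /* 0 on failure, no errno */
                return -EIO;

        /* New: errno-style return, sgt->nents filled in by the helper. */
        ret = dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (ret)
                return ret;

DMA_ATTR_SKIP_CPU_SYNC is kept in both forms: cache maintenance is still deferred to the prepare()/finish() memops, as the comment above the call says.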
@@ -180,8 +184,8 @@
         if (refcount_dec_and_test(&buf->refcount)) {
                 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                         buf->num_pages);
-                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
                 if (buf->vaddr)
                         vm_unmap_ram(buf->vaddr, buf->num_pages);
                 sg_free_table(buf->dma_sgt);
@@ -198,12 +202,7 @@
         struct vb2_dma_sg_buf *buf = buf_priv;
         struct sg_table *sgt = buf->dma_sgt;
 
-        /* DMABUF exporter will flush the cache for us */
-        if (buf->db_attach)
-                return;
-
-        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-                               buf->dma_dir);
+        dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dma_sg_finish(void *buf_priv)
@@ -211,11 +210,7 @@
         struct vb2_dma_sg_buf *buf = buf_priv;
         struct sg_table *sgt = buf->dma_sgt;
 
-        /* DMABUF exporter will flush the cache for us */
-        if (buf->db_attach)
-                return;
-
-        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+        dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
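Two things change in the prepare()/finish() pair: the early return for DMABUF-attached buffers is gone (importers are now expected to bracket CPU access with the begin/end_cpu_access dma-buf ops added further down in this diff), and the open-coded dma_sync_sg_* calls become the sgtable wrappers. Those wrappers are thin; a sketch of what one amounts to:

        static inline void
        dma_sync_sgtable_for_device(struct device *dev, struct sg_table *sgt,
                                    enum dma_data_direction dir)
        {
                /* Syncs against orig_nents, the CPU-side entry count. */
                dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
        }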
@@ -239,8 +234,7 @@
         buf->offset = vaddr & ~PAGE_MASK;
         buf->size = size;
         buf->dma_sgt = &buf->sg_table;
-        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
-                                               dma_dir == DMA_BIDIRECTIONAL);
+        vec = vb2_create_framevec(vaddr, size);
         if (IS_ERR(vec))
                 goto userptr_fail_pfnvec;
         buf->vec = vec;
@@ -259,9 +253,8 @@
          * No need to sync to the device, this will happen later when the
          * prepare() memop is called.
          */
-        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-        if (!sgt->nents)
+        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+                            DMA_ATTR_SKIP_CPU_SYNC))
                 goto userptr_fail_map;
 
         return buf;
@@ -287,8 +280,7 @@
 
         dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
                 __func__, buf->num_pages);
-        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
-                           DMA_ATTR_SKIP_CPU_SYNC);
+        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
         if (buf->vaddr)
                 vm_unmap_ram(buf->vaddr, buf->num_pages);
         sg_free_table(buf->dma_sgt);
@@ -310,8 +302,7 @@
         if (buf->db_attach)
                 buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
         else
-                buf->vaddr = vm_map_ram(buf->pages,
-                                        buf->num_pages, -1, PAGE_KERNEL);
+                buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
 }
 
 /* add offset in case userptr is not page-aligned */
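vm_map_ram() lost its pgprot_t parameter: PAGE_KERNEL was the only supported value, so the mapping protection is now implied and call sites simply shrink. Side by side:

        /* Old signature: pgprot was always PAGE_KERNEL in practice. */
        vaddr = vm_map_ram(pages, num_pages, -1, PAGE_KERNEL);

        /* New signature: the node hint (-1 = any node) is the last arg. */
        vaddr = vm_map_ram(pages, num_pages, -1);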
@@ -328,28 +319,18 @@
 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
         struct vb2_dma_sg_buf *buf = buf_priv;
-        unsigned long uaddr = vma->vm_start;
-        unsigned long usize = vma->vm_end - vma->vm_start;
-        int i = 0;
+        int err;
 
         if (!buf) {
                 printk(KERN_ERR "No memory to map\n");
                 return -EINVAL;
         }
 
-        do {
-                int ret;
-
-                ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
-                if (ret) {
-                        printk(KERN_ERR "Remapping memory, error: %d\n", ret);
-                        return ret;
-                }
-
-                uaddr += PAGE_SIZE;
-                usize -= PAGE_SIZE;
-        } while (usize > 0);
-
+        err = vm_map_pages(vma, buf->pages, buf->num_pages);
+        if (err) {
+                printk(KERN_ERR "Remapping memory, error: %d\n", err);
+                return err;
+        }
 
         /*
          * Use common vm_area operations to track buffer refcount.
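vm_map_pages() folds the per-page vm_insert_page() loop into one call and additionally range-checks the request against the vma and the page array. A simplified sketch of what the helper does internally (the real code in mm/memory.c also honours vma->vm_pgoff and rejects requests larger than the array):

        /* Simplified sketch of vm_map_pages(); not the actual mm code. */
        static int vm_map_pages_sketch(struct vm_area_struct *vma,
                                       struct page **pages, unsigned long num)
        {
                unsigned long uaddr = vma->vm_start;
                unsigned long i;
                int ret;

                for (i = 0; i < num; i++) {
                        ret = vm_insert_page(vma, uaddr, pages[i]);
                        if (ret)
                                return ret;
                        uaddr += PAGE_SIZE;
                }
                return 0;
        }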
@@ -422,8 +403,7 @@
 
         /* release the scatterlist cache */
         if (attach->dma_dir != DMA_NONE)
-                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                             attach->dma_dir);
+                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
         sg_free_table(sgt);
         kfree(attach);
         db_attach->priv = NULL;
@@ -448,15 +428,12 @@
 
         /* release any previous cache */
         if (attach->dma_dir != DMA_NONE) {
-                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                             attach->dma_dir);
+                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                 attach->dma_dir = DMA_NONE;
         }
 
         /* mapping to the client with new direction */
-        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                                dma_dir);
-        if (!sgt->nents) {
+        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                 pr_err("failed to map scatterlist\n");
                 mutex_unlock(lock);
                 return ERR_PTR(-EIO);
@@ -481,11 +458,26 @@
         vb2_dma_sg_put(dbuf->priv);
 }
 
-static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+static int
+vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+                                       enum dma_data_direction direction)
 {
         struct vb2_dma_sg_buf *buf = dbuf->priv;
+        struct sg_table *sgt = buf->dma_sgt;
 
-        return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+        return 0;
+}
+
+static int
+vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+                                     enum dma_data_direction direction)
+{
+        struct vb2_dma_sg_buf *buf = dbuf->priv;
+        struct sg_table *sgt = buf->dma_sgt;
+
+        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+        return 0;
 }
 
 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
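With the kmap-style .map op removed, CPU access by an importer goes through the dma_buf begin/end bracket, which dispatches to the two new ops above; that is where the cache maintenance that prepare()/finish() no longer do for attached buffers now happens. A sketch of the importer side, with error handling trimmed and dbuf standing in for the imported struct dma_buf:

        ret = dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
        if (ret)
                return ret;
        vaddr = dma_buf_vmap(dbuf);        /* served by ops->vmap */
        /* ... CPU reads/writes the buffer ... */
        dma_buf_vunmap(dbuf, vaddr);
        dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);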
@@ -506,7 +498,8 @@
         .detach = vb2_dma_sg_dmabuf_ops_detach,
         .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
         .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
-        .map = vb2_dma_sg_dmabuf_ops_kmap,
+        .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+        .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
         .vmap = vb2_dma_sg_dmabuf_ops_vmap,
         .mmap = vb2_dma_sg_dmabuf_ops_mmap,
         .release = vb2_dma_sg_dmabuf_ops_release,