@@ -247,10 +247,9 @@
 
 	if (sgt) {
 		if (gntdev_dmabuf_attach->dir != DMA_NONE)
-			dma_unmap_sg_attrs(attach->dev, sgt->sgl,
-					   sgt->nents,
-					   gntdev_dmabuf_attach->dir,
-					   DMA_ATTR_SKIP_CPU_SYNC);
+			dma_unmap_sgtable(attach->dev, sgt,
+					  gntdev_dmabuf_attach->dir,
+					  DMA_ATTR_SKIP_CPU_SYNC);
 		sg_free_table(sgt);
 	}
 
@@ -288,8 +287,8 @@
 	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
 				  gntdev_dmabuf->nr_pages);
 	if (!IS_ERR(sgt)) {
-		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-				      DMA_ATTR_SKIP_CPU_SYNC)) {
+		if (dma_map_sgtable(attach->dev, sgt, dir,
+				    DMA_ATTR_SKIP_CPU_SYNC)) {
 			sg_free_table(sgt);
 			kfree(sgt);
 			sgt = ERR_PTR(-ENOMEM);
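These first two hunks are the heart of the `sg_table` API conversion, and the dropped `!` is deliberate: `dma_map_sg_attrs()` returns the number of mapped entries, with 0 meaning failure, whereas `dma_map_sgtable()` returns 0 on success and a negative errno on failure. The wrappers also pick the correct entry counts on the caller's behalf, `orig_nents` for both mapping and unmapping, with the DMA-side count stored in `sgt->nents`. A simplified sketch of the wrappers, modeled on `include/linux/dma-mapping.h` rather than quoted verbatim:

```c
/* Simplified sketch of the sg_table mapping helpers. */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
				  enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	/* Map every CPU-side entry; an IOMMU may coalesce them. */
	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;	/* count of DMA-mapped entries */
	return 0;		/* 0 on success, unlike dma_map_sg_attrs() */
}

static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
				     enum dma_data_direction dir, unsigned long attrs)
{
	/* Unmapping is keyed by the original entry count, not sgt->nents. */
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
```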
@@ -342,35 +341,12 @@
 	mutex_unlock(&priv->lock);
 }
 
-static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
-				 unsigned long page_num)
-{
-	/* Not implemented. */
-	return NULL;
-}
-
-static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
-				  unsigned long page_num, void *addr)
-{
-	/* Not implemented. */
-}
-
-static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
-			       struct vm_area_struct *vma)
-{
-	/* Not implemented. */
-	return 0;
-}
-
 static const struct dma_buf_ops dmabuf_exp_ops = {
 	.attach = dmabuf_exp_ops_attach,
 	.detach = dmabuf_exp_ops_detach,
 	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
 	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
 	.release = dmabuf_exp_ops_release,
-	.map = dmabuf_exp_ops_kmap,
-	.unmap = dmabuf_exp_ops_kunmap,
-	.mmap = dmabuf_exp_ops_mmap,
 };
 
 struct gntdev_dmabuf_export_args {
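None of the three stubbed callbacks is required anymore: the page-level `.map`/`.unmap` (kmap) hooks no longer exist in `struct dma_buf_ops`, and `.mmap` is optional in current kernels, where the dma-buf core reports an error to callers if the callback is absent. Leaving the slots empty expresses "unsupported" more honestly than stubs that silently return NULL or 0.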
@@ -446,7 +422,7 @@
 {
 	struct gntdev_grant_map *map;
 
-	if (unlikely(count <= 0))
+	if (unlikely(gntdev_test_page_count(count)))
 		return ERR_PTR(-EINVAL);
 
 	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
@@ -459,11 +435,6 @@
 	if (!map)
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(gntdev_account_mapped_pages(count))) {
-		pr_debug("can't map %d pages: over limit\n", count);
-		gntdev_put_map(NULL, map);
-		return ERR_PTR(-ENOMEM);
-	}
 	return map;
 }
 
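The previous two hunks belong together: the open-coded `count <= 0` check and the global accounting in `gntdev_account_mapped_pages()` are both folded into one shared helper, as the series replaces the global mapped-page budget with a per-call limit. The helper's body is outside this file; the following sketch of its assumed shape is consistent with every call site in this diff (a hypothetical reconstruction, the real definition lives in gntdev.c next to its `limit` module parameter):

```c
/* Assumed shape of the shared helper (defined in gntdev.c, not shown in
 * this diff): reject a zero page count or one above the per-call limit,
 * so every caller can fail early with -EINVAL.
 */
bool gntdev_test_page_count(unsigned int count)
{
	return !count || count > limit;	/* "limit" is a gntdev module parameter */
}
```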
@@ -661,7 +632,7 @@
 
 	/* Now convert sgt to array of pages and check for page validity. */
 	i = 0;
-	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
+	for_each_sgtable_page(sgt, &sg_iter, 0) {
 		struct page *page = sg_page_iter_page(&sg_iter);
 		/*
 		 * Check if page is valid: this can happen if we are given
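`for_each_sgtable_page()` walks the CPU pages of the table, i.e. all `orig_nents` entries. The replaced call iterated over `sgt->nents`, the DMA-mapped count, which an IOMMU may have coalesced to fewer entries than there are backing pages, so valid pages could have been skipped. The helper is a thin macro, roughly (per `include/linux/scatterlist.h`):

```c
/* Walk the CPU pages of an sg_table, using the original entry count. */
#define for_each_sgtable_page(sgt, piter, pgoffset) \
	for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
```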
@@ -753,6 +724,14 @@
 	return 0;
 }
 
+static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
+{
+	struct gntdev_dmabuf *q, *gntdev_dmabuf;
+
+	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
+		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
+}
+
 /* DMA buffer IOCTL support. */
 
 long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
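Note the `_safe` iterator: `dmabuf_imp_release()` unlinks the buffer from `priv->imp_list` and frees it, so the loop must cache the next element (`q`) before the current one disappears; a plain `list_for_each_entry()` would dereference freed memory.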
@@ -771,7 +750,7 @@
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
@@ -818,7 +797,7 @@
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
@@ -870,5 +849,6 @@
 
 void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
 {
+	dmabuf_imp_release_all(priv);
 	kfree(priv);
 }
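Calling `dmabuf_imp_release_all()` just before `kfree(priv)` ensures that imported buffers user space never released, for example because the process crashed, are torn down together with the private data instead of being leaked along with their grant references.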