hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/xen/gntdev-dmabuf.c
@@ -247,10 +247,9 @@
 
 		if (sgt) {
 			if (gntdev_dmabuf_attach->dir != DMA_NONE)
-				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
-						   sgt->nents,
-						   gntdev_dmabuf_attach->dir,
-						   DMA_ATTR_SKIP_CPU_SYNC);
+				dma_unmap_sgtable(attach->dev, sgt,
+						  gntdev_dmabuf_attach->dir,
+						  DMA_ATTR_SKIP_CPU_SYNC);
 			sg_free_table(sgt);
 		}
 
@@ -288,8 +287,8 @@
 	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
 				  gntdev_dmabuf->nr_pages);
 	if (!IS_ERR(sgt)) {
-		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-				      DMA_ATTR_SKIP_CPU_SYNC)) {
+		if (dma_map_sgtable(attach->dev, sgt, dir,
+				    DMA_ATTR_SKIP_CPU_SYNC)) {
 			sg_free_table(sgt);
 			kfree(sgt);
 			sgt = ERR_PTR(-ENOMEM);
@@ -342,35 +341,12 @@
 	mutex_unlock(&priv->lock);
 }
 
-static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
-				 unsigned long page_num)
-{
-	/* Not implemented. */
-	return NULL;
-}
-
-static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
-				  unsigned long page_num, void *addr)
-{
-	/* Not implemented. */
-}
-
-static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
-			       struct vm_area_struct *vma)
-{
-	/* Not implemented. */
-	return 0;
-}
-
 static const struct dma_buf_ops dmabuf_exp_ops = {
 	.attach = dmabuf_exp_ops_attach,
 	.detach = dmabuf_exp_ops_detach,
 	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
 	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
 	.release = dmabuf_exp_ops_release,
-	.map = dmabuf_exp_ops_kmap,
-	.unmap = dmabuf_exp_ops_kunmap,
-	.mmap = dmabuf_exp_ops_mmap,
 };
 
 struct gntdev_dmabuf_export_args {
@@ -446,7 +422,7 @@
 {
 	struct gntdev_grant_map *map;
 
-	if (unlikely(count <= 0))
+	if (unlikely(gntdev_test_page_count(count)))
 		return ERR_PTR(-EINVAL);
 
 	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
@@ -459,11 +435,6 @@
 	if (!map)
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(gntdev_account_mapped_pages(count))) {
-		pr_debug("can't map %d pages: over limit\n", count);
-		gntdev_put_map(NULL, map);
-		return ERR_PTR(-ENOMEM);
-	}
 	return map;
 }
 
@@ -661,7 +632,7 @@
 
 	/* Now convert sgt to array of pages and check for page validity. */
 	i = 0;
-	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
+	for_each_sgtable_page(sgt, &sg_iter, 0) {
 		struct page *page = sg_page_iter_page(&sg_iter);
 		/*
 		 * Check if page is valid: this can happen if we are given
@@ -753,6 +724,14 @@
 	return 0;
 }
 
+static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
+{
+	struct gntdev_dmabuf *q, *gntdev_dmabuf;
+
+	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
+		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
+}
+
 /* DMA buffer IOCTL support. */
 
 long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
@@ -771,7 +750,7 @@
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
@@ -818,7 +797,7 @@
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
@@ -870,5 +849,6 @@
 
 void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
 {
+	dmabuf_imp_release_all(priv);
 	kfree(priv);
 }