forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/armada/armada_gem.c
@@ -1,16 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2012 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
+
 #include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
+#include <linux/mman.h>
 #include <linux/shmem_fs.h>
+
+#include <drm/armada_drm.h>
+#include <drm/drm_prime.h>
+
 #include "armada_drm.h"
 #include "armada_gem.h"
-#include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
 
 static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
@@ -37,7 +39,7 @@
 void armada_gem_free_object(struct drm_gem_object *obj)
 {
 	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
-	struct armada_private *priv = obj->dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(obj->dev);
 
 	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
 
@@ -75,7 +77,7 @@
 int
 armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 {
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(dev);
 	size_t size = obj->obj.size;
 
 	if (obj->page || obj->linear)
@@ -254,7 +256,7 @@
 	/* drop reference from allocate - handle holds it now */
 	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
  err:
-	drm_gem_object_put_unlocked(&dobj->obj);
+	drm_gem_object_put(&dobj->obj);
 	return ret;
 }
 
@@ -286,7 +288,7 @@
 	/* drop reference from allocate - handle holds it now */
 	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
  err:
-	drm_gem_object_put_unlocked(&dobj->obj);
+	drm_gem_object_put(&dobj->obj);
 	return ret;
 }
 
@@ -303,13 +305,13 @@
 		return -ENOENT;
 
 	if (!dobj->obj.filp) {
-		drm_gem_object_put_unlocked(&dobj->obj);
+		drm_gem_object_put(&dobj->obj);
 		return -EINVAL;
 	}
 
 	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
 		       MAP_SHARED, args->offset);
-	drm_gem_object_put_unlocked(&dobj->obj);
+	drm_gem_object_put(&dobj->obj);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
@@ -334,7 +336,7 @@
 
 	ptr = (char __user *)(uintptr_t)args->ptr;
 
-	if (!access_ok(VERIFY_READ, ptr, args->size))
+	if (!access_ok(ptr, args->size))
 		return -EFAULT;
 
 	ret = fault_in_pages_readable(ptr, args->size);
@@ -364,7 +366,7 @@
 	}
 
  unref:
-	drm_gem_object_put_unlocked(&dobj->obj);
+	drm_gem_object_put(&dobj->obj);
 	return ret;
 }
 
@@ -377,7 +379,7 @@
 	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
 	struct scatterlist *sg;
 	struct sg_table *sgt;
-	int i, num;
+	int i;
 
 	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt)
@@ -393,22 +395,18 @@
 
 		mapping = dobj->obj.filp->f_mapping;
 
-		for_each_sg(sgt->sgl, sg, count, i) {
+		for_each_sgtable_sg(sgt, sg, i) {
 			struct page *page;
 
 			page = shmem_read_mapping_page(mapping, i);
-			if (IS_ERR(page)) {
-				num = i;
+			if (IS_ERR(page))
 				goto release;
-			}
 
 			sg_set_page(sg, page, PAGE_SIZE, 0);
 		}
 
-		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
-			num = sgt->nents;
+		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
 			goto release;
-		}
 	} else if (dobj->page) {
 		/* Single contiguous page */
 		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
@@ -416,7 +414,7 @@
 
 		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
 
-		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
 			goto free_table;
 	} else if (dobj->linear) {
 		/* Single contiguous physical region - no struct page */
@@ -430,8 +428,9 @@
 	return sgt;
 
  release:
-	for_each_sg(sgt->sgl, sg, num, i)
-		put_page(sg_page(sg));
+	for_each_sgtable_sg(sgt, sg, i)
+		if (sg_page(sg))
+			put_page(sg_page(sg));
  free_table:
 	sg_free_table(sgt);
  free_sgt:
@@ -447,26 +446,17 @@
 	int i;
 
 	if (!dobj->linear)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 
 	if (dobj->obj.filp) {
 		struct scatterlist *sg;
-		for_each_sg(sgt->sgl, sg, sgt->nents, i)
+
+		for_each_sgtable_sg(sgt, sg, i)
 			put_page(sg_page(sg));
 	}
 
 	sg_free_table(sgt);
 	kfree(sgt);
-}
-
-static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
-{
-	return NULL;
-}
-
-static void
-armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
-{
 }
 
 static int
@@ -479,14 +469,11 @@
 	.map_dma_buf	= armada_gem_prime_map_dma_buf,
 	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
 	.release	= drm_gem_dmabuf_release,
-	.map		= armada_gem_dmabuf_no_kmap,
-	.unmap		= armada_gem_dmabuf_no_kunmap,
 	.mmap		= armada_gem_dmabuf_mmap,
 };
 
 struct dma_buf *
-armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
-	int flags)
+armada_gem_prime_export(struct drm_gem_object *obj, int flags)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
@@ -495,7 +482,7 @@
 	exp_info.flags = O_RDWR;
 	exp_info.priv = obj;
 
-	return drm_gem_dmabuf_export(dev, &exp_info);
+	return drm_gem_dmabuf_export(obj->dev, &exp_info);
 }
 
 struct drm_gem_object *
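
For reference, a minimal self-contained sketch (not taken from armada_gem.c; the helper names example_map_pages() and example_unmap_pages() are hypothetical) of the sg_table-based DMA mapping pattern this diff moves to, replacing the older dma_map_sg() calls and manual nents bookkeeping:

/*
 * Illustrative sketch only - not part of the driver. Shows the
 * dma_map_sgtable()/for_each_sgtable_sg() pattern the diff adopts.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *example_map_pages(struct device *dev,
					  struct page **pages,
					  unsigned int count,
					  enum dma_data_direction dir)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(sgt, count, GFP_KERNEL))
		goto free_sgt;

	/* One page per scatterlist entry, as in the shmem-backed path above. */
	for_each_sgtable_sg(sgt, sg, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* dma_map_sgtable() returns 0 on success and tracks nents internally. */
	if (dma_map_sgtable(dev, sgt, dir, 0))
		goto free_table;

	return sgt;

 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return ERR_PTR(-ENOMEM);
}

static void example_unmap_pages(struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sgtable(dev, sgt, dir, 0);
	sg_free_table(sgt);
	kfree(sgt);
}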