forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/msm/msm_gem.c
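Summary (reconstructed from the hunks below; the original commit message is not preserved): this diff appears to backport the upstream drm/msm GEM rework — an SPDX tag replaces the GPL boilerplate, open-coded dma_map_sg()/dma_unmap_sg() paths become dma_map_sgtable()/dma_unmap_sgtable(), reservation_object becomes dma_resv, iova handling is split into lookup/pin/unpin helpers with an optional allocation range, object freeing is deferred to a workqueue, and GEM objects gain a debug name shown in debugfs.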
@@ -1,24 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
+#include <linux/dma-map-ops.h>
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/pfn_t.h>
+
+#include <drm/drm_prime.h>
 
 #include "msm_drv.h"
 #include "msm_fence.h"
@@ -61,26 +53,14 @@
 {
 	struct device *dev = msm_obj->base.dev->dev;
 
-	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	} else {
-		dma_map_sg(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	}
+	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 static void sync_for_cpu(struct msm_gem_object *msm_obj)
 {
 	struct device *dev = msm_obj->base.dev->dev;
 
-	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	} else {
-		dma_unmap_sg(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	}
+	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
@@ -128,18 +108,18 @@
 	p = get_pages_vram(obj, npages);
 
 	if (IS_ERR(p)) {
-		dev_err(dev->dev, "could not get pages: %ld\n",
+		DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
 				PTR_ERR(p));
 		return p;
 	}
 
 	msm_obj->pages = p;
 
-	msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+	msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
 	if (IS_ERR(msm_obj->sgt)) {
 		void *ptr = ERR_CAST(msm_obj->sgt);
 
-		dev_err(dev->dev, "failed to allocate sgt\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
 		msm_obj->sgt = NULL;
 		return ptr;
 	}
@@ -317,7 +297,7 @@
 	ret = drm_gem_create_mmap_offset(obj);
 
 	if (ret) {
-		dev_err(dev->dev, "could not allocate mmap offset\n");
+		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
 		return 0;
 	}
 
@@ -389,63 +369,127 @@
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
-		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+		if (vma->aspace) {
+			msm_gem_purge_vma(vma->aspace, vma);
+			msm_gem_close_vma(vma->aspace, vma);
+		}
 		del_vma(vma);
 	}
 }
 
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova)
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova,
+		u64 range_start, u64 range_end)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	mutex_lock(&msm_obj->lock);
-
-	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
-		return -EBUSY;
-	}
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	vma = lookup_vma(obj, aspace);
 
 	if (!vma) {
-		struct page **pages;
-
 		vma = add_vma(obj, aspace);
-		if (IS_ERR(vma)) {
-			ret = PTR_ERR(vma);
-			goto unlock;
-		}
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
-		pages = get_pages(obj);
-		if (IS_ERR(pages)) {
-			ret = PTR_ERR(pages);
-			goto fail;
+		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
+			range_start, range_end);
+		if (ret) {
+			del_vma(vma);
+			return ret;
 		}
-
-		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-				obj->size >> PAGE_SHIFT);
-		if (ret)
-			goto fail;
 	}
 
 	*iova = vma->iova;
-
-	mutex_unlock(&msm_obj->lock);
 	return 0;
+}
 
-fail:
-	del_vma(vma);
-unlock:
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+	struct page **pages;
+	int prot = IOMMU_READ;
+
+	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+		prot |= IOMMU_WRITE;
+
+	if (msm_obj->flags & MSM_BO_MAP_PRIV)
+		prot |= IOMMU_PRIV;
+
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+		return -EBUSY;
+
+	vma = lookup_vma(obj, aspace);
+	if (WARN_ON(!vma))
+		return -EINVAL;
+
+	pages = get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	return msm_gem_map_vma(aspace, vma, prot,
+			msm_obj->sgt, obj->size >> PAGE_SHIFT);
+}
+
+/*
+ * get iova and pin it. Should have a matching put
+ * limits iova to specified range (in pages)
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova,
+		u64 range_start, u64 range_end)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	u64 local;
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+
+	ret = msm_gem_get_iova_locked(obj, aspace, &local,
+		range_start, range_end);
+
+	if (!ret)
+		ret = msm_gem_pin_iova(obj, aspace);
+
+	if (!ret)
+		*iova = local;
+
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
 
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+}
+
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
+	mutex_unlock(&msm_obj->lock);
+
+	return ret;
+}
+
 /* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
+ * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
  */
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
@@ -461,15 +505,24 @@
 	return vma ? vma->iova : 0;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin a iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	// XXX TODO ..
-	// NOTE: probably don't need a _locked() version.. we wouldn't
-	// normally unmap here, but instead just mark that it could be
-	// unmapped (if the iova refcnt drops to zero), but then later
-	// if another _get_iova_locked() fails we can start unmapping
-	// things that are no longer needed..
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+
+	mutex_lock(&msm_obj->lock);
+	vma = lookup_vma(obj, aspace);
+
+	if (!WARN_ON(!vma))
+		msm_gem_unmap_vma(aspace, vma);
+
+	mutex_unlock(&msm_obj->lock);
 }
 
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -478,7 +531,7 @@
 	args->pitch = align_pitch(args->width, args->bpp);
 	args->size = PAGE_ALIGN(args->pitch * args->height);
 	return msm_gem_new_handle(dev, file, args->size,
-			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 }
 
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -496,7 +549,7 @@
 
 	*offset = msm_gem_mmap_offset(obj);
 
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 
 fail:
 	return ret;
@@ -507,10 +560,13 @@
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
 
+	if (obj->import_attach)
+		return ERR_PTR(-ENODEV);
+
 	mutex_lock(&msm_obj->lock);
 
 	if (WARN_ON(msm_obj->madv > madv)) {
-		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 			msm_obj->madv, madv);
 		mutex_unlock(&msm_obj->lock);
 		return ERR_PTR(-EBUSY);
@@ -655,14 +711,13 @@
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int i, ret;
 
-	fobj = reservation_object_get_list(msm_obj->resv);
+	fobj = dma_resv_get_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
-		fence = reservation_object_get_excl(msm_obj->resv);
+		fence = dma_resv_get_excl(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
 			ret = dma_fence_wait(fence, true);
@@ -676,7 +731,7 @@
 
 	for (i = 0; i < fobj->shared_count; i++) {
 		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(msm_obj->resv));
+						dma_resv_held(obj->resv));
 		if (fence->context != fctx->context) {
 			ret = dma_fence_wait(fence, true);
 			if (ret)
@@ -687,42 +742,41 @@
 	return 0;
 }
 
-void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
+void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
-	msm_obj->gpu = gpu;
-	if (exclusive)
-		reservation_object_add_excl_fence(msm_obj->resv, fence);
-	else
-		reservation_object_add_shared_fence(msm_obj->resv, fence);
-	list_del_init(&msm_obj->mm_list);
-	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+
+	if (!atomic_fetch_inc(&msm_obj->active_count)) {
+		msm_obj->gpu = gpu;
+		list_del_init(&msm_obj->mm_list);
+		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+	}
 }
 
-void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+void msm_gem_active_put(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
-	msm_obj->gpu = NULL;
-	list_del_init(&msm_obj->mm_list);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	if (!atomic_dec_return(&msm_obj->active_count)) {
+		msm_obj->gpu = NULL;
+		list_del_init(&msm_obj->mm_list);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	}
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	bool write = !!(op & MSM_PREP_WRITE);
 	unsigned long remain =
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
 						  true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -745,7 +799,7 @@
 		struct seq_file *m)
 {
 	if (!dma_fence_is_signaled(fence))
-		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
 				fence->ops->get_driver_name(fence),
 				fence->ops->get_timeline_name(fence),
 				fence->seqno);
@@ -754,8 +808,8 @@
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object *robj = msm_obj->resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj = obj->resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	struct msm_gem_vma *vma;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
@@ -776,16 +830,43 @@
 		break;
 	}
 
-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, kref_read(&obj->refcount),
 			off, msm_obj->vaddr);
 
-	/* FIXME: we need to print the address space here too */
-	list_for_each_entry(vma, &msm_obj->vmas, list)
-		seq_printf(m, " %08llx", vma->iova);
+	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
 
-	seq_printf(m, " %zu%s\n", obj->size, madv);
+	if (!list_empty(&msm_obj->vmas)) {
+
+		seq_puts(m, "      vmas:");
+
+		list_for_each_entry(vma, &msm_obj->vmas, list) {
+			const char *name, *comm;
+			if (vma->aspace) {
+				struct msm_gem_address_space *aspace = vma->aspace;
+				struct task_struct *task =
+					get_pid_task(aspace->pid, PIDTYPE_PID);
+				if (task) {
+					comm = kstrdup(task->comm, GFP_KERNEL);
+					put_task_struct(task);
+				} else {
+					comm = NULL;
+				}
+				name = aspace->name;
+			} else {
+				name = comm = NULL;
+			}
+			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+				name, comm ? ":" : "", comm ? comm : "",
+				vma->aspace, vma->iova,
+				vma->mapped ? "mapped" : "unmapped",
+				vma->inuse);
+			kfree(comm);
+		}
+
+		seq_puts(m, "\n");
+	}
 
 	rcu_read_lock();
 	fobj = rcu_dereference(robj->fence);
@@ -812,9 +893,10 @@
 	int count = 0;
 	size_t size = 0;
 
+	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
 	list_for_each_entry(msm_obj, list, mm_list) {
 		struct drm_gem_object *obj = &msm_obj->base;
-		seq_printf(m, "   ");
+		seq_puts(m, "   ");
 		msm_gem_describe(obj, m);
 		count++;
 		size += obj->size;
824906 }
825907 #endif
826908
827
-/* don't call directly! Use drm_gem_object_put() and friends */
909
+/* don't call directly! Use drm_gem_object_put_locked() and friends */
828910 void msm_gem_free_object(struct drm_gem_object *obj)
829911 {
830
- struct drm_device *dev = obj->dev;
831912 struct msm_gem_object *msm_obj = to_msm_bo(obj);
913
+ struct drm_device *dev = obj->dev;
914
+ struct msm_drm_private *priv = dev->dev_private;
915
+
916
+ if (llist_add(&msm_obj->freed, &priv->free_list))
917
+ queue_work(priv->wq, &priv->free_work);
918
+}
919
+
920
+static void free_object(struct msm_gem_object *msm_obj)
921
+{
922
+ struct drm_gem_object *obj = &msm_obj->base;
923
+ struct drm_device *dev = obj->dev;
832924
833925 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
834926
@@ -842,8 +934,7 @@
 	put_iova(obj);
 
 	if (obj->import_attach) {
-		if (msm_obj->vaddr)
-			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+		WARN_ON(msm_obj->vaddr);
 
 		/* Don't drop the pages for imported dmabuf, as they are not
 		 * ours, just free the array we allocated:
@@ -857,18 +948,39 @@
 		put_pages(obj);
 	}
 
-	if (msm_obj->resv == &msm_obj->_resv)
-		reservation_object_fini(msm_obj->resv);
-
 	drm_gem_object_release(obj);
 
 	mutex_unlock(&msm_obj->lock);
 	kfree(msm_obj);
 }
 
+void msm_gem_free_work(struct work_struct *work)
+{
+	struct msm_drm_private *priv =
+		container_of(work, struct msm_drm_private, free_work);
+	struct drm_device *dev = priv->dev;
+	struct llist_node *freed;
+	struct msm_gem_object *msm_obj, *next;
+
+	while ((freed = llist_del_all(&priv->free_list))) {
+
+		mutex_lock(&dev->struct_mutex);
+
+		llist_for_each_entry_safe(msm_obj, next,
+					  freed, freed)
+			free_object(msm_obj);
+
+		mutex_unlock(&dev->struct_mutex);
+
+		if (need_resched())
+			break;
+	}
+}
+
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-		uint32_t size, uint32_t flags, uint32_t *handle)
+		uint32_t size, uint32_t flags, uint32_t *handle,
+		char *name)
 {
 	struct drm_gem_object *obj;
 	int ret;
@@ -878,21 +990,21 @@
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
+	if (name)
+		msm_gem_object_set_name(obj, "%s", name);
+
 	ret = drm_gem_handle_create(file, obj, handle);
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 
 	return ret;
 }
 
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
-		struct reservation_object *resv,
-		struct drm_gem_object **obj,
-		bool struct_mutex_locked)
+		struct drm_gem_object **obj)
 {
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
@@ -901,7 +1013,7 @@
 	case MSM_BO_WC:
 		break;
 	default:
-		dev_err(dev->dev, "invalid cache flag: %x\n",
+		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
 				(flags & MSM_BO_CACHE_MASK));
 		return -EINVAL;
 	}
@@ -915,24 +1027,8 @@
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
 
-	if (resv) {
-		msm_obj->resv = resv;
-	} else {
-		msm_obj->resv = &msm_obj->_resv;
-		reservation_object_init(msm_obj->resv);
-	}
-
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->vmas);
-
-	if (struct_mutex_locked) {
-		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	} else {
-		mutex_lock(&dev->struct_mutex);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-		mutex_unlock(&dev->struct_mutex);
-	}
 
 	*obj = &msm_obj->base;
 
@@ -943,15 +1039,16 @@
 		uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gem_object *msm_obj;
 	struct drm_gem_object *obj = NULL;
 	bool use_vram = false;
 	int ret;
 
 	size = PAGE_ALIGN(size);
 
-	if (!iommu_present(&platform_bus_type))
+	if (!msm_use_mmu(dev))
 		use_vram = true;
-	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
 		use_vram = true;
 
 	if (WARN_ON(use_vram && !priv->vram.size))
@@ -963,14 +1060,15 @@
 	if (size == 0)
 		return ERR_PTR(-EINVAL);
 
-	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
+	ret = msm_gem_new_impl(dev, size, flags, &obj);
 	if (ret)
 		return ERR_PTR(ret);
+
+	msm_obj = to_msm_bo(obj);
 
 	if (use_vram) {
 		struct msm_gem_vma *vma;
 		struct page **pages;
-		struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 		mutex_lock(&msm_obj->lock);
 
@@ -996,12 +1094,28 @@
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
+		/*
+		 * Our buffers are kept pinned, so allocating them from the
+		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
+		 * See comments above new_inode() why this is required _and_
+		 * expected if you're going to pin these pages.
+		 */
+		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
+	}
+
+	if (struct_mutex_locked) {
+		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
 	}
 
 	return obj;
 
 fail:
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 	return ERR_PTR(ret);
 }
 
@@ -1020,20 +1134,21 @@
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
 	struct drm_gem_object *obj;
 	uint32_t size;
 	int ret, npages;
 
 	/* if we don't have IOMMU, don't bother pretending we can import: */
-	if (!iommu_present(&platform_bus_type)) {
-		dev_err(dev->dev, "cannot import without IOMMU\n");
+	if (!msm_use_mmu(dev)) {
+		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
 		return ERR_PTR(-EINVAL);
 	}
 
 	size = PAGE_ALIGN(dmabuf->size);
 
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -1058,10 +1173,15 @@
 	}
 
 	mutex_unlock(&msm_obj->lock);
+
+	mutex_lock(&dev->struct_mutex);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	mutex_unlock(&dev->struct_mutex);
+
 	return obj;
 
 fail:
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 	return ERR_PTR(ret);
 }
 
@@ -1077,24 +1197,30 @@
 		return ERR_CAST(obj);
 
 	if (iova) {
-		ret = msm_gem_get_iova(obj, aspace, iova);
-		if (ret) {
-			drm_gem_object_put(obj);
-			return ERR_PTR(ret);
-		}
+		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+		if (ret)
+			goto err;
 	}
 
 	vaddr = msm_gem_get_vaddr(obj);
 	if (IS_ERR(vaddr)) {
-		msm_gem_put_iova(obj, aspace);
-		drm_gem_object_put(obj);
-		return ERR_CAST(vaddr);
+		msm_gem_unpin_iova(obj, aspace);
+		ret = PTR_ERR(vaddr);
+		goto err;
 	}
 
 	if (bo)
 		*bo = obj;
 
 	return vaddr;
+err:
+	if (locked)
+		drm_gem_object_put_locked(obj);
+	else
+		drm_gem_object_put(obj);
+
+	return ERR_PTR(ret);
+
 }
 
 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1110,3 +1236,31 @@
 {
 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
 }
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+		struct msm_gem_address_space *aspace, bool locked)
+{
+	if (IS_ERR_OR_NULL(bo))
+		return;
+
+	msm_gem_put_vaddr(bo);
+	msm_gem_unpin_iova(bo, aspace);
+
+	if (locked)
+		drm_gem_object_put_locked(bo);
+	else
+		drm_gem_object_put(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(bo);
+	va_list ap;
+
+	if (!fmt)
+		return;
+
+	va_start(ap, fmt);
+	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+	va_end(ap);
+}