forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/msm/msm_gpu.c
@@ -1,28 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "msm_gpu.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
 #include "msm_fence.h"
+#include "msm_gpu_trace.h"
+#include "adreno/adreno_gpu.h"
 
 #include <generated/utsrelease.h>
 #include <linux/string_helpers.h>
-#include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/devcoredump.h>
 #include <linux/sched/task.h>
@@ -34,7 +24,7 @@
 static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 		u32 flags)
 {
-	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+	struct msm_gpu *gpu = dev_to_gpu(dev);
 	struct dev_pm_opp *opp;
 
 	opp = devfreq_recommended_opp(dev, freq, flags);
@@ -42,7 +32,13 @@
 	if (IS_ERR(opp))
 		return PTR_ERR(opp);
 
-	clk_set_rate(gpu->core_clk, *freq);
+	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
+
+	if (gpu->funcs->gpu_set_freq)
+		gpu->funcs->gpu_set_freq(gpu, opp);
+	else
+		clk_set_rate(gpu->core_clk, *freq);
+
 	dev_pm_opp_put(opp);
 
 	return 0;
@@ -51,17 +47,15 @@
 static int msm_devfreq_get_dev_status(struct device *dev,
 		struct devfreq_dev_status *status)
 {
-	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
-	u64 cycles;
-	u32 freq = ((u32) status->current_frequency) / 1000000;
+	struct msm_gpu *gpu = dev_to_gpu(dev);
 	ktime_t time;
 
-	status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
-	gpu->funcs->gpu_busy(gpu, &cycles);
+	if (gpu->funcs->gpu_get_freq)
+		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
+	else
+		status->current_frequency = clk_get_rate(gpu->core_clk);
 
-	status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
-
-	gpu->devfreq.busy_cycles = cycles;
+	status->busy_time = gpu->funcs->gpu_busy(gpu);
 
 	time = ktime_get();
 	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
@@ -72,9 +66,12 @@
 
 static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
 {
-	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+	struct msm_gpu *gpu = dev_to_gpu(dev);
 
-	*freq = (unsigned long) clk_get_rate(gpu->core_clk);
+	if (gpu->funcs->gpu_get_freq)
+		*freq = gpu->funcs->gpu_get_freq(gpu);
+	else
+		*freq = clk_get_rate(gpu->core_clk);
 
 	return 0;
 }
@@ -89,7 +86,7 @@
 static void msm_devfreq_init(struct msm_gpu *gpu)
 {
 	/* We need target support to do devfreq */
-	if (!gpu->funcs->gpu_busy || !gpu->core_clk)
+	if (!gpu->funcs->gpu_busy)
 		return;
 
 	msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -97,15 +94,22 @@
 	/*
 	 * Don't set the freq_table or max_state and let devfreq build the table
 	 * from OPP
+	 * After a deferred probe, these may have be left to non-zero values,
+	 * so set them back to zero before creating the devfreq device
 	 */
+	msm_devfreq_profile.freq_table = NULL;
+	msm_devfreq_profile.max_state = 0;
 
 	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
-			&msm_devfreq_profile, "simple_ondemand", NULL);
+			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+			NULL);
 
 	if (IS_ERR(gpu->devfreq.devfreq)) {
-		dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
 		gpu->devfreq.devfreq = NULL;
 	}
+
+	devfreq_suspend_device(gpu->devfreq.devfreq);
 }
 
 static int enable_pwrrail(struct msm_gpu *gpu)
@@ -116,7 +120,7 @@
 	if (gpu->gpu_reg) {
 		ret = regulator_enable(gpu->gpu_reg);
 		if (ret) {
-			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
 			return ret;
 		}
 	}
@@ -124,7 +128,7 @@
 	if (gpu->gpu_cx) {
 		ret = regulator_enable(gpu->gpu_cx);
 		if (ret) {
-			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
 			return ret;
 		}
 	}
@@ -185,11 +189,20 @@
 	return 0;
 }
 
+void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
+{
+	gpu->devfreq.busy_cycles = 0;
+	gpu->devfreq.time = ktime_get();
+
+	devfreq_resume_device(gpu->devfreq.devfreq);
+}
+
 int msm_gpu_pm_resume(struct msm_gpu *gpu)
 {
 	int ret;
 
 	DBG("%s", gpu->name);
+	trace_msm_gpu_resume(0);
 
 	ret = enable_pwrrail(gpu);
 	if (ret)
@@ -203,12 +216,7 @@
 	if (ret)
 		return ret;
 
-	if (gpu->devfreq.devfreq) {
-		gpu->devfreq.busy_cycles = 0;
-		gpu->devfreq.time = ktime_get();
-
-		devfreq_resume_device(gpu->devfreq.devfreq);
-	}
+	msm_gpu_resume_devfreq(gpu);
 
 	gpu->needs_hw_init = true;
 
@@ -220,9 +228,9 @@
 	int ret;
 
 	DBG("%s", gpu->name);
+	trace_msm_gpu_suspend(0);
 
-	if (gpu->devfreq.devfreq)
-		devfreq_suspend_device(gpu->devfreq.devfreq);
+	devfreq_suspend_device(gpu->devfreq.devfreq);
 
 	ret = disable_axi(gpu);
 	if (ret)
@@ -307,28 +315,28 @@
 	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
 
 	/* Don't record write only objects */
-
 	state_bo->size = obj->base.size;
 	state_bo->iova = iova;
 
-	/* Only store the data for buffer objects marked for read */
-	if ((flags & MSM_SUBMIT_BO_READ)) {
+	/* Only store data for non imported buffer objects marked for read */
+	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
 		void *ptr;
 
 		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
 		if (!state_bo->data)
-			return;
+			goto out;
 
 		ptr = msm_gem_get_vaddr_active(&obj->base);
 		if (IS_ERR(ptr)) {
 			kvfree(state_bo->data);
-			return;
+			state_bo->data = NULL;
+			goto out;
 		}
 
 		memcpy(state_bo->data, ptr, obj->base.size);
 		msm_gem_put_vaddr(&obj->base);
 	}
-
+out:
 	state->nr_bos++;
 }
 
@@ -336,6 +344,10 @@
 		struct msm_gem_submit *submit, char *comm, char *cmd)
 {
 	struct msm_gpu_state *state;
+
+	/* Check if the target supports capturing crash state */
+	if (!gpu->funcs->gpu_state_get)
+		return;
 
 	/* Only save one crash state at a time */
 	if (gpu->crashstate)
@@ -350,14 +362,35 @@
 	state->cmd = kstrdup(cmd, GFP_KERNEL);
 
 	if (submit) {
-		int i;
+		int i, nr = 0;
 
-		state->bos = kcalloc(submit->nr_bos,
+		/* count # of buffers to dump: */
+		for (i = 0; i < submit->nr_bos; i++)
+			if (should_dump(submit, i))
+				nr++;
+		/* always dump cmd bo's, but don't double count them: */
+		for (i = 0; i < submit->nr_cmds; i++)
+			if (!should_dump(submit, submit->cmd[i].idx))
+				nr++;
+
+		state->bos = kcalloc(nr,
 			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
 
-		for (i = 0; state->bos && i < submit->nr_bos; i++)
-			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
-				submit->bos[i].iova, submit->bos[i].flags);
+		for (i = 0; state->bos && i < submit->nr_bos; i++) {
+			if (should_dump(submit, i)) {
+				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+					submit->bos[i].iova, submit->bos[i].flags);
+			}
+		}
+
+		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
+			int idx = submit->cmd[i].idx;
+
+			if (!should_dump(submit, submit->cmd[i].idx)) {
+				msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+					submit->bos[idx].iova, submit->bos[idx].flags);
+			}
+		}
 	}
 
 	/* Set the active crash state to be dumped on failure */
@@ -420,34 +453,25 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 
 	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 	if (submit) {
 		struct task_struct *task;
 
+		/* Increment the fault counts */
+		gpu->global_faults++;
+		submit->queue->faults++;
+
 		task = get_pid_task(submit->pid, PIDTYPE_PID);
 		if (task) {
 			comm = kstrdup(task->comm, GFP_KERNEL);
-
-			/*
-			 * So slightly annoying, in other paths like
-			 * mmap'ing gem buffers, mmap_sem is acquired
-			 * before struct_mutex, which means we can't
-			 * hold struct_mutex across the call to
-			 * get_cmdline(). But submits are retired
-			 * from the same in-order workqueue, so we can
-			 * safely drop the lock here without worrying
-			 * about the submit going away.
-			 */
-			mutex_unlock(&dev->struct_mutex);
 			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
 			put_task_struct(task);
-			mutex_lock(&dev->struct_mutex);
 		}
 
 		if (comm && cmd) {
-			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 				gpu->name, comm, cmd);
 
 			msm_rd_dump_submit(priv->hangrd, submit,
@@ -500,7 +524,7 @@
 		struct msm_ringbuffer *ring = gpu->rb[i];
 
 		list_for_each_entry(submit, &ring->submits, node)
-			gpu->funcs->submit(gpu, submit, NULL);
+			gpu->funcs->submit(gpu, submit);
 	}
 }
 
@@ -530,11 +554,11 @@
 	} else if (fence < ring->seqno) {
 		/* no progress and not done.. hung! */
 		ring->hangcheck_fence = fence;
-		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
 				gpu->name, ring->id);
-		dev_err(dev->dev, "%s: completed fence: %u\n",
+		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
 				gpu->name, fence);
-		dev_err(dev->dev, "%s: submitted fence: %u\n",
+		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
 				gpu->name, ring->seqno);
 
 		queue_work(priv->wq, &gpu->recover_work);
@@ -650,16 +674,34 @@
  * Cmdstream submission/retirement:
  */
 
-static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+		struct msm_gem_submit *submit)
 {
+	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+	volatile struct msm_gpu_submit_stats *stats;
+	u64 elapsed, clock = 0;
 	int i;
+
+	stats = &ring->memptrs->stats[index];
+	/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
+	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
+	do_div(elapsed, 192);
+
+	/* Calculate the clock frequency from the number of CP cycles */
+	if (elapsed) {
+		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+		do_div(clock, elapsed);
+	}
+
+	trace_msm_gpu_submit_retired(submit, elapsed, clock,
+		stats->alwayson_start, stats->alwayson_end);
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		/* move to inactive: */
-		msm_gem_move_to_inactive(&msm_obj->base);
-		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
-		drm_gem_object_put(&msm_obj->base);
+
+		msm_gem_active_put(&msm_obj->base);
+		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
+		drm_gem_object_put_locked(&msm_obj->base);
 	}
 
 	pm_runtime_mark_last_busy(&gpu->pdev->dev);
@@ -681,7 +723,7 @@
 
 		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
 			if (dma_fence_is_signaled(submit->fence))
-				retire_submit(gpu, submit);
+				retire_submit(gpu, ring, submit);
 		}
 	}
 }
@@ -709,8 +751,7 @@
 }
 
 /* add bo's to gpu's ring, and kick gpu: */
-void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx)
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	struct drm_device *dev = gpu->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -733,6 +774,7 @@
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		struct drm_gem_object *drm_obj = &msm_obj->base;
 		uint64_t iova;
 
 		/* can't happen yet.. but when we add 2d support we'll have
@@ -742,17 +784,18 @@
 
 		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_get(&msm_obj->base);
-		msm_gem_get_iova(&msm_obj->base,
-			submit->gpu->aspace, &iova);
+		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
-			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+			dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
 		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+			dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
+
+		msm_gem_active_get(drm_obj, gpu);
 	}
 
-	gpu->funcs->submit(gpu, submit, ctx);
-	priv->lastctx = ctx;
+	gpu->funcs->submit(gpu, submit);
+	priv->lastctx = submit->queue->ctx;
 
 	hangcheck_timer_reset(gpu);
 }
@@ -769,7 +812,7 @@
 
 static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 {
-	int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
+	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
 
 	if (ret < 1) {
 		gpu->nr_clocks = 0;
@@ -787,41 +830,26 @@
 	return 0;
 }
 
-static struct msm_gem_address_space *
-msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
-		uint64_t va_start, uint64_t va_end)
+/* Return a new address space for a msm_drm_private instance */
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
 {
-	struct iommu_domain *iommu;
-	struct msm_gem_address_space *aspace;
-	int ret;
-
-	/*
-	 * Setup IOMMU.. eventually we will (I think) do this once per context
-	 * and have separate page tables per context. For now, to keep things
-	 * simple and to get something working, just use a single address space:
-	 */
-	iommu = iommu_domain_alloc(&platform_bus_type);
-	if (!iommu)
+	struct msm_gem_address_space *aspace = NULL;
+	if (!gpu)
 		return NULL;
 
-	iommu->geometry.aperture_start = va_start;
-	iommu->geometry.aperture_end = va_end;
-
-	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
-
-	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
-	if (IS_ERR(aspace)) {
-		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
-			PTR_ERR(aspace));
-		iommu_domain_free(iommu);
-		return ERR_CAST(aspace);
+	/*
+	 * If the target doesn't support private address spaces then return
+	 * the global one
+	 */
+	if (gpu->funcs->create_private_address_space) {
+		aspace = gpu->funcs->create_private_address_space(gpu);
+		if (!IS_ERR(aspace))
+			aspace->pid = get_pid(task_pid(task));
 	}
 
-	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
-	if (ret) {
-		msm_gem_address_space_put(aspace);
-		return ERR_PTR(ret);
-	}
+	if (IS_ERR_OR_NULL(aspace))
+		aspace = msm_gem_address_space_get(gpu->aspace);
 
 	return aspace;
 }
@@ -859,17 +887,17 @@
 	}
 
 	/* Get Interrupt: */
-	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
+	gpu->irq = platform_get_irq(pdev, 0);
 	if (gpu->irq < 0) {
 		ret = gpu->irq;
-		dev_err(drm->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
 		goto fail;
 	}
 
 	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
 			IRQF_TRIGGER_HIGH, gpu->name, gpu);
 	if (ret) {
-		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
 		goto fail;
 	}
 
@@ -894,29 +922,32 @@
 	gpu->gpu_cx = NULL;
 
 	gpu->pdev = pdev;
-	platform_set_drvdata(pdev, gpu);
+	platform_set_drvdata(pdev, &gpu->adreno_smmu);
 
 	msm_devfreq_init(gpu);
 
-	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
-		config->va_start, config->va_end);
+
+	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
 
 	if (gpu->aspace == NULL)
-		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	else if (IS_ERR(gpu->aspace)) {
 		ret = PTR_ERR(gpu->aspace);
 		goto fail;
 	}
 
-	memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
-		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+	memptrs = msm_gem_kernel_new(drm,
+		sizeof(struct msm_rbmemptrs) * nr_rings,
+		check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
 		&memptrs_iova);
 
 	if (IS_ERR(memptrs)) {
 		ret = PTR_ERR(memptrs);
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
 		goto fail;
 	}
+
+	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
 
 	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
 		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
@@ -930,7 +961,7 @@
 
 		if (IS_ERR(gpu->rb[i])) {
 			ret = PTR_ERR(gpu->rb[i]);
-			dev_err(drm->dev,
+			DRM_DEV_ERROR(drm->dev,
 				"could not create ringbuffer %d: %d\n", i, ret);
 			goto fail;
 		}
@@ -949,11 +980,7 @@
 		gpu->rb[i] = NULL;
 	}
 
-	if (gpu->memptrs_bo) {
-		msm_gem_put_vaddr(gpu->memptrs_bo);
-		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
-		drm_gem_object_put_unlocked(gpu->memptrs_bo);
-	}
+	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
 
 	platform_set_drvdata(pdev, NULL);
 	return ret;
@@ -972,15 +999,10 @@
 		gpu->rb[i] = NULL;
 	}
 
-	if (gpu->memptrs_bo) {
-		msm_gem_put_vaddr(gpu->memptrs_bo);
-		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
-		drm_gem_object_put_unlocked(gpu->memptrs_bo);
-	}
+	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
 
 	if (!IS_ERR_OR_NULL(gpu->aspace)) {
-		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
-				NULL, 0);
+		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
 		msm_gem_address_space_put(gpu->aspace);
 	}
 }
9861008 }