forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/msm/msm_drv.c
@@ -1,31 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
 */

+#include <linux/dma-mapping.h>
 #include <linux/kthread.h>
+#include <linux/uaccess.h>
 #include <uapi/linux/sched/types.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_prime.h>
 #include <drm/drm_of.h>
+#include <drm/drm_vblank.h>

 #include "msm_drv.h"
 #include "msm_debugfs.h"
 #include "msm_fence.h"
+#include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
-
+#include "adreno/adreno_gpu.h"

 /*
  * MSM driver version:
@@ -35,9 +35,13 @@
  * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
  *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
  *           MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ *           GEM object's debug name
+ * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
+ * - 1.6.0 - Syncobj support
 */
 #define MSM_VERSION_MAJOR	1
-#define MSM_VERSION_MINOR	3
+#define MSM_VERSION_MINOR	6
 #define MSM_VERSION_PATCHLEVEL	0

 static const struct drm_mode_config_funcs mode_config_funcs = {
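The minor-version bump above is what userspace keys off. A minimal sketch (illustrative, not part of the patch) of feature-gating on it, assuming a libdrm file descriptor opened on the msm node:

#include <xf86drm.h>

/* Returns non-zero if the running msm driver advertises syncobj (1.6+). */
static int msm_supports_syncobj(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	int ok;

	if (!v)
		return 0;
	ok = (v->version_major > 1) ||
	     (v->version_major == 1 && v->version_minor >= 6);
	drmFreeVersion(v);
	return ok;
}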
@@ -81,46 +85,6 @@
  * Util/helpers:
  */

-int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
-{
-	struct property *prop;
-	const char *name;
-	struct clk_bulk_data *local;
-	int i = 0, ret, count;
-
-	count = of_property_count_strings(dev->of_node, "clock-names");
-	if (count < 1)
-		return 0;
-
-	local = devm_kcalloc(dev, sizeof(struct clk_bulk_data *),
-		count, GFP_KERNEL);
-	if (!local)
-		return -ENOMEM;
-
-	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
-		local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
-		if (!local[i].id) {
-			devm_kfree(dev, local);
-			return -ENOMEM;
-		}
-
-		i++;
-	}
-
-	ret = devm_clk_bulk_get(dev, count, local);
-
-	if (ret) {
-		for (i = 0; i < count; i++)
-			devm_kfree(dev, (void *) local[i].id);
-		devm_kfree(dev, local);
-
-		return ret;
-	}
-
-	*bulk = local;
-	return count;
-}
-
 struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
 		const char *name)
 {
@@ -157,8 +121,8 @@
 	return clk;
 }

-void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
-		const char *dbgname)
+void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
+		const char *dbgname, bool quiet)
 {
 	struct resource *res;
 	unsigned long size;
@@ -170,15 +134,17 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

 	if (!res) {
-		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+		if (!quiet)
+			DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
 		return ERR_PTR(-EINVAL);
 	}

 	size = resource_size(res);

-	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+	ptr = devm_ioremap(&pdev->dev, res->start, size);
 	if (!ptr) {
-		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+		if (!quiet)
+			DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
 		return ERR_PTR(-ENOMEM);
 	}

@@ -186,6 +152,18 @@
 		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

 	return ptr;
+}
+
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+		const char *dbgname)
+{
+	return _msm_ioremap(pdev, name, dbgname, false);
+}
+
+void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
+		const char *dbgname)
+{
+	return _msm_ioremap(pdev, name, dbgname, true);
 }

 void msm_writel(u32 data, void __iomem *addr)
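The refactor above is a thin-wrapper split: the shared _msm_ioremap() takes a quiet flag, and the two entry points differ only in whether a missing resource is logged. A hedged caller sketch (the "rbbm" region name is hypothetical) showing why the quiet variant exists, probing an optional block without polluting the log:

/* Sketch: probe an optional register block; absence is not an error. */
void __iomem *base = msm_ioremap_quiet(pdev, "rbbm", "optional-block");
if (IS_ERR(base))
	base = NULL;	/* block simply not present on this SoC variant */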
@@ -203,62 +181,44 @@
 	return val;
 }

-struct vblank_event {
-	struct list_head node;
+struct msm_vblank_work {
+	struct work_struct work;
 	int crtc_id;
 	bool enable;
+	struct msm_drm_private *priv;
 };

-static void vblank_ctrl_worker(struct kthread_work *work)
+static void vblank_ctrl_worker(struct work_struct *work)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
-			struct msm_vblank_ctrl, work);
-	struct msm_drm_private *priv = container_of(vbl_ctrl,
-			struct msm_drm_private, vblank_ctrl);
+	struct msm_vblank_work *vbl_work = container_of(work,
+			struct msm_vblank_work, work);
+	struct msm_drm_private *priv = vbl_work->priv;
 	struct msm_kms *kms = priv->kms;
-	struct vblank_event *vbl_ev, *tmp;
-	unsigned long flags;

-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	if (vbl_work->enable)
+		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+	else
+		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

-		if (vbl_ev->enable)
-			kms->funcs->enable_vblank(kms,
-					priv->crtcs[vbl_ev->crtc_id]);
-		else
-			kms->funcs->disable_vblank(kms,
-					priv->crtcs[vbl_ev->crtc_id]);
-
-		kfree(vbl_ev);
-
-		spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	}
-
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	kfree(vbl_work);
 }

 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev;
-	unsigned long flags;
+	struct msm_vblank_work *vbl_work;

-	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
-	if (!vbl_ev)
+	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+	if (!vbl_work)
 		return -ENOMEM;

-	vbl_ev->crtc_id = crtc_id;
-	vbl_ev->enable = enable;
+	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	vbl_work->crtc_id = crtc_id;
+	vbl_work->enable = enable;
+	vbl_work->priv = priv;

-	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
-			&vbl_ctrl->work);
+	queue_work(priv->wq, &vbl_work->work);

 	return 0;
 }
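The rewrite trades the shared event list and spinlock for one self-contained, heap-allocated work item per request: the handler recovers its item with container_of() and frees it, so no locking is needed at all. The same idiom in miniature (names hypothetical):

struct my_req {
	struct work_struct work;
	int arg;
};

static void my_req_fn(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	handle(req->arg);	/* hypothetical consumer */
	kfree(req);		/* each item frees itself */
}

static int my_req_queue(struct workqueue_struct *wq, int arg)
{
	/* GFP_ATOMIC because callers may be in atomic context, as here */
	struct my_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	INIT_WORK(&req->work, my_req_fn);
	req->arg = arg;
	queue_work(wq, &req->work);
	return 0;
}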
@@ -270,40 +230,36 @@
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_mdss *mdss = priv->mdss;
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev, *tmp;
 	int i;
+
+	/*
+	 * Shutdown the hw if we're far enough along where things might be on.
+	 * If we run this too early, we'll end up panicking in any variety of
+	 * places. Since we don't register the drm device until late in
+	 * msm_drm_init, drm_dev->registered is used as an indicator that the
+	 * shutdown will be successful.
+	 */
+	if (ddev->registered) {
+		drm_dev_unregister(ddev);
+		drm_atomic_helper_shutdown(ddev);
+	}

 	/* We must cancel and cleanup any pending vblank enable/disable
 	 * work before drm_irq_uninstall() to avoid work re-enabling an
 	 * irq after uninstall has disabled it.
 	 */
-	kthread_flush_work(&vbl_ctrl->work);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		kfree(vbl_ev);
-	}

-	/* clean up display commit/event worker threads */
+	flush_workqueue(priv->wq);
+
+	/* clean up event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
-		if (priv->disp_thread[i].thread) {
-			kthread_flush_worker(&priv->disp_thread[i].worker);
-			kthread_stop(priv->disp_thread[i].thread);
-			priv->disp_thread[i].thread = NULL;
-		}
-
-		if (priv->event_thread[i].thread) {
-			kthread_flush_worker(&priv->event_thread[i].worker);
-			kthread_stop(priv->event_thread[i].thread);
-			priv->event_thread[i].thread = NULL;
-		}
+		if (priv->event_thread[i].worker)
+			kthread_destroy_worker(priv->event_thread[i].worker);
 	}

 	msm_gem_shrinker_cleanup(ddev);

 	drm_kms_helper_poll_fini(ddev);
-
-	drm_dev_unregister(ddev);

 	msm_perf_debugfs_cleanup(priv);
 	msm_rd_debugfs_cleanup(priv);
@@ -312,14 +268,12 @@
 	if (fbdev && priv->fbdev)
 		msm_fbdev_free(ddev);
 #endif
+
 	drm_mode_config_cleanup(ddev);

 	pm_runtime_get_sync(dev);
 	drm_irq_uninstall(ddev);
 	pm_runtime_put_sync(dev);
-
-	flush_workqueue(priv->wq);
-	destroy_workqueue(priv->wq);

 	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
@@ -337,8 +291,9 @@
 		mdss->funcs->destroy(ddev);

 	ddev->dev_private = NULL;
-	drm_dev_unref(ddev);
+	drm_dev_put(ddev);

+	destroy_workqueue(priv->wq);
 	kfree(priv);

 	return 0;
@@ -356,6 +311,14 @@
 }

 #include <linux/of_address.h>
+
+bool msm_use_mmu(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+
+	/* a2xx comes with its own MMU */
+	return priv->is_a2xx || iommu_present(&platform_bus_type);
+}

 static int msm_init_vram(struct drm_device *dev)
 {
@@ -395,7 +358,7 @@
 		 * Grab the entire CMA chunk carved out in early startup in
 		 * mach-msm:
 		 */
-	} else if (!iommu_present(&platform_bus_type)) {
+	} else if (!msm_use_mmu(dev)) {
 		DRM_INFO("using %s VRAM carveout\n", vram);
 		size = memparse(vram, NULL);
 	}
@@ -418,12 +381,12 @@
 		p = dma_alloc_attrs(dev->dev, size,
 				&priv->vram.paddr, GFP_KERNEL, attrs);
 		if (!p) {
-			dev_err(dev->dev, "failed to allocate VRAM\n");
+			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
 			priv->vram.paddr = 0;
 			return -ENOMEM;
 		}

-		dev_info(dev->dev, "VRAM: %08x->%08x\n",
+		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
 				(uint32_t)priv->vram.paddr,
 				(uint32_t)(priv->vram.paddr + size));
 	}
@@ -439,11 +402,10 @@
 	struct msm_kms *kms;
 	struct msm_mdss *mdss;
 	int ret, i;
-	struct sched_param param;

 	ddev = drm_dev_alloc(drv, dev);
 	if (IS_ERR(ddev)) {
-		dev_err(dev, "failed to allocate drm_device\n");
+		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
 		return PTR_ERR(ddev);
 	}

@@ -452,7 +414,7 @@
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
-		goto err_unref_drm_dev;
+		goto err_put_drm_dev;
 	}

 	ddev->dev_private = priv;
@@ -476,10 +438,10 @@

 	priv->wq = alloc_ordered_workqueue("msm", 0);

+	INIT_WORK(&priv->free_work, msm_gem_free_work);
+	init_llist_head(&priv->free_list);
+
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);

 	drm_mode_config_init(ddev);

@@ -492,15 +454,7 @@
 	if (ret)
 		goto err_destroy_mdss;

-	if (!dev->dma_parms) {
-		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
-					      GFP_KERNEL);
-		if (!dev->dma_parms) {
-			ret = -ENOMEM;
-			goto err_msm_uninit;
-		}
-	}
-	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	dma_set_max_seg_size(dev, UINT_MAX);

 	msm_gem_shrinker_init(ddev);

@@ -517,19 +471,16 @@
 		priv->kms = kms;
 		break;
 	default:
-		kms = ERR_PTR(-ENODEV);
+		/* valid only for the dummy headless case, where of_node=NULL */
+		WARN_ON(dev->of_node);
+		kms = NULL;
 		break;
 	}

 	if (IS_ERR(kms)) {
-		/*
-		 * NOTE: once we have GPU support, having no kms should not
-		 * be considered fatal.. ideally we would still support gpu
-		 * and (for example) use dmabuf/prime to share buffers with
-		 * imx drm driver on iMX5
-		 */
-		dev_err(dev, "failed to load kms\n");
+		DRM_DEV_ERROR(dev, "failed to load kms\n");
 		ret = PTR_ERR(kms);
+		priv->kms = NULL;
 		goto err_msm_uninit;
 	}

@@ -537,9 +488,10 @@
 	ddev->mode_config.normalize_zpos = true;

 	if (kms) {
+		kms->dev = ddev;
 		ret = kms->funcs->hw_init(kms);
 		if (ret) {
-			dev_err(dev, "kms hw init failed: %d\n", ret);
+			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
 			goto err_msm_uninit;
 		}
 	}
@@ -547,82 +499,24 @@
 	ddev->mode_config.funcs = &mode_config_funcs;
 	ddev->mode_config.helper_private = &mode_config_helper_funcs;

-	/**
-	 * this priority was found during empiric testing to have appropriate
-	 * realtime scheduling to process display updates and interact with
-	 * other real time and normal priority task
-	 */
-	param.sched_priority = 16;
 	for (i = 0; i < priv->num_crtcs; i++) {
-
-		/* initialize display thread */
-		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
-		kthread_init_worker(&priv->disp_thread[i].worker);
-		priv->disp_thread[i].dev = ddev;
-		priv->disp_thread[i].thread =
-			kthread_run(kthread_worker_fn,
-				&priv->disp_thread[i].worker,
-				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
-		ret = sched_setscheduler(priv->disp_thread[i].thread,
-					 SCHED_FIFO, &param);
-		if (ret)
-			pr_warn("display thread priority update failed: %d\n",
-				ret);
-
-		if (IS_ERR(priv->disp_thread[i].thread)) {
-			dev_err(dev, "failed to create crtc_commit kthread\n");
-			priv->disp_thread[i].thread = NULL;
-		}
-
 		/* initialize event thread */
 		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
-		kthread_init_worker(&priv->event_thread[i].worker);
 		priv->event_thread[i].dev = ddev;
-		priv->event_thread[i].thread =
-			kthread_run(kthread_worker_fn,
-				&priv->event_thread[i].worker,
-				"crtc_event:%d", priv->event_thread[i].crtc_id);
-		/**
-		 * event thread should also run at same priority as disp_thread
-		 * because it is handling frame_done events. A lower priority
-		 * event thread and higher priority disp_thread can causes
-		 * frame_pending counters beyond 2. This can lead to commit
-		 * failure at crtc commit level.
-		 */
-		ret = sched_setscheduler(priv->event_thread[i].thread,
-					 SCHED_FIFO, &param);
-		if (ret)
-			pr_warn("display event thread priority update failed: %d\n",
-				ret);
-
-		if (IS_ERR(priv->event_thread[i].thread)) {
-			dev_err(dev, "failed to create crtc_event kthread\n");
-			priv->event_thread[i].thread = NULL;
-		}
-
-		if ((!priv->disp_thread[i].thread) ||
-				!priv->event_thread[i].thread) {
-			/* clean up previously created threads if any */
-			for ( ; i >= 0; i--) {
-				if (priv->disp_thread[i].thread) {
-					kthread_stop(
-						priv->disp_thread[i].thread);
-					priv->disp_thread[i].thread = NULL;
-				}
-
-				if (priv->event_thread[i].thread) {
-					kthread_stop(
-						priv->event_thread[i].thread);
-					priv->event_thread[i].thread = NULL;
-				}
-			}
+		priv->event_thread[i].worker = kthread_create_worker(0,
+			"crtc_event:%d", priv->event_thread[i].crtc_id);
+		if (IS_ERR(priv->event_thread[i].worker)) {
+			ret = PTR_ERR(priv->event_thread[i].worker);
+			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
 			goto err_msm_uninit;
 		}
+
+		sched_set_fifo(priv->event_thread[i].worker->task);
 	}

 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
-		dev_err(dev, "failed to initialize vblank\n");
+		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
 		goto err_msm_uninit;
 	}

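kthread_create_worker() plus sched_set_fifo() (available since v5.9) collapses the old kthread_run()/sched_setscheduler() pair into a single allocation with one error check, and delegates the RT priority choice to the kernel instead of the hand-tuned value 16. The idiom in isolation, as a sketch:

struct kthread_worker *w = kthread_create_worker(0, "my_worker");

if (IS_ERR(w))
	return PTR_ERR(w);
sched_set_fifo(w->task);	/* default FIFO priority, no sched_param */
/* ... on teardown: kthread_destroy_worker(w); */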
@@ -631,7 +525,7 @@
 		ret = drm_irq_install(ddev, kms->irq);
 		pm_runtime_put_sync(dev);
 		if (ret < 0) {
-			dev_err(dev, "failed to install IRQ handler\n");
+			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
 			goto err_msm_uninit;
 		}
 	}
@@ -643,7 +537,7 @@
 	drm_mode_config_reset(ddev);

 #ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (fbdev)
+	if (kms && fbdev)
 		priv->fbdev = msm_fbdev_init(ddev);
 #endif

@@ -663,8 +557,9 @@
 		mdss->funcs->destroy(ddev);
 err_free_priv:
 	kfree(priv);
-err_unref_drm_dev:
-	drm_dev_unref(ddev);
+err_put_drm_dev:
+	drm_dev_put(ddev);
+	platform_set_drvdata(pdev, NULL);
 	return ret;
 }

@@ -687,15 +582,21 @@

 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
+	static atomic_t ident = ATOMIC_INIT(0);
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx;

 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;

+	kref_init(&ctx->ref);
 	msm_submitqueue_init(dev, ctx);

+	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
 	file->driver_priv = ctx;
+
+	ctx->seqno = atomic_inc_return(&ident);

 	return 0;
 }
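With kref_init() the per-file context becomes reference-counted, so in-flight work can keep it alive after the file closes (see context_close() below, which drops a reference instead of freeing). A sketch of how the matching put side typically looks; the real msm_file_private_put() lives in the msm headers, so these names are illustrative:

static void ctx_destroy(struct kref *kref)
{
	struct msm_file_private *ctx =
		container_of(kref, struct msm_file_private, ref);

	kfree(ctx);	/* runs only when the last reference drops */
}

static void ctx_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, ctx_destroy);
}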
@@ -713,7 +614,7 @@
 static void context_close(struct msm_file_private *ctx)
 {
 	msm_submitqueue_close(ctx);
-	kfree(ctx);
+	msm_file_private_put(ctx);
 }

 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
@@ -751,7 +652,11 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	BUG_ON(!kms);
-	return kms->funcs->irq_postinstall(kms);
+
+	if (kms->funcs->irq_postinstall)
+		return kms->funcs->irq_postinstall(kms);
+
+	return 0;
 }

 static void msm_irq_uninstall(struct drm_device *dev)
@@ -762,8 +667,10 @@
 	kms->funcs->irq_uninstall(kms);
 }

-static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int msm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	if (!kms)
@@ -772,8 +679,10 @@
 	return vblank_ctrl_queue_work(priv, pipe, true);
 }

-static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void msm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	if (!kms)
@@ -818,7 +727,7 @@
 	}

 	return msm_gem_new_handle(dev, file, args->size,
-			args->flags, &args->handle);
+			args->flags, &args->handle, NULL);
 }

 static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -845,7 +754,7 @@

 	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);

 	return ret;
 }
@@ -863,20 +772,26 @@

 	ret = msm_gem_cpu_fini(obj);

-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);

 	return ret;
 }

 static int msm_ioctl_gem_info_iova(struct drm_device *dev,
-		struct drm_gem_object *obj, uint64_t *iova)
+		struct drm_file *file, struct drm_gem_object *obj,
+		uint64_t *iova)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_file_private *ctx = file->driver_priv;

 	if (!priv->gpu)
 		return -EINVAL;

-	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
+	/*
+	 * Don't pin the memory here - just get an address so that userspace can
+	 * be productive
+	 */
+	return msm_gem_get_iova(obj, ctx->aspace, iova);
 }

 static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
@@ -884,26 +799,74 @@
 {
 	struct drm_msm_gem_info *args = data;
 	struct drm_gem_object *obj;
-	int ret = 0;
+	struct msm_gem_object *msm_obj;
+	int i, ret = 0;

-	if (args->flags & ~MSM_INFO_FLAGS)
+	if (args->pad)
 		return -EINVAL;
+
+	switch (args->info) {
+	case MSM_INFO_GET_OFFSET:
+	case MSM_INFO_GET_IOVA:
+		/* value returned as immediate, not pointer, so len==0: */
+		if (args->len)
+			return -EINVAL;
+		break;
+	case MSM_INFO_SET_NAME:
+	case MSM_INFO_GET_NAME:
+		break;
+	default:
+		return -EINVAL;
+	}

 	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;

-	if (args->flags & MSM_INFO_IOVA) {
-		uint64_t iova;
+	msm_obj = to_msm_bo(obj);

-		ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
-		if (!ret)
-			args->offset = iova;
-	} else {
-		args->offset = msm_gem_mmap_offset(obj);
+	switch (args->info) {
+	case MSM_INFO_GET_OFFSET:
+		args->value = msm_gem_mmap_offset(obj);
+		break;
+	case MSM_INFO_GET_IOVA:
+		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
+		break;
+	case MSM_INFO_SET_NAME:
+		/* length check should leave room for terminating null: */
+		if (args->len >= sizeof(msm_obj->name)) {
+			ret = -EINVAL;
+			break;
+		}
+		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
+				   args->len)) {
+			msm_obj->name[0] = '\0';
+			ret = -EFAULT;
+			break;
+		}
+		msm_obj->name[args->len] = '\0';
+		for (i = 0; i < args->len; i++) {
+			if (!isprint(msm_obj->name[i])) {
+				msm_obj->name[i] = '\0';
+				break;
+			}
+		}
+		break;
+	case MSM_INFO_GET_NAME:
+		if (args->value && (args->len < strlen(msm_obj->name))) {
+			ret = -EINVAL;
+			break;
+		}
+		args->len = strlen(msm_obj->name);
+		if (args->value) {
+			if (copy_to_user(u64_to_user_ptr(args->value),
+					 msm_obj->name, args->len))
+				ret = -EFAULT;
+		}
+		break;
 	}

-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);

 	return ret;
 }
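From userspace, the reworked ioctl is driven through the same struct with an info selector and a (value, len) pair. A hedged sketch, assuming libdrm and the msm UAPI header, of tagging a BO with a debug name via MSM_INFO_SET_NAME:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <msm_drm.h>	/* msm UAPI header shipped with libdrm */

static int msm_bo_set_name(int fd, uint32_t handle, const char *name)
{
	struct drm_msm_gem_info req = {
		.handle = handle,
		.info   = MSM_INFO_SET_NAME,
		.value  = (uintptr_t)name,
		.len    = strlen(name),	/* no terminating NUL; kernel adds it */
	};

	return drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
}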
@@ -968,7 +931,7 @@
 		ret = 0;
 	}

-	drm_gem_object_put(obj);
+	drm_gem_object_put_locked(obj);

 unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -988,6 +951,11 @@
 			args->flags, &args->id);
 }

+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	return msm_submitqueue_query(dev, file->driver_priv, data);
+}

 static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
 		struct drm_file *file)
@@ -998,16 +966,17 @@
 }

 static const struct drm_ioctl_desc msm_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
 };

 static const struct vm_operations_struct vm_ops = {
@@ -1029,12 +998,11 @@
 };

 static struct drm_driver msm_driver = {
-	.driver_features    = DRIVER_HAVE_IRQ |
-				DRIVER_GEM |
-				DRIVER_PRIME |
+	.driver_features    = DRIVER_GEM |
 				DRIVER_RENDER |
 				DRIVER_ATOMIC |
-				DRIVER_MODESET,
+				DRIVER_MODESET |
+				DRIVER_SYNCOBJ,
 	.open               = msm_open,
 	.postclose          = msm_postclose,
 	.lastclose          = drm_fb_helper_lastclose,
@@ -1042,17 +1010,12 @@
 	.irq_preinstall     = msm_irq_preinstall,
 	.irq_postinstall    = msm_irq_postinstall,
 	.irq_uninstall      = msm_irq_uninstall,
-	.enable_vblank      = msm_enable_vblank,
-	.disable_vblank     = msm_disable_vblank,
-	.gem_free_object    = msm_gem_free_object,
+	.gem_free_object_unlocked = msm_gem_free_object,
 	.gem_vm_ops         = &vm_ops,
 	.dumb_create        = msm_gem_dumb_create,
 	.dumb_map_offset    = msm_gem_dumb_map_offset,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export   = drm_gem_prime_export,
-	.gem_prime_import   = drm_gem_prime_import,
-	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
@@ -1074,47 +1037,7 @@
 	.patchlevel         = MSM_VERSION_PATCHLEVEL,
 };

-#ifdef CONFIG_PM_SLEEP
-static int msm_pm_suspend(struct device *dev)
-{
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct msm_drm_private *priv = ddev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	/* TODO: Use atomic helper suspend/resume */
-	if (kms && kms->funcs && kms->funcs->pm_suspend)
-		return kms->funcs->pm_suspend(dev);
-
-	drm_kms_helper_poll_disable(ddev);
-
-	priv->pm_state = drm_atomic_helper_suspend(ddev);
-	if (IS_ERR(priv->pm_state)) {
-		drm_kms_helper_poll_enable(ddev);
-		return PTR_ERR(priv->pm_state);
-	}
-
-	return 0;
-}
-
-static int msm_pm_resume(struct device *dev)
-{
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct msm_drm_private *priv = ddev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	/* TODO: Use atomic helper suspend/resume */
-	if (kms && kms->funcs && kms->funcs->pm_resume)
-		return kms->funcs->pm_resume(dev);
-
-	drm_atomic_helper_resume(ddev, priv->pm_state);
-	drm_kms_helper_poll_enable(ddev);
-
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int msm_runtime_suspend(struct device *dev)
+static int __maybe_unused msm_runtime_suspend(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct msm_drm_private *priv = ddev->dev_private;
@@ -1128,7 +1051,7 @@
 	return 0;
 }

-static int msm_runtime_resume(struct device *dev)
+static int __maybe_unused msm_runtime_resume(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct msm_drm_private *priv = ddev->dev_private;
@@ -1141,11 +1064,51 @@

 	return 0;
 }
-#endif
+
+static int __maybe_unused msm_pm_suspend(struct device *dev)
+{
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return msm_runtime_suspend(dev);
+}
+
+static int __maybe_unused msm_pm_resume(struct device *dev)
+{
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return msm_runtime_resume(dev);
+}
+
+static int __maybe_unused msm_pm_prepare(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+	if (!priv || !priv->kms)
+		return 0;
+
+	return drm_mode_config_helper_suspend(ddev);
+}
+
+static void __maybe_unused msm_pm_complete(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+	if (!priv || !priv->kms)
+		return;
+
+	drm_mode_config_helper_resume(ddev);
+}

 static const struct dev_pm_ops msm_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
 	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
+	.prepare = msm_pm_prepare,
+	.complete = msm_pm_complete,
 };

 /*
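The rebuilt PM table routes system sleep through the runtime-PM callbacks, but only when the device is still powered, while the new prepare/complete hooks defer to the DRM mode-config suspend helpers. The guard in isolation, as a sketch with hypothetical names:

static int __maybe_unused my_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;	/* runtime PM already powered us down */

	return my_runtime_suspend(dev);
}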
@@ -1194,7 +1157,7 @@

 		ret = of_graph_parse_endpoint(ep_node, &ep);
 		if (ret) {
-			dev_err(mdp_dev, "unable to parse port endpoint\n");
+			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
 			of_node_put(ep_node);
 			return ret;
 		}
@@ -1216,8 +1179,10 @@
 		if (!intf)
 			continue;

-		drm_of_component_match_add(master_dev, matchptr, compare_of,
-					   intf);
+		if (of_device_is_available(intf))
+			drm_of_component_match_add(master_dev, matchptr,
+						   compare_of, intf);
+
 		of_node_put(intf);
 	}

@@ -1242,16 +1207,17 @@
 	 * the interfaces to our components list.
 	 */
 	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
-	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
+	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
+	    of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 		if (ret) {
-			dev_err(dev, "failed to populate children devices\n");
+			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
 			return ret;
 		}

 		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
 		if (!mdp_dev) {
-			dev_err(dev, "failed to find MDSS MDP node\n");
+			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
 			of_platform_depopulate(dev);
 			return -ENODEV;
 		}
@@ -1281,6 +1247,7 @@
 static const struct of_device_id msm_gpu_match[] = {
 	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,adreno-3xx" },
+	{ .compatible = "amd,imageon" },
 	{ .compatible = "qcom,kgsl-3d0" },
 	{ },
 };
@@ -1326,9 +1293,11 @@
 	struct component_match *match = NULL;
 	int ret;

-	ret = add_display_components(&pdev->dev, &match);
-	if (ret)
-		return ret;
+	if (get_mdp_ver(pdev)) {
+		ret = add_display_components(&pdev->dev, &match);
+		if (ret)
+			return ret;
+	}

 	ret = add_gpu_components(&pdev->dev, &match);
 	if (ret)
@@ -1375,6 +1344,7 @@
 	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
 	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
 	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
+	{ .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
 	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
@@ -1401,6 +1371,7 @@
 	msm_dsi_register();
 	msm_edp_register();
 	msm_hdmi_register();
+	msm_dp_register();
 	adreno_register();
 	return platform_driver_register(&msm_platform_driver);
 }
@@ -1409,6 +1380,7 @@
 {
 	DBG("fini");
 	platform_driver_unregister(&msm_platform_driver);
+	msm_dp_unregister();
 	msm_hdmi_unregister();
 	adreno_unregister();
 	msm_edp_unregister();