forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/drivers/gpu/drm/v3d/v3d_drv.c
@@ -7,22 +7,27 @@
  * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
  * For V3D 2.x support, see the VC4 driver.
  *
- * Currently only single-core rendering using the binner and renderer
- * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD
- * (compute shader dispatch) are not yet supported.
+ * The V3D GPU includes a tiled render (composed of a bin and render
+ * pipelines), the TFU (texture formatting unit), and the CSD (compute
+ * shader dispatch).
  */

 #include <linux/clk.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
+#include <uapi/drm/v3d_drm.h>

-#include "uapi/drm/v3d_drm.h"
 #include "v3d_drv.h"
 #include "v3d_regs.h"

@@ -100,22 +105,35 @@
                 if (args->value != 0)
                         return -EINVAL;

-                ret = pm_runtime_get_sync(v3d->dev);
+                ret = pm_runtime_get_sync(v3d->drm.dev);
+                if (ret < 0)
+                        return ret;
                 if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
                     args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) {
                         args->value = V3D_CORE_READ(0, offset);
                 } else {
                         args->value = V3D_READ(offset);
                 }
-                pm_runtime_mark_last_busy(v3d->dev);
-                pm_runtime_put_autosuspend(v3d->dev);
+                pm_runtime_mark_last_busy(v3d->drm.dev);
+                pm_runtime_put_autosuspend(v3d->drm.dev);
                 return 0;
         }

-        /* Any params that aren't just register reads would go here. */

-        DRM_DEBUG("Unknown parameter %d\n", args->param);
-        return -EINVAL;
+        switch (args->param) {
+        case DRM_V3D_PARAM_SUPPORTS_TFU:
+                args->value = 1;
+                return 0;
+        case DRM_V3D_PARAM_SUPPORTS_CSD:
+                args->value = v3d_has_csd(v3d);
+                return 0;
+        case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH:
+                args->value = 1;
+                return 0;
+        default:
+                DRM_DEBUG("Unknown parameter %d\n", args->param);
+                return -EINVAL;
+        }
 }

 static int
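
For context, the three SUPPORTS_* values added above are reported through the existing V3D_GET_PARAM ioctl, so userspace can probe for TFU, CSD, and cache-flush support before touching the new submit paths. A minimal userspace sketch of that query, outside this commit (the header path and the v3d_supports_csd() helper name are illustrative assumptions):

/* Query one of the new capability params via DRM_IOCTL_V3D_GET_PARAM.
 * Assumes an already-open v3d render node in `fd`.
 */
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>        /* installed kernel UAPI header; path may vary */

static int v3d_supports_csd(int fd)
{
        struct drm_v3d_get_param get = {
                .param = DRM_V3D_PARAM_SUPPORTS_CSD,
        };

        if (ioctl(fd, DRM_IOCTL_V3D_GET_PARAM, &get) != 0)
                return 0;       /* pre-CSD kernels reject unknown params with -EINVAL */
        return get.value != 0;
}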
@@ -123,7 +141,7 @@
 {
         struct v3d_dev *v3d = to_v3d_dev(dev);
         struct v3d_file_priv *v3d_priv;
-        struct drm_sched_rq *rq;
+        struct drm_gpu_scheduler *sched;
         int i;

         v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -133,8 +151,10 @@
         v3d_priv->v3d = v3d;

         for (i = 0; i < V3D_MAX_QUEUES; i++) {
-                rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-                drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
+                sched = &v3d->queue[i].sched;
+                drm_sched_entity_init(&v3d_priv->sched_entity[i],
+                                      DRM_SCHED_PRIORITY_NORMAL, &sched,
+                                      1, NULL);
         }

         file->driver_priv = v3d_priv;
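
For reference, the call above follows the newer GPU-scheduler API, where drm_sched_entity_init() takes the entity priority and an array of candidate schedulers instead of a pre-selected run queue. A minimal sketch of that calling convention with a hypothetical example_* wrapper (the v3d hunk above binds one entity per queue and, as before, ignores the return value):

/* Sketch of the newer prototype:
 *   drm_sched_entity_init(entity, priority, sched_list, num_sched_list, guilty)
 * For a single scheduler, a one-element list is passed and the entity is
 * scheduled at the given priority on that scheduler.
 */
#include <drm/gpu_scheduler.h>

static int example_init_queue_entity(struct drm_sched_entity *entity,
                                     struct drm_gpu_scheduler *sched)
{
        struct drm_gpu_scheduler *sched_list[] = { sched };

        return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                     sched_list, 1, NULL);
}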
@@ -155,22 +175,13 @@
         kfree(v3d_priv);
 }

-static const struct file_operations v3d_drm_fops = {
-        .owner = THIS_MODULE,
-        .open = drm_open,
-        .release = drm_release,
-        .unlocked_ioctl = drm_ioctl,
-        .mmap = v3d_mmap,
-        .poll = drm_poll,
-        .read = drm_read,
-        .compat_ioctl = drm_compat_ioctl,
-        .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(v3d_drm_fops);

 /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
  * protection between clients. Note that render nodes would be be
  * able to submit CLs that could access BOs from clients authenticated
- * with the master node.
+ * with the master node. The TFU doesn't use the GMP, so it would
+ * need to stay DRM_AUTH until we do buffer size/offset validation.
  */
 static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
         DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
@@ -179,18 +190,13 @@
         DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
         DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
         DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
-};
-
-static const struct vm_operations_struct v3d_vm_ops = {
-        .fault = v3d_gem_fault,
-        .open = drm_gem_vm_open,
-        .close = drm_gem_vm_close,
+        DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
+        DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
 };

 static struct drm_driver v3d_drm_driver = {
         .driver_features = (DRIVER_GEM |
                             DRIVER_RENDER |
-                            DRIVER_PRIME |
                             DRIVER_SYNCOBJ),

         .open = v3d_open,
@@ -200,17 +206,11 @@
         .debugfs_init = v3d_debugfs_init,
 #endif

-        .gem_free_object_unlocked = v3d_free_object,
-        .gem_vm_ops = &v3d_vm_ops,
-
+        .gem_create_object = v3d_create_object,
         .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-        .gem_prime_import = drm_gem_prime_import,
-        .gem_prime_export = drm_gem_prime_export,
-        .gem_prime_res_obj = v3d_prime_res_obj,
-        .gem_prime_get_sg_table = v3d_prime_get_sg_table,
         .gem_prime_import_sg_table = v3d_prime_import_sg_table,
-        .gem_prime_mmap = v3d_prime_mmap,
+        .gem_prime_mmap = drm_gem_prime_mmap,

         .ioctls = v3d_drm_ioctls,
         .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
@@ -235,9 +235,9 @@
 map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
 {
         struct resource *res =
-                platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name);
+                platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);

-        *regs = devm_ioremap_resource(v3d->dev, res);
+        *regs = devm_ioremap_resource(v3d->drm.dev, res);
         return PTR_ERR_OR_ZERO(*regs);
 }

@@ -247,28 +247,30 @@
         struct drm_device *drm;
         struct v3d_dev *v3d;
         int ret;
+        u32 mmu_debug;
         u32 ident1;

-        dev->coherent_dma_mask = DMA_BIT_MASK(36);

-        v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
-        if (!v3d)
-                return -ENOMEM;
-        v3d->dev = dev;
-        v3d->pdev = pdev;
+        v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
+        if (IS_ERR(v3d))
+                return PTR_ERR(v3d);
+
         drm = &v3d->drm;

-        ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
-        if (ret)
-                goto dev_free;
+        platform_set_drvdata(pdev, drm);

         ret = map_regs(v3d, &v3d->hub_regs, "hub");
         if (ret)
-                goto dev_free;
+                return ret;

         ret = map_regs(v3d, &v3d->core_regs[0], "core0");
         if (ret)
-                goto dev_free;
+                return ret;
+
+        mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
+        dev->coherent_dma_mask =
+                DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
+        v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);

         ident1 = V3D_READ(V3D_HUB_IDENT1);
         v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
@@ -276,34 +278,42 @@
         v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
         WARN_ON(v3d->cores > 1); /* multicore not yet implemented */

+        v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
+        if (IS_ERR(v3d->reset)) {
+                ret = PTR_ERR(v3d->reset);
+
+                if (ret == -EPROBE_DEFER)
+                        return ret;
+
+                v3d->reset = NULL;
+                ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
+                if (ret) {
+                        dev_err(dev,
+                                "Failed to get reset control or bridge regs\n");
+                        return ret;
+                }
+        }
+
         if (v3d->ver < 41) {
                 ret = map_regs(v3d, &v3d->gca_regs, "gca");
                 if (ret)
-                        goto dev_free;
+                        return ret;
         }

         v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
                                         GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
         if (!v3d->mmu_scratch) {
                 dev_err(dev, "Failed to allocate MMU scratch page\n");
-                ret = -ENOMEM;
-                goto dev_free;
+                return -ENOMEM;
         }

         pm_runtime_use_autosuspend(dev);
         pm_runtime_set_autosuspend_delay(dev, 50);
         pm_runtime_enable(dev);

-        ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev);
-        if (ret)
-                goto dma_free;
-
-        platform_set_drvdata(pdev, drm);
-        drm->dev_private = v3d;
-
         ret = v3d_gem_init(drm);
         if (ret)
-                goto dev_destroy;
+                goto dma_free;

         ret = v3d_irq_init(v3d);
         if (ret)
@@ -319,12 +329,8 @@
         v3d_irq_disable(v3d);
 gem_destroy:
         v3d_gem_destroy(drm);
-dev_destroy:
-        drm_dev_put(drm);
 dma_free:
         dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
-dev_free:
-        kfree(v3d);
         return ret;
 }

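
The shrinking error path above falls out of this commit's switch to devm_drm_dev_alloc(): the drm_device (and the v3d_dev embedding it) is now a device-managed allocation, so probe failures no longer need drm_dev_put()/kfree() unwinding and only the manually allocated MMU scratch page keeps a cleanup label. A minimal sketch of that managed-allocation probe pattern, with hypothetical foo_* names:

/* devm_drm_dev_alloc() allocates the driver struct with an embedded
 * drm_device and ties its lifetime to the parent device, so error paths
 * can simply return.
 */
#include <drm/drm_drv.h>
#include <linux/platform_device.h>

struct foo_dev {
        struct drm_device drm;          /* the embedded drm_device member */
        void __iomem *regs;
};

static const struct drm_driver foo_drm_driver;  /* fops/ioctls defined elsewhere */

static int foo_probe(struct platform_device *pdev)
{
        struct foo_dev *foo;
        int ret;

        foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
                                 struct foo_dev, drm);
        if (IS_ERR(foo))
                return PTR_ERR(foo);

        /* hardware setup here; on failure just return the error */

        ret = drm_dev_register(&foo->drm, 0);
        if (ret)
                return ret;

        return 0;
}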
@@ -337,9 +343,8 @@

         v3d_gem_destroy(drm);

-        drm_dev_put(drm);
-
-        dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+        dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
+                    v3d->mmu_scratch_paddr);

         return 0;
 }
@@ -353,18 +358,7 @@
         },
 };

-static int __init v3d_drm_register(void)
-{
-        return platform_driver_register(&v3d_platform_driver);
-}
-
-static void __exit v3d_drm_unregister(void)
-{
-        platform_driver_unregister(&v3d_platform_driver);
-}
-
-module_init(v3d_drm_register);
-module_exit(v3d_drm_unregister);
+module_platform_driver(v3d_platform_driver);

 MODULE_ALIAS("platform:v3d-drm");
 MODULE_DESCRIPTION("Broadcom V3D DRM Driver");
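
module_platform_driver() generates essentially the boilerplate deleted above; roughly, the macro expands along these lines (paraphrasing the module_driver() helper in <linux/platform_device.h>):

/* Approximate expansion of module_platform_driver(v3d_platform_driver):
 * the same register/unregister pair this hunk removes by hand.
 */
static int __init v3d_platform_driver_init(void)
{
        return platform_driver_register(&v3d_platform_driver);
}
module_init(v3d_platform_driver_init);

static void __exit v3d_platform_driver_exit(void)
{
        platform_driver_unregister(&v3d_platform_driver);
}
module_exit(v3d_platform_driver_exit);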