forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/gpu/drm/xen/xen_drm_front.c
@@ -8,17 +8,23 @@
  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
  */
 
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem.h>
-
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
 
 #include <xen/platform_pci.h>
 #include <xen/xen.h>
 #include <xen/xenbus.h>
 
+#include <xen/xen-front-pgdir-shbuf.h>
 #include <xen/interface/io/displif.h>
 
 #include "xen_drm_front.h"
@@ -26,28 +32,20 @@
 #include "xen_drm_front_evtchnl.h"
 #include "xen_drm_front_gem.h"
 #include "xen_drm_front_kms.h"
-#include "xen_drm_front_shbuf.h"
 
 struct xen_drm_front_dbuf {
 	struct list_head list;
 	u64 dbuf_cookie;
 	u64 fb_cookie;
-	struct xen_drm_front_shbuf *shbuf;
+
+	struct xen_front_pgdir_shbuf shbuf;
 };
 
-static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
-			    struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
+static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
+			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
 {
-	struct xen_drm_front_dbuf *dbuf;
-
-	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
-	if (!dbuf)
-		return -ENOMEM;
-
 	dbuf->dbuf_cookie = dbuf_cookie;
-	dbuf->shbuf = shbuf;
 	list_add(&dbuf->list, &front_info->dbuf_list);
-	return 0;
 }
 
 static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
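
Note: the driver-private xen_drm_front_shbuf wrapper (separately kzalloc'ed and tracked by pointer) is replaced with a xen_front_pgdir_shbuf embedded in the dbuf itself, so dbuf_add_to_list() no longer allocates memory and cannot fail. A minimal sketch of the shared-buffer lifecycle as this patch uses it, with identifiers from <xen/xen-front-pgdir-shbuf.h> and error handling trimmed:

	/* Describe the buffer to the helper; all fields below appear in this patch. */
	struct xen_front_pgdir_shbuf_cfg buf_cfg = {
		.xb_dev    = front_info->xb_dev,
		.num_pages = DIV_ROUND_UP(size, PAGE_SIZE),
		.pages     = pages,            /* pages backing the display buffer */
		.pgdir     = &dbuf->shbuf,     /* embedded object, filled by the helper */
		.be_alloc  = front_info->cfg.be_alloc,
	};

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);              /* grant the pages */
	gref = xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf); /* for the create request */
	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);            /* backend-allocated case */
	/* ... use the buffer ... */
	xen_front_pgdir_shbuf_unmap(&dbuf->shbuf);
	xen_front_pgdir_shbuf_free(&dbuf->shbuf);
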
@@ -62,15 +60,6 @@
 	return NULL;
 }
 
-static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
-{
-	struct xen_drm_front_dbuf *buf, *q;
-
-	list_for_each_entry_safe(buf, q, dbuf_list, list)
-		if (buf->fb_cookie == fb_cookie)
-			xen_drm_front_shbuf_flush(buf->shbuf);
-}
-
 static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
 {
 	struct xen_drm_front_dbuf *buf, *q;
@@ -78,8 +67,8 @@
 	list_for_each_entry_safe(buf, q, dbuf_list, list)
 		if (buf->dbuf_cookie == dbuf_cookie) {
 			list_del(&buf->list);
-			xen_drm_front_shbuf_unmap(buf->shbuf);
-			xen_drm_front_shbuf_free(buf->shbuf);
+			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+			xen_front_pgdir_shbuf_free(&buf->shbuf);
 			kfree(buf);
 			break;
 		}
@@ -91,8 +80,8 @@
 
 	list_for_each_entry_safe(buf, q, dbuf_list, list) {
 		list_del(&buf->list);
-		xen_drm_front_shbuf_unmap(buf->shbuf);
-		xen_drm_front_shbuf_free(buf->shbuf);
+		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+		xen_front_pgdir_shbuf_free(&buf->shbuf);
 		kfree(buf);
 	}
 }
@@ -168,12 +157,13 @@
 
 int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
 			      u64 dbuf_cookie, u32 width, u32 height,
-			      u32 bpp, u64 size, struct page **pages)
+			      u32 bpp, u64 size, u32 offset,
+			      struct page **pages)
 {
 	struct xen_drm_front_evtchnl *evtchnl;
-	struct xen_drm_front_shbuf *shbuf;
+	struct xen_drm_front_dbuf *dbuf;
 	struct xendispl_req *req;
-	struct xen_drm_front_shbuf_cfg buf_cfg;
+	struct xen_front_pgdir_shbuf_cfg buf_cfg;
 	unsigned long flags;
 	int ret;
 
@@ -181,29 +171,31 @@
 	if (unlikely(!evtchnl))
 		return -EIO;
 
+	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
+	if (!dbuf)
+		return -ENOMEM;
+
+	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
+
 	memset(&buf_cfg, 0, sizeof(buf_cfg));
 	buf_cfg.xb_dev = front_info->xb_dev;
+	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
 	buf_cfg.pages = pages;
-	buf_cfg.size = size;
+	buf_cfg.pgdir = &dbuf->shbuf;
 	buf_cfg.be_alloc = front_info->cfg.be_alloc;
 
-	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
-	if (IS_ERR(shbuf))
-		return PTR_ERR(shbuf);
-
-	ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
-	if (ret < 0) {
-		xen_drm_front_shbuf_free(shbuf);
-		return ret;
-	}
+	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
+	if (ret < 0)
+		goto fail_shbuf_alloc;
 
 	mutex_lock(&evtchnl->u.req.req_io_lock);
 
 	spin_lock_irqsave(&front_info->io_lock, flags);
 	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
 	req->op.dbuf_create.gref_directory =
-		xen_drm_front_shbuf_get_dir_start(shbuf);
+		xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
 	req->op.dbuf_create.buffer_sz = size;
+	req->op.dbuf_create.data_ofs = offset;
 	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
 	req->op.dbuf_create.width = width;
 	req->op.dbuf_create.height = height;
@@ -221,7 +213,7 @@
 	if (ret < 0)
 		goto fail;
 
-	ret = xen_drm_front_shbuf_map(shbuf);
+	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
 	if (ret < 0)
 		goto fail;
 
@@ -230,6 +222,7 @@
 
 fail:
 	mutex_unlock(&evtchnl->u.req.req_io_lock);
+fail_shbuf_alloc:
 	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
 	return ret;
 }
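
Note: the dbuf is now linked into front_info->dbuf_list before the shared buffer is allocated, which is what lets every failure branch funnel into dbuf_free(): it looks the cookie up on the list, unmaps and frees the embedded shbuf (both safe on a zero-initialized shbuf that never got allocated) and kfree()s the entry. Condensed unwind order of the restructured xen_drm_front_dbuf_create():

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;
	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);   /* void; cannot fail */

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;             /* req_io_lock not yet taken */
	/* ... send XENDISPL_OP_DBUF_CREATE, map backend-allocated buffers ... */
fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);    /* unmap + free + kfree */
	return ret;
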
@@ -358,7 +351,6 @@
 	if (unlikely(conn_idx >= front_info->num_evt_pairs))
 		return -EINVAL;
 
-	dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
 	evtchnl = &front_info->evt_pairs[conn_idx].req;
 
 	mutex_lock(&evtchnl->u.req.req_io_lock);
@@ -418,7 +410,7 @@
 	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
-					args->size,
+					args->size, 0,
					xen_drm_front_gem_get_pages(obj));
 	if (ret)
 		goto fail_backend;
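
Note: the extra 0 argument is the data offset added to xen_drm_front_dbuf_create() above; it is forwarded to the backend as req->op.dbuf_create.data_ofs and tells it where the pixel data starts inside the shared buffer. Dumb buffers allocated here always start at offset 0; a hypothetical caller wrapping a buffer whose pixels begin past a header would pass that offset instead:

	/* Hypothetical example: data_offset is not a name from this patch. */
	ret = xen_drm_front_dbuf_create(front_info, cookie,
					width, height, bpp, size,
					data_offset, pages);
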
@@ -429,7 +421,7 @@
 		goto fail_handle;
 
 	/* Drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 	return 0;
 
 fail_handle:
@@ -437,7 +429,7 @@
				   xen_drm_front_dbuf_to_cookie(obj));
 fail_backend:
 	/* drop reference from allocate */
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 fail:
 	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
 	return ret;
@@ -470,9 +462,6 @@
 	drm_atomic_helper_shutdown(dev);
 	drm_mode_config_cleanup(dev);
 
-	drm_dev_fini(dev);
-	kfree(dev);
-
 	if (front_info->cfg.be_alloc)
 		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
@@ -500,15 +489,12 @@
 };
 
 static struct drm_driver xen_drm_driver = {
-	.driver_features           = DRIVER_GEM | DRIVER_MODESET |
-				     DRIVER_PRIME | DRIVER_ATOMIC,
+	.driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 	.release                   = xen_drm_drv_release,
 	.gem_vm_ops                = &xen_drm_drv_vm_ops,
 	.gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
 	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
-	.gem_prime_import          = drm_gem_prime_import,
-	.gem_prime_export          = drm_gem_prime_export,
 	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
 	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
 	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
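
Note: two DRM core changes meet in this hunk. The DRIVER_PRIME capability flag was removed from the core (PRIME is unconditionally available to GEM drivers), and the core falls back to drm_gem_prime_import()/drm_gem_prime_export() when a driver leaves those hooks NULL, so naming the generic helpers explicitly is redundant. A sketch of the PRIME plumbing a driver of this kernel era still declares, with hypothetical example_* sg-table helpers standing in for the driver's own:

	static struct drm_driver example_driver = {
		.driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
		/* .gem_prime_import/.gem_prime_export left NULL: core defaults apply */
		.gem_prime_import_sg_table = example_import_sg_table,  /* hypothetical */
		.gem_prime_get_sg_table    = example_get_sg_table,     /* hypothetical */
	};
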
@@ -545,7 +531,7 @@
 	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
 	if (IS_ERR(drm_dev)) {
 		ret = PTR_ERR(drm_dev);
-		goto fail;
+		goto fail_dev;
 	}
 
 	drm_info->drm_dev = drm_dev;
@@ -574,8 +560,11 @@
 fail_modeset:
 	drm_kms_helper_poll_fini(drm_dev);
 	drm_mode_config_cleanup(drm_dev);
-fail:
+	drm_dev_put(drm_dev);
+fail_dev:
 	kfree(drm_info);
+	front_info->drm_info = NULL;
+fail:
 	return ret;
 }
 
@@ -597,6 +586,7 @@
 
 	drm_kms_helper_poll_fini(dev);
 	drm_dev_unplug(dev);
+	drm_dev_put(dev);
 
 	front_info->drm_info = NULL;
 
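
Note: drm_device teardown moves to reference counting here and in the two hunks above. drm_dev_alloc() returns a device holding one reference, so the manual drm_dev_fini()/kfree() pair disappears from .release; instead, the probe error path and this unplug path drop the last reference with drm_dev_put(), at which point the DRM core runs .release and frees the device itself. The resulting pattern, assuming the functions in this file:

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);  /* refcount == 1 */
	ret = drm_dev_register(drm_dev, 0);
	/* ... on unplug ... */
	drm_dev_unplug(drm_dev);  /* mark gone for userspace still holding fds */
	drm_dev_put(drm_dev);     /* last ref: core calls .release, then frees */
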
@@ -661,9 +651,7 @@
 
 	switch (backend_state) {
 	case XenbusStateReconfiguring:
-		/* fall through */
 	case XenbusStateReconfigured:
-		/* fall through */
 	case XenbusStateInitialised:
 		break;
 
@@ -713,7 +701,6 @@
 		break;
 
 	case XenbusStateUnknown:
-		/* fall through */
 	case XenbusStateClosed:
 		if (xb_dev->state == XenbusStateClosed)
 			break;
@@ -730,17 +717,9 @@
 	struct device *dev = &xb_dev->dev;
 	int ret;
 
-	/*
-	 * The device is not spawn from a device tree, so arch_setup_dma_ops
-	 * is not called, thus leaving the device with dummy DMA ops.
-	 * This makes the device return error on PRIME buffer import, which
-	 * is not correct: to fix this call of_dma_configure() with a NULL
-	 * node to set default DMA ops.
-	 */
-	dev->coherent_dma_mask = DMA_BIT_MASK(32);
-	ret = of_dma_configure(dev, NULL, true);
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
 	if (ret < 0) {
-		DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
+		DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
 		return ret;
 	}
 
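
Note: the xenbus device is not instantiated from a device tree, so the old code called of_dma_configure() with a NULL node purely to obtain usable DMA ops, with a 32-bit coherent mask. dma_coerce_mask_and_coherent() states the intent directly: it points dev->dma_mask at dev->coherent_dma_mask (xenbus never assigns a streaming mask) and sets both masks, widened here to 64 bits. Minimal sketch for a generic struct device:

	#include <linux/dma-mapping.h>

	/* What the new probe code does: force-assign a streaming DMA mask for
	 * a bus that never set one, then configure both masks to 64 bits. */
	static int example_setup_dma(struct device *dev)
	{
		return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	}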