 .. |  .. |
  8 |   8 |   * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
  9 |   9 |   */
 10 |  10 |
 11 |     | -#include <drm/drmP.h>
 12 |     | -#include <drm/drm_atomic_helper.h>
 13 |     | -#include <drm/drm_crtc_helper.h>
 14 |     | -#include <drm/drm_gem.h>
 15 |     | -
     |  11 | +#include <linux/delay.h>
     |  12 | +#include <linux/dma-mapping.h>
     |  13 | +#include <linux/module.h>
 16 |  14 |  #include <linux/of_device.h>
     |  15 | +
     |  16 | +#include <drm/drm_atomic_helper.h>
     |  17 | +#include <drm/drm_drv.h>
     |  18 | +#include <drm/drm_ioctl.h>
     |  19 | +#include <drm/drm_probe_helper.h>
     |  20 | +#include <drm/drm_file.h>
     |  21 | +#include <drm/drm_gem.h>
 17 |  22 |
 18 |  23 |  #include <xen/platform_pci.h>
 19 |  24 |  #include <xen/xen.h>
 20 |  25 |  #include <xen/xenbus.h>
 21 |  26 |
     |  27 | +#include <xen/xen-front-pgdir-shbuf.h>
 22 |  28 |  #include <xen/interface/io/displif.h>
 23 |  29 |
 24 |  30 |  #include "xen_drm_front.h"
 .. |  .. |
 26 |  32 |  #include "xen_drm_front_evtchnl.h"
 27 |  33 |  #include "xen_drm_front_gem.h"
 28 |  34 |  #include "xen_drm_front_kms.h"
 29 |     | -#include "xen_drm_front_shbuf.h"
 30 |  35 |
 31 |  36 |  struct xen_drm_front_dbuf {
 32 |  37 |          struct list_head list;
 33 |  38 |          u64 dbuf_cookie;
 34 |  39 |          u64 fb_cookie;
 35 |     | -        struct xen_drm_front_shbuf *shbuf;
     |  40 | +
     |  41 | +        struct xen_front_pgdir_shbuf shbuf;
 36 |  42 |  };
 37 |  43 |
 38 |     | -static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
 39 |     | -                            struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
     |  44 | +static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
     |  45 | +                             struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
 40 |  46 |  {
 41 |     | -        struct xen_drm_front_dbuf *dbuf;
 42 |     | -
 43 |     | -        dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
 44 |     | -        if (!dbuf)
 45 |     | -                return -ENOMEM;
 46 |     | -
 47 |  47 |          dbuf->dbuf_cookie = dbuf_cookie;
 48 |     | -        dbuf->shbuf = shbuf;
 49 |  48 |          list_add(&dbuf->list, &front_info->dbuf_list);
 50 |     | -        return 0;
 51 |  49 |  }
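
The shared-buffer descriptor is now embedded in struct xen_drm_front_dbuf instead of being reached through a pointer, so a single kzalloc() covers both objects and dbuf_add_to_list() loses its only failure path. A condensed sketch of the resulting allocation pattern (names as in the patch; surrounding error handling elided):

    /* One allocation covers the dbuf bookkeeping and the shbuf state. */
    dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
    if (!dbuf)
            return -ENOMEM;

    dbuf->dbuf_cookie = dbuf_cookie;
    list_add(&dbuf->list, &front_info->dbuf_list);  /* cannot fail */

    /* The generic helper later initializes the embedded member in place. */
    buf_cfg.pgdir = &dbuf->shbuf;
    ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
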
 52 |  50 |
 53 |  51 |  static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
 .. |  .. |
 62 |  60 |          return NULL;
 63 |  61 |  }
 64 |  62 |
 65 |     | -static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
 66 |     | -{
 67 |     | -        struct xen_drm_front_dbuf *buf, *q;
 68 |     | -
 69 |     | -        list_for_each_entry_safe(buf, q, dbuf_list, list)
 70 |     | -                if (buf->fb_cookie == fb_cookie)
 71 |     | -                        xen_drm_front_shbuf_flush(buf->shbuf);
 72 |     | -}
 73 |     | -
 74 |  63 |  static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
 75 |  64 |  {
 76 |  65 |          struct xen_drm_front_dbuf *buf, *q;
 .. |  .. |
 78 |  67 |          list_for_each_entry_safe(buf, q, dbuf_list, list)
 79 |  68 |                  if (buf->dbuf_cookie == dbuf_cookie) {
 80 |  69 |                          list_del(&buf->list);
 81 |     | -                        xen_drm_front_shbuf_unmap(buf->shbuf);
 82 |     | -                        xen_drm_front_shbuf_free(buf->shbuf);
     |  70 | +                        xen_front_pgdir_shbuf_unmap(&buf->shbuf);
     |  71 | +                        xen_front_pgdir_shbuf_free(&buf->shbuf);
 83 |  72 |                          kfree(buf);
 84 |  73 |                          break;
 85 |  74 |                  }
 .. |  .. |
 91 |  80 |
 92 |  81 |          list_for_each_entry_safe(buf, q, dbuf_list, list) {
 93 |  82 |                  list_del(&buf->list);
 94 |     | -                xen_drm_front_shbuf_unmap(buf->shbuf);
 95 |     | -                xen_drm_front_shbuf_free(buf->shbuf);
     |  83 | +                xen_front_pgdir_shbuf_unmap(&buf->shbuf);
     |  84 | +                xen_front_pgdir_shbuf_free(&buf->shbuf);
 96 |  85 |                  kfree(buf);
 97 |  86 |          }
 98 |  87 |  }
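
Teardown mirrors the embedding: both the per-cookie and the free-all paths call the generic helper on the address of the embedded member, and the final kfree() of the containing dbuf releases the shbuf storage as well:

    list_del(&buf->list);
    xen_front_pgdir_shbuf_unmap(&buf->shbuf); /* tear down backend mappings */
    xen_front_pgdir_shbuf_free(&buf->shbuf);  /* drop grant refs and page directory */
    kfree(buf);                               /* frees the embedded shbuf too */
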
 .. |  .. |
168 | 157 |
169 | 158 |  int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
170 | 159 |                                u64 dbuf_cookie, u32 width, u32 height,
171 |     | -                              u32 bpp, u64 size, struct page **pages)
    | 160 | +                              u32 bpp, u64 size, u32 offset,
    | 161 | +                              struct page **pages)
172 | 162 |  {
173 | 163 |          struct xen_drm_front_evtchnl *evtchnl;
174 |     | -        struct xen_drm_front_shbuf *shbuf;
    | 164 | +        struct xen_drm_front_dbuf *dbuf;
175 | 165 |          struct xendispl_req *req;
176 |     | -        struct xen_drm_front_shbuf_cfg buf_cfg;
    | 166 | +        struct xen_front_pgdir_shbuf_cfg buf_cfg;
177 | 167 |          unsigned long flags;
178 | 168 |          int ret;
179 | 169 |
 .. |  .. |
181 | 171 |          if (unlikely(!evtchnl))
182 | 172 |                  return -EIO;
183 | 173 |
    | 174 | +        dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
    | 175 | +        if (!dbuf)
    | 176 | +                return -ENOMEM;
    | 177 | +
    | 178 | +        dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
    | 179 | +
184 | 180 |          memset(&buf_cfg, 0, sizeof(buf_cfg));
185 | 181 |          buf_cfg.xb_dev = front_info->xb_dev;
    | 182 | +        buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
186 | 183 |          buf_cfg.pages = pages;
187 |     | -        buf_cfg.size = size;
    | 184 | +        buf_cfg.pgdir = &dbuf->shbuf;
188 | 185 |          buf_cfg.be_alloc = front_info->cfg.be_alloc;
189 | 186 |
190 |     | -        shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
191 |     | -        if (IS_ERR(shbuf))
192 |     | -                return PTR_ERR(shbuf);
193 |     | -
194 |     | -        ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
195 |     | -        if (ret < 0) {
196 |     | -                xen_drm_front_shbuf_free(shbuf);
197 |     | -                return ret;
198 |     | -        }
    | 187 | +        ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
    | 188 | +        if (ret < 0)
    | 189 | +                goto fail_shbuf_alloc;
199 | 190 |
200 | 191 |          mutex_lock(&evtchnl->u.req.req_io_lock);
201 | 192 |
202 | 193 |          spin_lock_irqsave(&front_info->io_lock, flags);
203 | 194 |          req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
204 | 195 |          req->op.dbuf_create.gref_directory =
205 |     | -                xen_drm_front_shbuf_get_dir_start(shbuf);
    | 196 | +                xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
206 | 197 |          req->op.dbuf_create.buffer_sz = size;
    | 198 | +        req->op.dbuf_create.data_ofs = offset;
207 | 199 |          req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
208 | 200 |          req->op.dbuf_create.width = width;
209 | 201 |          req->op.dbuf_create.height = height;
 .. |  .. |
221 | 213 |          if (ret < 0)
222 | 214 |                  goto fail;
223 | 215 |
224 |     | -        ret = xen_drm_front_shbuf_map(shbuf);
    | 216 | +        ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
225 | 217 |          if (ret < 0)
226 | 218 |                  goto fail;
227 | 219 |
 .. |  .. |
230 | 222 |
231 | 223 |  fail:
232 | 224 |          mutex_unlock(&evtchnl->u.req.req_io_lock);
    | 225 | +fail_shbuf_alloc:
233 | 226 |          dbuf_free(&front_info->dbuf_list, dbuf_cookie);
234 | 227 |          return ret;
235 | 228 |  }
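
Adding the dbuf to the tracking list before allocating the shared buffer lets every later failure unwind through one path: dbuf_free() finds the entry by cookie and releases the mapping, the grants, and the allocation in one call. A condensed sketch of the new control flow (labels as in the patch):

    dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
    if (!dbuf)
            return -ENOMEM;

    dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

    ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
    if (ret < 0)
            goto fail_shbuf_alloc;  /* lock not taken yet: skip the unlock */

    mutex_lock(&evtchnl->u.req.req_io_lock);
    /* ... send XENDISPL_OP_DBUF_CREATE and map the buffer ... */

    fail:
            mutex_unlock(&evtchnl->u.req.req_io_lock);
    fail_shbuf_alloc:
            dbuf_free(&front_info->dbuf_list, dbuf_cookie); /* unmap + free + kfree */
            return ret;
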
 .. |  .. |
358 | 351 |          if (unlikely(conn_idx >= front_info->num_evt_pairs))
359 | 352 |                  return -EINVAL;
360 | 353 |
361 |     | -        dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
362 | 354 |          evtchnl = &front_info->evt_pairs[conn_idx].req;
363 | 355 |
364 | 356 |          mutex_lock(&evtchnl->u.req.req_io_lock);
 .. |  .. |
418 | 410 |          ret = xen_drm_front_dbuf_create(drm_info->front_info,
419 | 411 |                                          xen_drm_front_dbuf_to_cookie(obj),
420 | 412 |                                          args->width, args->height, args->bpp,
421 |     | -                                        args->size,
    | 413 | +                                        args->size, 0,
422 | 414 |                                          xen_drm_front_gem_get_pages(obj));
423 | 415 |          if (ret)
424 | 416 |                  goto fail_backend;
 .. |  .. |
429 | 421 |                  goto fail_handle;
430 | 422 |
431 | 423 |          /* Drop reference from allocate - handle holds it now */
432 |     | -        drm_gem_object_put_unlocked(obj);
    | 424 | +        drm_gem_object_put(obj);
433 | 425 |          return 0;
434 | 426 |
435 | 427 |  fail_handle:
 .. |  .. |
437 | 429 |                                     xen_drm_front_dbuf_to_cookie(obj));
438 | 430 |  fail_backend:
439 | 431 |          /* drop reference from allocate */
440 |     | -        drm_gem_object_put_unlocked(obj);
    | 432 | +        drm_gem_object_put(obj);
441 | 433 |  fail:
442 | 434 |          DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
443 | 435 |          return ret;
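
drm_gem_object_put_unlocked() became drm_gem_object_put() when the old struct_mutex-locked variant was retired from the DRM core; only the name changes, not the reference-ownership rule. A minimal sketch of that rule (gem_create() is a hypothetical allocator standing in for the driver's own):

    obj = gem_create(dev, args->size);  /* creation reference is ours */
    if (IS_ERR(obj))
            return PTR_ERR(obj);

    ret = drm_gem_handle_create(file_priv, obj, &args->handle);
    /* On success the handle holds its own reference, so the creation
     * reference is dropped on both the success and the error path. */
    drm_gem_object_put(obj);
    return ret;
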
 .. |  .. |
470 | 462 |          drm_atomic_helper_shutdown(dev);
471 | 463 |          drm_mode_config_cleanup(dev);
472 | 464 |
473 |     | -        drm_dev_fini(dev);
474 |     | -        kfree(dev);
475 |     | -
476 | 465 |          if (front_info->cfg.be_alloc)
477 | 466 |                  xenbus_switch_state(front_info->xb_dev,
478 | 467 |                                      XenbusStateInitialising);
 .. |  .. |
500 | 489 |  };
501 | 490 |
502 | 491 |  static struct drm_driver xen_drm_driver = {
503 |     | -        .driver_features = DRIVER_GEM | DRIVER_MODESET |
504 |     | -                           DRIVER_PRIME | DRIVER_ATOMIC,
    | 492 | +        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
505 | 493 |          .release = xen_drm_drv_release,
506 | 494 |          .gem_vm_ops = &xen_drm_drv_vm_ops,
507 | 495 |          .gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
508 | 496 |          .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
509 | 497 |          .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
510 |     | -        .gem_prime_import = drm_gem_prime_import,
511 |     | -        .gem_prime_export = drm_gem_prime_export,
512 | 498 |          .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
513 | 499 |          .gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
514 | 500 |          .gem_prime_vmap = xen_drm_front_gem_prime_vmap,
 .. |  .. |
545 | 531 |          drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
546 | 532 |          if (IS_ERR(drm_dev)) {
547 | 533 |                  ret = PTR_ERR(drm_dev);
548 |     | -                goto fail;
    | 534 | +                goto fail_dev;
549 | 535 |          }
550 | 536 |
551 | 537 |          drm_info->drm_dev = drm_dev;
 .. |  .. |
574 | 560 |  fail_modeset:
575 | 561 |          drm_kms_helper_poll_fini(drm_dev);
576 | 562 |          drm_mode_config_cleanup(drm_dev);
577 |     | -fail:
    | 563 | +        drm_dev_put(drm_dev);
    | 564 | +fail_dev:
578 | 565 |          kfree(drm_info);
    | 566 | +        front_info->drm_info = NULL;
    | 567 | +fail:
579 | 568 |          return ret;
580 | 569 |  }
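
With drm_dev_fini()/kfree() gone from the release callback, the drm_device is purely reference-counted: drm_dev_alloc() hands it out with one reference, drm_dev_put() drops one, and the driver's .release hook runs when the count reaches zero (on kernels with managed DRM cleanup the core then frees the structure itself). The bind error path follows the same rule; a sketch with the patch's labels:

    drm_dev = drm_dev_alloc(&xen_drm_driver, dev);  /* refcount = 1 */
    if (IS_ERR(drm_dev)) {
            ret = PTR_ERR(drm_dev);
            goto fail_dev;          /* nothing to put yet */
    }
    /* ... modeset init ... */

    fail_modeset:
            drm_kms_helper_poll_fini(drm_dev);
            drm_mode_config_cleanup(drm_dev);
            drm_dev_put(drm_dev);   /* last reference: .release runs */
    fail_dev:
            kfree(drm_info);
            front_info->drm_info = NULL;
    fail:
            return ret;
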
581 | 570 |
 .. |  .. |
597 | 586 |
598 | 587 |          drm_kms_helper_poll_fini(dev);
599 | 588 |          drm_dev_unplug(dev);
    | 589 | +        drm_dev_put(dev);
600 | 590 |
601 | 591 |          front_info->drm_info = NULL;
602 | 592 |
 .. |  .. |
661 | 651 |
662 | 652 |          switch (backend_state) {
663 | 653 |          case XenbusStateReconfiguring:
664 |     | -                /* fall through */
665 | 654 |          case XenbusStateReconfigured:
666 |     | -                /* fall through */
667 | 655 |          case XenbusStateInitialised:
668 | 656 |                  break;
669 | 657 |
 .. |  .. |
713 | 701 |                  break;
714 | 702 |
715 | 703 |          case XenbusStateUnknown:
716 |     | -                /* fall through */
717 | 704 |          case XenbusStateClosed:
718 | 705 |                  if (xb_dev->state == XenbusStateClosed)
719 | 706 |                          break;
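
The /* fall through */ comments could simply be deleted because compilers implementing -Wimplicit-fallthrough only warn when statements appear between case labels; directly stacked labels need no annotation:

    switch (backend_state) {
    case XenbusStateReconfiguring:   /* adjacent labels: no marker needed */
    case XenbusStateReconfigured:
    case XenbusStateInitialised:
            break;
    /* ... */
    }
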
 .. |  .. |
730 | 717 |          struct device *dev = &xb_dev->dev;
731 | 718 |          int ret;
732 | 719 |
733 |     | -        /*
734 |     | -         * The device is not spawn from a device tree, so arch_setup_dma_ops
735 |     | -         * is not called, thus leaving the device with dummy DMA ops.
736 |     | -         * This makes the device return error on PRIME buffer import, which
737 |     | -         * is not correct: to fix this call of_dma_configure() with a NULL
738 |     | -         * node to set default DMA ops.
739 |     | -         */
740 |     | -        dev->coherent_dma_mask = DMA_BIT_MASK(32);
741 |     | -        ret = of_dma_configure(dev, NULL, true);
    | 720 | +        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
742 | 721 |          if (ret < 0) {
743 |     | -                DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
    | 722 | +                DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
744 | 723 |                  return ret;
745 | 724 |          }
746 | 725 |
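
dma_coerce_mask_and_coherent() points dev->dma_mask at the device's coherent mask and sets both to the given value, which is all this virtual, non-device-tree device needs; the earlier detour through of_dma_configure() with a NULL node is no longer required, and the mask is widened to 64 bits while at it. A condensed sketch of the probe-time usage (surrounding probe code abbreviated):

    static int xen_drv_probe(struct xenbus_device *xb_dev,
                             const struct xenbus_device_id *id)
    {
            struct device *dev = &xb_dev->dev;
            int ret;

            /* Virtual device: no addressing restrictions, allow full 64 bits. */
            ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
            if (ret < 0) {
                    DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
                    return ret;
            }
            /* ... */
    }
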