
hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
diff --git a/kernel/drivers/gpu/drm/xen/xen_drm_front_gem.c b/kernel/drivers/gpu/drm/xen/xen_drm_front_gem.c
--- a/kernel/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/kernel/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -8,21 +8,20 @@
  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
  */
 
-#include "xen_drm_front_gem.h"
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem.h>
-
 #include <linux/dma-buf.h>
 #include <linux/scatterlist.h>
 #include <linux/shmem_fs.h>
 
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_probe_helper.h>
+
 #include <xen/balloon.h>
+#include <xen/xen.h>
 
 #include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
+#include "xen_drm_front_gem.h"
 
 struct xen_gem_object {
 	struct drm_gem_object base;
@@ -101,8 +100,8 @@
 		 * allocate ballooned pages which will be used to map
 		 * grant references provided by the backend
 		 */
-		ret = alloc_xenballooned_pages(xen_obj->num_pages,
-					       xen_obj->pages);
+		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+						  xen_obj->pages);
 		if (ret < 0) {
 			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
 				  xen_obj->num_pages, ret);
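
The rename tracks the upstream split of the balloon driver: pages reserved for mapping grant references now come from the unpopulated-memory allocator, xen_alloc_unpopulated_pages()/xen_free_unpopulated_pages(), declared in <xen/xen.h> (hence the new include above). A minimal sketch of the call pattern; grab_backend_pages() is a hypothetical helper, not part of this driver:

#include <linux/err.h>
#include <linux/mm.h>
#include <xen/xen.h>

static struct page **grab_backend_pages(unsigned int num_pages)
{
	struct page **pages;
	int ret;

	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* reserve frames without backing RAM; grant mappings fill them in */
	ret = xen_alloc_unpopulated_pages(num_pages, pages);
	if (ret < 0) {
		kvfree(pages);
		return ERR_PTR(ret);
	}

	/* release later with xen_free_unpopulated_pages(num_pages, pages) */
	return pages;
}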
@@ -154,8 +153,8 @@
 	} else {
 		if (xen_obj->pages) {
 			if (xen_obj->be_alloc) {
-				free_xenballooned_pages(xen_obj->num_pages,
-							xen_obj->pages);
+				xen_free_unpopulated_pages(xen_obj->num_pages,
+							   xen_obj->pages);
 				gem_free_pages_array(xen_obj);
 			} else {
 				drm_gem_put_pages(&xen_obj->base,
@@ -179,9 +178,10 @@
 	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
 
 	if (!xen_obj->pages)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+	return drm_prime_pages_to_sg(gem_obj->dev,
+				     xen_obj->pages, xen_obj->num_pages);
 }
 
 struct drm_gem_object *
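
Two upstream API changes meet in this hunk: drm_prime_pages_to_sg() grew a struct drm_device * parameter so the helper can cap scatterlist segments at the device's DMA limits, and .get_sg_table callbacks are expected to report failure with ERR_PTR() rather than NULL, which is why the early return switches to ERR_PTR(-ENOMEM). A hedged sketch of the new calling convention; export_sgt() is an illustrative name, not a function in this file:

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <linux/err.h>

static struct sg_table *export_sgt(struct drm_gem_object *obj,
				   struct page **pages,
				   unsigned int num_pages)
{
	struct sg_table *sgt;

	/* obj->dev lets the helper respect the device's segment-size limit */
	sgt = drm_prime_pages_to_sg(obj->dev, pages, num_pages);
	if (IS_ERR(sgt))
		DRM_ERROR("Failed to build sg table: %ld\n", PTR_ERR(sgt));

	return sgt;	/* callers must check with IS_ERR(), not for NULL */
}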
@@ -212,12 +212,13 @@
 
 	ret = xen_drm_front_dbuf_create(drm_info->front_info,
 					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
-					0, 0, 0, size, xen_obj->pages);
+					0, 0, 0, size, sgt->sgl->offset,
+					xen_obj->pages);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
 	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
-		  size, sgt->nents);
+		  size, sgt->orig_nents);
 
 	return &xen_obj->base;
 }
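
The log line switches to orig_nents because an sg_table keeps two counts: orig_nents is the number of CPU-side entries built from the page array, while nents is only meaningful after DMA mapping, where adjacent entries may have been coalesced. A small sketch of the distinction using the generic scatterlist iterators; dump_sgt() is hypothetical:

#include <linux/printk.h>
#include <linux/scatterlist.h>

static void dump_sgt(struct sg_table *sgt)
{
	struct scatterlist *sg;
	unsigned int i;

	/* CPU view: bounded by sgt->orig_nents */
	for_each_sgtable_sg(sgt, sg, i)
		pr_debug("cpu seg %u: len %u\n", i, sg->length);

	/* DMA view: bounded by sgt->nents, valid only after dma_map_sgtable() */
	for_each_sgtable_dma_sg(sgt, sg, i)
		pr_debug("dma seg %u: len %u\n", i, sg_dma_len(sg));
}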
@@ -225,8 +226,7 @@
 static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 			struct vm_area_struct *vma)
 {
-	unsigned long addr = vma->vm_start;
-	int i;
+	int ret;
 
 	/*
 	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -253,18 +253,11 @@
 	 * FIXME: as we insert all the pages now then no .fault handler must
 	 * be called, so don't provide one
 	 */
-	for (i = 0; i < xen_obj->num_pages; i++) {
-		int ret;
+	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+	if (ret < 0)
+		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
 
-		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
-		if (ret < 0) {
-			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
-			return ret;
-		}
-
-		addr += PAGE_SIZE;
-	}
-	return 0;
+	return ret;
 }
 
 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
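
vm_map_pages() folds the open-coded vm_insert_page() loop into one call: it inserts the whole page array at successive addresses from vma->vm_start, honours vma->vm_pgoff as an offset into the array, and fails with -ENXIO if the vma asks for more pages than the array holds, a check the old loop never made. A minimal sketch of an mmap path built on it, assuming a hypothetical driver with a pinned page array:

#include <linux/mm.h>

struct mydev_buf {			/* hypothetical buffer object */
	struct page **pages;
	unsigned long num_pages;
};

static int mydev_mmap_buf(struct mydev_buf *buf, struct vm_area_struct *vma)
{
	/*
	 * Maps buf->pages[vm_pgoff..] contiguously from vma->vm_start and
	 * validates the vma length, so the per-page vm_insert_page() /
	 * addr += PAGE_SIZE bookkeeping goes away.
	 */
	return vm_map_pages(vma, buf->pages, buf->num_pages);
}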