```diff
  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
  */
 
-#include "xen_drm_front_gem.h"
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem.h>
-
 #include <linux/dma-buf.h>
 #include <linux/scatterlist.h>
 #include <linux/shmem_fs.h>
 
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_probe_helper.h>
+
 #include <xen/balloon.h>
+#include <xen/xen.h>
 
 #include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
+#include "xen_drm_front_gem.h"
 
 struct xen_gem_object {
 	struct drm_gem_object base;
```
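The hunk above drops the legacy `<drm/drmP.h>` umbrella header and `<drm/drm_crtc_helper.h>` in favour of the specific headers the file actually uses: `<drm/drm_prime.h>` for the PRIME helpers and `<drm/drm_probe_helper.h>` for what used to live in the crtc helper header, plus `<xen/xen.h>` for the unpopulated-pages allocator used further down. The driver-private `"xen_drm_front_shbuf.h"` include goes away as well, presumably because its helpers are no longer referenced here. For reference, the resulting include block, reconstructed purely from this hunk, looks like:

```c
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>        /* drm_prime_pages_to_sg() */
#include <drm/drm_probe_helper.h> /* replaces the old drm_crtc_helper.h */

#include <xen/balloon.h>
#include <xen/xen.h>              /* xen_{alloc,free}_unpopulated_pages() */

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"
```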
|---|
```diff
 	 * allocate ballooned pages which will be used to map
 	 * grant references provided by the backend
 	 */
-	ret = alloc_xenballooned_pages(xen_obj->num_pages,
-				       xen_obj->pages);
+	ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+					  xen_obj->pages);
 	if (ret < 0) {
 		DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
 			  xen_obj->num_pages, ret);
```
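`xen_alloc_unpopulated_pages()` (declared in `<xen/xen.h>`, hence the new include above) replaces `alloc_xenballooned_pages()`: it hands back page structs that are not backed by populated RAM, which is what the driver needs for mapping grant references from the backend, and when `CONFIG_XEN_UNPOPULATED_ALLOC` is not enabled it falls back to ballooned pages, i.e. the old behaviour. A minimal sketch of the allocation side, using hypothetical helper names rather than the driver's actual code:

```c
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <xen/xen.h>

/*
 * Hypothetical helper (illustrative, not the driver's code): allocate an
 * array of unpopulated pages that grant references from the backend can
 * later be mapped into.
 */
static struct page **sketch_alloc_grant_pages(size_t num_pages)
{
	struct page **pages;
	int ret;

	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* falls back to ballooned pages without CONFIG_XEN_UNPOPULATED_ALLOC */
	ret = xen_alloc_unpopulated_pages(num_pages, pages);
	if (ret < 0) {
		pr_err("cannot allocate %zu unpopulated pages: %d\n",
		       num_pages, ret);
		kvfree(pages);
		return ERR_PTR(ret);
	}

	return pages;
}
```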
|---|
```diff
 	} else {
 		if (xen_obj->pages) {
 			if (xen_obj->be_alloc) {
-				free_xenballooned_pages(xen_obj->num_pages,
-							xen_obj->pages);
+				xen_free_unpopulated_pages(xen_obj->num_pages,
+							   xen_obj->pages);
 				gem_free_pages_array(xen_obj);
 			} else {
 				drm_gem_put_pages(&xen_obj->base,
```
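The release path mirrors the allocation: pages obtained from `xen_alloc_unpopulated_pages()` (the backend-allocated case, `be_alloc`) must be returned with `xen_free_unpopulated_pages()`, while locally allocated, shmem-backed objects hand their pages back through `drm_gem_put_pages()`. A hedged sketch of that branch, with illustrative names:

```c
/* Hypothetical teardown helper mirroring the hunk above. */
static void sketch_put_pages(struct xen_gem_object *xen_obj)
{
	if (!xen_obj->pages)
		return;

	if (xen_obj->be_alloc) {
		/* give the unpopulated/ballooned pages back to Xen */
		xen_free_unpopulated_pages(xen_obj->num_pages, xen_obj->pages);
	} else {
		/* shmem-backed: release the pages, marking them dirty so
		 * their contents are written back, without marking them
		 * accessed
		 */
		drm_gem_put_pages(&xen_obj->base, xen_obj->pages, true, false);
	}
}
```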
|---|
```diff
 	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
 
 	if (!xen_obj->pages)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+	return drm_prime_pages_to_sg(gem_obj->dev,
+				     xen_obj->pages, xen_obj->num_pages);
 }
 
 struct drm_gem_object *
```
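`drm_prime_pages_to_sg()` now takes the `struct drm_device` as its first argument, which lets the helper respect the device's DMA segment-size limits when building the table, and the callback reports failure with `ERR_PTR()` instead of `NULL`, matching the error-pointer convention the PRIME helpers expect. A hedged sketch of the export side, with illustrative names:

```c
/*
 * Hypothetical wrapper: turn a page array into an sg_table for export,
 * following the ERR_PTR() convention used in the hunk above.
 */
static struct sg_table *sketch_pages_to_sgt(struct drm_device *dev,
					    struct page **pages,
					    unsigned int num_pages)
{
	struct sg_table *sgt;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* the drm_device argument lets the helper cap segment sizes to
	 * what the device can actually DMA-map
	 */
	sgt = drm_prime_pages_to_sg(dev, pages, num_pages);
	if (IS_ERR(sgt))
		DRM_DEV_ERROR(dev->dev, "failed to build sg table: %ld\n",
			      PTR_ERR(sgt));

	return sgt;
}
```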
|---|
```diff
 
 	ret = xen_drm_front_dbuf_create(drm_info->front_info,
 					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
-					0, 0, 0, size, xen_obj->pages);
+					0, 0, 0, size, sgt->sgl->offset,
+					xen_obj->pages);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
 	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
-		  size, sgt->nents);
+		  size, sgt->orig_nents);
 
 	return &xen_obj->base;
 }
```
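Two related tweaks on the import side: the driver-internal `xen_drm_front_dbuf_create()` now also receives the buffer's offset within its first page, taken from `sgt->sgl->offset`, and the debug print switches from `nents` to `orig_nents`. After the scatterlist/DMA-mapping rework, `nents` describes the DMA-mapped entries while `orig_nents` is the CPU-side entry count produced by `drm_prime_pages_to_sg()`, so a message about the imported pages should use `orig_nents`. A small hypothetical helper to illustrate the distinction:

```c
/* Hypothetical debug helper: which sg_table counters mean what. */
static void sketch_dump_sgt(struct sg_table *sgt)
{
	/* CPU-side entries, valid as soon as the table is built */
	pr_debug("CPU entries: %u\n", sgt->orig_nents);

	/* DMA-mapped entries, only authoritative after dma_map_sgtable();
	 * may be smaller if the IOMMU coalesced segments
	 */
	pr_debug("DMA entries: %u\n", sgt->nents);

	/* offset of the buffer within its first page */
	pr_debug("offset in first page: %u\n", sgt->sgl->offset);
}
```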
|---|
```diff
 static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 			struct vm_area_struct *vma)
 {
-	unsigned long addr = vma->vm_start;
-	int i;
+	int ret;
 
 	/*
 	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
```
|---|
```diff
 	 * FIXME: as we insert all the pages now then no .fault handler must
 	 * be called, so don't provide one
 	 */
-	for (i = 0; i < xen_obj->num_pages; i++) {
-		int ret;
+	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+	if (ret < 0)
+		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
 
-		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
-		if (ret < 0) {
-			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
-			return ret;
-		}
-
-		addr += PAGE_SIZE;
-	}
-	return 0;
+	return ret;
 }
 
 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
```
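`vm_map_pages()` replaces the hand-rolled `vm_insert_page()` loop: it inserts the whole page array into the VMA in one call and itself checks that the VMA does not span more pages than the object provides. A hedged sketch of an mmap helper along these lines (illustrative names; the `vm_pgoff`/`vm_page_prot` handling is an assumption based on the comment visible in the hunk, and the direct `vm_flags` assignments match the kernel generation this diff targets, whereas current kernels use `vm_flags_set()`/`vm_flags_clear()`):

```c
/* Hypothetical mmap helper in the spirit of the hunk above. */
static int sketch_mmap_obj(struct xen_gem_object *xen_obj,
			   struct vm_area_struct *vma)
{
	int ret;

	/* back the mapping with struct pages, not raw PFNs */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	/* map the whole buffer: drop DRM's fake offset */
	vma->vm_pgoff = 0;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * vm_map_pages() checks the VMA size against num_pages (-ENXIO on
	 * overrun) and honours vma->vm_pgoff as an offset into the pages
	 * array, which is why vm_pgoff is cleared above.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}
```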
|---|