From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] add stmac read mac from eeprom
---
kernel/drivers/gpu/drm/drm_gem.c | 554 +++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 430 insertions(+), 124 deletions(-)
diff --git a/kernel/drivers/gpu/drm/drm_gem.c b/kernel/drivers/gpu/drm/drm_gem.c
index 2835ba2..8b30e8d 100644
--- a/kernel/drivers/gpu/drm/drm_gem.c
+++ b/kernel/drivers/gpu/drm/drm_gem.c
@@ -38,10 +38,16 @@
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>
-#include <drm/drmP.h>
-#include <drm/drm_vma_manager.h>
+
+#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
#include <drm/drm_print.h>
+#include <drm/drm_vma_manager.h>
+
#include "drm_internal.h"
/** @file drm_gem.c
@@ -72,22 +78,11 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-
-/* pgoff in mmap is an unsigned long, so we need to make sure that
- * the faked up offset will fit
- */
-
-#if BITS_PER_LONG == 64
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#else
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
-#endif
+static void
+drm_gem_init_release(struct drm_device *dev, void *ptr)
+{
+ drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+}
/**
* drm_gem_init - Initialize the GEM device fields
@@ -101,7 +96,8 @@
mutex_init(&dev->object_name_lock);
idr_init_base(&dev->object_name_idr, 1);
- vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+ vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
+ GFP_KERNEL);
if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
@@ -112,16 +108,7 @@
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
- return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
-
- drm_vma_offset_manager_destroy(dev->vma_offset_manager);
- kfree(dev->vma_offset_manager);
- dev->vma_offset_manager = NULL;
+ return drmm_add_action(dev, drm_gem_init_release, NULL);
}
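For illustration (not part of this patch), a minimal driver-side sketch of the drmm_* managed-resource pattern used above; the foo_* names and the clock field are hypothetical:

static void foo_stop_clock(struct drm_device *dev, void *ptr)
{
	struct foo_device *foo = ptr;	/* hypothetical driver struct */

	clk_disable_unprepare(foo->clk);
}

static int foo_init(struct drm_device *dev, struct foo_device *foo)
{
	/* freed automatically on final drm_device cleanup, no kfree() */
	foo->state = drmm_kzalloc(dev, sizeof(*foo->state), GFP_KERNEL);
	if (!foo->state)
		return -ENOMEM;

	/* run foo_stop_clock(dev, foo) during managed teardown */
	return drmm_add_action(dev, foo_stop_clock, foo);
}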
/**
@@ -171,24 +158,13 @@
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
+ dma_resv_init(&obj->_resv);
+ if (!obj->resv)
+ obj->resv = &obj->_resv;
+
drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
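For context, a hedged sketch of why the _resv fallback matters: an import path can point obj->resv at the exporter's reservation object before calling the init helper, and only otherwise falls back to the embedded _resv (struct foo_bo is hypothetical):

struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

if (!bo)
	return ERR_PTR(-ENOMEM);

/* share the exporter's dma_resv instead of the embedded _resv */
bo->base.resv = dma_buf->resv;
drm_gem_private_object_init(dev, &bo->base, dma_buf->size);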
-
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
@@ -226,7 +202,7 @@
struct drm_device *dev = obj->dev;
bool final = false;
- if (WARN_ON(obj->handle_count == 0))
+ if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
return;
/*
@@ -244,7 +220,7 @@
mutex_unlock(&dev->object_name_lock);
if (final)
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
}
/*
@@ -258,11 +234,12 @@
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- if (dev->driver->gem_close_object)
+ if (obj->funcs && obj->funcs->close)
+ obj->funcs->close(obj, file_priv);
+ else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -339,7 +316,7 @@
*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -411,7 +388,11 @@
if (ret)
goto err_remove;
- if (dev->driver->gem_open_object) {
+ if (obj->funcs && obj->funcs->open) {
+ ret = obj->funcs->open(obj, file_priv);
+ if (ret)
+ goto err_revoke;
+ } else if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
if (ret)
goto err_revoke;
@@ -435,7 +416,7 @@
* drm_gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
*
* Create a handle for this object. This adds a handle reference to the object,
* which includes a regular reference count. Callers will likely want to
@@ -552,6 +533,10 @@
* set during initialization. If you have special zone constraints, set them
* after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
* to keep pages in the required zone during swap-in.
+ *
+ * This function is only valid on objects initialized with
+ * drm_gem_object_init(); it cannot be used on objects set up with
+ * drm_gem_private_object_init() alone, as those have no backing shmem file.
*/
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
@@ -559,6 +544,10 @@
struct page *p, **pages;
struct pagevec pvec;
int i, npages;
+
+ if (WARN_ON(!obj->filp))
+ return ERR_PTR(-EINVAL);
/* This is the shared memory object that backs the GEM resource */
mapping = obj->filp->f_mapping;
@@ -636,6 +625,9 @@
pagevec_init(&pvec);
for (i = 0; i < npages; i++) {
+ if (!pages[i])
+ continue;
+
if (dirty)
set_page_dirty(pages[i]);
@@ -653,8 +645,87 @@
}
EXPORT_SYMBOL(drm_gem_put_pages);
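Illustrative only: a typical pairing of these two helpers in a shmem-backed driver (foo_* is hypothetical); drm_gem_get_pages() requires an object initialized via drm_gem_object_init(), per the WARN_ON added above:

static int foo_pin_pages(struct foo_bo *bo)
{
	struct page **pages = drm_gem_get_pages(&bo->base);

	if (IS_ERR(pages))
		return PTR_ERR(pages);

	bo->pages = pages;
	return 0;
}

static void foo_unpin_pages(struct foo_bo *bo)
{
	/* dirty = accessed = true: assume the GPU may have written them */
	drm_gem_put_pages(&bo->base, bo->pages, true, true);
	bo->pages = NULL;
}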
+static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
+ struct drm_gem_object **objs)
+{
+ int i, ret = 0;
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ for (i = 0; i < count; i++) {
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle[i]);
+ if (!obj) {
+ ret = -ENOENT;
+ break;
+ }
+ drm_gem_object_get(obj);
+ objs[i] = obj;
+ }
+ spin_unlock(&filp->table_lock);
+
+ return ret;
+}
+
/**
- * drm_gem_object_lookup - look up a GEM object from it's handle
+ * drm_gem_objects_lookup - look up GEM objects from an array of handles
+ * @filp: DRM file private data
+ * @bo_handles: user pointer to array of userspace handles
+ * @count: size of handle array
+ * @objs_out: returned pointer to array of drm_gem_object pointers
+ *
+ * Takes an array of userspace handles and returns a newly allocated array of
+ * GEM objects.
+ *
+ * For a single handle lookup, use drm_gem_object_lookup().
+ *
+ * Returns:
+ *
+ * @objs_out filled in with GEM object pointers. Returned GEM objects need to
+ * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
+ * failure. 0 is returned on success.
+ *
+ */
+int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+ int count, struct drm_gem_object ***objs_out)
+{
+ int ret;
+ u32 *handles;
+ struct drm_gem_object **objs;
+
+ if (!count)
+ return 0;
+
+ objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!objs)
+ return -ENOMEM;
+
+ *objs_out = objs;
+
+ handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
+ if (!handles) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy in GEM handles\n");
+ goto out;
+ }
+
+ ret = objects_lookup(filp, handles, count, objs);
+out:
+ kvfree(handles);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_objects_lookup);
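A hedged usage sketch (the args fields are hypothetical): because the objs array is zero-allocated and *objs_out is set even on failure, the caller can unwind uniformly:

struct drm_gem_object **objs = NULL;
int i, ret;

ret = drm_gem_objects_lookup(file_priv,
			     u64_to_user_ptr(args->bo_handles),
			     args->bo_count, &objs);
if (!ret) {
	/* ... use objs[0..bo_count-1] while holding the references ... */
}
/* even on failure some slots may hold references; drop them all */
for (i = 0; objs && i < args->bo_count; i++)
	if (objs[i])
		drm_gem_object_put(objs[i]);
kvfree(objs);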
+
+/**
+ * drm_gem_object_lookup - look up a GEM object from its handle
* @filp: DRM file private date
* @handle: userspace handle
*
@@ -662,24 +733,56 @@
*
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
+ *
+ * If looking up an array of handles, use drm_gem_objects_lookup().
*/
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = NULL;
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj)
- drm_gem_object_get(obj);
-
- spin_unlock(&filp->table_lock);
-
+ objects_lookup(filp, &handle, 1, &obj);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * drm_gem_dma_resv_wait - Wait on the shared and/or exclusive fences of a
+ * GEM object's reservation object.
+ * @filep: DRM file private data
+ * @handle: userspace handle
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Returns:
+ *
+ * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
+ * 0 on success.
+ */
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
+ bool wait_all, unsigned long timeout)
+{
+ long ret;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(filep, handle);
+ if (!obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+ return -EINVAL;
+ }
+
+ ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
+ true, timeout);
+ if (ret == 0)
+ ret = -ETIME;
+ else if (ret > 0)
+ ret = 0;
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_dma_resv_wait);
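An illustrative wait ioctl built on the new helper (struct drm_foo_wait_bo and its fields are hypothetical); drm_timeout_abs_to_jiffies() converts the absolute userspace timeout to jiffies:

static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_foo_wait_bo *args = data;

	/* wait_all = true: wait on readers and the writer */
	return drm_gem_dma_resv_wait(file_priv, args->handle, true,
				     drm_timeout_abs_to_jiffies(args->timeout_ns));
}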
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -697,7 +800,7 @@
int ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
- return -ENODEV;
+ return -EOPNOTSUPP;
ret = drm_gem_handle_delete(file_priv, args->handle);
@@ -724,7 +827,7 @@
int ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
- return -ENODEV;
+ return -EOPNOTSUPP;
obj = drm_gem_object_lookup(file_priv, args->handle);
if (obj == NULL)
@@ -750,7 +853,7 @@
err:
mutex_unlock(&dev->object_name_lock);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -761,6 +864,9 @@
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
@@ -772,7 +878,7 @@
u32 handle;
if (!drm_core_check_feature(dev, DRIVER_GEM))
- return -ENODEV;
+ return -EOPNOTSUPP;
mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
@@ -792,7 +898,7 @@
args->size = obj->size;
err:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -843,6 +949,7 @@
if (obj->filp)
fput(obj->filp);
+ dma_resv_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -852,7 +959,6 @@
* @kref: kref of the object to free
*
* Called after the last reference to the object has been lost.
- * Must be called holding &drm_device.struct_mutex.
*
* Frees the object
*/
@@ -863,48 +969,15 @@
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
- if (dev->driver->gem_free_object_unlocked) {
+ if (obj->funcs)
+ obj->funcs->free(obj);
+ else if (dev->driver->gem_free_object_unlocked)
dev->driver->gem_free_object_unlocked(obj);
- } else if (dev->driver->gem_free_object) {
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- dev->driver->gem_free_object(obj);
- }
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
- * drm_gem_object_put_unlocked - drop a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This releases a reference to @obj. Callers must not hold the
- * &drm_device.struct_mutex lock when calling this function.
- *
- * See also __drm_gem_object_put().
- */
-void
-drm_gem_object_put_unlocked(struct drm_gem_object *obj)
-{
- struct drm_device *dev;
-
- if (!obj)
- return;
-
- dev = obj->dev;
-
- if (dev->driver->gem_free_object_unlocked) {
- kref_put(&obj->refcount, drm_gem_object_free);
- } else {
- might_lock(&dev->struct_mutex);
- if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
- &dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
- }
-}
-EXPORT_SYMBOL(drm_gem_object_put_unlocked);
-
-/**
- * drm_gem_object_put - release a GEM buffer object reference
+ * drm_gem_object_put_locked - release a GEM buffer object reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must hold the
@@ -912,10 +985,10 @@
* driver doesn't use &drm_device.struct_mutex for anything.
*
* For drivers not encumbered with legacy locking use
- * drm_gem_object_put_unlocked() instead.
+ * drm_gem_object_put() instead.
*/
void
-drm_gem_object_put(struct drm_gem_object *obj)
+drm_gem_object_put_locked(struct drm_gem_object *obj)
{
if (obj) {
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -923,7 +996,7 @@
kref_put(&obj->refcount, drm_gem_object_free);
}
}
-EXPORT_SYMBOL(drm_gem_object_put);
+EXPORT_SYMBOL(drm_gem_object_put_locked);
/**
* drm_gem_vm_open - vma->ops->open implementation for GEM
@@ -951,7 +1024,7 @@
{
struct drm_gem_object *obj = vma->vm_private_data;
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
@@ -983,19 +1056,11 @@
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
+ int ret;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
-
- if (!dev->driver->gem_vm_ops)
- return -EINVAL;
-
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = dev->driver->gem_vm_ops;
- vma->vm_private_data = obj;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
@@ -1004,6 +1069,30 @@
* by a vm_open due to mremap or partial unmap or whatever).
*/
drm_gem_object_get(obj);
+
+ vma->vm_private_data = obj;
+
+ if (obj->funcs && obj->funcs->mmap) {
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret) {
+ drm_gem_object_put(obj);
+ return ret;
+ }
+ WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
+ } else {
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else {
+ drm_gem_object_put(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ }
return 0;
}
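For reference, a hedged sketch of a &drm_gem_object_funcs.mmap callback under the new dispatch: on this path the core no longer forces VM_IO/VM_PFNMAP or write-combined page protections, so the callback owns vm_flags and must keep VM_DONTEXPAND set (the WARN_ON above checks this); foo_gem_vm_ops is hypothetical:

static int foo_gem_mmap(struct drm_gem_object *obj,
			struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_ops = &foo_gem_vm_ops;

	return 0;
}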
@@ -1060,13 +1149,13 @@
return -EINVAL;
if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return -EACCES;
}
if (node->readonly) {
if (vma->vm_flags & VM_WRITE) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return -EINVAL;
}
@@ -1076,7 +1165,7 @@
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -1094,6 +1183,223 @@
drm_printf_indent(p, indent, "imported=%s\n",
obj->import_attach ? "yes" : "no");
- if (obj->dev->driver->gem_print_info)
- obj->dev->driver->gem_print_info(p, indent, obj);
+ if (obj->funcs && obj->funcs->print_info)
+ obj->funcs->print_info(p, indent, obj);
}
+
+int drm_gem_pin(struct drm_gem_object *obj)
+{
+ if (obj->funcs && obj->funcs->pin)
+ return obj->funcs->pin(obj);
+ else if (obj->dev->driver->gem_prime_pin)
+ return obj->dev->driver->gem_prime_pin(obj);
+ else
+ return 0;
+}
+
+void drm_gem_unpin(struct drm_gem_object *obj)
+{
+ if (obj->funcs && obj->funcs->unpin)
+ obj->funcs->unpin(obj);
+ else if (obj->dev->driver->gem_prime_unpin)
+ obj->dev->driver->gem_prime_unpin(obj);
+}
+
+void *drm_gem_vmap(struct drm_gem_object *obj)
+{
+ void *vaddr;
+
+ if (obj->funcs && obj->funcs->vmap)
+ vaddr = obj->funcs->vmap(obj);
+ else if (obj->dev->driver->gem_prime_vmap)
+ vaddr = obj->dev->driver->gem_prime_vmap(obj);
+ else
+ vaddr = ERR_PTR(-EOPNOTSUPP);
+
+ if (!vaddr)
+ vaddr = ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ if (!vaddr)
+ return;
+
+ if (obj->funcs && obj->funcs->vunmap)
+ obj->funcs->vunmap(obj, vaddr);
+ else if (obj->dev->driver->gem_prime_vunmap)
+ obj->dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+
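Usage sketch for the vmap pair (src/len assumed from context): drm_gem_vmap() maps a NULL result to ERR_PTR(-ENOMEM), so callers only need the IS_ERR() check:

void *vaddr = drm_gem_vmap(obj);

if (IS_ERR(vaddr))
	return PTR_ERR(vaddr);

memcpy(vaddr, src, len);
drm_gem_vunmap(obj, vaddr);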
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then
+ * drm_gem_unlock_reservations().
+ *
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
+ */
+int
+drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int contended = -1;
+ int i, ret;
+
+ ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
+retry:
+ if (contended != -1) {
+ struct drm_gem_object *obj = objs[contended];
+
+ ret = dma_resv_lock_slow_interruptible(obj->resv,
+ acquire_ctx);
+ if (ret) {
+ ww_acquire_fini(acquire_ctx);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ if (i == contended)
+ continue;
+
+ ret = dma_resv_lock_interruptible(objs[i]->resv,
+ acquire_ctx);
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ dma_resv_unlock(objs[j]->resv);
+
+ if (contended != -1 && contended >= i)
+ dma_resv_unlock(objs[contended]->resv);
+
+ if (ret == -EDEADLK) {
+ contended = i;
+ goto retry;
+ }
+
+ ww_acquire_fini(acquire_ctx);
+ return ret;
+ }
+ }
+
+ ww_acquire_done(acquire_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_lock_reservations);
+
+void
+drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ dma_resv_unlock(objs[i]->resv);
+
+ ww_acquire_fini(acquire_ctx);
+}
+EXPORT_SYMBOL(drm_gem_unlock_reservations);
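A minimal submission sketch tying the two helpers together, per the lock/submit/unlock flow described in the kernel-doc above (the job fields are hypothetical):

struct ww_acquire_ctx ctx;
int i, ret;

ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
if (ret)
	return ret;

/* publish the job's fence to every BO while the set is locked */
for (i = 0; i < job->bo_count; i++)
	dma_resv_add_excl_fence(job->bos[i]->resv, job->done_fence);

drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);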
+
+/**
+ * drm_gem_fence_array_add - Adds the fence to an array of fences to be
+ * waited on, deduplicating fences from the same context.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_gem_fence_array_add(struct xarray *fence_array,
+ struct dma_fence *fence)
+{
+ struct dma_fence *entry;
+ unsigned long index;
+ u32 id = 0;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ /* Deduplicate if we already depend on a fence from the same context.
+ * This lets the size of the array of deps scale with the number of
+ * engines involved, rather than the number of BOs.
+ */
+ xa_for_each(fence_array, index, entry) {
+ if (entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ xa_store(fence_array, index, fence, GFP_KERNEL);
+ } else {
+ dma_fence_put(fence);
+ }
+ return 0;
+ }
+
+ ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
+ if (ret != 0)
+ dma_fence_put(fence);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add);
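A hedged example of feeding an explicit in-fence (e.g. from a sync file) into the array; the helper consumes the fence reference whether it stores, replaces, or drops it (args->in_sync_fd and deps are assumed from context):

struct dma_fence *in_fence = sync_file_get_fence(args->in_sync_fd);

if (!in_fence)
	return -EINVAL;

/* deduplicated against earlier fences from the same context */
ret = drm_gem_fence_array_add(&deps, in_fence);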
+
+/**
+ * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
+ * in the GEM object's reservation object to an array of dma_fences for use in
+ * scheduling a rendering job.
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ */
+int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ int ret;
+ struct dma_fence **fences;
+ unsigned int i, fence_count;
+
+ if (!write) {
+ struct dma_fence *fence =
+ dma_resv_get_excl_rcu(obj->resv);
+
+ return drm_gem_fence_array_add(fence_array, fence);
+ }
+
+ ret = dma_resv_get_fences_rcu(obj->resv, NULL,
+ &fence_count, &fences);
+ if (ret || !fence_count)
+ return ret;
+
+ for (i = 0; i < fence_count; i++) {
+ ret = drm_gem_fence_array_add(fence_array, fences[i]);
+ if (ret)
+ break;
+ }
+
+ for (; i < fence_count; i++)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
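Finally, an illustrative loop gathering implicit dependencies for a whole job before scheduling (the job fields are hypothetical); the xarray must be allocation-capable, since drm_gem_fence_array_add() stores entries with xa_alloc():

struct xarray deps;
int i, ret = 0;

xa_init_flags(&deps, XA_FLAGS_ALLOC);

for (i = 0; i < job->bo_count && !ret; i++)
	ret = drm_gem_fence_array_add_implicit(&deps, job->bos[i],
					       job->bo_writes[i]);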
--
Gitblit v1.6.2