From 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 13 May 2024 10:30:14 +0000
Subject: [PATCH] drm/ttm: convert ttm_bo_util.c to ttm_resource and dma_resv
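
Convert kernel/drivers/gpu/drm/ttm/ttm_bo_util.c to the current TTM and
reservation interfaces:

- replace struct ttm_mem_reg with struct ttm_resource, and
  ttm_bo_mem_put() with ttm_resource_free()
- replace reservation_object with dma_resv
- drop the io_reserve LRU and its ttm_mem_io_lock()/unlock() paths;
  ttm_mem_io_reserve()/free() now key off mem->bus.offset/addr
- drop the ttm_kmap_atomic_prot() wrappers in favour of
  kmap_atomic_prot()/kunmap_atomic()
- fold ttm_bo_pipeline_move() into ttm_bo_move_accel_cleanup(), which
  gains a 'pipeline' parameter
- use ttm_bo_assign_mem() instead of open-coded *old_mem = *new_mem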

---
 kernel/drivers/gpu/drm/ttm/ttm_bo_util.c |  528 ++++++++++++++++++----------------------------------------
 1 file changed, 162 insertions(+), 366 deletions(-)
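
Note: call sites of the removed ttm_bo_pipeline_move() switch to the
unified helper. A minimal sketch of the caller-side change (the driver
call site below is illustrative, not part of this patch):

	/* before: pipelined move via the dedicated helper */
	ret = ttm_bo_pipeline_move(bo, fence, evict, new_mem);

	/* after: same path through ttm_bo_move_accel_cleanup(),
	 * with the new 'pipeline' argument set to true */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);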

diff --git a/kernel/drivers/gpu/drm/ttm/ttm_bo_util.c b/kernel/drivers/gpu/drm/ttm/ttm_bo_util.c
index 40904e8..fb2a25f 100644
--- a/kernel/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/kernel/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -38,7 +38,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_transfer_obj {
 	struct ttm_buffer_object base;
@@ -47,15 +47,15 @@
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-	ttm_bo_mem_put(bo, &bo->mem);
+	ttm_resource_free(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 		   struct ttm_operation_ctx *ctx,
-		    struct ttm_mem_reg *new_mem)
+		    struct ttm_resource *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
-	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = &bo->mem;
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -67,10 +67,8 @@
 			return ret;
 		}
 
-		ttm_tt_unbind(ttm);
+		ttm_bo_tt_unbind(bo);
 		ttm_bo_free_old_node(bo);
-		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
-				TTM_PL_MASK_MEM);
 		old_mem->mem_type = TTM_PL_SYSTEM;
 	}
 
@@ -79,154 +77,70 @@
 		return ret;
 
 	if (new_mem->mem_type != TTM_PL_SYSTEM) {
-		ret = ttm_tt_bind(ttm, new_mem, ctx);
+
+		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = ttm_bo_tt_bind(bo, new_mem);
 		if (unlikely(ret != 0))
 			return ret;
 	}
 
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-
+	ttm_bo_assign_mem(bo, new_mem);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+		       struct ttm_resource *mem)
 {
-	if (likely(man->io_reserve_fastpath))
+	if (mem->bus.offset || mem->bus.addr)
 		return 0;
 
-	if (interruptible)
-		return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-	mutex_lock(&man->io_reserve_mutex);
-	return 0;
-}
-EXPORT_SYMBOL(ttm_mem_io_lock);
-
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
-{
-	if (likely(man->io_reserve_fastpath))
-		return;
-
-	mutex_unlock(&man->io_reserve_mutex);
-}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
-
-static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
-{
-	struct ttm_buffer_object *bo;
-
-	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
-		return -EAGAIN;
-
-	bo = list_first_entry(&man->io_reserve_lru,
-			      struct ttm_buffer_object,
-			      io_reserve_lru);
-	list_del_init(&bo->io_reserve_lru);
-	ttm_bo_unmap_virtual_locked(bo);
-
-	return 0;
-}
-
-
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-		       struct ttm_mem_reg *mem)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	int ret = 0;
-
+	mem->bus.is_iomem = false;
 	if (!bdev->driver->io_mem_reserve)
 		return 0;
-	if (likely(man->io_reserve_fastpath))
-		return bdev->driver->io_mem_reserve(bdev, mem);
 
-	if (bdev->driver->io_mem_reserve &&
-	    mem->bus.io_reserved_count++ == 0) {
-retry:
-		ret = bdev->driver->io_mem_reserve(bdev, mem);
-		if (ret == -EAGAIN) {
-			ret = ttm_mem_io_evict(man);
-			if (ret == 0)
-				goto retry;
-		}
-	}
-	return ret;
+	return bdev->driver->io_mem_reserve(bdev, mem);
 }
-EXPORT_SYMBOL(ttm_mem_io_reserve);
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
-		     struct ttm_mem_reg *mem)
+		     struct ttm_resource *mem)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	if (likely(man->io_reserve_fastpath))
+	if (!mem->bus.offset && !mem->bus.addr)
 		return;
 
-	if (bdev->driver->io_mem_reserve &&
-	    --mem->bus.io_reserved_count == 0 &&
-	    bdev->driver->io_mem_free)
+	if (bdev->driver->io_mem_free)
 		bdev->driver->io_mem_free(bdev, mem);
 
-}
-EXPORT_SYMBOL(ttm_mem_io_free);
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	int ret;
-
-	if (!mem->bus.io_reserved_vm) {
-		struct ttm_mem_type_manager *man =
-			&bo->bdev->man[mem->mem_type];
-
-		ret = ttm_mem_io_reserve(bo->bdev, mem);
-		if (unlikely(ret != 0))
-			return ret;
-		mem->bus.io_reserved_vm = true;
-		if (man->use_io_reserve_lru)
-			list_add_tail(&bo->io_reserve_lru,
-				      &man->io_reserve_lru);
-	}
-	return 0;
+	mem->bus.offset = 0;
+	mem->bus.addr = NULL;
 }
 
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
+static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
+			       struct ttm_resource *mem,
+			       void **virtual)
 {
-	struct ttm_mem_reg *mem = &bo->mem;
-
-	if (mem->bus.io_reserved_vm) {
-		mem->bus.io_reserved_vm = false;
-		list_del_init(&bo->io_reserve_lru);
-		ttm_mem_io_free(bo->bdev, mem);
-	}
-}
-
-static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
-			void **virtual)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
-	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
 	if (mem->bus.addr) {
 		addr = mem->bus.addr;
 	} else {
+		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
+			addr = ioremap_wc(mem->bus.offset, bus_size);
 		else
-			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+			addr = ioremap(mem->bus.offset, bus_size);
 		if (!addr) {
-			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
-			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -234,18 +148,13 @@
 	return 0;
 }
 
-static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
-			 void *virtual)
+static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
+				struct ttm_resource *mem,
+				void *virtual)
 {
-	struct ttm_mem_type_manager *man;
-
-	man = &bdev->man[mem->mem_type];
-
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
-	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -261,54 +170,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0,  __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-		return kmap_atomic(page);
-	else
-		return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
-	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-		kunmap_atomic(addr);
-	else
-		__ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,
 				pgprot_t prot)
@@ -320,13 +181,13 @@
 		return -ENOMEM;
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = ttm_kmap_atomic_prot(d, prot);
+	dst = kmap_atomic_prot(d, prot);
 	if (!dst)
 		return -ENOMEM;
 
 	memcpy_fromio(dst, src, PAGE_SIZE);
 
-	ttm_kunmap_atomic_prot(dst, prot);
+	kunmap_atomic(dst);
 
 	return 0;
 }
@@ -342,26 +203,26 @@
 		return -ENOMEM;
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = ttm_kmap_atomic_prot(s, prot);
+	src = kmap_atomic_prot(s, prot);
 	if (!src)
 		return -ENOMEM;
 
 	memcpy_toio(dst, src, PAGE_SIZE);
 
-	ttm_kunmap_atomic_prot(src, prot);
+	kunmap_atomic(src);
 
 	return 0;
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		       struct ttm_operation_ctx *ctx,
-		       struct ttm_mem_reg *new_mem)
+		       struct ttm_resource *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	struct ttm_tt *ttm = bo->ttm;
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg old_copy = *old_mem;
+	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource old_copy = *old_mem;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
@@ -374,10 +235,10 @@
 	if (ret)
 		return ret;
 
-	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
 	if (ret)
 		return ret;
-	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
 	if (ret)
 		goto out;
 
@@ -391,7 +252,7 @@
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
 	if (old_iomap == NULL &&
-	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
 			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
@@ -401,7 +262,7 @@
 	 * TTM might be null for moves within the same region.
 	 */
 	if (ttm) {
-		ret = ttm_tt_populate(ttm, ctx);
+		ret = ttm_tt_populate(bdev, ttm, ctx);
 		if (ret)
 			goto out1;
 	}
@@ -436,24 +297,22 @@
 	mb();
 out2:
 	old_copy = *old_mem;
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
 
-	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
-		ttm_tt_destroy(ttm);
-		bo->ttm = NULL;
-	}
+	ttm_bo_assign_mem(bo, new_mem);
+
+	if (!man->use_tt)
+		ttm_bo_tt_destroy(bo);
 
 out1:
-	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+	ttm_resource_iounmap(bdev, old_mem, new_iomap);
 out:
-	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+	ttm_resource_iounmap(bdev, &old_copy, old_iomap);
 
 	/*
 	 * On error, keep the mm node!
 	 */
 	if (!ret)
-		ttm_bo_mem_put(bo, &old_copy);
+		ttm_resource_free(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -503,23 +362,22 @@
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	atomic_inc(&bo->bdev->glob->bo_count);
+	atomic_inc(&ttm_bo_glob.bo_count);
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
-	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
-	mutex_init(&fbo->base.wu_mutex);
 	fbo->base.moving = NULL;
-	drm_vma_node_reset(&fbo->base.vma_node);
-	atomic_set(&fbo->base.cpu_writers, 0);
+	drm_vma_node_reset(&fbo->base.base.vma_node);
 
-	kref_init(&fbo->base.list_kref);
 	kref_init(&fbo->base.kref);
 	fbo->base.destroy = &ttm_transfered_destroy;
 	fbo->base.acc_size = 0;
-	fbo->base.resv = &fbo->base.ttm_resv;
-	reservation_object_init(fbo->base.resv);
-	ret = reservation_object_trylock(fbo->base.resv);
+	if (bo->type != ttm_bo_type_sg)
+		fbo->base.base.resv = &fbo->base.base._resv;
+
+	dma_resv_init(&fbo->base.base._resv);
+	fbo->base.base.dev = NULL;
+	ret = dma_resv_trylock(&fbo->base.base._resv);
 	WARN_ON(!ret);
 
 	*new_obj = &fbo->base;
@@ -539,13 +397,13 @@
 		tmp = pgprot_noncached(tmp);
 #endif
 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
-    defined(__powerpc__)
+    defined(__powerpc__) || defined(__mips__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__sparc__) || defined(__mips__)
+#if defined(__sparc__)
 	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
@@ -557,7 +415,7 @@
 			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_reg *mem = &bo->mem;
+	struct ttm_resource *mem = &bo->mem;
 
 	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
@@ -565,11 +423,11 @@
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
 						  size);
 		else
-			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
-						       size);
+			map->virtual = ioremap(bo->mem.bus.offset + offset,
+					       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -579,7 +437,7 @@
 			   unsigned long num_pages,
 			   struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_reg *mem = &bo->mem;
+	struct ttm_resource *mem = &bo->mem;
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false
@@ -590,7 +448,7 @@
 
 	BUG_ON(!ttm);
 
-	ret = ttm_tt_populate(ttm, &ctx);
+	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
 	if (ret)
 		return ret;
 
@@ -620,8 +478,6 @@
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_type_manager *man =
-		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -631,13 +487,8 @@
 		return -EINVAL;
 	if (start_page > bo->num_pages)
 		return -EINVAL;
-#if 0
-	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-#endif
-	(void) ttm_mem_io_lock(man, false);
+
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -652,10 +503,6 @@
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_buffer_object *bo = map->bo;
-	struct ttm_mem_type_manager *man =
-		&bo->bdev->man[bo->mem.mem_type];
-
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
@@ -673,167 +520,116 @@
 	default:
 		BUG();
 	}
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
+static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
+				 bool dst_use_tt)
+{
+	int ret;
+	ret = ttm_bo_wait(bo, false, false);
+	if (ret)
+		return ret;
+
+	if (!dst_use_tt)
+		ttm_bo_tt_destroy(bo);
+	ttm_bo_free_old_node(bo);
+	return 0;
+}
+
+static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
+				struct dma_fence *fence,
+				bool dst_use_tt)
+{
+	struct ttm_buffer_object *ghost_obj;
+	int ret;
+
+	/**
+	 * This should help pipeline ordinary buffer moves.
+	 *
+	 * Hang old buffer memory on a new buffer object,
+	 * and leave it to be released when the GPU
+	 * operation has completed.
+	 */
+
+	dma_fence_put(bo->moving);
+	bo->moving = dma_fence_get(fence);
+
+	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+	if (ret)
+		return ret;
+
+	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+
+	/**
+	 * If we're not moving to fixed memory, the TTM object
+	 * needs to stay alive. Otherwise hang it on the ghost
+	 * bo to be unbound and destroyed.
+	 */
+
+	if (dst_use_tt)
+		ghost_obj->ttm = NULL;
+	else
+		bo->ttm = NULL;
+
+	dma_resv_unlock(&ghost_obj->base._resv);
+	ttm_bo_put(ghost_obj);
+	return 0;
+}
+
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+				       struct dma_fence *fence)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+
+	/**
+	 * BO doesn't have a TTM we need to bind/unbind. Just remember
+	 * this eviction and free up the allocation
+	 */
+	spin_lock(&from->move_lock);
+	if (!from->move || dma_fence_is_later(fence, from->move)) {
+		dma_fence_put(from->move);
+		from->move = dma_fence_get(fence);
+	}
+	spin_unlock(&from->move_lock);
+
+	ttm_bo_free_old_node(bo);
+
+	dma_fence_put(bo->moving);
+	bo->moving = dma_fence_get(fence);
+}
+
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct dma_fence *fence,
 			      bool evict,
-			      struct ttm_mem_reg *new_mem)
+			      bool pipeline,
+			      struct ttm_resource *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	int ret;
-	struct ttm_buffer_object *ghost_obj;
+	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+	int ret = 0;
 
-	reservation_object_add_excl_fence(bo->resv, fence);
-	if (evict) {
-		ret = ttm_bo_wait(bo, false, false);
-		if (ret)
-			return ret;
+	dma_resv_add_excl_fence(bo->base.resv, fence);
+	if (!evict)
+		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+	else if (!from->use_tt && pipeline)
+		ttm_bo_move_pipeline_evict(bo, fence);
+	else
+		ret = ttm_bo_wait_free_node(bo, man->use_tt);
 
-		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
-			ttm_tt_destroy(bo->ttm);
-			bo->ttm = NULL;
-		}
-		ttm_bo_free_old_node(bo);
-	} else {
-		/**
-		 * This should help pipeline ordinary buffer moves.
-		 *
-		 * Hang old buffer memory on a new buffer object,
-		 * and leave it to be released when the GPU
-		 * operation has completed.
-		 */
+	if (ret)
+		return ret;
 
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-
-		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-		if (ret)
-			return ret;
-
-		reservation_object_add_excl_fence(ghost_obj->resv, fence);
-
-		/**
-		 * If we're not moving to fixed memory, the TTM object
-		 * needs to stay alive. Otherwhise hang it on the ghost
-		 * bo to be unbound and destroyed.
-		 */
-
-		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
-			ghost_obj->ttm = NULL;
-		else
-			bo->ttm = NULL;
-
-		ttm_bo_unreserve(ghost_obj);
-		ttm_bo_put(ghost_obj);
-	}
-
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
+	ttm_bo_assign_mem(bo, new_mem);
 
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
-
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct dma_fence *fence, bool evict,
-			 struct ttm_mem_reg *new_mem)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
-	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
-
-	int ret;
-
-	reservation_object_add_excl_fence(bo->resv, fence);
-
-	if (!evict) {
-		struct ttm_buffer_object *ghost_obj;
-
-		/**
-		 * This should help pipeline ordinary buffer moves.
-		 *
-		 * Hang old buffer memory on a new buffer object,
-		 * and leave it to be released when the GPU
-		 * operation has completed.
-		 */
-
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-
-		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-		if (ret)
-			return ret;
-
-		reservation_object_add_excl_fence(ghost_obj->resv, fence);
-
-		/**
-		 * If we're not moving to fixed memory, the TTM object
-		 * needs to stay alive. Otherwhise hang it on the ghost
-		 * bo to be unbound and destroyed.
-		 */
-
-		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
-			ghost_obj->ttm = NULL;
-		else
-			bo->ttm = NULL;
-
-		ttm_bo_unreserve(ghost_obj);
-		ttm_bo_put(ghost_obj);
-
-	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
-
-		/**
-		 * BO doesn't have a TTM we need to bind/unbind. Just remember
-		 * this eviction and free up the allocation
-		 */
-
-		spin_lock(&from->move_lock);
-		if (!from->move || dma_fence_is_later(fence, from->move)) {
-			dma_fence_put(from->move);
-			from->move = dma_fence_get(fence);
-		}
-		spin_unlock(&from->move_lock);
-
-		ttm_bo_free_old_node(bo);
-
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-
-	} else {
-		/**
-		 * Last resort, wait for the move to be completed.
-		 *
-		 * Should never happen in pratice.
-		 */
-
-		ret = ttm_bo_wait(bo, false, false);
-		if (ret)
-			return ret;
-
-		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
-			ttm_tt_destroy(bo->ttm);
-			bo->ttm = NULL;
-		}
-		ttm_bo_free_old_node(bo);
-	}
-
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-
-	return 0;
-}
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
 
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
@@ -844,7 +640,7 @@
 	if (ret)
 		return ret;
 
-	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
+	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
 	/* Last resort, wait for the BO to be idle when we are OOM */
 	if (ret)
 		ttm_bo_wait(bo, false, false);
@@ -853,7 +649,7 @@
 	bo->mem.mem_type = TTM_PL_SYSTEM;
 	bo->ttm = NULL;
 
-	ttm_bo_unreserve(ghost);
+	dma_resv_unlock(&ghost->base._resv);
 	ttm_bo_put(ghost);
 
 	return 0;

--
Gitblit v1.6.2