From 08f87f769b595151be1afeff53e144f543faa614 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 06 Dec 2023 09:51:13 +0000
Subject: [PATCH] drm/nouveau: convert nouveau_bo.c to the updated TTM and dma_resv APIs

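Convert the buffer-object code to the newer TTM and fencing interfaces:

- split nouveau_bo_new() into nouveau_bo_alloc() and nouveau_bo_init(), so
  the GEM object can be set up before ttm_bo_init(), and free the nvbo on
  the WARN_ON(pi < 0) error path
- pass NOUVEAU_GEM_DOMAIN_* domains instead of TTM_PL_FLAG_* placement
  flags through placement, pin and evict handling, and set ttm_place
  mem_type separately from the caching flags
- switch from ttm_mem_reg/reservation_object to ttm_resource/dma_resv
- add an io_reserve LRU protected by io_reserve_mutex and retry
  io_mem_reserve() on -ENOSPC by unmapping the least recently used BO
- replace the init_mem_type()/invalidate_caches() hooks with the
  ttm_tt_bind()/ttm_tt_unbind()/ttm_tt_destroy() driver callbacks
- recognise the Turing copy-engine class (0xc5b5) for buffer moves
- drop the per-chipset copy/m2mf helpers from this file (assumed to be
  provided elsewhere in the updated driver)

Rough sketch of the new two-step allocation, as nouveau_bo_new() itself
uses it in this patch (illustration only, not code to apply):

    nvbo = nouveau_bo_alloc(cli, &size, &align, domain,
                            tile_mode, tile_flags);
    if (IS_ERR(nvbo))
        return PTR_ERR(nvbo);

    ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
    if (ret)
        return ret;
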
---
 kernel/drivers/gpu/drm/nouveau/nouveau_bo.c |  923 ++++++++++++++++++++-------------------------------------
 1 file changed, 330 insertions(+), 593 deletions(-)

diff --git a/kernel/drivers/gpu/drm/nouveau/nouveau_bo.c b/kernel/drivers/gpu/drm/nouveau/nouveau_bo.c
index d230536..b57dcad 100644
--- a/kernel/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/kernel/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -31,7 +31,7 @@
 #include <linux/swiotlb.h>
 
 #include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include "nouveau_chan.h"
 #include "nouveau_fence.h"
 
 #include "nouveau_bo.h"
@@ -43,6 +43,9 @@
 #include <nvif/class.h>
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>
+
+static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+			       struct ttm_resource *reg);
 
 /*
  * NV10-NV40 tiling helpers
@@ -136,10 +139,17 @@
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	if (unlikely(nvbo->gem.filp))
-		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 	WARN_ON(nvbo->pin_refcnt > 0);
+	nouveau_bo_del_io_reserve_lru(bo);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+
+	/*
+	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
+	 * initialized, so don't attempt to release it.
+	 */
+	if (bo->base.dev)
+		drm_gem_object_release(&bo->base);
+
 	kfree(nvbo);
 }
 
@@ -152,8 +162,7 @@
 }
 
 static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
-		       int *align, u64 *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct nvif_device *device = &drm->client.device;
@@ -185,31 +194,24 @@
 	*size = roundup_64(*size, PAGE_SIZE);
 }
 
-int
-nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
-	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-	       struct sg_table *sg, struct reservation_object *robj,
-	       struct nouveau_bo **pnvbo)
+struct nouveau_bo *
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+		 u32 tile_mode, u32 tile_flags)
 {
 	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
 	struct nvif_mmu *mmu = &cli->mmu;
-	struct nvif_vmm *vmm = &cli->vmm.vmm;
-	size_t acc_size;
-	int type = ttm_bo_type_device;
-	int ret, i, pi = -1;
+	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
+	int i, pi = -1;
 
-	if (!size) {
-		NV_WARN(drm, "skipped size %016llx\n", size);
-		return -EINVAL;
+	if (!*size) {
+		NV_WARN(drm, "skipped size %016llx\n", *size);
+		return ERR_PTR(-EINVAL);
 	}
-
-	if (sg)
-		type = ttm_bo_type_sg;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
@@ -219,7 +221,7 @@
 	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
 	 * into in nouveau_gem_new().
 	 */
-	if (flags & TTM_PL_FLAG_UNCACHED) {
+	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
 		/* Determine if we can get a cache-coherent map, forcing
 		 * uncached mapping if we can't.
 		 */
@@ -231,7 +233,7 @@
 		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
 			kfree(nvbo);
-			return -EINVAL;
+			return ERR_PTR(-EINVAL);
 		}
 
 		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
@@ -241,7 +243,7 @@
 		nvbo->comp = (tile_flags & 0x00030000) >> 16;
 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
 			kfree(nvbo);
-			return -EINVAL;
+			return ERR_PTR(-EINVAL);
 		}
 	} else {
 		nvbo->zeta = (tile_flags & 0x00000007);
@@ -259,9 +261,9 @@
 		 * Skip page sizes that can't support needed domains.
 		 */
 		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
-		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
 			continue;
-		if ((flags & TTM_PL_FLAG_TT) &&
+		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
 		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
 			continue;
 
@@ -273,12 +275,14 @@
 			pi = i;
 
 		/* Stop once the buffer is larger than the current page size. */
-		if (size >= 1ULL << vmm->page[i].shift)
+		if (*size >= 1ULL << vmm->page[i].shift)
 			break;
 	}
 
-	if (WARN_ON(pi < 0))
-		return -EINVAL;
+	if (WARN_ON(pi < 0)) {
+		kfree(nvbo);
+		return ERR_PTR(-EINVAL);
+	}
 
 	/* Disable compression if suitable settings couldn't be found. */
 	if (nvbo->comp && !vmm->page[pi].comp) {
@@ -288,48 +292,101 @@
 	}
 	nvbo->page = vmm->page[pi].shift;
 
-	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
+	nouveau_bo_fixup_align(nvbo, align, size);
+
+	return nvbo;
+}
+
+int
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
+		struct sg_table *sg, struct dma_resv *robj)
+{
+	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
+	size_t acc_size;
+	int ret;
+
+	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
+
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
-	nouveau_bo_placement_set(nvbo, flags, 0);
+	nouveau_bo_placement_set(nvbo, domain, 0);
+	INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
-	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
-				       sizeof(struct nouveau_bo));
-
-	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
-			  type, &nvbo->placement,
-			  align >> PAGE_SHIFT, false, acc_size, sg,
-			  robj, nouveau_bo_del_ttm);
+	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
+			  &nvbo->placement, align >> PAGE_SHIFT, false,
+			  acc_size, sg, robj, nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
+
+	return 0;
+}
+
+int
+nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
+	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
+	       struct sg_table *sg, struct dma_resv *robj,
+	       struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
+				tile_flags);
+	if (IS_ERR(nvbo))
+		return PTR_ERR(nvbo);
+
+	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
+	if (ret)
+		return ret;
 
 	*pnvbo = nvbo;
 	return 0;
 }
 
 static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
+		   uint32_t domain, uint32_t flags)
 {
 	*n = 0;
 
-	if (type & TTM_PL_FLAG_VRAM)
-		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
-	if (type & TTM_PL_FLAG_TT)
-		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
-	if (type & TTM_PL_FLAG_SYSTEM)
-		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+		struct nvif_mmu *mmu = &drm->client.mmu;
+
+		pl[*n].mem_type = TTM_PL_VRAM;
+		pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
+
+		/* Some BARs do not support being ioremapped WC */
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
+			pl[*n].flags &= ~TTM_PL_FLAG_WC;
+
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+		pl[*n].mem_type = TTM_PL_TT;
+		pl[*n].flags = flags;
+
+		if (drm->agp.bridge)
+			pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
+
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+		pl[*n].mem_type = TTM_PL_SYSTEM;
+		pl[(*n)++].flags = flags;
+	}
 }
 
 static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
 	unsigned i, fpfn, lpfn;
 
 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
@@ -356,26 +413,28 @@
 }
 
 void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+			 uint32_t busy)
 {
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_placement *pl = &nvbo->placement;
 	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
 						 TTM_PL_MASK_CACHING) |
 			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 
 	pl->placement = nvbo->placements;
-	set_placement_list(nvbo->placements, &pl->num_placement,
-			   type, flags);
+	set_placement_list(drm, nvbo->placements, &pl->num_placement,
+			   domain, flags);
 
 	pl->busy_placement = nvbo->busy_placements;
-	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
-			   type | busy, flags);
+	set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
+			   domain | busy, flags);
 
-	set_placement_range(nvbo, type);
+	set_placement_range(nvbo, domain);
 }
 
 int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
@@ -387,7 +446,7 @@
 		return ret;
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
-	    memtype == TTM_PL_FLAG_VRAM && contig) {
+	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
 		if (!nvbo->contig) {
 			nvbo->contig = true;
 			force = true;
@@ -396,10 +455,22 @@
 	}
 
 	if (nvbo->pin_refcnt) {
-		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+		bool error = evict;
+
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
+			break;
+		case TTM_PL_TT:
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+		default:
+			break;
+		}
+
+		if (error) {
 			NV_ERROR(drm, "bo %p pinned elsewhere: "
 				      "0x%08x vs 0x%08x\n", bo,
-				 1 << bo->mem.mem_type, memtype);
+				 bo->mem.mem_type, domain);
 			ret = -EBUSY;
 		}
 		nvbo->pin_refcnt++;
@@ -407,14 +478,14 @@
 	}
 
 	if (evict) {
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
 		ret = nouveau_bo_validate(nvbo, false, false);
 		if (ret)
 			goto out;
 	}
 
 	nvbo->pin_refcnt++;
-	nouveau_bo_placement_set(nvbo, memtype, 0);
+	nouveau_bo_placement_set(nvbo, domain, 0);
 
 	/* drop pin_refcnt temporarily, so we don't trip the assertion
 	 * in nouveau_bo_move() that makes sure we're not trying to
@@ -460,7 +531,16 @@
 	if (ref)
 		goto out;
 
-	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+		break;
+	case TTM_PL_TT:
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+		break;
+	default:
+		break;
+	}
 
 	ret = nouveau_bo_validate(nvbo, false, false);
 	if (ret == 0) {
@@ -544,6 +624,26 @@
 					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_del_init(&nvbo->io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 		    bool no_wait_gpu)
@@ -617,73 +717,33 @@
 }
 
 static int
-nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+		    struct ttm_resource *reg)
 {
-	/* We'll do this from user space. */
-	return 0;
+#if IS_ENABLED(CONFIG_AGP)
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+#endif
+	if (!reg)
+		return -EINVAL;
+#if IS_ENABLED(CONFIG_AGP)
+	if (drm->agp.bridge)
+		return ttm_agp_bind(ttm, reg);
+#endif
+	return nouveau_sgdma_bind(bdev, ttm, reg);
 }
 
-static int
-nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
-			 struct ttm_mem_type_manager *man)
+static void
+nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
+#if IS_ENABLED(CONFIG_AGP)
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nvif_mmu *mmu = &drm->client.mmu;
 
-	switch (type) {
-	case TTM_PL_SYSTEM:
-		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
-		man->default_caching = TTM_PL_FLAG_CACHED;
-		break;
-	case TTM_PL_VRAM:
-		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_FLAG_UNCACHED |
-					 TTM_PL_FLAG_WC;
-		man->default_caching = TTM_PL_FLAG_WC;
-
-		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			/* Some BARs do not support being ioremapped WC */
-			const u8 type = mmu->type[drm->ttm.type_vram].type;
-			if (type & NVIF_MEM_UNCACHED) {
-				man->available_caching = TTM_PL_FLAG_UNCACHED;
-				man->default_caching = TTM_PL_FLAG_UNCACHED;
-			}
-
-			man->func = &nouveau_vram_manager;
-			man->io_reserve_fastpath = false;
-			man->use_io_reserve_lru = true;
-		} else {
-			man->func = &ttm_bo_manager_func;
-		}
-		break;
-	case TTM_PL_TT:
-		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
-			man->func = &nouveau_gart_manager;
-		else
-		if (!drm->agp.bridge)
-			man->func = &nv04_gart_manager;
-		else
-			man->func = &ttm_bo_manager_func;
-
-		if (drm->agp.bridge) {
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-			man->available_caching = TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_WC;
-			man->default_caching = TTM_PL_FLAG_WC;
-		} else {
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_CMA;
-			man->available_caching = TTM_PL_MASK_CACHING;
-			man->default_caching = TTM_PL_FLAG_CACHED;
-		}
-
-		break;
-	default:
-		return -EINVAL;
+	if (drm->agp.bridge) {
+		ttm_agp_unbind(ttm);
+		return;
 	}
-	return 0;
+#endif
+	nouveau_sgdma_unbind(bdev, ttm);
 }
 
 static void
@@ -693,374 +753,20 @@
 
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
-					 TTM_PL_FLAG_SYSTEM);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+					 NOUVEAU_GEM_DOMAIN_CPU);
 		break;
 	default:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
 		break;
 	}
 
 	*pl = nvbo->placement;
 }
 
-
-static int
-nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 2);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING  (chan, handle & 0x0000ffff);
-		FIRE_RING (chan);
-	}
-	return ret;
-}
-
-static int
-nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	int ret = RING_SPACE(chan, 10);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, new_reg->num_pages);
-		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
-	}
-	return ret;
-}
-
-static int
-nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 2);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING  (chan, handle);
-	}
-	return ret;
-}
-
-static int
-nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	u64 src_offset = mem->vma[0].addr;
-	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
-	int ret;
-
-	page_count = new_reg->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 8191) ? 8191 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING  (chan, upper_32_bits(src_offset));
-		OUT_RING  (chan, lower_32_bits(src_offset));
-		OUT_RING  (chan, upper_32_bits(dst_offset));
-		OUT_RING  (chan, lower_32_bits(dst_offset));
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, line_count);
-		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING  (chan, 0x00000110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	u64 src_offset = mem->vma[0].addr;
-	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
-	int ret;
-
-	page_count = new_reg->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 2047) ? 2047 : page_count;
-
-		ret = RING_SPACE(chan, 12);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
-		OUT_RING  (chan, upper_32_bits(dst_offset));
-		OUT_RING  (chan, lower_32_bits(dst_offset));
-		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
-		OUT_RING  (chan, upper_32_bits(src_offset));
-		OUT_RING  (chan, lower_32_bits(src_offset));
-		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
-		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
-		OUT_RING  (chan, PAGE_SIZE); /* line_length */
-		OUT_RING  (chan, line_count);
-		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING  (chan, 0x00100110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	u64 src_offset = mem->vma[0].addr;
-	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
-	int ret;
-
-	page_count = new_reg->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 8191) ? 8191 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING  (chan, upper_32_bits(src_offset));
-		OUT_RING  (chan, lower_32_bits(src_offset));
-		OUT_RING  (chan, upper_32_bits(dst_offset));
-		OUT_RING  (chan, lower_32_bits(dst_offset));
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, line_count);
-		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING  (chan, 0x00000110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	int ret = RING_SPACE(chan, 7);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
-		OUT_RING  (chan, 0x00000000 /* COPY */);
-		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
-	}
-	return ret;
-}
-
-static int
-nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	int ret = RING_SPACE(chan, 7);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
-		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
-		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
-	}
-	return ret;
-}
-
-static int
-nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 6);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING  (chan, handle);
-		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
-		OUT_RING  (chan, chan->drm->ntfy.handle);
-		OUT_RING  (chan, chan->vram.handle);
-		OUT_RING  (chan, chan->vram.handle);
-	}
-
-	return ret;
-}
-
-static int
-nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	u64 length = (new_reg->num_pages << PAGE_SHIFT);
-	u64 src_offset = mem->vma[0].addr;
-	u64 dst_offset = mem->vma[1].addr;
-	int src_tiled = !!mem->kind;
-	int dst_tiled = !!nouveau_mem(new_reg)->kind;
-	int ret;
-
-	while (length) {
-		u32 amount, stride, height;
-
-		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
-		if (ret)
-			return ret;
-
-		amount  = min(length, (u64)(4 * 1024 * 1024));
-		stride  = 16 * 4;
-		height  = amount / stride;
-
-		if (src_tiled) {
-			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, stride);
-			OUT_RING  (chan, height);
-			OUT_RING  (chan, 1);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, 0);
-		} else {
-			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
-			OUT_RING  (chan, 1);
-		}
-		if (dst_tiled) {
-			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, stride);
-			OUT_RING  (chan, height);
-			OUT_RING  (chan, 1);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, 0);
-		} else {
-			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
-			OUT_RING  (chan, 1);
-		}
-
-		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
-		OUT_RING  (chan, upper_32_bits(src_offset));
-		OUT_RING  (chan, upper_32_bits(dst_offset));
-		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING  (chan, lower_32_bits(src_offset));
-		OUT_RING  (chan, lower_32_bits(dst_offset));
-		OUT_RING  (chan, stride);
-		OUT_RING  (chan, stride);
-		OUT_RING  (chan, stride);
-		OUT_RING  (chan, height);
-		OUT_RING  (chan, 0x00000101);
-		OUT_RING  (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-		OUT_RING  (chan, 0);
-
-		length -= amount;
-		src_offset += amount;
-		dst_offset += amount;
-	}
-
-	return 0;
-}
-
-static int
-nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 4);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING  (chan, handle);
-		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
-		OUT_RING  (chan, chan->drm->ntfy.handle);
-	}
-
-	return ret;
-}
-
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
-		      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
-{
-	if (reg->mem_type == TTM_PL_TT)
-		return NvDmaTT;
-	return chan->vram.handle;
-}
-
-static int
-nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	u32 src_offset = old_reg->start << PAGE_SHIFT;
-	u32 dst_offset = new_reg->start << PAGE_SHIFT;
-	u32 page_count = new_reg->num_pages;
-	int ret;
-
-	ret = RING_SPACE(chan, 3);
-	if (ret)
-		return ret;
-
-	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
-
-	page_count = new_reg->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 2047) ? 2047 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, NvSubCopy,
-				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
-		OUT_RING  (chan, src_offset);
-		OUT_RING  (chan, dst_offset);
-		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
-		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
-		OUT_RING  (chan, PAGE_SIZE); /* line_length */
-		OUT_RING  (chan, line_count);
-		OUT_RING  (chan, 0x00000101);
-		OUT_RING  (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-		OUT_RING  (chan, 0);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
 static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
-		     struct ttm_mem_reg *reg)
+		     struct ttm_resource *reg)
 {
 	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
 	struct nouveau_mem *new_mem = nouveau_mem(reg);
@@ -1092,7 +798,7 @@
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+		     bool no_wait_gpu, struct ttm_resource *new_reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
@@ -1102,7 +808,7 @@
 
 	/* create temporary vmas for the transfer and attach them to the
 	 * old nvkm_mem node, these will get cleaned up after ttm has
-	 * destroyed the ttm_mem_reg
+	 * destroyed the ttm_resource
 	 */
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nouveau_bo_move_prep(drm, bo, new_reg);
@@ -1119,7 +825,7 @@
 			if (ret == 0) {
 				ret = ttm_bo_move_accel_cleanup(bo,
 								&fence->base,
-								evict,
+								evict, false,
 								new_reg);
 				nouveau_fence_unref(&fence);
 			}
@@ -1132,15 +838,17 @@
 void
 nouveau_bo_move_init(struct nouveau_drm *drm)
 {
-	static const struct {
+	static const struct _method_table {
 		const char *name;
 		int engine;
 		s32 oclass;
 		int (*exec)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
-			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+			    struct ttm_resource *, struct ttm_resource *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
+		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1159,8 +867,8 @@
 		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
 		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
 		{},
-		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
-	}, *mthd = _methods;
+	};
+	const struct _method_table *mthd = _methods;
 	const char *name = "CPU";
 	int ret;
 
@@ -1174,14 +882,14 @@
 		if (chan == NULL)
 			continue;
 
-		ret = nvif_object_init(&chan->user,
+		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
 				       mthd->oclass | (mthd->engine << 16),
 				       mthd->oclass, NULL, 0,
 				       &drm->ttm.copy);
 		if (ret == 0) {
 			ret = mthd->init(chan, drm->ttm.copy.handle);
 			if (ret) {
-				nvif_object_fini(&drm->ttm.copy);
+				nvif_object_dtor(&drm->ttm.copy);
 				continue;
 			}
 
@@ -1197,16 +905,17 @@
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+		      bool no_wait_gpu, struct ttm_resource *new_reg)
 {
 	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
 	struct ttm_place placement_memtype = {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+		.mem_type = TTM_PL_TT,
+		.flags = TTM_PL_MASK_CACHING
 	};
 	struct ttm_placement placement;
-	struct ttm_mem_reg tmp_reg;
+	struct ttm_resource tmp_reg;
 	int ret;
 
 	placement.num_placement = placement.num_busy_placement = 1;
@@ -1218,7 +927,11 @@
 	if (ret)
 		return ret;
 
-	ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
+	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+	if (ret)
+		goto out;
+
+	ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
 	if (ret)
 		goto out;
 
@@ -1228,22 +941,23 @@
 
 	ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
 out:
-	ttm_bo_mem_put(bo, &tmp_reg);
+	ttm_resource_free(bo, &tmp_reg);
 	return ret;
 }
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+		      bool no_wait_gpu, struct ttm_resource *new_reg)
 {
 	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
 	struct ttm_place placement_memtype = {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+		.mem_type = TTM_PL_TT,
+		.flags = TTM_PL_MASK_CACHING
 	};
 	struct ttm_placement placement;
-	struct ttm_mem_reg tmp_reg;
+	struct ttm_resource tmp_reg;
 	int ret;
 
 	placement.num_placement = placement.num_busy_placement = 1;
@@ -1264,13 +978,13 @@
 		goto out;
 
 out:
-	ttm_bo_mem_put(bo, &tmp_reg);
+	ttm_resource_free(bo, &tmp_reg);
 	return ret;
 }
 
 static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
-		     struct ttm_mem_reg *new_reg)
+		     struct ttm_resource *new_reg)
 {
 	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1279,6 +993,8 @@
 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
 	if (bo->destroy != nouveau_bo_del_ttm)
 		return;
+
+	nouveau_bo_del_io_reserve_lru(bo);
 
 	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
 	    mem->mem.page == nvbo->page) {
@@ -1291,10 +1007,18 @@
 			nouveau_vma_unmap(vma);
 		}
 	}
+
+	if (new_reg) {
+		if (new_reg->mm_node)
+			nvbo->offset = (new_reg->start << PAGE_SHIFT);
+		else
+			nvbo->offset = 0;
+	}
+
 }
 
 static int
-nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
 		   struct nouveau_drm_tile **new_tile)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -1321,7 +1045,7 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct dma_fence *fence = reservation_object_get_excl(bo->resv);
+	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
@@ -1330,11 +1054,11 @@
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 		struct ttm_operation_ctx *ctx,
-		struct ttm_mem_reg *new_reg)
+		struct ttm_resource *new_reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_mem_reg *old_reg = &bo->mem;
+	struct ttm_resource *old_reg = &bo->mem;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
@@ -1353,9 +1077,7 @@
 
 	/* Fake bo copy. */
 	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
-		BUG_ON(bo->mem.mm_node != NULL);
-		bo->mem = *new_reg;
-		new_reg->mm_node = NULL;
+		ttm_bo_move_null(bo, new_reg);
 		goto out;
 	}
 
@@ -1398,44 +1120,64 @@
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
 					  filp->private_data);
 }
 
-static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
+static void
+nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
+			       struct ttm_resource *reg)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
+	struct nouveau_mem *mem = nouveau_mem(reg);
+
+	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+		switch (reg->mem_type) {
+		case TTM_PL_TT:
+			if (mem->kind)
+				nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		case TTM_PL_VRAM:
+			nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
+{
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nouveau_mem *mem = nouveau_mem(reg);
+	int ret;
 
-	reg->bus.addr = NULL;
-	reg->bus.offset = 0;
-	reg->bus.size = reg->num_pages << PAGE_SHIFT;
-	reg->bus.base = 0;
-	reg->bus.is_iomem = false;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+retry:
 	switch (reg->mem_type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
-		return 0;
+		ret = 0;
+		goto out;
 	case TTM_PL_TT:
 #if IS_ENABLED(CONFIG_AGP)
 		if (drm->agp.bridge) {
-			reg->bus.offset = reg->start << PAGE_SHIFT;
-			reg->bus.base = drm->agp.base;
+			reg->bus.offset = (reg->start << PAGE_SHIFT) +
+				drm->agp.base;
 			reg->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
-		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
+		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
+		    !mem->kind) {
 			/* untiled */
+			ret = 0;
 			break;
-		/* fallthrough, tiled memory */
+		}
+		fallthrough;	/* tiled memory */
 	case TTM_PL_VRAM:
-		reg->bus.offset = reg->start << PAGE_SHIFT;
-		reg->bus.base = device->func->resource_addr(device, 1);
+		reg->bus.offset = (reg->start << PAGE_SHIFT) +
+			device->func->resource_addr(device, 1);
 		reg->bus.is_iomem = true;
 		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
 			union {
@@ -1444,7 +1186,6 @@
 			} args;
 			u64 handle, length;
 			u32 argc = 0;
-			int ret;
 
 			switch (mem->mem.object.oclass) {
 			case NVIF_CLASS_MEM_NV50:
@@ -1468,38 +1209,48 @@
 			ret = nvif_object_map_handle(&mem->mem.object,
 						     &args, argc,
 						     &handle, &length);
-			if (ret != 1)
-				return ret ? ret : -EINVAL;
+			if (ret != 1) {
+				if (WARN_ON(ret == 0))
+					ret = -EINVAL;
+				goto out;
+			}
 
-			reg->bus.base = 0;
 			reg->bus.offset = handle;
 		}
+		ret = 0;
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
 	}
-	return 0;
+
+out:
+	if (ret == -ENOSPC) {
+		struct nouveau_bo *nvbo;
+
+		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
+						typeof(*nvbo),
+						io_reserve_lru);
+		if (nvbo) {
+			list_del_init(&nvbo->io_reserve_lru);
+			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+					   bdev->dev_mapping);
+			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+			goto retry;
+		}
+
+	}
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+	return ret;
 }
 
 static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_mem *mem = nouveau_mem(reg);
 
-	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
-		switch (reg->mem_type) {
-		case TTM_PL_TT:
-			if (mem->kind)
-				nvif_object_unmap_handle(&mem->mem.object);
-			break;
-		case TTM_PL_VRAM:
-			nvif_object_unmap_handle(&mem->mem.object);
-			break;
-		default:
-			break;
-		}
-	}
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	nouveau_ttm_io_mem_free_locked(drm, reg);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
 }
 
 static int
@@ -1520,7 +1271,8 @@
 			return 0;
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+			nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+						 0);
 
 			ret = nouveau_bo_validate(nvbo, false, false);
 			if (ret)
@@ -1544,37 +1296,36 @@
 		nvbo->busy_placements[i].lpfn = mappable;
 	}
 
-	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
 	return nouveau_bo_validate(nvbo, false, false);
 }
 
 static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
-	unsigned i;
-	int r;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (slave && ttm->sg) {
 		/* make userspace faulting work */
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 ttm_dma->dma_address, ttm->num_pages);
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
-	drm = nouveau_bdev(ttm->bdev);
+	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		return ttm_agp_tt_populate(ttm, ctx);
+		return ttm_pool_populate(ttm, ctx);
 	}
 #endif
 
@@ -1583,51 +1334,27 @@
 		return ttm_dma_populate((void *)ttm, dev, ctx);
 	}
 #endif
-
-	r = ttm_pool_populate(ttm, ctx);
-	if (r) {
-		return r;
-	}
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		dma_addr_t addr;
-
-		addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
-				    DMA_BIDIRECTIONAL);
-
-		if (dma_mapping_error(dev, addr)) {
-			while (i--) {
-				dma_unmap_page(dev, ttm_dma->dma_address[i],
-					       PAGE_SIZE, DMA_BIDIRECTIONAL);
-				ttm_dma->dma_address[i] = 0;
-			}
-			ttm_pool_unpopulate(ttm);
-			return -EFAULT;
-		}
-
-		ttm_dma->dma_address[i] = addr;
-	}
-	return 0;
+	return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
 }
 
 static void
-nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+			  struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
-	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
 	if (slave)
 		return;
 
-	drm = nouveau_bdev(ttm->bdev);
+	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		ttm_agp_tt_unpopulate(ttm);
+		ttm_pool_unpopulate(ttm);
 		return;
 	}
 #endif
@@ -1639,33 +1366,43 @@
 	}
 #endif
 
-	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm_dma->dma_address[i]) {
-			dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
-				       DMA_BIDIRECTIONAL);
-		}
-	}
+	ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
+}
 
-	ttm_pool_unpopulate(ttm);
+static void
+nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
+		       struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	if (drm->agp.bridge) {
+		ttm_agp_unbind(ttm);
+		ttm_tt_destroy_common(bdev, ttm);
+		ttm_agp_destroy(ttm);
+		return;
+	}
+#endif
+	nouveau_sgdma_destroy(bdev, ttm);
 }
 
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
 {
-	struct reservation_object *resv = nvbo->bo.resv;
+	struct dma_resv *resv = nvbo->bo.base.resv;
 
 	if (exclusive)
-		reservation_object_add_excl_fence(resv, &fence->base);
+		dma_resv_add_excl_fence(resv, &fence->base);
 	else if (fence)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		dma_resv_add_shared_fence(resv, &fence->base);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
-	.invalidate_caches = nouveau_bo_invalidate_caches,
-	.init_mem_type = nouveau_bo_init_mem_type,
+	.ttm_tt_bind = &nouveau_ttm_tt_bind,
+	.ttm_tt_unbind = &nouveau_ttm_tt_unbind,
+	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = nouveau_bo_evict_flags,
 	.move_notify = nouveau_bo_move_ntfy,

--
Gitblit v1.6.2