From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] add boot partition size

---
 kernel/drivers/gpu/drm/radeon/radeon_object.c | 89 +++++++++++++++++++++++---------------------
 1 files changed, 46 insertions(+), 43 deletions(-)

diff --git a/kernel/drivers/gpu/drm/radeon/radeon_object.c b/kernel/drivers/gpu/drm/radeon/radeon_object.c
index ba2fd29..316e35d 100644
--- a/kernel/drivers/gpu/drm/radeon/radeon_object.c
+++ b/kernel/drivers/gpu/drm/radeon/radeon_object.c
@@ -29,14 +29,17 @@
  * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  * Dave Airlie
  */
+
+#include <linux/io.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
+
 #include <drm/drm_cache.h>
+#include <drm/drm_prime.h>
+#include <drm/radeon_drm.h>
+
 #include "radeon.h"
 #include "radeon_trace.h"
-
 
 int radeon_ttm_init(struct radeon_device *rdev);
 void radeon_ttm_fini(struct radeon_device *rdev);
@@ -82,9 +85,9 @@
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
 	WARN_ON_ONCE(!list_empty(&bo->va));
-	if (bo->gem_base.import_attach)
-		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
-	drm_gem_object_release(&bo->gem_base);
+	if (bo->tbo.base.import_attach)
+		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+	drm_gem_object_release(&bo->tbo.base);
 	kfree(bo);
 }
 
@@ -109,58 +112,58 @@
 		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
 			rbo->placements[c].fpfn =
 				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			rbo->placements[c].mem_type = TTM_PL_VRAM;
 			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-						     TTM_PL_FLAG_UNCACHED |
-						     TTM_PL_FLAG_VRAM;
+						     TTM_PL_FLAG_UNCACHED;
 		}
 
 		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_VRAM;
 		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-					     TTM_PL_FLAG_UNCACHED |
-					     TTM_PL_FLAG_VRAM;
+					     TTM_PL_FLAG_UNCACHED;
 	}
 
 	if (domain & RADEON_GEM_DOMAIN_GTT) {
 		if (rbo->flags & RADEON_GEM_GTT_UC) {
 			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_TT;
+			rbo->placements[c].mem_type = TTM_PL_TT;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
 
 		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
 			   (rbo->rdev->flags & RADEON_IS_AGP)) {
 			rbo->placements[c].fpfn = 0;
+			rbo->placements[c].mem_type = TTM_PL_TT;
 			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-				TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_TT;
+				TTM_PL_FLAG_UNCACHED;
 		} else {
 			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
-						     TTM_PL_FLAG_TT;
+			rbo->placements[c].mem_type = TTM_PL_TT;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
 		}
 	}
 
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
 		if (rbo->flags & RADEON_GEM_GTT_UC) {
 			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_SYSTEM;
+			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
 
 		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
 		    rbo->rdev->flags & RADEON_IS_AGP) {
 			rbo->placements[c].fpfn = 0;
+			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
 			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-				TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_SYSTEM;
+				TTM_PL_FLAG_UNCACHED;
 		} else {
 			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
-						     TTM_PL_FLAG_SYSTEM;
+			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
 		}
 	}
 	if (!c) {
 		rbo->placements[c].fpfn = 0;
-		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
-					     TTM_PL_FLAG_SYSTEM;
+		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+		rbo->placements[c++].flags = TTM_PL_MASK_CACHING;
 	}
 
 	rbo->placement.num_placement = c;
@@ -168,7 +171,7 @@
 
 	for (i = 0; i < c; ++i) {
 		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
-		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
 		    !rbo->placements[i].fpfn)
 			rbo->placements[i].lpfn =
 				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
@@ -180,7 +183,7 @@
 int radeon_bo_create(struct radeon_device *rdev,
 		     unsigned long size, int byte_align, bool kernel,
 		     u32 domain, u32 flags, struct sg_table *sg,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     struct radeon_bo **bo_ptr)
 {
 	struct radeon_bo *bo;
@@ -206,7 +209,7 @@
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
-	drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
+	drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
 	bo->rdev = rdev;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
@@ -314,11 +317,9 @@
 void radeon_bo_unref(struct radeon_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
-	struct radeon_device *rdev;
 
 	if ((*bo) == NULL)
 		return;
-	rdev = (*bo)->rdev;
 	tbo = &((*bo)->tbo);
 	ttm_bo_put(tbo);
 	*bo = NULL;
@@ -330,7 +331,7 @@
 	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
-	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
 		return -EPERM;
 
 	if (bo->pin_count) {
@@ -359,7 +360,7 @@
 	radeon_ttm_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		/* force to pin into visible video ram */
-		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
 		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
 		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
 			bo->placements[i].lpfn =
@@ -421,11 +422,13 @@
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-	if (0 && (rdev->flags & RADEON_IS_IGP)) {
+#ifndef CONFIG_HIBERNATION
+	if (rdev->flags & RADEON_IS_IGP) {
 		if (rdev->mc.igp_sideport_enabled == false)
 			/* Useless to evict on IGP chips */
 			return 0;
 	}
+#endif
 	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }
 
@@ -439,13 +442,13 @@
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
-			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
-			*((unsigned long *)&bo->gem_base.refcount));
+			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+			*((unsigned long *)&bo->tbo.base.refcount));
 		mutex_lock(&bo->rdev->gem.mutex);
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_put_unlocked(&bo->gem_base);
+		drm_gem_object_put(&bo->tbo.base);
 	}
 }
 
@@ -607,7 +610,7 @@
 	int steal;
 	int i;
 
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!bo->tiling_flags)
 		return 0;
@@ -733,7 +736,7 @@
 				uint32_t *tiling_flags,
 				uint32_t *pitch)
 {
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -745,7 +748,7 @@
 				bool force_drop)
 {
 	if (!force_drop)
-		lockdep_assert_held(&bo->tbo.resv->lock.base);
+		dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
@@ -772,7 +775,7 @@
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 			   bool evict,
-			   struct ttm_mem_reg *new_mem)
+			   struct ttm_resource *new_mem)
 {
 	struct radeon_bo *rbo;
 
@@ -821,7 +824,7 @@
 	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
 	for (i = 0; i < rbo->placement.num_placement; i++) {
 		/* Force into visible VRAM */
-		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
 		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
 			rbo->placements[i].lpfn = lpfn;
 	}
@@ -867,10 +870,10 @@
 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.resv;
+	struct dma_resv *resv = bo->tbo.base.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		dma_resv_add_shared_fence(resv, &fence->base);
 	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+		dma_resv_add_excl_fence(resv, &fence->base);
 }
-- 
Gitblit v1.6.2
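
Note (illustrative, not part of the patch): a minimal sketch of the TTM placement idiom this diff migrates to, assuming the v5.10-era TTM headers. example_fill_vram_place() is a hypothetical helper, not a function in radeon_object.c; it only shows how the target memory pool moves out of the TTM_PL_FLAG_* bits in .flags into the dedicated .mem_type field, while .flags keeps the caching attributes.

#include <drm/ttm/ttm_placement.h>

/* Hypothetical helper: describe one write-combined VRAM placement the new way. */
static void example_fill_vram_place(struct ttm_place *place)
{
	place->fpfn = 0;			/* no lower page-frame bound */
	place->lpfn = 0;			/* no upper page-frame bound */
	place->mem_type = TTM_PL_VRAM;		/* memory pool is now selected here */
	place->flags = TTM_PL_FLAG_WC |		/* flags carry caching attributes only */
		       TTM_PL_FLAG_UNCACHED;
}

The same split between .mem_type and .flags applies to the TTM_PL_TT and TTM_PL_SYSTEM placements in radeon_ttm_placement_from_domain() above.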