From 04dd17822334871b23ea2862f7798fb0e0007777 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 11 May 2024 08:53:19 +0000
Subject: [PATCH] drm/vmwgfx: adapt to reworked TTM placement and bind interfaces
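
Update vmwgfx_ttm_buffer.c for the reworked TTM interfaces; the changes
appear to track the TTM refactoring that landed around Linux 5.10:

* Placements carry an explicit memory type in ttm_place.mem_type instead
  of encoding it in the placement flags. For example, a placement that
  was previously written as

      .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED

  becomes

      .mem_type = TTM_PL_VRAM,
      .flags = TTM_PL_FLAG_CACHED

* Scatter-gather DMA mapping uses dma_map_sgtable() and
  dma_unmap_sgtable() instead of open-coded dma_map_sg() and
  dma_unmap_sg(), and __sg_alloc_table_from_pages() is called with its
  newer signature, which returns a struct scatterlist pointer. The page
  iterator now keeps a page array for all modes and advances the sg case
  with __sg_page_iter_dma_next().

* bind/unbind/destroy move from struct ttm_backend_func to the
  ttm_tt_bind/ttm_tt_unbind/ttm_tt_destroy hooks in ttm_bo_driver; a new
  'bound' flag makes bind and unbind idempotent.

* struct ttm_mem_reg becomes struct ttm_resource, and io_mem_reserve()
  folds the VRAM base address into mem->bus.offset.

* vmw_invalidate_caches(), vmw_init_mem_type(), io_mem_free() and
  fault_reserve_notify() are dropped, presumably because the reworked
  TTM core no longer calls them.

* The vmw_bo_map_dma()/vmw_bo_unmap_dma() wrappers are removed, and a
  new helper, vmw_bo_create_and_populate(), creates, populates and
  DMA-maps a buffer object in one step.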
---
 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 313 ++++++++++++++++++++++-----------------------
 1 file changed, 134 insertions(+), 179 deletions(-)

diff --git a/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index f388ad5..73116ec 100644
--- a/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -33,49 +33,57 @@
 static const struct ttm_place vram_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+	.mem_type = TTM_PL_VRAM,
+	.flags = TTM_PL_FLAG_CACHED
 };
 
 static const struct ttm_place vram_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+	.mem_type = TTM_PL_VRAM,
+	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
 
 static const struct ttm_place sys_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+	.mem_type = TTM_PL_SYSTEM,
+	.flags = TTM_PL_FLAG_CACHED
 };
 
 static const struct ttm_place sys_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+	.mem_type = TTM_PL_SYSTEM,
+	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
 
 static const struct ttm_place gmr_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	.mem_type = VMW_PL_GMR,
+	.flags = TTM_PL_FLAG_CACHED
 };
 
 static const struct ttm_place gmr_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+	.mem_type = VMW_PL_GMR,
+	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
 
 static const struct ttm_place mob_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+	.mem_type = VMW_PL_MOB,
+	.flags = TTM_PL_FLAG_CACHED
 };
 
 static const struct ttm_place mob_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+	.mem_type = VMW_PL_MOB,
+	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
 
 struct ttm_placement vmw_vram_placement = {
@@ -89,11 +97,13 @@
 	{
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+		.mem_type = TTM_PL_VRAM,
+		.flags = TTM_PL_FLAG_CACHED
 	}, {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+		.mem_type = VMW_PL_GMR,
+		.flags = TTM_PL_FLAG_CACHED
 	}
 };
 
@@ -101,11 +111,13 @@
 	{
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+		.mem_type = VMW_PL_GMR,
+		.flags = TTM_PL_FLAG_CACHED
 	}, {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+		.mem_type = TTM_PL_VRAM,
+		.flags = TTM_PL_FLAG_CACHED
 	}
 };
 
@@ -120,12 +132,14 @@
 	{
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
+		.mem_type = TTM_PL_VRAM,
+		.flags = TTM_PL_FLAG_CACHED |
 			 TTM_PL_FLAG_NO_EVICT
 	}, {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
+		.mem_type = VMW_PL_GMR,
+		.flags = TTM_PL_FLAG_CACHED |
 			 TTM_PL_FLAG_NO_EVICT
 	}
 };
@@ -169,19 +183,23 @@
 	{
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+		.mem_type = TTM_PL_SYSTEM,
+		.flags = TTM_PL_FLAG_CACHED
 	}, {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+		.mem_type = TTM_PL_VRAM,
+		.flags = TTM_PL_FLAG_CACHED
 	}, {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+		.mem_type = VMW_PL_GMR,
+		.flags = TTM_PL_FLAG_CACHED
 	}, {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+		.mem_type = VMW_PL_MOB,
+		.flags = TTM_PL_FLAG_CACHED
 	}
 };
 
@@ -189,15 +207,18 @@
 	{
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+		.mem_type = TTM_PL_SYSTEM,
+		.flags = TTM_PL_FLAG_CACHED
 	}, {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+		.mem_type = VMW_PL_GMR,
+		.flags = TTM_PL_FLAG_CACHED
 	}, {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+		.mem_type = VMW_PL_MOB,
+		.flags = TTM_PL_FLAG_CACHED
 	}
 };
 
@@ -246,6 +267,7 @@
 	struct vmw_sg_table vsgt;
 	uint64_t sg_alloc_size;
 	bool mapped;
+	bool bound;
 };
 
 const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
@@ -266,7 +288,9 @@
 
 static bool __vmw_piter_sg_next(struct vmw_piter *viter)
 {
-	return __sg_page_iter_next(&viter->iter);
+	bool ret = __vmw_piter_non_sg_next(viter);
+
+	return __sg_page_iter_dma_next(&viter->iter) && ret;
 }
 
 
@@ -283,12 +307,6 @@
 {
 	return viter->pages[viter->i];
 }
-
-static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
-{
-	return sg_page_iter_page(&viter->iter);
-}
-
 
 /**
  * Helper functions to return the DMA address of the current page.
@@ -330,26 +348,23 @@
 {
 	viter->i = p_offset - 1;
 	viter->num_pages = vsgt->num_pages;
+	viter->page = &__vmw_piter_non_sg_page;
+	viter->pages = vsgt->pages;
 	switch (vsgt->mode) {
 	case vmw_dma_phys:
 		viter->next = &__vmw_piter_non_sg_next;
 		viter->dma_address = &__vmw_piter_phys_addr;
-		viter->page = &__vmw_piter_non_sg_page;
-		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_alloc_coherent:
 		viter->next = &__vmw_piter_non_sg_next;
 		viter->dma_address = &__vmw_piter_dma_addr;
-		viter->page = &__vmw_piter_non_sg_page;
 		viter->addrs = vsgt->addrs;
-		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_map_populate:
 	case vmw_dma_map_bind:
 		viter->next = &__vmw_piter_sg_next;
 		viter->dma_address = &__vmw_piter_sg_addr;
-		viter->page = &__vmw_piter_sg_page;
-		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
 				     vsgt->sgt->orig_nents, p_offset);
 		break;
 	default:
@@ -369,8 +384,7 @@
 {
 	struct device *dev = vmw_tt->dev_priv->dev->dev;
 
-	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
-		     DMA_BIDIRECTIONAL);
+	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
 	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
 }
 
@@ -390,16 +404,8 @@
 
 static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
 {
 	struct device *dev = vmw_tt->dev_priv->dev->dev;
-	int ret;
 
-	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
-			 DMA_BIDIRECTIONAL);
-	if (unlikely(ret == 0))
-		return -ENOMEM;
-
-	vmw_tt->sgt.nents = ret;
-
-	return 0;
+	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
@@ -426,6 +432,7 @@
 	int ret = 0;
 	static size_t sgl_size;
 	static size_t sgt_size;
+	struct scatterlist *sg;
 
 	if (vmw_tt->mapped)
 		return 0;
@@ -448,18 +455,20 @@
 		if (unlikely(ret != 0))
 			return ret;
 
-		ret = __sg_alloc_table_from_pages
-			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
-			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
-			 dma_get_max_seg_size(dev_priv->dev->dev),
-			 GFP_KERNEL);
-		if (unlikely(ret != 0))
+		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+				vsgt->num_pages, 0,
+				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
+				dma_get_max_seg_size(dev_priv->dev->dev),
+				NULL, 0, GFP_KERNEL);
+		if (IS_ERR(sg)) {
+			ret = PTR_ERR(sg);
 			goto out_sg_alloc_fail;
+		}
 
-		if (vsgt->num_pages > vmw_tt->sgt.nents) {
+		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
 			uint64_t over_alloc =
 				sgl_size * (vsgt->num_pages -
-					    vmw_tt->sgt.nents);
+					    vmw_tt->sgt.orig_nents);
 
 			ttm_mem_global_free(glob, over_alloc);
 			vmw_tt->sg_alloc_size -= over_alloc;
@@ -526,43 +535,6 @@
 	vmw_tt->mapped = false;
 }
 
-
-/**
- * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
- *
- * @bo: Pointer to a struct ttm_buffer_object
- *
- * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
- * instead of a pointer to a struct vmw_ttm_backend as argument.
- * Note that the buffer object must be either pinned or reserved before
- * calling this function.
- */
-int vmw_bo_map_dma(struct ttm_buffer_object *bo)
-{
-	struct vmw_ttm_tt *vmw_tt =
-		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
-	return vmw_ttm_map_dma(vmw_tt);
-}
-
-
-/**
- * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
- *
- * @bo: Pointer to a struct ttm_buffer_object
- *
- * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
- * instead of a pointer to a struct vmw_ttm_backend as argument.
- */
-void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
-{
-	struct vmw_ttm_tt *vmw_tt =
-		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
-	vmw_ttm_unmap_dma(vmw_tt);
-}
-
-
 /**
  * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
  * TTM buffer object
@@ -583,11 +555,18 @@
 }
 
 
-static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+static int vmw_ttm_bind(struct ttm_bo_device *bdev,
+			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
 	struct vmw_ttm_tt *vmw_be =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-	int ret;
+	int ret = 0;
+
+	if (!bo_mem)
+		return -EINVAL;
+
+	if (vmw_be->bound)
+		return 0;
 
 	ret = vmw_ttm_map_dma(vmw_be);
 	if (unlikely(ret != 0))
@@ -598,8 +577,9 @@
 
 	switch (bo_mem->mem_type) {
 	case VMW_PL_GMR:
-		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
 				    ttm->num_pages, vmw_be->gmr_id);
+		break;
 	case VMW_PL_MOB:
 		if (unlikely(vmw_be->mob == NULL)) {
 			vmw_be->mob =
@@ -608,19 +588,25 @@
 				return -ENOMEM;
 		}
 
-		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
 				    &vmw_be->vsgt, ttm->num_pages,
 				    vmw_be->gmr_id);
+		break;
 	default:
 		BUG();
 	}
-	return 0;
+	vmw_be->bound = true;
+	return ret;
 }
 
-static int vmw_ttm_unbind(struct ttm_tt *ttm)
+static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
+			   struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	if (!vmw_be->bound)
+		return;
 
 	switch (vmw_be->mem_type) {
 	case VMW_PL_GMR:
@@ -635,16 +621,17 @@
 
 	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
 		vmw_ttm_unmap_dma(vmw_be);
-
-	return 0;
+	vmw_be->bound = false;
 }
 
-static void vmw_ttm_destroy(struct ttm_tt *ttm)
+static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
+	vmw_ttm_unbind(bdev, ttm);
+	ttm_tt_destroy_common(bdev, ttm);
 	vmw_ttm_unmap_dma(vmw_be);
 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
 		ttm_dma_tt_fini(&vmw_be->dma_ttm);
 	else
@@ -658,7 +645,8 @@
 }
 
 
-static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+static int vmw_ttm_populate(struct ttm_bo_device *bdev,
+			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct vmw_ttm_tt *vmw_tt =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -666,7 +654,7 @@
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 	int ret;
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
@@ -686,7 +674,8 @@
 	return ret;
 }
 
-static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
+			       struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -710,12 +699,6 @@
 		ttm_pool_unpopulate(ttm);
 }
 
-static struct ttm_backend_func vmw_ttm_func = {
-	.bind = vmw_ttm_bind,
-	.unbind = vmw_ttm_unbind,
-	.destroy = vmw_ttm_destroy,
-};
-
 static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 					uint32_t page_flags)
 {
@@ -726,7 +709,6 @@
 	if (!vmw_be)
 		return NULL;
 
-	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
 	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
 	vmw_be->mob = NULL;
 
@@ -743,50 +725,6 @@
 	return NULL;
 }
 
-static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
-	return 0;
-}
-
-static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
-			     struct ttm_mem_type_manager *man)
-{
-	switch (type) {
-	case TTM_PL_SYSTEM:
-		/* System memory */
-
-		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_FLAG_CACHED;
-		man->default_caching = TTM_PL_FLAG_CACHED;
-		break;
-	case TTM_PL_VRAM:
-		/* "On-card" video ram */
-		man->func = &ttm_bo_manager_func;
-		man->gpu_offset = 0;
-		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_FLAG_CACHED;
-		man->default_caching = TTM_PL_FLAG_CACHED;
-		break;
-	case VMW_PL_GMR:
-	case VMW_PL_MOB:
-		/*
-		 * "Guest Memory Regions" is an aperture like feature with
-		 * one slot per bo. There is an upper limit of the number of
-		 * slots as well as the bo size.
-		 */
-		man->func = &vmw_gmrid_manager_func;
-		man->gpu_offset = 0;
-		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_FLAG_CACHED;
-		man->default_caching = TTM_PL_FLAG_CACHED;
-		break;
-	default:
-		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
-		return -EINVAL;
-	}
-	return 0;
-}
-
 static void vmw_evict_flags(struct ttm_buffer_object *bo,
 			    struct ttm_placement *placement)
 {
@@ -801,26 +739,18 @@
 	return vmw_user_bo_verify_access(bo, tfile);
 }
 
-static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
 
-	mem->bus.addr = NULL;
-	mem->bus.is_iomem = false;
-	mem->bus.offset = 0;
-	mem->bus.size = mem->num_pages << PAGE_SHIFT;
-	mem->bus.base = 0;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
 	case VMW_PL_GMR:
 	case VMW_PL_MOB:
 		return 0;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = dev_priv->vram_start;
+		mem->bus.offset = (mem->start << PAGE_SHIFT) +
+			dev_priv->vram_start;
 		mem->bus.is_iomem = true;
 		break;
 	default:
@@ -829,20 +759,11 @@
 	return 0;
 }
 
-static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
-{
-}
-
-static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
-{
-	return 0;
-}
-
 /**
  * vmw_move_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to move.
-* @mem: The struct ttm_mem_reg indicating to what memory
+* @mem: The struct ttm_resource indicating to what memory
  * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
@@ -850,7 +771,7 @@
  */
 static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
-			    struct ttm_mem_reg *mem)
+			    struct ttm_resource *mem)
 {
 	vmw_bo_move_notify(bo, mem);
 	vmw_query_move_notify(bo, mem);
@@ -873,15 +794,49 @@
 	.ttm_tt_create = &vmw_ttm_tt_create,
 	.ttm_tt_populate = &vmw_ttm_populate,
 	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
-	.invalidate_caches = vmw_invalidate_caches,
-	.init_mem_type = vmw_init_mem_type,
+	.ttm_tt_bind = &vmw_ttm_bind,
+	.ttm_tt_unbind = &vmw_ttm_unbind,
+	.ttm_tt_destroy = &vmw_ttm_destroy,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = vmw_evict_flags,
 	.move = NULL,
 	.verify_access = vmw_verify_access,
 	.move_notify = vmw_move_notify,
 	.swap_notify = vmw_swap_notify,
-	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
-	.io_mem_free = &vmw_ttm_io_mem_free,
 };
+
+int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+			       unsigned long bo_size,
+			       struct ttm_buffer_object **bo_p)
+{
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+			    ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			    0, false, &bo);
+
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, false, true, NULL);
+	BUG_ON(ret != 0);
+	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
+	if (likely(ret == 0)) {
+		struct vmw_ttm_tt *vmw_tt =
+			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		ret = vmw_ttm_map_dma(vmw_tt);
+	}
+
+	ttm_bo_unreserve(bo);
+
+	if (likely(ret == 0))
+		*bo_p = bo;
+	return ret;
+}
-- 
Gitblit v1.6.2