From f70575805708cabdedea7498aaa3f710fde4d920 Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Wed, 31 Jan 2024 03:29:01 +0000 Subject: [PATCH] add lvds1024*800 --- kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 361 ++++++++++++--------------------------------------- 1 files changed, 87 insertions(+), 274 deletions(-) diff --git a/kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index ce31e32..9a4b78d 100644 --- a/kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -1,31 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd * Author:Mark Yao <mark.yao@rock-chips.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ +#include <linux/dma-buf-cache.h> +#include <linux/iommu.h> +#include <linux/vmalloc.h> + #include <drm/drm.h> -#include <drm/drmP.h> #include <drm/drm_gem.h> +#include <drm/drm_prime.h> #include <drm/drm_vma_manager.h> -#include <linux/dma-buf.h> #include <linux/genalloc.h> #include <linux/iommu.h> #include <linux/pagemap.h> #include <linux/vmalloc.h> -#include <linux/swiotlb.h> +#include <linux/rockchip/rockchip_sip.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" + +static u32 bank_bit_first = 12; +static u32 bank_bit_mask = 0x7; struct page_info { struct page *page; @@ -54,8 +52,8 @@ rk_obj->dma_addr = rk_obj->mm.start; - ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl, - rk_obj->sgt->nents, prot); + ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt, + prot); if (ret < rk_obj->base.size) { DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", ret, rk_obj->base.size); @@ -63,10 +61,7 @@ goto err_remove_node; } -#ifdef CONFIG_IOMMU_API - if (private->domain->ops->flush_iotlb_all) - private->domain->ops->flush_iotlb_all(private->domain); -#endif + iommu_flush_iotlb_all(private->domain); rk_obj->size = ret; @@ -109,59 +104,15 @@ } } -static struct sg_table *rockchip_gem_pages_to_sg(struct page **pages, unsigned int nr_pages) +void rockchip_gem_get_ddr_info(void) { - struct sg_table *sg = NULL; - int ret; -#define SG_SIZE_MAX (IO_TLB_SEGSIZE * (1 << IO_TLB_SHIFT)) + struct dram_addrmap_info *ddr_map_info; - sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!sg) { - ret = -ENOMEM; - goto out; + ddr_map_info = sip_smc_get_dram_map(); + if (ddr_map_info) { + bank_bit_first = ddr_map_info->bank_bit_first; + bank_bit_mask = ddr_map_info->bank_bit_mask; } - - ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, - SG_SIZE_MAX, GFP_KERNEL); - if (ret) - goto out; - - return sg; -out: - kfree(sg); - return ERR_PTR(ret); -} - -static struct page **get_pages(struct drm_gem_object *obj) -{ - if (IS_ENABLED(CONFIG_DMABUF_PAGE_POOL)) { - struct drm_device *drm = obj->dev; - struct rockchip_drm_private *priv = drm->dev_private; - struct dmabuf_page_pool *pool = priv->page_pools; - - return dmabuf_page_pool_alloc_pages_array(pool, - obj->size >> - PAGE_SHIFT); - } - - return drm_gem_get_pages(obj); -} - 
-static void put_pages(struct drm_gem_object *obj, struct page **pages, - bool dirty, bool accessed) -{ - if (IS_ENABLED(CONFIG_DMABUF_PAGE_POOL)) { - struct drm_device *drm = obj->dev; - struct rockchip_drm_private *priv = drm->dev_private; - struct dmabuf_page_pool *pool = priv->page_pools; - - return dmabuf_page_pool_free_pages_array(pool, pages, - obj->size >> - PAGE_SHIFT); - } - - return drm_gem_put_pages(obj, pages, dirty, accessed); } static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) @@ -178,7 +129,7 @@ struct list_head lists[PG_ROUND]; dma_addr_t phys; int end = 0; - unsigned int bit12_14; + unsigned int bit_index; unsigned int block_index[PG_ROUND] = {0}; struct page_info *info; unsigned int maximum; @@ -186,7 +137,7 @@ for (i = 0; i < PG_ROUND; i++) INIT_LIST_HEAD(&lists[i]); - pages = get_pages(&rk_obj->base); + pages = drm_gem_get_pages(&rk_obj->base); if (IS_ERR(pages)) return PTR_ERR(pages); @@ -197,11 +148,14 @@ n_pages = rk_obj->num_pages; dst_pages = __vmalloc(sizeof(struct page *) * n_pages, - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); + GFP_KERNEL | __GFP_HIGHMEM); if (!dst_pages) { ret = -ENOMEM; goto err_put_pages; } + + DRM_DEBUG_KMS("bank_bit_first = 0x%x, bank_bit_mask = 0x%x\n", + bank_bit_first, bank_bit_mask); cur_page = 0; remain = n_pages; @@ -214,7 +168,7 @@ } chunk_pages = j - cur_page; - if (chunk_pages > 7) { + if (chunk_pages >= PG_ROUND) { for (i = 0; i < chunk_pages; i++) dst_pages[end + i] = pages[cur_page + i]; end += chunk_pages; @@ -229,9 +183,9 @@ INIT_LIST_HEAD(&info->list); info->page = pages[cur_page + i]; phys = page_to_phys(info->page); - bit12_14 = (phys >> 12) & 0x7; - list_add_tail(&info->list, &lists[bit12_14]); - block_index[bit12_14]++; + bit_index = ((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND; + list_add_tail(&info->list, &lists[bit_index]); + block_index[bit_index]++; } } @@ -259,9 +213,8 @@ DRM_DEBUG_KMS("%s, %d, end = %d, n_pages = %d\n", __func__, __LINE__, end, n_pages); - - rk_obj->sgt = rockchip_gem_pages_to_sg(dst_pages, rk_obj->num_pages); - + rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev, + dst_pages, rk_obj->num_pages); if (IS_ERR(rk_obj->sgt)) { ret = PTR_ERR(rk_obj->sgt); goto err_put_list; @@ -276,11 +229,10 @@ * TODO: Replace this by drm_clflush_sg() once it can be implemented * without relying on symbols that are not exported. 
*/ - for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i) + for_each_sgtable_sg(rk_obj->sgt, s, i) sg_dma_address(s) = sg_phys(s); - dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE); kvfree(pages); @@ -290,8 +242,7 @@ rockchip_gem_free_list(lists); kvfree(dst_pages); err_put_pages: - put_pages(&rk_obj->base, rk_obj->pages, false, false); - rk_obj->pages = NULL; + drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false); return ret; } @@ -299,9 +250,7 @@ { sg_free_table(rk_obj->sgt); kfree(rk_obj->sgt); - rk_obj->sgt = NULL; - put_pages(&rk_obj->base, rk_obj->pages, true, true); - rk_obj->pages = NULL; + drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true); } static inline void *drm_calloc_large(size_t nmemb, size_t size); @@ -351,6 +300,7 @@ rk_obj->pages = drm_calloc_large(rk_obj->num_pages, sizeof(*rk_obj->pages)); if (!rk_obj->pages) { + ret = -ENOMEM; DRM_ERROR("failed to allocate pages.\n"); goto err_sg_table_free; } @@ -388,7 +338,7 @@ return kcalloc(nmemb, size, GFP_KERNEL); return __vmalloc(size * nmemb, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); } static inline void drm_free_large(void *ptr) @@ -433,7 +383,7 @@ paddr += PAGE_SIZE; i++; } - sgt = rockchip_gem_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + sgt = drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); goto err_free_pages; @@ -567,42 +517,6 @@ } } -/* - * __vm_map_pages - maps range of kernel pages into user vma - * @vma: user vma to map to - * @pages: pointer to array of source kernel pages - * @num: number of pages in page array - * @offset: user's requested vm_pgoff - * - * This allows drivers to map range of kernel pages into a user vma. - * - * Return: 0 on success and error code otherwise. 
- */ -static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, - unsigned long num, unsigned long offset) -{ - unsigned long count = vma_pages(vma); - unsigned long uaddr = vma->vm_start; - int ret, i; - - /* Fail if the user requested offset is beyond the end of the object */ - if (offset > num) - return -ENXIO; - - /* Fail if the user requested size exceeds available object size */ - if (count > num - offset) - return -ENXIO; - - for (i = 0; i < count; i++) { - ret = vm_insert_page(vma, uaddr, pages[offset + i]); - if (ret < 0) - return ret; - uaddr += PAGE_SIZE; - } - - return 0; -} - static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj, struct vm_area_struct *vma) { @@ -613,7 +527,7 @@ if (user_count == 0) return -ENXIO; - return __vm_map_pages(vma, rk_obj->pages, count, vma->vm_pgoff); + return vm_map_pages(vma, rk_obj->pages, count); } static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj, @@ -650,9 +564,6 @@ } else { ret = rockchip_drm_gem_object_mmap_dma(obj, vma); } - - if (ret) - drm_gem_vm_close(vma); return ret; } @@ -697,7 +608,8 @@ } static struct rockchip_gem_object * - rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size) +rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size, + unsigned int flags) { struct address_space *mapping; struct rockchip_gem_object *rk_obj; @@ -708,6 +620,10 @@ #else gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; #endif + + if (flags & ROCKCHIP_BO_DMA32) + gfp_mask |= __GFP_DMA32; + size = round_up(size, PAGE_SIZE); rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL); @@ -731,7 +647,7 @@ struct rockchip_gem_object *rk_obj; int ret; - rk_obj = rockchip_gem_alloc_object(drm, size); + rk_obj = rockchip_gem_alloc_object(drm, size, flags); if (IS_ERR(rk_obj)) return rk_obj; rk_obj->flags = flags; @@ -748,6 +664,28 @@ } /* + * rockchip_gem_destroy - destroy gem object + * + * The dma_buf_unmap_attachment and dma_buf_detach will be re-defined if + * CONFIG_DMABUF_CACHE is enabled. + * + * Same as drm_prime_gem_destroy + */ +static void rockchip_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) +{ + struct dma_buf_attachment *attach; + struct dma_buf *dma_buf; + + attach = obj->import_attach; + if (sg) + dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); + dma_buf = attach->dmabuf; + dma_buf_detach(attach->dmabuf, attach); + /* remove the reference */ + dma_buf_put(dma_buf); +} + +/* * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked * callback function */ @@ -761,13 +699,11 @@ if (private->domain) { rockchip_gem_iommu_unmap(rk_obj); } else { - dma_unmap_sg(drm->dev, rk_obj->sgt->sgl, - rk_obj->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(drm->dev, rk_obj->sgt, + DMA_BIDIRECTIONAL, 0); } drm_free_large(rk_obj->pages); -#ifndef CONFIG_ARCH_ROCKCHIP - drm_prime_gem_destroy(obj, rk_obj->sgt); -#endif + rockchip_gem_destroy(obj, rk_obj->sgt); } else { rockchip_gem_free_buf(rk_obj); } @@ -807,7 +743,7 @@ goto err_handle_create; /* drop reference from allocate - handle holds it now. 
*/ - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return rk_obj; @@ -857,7 +793,7 @@ int ret; if (rk_obj->pages) - return rockchip_gem_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) @@ -873,23 +809,6 @@ } return sgt; -} - -static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt, - int count) -{ - struct scatterlist *s; - dma_addr_t expected = sg_dma_address(sgt->sgl); - unsigned int i; - unsigned long size = 0; - - for_each_sg(sgt->sgl, s, count, i) { - if (sg_dma_address(s) != expected) - break; - expected = sg_dma_address(s) + sg_dma_len(s); - size += sg_dma_len(s); - } - return size; } static int @@ -908,15 +827,13 @@ struct sg_table *sg, struct rockchip_gem_object *rk_obj) { - int count = dma_map_sg(drm->dev, sg->sgl, sg->nents, - DMA_BIDIRECTIONAL); - if (!count) - return -EINVAL; + int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); + if (err) + return err; - if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) { + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { DRM_ERROR("failed to map sg_table to contiguous linear address.\n"); - dma_unmap_sg(drm->dev, sg->sgl, sg->nents, - DMA_BIDIRECTIONAL); + dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); return -EINVAL; } @@ -934,7 +851,7 @@ struct rockchip_gem_object *rk_obj; int ret; - rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size); + rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size, 0); if (IS_ERR(rk_obj)) return ERR_CAST(rk_obj); @@ -974,13 +891,9 @@ { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); - if (rk_obj->pages) { - pgprot_t prot; - - prot = rk_obj->flags & ROCKCHIP_BO_CACHABLE ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL); - - return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, prot); - } + if (rk_obj->pages) + return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) return NULL; @@ -1000,32 +913,6 @@ /* Nothing to do if allocated by DMA mapping API. 
 */
 }
 
-int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
-				 struct drm_device *dev, uint32_t handle,
-				 uint64_t *offset)
-{
-	struct drm_gem_object *obj;
-	int ret = 0;
-
-	obj = drm_gem_object_lookup(file_priv, handle);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret)
-		goto out;
-
-	*offset = drm_vma_node_offset_addr(&obj->vma_node);
-	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
-
-out:
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
 int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
@@ -1042,8 +929,8 @@
 {
 	struct drm_rockchip_gem_map_off *args = data;
 
-	return rockchip_gem_dumb_map_offset(file_priv, drm, args->handle,
-					    &args->offset);
+	return drm_gem_dumb_map_offset(file_priv, drm, args->handle,
+				       &args->offset);
 }
 
 int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data,
@@ -1070,7 +957,8 @@
 	args->phy_addr = page_to_phys(rk_obj->pages[0]);
 
 out:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put(obj);
+
 	return ret;
 }
 
@@ -1089,7 +977,7 @@
 }
 
 int rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
-				       enum dma_data_direction dir)
+				      enum dma_data_direction dir)
 {
 	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
 	struct drm_device *drm = obj->dev;
@@ -1099,80 +987,5 @@
 	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
 			       dir);
 
-	return 0;
-}
-
-static int rockchip_gem_prime_sgl_sync_range(struct device *dev,
-				struct scatterlist *sgl, unsigned int nents,
-				unsigned int offset, unsigned int length,
-				enum dma_data_direction dir, bool for_cpu)
-{
-	int i;
-	struct scatterlist *sg;
-	unsigned int len = 0;
-	dma_addr_t sg_dma_addr;
-
-	for_each_sg(sgl, sg, nents, i) {
-		unsigned int sg_offset, sg_left, size = 0;
-
-		len += sg->length;
-		if (len <= offset)
-			continue;
-
-		sg_dma_addr = sg_dma_address(sg);
-		sg_left = len - offset;
-		sg_offset = sg->length - sg_left;
-
-		size = (length < sg_left) ? length : sg_left;
-		if (for_cpu)
-			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
-						      sg_offset, size, dir);
-		else
-			dma_sync_single_range_for_device(dev, sg_dma_addr,
-							 sg_offset, size, dir);
-
-		offset += size;
-		length -= size;
-
-		if (length == 0)
-			break;
-	}
-
-	return 0;
-}
-
-int rockchip_gem_prime_begin_cpu_access_partial(struct drm_gem_object *obj,
-						enum dma_data_direction dir,
-						unsigned int offset,
-						unsigned int len)
-{
-	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
-	struct drm_device *drm = obj->dev;
-
-	if (!rk_obj->sgt)
-		return 0;
-
-	rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl,
-					  rk_obj->sgt->nents,
-					  offset, len, dir, true);
-
-	return 0;
-}
-
-int rockchip_gem_prime_end_cpu_access_partial(struct drm_gem_object *obj,
-					      enum dma_data_direction dir,
-					      unsigned int offset,
-					      unsigned int len)
-{
-	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
-	struct drm_device *drm = obj->dev;
-
-	if (!rk_obj->sgt)
-		return 0;
-
-	rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl,
-					  rk_obj->sgt->nents,
-					  offset, len, dir, false);
-
 	return 0;
 }
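
Note on the core change in this patch: rockchip_gem_get_pages() previously bucketed pages by hardcoded physical-address bits 12-14 ((phys >> 12) & 0x7); the patch generalizes this to the DRAM bank geometry reported by firmware through sip_smc_get_dram_map(), keeping 12/0x7 as fallback defaults. Sorting pages into per-bank lists before building the scatter table appears intended to spread consecutive IOVA pages across DRAM banks. The standalone C sketch below demonstrates only the bucketing expression taken from the hunk itself; the sample addresses are invented, and PG_ROUND = 8 is an assumption inferred from the "chunk_pages >= PG_ROUND" test replacing "chunk_pages > 7".

#include <stdio.h>

#define PG_ROUND 8	/* bucket count; value inferred from the diff, not shown in it */

static unsigned int bank_bit_first = 12;	/* first DRAM bank-select bit (default) */
static unsigned int bank_bit_mask = 0x7;	/* three bank bits -> up to 8 banks (default) */

/* Same bucketing expression the patch installs in rockchip_gem_get_pages(). */
static unsigned int bank_index(unsigned long long phys)
{
	return (unsigned int)((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND;
}

int main(void)
{
	/* Invented page-aligned physical addresses, purely for demonstration. */
	static const unsigned long long pages[] = {
		0x80000000ULL, 0x80001000ULL, 0x80002000ULL,
		0x9000a000ULL, 0x9000b000ULL, 0xa0004000ULL,
	};
	unsigned int counts[PG_ROUND] = { 0 };
	size_t i;

	/* Tally how many pages land in each bank bucket, as the driver's lists[] do. */
	for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
		counts[bank_index(pages[i])]++;

	for (i = 0; i < PG_ROUND; i++)
		printf("bank %zu: %u page(s)\n", i, counts[i]);
	return 0;
}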