From f70575805708cabdedea7498aaa3f710fde4d920 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 31 Jan 2024 03:29:01 +0000
Subject: [PATCH] bifrost: csf: support 2MB page allocations for protected memory

Add an is_small_page parameter to kbase_csf_protected_memory_alloc() and
kbase_csf_protected_memory_free(). Unless small pages are requested,
protected memory is allocated at KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER
granularity: the requested page count is rounded up to whole order-sized
allocations, and each large allocation is recorded in the caller's phys[]
array as a run of HUGE_HEAD/HUGE_PAGE tagged 4KB entries.

---
 kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_protected_memory.c |   62 ++++++++++++++++++++++++++----
 1 file changed, 53 insertions(+), 9 deletions(-)
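
The new is_small_page parameter selects between order-0 (4 KiB) and
KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER allocations; both
kbase_csf_protected_memory_alloc() and kbase_csf_protected_memory_free()
round the caller's 4 KiB page count up to a whole number of order-sized
allocations. The standalone sketch below is not driver code: it assumes
order 9 (2 MiB) and order 0 (4 KiB) for the two pool orders and only
reproduces that rounding arithmetic.

/*
 * Standalone sketch of the rounding done by this patch (userspace C, not
 * driver code). Assumption: KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER == 9 and
 * KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER == 0, i.e. 2 MiB and 4 KiB pages.
 */
#include <stdbool.h>
#include <stdio.h>

#define ORDER_2MB 9u /* stand-in for KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER */
#define ORDER_4KB 0u /* stand-in for KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER */

/* Number of pma handles needed to cover num_pages 4 KiB pages. */
static unsigned long long pma_alloc_count(unsigned long long num_pages,
					  bool is_small_page)
{
	unsigned int order = is_small_page ? ORDER_4KB : ORDER_2MB;
	unsigned long long num_pages_order = 1ull << order;

	/* Same rounding as div64_u64(num_pages + num_pages_order - 1, ...) */
	return (num_pages + num_pages_order - 1) / num_pages_order;
}

int main(void)
{
	/* 1000 small pages need 2 large (2 MiB) allocations, which fan out
	 * into 2 * 512 = 1024 entries of the caller's phys[] array.
	 */
	printf("%llu\n", pma_alloc_count(1000, false)); /* prints 2 */
	printf("%llu\n", pma_alloc_count(1000, true));  /* prints 1000 */
	return 0;
}

On the partial-failure path the alloc function frees i * num_pages_order
pages, so the same rounding in the free path recovers the number of pma
handles that were actually allocated.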

diff --git a/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_protected_memory.c b/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_protected_memory.c
index 5997483..bf1835b 100644
--- a/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_protected_memory.c
+++ b/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_protected_memory.c
@@ -71,29 +71,60 @@
 		kbase_csf_protected_memory_alloc(
 		struct kbase_device *const kbdev,
 		struct tagged_addr *phys,
-		size_t num_pages)
+		size_t num_pages,
+		bool is_small_page)
 {
 	size_t i;
 	struct protected_memory_allocator_device *pma_dev =
 		kbdev->csf.pma_dev;
-	struct protected_memory_allocation **pma =
-		kmalloc_array(num_pages, sizeof(*pma), GFP_KERNEL);
+	struct protected_memory_allocation **pma = NULL;
+	unsigned int order = KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER;
+	unsigned int num_pages_order;
+
+	if (is_small_page)
+		order = KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER;
+
+	num_pages_order = (1u << order);
+
+	/* Ensure the requested num_pages is aligned with
+	 * the order type passed as argument.
+	 *
+	 * pma_alloc_page() will then handle the granularity
+	 * of the allocation based on order.
+	 */
+	num_pages = div64_u64(num_pages + num_pages_order - 1, num_pages_order);
+
+	pma = kmalloc_array(num_pages, sizeof(*pma), GFP_KERNEL);
 
 	if (WARN_ON(!pma_dev) || WARN_ON(!phys) || !pma)
 		return NULL;
 
 	for (i = 0; i < num_pages; i++) {
-		pma[i] = pma_dev->ops.pma_alloc_page(pma_dev,
-				KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);
+		phys_addr_t phys_addr;
+
+		pma[i] = pma_dev->ops.pma_alloc_page(pma_dev, order);
 		if (!pma[i])
 			break;
 
-		phys[i] = as_tagged(pma_dev->ops.pma_get_phys_addr(pma_dev,
-					pma[i]));
+		phys_addr = pma_dev->ops.pma_get_phys_addr(pma_dev, pma[i]);
+
+		if (order) {
+			size_t j;
+
+			*phys++ = as_tagged_tag(phys_addr, HUGE_HEAD | HUGE_PAGE);
+
+			for (j = 1; j < num_pages_order; j++) {
+				*phys++ = as_tagged_tag(phys_addr +
+							PAGE_SIZE * j,
+							HUGE_PAGE);
+			}
+		} else {
+			phys[i] = as_tagged(phys_addr);
+		}
 	}
 
 	if (i != num_pages) {
-		kbase_csf_protected_memory_free(kbdev, pma, i);
+		kbase_csf_protected_memory_free(kbdev, pma, i * num_pages_order, is_small_page);
 		return NULL;
 	}
 
@@ -103,15 +134,28 @@
 void kbase_csf_protected_memory_free(
 		struct kbase_device *const kbdev,
 		struct protected_memory_allocation **pma,
-		size_t num_pages)
+		size_t num_pages,
+		bool is_small_page)
 {
 	size_t i;
 	struct protected_memory_allocator_device *pma_dev =
 		kbdev->csf.pma_dev;
+	unsigned int num_pages_order = (1u << KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER);
+
+	if (is_small_page)
+		num_pages_order = (1u << KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);
 
 	if (WARN_ON(!pma_dev) || WARN_ON(!pma))
 		return;
 
+	/* Ensure the requested num_pages is aligned with
+	 * the order type passed as argument.
+	 *
+	 * pma_free_page() will then handle the granularity
+	 * of the allocation based on order.
+	 */
+	num_pages = div64_u64(num_pages + num_pages_order - 1, num_pages_order);
+
 	for (i = 0; i < num_pages; i++)
 		pma_dev->ops.pma_free_page(pma_dev, pma[i]);
 

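For non-zero order, each successful pma_alloc_page() covers 2^order small
pages, and the alloc loop expands that single physical block into
num_pages_order consecutive entries of phys[], tagging the first one
HUGE_HEAD | HUGE_PAGE and the rest HUGE_PAGE. The sketch below mirrors only
that fan-out; the tagged_addr type, as_tagged_tag() and the tag values here
are simplified placeholders, not the kbase definitions.

/* Placeholder fan-out sketch (userspace C). The tag encoding here is made
 * up for illustration; kbase stores the tag bits differently.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull
#define HUGE_PAGE 0x1ull /* placeholder tag bit */
#define HUGE_HEAD 0x2ull /* placeholder tag bit */

typedef uint64_t tagged_addr_t; /* stand-in for struct tagged_addr */

static tagged_addr_t as_tagged_tag(uint64_t phys, uint64_t tag)
{
	return phys | tag; /* assumes the low bits of the address are free */
}

int main(void)
{
	enum { NUM_PAGES_ORDER = 1 << 9 }; /* one order-9 (2 MiB) allocation */
	tagged_addr_t phys[NUM_PAGES_ORDER];
	uint64_t phys_addr = 0x80000000ull; /* example 2 MiB aligned address */
	size_t j;

	/* Head entry of the huge page ... */
	phys[0] = as_tagged_tag(phys_addr, HUGE_HEAD | HUGE_PAGE);
	/* ... followed by 511 tail entries at 4 KiB offsets. */
	for (j = 1; j < NUM_PAGES_ORDER; j++)
		phys[j] = as_tagged_tag(phys_addr + PAGE_SIZE * j, HUGE_PAGE);

	printf("head=0x%llx tail[511]=0x%llx\n",
	       (unsigned long long)phys[0],
	       (unsigned long long)phys[NUM_PAGES_ORDER - 1]);
	return 0;
}
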
--
Gitblit v1.6.2