From 08f87f769b595151be1afeff53e144f543faa614 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 06 Dec 2023 09:51:13 +0000
Subject: [PATCH] mali_kbase: update memory API declarations in mali_kbase_mem_linux.h
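
Update the Bifrost kbase memory API header: extend the copyright range
to 2022, convert the file description to a kernel-doc DOC: block, add
an mmu_sync_info argument to kbase_mem_alloc() and
kbase_mem_grow_gpu_mapping(), replace the sync_needed boolean in
struct kbase_vmap_struct with a kbase_vmap_flag bitmask, declare the
new kbase_mem_shrink_gpu_mapping() and kbase_vmap_reg() helpers, and
drop the legacy vm_insert_pfn() fallback used on old kernels that lack
vm_insert_pfn_prot().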

---
 kernel/drivers/gpu/arm/bifrost/mali_kbase_mem_linux.h |  107 +++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 83 insertions(+), 24 deletions(-)
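
Reviewer notes (not part of the commit): the sketches below illustrate
how in-kernel callers might use the revised declarations. They are
illustrative only; kctx, va_pages, commit_pages, gpu_addr and size are
assumed to be in scope, and constants such as CALLER_MMU_SYNC,
BASE_MEM_PROT_CPU_RD and KBASE_REG_CPU_RD are assumptions based on the
Bifrost driver sources and may differ between driver releases.

kbase_mem_alloc() now takes an mmu_sync_info argument stating whether
the call is synchronous with respect to MMU operations:

    u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD;
    u64 gpu_va = 0;
    struct kbase_va_region *reg;

    /* Pass the new mmu_sync_info argument alongside the existing
     * allocation parameters.
     */
    reg = kbase_mem_alloc(kctx, va_pages, commit_pages, 0 /* extension */,
                          &flags, &gpu_va, CALLER_MMU_SYNC);
    if (!reg)
        return -ENOMEM;

Kernel mappings made through the new kbase_vmap_reg() take a
kbase_vmap_flag mask. Only the flags in KBASE_VMAP_INPUT_FLAGS may be
passed in; KBASE_VMAP_FLAG_SYNC_NEEDED is internal to
struct kbase_vmap_struct:

    struct kbase_vmap_struct map;
    void *cpu_addr;

    /* Long-lived kernel mapping: opt in to permanent-map accounting. */
    cpu_addr = kbase_vmap_reg(kctx, reg, gpu_addr, size, KBASE_REG_CPU_RD,
                              &map, KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING);
    if (!cpu_addr)
        return -ENOMEM;

    /* ... access the mapping via cpu_addr ... */

    kbase_vunmap(kctx, &map);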

diff --git a/kernel/drivers/gpu/arm/bifrost/mali_kbase_mem_linux.h b/kernel/drivers/gpu/arm/bifrost/mali_kbase_mem_linux.h
index 36159c1..6dda44b 100644
--- a/kernel/drivers/gpu/arm/bifrost/mali_kbase_mem_linux.h
+++ b/kernel/drivers/gpu/arm/bifrost/mali_kbase_mem_linux.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2010, 2012-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010, 2012-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -20,7 +20,7 @@
  */
 
 /**
- * Base kernel memory APIs, Linux implementation.
+ * DOC: Base kernel memory APIs, Linux implementation.
  */
 
 #ifndef _KBASE_MEM_LINUX_H_
@@ -44,13 +44,15 @@
  * @flags:        bitmask of BASE_MEM_* flags to convey special requirements &
  *                properties for the new allocation.
  * @gpu_va:       Start address of the memory region which was allocated from GPU
- *                virtual address space.
+ *                virtual address space. If the BASE_MEM_FLAG_MAP_FIXED flag
+ *                is set, then this parameter must be provided by the caller.
+ * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
  *
  * Return: 0 on success or error code
  */
-struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
-					u64 va_pages, u64 commit_pages,
-					u64 extension, u64 *flags, u64 *gpu_va);
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages,
+					u64 extension, u64 *flags, u64 *gpu_va,
+					enum kbase_caller_mmu_sync_info mmu_sync_info);
 
 /**
  * kbase_mem_query - Query properties of a GPU memory region
@@ -169,6 +171,7 @@
  * @reg:       The GPU region
  * @new_pages: The number of pages after the grow
  * @old_pages: The number of pages before the grow
+ * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
  *
  * Return: 0 on success, -errno on error.
  *
@@ -178,8 +181,9 @@
  * Note: Caller must be holding the region lock.
  */
 int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
-		struct kbase_va_region *reg,
-		u64 new_pages, u64 old_pages);
+			       struct kbase_va_region *reg, u64 new_pages,
+			       u64 old_pages,
+			       enum kbase_caller_mmu_sync_info mmu_sync_info);
 
 /**
  * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
@@ -213,6 +217,26 @@
  */
 bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
 
+typedef unsigned int kbase_vmap_flag;
+
+/* Sync operations are needed at the beginning and end of access to kernel-mapped GPU memory.
+ *
+ * This is internal to the struct kbase_vmap_struct and should not be passed in by callers of
+ * kbase_vmap-related functions.
+ */
+#define KBASE_VMAP_FLAG_SYNC_NEEDED (((kbase_vmap_flag)1) << 0)
+
+/* Permanently mapped memory accounting (including enforcing limits) should be done on the
+ * kernel-mapped GPU memory.
+ *
+ * This should be used if the kernel mapping is going to live for a potentially long time, for
+ * example if it will persist after the caller has returned.
+ */
+#define KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING (((kbase_vmap_flag)1) << 1)
+
+/* Set of flags that can be passed into kbase_vmap-related functions */
+#define KBASE_VMAP_INPUT_FLAGS (KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING)
+
 struct kbase_vmap_struct {
 	off_t offset_in_page;
 	struct kbase_mem_phy_alloc *cpu_alloc;
@@ -221,9 +245,55 @@
 	struct tagged_addr *gpu_pages;
 	void *addr;
 	size_t size;
-	bool sync_needed;
+	kbase_vmap_flag flags;
 };
 
+/**
+ * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
+ * @kctx:      Context the region belongs to
+ * @reg:       The GPU region or NULL if there isn't one
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, -errno on error
+ *
+ * Unmap the shrunk pages from the GPU mapping. Note that the size of the region
+ * itself is unmodified, as the VA range still needs to be reserved; only the
+ * page tables are modified by this function.
+ */
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg,
+				 u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_vmap_reg - Map part of an existing region into the kernel safely, only if the requested
+ *                  access permissions are supported
+ * @kctx:         Context @reg belongs to
+ * @reg:          The GPU region to map part of
+ * @gpu_addr:     Start address of VA range to map, which must be within @reg
+ * @size:         Size of the VA range; @gpu_addr plus @size must lie within @reg
+ * @prot_request: Flags indicating how the caller will then access the memory
+ * @map:          Structure to be given to kbase_vunmap() on freeing
+ * @vmap_flags:   Flags of type kbase_vmap_flag
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Variant of kbase_vmap_prot() that can be used given an existing region.
+ *
+ * The caller must satisfy one of the following for @reg:
+ * * It must have been obtained by finding it on the region tracker, and the region lock must not
+ *   have been released in the meantime.
+ * * Or, it must have been refcounted with a call to kbase_va_region_alloc_get(), and the region
+ *   lock is now held again.
+ * * Or, @reg has had NO_USER_FREE set at creation time or under the region lock, and the
+ *   region lock is now held again.
+ *
+ * The acceptable @vmap_flags are those in %KBASE_VMAP_INPUT_FLAGS.
+ *
+ * Refer to kbase_vmap_prot() for more information on the operation of this function.
+ */
+void *kbase_vmap_reg(struct kbase_context *kctx, struct kbase_va_region *reg, u64 gpu_addr,
+		     size_t size, unsigned long prot_request, struct kbase_vmap_struct *map,
+		     kbase_vmap_flag vmap_flags);
 
 /**
  * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
@@ -254,7 +324,7 @@
  * The checks are also there to help catch access errors on memory where
  * security is not a concern: imported memory that is always RW, and memory
  * that was allocated and owned by the process attached to @kctx. In this case,
- * it helps to identify memory that was was mapped with the wrong access type.
+ * it helps to identify memory that was mapped with the wrong access type.
  *
  * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
  * where either the security of memory is solely dependent on those flags, or
@@ -422,12 +492,12 @@
 /**
  * kbase_get_cache_line_alignment - Return cache line alignment
  *
+ * @kbdev: Device pointer.
+ *
  * Helper function to return the maximum cache line alignment considering
  * both CPU and GPU cache sizes.
  *
  * Return: CPU and GPU cache line alignment, in bytes.
- *
- * @kbdev: Device pointer.
  */
 u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev);
 
@@ -435,18 +505,7 @@
 static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long pfn, pgprot_t pgprot)
 {
-	int err;
-
-#if ((KERNEL_VERSION(4, 4, 147) >= LINUX_VERSION_CODE) || \
-		((KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) && \
-		 (KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE)))
-	if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
-		return VM_FAULT_SIGBUS;
-
-	err = vm_insert_pfn(vma, addr, pfn);
-#else
-	err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
-#endif
+	int err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
 
 	if (unlikely(err == -ENOMEM))
 		return VM_FAULT_OOM;

--
Gitblit v1.6.2