2024-05-16 8d2a02b24d66aa359e83eebc1ed3c0f85367a1cb
kernel/drivers/gpu/arm/bifrost/mali_kbase_mem_linux.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2010, 2012-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010, 2012-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -20,7 +20,7 @@
  */

 /**
- * Base kernel memory APIs, Linux implementation.
+ * DOC: Base kernel memory APIs, Linux implementation.
  */

 #ifndef _KBASE_MEM_LINUX_H_
@@ -44,13 +44,15 @@
  * @flags: bitmask of BASE_MEM_* flags to convey special requirements &
  *         properties for the new allocation.
  * @gpu_va: Start address of the memory region which was allocated from GPU
- *          virtual address space.
+ *          virtual address space. If the BASE_MEM_FLAG_MAP_FIXED is set
+ *          then this parameter shall be provided by the caller.
+ * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
  *
  * Return: 0 on success or error code
  */
-struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
-					u64 va_pages, u64 commit_pages,
-					u64 extension, u64 *flags, u64 *gpu_va);
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages,
+					u64 extension, u64 *flags, u64 *gpu_va,
+					enum kbase_caller_mmu_sync_info mmu_sync_info);

 /**
  * kbase_mem_query - Query properties of a GPU memory region
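[Editor's note, not part of the patch: a minimal call sketch for the new kbase_mem_alloc() signature. The CALLER_MMU_SYNC enumerator of enum kbase_caller_mmu_sync_info is assumed (the enum body is not in this header), and the BASE_MEM_* flags shown are illustrative.]

	/* Sketch only: in-kernel caller allocating 16 GPU pages, now passing
	 * the mmu_sync_info argument explicitly. */
	u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_RD;
	u64 gpu_va = 0;
	struct kbase_va_region *reg;

	reg = kbase_mem_alloc(kctx, 16 /* va_pages */, 16 /* commit_pages */,
			      0 /* extension */, &flags, &gpu_va,
			      CALLER_MMU_SYNC /* assumed enumerator */);
	if (!reg)
		return -ENOMEM;	/* allocation failed */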
@@ -169,6 +171,7 @@
  * @reg: The GPU region
  * @new_pages: The number of pages after the grow
  * @old_pages: The number of pages before the grow
+ * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
  *
  * Return: 0 on success, -errno on error.
  *
@@ -178,8 +181,9 @@
  * Note: Caller must be holding the region lock.
  */
 int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
-			       struct kbase_va_region *reg,
-			       u64 new_pages, u64 old_pages);
+			       struct kbase_va_region *reg, u64 new_pages,
+			       u64 old_pages,
+			       enum kbase_caller_mmu_sync_info mmu_sync_info);

 /**
  * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
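[Editor's note, not part of the patch: the grow path keeps its "caller must be holding the region lock" requirement. A hedged sketch of the expected calling pattern, where kbase_gpu_vm_lock()/kbase_gpu_vm_unlock() are assumed to be the region-lock helpers and CALLER_MMU_ASYNC is an assumed enumerator:]

	/* Sketch only: grow an existing GPU mapping while holding the region lock. */
	int err;

	kbase_gpu_vm_lock(kctx);	/* assumed region-lock helper */
	err = kbase_mem_grow_gpu_mapping(kctx, reg, new_pages, old_pages,
					 CALLER_MMU_ASYNC /* assumed enumerator */);
	kbase_gpu_vm_unlock(kctx);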
@@ -213,6 +217,26 @@
  */
 bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);

+typedef unsigned int kbase_vmap_flag;
+
+/* Sync operations are needed on beginning and ending of access to kernel-mapped GPU memory.
+ *
+ * This is internal to the struct kbase_vmap_struct and should not be passed in by callers of
+ * kbase_vmap-related functions.
+ */
+#define KBASE_VMAP_FLAG_SYNC_NEEDED (((kbase_vmap_flag)1) << 0)
+
+/* Permanently mapped memory accounting (including enforcing limits) should be done on the
+ * kernel-mapped GPU memory.
+ *
+ * This should be used if the kernel mapping is going to live for a potentially long time, for
+ * example if it will persist after the caller has returned.
+ */
+#define KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING (((kbase_vmap_flag)1) << 1)
+
+/* Set of flags that can be passed into kbase_vmap-related functions */
+#define KBASE_VMAP_INPUT_FLAGS (KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING)
+
 struct kbase_vmap_struct {
 	off_t offset_in_page;
 	struct kbase_mem_phy_alloc *cpu_alloc;
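[Editor's note, not part of the patch: the definitions above split the flags into an internal bit (SYNC_NEEDED) and caller-selectable bits (KBASE_VMAP_INPUT_FLAGS). A small hedged sketch of the validation this implies:]

	/* Sketch only: KBASE_VMAP_FLAG_SYNC_NEEDED is internal to kbase_vmap_struct,
	 * so callers are limited to the bits in KBASE_VMAP_INPUT_FLAGS. */
	kbase_vmap_flag vmap_flags = KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING;

	if (WARN_ON(vmap_flags & ~KBASE_VMAP_INPUT_FLAGS))
		return NULL;	/* a flag outside the caller-selectable set was passed */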
@@ -221,9 +245,55 @@
 	struct tagged_addr *gpu_pages;
 	void *addr;
 	size_t size;
-	bool sync_needed;
+	kbase_vmap_flag flags;
 };

+/**
+ * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region or NULL if there isn't one
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, negative -errno on error
+ *
+ * Unmap the shrunk pages from the GPU mapping. Note that the size of the region
+ * itself is unmodified as we still need to reserve the VA, only the page tables
+ * will be modified by this function.
+ */
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg,
+				 u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_vmap_reg - Map part of an existing region into the kernel safely, only if the requested
+ *                  access permissions are supported
+ * @kctx: Context @reg belongs to
+ * @reg: The GPU region to map part of
+ * @gpu_addr: Start address of VA range to map, which must be within @reg
+ * @size: Size of VA range, which when added to @gpu_addr must be within @reg
+ * @prot_request: Flags indicating how the caller will then access the memory
+ * @map: Structure to be given to kbase_vunmap() on freeing
+ * @vmap_flags: Flags of type kbase_vmap_flag
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Variant of kbase_vmap_prot() that can be used given an existing region.
+ *
+ * The caller must satisfy one of the following for @reg:
+ * * It must have been obtained by finding it on the region tracker, and the region lock must not
+ *   have been released in the mean time.
+ * * Or, it must have been refcounted with a call to kbase_va_region_alloc_get(), and the region
+ *   lock is now held again.
+ * * Or, @reg has had NO_USER_FREE set at creation time or under the region lock, and the
+ *   region lock is now held again.
+ *
+ * The acceptable @vmap_flags are those in %KBASE_VMAP_INPUT_FLAGS.
+ *
+ * Refer to kbase_vmap_prot() for more information on the operation of this function.
+ */
+void *kbase_vmap_reg(struct kbase_context *kctx, struct kbase_va_region *reg, u64 gpu_addr,
+		     size_t size, unsigned long prot_request, struct kbase_vmap_struct *map,
+		     kbase_vmap_flag vmap_flags);

 /**
  * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
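[Editor's note, not part of the patch: a short usage sketch for the new kbase_vmap_reg()/kbase_vunmap() pairing. The KBASE_REG_GPU_RD permission request is illustrative, the region is assumed to have been looked up under the region lock per the rules above, and kbase_vunmap() is the counterpart named in the @map description.]

	/* Sketch only: map part of @reg into the kernel, use it, then unmap. */
	struct kbase_vmap_struct map;
	void *cpu_ptr;

	cpu_ptr = kbase_vmap_reg(kctx, reg, gpu_addr, size,
				 KBASE_REG_GPU_RD /* illustrative access request */,
				 &map, KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING);
	if (!cpu_ptr)
		return -ENOMEM;

	/* ... access the mapping through cpu_ptr ... */

	kbase_vunmap(kctx, &map);	/* assumed unmap counterpart */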
@@ -254,7 +324,7 @@
  * The checks are also there to help catch access errors on memory where
  * security is not a concern: imported memory that is always RW, and memory
  * that was allocated and owned by the process attached to @kctx. In this case,
- * it helps to identify memory that was was mapped with the wrong access type.
+ * it helps to identify memory that was mapped with the wrong access type.
  *
  * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
  * where either the security of memory is solely dependent on those flags, or
@@ -422,12 +492,12 @@
 /**
  * kbase_get_cache_line_alignment - Return cache line alignment
  *
+ * @kbdev: Device pointer.
+ *
  * Helper function to return the maximum cache line alignment considering
  * both CPU and GPU cache sizes.
  *
  * Return: CPU and GPU cache line alignment, in bytes.
- *
- * @kbdev: Device pointer.
  */
 u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev);

@@ -435,18 +505,7 @@
 static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pfn, pgprot_t pgprot)
 {
-	int err;
-
-#if ((KERNEL_VERSION(4, 4, 147) >= LINUX_VERSION_CODE) || \
-	((KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) && \
-	(KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE)))
-	if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
-		return VM_FAULT_SIGBUS;
-
-	err = vm_insert_pfn(vma, addr, pfn);
-#else
-	err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
-#endif
+	int err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);

 	if (unlikely(err == -ENOMEM))
 		return VM_FAULT_OOM;
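[Editor's note, not part of the patch: the compatibility wrapper above now delegates unconditionally to the kernel's vm_insert_pfn_prot() and only translates the error into a vm_fault_t. A hedged sketch of how a fault handler might use it; the handler name and the pfn lookup are illustrative.]

	/* Sketch only: a vm_operations_struct .fault handler built on the wrapper. */
	static vm_fault_t example_vm_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		unsigned long pfn = 0;	/* illustrative: the pfn backing vmf->address would be looked up here */

		return vmf_insert_pfn_prot(vma, vmf->address, pfn, vma->vm_page_prot);
	}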