@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2010, 2012-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010, 2012-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -20,7 +20,7 @@
  */
 
 /**
- * Base kernel memory APIs, Linux implementation.
+ * DOC: Base kernel memory APIs, Linux implementation.
  */
 
 #ifndef _KBASE_MEM_LINUX_H_
@@ -44,13 +44,15 @@
  * @flags: bitmask of BASE_MEM_* flags to convey special requirements &
  *         properties for the new allocation.
  * @gpu_va: Start address of the memory region which was allocated from GPU
- *          virtual address space.
+ *          virtual address space. If the BASE_MEM_FLAG_MAP_FIXED is set
+ *          then this parameter shall be provided by the caller.
+ * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
  *
  * Return: 0 on success or error code
  */
-struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
-					u64 va_pages, u64 commit_pages,
-					u64 extension, u64 *flags, u64 *gpu_va);
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages,
+					 u64 extension, u64 *flags, u64 *gpu_va,
+					 enum kbase_caller_mmu_sync_info mmu_sync_info);
 
 /**
  * kbase_mem_query - Query properties of a GPU memory region
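Editorial note, not part of the patch: with the extra parameter, every caller of kbase_mem_alloc() now states whether the allocation is made synchronously with respect to MMU operations. A minimal sketch of an updated call site, assuming the usual kbase headers, that BASE_MEM_PROT_* are the standard allocation flag bits, and that enum kbase_caller_mmu_sync_info provides a CALLER_MMU_SYNC value for user-initiated paths (the names outside this hunk are assumptions):

/* Hedged illustration only; example_alloc_rw_buffer() is hypothetical. */
static int example_alloc_rw_buffer(struct kbase_context *kctx, u64 va_pages,
				   u64 commit_pages)
{
	u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
		    BASE_MEM_PROT_GPU_WR;
	u64 gpu_va = 0;
	struct kbase_va_region *reg;

	/* User-initiated allocation, so the MMU update is synchronous
	 * (CALLER_MMU_SYNC is assumed to be the "synchronous" enum value).
	 */
	reg = kbase_mem_alloc(kctx, va_pages, commit_pages, 0 /* extension */,
			      &flags, &gpu_va, CALLER_MMU_SYNC);
	return reg ? 0 : -ENOMEM;
}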
@@ -169,6 +171,7 @@
  * @reg: The GPU region
  * @new_pages: The number of pages after the grow
  * @old_pages: The number of pages before the grow
+ * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
  *
  * Return: 0 on success, -errno on error.
  *
@@ -178,8 +181,9 @@
  * Note: Caller must be holding the region lock.
  */
 int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
-			       struct kbase_va_region *reg,
-			       u64 new_pages, u64 old_pages);
+			       struct kbase_va_region *reg, u64 new_pages,
+			       u64 old_pages,
+			       enum kbase_caller_mmu_sync_info mmu_sync_info);
 
 /**
  * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
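Likewise (editorial sketch, not from the patch): kbase_mem_grow_gpu_mapping() now takes the same hint. A plausible convention, assuming the enum also provides CALLER_MMU_ASYNC, is that a grow driven from a GPU page fault is asynchronous while one driven from an ioctl is synchronous; example_grow() and the from_gpu_fault flag below are hypothetical:

static int example_grow(struct kbase_context *kctx, struct kbase_va_region *reg,
			u64 new_pages, u64 old_pages, bool from_gpu_fault)
{
	/* Hypothetical selection of the synchronicity hint. */
	enum kbase_caller_mmu_sync_info sync_info =
		from_gpu_fault ? CALLER_MMU_ASYNC : CALLER_MMU_SYNC;

	/* The caller must already hold the region lock (see kerneldoc above). */
	return kbase_mem_grow_gpu_mapping(kctx, reg, new_pages, old_pages,
					  sync_info);
}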
@@ -213,6 +217,26 @@
  */
 bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
 
+typedef unsigned int kbase_vmap_flag;
+
+/* Sync operations are needed on beginning and ending of access to kernel-mapped GPU memory.
+ *
+ * This is internal to the struct kbase_vmap_struct and should not be passed in by callers of
+ * kbase_vmap-related functions.
+ */
+#define KBASE_VMAP_FLAG_SYNC_NEEDED (((kbase_vmap_flag)1) << 0)
+
+/* Permanently mapped memory accounting (including enforcing limits) should be done on the
+ * kernel-mapped GPU memory.
+ *
+ * This should be used if the kernel mapping is going to live for a potentially long time, for
+ * example if it will persist after the caller has returned.
+ */
+#define KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING (((kbase_vmap_flag)1) << 1)
+
+/* Set of flags that can be passed into kbase_vmap-related functions */
+#define KBASE_VMAP_INPUT_FLAGS (KBASE_VMAP_FLAG_PERMANENT_MAP_ACCOUNTING)
+
 struct kbase_vmap_struct {
 	off_t offset_in_page;
 	struct kbase_mem_phy_alloc *cpu_alloc;
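Editorial note: the next hunk replaces the old boolean sync_needed field of struct kbase_vmap_struct with this bitmask, so code that used to read the boolean now tests the flag bit instead. A small sketch built only from the definitions added above (example_vmap_needs_sync() itself is not from the patch):

/* Hedged illustration of the bool -> bitmask migration. */
static inline bool example_vmap_needs_sync(const struct kbase_vmap_struct *map)
{
	return (map->flags & KBASE_VMAP_FLAG_SYNC_NEEDED) != 0;
}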
@@ -221,9 +245,55 @@
 	struct tagged_addr *gpu_pages;
 	void *addr;
 	size_t size;
-	bool sync_needed;
+	kbase_vmap_flag flags;
 };
 
+/**
+ * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region or NULL if there isn't one
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, negative -errno on error
+ *
+ * Unmap the shrunk pages from the GPU mapping. Note that the size of the region
+ * itself is unmodified as we still need to reserve the VA, only the page tables
+ * will be modified by this function.
+ */
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg,
+				 u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_vmap_reg - Map part of an existing region into the kernel safely, only if the requested
+ *                  access permissions are supported
+ * @kctx: Context @reg belongs to
+ * @reg: The GPU region to map part of
+ * @gpu_addr: Start address of VA range to map, which must be within @reg
+ * @size: Size of VA range, which when added to @gpu_addr must be within @reg
+ * @prot_request: Flags indicating how the caller will then access the memory
+ * @map: Structure to be given to kbase_vunmap() on freeing
+ * @vmap_flags: Flags of type kbase_vmap_flag
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Variant of kbase_vmap_prot() that can be used given an existing region.
+ *
+ * The caller must satisfy one of the following for @reg:
+ * * It must have been obtained by finding it on the region tracker, and the region lock must not
+ *   have been released in the mean time.
+ * * Or, it must have been refcounted with a call to kbase_va_region_alloc_get(), and the region
+ *   lock is now held again.
+ * * Or, @reg has had NO_USER_FREE set at creation time or under the region lock, and the
+ *   region lock is now held again.
+ *
+ * The acceptable @vmap_flags are those in %KBASE_VMAP_INPUT_FLAGS.
+ *
+ * Refer to kbase_vmap_prot() for more information on the operation of this function.
+ */
+void *kbase_vmap_reg(struct kbase_context *kctx, struct kbase_va_region *reg, u64 gpu_addr,
+		     size_t size, unsigned long prot_request, struct kbase_vmap_struct *map,
+		     kbase_vmap_flag vmap_flags);
 
 /**
  * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
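Editorial sketch, not part of the patch: kbase_vmap_reg() is the region-based counterpart of kbase_vmap_prot() for callers that already hold a region under one of the conditions listed above. The example assumes KBASE_REG_CPU_RD is the read-permission bit used for @prot_request, as with existing kbase_vmap_prot() callers; kbase_vunmap() is the unmapping counterpart named in the @map description; example_peek() is hypothetical:

static int example_peek(struct kbase_context *kctx, struct kbase_va_region *reg,
			u64 gpu_addr, void *dst, size_t size)
{
	struct kbase_vmap_struct map = { 0 };
	void *cpu_addr;

	/* Read-only kernel mapping of a sub-range of @reg; no input flags. */
	cpu_addr = kbase_vmap_reg(kctx, reg, gpu_addr, size, KBASE_REG_CPU_RD,
				  &map, 0);
	if (!cpu_addr)
		return -ENOMEM;

	memcpy(dst, cpu_addr, size);
	kbase_vunmap(kctx, &map);
	return 0;
}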
@@ -254,7 +324,7 @@
  * The checks are also there to help catch access errors on memory where
  * security is not a concern: imported memory that is always RW, and memory
  * that was allocated and owned by the process attached to @kctx. In this case,
- * it helps to identify memory that was was mapped with the wrong access type.
+ * it helps to identify memory that was mapped with the wrong access type.
  *
  * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
  * where either the security of memory is solely dependent on those flags, or
@@ -422,12 +492,12 @@
 /**
  * kbase_get_cache_line_alignment - Return cache line alignment
  *
+ * @kbdev: Device pointer.
+ *
  * Helper function to return the maximum cache line alignment considering
  * both CPU and GPU cache sizes.
  *
  * Return: CPU and GPU cache line alignment, in bytes.
- *
- * @kbdev: Device pointer.
  */
 u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev);
 
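For illustration only (not from the patch): a helper like this is typically used to pad sizes or offsets so CPU- and GPU-visible data do not share a cache line; ALIGN() is the standard kernel macro and example_pad_to_cache_line() is hypothetical:

static inline size_t example_pad_to_cache_line(struct kbase_device *kbdev,
					       size_t size)
{
	/* Round up to the larger of the CPU and GPU cache line sizes. */
	return ALIGN(size, kbase_get_cache_line_alignment(kbdev));
}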
@@ -435,18 +505,7 @@
 static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pfn, pgprot_t pgprot)
 {
-	int err;
-
-#if ((KERNEL_VERSION(4, 4, 147) >= LINUX_VERSION_CODE) || \
-     ((KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) && \
-      (KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE)))
-	if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
-		return VM_FAULT_SIGBUS;
-
-	err = vm_insert_pfn(vma, addr, pfn);
-#else
-	err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
-#endif
+	int err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
 
 	if (unlikely(err == -ENOMEM))
 		return VM_FAULT_OOM;