.. | ..
1 | 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 | 2 | /*
3 | 3 | *
4 | | - * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
| 4 | + * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
5 | 5 | *
6 | 6 | * This program is free software and is provided to you under the terms of the
7 | 7 | * GNU General Public License version 2 as published by the Free Software
.. | ..
22 | 22 | #ifndef _KBASE_MMU_H_
23 | 23 | #define _KBASE_MMU_H_
24 | 24 |
| 25 | +#include <uapi/gpu/arm/bifrost/mali_base_kernel.h>
| 26 | +
| 27 | +#define KBASE_MMU_PAGE_ENTRIES 512
| 28 | +#define KBASE_MMU_INVALID_PGD_ADDRESS (~(phys_addr_t)0)
| 29 | +
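A quick arithmetic aside on the new defines: with the usual 8-byte page table descriptors, KBASE_MMU_PAGE_ENTRIES implies that one page-table level fills exactly one 4 KiB page. That sizing is an assumption about the wider driver rather than something this header states, so the check below is illustrative only.

```c
/* Illustrative sanity check, not part of the driver. Assumes 8-byte page
 * table entries and 4 KiB page-table pages, which matches the common kbase
 * configuration but is not guaranteed by this header alone.
 */
#include <linux/build_bug.h>
#include <linux/types.h>

#define KBASE_MMU_PAGE_ENTRIES 512 /* value added by this change */

static_assert(KBASE_MMU_PAGE_ENTRIES * sizeof(u64) == 4096,
	      "one page-table level is expected to fill a single 4 KiB page");
```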
| 30 | +struct kbase_context;
| 31 | +struct kbase_mmu_table;
| 32 | +struct kbase_va_region;
| 33 | +
| 34 | +/**
| 35 | + * enum kbase_caller_mmu_sync_info - MMU-synchronous caller info.
| 36 | + * A pointer to this type is passed down from the outermost callers in the kbase
| 37 | + * module, where the information resides as to the synchronous / asynchronous
| 38 | + * nature of the call flow with respect to MMU operations, i.e. does the call flow
| 39 | + * relate to existing GPU work, or does it come from requests (such as ioctl) from
| 40 | + * user-space, power management, etc.
| 41 | + *
| 42 | + * @CALLER_MMU_UNSET_SYNCHRONICITY: Default value, which must be invalid to avoid an
| 43 | + * accidental choice of a 'valid' value
| 44 | + * @CALLER_MMU_SYNC: Arbitrary value for 'synchronous' that isn't easy to choose by accident
| 45 | + * @CALLER_MMU_ASYNC: Also hard to choose by accident
| 46 | + */
| 47 | +enum kbase_caller_mmu_sync_info {
| 48 | + CALLER_MMU_UNSET_SYNCHRONICITY,
| 49 | + CALLER_MMU_SYNC = 0x02,
| 50 | + CALLER_MMU_ASYNC
| 51 | +};
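To make the intent concrete, here is a hedged sketch of how an outer caller might pick the value it passes down; the helper and its call sites are invented for illustration, only the enum itself comes from the header.

```c
/* Hypothetical helper, for illustration only. Code servicing a user-space
 * request (e.g. an ioctl) is asynchronous with respect to MMU operations,
 * whereas code running as part of already-submitted GPU work is synchronous.
 */
static enum kbase_caller_mmu_sync_info
example_choose_sync_info(bool relates_to_existing_gpu_work)
{
	return relates_to_existing_gpu_work ? CALLER_MMU_SYNC : CALLER_MMU_ASYNC;
}
```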
| 52 | +
| 53 | +/**
| 54 | + * enum kbase_mmu_op_type - enum for MMU operations
| 55 | + * @KBASE_MMU_OP_NONE: To help catch uninitialized struct
| 56 | + * @KBASE_MMU_OP_FIRST: The lower boundary of enum
| 57 | + * @KBASE_MMU_OP_LOCK: Lock memory region
| 58 | + * @KBASE_MMU_OP_UNLOCK: Unlock memory region
| 59 | + * @KBASE_MMU_OP_FLUSH_PT: Flush page table (CLN+INV L2 only)
| 60 | + * @KBASE_MMU_OP_FLUSH_MEM: Flush memory (CLN+INV L2+LSC)
| 61 | + * @KBASE_MMU_OP_COUNT: The upper boundary of enum
| 62 | + */
| 63 | +enum kbase_mmu_op_type {
| 64 | + KBASE_MMU_OP_NONE = 0, /* Must be zero */
| 65 | + KBASE_MMU_OP_FIRST, /* Must be the first non-zero op */
| 66 | + KBASE_MMU_OP_LOCK = KBASE_MMU_OP_FIRST,
| 67 | + KBASE_MMU_OP_UNLOCK,
| 68 | + KBASE_MMU_OP_FLUSH_PT,
| 69 | + KBASE_MMU_OP_FLUSH_MEM,
| 70 | + KBASE_MMU_OP_COUNT /* Must be the last in enum */
| 71 | +};
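A small sketch (not driver code) showing the typical use of the boundary enumerators: KBASE_MMU_OP_FIRST and KBASE_MMU_OP_COUNT delimit the valid operations, while KBASE_MMU_OP_NONE flags an uninitialized value.

```c
/* Illustrative helper only: validate an MMU op and map it to a printable name. */
static const char *example_mmu_op_name(enum kbase_mmu_op_type op)
{
	if (op < KBASE_MMU_OP_FIRST || op >= KBASE_MMU_OP_COUNT)
		return "none/invalid";

	switch (op) {
	case KBASE_MMU_OP_LOCK:
		return "LOCK";
	case KBASE_MMU_OP_UNLOCK:
		return "UNLOCK";
	case KBASE_MMU_OP_FLUSH_PT:
		return "FLUSH_PT";
	case KBASE_MMU_OP_FLUSH_MEM:
		return "FLUSH_MEM";
	default:
		return "none/invalid";
	}
}
```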
| 72 | +
25 | 73 | /**
26 | 74 | * kbase_mmu_as_init() - Initialising GPU address space object.
| 75 | + *
| 76 | + * @kbdev: The kbase device structure for the device (must be a valid pointer).
| 77 | + * @i: Array index of address space object.
27 | 78 | *
28 | 79 | * This is called from device probe to initialise an address space object
29 | 80 | * of the device.
30 | 81 | *
31 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer).
32 | | - * @i: Array index of address space object.
33 | | - *
34 | 82 | * Return: 0 on success and non-zero value on failure.
35 | 83 | */
36 | | -int kbase_mmu_as_init(struct kbase_device *kbdev, int i);
| 84 | +int kbase_mmu_as_init(struct kbase_device *kbdev, unsigned int i);
37 | 85 |
38 | 86 | /**
39 | 87 | * kbase_mmu_as_term() - Terminate address space object.
40 | 88 | *
41 | | - * This is called upon device termination to destroy
42 | | - * the address space object of the device.
43 | | - *
44 | 89 | * @kbdev: The kbase device structure for the device (must be a valid pointer).
45 | 90 | * @i: Array index of address space object.
| 91 | + *
| 92 | + * This is called upon device termination to destroy
| 93 | + * the address space object of the device.
46 | 94 | */
47 | | -void kbase_mmu_as_term(struct kbase_device *kbdev, int i);
| 95 | +void kbase_mmu_as_term(struct kbase_device *kbdev, unsigned int i);
48 | 96 |
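A minimal sketch of the probe-time pattern these two declarations imply; the wrapper function and the nr_as parameter are invented for illustration, and note that both functions now take an unsigned int index.

```c
/* Illustration only: initialise every GPU address space object at probe time
 * and unwind on failure. The caller is assumed to know the number of address
 * spaces (nr_as); that plumbing is not part of this header.
 */
static int example_init_address_spaces(struct kbase_device *kbdev, unsigned int nr_as)
{
	unsigned int i;

	for (i = 0; i < nr_as; i++) {
		int err = kbase_mmu_as_init(kbdev, i);

		if (err) {
			while (i-- > 0)
				kbase_mmu_as_term(kbdev, i);
			return err;
		}
	}
	return 0;
}
```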
49 | 97 | /**
50 | 98 | * kbase_mmu_init - Initialise an object representing GPU page tables
51 | | - *
52 | | - * The structure should be terminated using kbase_mmu_term()
53 | 99 | *
54 | 100 | * @kbdev: Instance of GPU platform device, allocated from the probe method.
55 | 101 | * @mmut: GPU page tables to be initialized.
.. | ..
57 | 103 | * is not associated with a context.
58 | 104 | * @group_id: The physical group ID from which to allocate GPU page tables.
59 | 105 | * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
| 106 | + *
| 107 | + * The structure should be terminated using kbase_mmu_term()
60 | 108 | *
61 | 109 | * Return: 0 if successful, otherwise a negative error code.
62 | 110 | */
.. | ..
66 | 114 | /**
67 | 115 | * kbase_mmu_interrupt - Process an MMU interrupt.
68 | 116 | *
69 | | - * Process the MMU interrupt that was reported by the &kbase_device.
70 | | - *
71 | 117 | * @kbdev: Pointer to the kbase device for which the interrupt happened.
72 | 118 | * @irq_stat: Value of the MMU_IRQ_STATUS register.
| 119 | + *
| 120 | + * Process the MMU interrupt that was reported by the &kbase_device.
73 | 121 | */
74 | 122 | void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
75 | 123 |
76 | 124 | /**
77 | 125 | * kbase_mmu_term - Terminate an object representing GPU page tables
78 | 126 | *
79 | | - * This will free any page tables that have been allocated
80 | | - *
81 | 127 | * @kbdev: Instance of GPU platform device, allocated from the probe method.
82 | 128 | * @mmut: GPU page tables to be destroyed.
| 129 | + *
| 130 | + * This will free any page tables that have been allocated
83 | 131 | */
84 | 132 | void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
85 | 133 |
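The kbase_mmu_init() declaration itself is elided from this hunk, so the sketch below assumes the prototype implied by its kernel-doc parameters (kbdev, mmut, kctx, group_id); it only illustrates the init/term pairing that the documentation calls for.

```c
/* Illustration only: pair every successful kbase_mmu_init() with a
 * kbase_mmu_term(). The kbase_mmu_init() prototype is assumed from its
 * kernel-doc above, not from a declaration visible in this excerpt.
 */
static int example_page_table_lifecycle(struct kbase_device *kbdev,
					struct kbase_mmu_table *mmut,
					struct kbase_context *kctx, int group_id)
{
	int err = kbase_mmu_init(kbdev, mmut, kctx, group_id);

	if (err)
		return err;

	/* ... insert and tear down mappings via the APIs declared below ... */

	kbase_mmu_term(kbdev, mmut);
	return 0;
}
```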
.. | ..
103 | 151 | u64 kbase_mmu_create_ate(struct kbase_device *kbdev,
104 | 152 | struct tagged_addr phy, unsigned long flags, int level, int group_id);
105 | 153 |
106 | | -int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
107 | | - struct kbase_mmu_table *mmut,
108 | | - const u64 start_vpfn,
109 | | - struct tagged_addr *phys, size_t nr,
110 | | - unsigned long flags, int group_id);
111 | | -int kbase_mmu_insert_pages(struct kbase_device *kbdev,
112 | | - struct kbase_mmu_table *mmut, u64 vpfn,
113 | | - struct tagged_addr *phys, size_t nr,
114 | | - unsigned long flags, int as_nr, int group_id);
115 | | -int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
116 | | - struct tagged_addr phys, size_t nr,
117 | | - unsigned long flags, int group_id);
| 154 | +int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
| 155 | + u64 vpfn, struct tagged_addr *phys, size_t nr,
| 156 | + unsigned long flags, int group_id, u64 *dirty_pgds,
| 157 | + struct kbase_va_region *reg, bool ignore_page_migration);
| 158 | +int kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn,
| 159 | + struct tagged_addr *phys, size_t nr, unsigned long flags, int as_nr,
| 160 | + int group_id, enum kbase_caller_mmu_sync_info mmu_sync_info,
| 161 | + struct kbase_va_region *reg, bool ignore_page_migration);
| 162 | +int kbase_mmu_insert_imported_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
| 163 | + u64 vpfn, struct tagged_addr *phys, size_t nr,
| 164 | + unsigned long flags, int as_nr, int group_id,
| 165 | + enum kbase_caller_mmu_sync_info mmu_sync_info,
| 166 | + struct kbase_va_region *reg);
| 167 | +int kbase_mmu_insert_aliased_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
| 168 | + u64 vpfn, struct tagged_addr *phys, size_t nr,
| 169 | + unsigned long flags, int as_nr, int group_id,
| 170 | + enum kbase_caller_mmu_sync_info mmu_sync_info,
| 171 | + struct kbase_va_region *reg);
| 172 | +int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, struct tagged_addr phys,
| 173 | + size_t nr, unsigned long flags, int group_id,
| 174 | + enum kbase_caller_mmu_sync_info mmu_sync_info,
| 175 | + bool ignore_page_migration);
| 176 | +int kbase_mmu_insert_single_imported_page(struct kbase_context *kctx, u64 vpfn,
| 177 | + struct tagged_addr phys, size_t nr, unsigned long flags,
| 178 | + int group_id,
| 179 | + enum kbase_caller_mmu_sync_info mmu_sync_info);
| 180 | +int kbase_mmu_insert_single_aliased_page(struct kbase_context *kctx, u64 vpfn,
| 181 | + struct tagged_addr phys, size_t nr, unsigned long flags,
| 182 | + int group_id,
| 183 | + enum kbase_caller_mmu_sync_info mmu_sync_info);
118 | 184 |
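To show how the widened kbase_mmu_insert_pages() signature is used, here is a hedged sketch; everything except the prototype above (the wrapper, its arguments, and the choice of CALLER_MMU_ASYNC) is invented for illustration.

```c
/* Illustration only: map a run of pages for a user-space request. The request
 * is not tied to already-running GPU work, hence CALLER_MMU_ASYNC, and normal
 * page-migration bookkeeping is kept (ignore_page_migration = false).
 */
static int example_map_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
			     struct kbase_va_region *reg, u64 vpfn,
			     struct tagged_addr *phys, size_t nr_pages,
			     unsigned long flags, int as_nr, int group_id)
{
	return kbase_mmu_insert_pages(kbdev, mmut, vpfn, phys, nr_pages, flags,
				      as_nr, group_id, CALLER_MMU_ASYNC, reg,
				      false /* ignore_page_migration */);
}
```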
119 | | -int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
120 | | - struct kbase_mmu_table *mmut, u64 vpfn,
121 | | - size_t nr, int as_nr);
| 185 | +/**
| 186 | + * kbase_mmu_teardown_pages - Remove GPU virtual addresses from the MMU page table
| 187 | + *
| 188 | + * @kbdev: Pointer to kbase device.
| 189 | + * @mmut: Pointer to GPU MMU page table.
| 190 | + * @vpfn: Start page frame number of the GPU virtual pages to unmap.
| 191 | + * @phys: Array of physical pages currently mapped to the virtual
| 192 | + * pages to unmap, or NULL. This is used for GPU cache maintenance
| 193 | + * and page migration support.
| 194 | + * @nr_phys_pages: Number of physical pages to flush.
| 195 | + * @nr_virt_pages: Number of virtual pages whose PTEs should be destroyed.
| 196 | + * @as_nr: Address space number, for GPU cache maintenance operations
| 197 | + * that happen outside a specific kbase context.
| 198 | + * @ignore_page_migration: Whether page migration metadata should be ignored.
| 199 | + *
| 200 | + * We actually discard the ATE and free the page table pages if no valid entries
| 201 | + * exist in the PGD.
| 202 | + *
| 203 | + * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
| 204 | + * currently scheduled into the runpool, and so potentially uses a lot of locks.
| 205 | + * These locks must be taken in the correct order with respect to others
| 206 | + * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
| 207 | + * information.
| 208 | + *
| 209 | + * The @p phys pointer to physical pages is not necessary for unmapping virtual memory,
| 210 | + * but it is used for fine-grained GPU cache maintenance. If @p phys is NULL,
| 211 | + * GPU cache maintenance will be done as usual, that is, by invalidating the whole GPU
| 212 | + * cache instead of specific physical address ranges.
| 213 | + *
| 214 | + * Return: 0 on success, otherwise an error code.
| 215 | + */
| 216 | +int kbase_mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn,
| 217 | + struct tagged_addr *phys, size_t nr_phys_pages, size_t nr_virt_pages,
| 218 | + int as_nr, bool ignore_page_migration);
| 219 | +
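A short sketch of the two documented ways of calling the reworked teardown; the wrapper and its values are invented, but the NULL-versus-array distinction comes straight from the kernel-doc above.

```c
/* Illustration only: unmap nr_pages GPU virtual pages. When a physical page
 * array is available, pass it so cache maintenance can target those pages;
 * otherwise pass NULL and accept a whole-cache invalidation.
 */
static int example_unmap_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
			       u64 vpfn, struct tagged_addr *phys, size_t nr_pages,
			       int as_nr)
{
	if (phys)
		return kbase_mmu_teardown_pages(kbdev, mmut, vpfn, phys, nr_pages,
						nr_pages, as_nr, false);

	return kbase_mmu_teardown_pages(kbdev, mmut, vpfn, NULL, 0, nr_pages,
					as_nr, false);
}
```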
122 | 220 | int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
123 | 221 | struct tagged_addr *phys, size_t nr,
124 | 222 | unsigned long flags, int const group_id);
| 223 | +#if MALI_USE_CSF
| 224 | +/**
| 225 | + * kbase_mmu_update_csf_mcu_pages - Update MCU mappings with changes of phys and flags
| 226 | + *
| 227 | + * @kbdev: Pointer to kbase device.
| 228 | + * @vpfn: Virtual PFN (Page Frame Number) of the first page to update
| 229 | + * @phys: Pointer to the array of tagged physical addresses of the physical
| 230 | + * pages that are pointed to by the page table entries (that need to
| 231 | + * be updated).
| 232 | + * @nr: Number of pages to update
| 233 | + * @flags: Flags
| 234 | + * @group_id: The physical memory group in which the page was allocated.
| 235 | + * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
| 236 | + *
| 237 | + * Return: 0 on success, otherwise an error code.
| 238 | + */
| 239 | +int kbase_mmu_update_csf_mcu_pages(struct kbase_device *kbdev, u64 vpfn, struct tagged_addr *phys,
| 240 | + size_t nr, unsigned long flags, int const group_id);
| 241 | +#endif
| 242 | +
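The CSF-only helper mirrors kbase_mmu_update_pages() but operates on the MCU (firmware) address space, which has no kbase context. A hedged sketch of how a caller might pick between the two; the wrapper and the kctx-is-NULL convention are assumptions made for illustration.

```c
#include <linux/errno.h>

/* Illustration only: update the mapping of @nr already-mapped pages. With no
 * user context (kctx == NULL), fall back to the MCU variant on CSF builds.
 */
static int example_update_mapping(struct kbase_device *kbdev, struct kbase_context *kctx,
				  u64 vpfn, struct tagged_addr *phys, size_t nr,
				  unsigned long flags, int group_id)
{
	if (kctx)
		return kbase_mmu_update_pages(kctx, vpfn, phys, nr, flags, group_id);
#if MALI_USE_CSF
	return kbase_mmu_update_csf_mcu_pages(kbdev, vpfn, phys, nr, flags, group_id);
#else
	return -EINVAL;
#endif
}
```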
| 243 | +/**
| 244 | + * kbase_mmu_migrate_page - Migrate GPU mappings and content between memory pages
| 245 | + *
| 246 | + * @old_phys: Old physical page to be replaced.
| 247 | + * @new_phys: New physical page used to replace old physical page.
| 248 | + * @old_dma_addr: DMA address of the old page.
| 249 | + * @new_dma_addr: DMA address of the new page.
| 250 | + * @level: MMU page table level of the provided PGD.
| 251 | + *
| 252 | + * The page migration process consists of two main steps:
| 253 | + *
| 254 | + * 1) Copy the content of the old page to the new page.
| 255 | + * 2) Remap the virtual page, that is: replace either the ATE (if the old page
| 256 | + * was a regular page) or the PTE (if the old page was used as a PGD) in the
| 257 | + * MMU page table with the new page.
| 258 | + *
| 259 | + * During the process, the MMU region is locked to prevent GPU access to the
| 260 | + * virtual memory page that is being remapped.
| 261 | + *
| 262 | + * Before copying the content of the old page to the new page and while the
| 263 | + * MMU region is locked, a GPU cache flush is performed to make sure that
| 264 | + * pending GPU writes are finalized to the old page before copying.
| 265 | + * That is necessary because otherwise there's a risk that GPU writes might
| 266 | + * be finalized to the old page, and not the new page, after migration.
| 267 | + * The MMU region is unlocked only at the end of the migration operation.
| 268 | + *
| 269 | + * Return: 0 on success, otherwise an error code.
| 270 | + */
| 271 | +int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_phys,
| 272 | + dma_addr_t old_dma_addr, dma_addr_t new_dma_addr, int level);
| 273 | +
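A caller-side sketch for a regular data page; the MIDGARD_MMU_BOTTOMLEVEL constant is assumed from the wider kbase code base and is not defined in this header, and the surrounding allocation/DMA plumbing is omitted.

```c
/* Illustration only: migrate one regular data page. The bottom page-table
 * level is passed because an ATE (not a PTE of a higher-level PGD) points at
 * the old page; MIDGARD_MMU_BOTTOMLEVEL is an assumption about the driver.
 */
static int example_migrate_data_page(struct tagged_addr old_phys, dma_addr_t old_dma_addr,
				     struct tagged_addr new_phys, dma_addr_t new_dma_addr)
{
	return kbase_mmu_migrate_page(old_phys, new_phys, old_dma_addr, new_dma_addr,
				      MIDGARD_MMU_BOTTOMLEVEL);
}
```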
| 274 | +/**
| 275 | + * kbase_mmu_flush_pa_range() - Flush physical address range from the GPU caches
| 276 | + *
| 277 | + * @kbdev: Instance of GPU platform device, allocated from the probe method.
| 278 | + * @kctx: Pointer to kbase context; it can be NULL if the physical address
| 279 | + * range is not associated with a User-created context.
| 280 | + * @phys: Starting address of the physical range to start the operation on.
| 281 | + * @size: Number of bytes to work on.
| 282 | + * @flush_op: Type of cache flush operation to perform.
| 283 | + *
| 284 | + * Issue a cache flush command on a physical address range. This function won't
| 285 | + * perform any flush if the GPU doesn't support the FLUSH_PA_RANGE command. The
| 286 | + * flush is performed only if the context has a JASID assigned to it.
| 287 | + * This function is basically a wrapper for kbase_gpu_cache_flush_pa_range_and_busy_wait().
| 288 | + */
| 289 | +void kbase_mmu_flush_pa_range(struct kbase_device *kbdev, struct kbase_context *kctx,
| 290 | + phys_addr_t phys, size_t size,
| 291 | + enum kbase_mmu_op_type flush_op);
125 | 292 |
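A small usage sketch tying this to the enum above: after editing one page-table page, flush just that page's bytes with KBASE_MMU_OP_FLUSH_PT. The 8-byte-entry sizing is the same assumption flagged earlier, and the wrapper itself is invented.

```c
/* Illustration only: flush the physical range covering a single page-table
 * page after modifying its entries. Assumes 8-byte entries, so the range is
 * KBASE_MMU_PAGE_ENTRIES * sizeof(u64) bytes.
 */
static void example_flush_one_pgd(struct kbase_device *kbdev, struct kbase_context *kctx,
				  phys_addr_t pgd_phys)
{
	kbase_mmu_flush_pa_range(kbdev, kctx, pgd_phys,
				 KBASE_MMU_PAGE_ENTRIES * sizeof(u64),
				 KBASE_MMU_OP_FLUSH_PT);
}
```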
126 | 293 | /**
127 | 294 | * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
128 | 295 | *
129 | | - * Process the bus fault interrupt that was reported for a particular GPU
130 | | - * address space.
131 | | - *
132 | 296 | * @kbdev: Pointer to the kbase device for which bus fault was reported.
133 | 297 | * @status: Value of the GPU_FAULTSTATUS register.
134 | 298 | * @as_nr: GPU address space for which the bus fault occurred.
| 299 | + *
| 300 | + * Process the bus fault interrupt that was reported for a particular GPU
| 301 | + * address space.
135 | 302 | *
136 | 303 | * Return: zero if the operation was successful, non-zero otherwise.
137 | 304 | */
.. | ..
140 | 307 |
141 | 308 | /**
142 | 309 | * kbase_mmu_gpu_fault_interrupt() - Report a GPU fault.
| 310 | + *
143 | 311 | * @kbdev: Kbase device pointer
144 | 312 | * @status: GPU fault status
145 | 313 | * @as_nr: Faulty address space
.. | ..
152 | 320 | void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
153 | 321 | u32 as_nr, u64 address, bool as_valid);
154 | 322 |
| 323 | +/**
| 324 | + * kbase_context_mmu_group_id_get - Decode a memory group ID from
| 325 | + * base_context_create_flags
| 326 | + *
| 327 | + * @flags: Bitmask of flags to pass to base_context_init.
| 328 | + *
| 329 | + * Memory allocated for GPU page tables will come from the returned group.
| 330 | + *
| 331 | + * Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1).
| 332 | + */
| 333 | +static inline int
| 334 | +kbase_context_mmu_group_id_get(base_context_create_flags const flags)
| 335 | +{
| 336 | + KBASE_DEBUG_ASSERT(flags ==
| 337 | + (flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS));
| 338 | + return (int)BASE_CONTEXT_MMU_GROUP_ID_GET(flags);
| 339 | +}
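A hedged usage sketch: BASE_CONTEXT_MMU_GROUP_ID_SET() is assumed to be the encoding counterpart, in the base UAPI header included above, of the _GET() macro this helper wraps; if the real name differs, only that line changes.

```c
/* Illustration only: decode the page-table memory group from context-creation
 * flags. BASE_CONTEXT_MMU_GROUP_ID_SET() is an assumed counterpart of the
 * _GET() macro and is not declared in this header.
 */
static int example_group_for_new_context(void)
{
	const base_context_create_flags flags = BASE_CONTEXT_MMU_GROUP_ID_SET(2);

	/* GPU page tables for this context would be allocated from group 2 */
	return kbase_context_mmu_group_id_get(flags);
}
```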
| 340 | +
155 | 341 | #endif /* _KBASE_MMU_H_ */