forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/gpu/arm/bifrost/mmu/mali_kbase_mmu.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -22,34 +22,80 @@
 #ifndef _KBASE_MMU_H_
 #define _KBASE_MMU_H_
 
+#include <uapi/gpu/arm/bifrost/mali_base_kernel.h>
+
+#define KBASE_MMU_PAGE_ENTRIES 512
+#define KBASE_MMU_INVALID_PGD_ADDRESS (~(phys_addr_t)0)
+
+struct kbase_context;
+struct kbase_mmu_table;
+struct kbase_va_region;
+
+/**
+ * enum kbase_caller_mmu_sync_info - MMU-synchronous caller info.
+ * A pointer to this type is passed down from the outer-most callers in the kbase
+ * module - where the information resides as to the synchronous / asynchronous
+ * nature of the call flow, with respect to MMU operations. ie - does the call flow relate to
+ * existing GPU work does it come from requests (like ioctl) from user-space, power management,
+ * etc.
+ *
+ * @CALLER_MMU_UNSET_SYNCHRONICITY: default value must be invalid to avoid accidental choice
+ * of a 'valid' value
+ * @CALLER_MMU_SYNC: Arbitrary value for 'synchronous that isn't easy to choose by accident
+ * @CALLER_MMU_ASYNC: Also hard to choose by accident
+ */
+enum kbase_caller_mmu_sync_info {
+	CALLER_MMU_UNSET_SYNCHRONICITY,
+	CALLER_MMU_SYNC = 0x02,
+	CALLER_MMU_ASYNC
+};
+
+/**
+ * enum kbase_mmu_op_type - enum for MMU operations
+ * @KBASE_MMU_OP_NONE: To help catch uninitialized struct
+ * @KBASE_MMU_OP_FIRST: The lower boundary of enum
+ * @KBASE_MMU_OP_LOCK: Lock memory region
+ * @KBASE_MMU_OP_UNLOCK: Unlock memory region
+ * @KBASE_MMU_OP_FLUSH_PT: Flush page table (CLN+INV L2 only)
+ * @KBASE_MMU_OP_FLUSH_MEM: Flush memory (CLN+INV L2+LSC)
+ * @KBASE_MMU_OP_COUNT: The upper boundary of enum
+ */
+enum kbase_mmu_op_type {
+	KBASE_MMU_OP_NONE = 0, /* Must be zero */
+	KBASE_MMU_OP_FIRST, /* Must be the first non-zero op */
+	KBASE_MMU_OP_LOCK = KBASE_MMU_OP_FIRST,
+	KBASE_MMU_OP_UNLOCK,
+	KBASE_MMU_OP_FLUSH_PT,
+	KBASE_MMU_OP_FLUSH_MEM,
+	KBASE_MMU_OP_COUNT /* Must be the last in enum */
+};
+
 /**
  * kbase_mmu_as_init() - Initialising GPU address space object.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer).
+ * @i: Array index of address space object.
  *
  * This is called from device probe to initialise an address space object
  * of the device.
  *
- * @kbdev: The kbase device structure for the device (must be a valid pointer).
- * @i: Array index of address space object.
- *
  * Return: 0 on success and non-zero value on failure.
  */
-int kbase_mmu_as_init(struct kbase_device *kbdev, int i);
+int kbase_mmu_as_init(struct kbase_device *kbdev, unsigned int i);
 
 /**
  * kbase_mmu_as_term() - Terminate address space object.
  *
- * This is called upon device termination to destroy
- * the address space object of the device.
- *
  * @kbdev: The kbase device structure for the device (must be a valid pointer).
 * @i: Array index of address space object.
+ *
+ * This is called upon device termination to destroy
+ * the address space object of the device.
  */
-void kbase_mmu_as_term(struct kbase_device *kbdev, int i);
+void kbase_mmu_as_term(struct kbase_device *kbdev, unsigned int i);
 
 /**
  * kbase_mmu_init - Initialise an object representing GPU page tables
- *
- * The structure should be terminated using kbase_mmu_term()
  *
  * @kbdev: Instance of GPU platform device, allocated from the probe method.
  * @mmut: GPU page tables to be initialized.
@@ -57,6 +103,8 @@
  * is not associated with a context.
  * @group_id: The physical group ID from which to allocate GPU page tables.
  * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * The structure should be terminated using kbase_mmu_term()
  *
  * Return: 0 if successful, otherwise a negative error code.
  */
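
Editor's note (not part of the patch): the two enums this change introduces, enum kbase_caller_mmu_sync_info and enum kbase_mmu_op_type, both reserve a sentinel (CALLER_MMU_UNSET_SYNCHRONICITY, KBASE_MMU_OP_NONE) so that a zero-initialised value is detectably invalid, and kbase_mmu_op_type additionally exposes FIRST/COUNT boundaries for range checks. A minimal sketch of how a caller might use those boundaries follows; it assumes this header is on the include path, and the helper names are hypothetical rather than part of the driver.

#include <linux/types.h>
#include "mali_kbase_mmu.h"	/* enum kbase_mmu_op_type, added by this patch */

/* Reject the zero sentinel (KBASE_MMU_OP_NONE) and anything at or past the
 * KBASE_MMU_OP_COUNT upper boundary. */
static inline bool mmu_op_is_valid(enum kbase_mmu_op_type op)
{
	return op >= KBASE_MMU_OP_FIRST && op < KBASE_MMU_OP_COUNT;
}

/* Distinguish the two flush variants (L2 only vs. L2 + LSC) from the
 * LOCK/UNLOCK region operations. */
static inline bool mmu_op_is_flush(enum kbase_mmu_op_type op)
{
	return op == KBASE_MMU_OP_FLUSH_PT || op == KBASE_MMU_OP_FLUSH_MEM;
}
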
@@ -66,20 +114,20 @@
 /**
  * kbase_mmu_interrupt - Process an MMU interrupt.
  *
- * Process the MMU interrupt that was reported by the &kbase_device.
- *
  * @kbdev: Pointer to the kbase device for which the interrupt happened.
  * @irq_stat: Value of the MMU_IRQ_STATUS register.
+ *
+ * Process the MMU interrupt that was reported by the &kbase_device.
  */
 void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
 
 /**
  * kbase_mmu_term - Terminate an object representing GPU page tables
  *
- * This will free any page tables that have been allocated
- *
  * @kbdev: Instance of GPU platform device, allocated from the probe method.
  * @mmut: GPU page tables to be destroyed.
+ *
+ * This will free any page tables that have been allocated
  */
 void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
 
@@ -103,35 +151,154 @@
 u64 kbase_mmu_create_ate(struct kbase_device *kbdev,
 	struct tagged_addr phy, unsigned long flags, int level, int group_id);
 
-int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
-	struct kbase_mmu_table *mmut,
-	const u64 start_vpfn,
-	struct tagged_addr *phys, size_t nr,
-	unsigned long flags, int group_id);
-int kbase_mmu_insert_pages(struct kbase_device *kbdev,
-	struct kbase_mmu_table *mmut, u64 vpfn,
-	struct tagged_addr *phys, size_t nr,
-	unsigned long flags, int as_nr, int group_id);
-int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
-	struct tagged_addr phys, size_t nr,
-	unsigned long flags, int group_id);
+int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+	u64 vpfn, struct tagged_addr *phys, size_t nr,
+	unsigned long flags, int group_id, u64 *dirty_pgds,
+	struct kbase_va_region *reg, bool ignore_page_migration);
+int kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn,
+	struct tagged_addr *phys, size_t nr, unsigned long flags, int as_nr,
+	int group_id, enum kbase_caller_mmu_sync_info mmu_sync_info,
+	struct kbase_va_region *reg, bool ignore_page_migration);
+int kbase_mmu_insert_imported_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+	u64 vpfn, struct tagged_addr *phys, size_t nr,
+	unsigned long flags, int as_nr, int group_id,
+	enum kbase_caller_mmu_sync_info mmu_sync_info,
+	struct kbase_va_region *reg);
+int kbase_mmu_insert_aliased_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+	u64 vpfn, struct tagged_addr *phys, size_t nr,
+	unsigned long flags, int as_nr, int group_id,
+	enum kbase_caller_mmu_sync_info mmu_sync_info,
+	struct kbase_va_region *reg);
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, struct tagged_addr phys,
+	size_t nr, unsigned long flags, int group_id,
+	enum kbase_caller_mmu_sync_info mmu_sync_info,
+	bool ignore_page_migration);
+int kbase_mmu_insert_single_imported_page(struct kbase_context *kctx, u64 vpfn,
+	struct tagged_addr phys, size_t nr, unsigned long flags,
+	int group_id,
+	enum kbase_caller_mmu_sync_info mmu_sync_info);
+int kbase_mmu_insert_single_aliased_page(struct kbase_context *kctx, u64 vpfn,
+	struct tagged_addr phys, size_t nr, unsigned long flags,
+	int group_id,
+	enum kbase_caller_mmu_sync_info mmu_sync_info);
 
-int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
-	struct kbase_mmu_table *mmut, u64 vpfn,
-	size_t nr, int as_nr);
+/**
+ * kbase_mmu_teardown_pages - Remove GPU virtual addresses from the MMU page table
+ *
+ * @kbdev: Pointer to kbase device.
+ * @mmut: Pointer to GPU MMU page table.
+ * @vpfn: Start page frame number of the GPU virtual pages to unmap.
+ * @phys: Array of physical pages currently mapped to the virtual
+ * pages to unmap, or NULL. This is used for GPU cache maintenance
+ * and page migration support.
+ * @nr_phys_pages: Number of physical pages to flush.
+ * @nr_virt_pages: Number of virtual pages whose PTEs should be destroyed.
+ * @as_nr: Address space number, for GPU cache maintenance operations
+ * that happen outside a specific kbase context.
+ * @ignore_page_migration: Whether page migration metadata should be ignored.
+ *
+ * We actually discard the ATE and free the page table pages if no valid entries
+ * exist in PGD.
+ *
+ * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
+ * currently scheduled into the runpool, and so potentially uses a lot of locks.
+ * These locks must be taken in the correct order with respect to others
+ * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
+ * information.
+ *
+ * The @p phys pointer to physical pages is not necessary for unmapping virtual memory,
+ * but it is used for fine-grained GPU cache maintenance. If @p phys is NULL,
+ * GPU cache maintenance will be done as usual, that is invalidating the whole GPU caches
+ * instead of specific physical address ranges.
+ *
+ * Return: 0 on success, otherwise an error code.
+ */
+int kbase_mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn,
+	struct tagged_addr *phys, size_t nr_phys_pages, size_t nr_virt_pages,
+	int as_nr, bool ignore_page_migration);
+
 int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
 	struct tagged_addr *phys, size_t nr,
 	unsigned long flags, int const group_id);
+#if MALI_USE_CSF
+/**
+ * kbase_mmu_update_csf_mcu_pages - Update MCU mappings with changes of phys and flags
+ *
+ * @kbdev: Pointer to kbase device.
+ * @vpfn: Virtual PFN (Page Frame Number) of the first page to update
+ * @phys: Pointer to the array of tagged physical addresses of the physical
+ * pages that are pointed to by the page table entries (that need to
+ * be updated).
+ * @nr: Number of pages to update
+ * @flags: Flags
+ * @group_id: The physical memory group in which the page was allocated.
+ * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * Return: 0 on success, otherwise an error code.
+ */
+int kbase_mmu_update_csf_mcu_pages(struct kbase_device *kbdev, u64 vpfn, struct tagged_addr *phys,
+	size_t nr, unsigned long flags, int const group_id);
+#endif
+
+/**
+ * kbase_mmu_migrate_page - Migrate GPU mappings and content between memory pages
+ *
+ * @old_phys: Old physical page to be replaced.
+ * @new_phys: New physical page used to replace old physical page.
+ * @old_dma_addr: DMA address of the old page.
+ * @new_dma_addr: DMA address of the new page.
+ * @level: MMU page table level of the provided PGD.
+ *
+ * The page migration process is made of 2 big steps:
+ *
+ * 1) Copy the content of the old page to the new page.
+ * 2) Remap the virtual page, that is: replace either the ATE (if the old page
+ * was a regular page) or the PTE (if the old page was used as a PGD) in the
+ * MMU page table with the new page.
+ *
+ * During the process, the MMU region is locked to prevent GPU access to the
+ * virtual memory page that is being remapped.
+ *
+ * Before copying the content of the old page to the new page and while the
+ * MMU region is locked, a GPU cache flush is performed to make sure that
+ * pending GPU writes are finalized to the old page before copying.
+ * That is necessary because otherwise there's a risk that GPU writes might
+ * be finalized to the old page, and not new page, after migration.
+ * The MMU region is unlocked only at the end of the migration operation.
+ *
+ * Return: 0 on success, otherwise an error code.
+ */
+int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_phys,
+	dma_addr_t old_dma_addr, dma_addr_t new_dma_addr, int level);
+
+/**
+ * kbase_mmu_flush_pa_range() - Flush physical address range from the GPU caches
+ *
+ * @kbdev: Instance of GPU platform device, allocated from the probe method.
+ * @kctx: Pointer to kbase context, it can be NULL if the physical address
+ * range is not associated with User created context.
+ * @phys: Starting address of the physical range to start the operation on.
+ * @size: Number of bytes to work on.
+ * @flush_op: Type of cache flush operation to perform.
+ *
+ * Issue a cache flush physical range command. This function won't perform any
+ * flush if the GPU doesn't support FLUSH_PA_RANGE command. The flush would be
+ * performed only if the context has a JASID assigned to it.
+ * This function is basically a wrapper for kbase_gpu_cache_flush_pa_range_and_busy_wait().
+ */
+void kbase_mmu_flush_pa_range(struct kbase_device *kbdev, struct kbase_context *kctx,
+	phys_addr_t phys, size_t size,
+	enum kbase_mmu_op_type flush_op);
 
 /**
  * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
  *
- * Process the bus fault interrupt that was reported for a particular GPU
- * address space.
- *
 * @kbdev: Pointer to the kbase device for which bus fault was reported.
  * @status: Value of the GPU_FAULTSTATUS register.
  * @as_nr: GPU address space for which the bus fault occurred.
+ *
+ * Process the bus fault interrupt that was reported for a particular GPU
+ * address space.
  *
  * Return: zero if the operation was successful, non-zero otherwise.
  */
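
Editor's sketch (not part of the patch): the reworked prototypes above thread two new pieces of information through the mapping calls, the caller's synchronicity (enum kbase_caller_mmu_sync_info) and, on teardown, the physical pages backing the range so GPU cache maintenance can be done at a finer grain. The fragment below shows one plausible pairing of kbase_mmu_insert_pages() and kbase_mmu_teardown_pages() under those signatures; the wrapper name, the absence of locking and of real error handling are assumptions, and reg, flags and the page counts are taken as given by the caller.

#include <linux/types.h>
#include "mali_kbase_mmu.h"	/* prototypes shown in the hunk above */

static int example_map_then_unmap(struct kbase_device *kbdev,
				  struct kbase_mmu_table *mmut,
				  struct kbase_va_region *reg,
				  u64 vpfn, struct tagged_addr *phys,
				  size_t nr, unsigned long flags,
				  int as_nr, int group_id)
{
	int err;

	/* The caller is assumed to be on a synchronous path (for instance an
	 * ioctl handler), so CALLER_MMU_SYNC is passed; an asynchronous flow
	 * would pass CALLER_MMU_ASYNC instead. */
	err = kbase_mmu_insert_pages(kbdev, mmut, vpfn, phys, nr, flags,
				     as_nr, group_id, CALLER_MMU_SYNC, reg,
				     false /* ignore_page_migration */);
	if (err)
		return err;

	/* Handing the same phys array back enables fine-grained GPU cache
	 * maintenance; passing NULL instead would fall back to invalidating
	 * the whole GPU caches. Physical and virtual page counts match here. */
	return kbase_mmu_teardown_pages(kbdev, mmut, vpfn, phys, nr, nr,
					as_nr, false /* ignore_page_migration */);
}
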
@@ -140,6 +307,7 @@
 
 /**
  * kbase_mmu_gpu_fault_interrupt() - Report a GPU fault.
+ *
  * @kbdev: Kbase device pointer
  * @status: GPU fault status
  * @as_nr: Faulty address space
@@ -152,4 +320,22 @@
 void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
 	u32 as_nr, u64 address, bool as_valid);
 
+/**
+ * kbase_context_mmu_group_id_get - Decode a memory group ID from
+ * base_context_create_flags
+ *
+ * @flags: Bitmask of flags to pass to base_context_init.
+ *
+ * Memory allocated for GPU page tables will come from the returned group.
+ *
+ * Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1).
+ */
+static inline int
+kbase_context_mmu_group_id_get(base_context_create_flags const flags)
+{
+	KBASE_DEBUG_ASSERT(flags ==
+		(flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS));
+	return (int)BASE_CONTEXT_MMU_GROUP_ID_GET(flags);
+}
+
 #endif /* _KBASE_MMU_H_ */
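
Editor's sketch (not part of the patch): the new static inline helper at the end of the header decodes the physical memory group from the context-creation flags, and the kernel-doc states that GPU page tables for the context are then allocated from that group. A minimal illustration of the decode step follows; the function name is hypothetical and it is assumed that create_flags has already been restricted to BASEP_CONTEXT_CREATE_ALLOWED_FLAGS, as the helper's assert requires.

#include "mali_kbase_mmu.h"	/* kbase_context_mmu_group_id_get() */

/* Pick the memory group that this context's GPU page tables should come
 * from; the result lies in 0..(BASE_MEM_GROUP_COUNT - 1). In the driver this
 * value would feed the group_id used when setting up the context's page
 * tables (see kbase_mmu_init()'s @group_id documentation in the diff). */
static int example_pick_pgt_group(base_context_create_flags create_flags)
{
	return kbase_context_mmu_group_id_get(create_flags);
}
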