2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -3,16 +3,16 @@
  * Copyright (C) 2016-2018 Etnaviv Project
  */
 
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <linux/vmalloc.h>
 
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
 #include "state.xml.h"
 #include "state_hi.xml.h"
 
@@ -27,11 +27,9 @@
 
 #define MMUv2_MAX_STLB_ENTRIES 1024
 
-struct etnaviv_iommuv2_domain {
-	struct etnaviv_iommu_domain base;
-	/* P(age) T(able) A(rray) */
-	u64 *pta_cpu;
-	dma_addr_t pta_dma;
+struct etnaviv_iommuv2_context {
+	struct etnaviv_iommu_context base;
+	unsigned short id;
 	/* M(aster) TLB aka first level pagetable */
 	u32 *mtlb_cpu;
 	dma_addr_t mtlb_dma;
@@ -40,41 +38,62 @@
 	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
 };
 
-static struct etnaviv_iommuv2_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv2_context *
+to_v2_context(struct etnaviv_iommu_context *context)
 {
-	return container_of(domain, struct etnaviv_iommuv2_domain, base);
+	return container_of(context, struct etnaviv_iommuv2_context, base);
 }
 
+static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
+{
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+	int i;
+
+	drm_mm_takedown(&context->mm);
+
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+		if (v2_context->stlb_cpu[i])
+			dma_free_wc(context->global->dev, SZ_4K,
+				    v2_context->stlb_cpu[i],
+				    v2_context->stlb_dma[i]);
+	}
+
+	dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
+		    v2_context->mtlb_dma);
+
+	clear_bit(v2_context->id, context->global->v2.pta_alloc);
+
+	vfree(v2_context);
+}
 static int
-etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
+etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
 			    int stlb)
 {
-	if (etnaviv_domain->stlb_cpu[stlb])
+	if (v2_context->stlb_cpu[stlb])
 		return 0;
 
-	etnaviv_domain->stlb_cpu[stlb] =
-		dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
-			     &etnaviv_domain->stlb_dma[stlb],
+	v2_context->stlb_cpu[stlb] =
+		dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
+			     &v2_context->stlb_dma[stlb],
 			     GFP_KERNEL);
 
-	if (!etnaviv_domain->stlb_cpu[stlb])
+	if (!v2_context->stlb_cpu[stlb])
 		return -ENOMEM;
 
-	memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
+	memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
 		 SZ_4K / sizeof(u32));
 
-	etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
-					 MMUv2_PTE_PRESENT;
+	v2_context->mtlb_cpu[stlb] =
+		v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;
+
 	return 0;
 }
 
-static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
 			       unsigned long iova, phys_addr_t paddr,
 			       size_t size, int prot)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
 	int mtlb_entry, stlb_entry, ret;
 	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
 
@@ -90,20 +109,19 @@
 	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
 	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
 
-	ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
+	ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
 	if (ret)
 		return ret;
 
-	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+	v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;
 
 	return 0;
 }
 
-static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
 				    unsigned long iova, size_t size)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
+	struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
 	int mtlb_entry, stlb_entry;
 
 	if (size != SZ_4K)
@@ -117,127 +135,50 @@
 	return SZ_4K;
 }
 
-static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
 {
-	int ret;
-
-	/* allocate scratch page */
-	etnaviv_domain->base.bad_page_cpu =
-		dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
-			     &etnaviv_domain->base.bad_page_dma,
-			     GFP_KERNEL);
-	if (!etnaviv_domain->base.bad_page_cpu) {
-		ret = -ENOMEM;
-		goto fail_mem;
-	}
-
-	memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
-		 SZ_4K / sizeof(u32));
-
-	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
-					       SZ_4K, &etnaviv_domain->pta_dma,
-					       GFP_KERNEL);
-	if (!etnaviv_domain->pta_cpu) {
-		ret = -ENOMEM;
-		goto fail_mem;
-	}
-
-	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
-						SZ_4K, &etnaviv_domain->mtlb_dma,
-						GFP_KERNEL);
-	if (!etnaviv_domain->mtlb_cpu) {
-		ret = -ENOMEM;
-		goto fail_mem;
-	}
-
-	memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
-		 MMUv2_MAX_STLB_ENTRIES);
-
-	return 0;
-
-fail_mem:
-	if (etnaviv_domain->base.bad_page_cpu)
-		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-			    etnaviv_domain->base.bad_page_cpu,
-			    etnaviv_domain->base.bad_page_dma);
-
-	if (etnaviv_domain->pta_cpu)
-		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
-
-	if (etnaviv_domain->mtlb_cpu)
-		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
-
-	return ret;
-}
-
-static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
-{
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-	int i;
-
-	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-		    etnaviv_domain->base.bad_page_cpu,
-		    etnaviv_domain->base.bad_page_dma);
-
-	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
-
-	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
-
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
-		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-				    etnaviv_domain->stlb_cpu[i],
-				    etnaviv_domain->stlb_dma[i]);
-	}
-
-	vfree(etnaviv_domain);
-}
-
-static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
-{
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
 	size_t dump_size = SZ_4K;
 	int i;
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
-		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
 			dump_size += SZ_4K;
 
 	return dump_size;
 }
 
-static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
 	int i;
 
-	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
+	memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
 	buf += SZ_4K;
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
-		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
-			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
+			memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+			buf += SZ_4K;
+		}
 }
 
-static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+	struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(gpu->mmu->domain);
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
 	u16 prefetch;
 
 	/* If the MMU is already enabled the state is still there. */
 	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
 		return;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
-				(u32)etnaviv_domain->mtlb_dma,
-				(u32)etnaviv_domain->base.bad_page_dma);
+				(u32)v2_context->mtlb_dma,
+				(u32)context->global->bad_page_dma);
 	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
 			     prefetch);
 	etnaviv_gpu_wait_idle(gpu, 100);
@@ -245,37 +186,41 @@
 	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
 }
 
-static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+	struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(gpu->mmu->domain);
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
 	u16 prefetch;
 
 	/* If the MMU is already enabled the state is still there. */
 	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
 		return;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
-		  lower_32_bits(etnaviv_domain->pta_dma));
+		  lower_32_bits(context->global->v2.pta_dma));
 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
-		  upper_32_bits(etnaviv_domain->pta_dma));
+		  upper_32_bits(context->global->v2.pta_dma));
 	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
 
 	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
-		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
+		  lower_32_bits(context->global->bad_page_dma));
 	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
-		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
+		  lower_32_bits(context->global->bad_page_dma));
 	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
 		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
-		  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
+		  upper_32_bits(context->global->bad_page_dma)) |
 		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
-		  upper_32_bits(etnaviv_domain->base.bad_page_dma)));
+		  upper_32_bits(context->global->bad_page_dma)));
 
-	etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
-				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+	context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
+				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
 
 	/* trigger a PTA load through the FE */
-	prefetch = etnaviv_buffer_config_pta(gpu);
+	prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
 	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
 			     prefetch);
 	etnaviv_gpu_wait_idle(gpu, 100);
@@ -283,14 +228,28 @@
 	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
 }
 
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
+{
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+	return v2_context->mtlb_dma;
+}
+
+unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
+{
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+	return v2_context->id;
+}
+static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
+	struct etnaviv_iommu_context *context)
 {
 	switch (gpu->sec_mode) {
 	case ETNA_SEC_NONE:
-		etnaviv_iommuv2_restore_nonsec(gpu);
+		etnaviv_iommuv2_restore_nonsec(gpu, context);
 		break;
 	case ETNA_SEC_KERNEL:
-		etnaviv_iommuv2_restore_sec(gpu);
+		etnaviv_iommuv2_restore_sec(gpu, context);
 		break;
 	default:
 		WARN(1, "unhandled GPU security mode\n");
@@ -298,39 +257,58 @@
 	}
 }
 
-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
-	.free = etnaviv_iommuv2_domain_free,
+const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
+	.free = etnaviv_iommuv2_free,
 	.map = etnaviv_iommuv2_map,
 	.unmap = etnaviv_iommuv2_unmap,
 	.dump_size = etnaviv_iommuv2_dump_size,
 	.dump = etnaviv_iommuv2_dump,
+	.restore = etnaviv_iommuv2_restore,
 };
 
-struct etnaviv_iommu_domain *
-etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain;
-	struct etnaviv_iommu_domain *domain;
-	int ret;
+	struct etnaviv_iommuv2_context *v2_context;
+	struct etnaviv_iommu_context *context;
 
-	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
-	if (!etnaviv_domain)
+	v2_context = vzalloc(sizeof(*v2_context));
+	if (!v2_context)
 		return NULL;
 
-	domain = &etnaviv_domain->base;
-
-	domain->dev = gpu->dev;
-	domain->base = 0;
-	domain->size = (u64)SZ_1G * 4;
-	domain->ops = &etnaviv_iommuv2_ops;
-
-	ret = etnaviv_iommuv2_init(etnaviv_domain);
-	if (ret)
+	mutex_lock(&global->lock);
+	v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
+					     ETNAVIV_PTA_ENTRIES);
+	if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
+		set_bit(v2_context->id, global->v2.pta_alloc);
+	} else {
+		mutex_unlock(&global->lock);
 		goto out_free;
+	}
+	mutex_unlock(&global->lock);
 
-	return &etnaviv_domain->base;
+	v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
+					    &v2_context->mtlb_dma, GFP_KERNEL);
+	if (!v2_context->mtlb_cpu)
+		goto out_free_id;
 
+	memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
+		 MMUv2_MAX_STLB_ENTRIES);
+
+	global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;
+
+	context = &v2_context->base;
+	context->global = global;
+	kref_init(&context->refcount);
+	mutex_init(&context->lock);
+	INIT_LIST_HEAD(&context->mappings);
+	drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
+
+	return context;
+
+out_free_id:
+	clear_bit(v2_context->id, global->v2.pta_alloc);
 out_free:
-	vfree(etnaviv_domain);
+	vfree(v2_context);
 	return NULL;
 }