forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -3,15 +3,14 @@
  * Copyright (C) 2014-2018 Etnaviv Project
  */

+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>

 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
 #include "state_hi.xml.h"

 #define PT_SIZE		SZ_2M
@@ -19,124 +18,93 @@

 #define GPU_MEM_START	0x80000000

-struct etnaviv_iommuv1_domain {
-	struct etnaviv_iommu_domain base;
+struct etnaviv_iommuv1_context {
+	struct etnaviv_iommu_context base;
 	u32 *pgtable_cpu;
 	dma_addr_t pgtable_dma;
 };

-static struct etnaviv_iommuv1_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv1_context *
+to_v1_context(struct etnaviv_iommu_context *context)
 {
-	return container_of(domain, struct etnaviv_iommuv1_domain, base);
+	return container_of(context, struct etnaviv_iommuv1_context, base);
 }

-static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
+static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
 {
-	u32 *p;
-	int i;
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

-	etnaviv_domain->base.bad_page_cpu =
-		dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
-			     &etnaviv_domain->base.bad_page_dma,
-			     GFP_KERNEL);
-	if (!etnaviv_domain->base.bad_page_cpu)
-		return -ENOMEM;
+	drm_mm_takedown(&context->mm);

-	p = etnaviv_domain->base.bad_page_cpu;
-	for (i = 0; i < SZ_4K / 4; i++)
-		*p++ = 0xdead55aa;
+	dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
+		    v1_context->pgtable_dma);

-	etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
-						   PT_SIZE,
-						   &etnaviv_domain->pgtable_dma,
-						   GFP_KERNEL);
-	if (!etnaviv_domain->pgtable_cpu) {
-		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-			    etnaviv_domain->base.bad_page_cpu,
-			    etnaviv_domain->base.bad_page_dma);
-		return -ENOMEM;
-	}
+	context->global->v1.shared_context = NULL;

-	memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
-		 PT_ENTRIES);
-
-	return 0;
+	kfree(v1_context);
 }

-static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
-{
-	struct etnaviv_iommuv1_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-
-	dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
-		    etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
-
-	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-		    etnaviv_domain->base.bad_page_cpu,
-		    etnaviv_domain->base.bad_page_dma);
-
-	kfree(etnaviv_domain);
-}
-
-static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
 			       unsigned long iova, phys_addr_t paddr,
 			       size_t size, int prot)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

 	if (size != SZ_4K)
 		return -EINVAL;

-	etnaviv_domain->pgtable_cpu[index] = paddr;
+	v1_context->pgtable_cpu[index] = paddr;

 	return 0;
 }

-static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
 				    unsigned long iova, size_t size)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

 	if (size != SZ_4K)
 		return -EINVAL;

-	etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
+	v1_context->pgtable_cpu[index] = context->global->bad_page_dma;

 	return SZ_4K;
 }

-static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
 {
 	return PT_SIZE;
 }

-static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
+				 void *buf)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

-	memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
+	memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
 }

-void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+				    struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain =
-			to_etnaviv_domain(gpu->mmu->domain);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	u32 pgtable;

+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	/* set base addresses */
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);

 	/* set page table address in MC */
-	pgtable = (u32)etnaviv_domain->pgtable_dma;
+	pgtable = (u32)v1_context->pgtable_dma;

 	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
 	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -145,39 +113,64 @@
 	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
 }

-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
-	.free = etnaviv_iommuv1_domain_free,
+
+const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
+	.free = etnaviv_iommuv1_free,
 	.map = etnaviv_iommuv1_map,
 	.unmap = etnaviv_iommuv1_unmap,
 	.dump_size = etnaviv_iommuv1_dump_size,
 	.dump = etnaviv_iommuv1_dump,
+	.restore = etnaviv_iommuv1_restore,
 };

-struct etnaviv_iommu_domain *
-etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain;
-	struct etnaviv_iommu_domain *domain;
-	int ret;
+	struct etnaviv_iommuv1_context *v1_context;
+	struct etnaviv_iommu_context *context;

-	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
-	if (!etnaviv_domain)
+	mutex_lock(&global->lock);
+
+	/*
+	 * MMUv1 does not support switching between different contexts without
+	 * a stop the world operation, so we only support a single shared
+	 * context with this version.
+	 */
+	if (global->v1.shared_context) {
+		context = global->v1.shared_context;
+		etnaviv_iommu_context_get(context);
+		mutex_unlock(&global->lock);
+		return context;
+	}
+
+	v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
+	if (!v1_context) {
+		mutex_unlock(&global->lock);
 		return NULL;
+	}

-	domain = &etnaviv_domain->base;
-
-	domain->dev = gpu->dev;
-	domain->base = GPU_MEM_START;
-	domain->size = PT_ENTRIES * SZ_4K;
-	domain->ops = &etnaviv_iommuv1_ops;
-
-	ret = __etnaviv_iommu_init(etnaviv_domain);
-	if (ret)
+	v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
+					       &v1_context->pgtable_dma,
+					       GFP_KERNEL);
+	if (!v1_context->pgtable_cpu)
 		goto out_free;

-	return &etnaviv_domain->base;
+	memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);
+
+	context = &v1_context->base;
+	context->global = global;
+	kref_init(&context->refcount);
+	mutex_init(&context->lock);
+	INIT_LIST_HEAD(&context->mappings);
+	drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
+	context->global->v1.shared_context = context;
+
+	mutex_unlock(&global->lock);
+
+	return context;

 out_free:
-	kfree(etnaviv_domain);
+	mutex_unlock(&global->lock);
+	kfree(v1_context);
 	return NULL;
 }
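
Note on the allocation path: MMUv1 uses a single flat page table (PT_SIZE of 2 MiB gives 524288 four-byte entries, one per 4 KiB page, so exactly 2 GiB of GPU address space above GPU_MEM_START, 0x80000000) and cannot switch tables without stopping the GPU. etnaviv_iommuv1_context_alloc() therefore hands every caller a reference to one shared, refcounted context: under global->lock it either takes an extra reference on the existing shared_context or creates it on first use, and etnaviv_iommuv1_free() clears the cached pointer when the last reference drops. Below is a minimal standalone C sketch of that pattern only; ctx, ctx_alloc, ctx_put and global_state are illustrative names, not driver API (the driver uses kref, mutex_lock() and etnaviv_iommu_context_get/put).

/*
 * Minimal sketch (not driver code) of a lazily-created, refcounted
 * shared object, as in etnaviv_iommuv1_context_alloc() above.
 * All names here are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int refcount;			/* stands in for struct kref */
};

static struct {
	pthread_mutex_t lock;		/* stands in for global->lock */
	struct ctx *shared;		/* stands in for global->v1.shared_context */
} global_state = { PTHREAD_MUTEX_INITIALIZER, NULL };

static struct ctx *ctx_alloc(void)
{
	struct ctx *c;

	pthread_mutex_lock(&global_state.lock);

	/* Only one context may exist: hand out another reference. */
	if (global_state.shared) {
		c = global_state.shared;
		c->refcount++;
		pthread_mutex_unlock(&global_state.lock);
		return c;
	}

	/* First caller: create the shared context while holding the lock. */
	c = calloc(1, sizeof(*c));
	if (c) {
		c->refcount = 1;
		global_state.shared = c;
	}

	pthread_mutex_unlock(&global_state.lock);
	return c;
}

static void ctx_put(struct ctx *c)
{
	pthread_mutex_lock(&global_state.lock);
	if (--c->refcount == 0) {
		/* Last user gone: drop the cached pointer, as
		 * etnaviv_iommuv1_free() does with shared_context. */
		global_state.shared = NULL;
		free(c);
	}
	pthread_mutex_unlock(&global_state.lock);
}

int main(void)
{
	struct ctx *a = ctx_alloc();
	struct ctx *b = ctx_alloc();	/* same object, refcount now 2 */

	printf("shared: %s\n", (a && a == b) ? "yes" : "no");
	ctx_put(b);
	ctx_put(a);
	return 0;
}

In this sketch both allocation and release serialize on the same lock, which is what keeps the cached shared pointer from dangling; the real driver splits that work between global->lock and the context kref, so the sketch is a simplification, not a one-to-one model.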