forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/gpu/drm/msm/msm_iommu.c
@@ -1,28 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */

+#include <linux/adreno-smmu-priv.h>
+#include <linux/io-pgtable.h>
 #include "msm_drv.h"
 #include "msm_mmu.h"

 struct msm_iommu {
        struct msm_mmu base;
        struct iommu_domain *domain;
+       atomic_t pagetables;
 };
+
 #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+struct msm_iommu_pagetable {
+       struct msm_mmu base;
+       struct msm_mmu *parent;
+       struct io_pgtable_ops *pgtbl_ops;
+       phys_addr_t ttbr;
+       u32 asid;
+};
+static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
+{
+       return container_of(mmu, struct msm_iommu_pagetable, base);
+}
+
+static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
+               size_t size)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       size_t unmapped = 0;
+
+       /* Unmap the block one page at a time */
+       while (size) {
+               unmapped += ops->unmap(ops, iova, 4096, NULL);
+               iova += 4096;
+               size -= 4096;
+       }
+
+       iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
+
+       return (unmapped == size) ? 0 : -EINVAL;
+}
+
+static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+               struct sg_table *sgt, size_t len, int prot)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       struct scatterlist *sg;
+       size_t mapped = 0;
+       u64 addr = iova;
+       unsigned int i;
+
+       for_each_sgtable_sg(sgt, sg, i) {
+               size_t size = sg->length;
+               phys_addr_t phys = sg_phys(sg);
+
+               /* Map the block one page at a time */
+               while (size) {
+                       if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
+                               msm_iommu_pagetable_unmap(mmu, iova, mapped);
+                               return -EINVAL;
+                       }
+
+                       phys += 4096;
+                       addr += 4096;
+                       size -= 4096;
+                       mapped += 4096;
+               }
+       }
+
+       return 0;
+}
+
+static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+       struct adreno_smmu_priv *adreno_smmu =
+               dev_get_drvdata(pagetable->parent->dev);
+
+       /*
+        * If this is the last attached pagetable for the parent,
+        * disable TTBR0 in the arm-smmu driver
+        */
+       if (atomic_dec_return(&iommu->pagetables) == 0)
+               adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+
+       free_io_pgtable_ops(pagetable->pgtbl_ops);
+       kfree(pagetable);
+}
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu,
+               phys_addr_t *ttbr, int *asid)
+{
+       struct msm_iommu_pagetable *pagetable;
+
+       if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+               return -EINVAL;
+
+       pagetable = to_pagetable(mmu);
+
+       if (ttbr)
+               *ttbr = pagetable->ttbr;
+
+       if (asid)
+               *asid = pagetable->asid;
+
+       return 0;
+}
+
+static const struct msm_mmu_funcs pagetable_funcs = {
+       .map = msm_iommu_pagetable_map,
+       .unmap = msm_iommu_pagetable_unmap,
+       .destroy = msm_iommu_pagetable_destroy,
+};
+
+static void msm_iommu_tlb_flush_all(void *cookie)
+{
+}
+
+static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+               size_t granule, void *cookie)
+{
+}
+
+static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+               unsigned long iova, size_t granule, void *cookie)
+{
+}
+
+static const struct iommu_flush_ops null_tlb_ops = {
+       .tlb_flush_all = msm_iommu_tlb_flush_all,
+       .tlb_flush_walk = msm_iommu_tlb_flush_walk,
+       .tlb_add_page = msm_iommu_tlb_add_page,
+};
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+{
+       struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
+       struct msm_iommu *iommu = to_msm_iommu(parent);
+       struct msm_iommu_pagetable *pagetable;
+       const struct io_pgtable_cfg *ttbr1_cfg = NULL;
+       struct io_pgtable_cfg ttbr0_cfg;
+       int ret;
+
+       /* Get the pagetable configuration from the domain */
+       if (adreno_smmu->cookie)
+               ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+       if (!ttbr1_cfg)
+               return ERR_PTR(-ENODEV);
+
+       pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
+       if (!pagetable)
+               return ERR_PTR(-ENOMEM);
+
+       msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
+               MSM_MMU_IOMMU_PAGETABLE);
+
+       /* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
+       ttbr0_cfg = *ttbr1_cfg;
+
+       /* The incoming cfg will have the TTBR1 quirk enabled */
+       ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
+       ttbr0_cfg.tlb = &null_tlb_ops;
+
+       pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
+               &ttbr0_cfg, iommu->domain);
+
+       if (!pagetable->pgtbl_ops) {
+               kfree(pagetable);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /*
+        * If this is the first pagetable that we've allocated, send it back to
+        * the arm-smmu driver as a trigger to set up TTBR0
+        */
+       if (atomic_inc_return(&iommu->pagetables) == 1) {
+               ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
+               if (ret) {
+                       free_io_pgtable_ops(pagetable->pgtbl_ops);
+                       kfree(pagetable);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       /* Needed later for TLB flush */
+       pagetable->parent = parent;
+       pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
+
+       /*
+        * TODO we would like each set of page tables to have a unique ASID
+        * to optimize TLB invalidation. But iommu_flush_iotlb_all() will
+        * end up flushing the ASID used for TTBR1 pagetables, which is not
+        * what we want. So for now just use the same ASID as TTBR1.
+        */
+       pagetable->asid = 0;
+
+       return &pagetable->base;
+}

 static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
                unsigned long iova, int flags, void *arg)
@@ -30,55 +213,41 @@
        struct msm_iommu *iommu = arg;
        if (iommu->base.handler)
                return iommu->base.handler(iommu->base.arg, iova, flags);
-       pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+       pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
        return 0;
 }

-static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
-               int cnt)
-{
-       struct msm_iommu *iommu = to_msm_iommu(mmu);
-       int ret;
-
-       pm_runtime_get_sync(mmu->dev);
-       ret = iommu_attach_device(iommu->domain, mmu->dev);
-       pm_runtime_put_sync(mmu->dev);
-
-       return ret;
-}
-
-static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
-               int cnt)
+static void msm_iommu_detach(struct msm_mmu *mmu)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);

-       pm_runtime_get_sync(mmu->dev);
        iommu_detach_device(iommu->domain, mmu->dev);
-       pm_runtime_put_sync(mmu->dev);
 }

 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
-               struct sg_table *sgt, unsigned len, int prot)
+               struct sg_table *sgt, size_t len, int prot)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        size_t ret;

-//     pm_runtime_get_sync(mmu->dev);
-       ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
-//     pm_runtime_put_sync(mmu->dev);
+       /* The arm-smmu driver expects the addresses to be sign extended */
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
+
+       ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
        WARN_ON(!ret);

        return (ret == len) ? 0 : -EINVAL;
 }

-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
-               struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);

-       pm_runtime_get_sync(mmu->dev);
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
+
        iommu_unmap(iommu->domain, iova, len);
-       pm_runtime_put_sync(mmu->dev);

        return 0;
 }
@@ -91,7 +260,6 @@
 }

 static const struct msm_mmu_funcs funcs = {
-       .attach = msm_iommu_attach,
        .detach = msm_iommu_detach,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
@@ -101,14 +269,26 @@
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 {
        struct msm_iommu *iommu;
+       int ret;
+
+       if (!domain)
+               return ERR_PTR(-ENODEV);

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return ERR_PTR(-ENOMEM);

        iommu->domain = domain;
-       msm_mmu_init(&iommu->base, dev, &funcs);
+       msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
        iommu_set_fault_handler(domain, msm_fault_handler, iommu);

+       atomic_set(&iommu->pagetables, 0);
+
+       ret = iommu_attach_device(iommu->domain, dev);
+       if (ret) {
+               kfree(iommu);
+               return ERR_PTR(ret);
+       }
+
        return &iommu->base;
 }
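
For context, a minimal sketch of how a caller might drive the per-process pagetable API added by this change: msm_iommu_pagetable_create() allocates a TTBR0 pagetable on top of the parent MSM_MMU_IOMMU, and msm_iommu_pagetable_params() reports the TTBR/ASID the GPU needs when switching contexts. This sketch is not part of the commit; the function name, the cleanup path, and the surrounding plumbing are hypothetical (upstream wires this through the GPU address-space code).

/*
 * Illustrative sketch only -- not part of this commit.  Assumes the
 * declarations from msm_mmu.h and that "parent" is the MSM_MMU_IOMMU
 * instance returned by msm_iommu_new().
 */
static int example_attach_private_pagetable(struct msm_mmu *parent)
{
       struct msm_mmu *pt;
       phys_addr_t ttbr;
       int asid, ret;

       /* Allocate a TTBR0 pagetable that clones the parent's TTBR1 config */
       pt = msm_iommu_pagetable_create(parent);
       if (IS_ERR(pt))
               return PTR_ERR(pt);

       /* Fetch the TTBR/ASID the GPU must program when switching contexts */
       ret = msm_iommu_pagetable_params(pt, &ttbr, &asid);
       if (ret) {
               /* destroy also disables TTBR0 again if this was the last user */
               pt->funcs->destroy(pt);
               return ret;
       }

       /* ... hand ttbr/asid to the GPU context-switch code ... */
       return 0;
}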
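
Similarly, the sign extension that msm_iommu_map() and msm_iommu_unmap() now apply can be read as the small helper below (an illustration with a hypothetical name, not part of the commit): with the split TTBR0/TTBR1 layout, IOVAs with bit 48 set fall in the TTBR1 half of the address space, and the arm-smmu driver expects them sign-extended, i.e. with bits 63..49 set as well.

/* Illustration only -- mirrors the check added in msm_iommu_map()/unmap() */
static u64 example_sign_extend_iova(u64 iova)
{
       /* e.g. 0x0001000000001000 becomes 0xffff000000001000 */
       if (iova & BIT_ULL(48))
               iova |= GENMASK_ULL(63, 49);
       return iova;
}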