hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/iommu/ipmmu-vmsa.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * IPMMU VMSA
+ * IOMMU API for Renesas VMSA-compatible IPMMU
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * Copyright (C) 2014-2020 Renesas Electronics Corporation
  */
 
 #include <linux/bitmap.h>
@@ -14,11 +12,11 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/io-pgtable.h>
 #include <linux/iommu.h>
-#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_iommu.h>
@@ -30,7 +28,6 @@
 
 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
 #include <asm/dma-iommu.h>
-#include <asm/pgalloc.h>
 #else
 #define arm_iommu_create_mapping(...) NULL
 #define arm_iommu_attach_device(...) -ENODEV
@@ -38,15 +35,23 @@
 #define arm_iommu_detach_device(...) do {} while (0)
 #endif
 
-#define IPMMU_CTX_MAX 8
+#define IPMMU_CTX_MAX 8U
+#define IPMMU_CTX_INVALID -1
+
+#define IPMMU_UTLB_MAX 48U
 
 struct ipmmu_features {
 	bool use_ns_alias_offset;
 	bool has_cache_leaf_nodes;
 	unsigned int number_of_contexts;
+	unsigned int num_utlbs;
 	bool setup_imbuscr;
 	bool twobit_imttbcr_sl0;
 	bool reserved_context;
+	bool cache_snoop;
+	unsigned int ctx_offset_base;
+	unsigned int ctx_offset_stride;
+	unsigned int utlb_offset_base;
 };
 
 struct ipmmu_vmsa_device {
@@ -55,11 +60,11 @@
 	struct iommu_device iommu;
 	struct ipmmu_vmsa_device *root;
 	const struct ipmmu_features *features;
-	unsigned int num_utlbs;
 	unsigned int num_ctx;
 	spinlock_t lock; /* Protects ctx and domains[] */
 	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
 	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
+	s8 utlb_ctx[IPMMU_UTLB_MAX];
 
 	struct iommu_group *group;
 	struct dma_iommu_mapping *mapping;
@@ -83,7 +88,7 @@
 
 static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
 {
-	return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
+	return dev_iommu_priv_get(dev);
 }
 
 #define TLB_LOOP_TIMEOUT 100 /* 100us */
@@ -94,125 +99,49 @@
 
 #define IM_NS_ALIAS_OFFSET 0x800
 
-#define IM_CTX_SIZE 0x40
+/* MMU "context" registers */
+#define IMCTR 0x0000 /* R-Car Gen2/3 */
+#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */
+#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
 
-#define IMCTR 0x0000
-#define IMCTR_TRE (1 << 17)
-#define IMCTR_AFE (1 << 16)
-#define IMCTR_RTSEL_MASK (3 << 4)
-#define IMCTR_RTSEL_SHIFT 4
-#define IMCTR_TREN (1 << 3)
-#define IMCTR_INTEN (1 << 2)
-#define IMCTR_FLUSH (1 << 1)
-#define IMCTR_MMUEN (1 << 0)
+#define IMTTBCR 0x0008 /* R-Car Gen2/3 */
+#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */
+#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
+#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
+#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */
 
-#define IMCAAR 0x0004
+#define IMBUSCR 0x000c /* R-Car Gen2 only */
+#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */
+#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */
 
-#define IMTTBCR 0x0008
-#define IMTTBCR_EAE (1 << 31)
-#define IMTTBCR_PMB (1 << 30)
-#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28)
-#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28)
-#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28)
-#define IMTTBCR_SH1_MASK (3 << 28)
-#define IMTTBCR_ORGN1_NC (0 << 26)
-#define IMTTBCR_ORGN1_WB_WA (1 << 26)
-#define IMTTBCR_ORGN1_WT (2 << 26)
-#define IMTTBCR_ORGN1_WB (3 << 26)
-#define IMTTBCR_ORGN1_MASK (3 << 26)
-#define IMTTBCR_IRGN1_NC (0 << 24)
-#define IMTTBCR_IRGN1_WB_WA (1 << 24)
-#define IMTTBCR_IRGN1_WT (2 << 24)
-#define IMTTBCR_IRGN1_WB (3 << 24)
-#define IMTTBCR_IRGN1_MASK (3 << 24)
-#define IMTTBCR_TSZ1_MASK (7 << 16)
-#define IMTTBCR_TSZ1_SHIFT 16
-#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12)
-#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12)
-#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12)
-#define IMTTBCR_SH0_MASK (3 << 12)
-#define IMTTBCR_ORGN0_NC (0 << 10)
-#define IMTTBCR_ORGN0_WB_WA (1 << 10)
-#define IMTTBCR_ORGN0_WT (2 << 10)
-#define IMTTBCR_ORGN0_WB (3 << 10)
-#define IMTTBCR_ORGN0_MASK (3 << 10)
-#define IMTTBCR_IRGN0_NC (0 << 8)
-#define IMTTBCR_IRGN0_WB_WA (1 << 8)
-#define IMTTBCR_IRGN0_WT (2 << 8)
-#define IMTTBCR_IRGN0_WB (3 << 8)
-#define IMTTBCR_IRGN0_MASK (3 << 8)
-#define IMTTBCR_SL0_LVL_2 (0 << 4)
-#define IMTTBCR_SL0_LVL_1 (1 << 4)
-#define IMTTBCR_TSZ0_MASK (7 << 0)
-#define IMTTBCR_TSZ0_SHIFT O
+#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */
+#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */
 
-#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6)
-#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6)
-#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6)
+#define IMSTR 0x0020 /* R-Car Gen2/3 */
+#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */
+#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */
+#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */
+#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */
 
-#define IMBUSCR 0x000c
-#define IMBUSCR_DVM (1 << 2)
-#define IMBUSCR_BUSSEL_SYS (0 << 0)
-#define IMBUSCR_BUSSEL_CCI (1 << 0)
-#define IMBUSCR_BUSSEL_IMCAAR (2 << 0)
-#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0)
-#define IMBUSCR_BUSSEL_MASK (3 << 0)
+#define IMMAIR0 0x0028 /* R-Car Gen2/3 */
 
-#define IMTTLBR0 0x0010
-#define IMTTUBR0 0x0014
-#define IMTTLBR1 0x0018
-#define IMTTUBR1 0x001c
+#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */
+#define IMEUAR 0x0034 /* R-Car Gen3 only */
 
-#define IMSTR 0x0020
-#define IMSTR_ERRLVL_MASK (3 << 12)
-#define IMSTR_ERRLVL_SHIFT 12
-#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8)
-#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8)
-#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8)
-#define IMSTR_ERRCODE_MASK (7 << 8)
-#define IMSTR_MHIT (1 << 4)
-#define IMSTR_ABORT (1 << 2)
-#define IMSTR_PF (1 << 1)
-#define IMSTR_TF (1 << 0)
-
-#define IMMAIR0 0x0028
-#define IMMAIR1 0x002c
-#define IMMAIR_ATTR_MASK 0xff
-#define IMMAIR_ATTR_DEVICE 0x04
-#define IMMAIR_ATTR_NC 0x44
-#define IMMAIR_ATTR_WBRWA 0xff
-#define IMMAIR_ATTR_SHIFT(n) ((n) << 3)
-#define IMMAIR_ATTR_IDX_NC 0
-#define IMMAIR_ATTR_IDX_WBRWA 1
-#define IMMAIR_ATTR_IDX_DEV 2
-
-#define IMEAR 0x0030
-
-#define IMPCTR 0x0200
-#define IMPSTR 0x0208
-#define IMPEAR 0x020c
-#define IMPMBA(n) (0x0280 + ((n) * 4))
-#define IMPMBD(n) (0x02c0 + ((n) * 4))
-
+/* uTLB registers */
 #define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
-#define IMUCTR0(n) (0x0300 + ((n) * 16))
-#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16))
-#define IMUCTR_FIXADDEN (1 << 31)
-#define IMUCTR_FIXADD_MASK (0xff << 16)
-#define IMUCTR_FIXADD_SHIFT 16
-#define IMUCTR_TTSEL_MMU(n) ((n) << 4)
-#define IMUCTR_TTSEL_PMB (8 << 4)
-#define IMUCTR_TTSEL_MASK (15 << 4)
-#define IMUCTR_FLUSH (1 << 1)
-#define IMUCTR_MMUEN (1 << 0)
+#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */
+#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */
+#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
 
 #define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
-#define IMUASID0(n) (0x0308 + ((n) * 16))
-#define IMUASID32(n) (0x0608 + (((n) - 32) * 16))
-#define IMUASID_ASID8_MASK (0xff << 8)
-#define IMUASID_ASID8_SHIFT 8
-#define IMUASID_ASID0_MASK (0xff << 0)
-#define IMUASID_ASID0_SHIFT 0
+#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */
 
 /* -----------------------------------------------------------------------------
  * Root device handling
@@ -259,29 +188,61 @@
 	iowrite32(data, mmu->base + offset);
 }
 
+static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
+				  unsigned int context_id, unsigned int reg)
+{
+	return mmu->features->ctx_offset_base +
+	       context_id * mmu->features->ctx_offset_stride + reg;
+}
+
+static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
+			  unsigned int context_id, unsigned int reg)
+{
+	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
+}
+
+static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
+			    unsigned int context_id, unsigned int reg, u32 data)
+{
+	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
+}
+
 static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
 			       unsigned int reg)
 {
-	return ipmmu_read(domain->mmu->root,
-			  domain->context_id * IM_CTX_SIZE + reg);
+	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
 }
 
 static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
 				 unsigned int reg, u32 data)
 {
-	ipmmu_write(domain->mmu->root,
-		    domain->context_id * IM_CTX_SIZE + reg, data);
+	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
 }
 
 static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
 				unsigned int reg, u32 data)
 {
 	if (domain->mmu != domain->mmu->root)
-		ipmmu_write(domain->mmu,
-			    domain->context_id * IM_CTX_SIZE + reg, data);
+		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
 
-	ipmmu_write(domain->mmu->root,
-		    domain->context_id * IM_CTX_SIZE + reg, data);
+	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
+}
+
+static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
+{
+	return mmu->features->utlb_offset_base + reg;
+}
+
+static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
+				unsigned int utlb, u32 data)
+{
+	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
+}
+
+static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
+{
+	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
 }
 
 /* -----------------------------------------------------------------------------
@@ -329,11 +290,11 @@
 	 */
 
 	/* TODO: What should we set the ASID to ? */
-	ipmmu_write(mmu, IMUASID(utlb), 0);
+	ipmmu_imuasid_write(mmu, utlb, 0);
 	/* TODO: Do we need to flush the microTLB ? */
-	ipmmu_write(mmu, IMUCTR(utlb),
-		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
-		    IMUCTR_MMUEN);
+	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
+			   IMUCTR_FLUSH | IMUCTR_MMUEN);
+	mmu->utlb_ctx[utlb] = domain->context_id;
 }
 
 /*
@@ -344,7 +305,8 @@
 {
 	struct ipmmu_vmsa_device *mmu = domain->mmu;
 
-	ipmmu_write(mmu, IMUCTR(utlb), 0);
+	ipmmu_imuctr_write(mmu, utlb, 0);
+	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
 }
 
 static void ipmmu_tlb_flush_all(void *cookie)
@@ -354,16 +316,15 @@
 	ipmmu_tlb_invalidate(domain);
 }
 
-static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
-				size_t granule, bool leaf, void *cookie)
+static void ipmmu_tlb_flush(unsigned long iova, size_t size,
+			    size_t granule, void *cookie)
 {
-	/* The hardware doesn't support selective TLB flush. */
+	ipmmu_tlb_flush_all(cookie);
 }
 
-static const struct iommu_gather_ops ipmmu_gather_ops = {
+static const struct iommu_flush_ops ipmmu_flush_ops = {
 	.tlb_flush_all = ipmmu_tlb_flush_all,
-	.tlb_add_flush = ipmmu_tlb_add_flush,
-	.tlb_sync = ipmmu_tlb_flush_all,
+	.tlb_flush_walk = ipmmu_tlb_flush,
 };
 
 /* -----------------------------------------------------------------------------
@@ -403,75 +364,35 @@
 	spin_unlock_irqrestore(&mmu->lock, flags);
 }
 
-static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
 {
 	u64 ttbr;
 	u32 tmp;
-	int ret;
-
-	/*
-	 * Allocate the page table operations.
-	 *
-	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
-	 * access, Long-descriptor format" that the NStable bit being set in a
-	 * table descriptor will result in the NStable and NS bits of all child
-	 * entries being ignored and considered as being set. The IPMMU seems
-	 * not to comply with this, as it generates a secure access page fault
-	 * if any of the NStable and NS bits isn't set when running in
-	 * non-secure mode.
-	 */
-	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
-	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
-	domain->cfg.ias = 32;
-	domain->cfg.oas = 40;
-	domain->cfg.tlb = &ipmmu_gather_ops;
-	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
-	domain->io_domain.geometry.force_aperture = true;
-	/*
-	 * TODO: Add support for coherent walk through CCI with DVM and remove
-	 * cache handling. For now, delegate it to the io-pgtable code.
-	 */
-	domain->cfg.iommu_dev = domain->mmu->root->dev;
-
-	/*
-	 * Find an unused context.
-	 */
-	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
-	if (ret < 0)
-		return ret;
-
-	domain->context_id = ret;
-
-	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
-					   domain);
-	if (!domain->iop) {
-		ipmmu_domain_free_context(domain->mmu->root,
-					  domain->context_id);
-		return -EINVAL;
-	}
 
 	/* TTBR0 */
-	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
+	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
 	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
 	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
 
 	/*
 	 * TTBCR
-	 * We use long descriptors with inner-shareable WBWA tables and allocate
-	 * the whole 32-bit VA space to TTBR0.
+	 * We use long descriptors and allocate the whole 32-bit VA space to
+	 * TTBR0.
 	 */
 	if (domain->mmu->features->twobit_imttbcr_sl0)
 		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
 	else
 		tmp = IMTTBCR_SL0_LVL_1;
 
-	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
-			     IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
-			     IMTTBCR_IRGN0_WB_WA | tmp);
+	if (domain->mmu->features->cache_snoop)
+		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
+		       IMTTBCR_IRGN0_WB_WA;
+
+	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
 
 	/* MAIR0 */
 	ipmmu_ctx_write_root(domain, IMMAIR0,
-			     domain->cfg.arm_lpae_s1_cfg.mair[0]);
+			     domain->cfg.arm_lpae_s1_cfg.mair);
 
 	/* IMBUSCR */
 	if (domain->mmu->features->setup_imbuscr)
@@ -494,7 +415,55 @@
 	 */
 	ipmmu_ctx_write_all(domain, IMCTR,
 			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+}
 
+static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+{
+	int ret;
+
+	/*
+	 * Allocate the page table operations.
+	 *
+	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+	 * access, Long-descriptor format" that the NStable bit being set in a
+	 * table descriptor will result in the NStable and NS bits of all child
+	 * entries being ignored and considered as being set. The IPMMU seems
+	 * not to comply with this, as it generates a secure access page fault
+	 * if any of the NStable and NS bits isn't set when running in
+	 * non-secure mode.
+	 */
+	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+	domain->cfg.ias = 32;
+	domain->cfg.oas = 40;
+	domain->cfg.tlb = &ipmmu_flush_ops;
+	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+	domain->io_domain.geometry.force_aperture = true;
+	/*
+	 * TODO: Add support for coherent walk through CCI with DVM and remove
+	 * cache handling. For now, delegate it to the io-pgtable code.
+	 */
+	domain->cfg.coherent_walk = false;
+	domain->cfg.iommu_dev = domain->mmu->root->dev;
+
+	/*
+	 * Find an unused context.
+	 */
+	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
+	if (ret < 0)
+		return ret;
+
+	domain->context_id = ret;
+
+	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+					   domain);
+	if (!domain->iop) {
+		ipmmu_domain_free_context(domain->mmu->root,
+					  domain->context_id);
+		return -EINVAL;
+	}
+
+	ipmmu_domain_setup_context(domain);
 	return 0;
 }
 
@@ -522,14 +491,16 @@
 {
 	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
 	struct ipmmu_vmsa_device *mmu = domain->mmu;
+	unsigned long iova;
 	u32 status;
-	u32 iova;
 
 	status = ipmmu_ctx_read_root(domain, IMSTR);
 	if (!(status & err_mask))
 		return IRQ_NONE;
 
-	iova = ipmmu_ctx_read_root(domain, IMEAR);
+	iova = ipmmu_ctx_read_root(domain, IMELAR);
+	if (IS_ENABLED(CONFIG_64BIT))
+		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
 
 	/*
 	 * Clear the error status flags. Unlike traditional interrupt flag
@@ -541,10 +512,10 @@
 
 	/* Log fatal errors. */
 	if (status & IMSTR_MHIT)
-		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
+		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
 				    iova);
 	if (status & IMSTR_ABORT)
-		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
+		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
 				    iova);
 
 	if (!(status & (IMSTR_PF | IMSTR_TF)))
@@ -560,7 +531,7 @@
 		return IRQ_HANDLED;
 
 	dev_err_ratelimited(mmu->dev,
-			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
+			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
 			    status, iova);
 
 	return IRQ_HANDLED;
@@ -645,7 +616,7 @@
 static int ipmmu_attach_device(struct iommu_domain *io_domain,
 			       struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 	unsigned int i;
@@ -694,7 +665,7 @@
 static void ipmmu_detach_device(struct iommu_domain *io_domain,
 				struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 	unsigned int i;
 
@@ -707,30 +678,36 @@
 }
 
 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
-		     phys_addr_t paddr, size_t size, int prot)
+		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
 	if (!domain)
 		return -ENODEV;
 
-	return domain->iop->map(domain->iop, iova, paddr, size, prot);
+	return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
 }
 
 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
-			  size_t size)
+			  size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
-	return domain->iop->unmap(domain->iop, iova, size);
+	return domain->iop->unmap(domain->iop, iova, size, gather);
 }
 
-static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
 {
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
 	if (domain->mmu)
 		ipmmu_tlb_flush_all(domain);
+}
+
+static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
+			     struct iommu_iotlb_gather *gather)
+{
+	ipmmu_flush_iotlb_all(io_domain);
 }
 
 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
@@ -752,30 +729,70 @@
 	if (!ipmmu_pdev)
 		return -ENODEV;
 
-	dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
+	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
+
 	return 0;
 }
 
-static bool ipmmu_slave_whitelist(struct device *dev)
-{
-	/* By default, do not allow use of IPMMU */
-	return false;
-}
-
 static const struct soc_device_attribute soc_rcar_gen3[] = {
+	{ .soc_id = "r8a774a1", },
+	{ .soc_id = "r8a774b1", },
+	{ .soc_id = "r8a774c0", },
+	{ .soc_id = "r8a774e1", },
 	{ .soc_id = "r8a7795", },
+	{ .soc_id = "r8a77961", },
 	{ .soc_id = "r8a7796", },
 	{ .soc_id = "r8a77965", },
 	{ .soc_id = "r8a77970", },
+	{ .soc_id = "r8a77990", },
 	{ .soc_id = "r8a77995", },
 	{ /* sentinel */ }
 };
 
+static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
+	{ .soc_id = "r8a774b1", },
+	{ .soc_id = "r8a774c0", },
+	{ .soc_id = "r8a774e1", },
+	{ .soc_id = "r8a7795", .revision = "ES3.*" },
+	{ .soc_id = "r8a77961", },
+	{ .soc_id = "r8a77965", },
+	{ .soc_id = "r8a77990", },
+	{ .soc_id = "r8a77995", },
+	{ /* sentinel */ }
+};
+
+static const char * const rcar_gen3_slave_whitelist[] = {
+};
+
+static bool ipmmu_slave_whitelist(struct device *dev)
+{
+	unsigned int i;
+
+	/*
+	 * For R-Car Gen3 use a white list to opt-in slave devices.
+	 * For Other SoCs, this returns true anyway.
+	 */
+	if (!soc_device_match(soc_rcar_gen3))
+		return true;
+
+	/* Check whether this R-Car Gen3 can use the IPMMU correctly or not */
+	if (!soc_device_match(soc_rcar_gen3_whitelist))
+		return false;
+
+	/* Check whether this slave device can work with the IPMMU */
+	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
+		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
+			return true;
+	}
+
+	/* Otherwise, do not allow use of IPMMU */
+	return false;
+}
+
 static int ipmmu_of_xlate(struct device *dev,
 			  struct of_phandle_args *spec)
 {
-	/* For R-Car Gen3 use a white list to opt-in slave devices */
-	if (soc_device_match(soc_rcar_gen3) && !ipmmu_slave_whitelist(dev))
+	if (!ipmmu_slave_whitelist(dev))
 		return -ENODEV;
 
 	iommu_fwspec_add_ids(dev, spec->args, 1);
@@ -790,23 +807,7 @@
 static int ipmmu_init_arm_mapping(struct device *dev)
 {
 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
-	struct iommu_group *group;
 	int ret;
-
-	/* Create a device group and add the device to it. */
-	group = iommu_group_alloc();
-	if (IS_ERR(group)) {
-		dev_err(dev, "Failed to allocate IOMMU group\n");
-		return PTR_ERR(group);
-	}
-
-	ret = iommu_group_add_device(group, dev);
-	iommu_group_put(group);
-
-	if (ret < 0) {
-		dev_err(dev, "Failed to add device to IPMMU group\n");
-		return ret;
-	}
 
 	/*
 	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
@@ -841,38 +842,39 @@
 	return 0;
 
 error:
-	iommu_group_remove_device(dev);
 	if (mmu->mapping)
 		arm_iommu_release_mapping(mmu->mapping);
 
 	return ret;
 }
 
-static int ipmmu_add_device(struct device *dev)
+static struct iommu_device *ipmmu_probe_device(struct device *dev)
 {
-	struct iommu_group *group;
+	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 
 	/*
 	 * Only let through devices that have been verified in xlate()
 	 */
-	if (!to_ipmmu(dev))
-		return -ENODEV;
+	if (!mmu)
+		return ERR_PTR(-ENODEV);
 
-	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
-		return ipmmu_init_arm_mapping(dev);
-
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-
-	iommu_group_put(group);
-	return 0;
+	return &mmu->iommu;
 }
 
-static void ipmmu_remove_device(struct device *dev)
+static void ipmmu_probe_finalize(struct device *dev)
+{
+	int ret = 0;
+
+	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
+		ret = ipmmu_init_arm_mapping(dev);
+
+	if (ret)
+		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
+}
+
+static void ipmmu_release_device(struct device *dev)
 {
 	arm_iommu_detach_device(dev);
-	iommu_group_remove_device(dev);
 }
 
 static struct iommu_group *ipmmu_find_group(struct device *dev)
@@ -897,12 +899,14 @@
 	.detach_dev = ipmmu_detach_device,
 	.map = ipmmu_map,
 	.unmap = ipmmu_unmap,
-	.flush_iotlb_all = ipmmu_iotlb_sync,
+	.flush_iotlb_all = ipmmu_flush_iotlb_all,
 	.iotlb_sync = ipmmu_iotlb_sync,
 	.iova_to_phys = ipmmu_iova_to_phys,
-	.add_device = ipmmu_add_device,
-	.remove_device = ipmmu_remove_device,
-	.device_group = ipmmu_find_group,
+	.probe_device = ipmmu_probe_device,
+	.release_device = ipmmu_release_device,
+	.probe_finalize = ipmmu_probe_finalize,
+	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
+			? generic_device_group : ipmmu_find_group,
 	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
 	.of_xlate = ipmmu_of_xlate,
 };
@@ -917,25 +921,35 @@
 
 	/* Disable all contexts. */
 	for (i = 0; i < mmu->num_ctx; ++i)
-		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
+		ipmmu_ctx_write(mmu, i, IMCTR, 0);
 }
 
 static const struct ipmmu_features ipmmu_features_default = {
 	.use_ns_alias_offset = true,
 	.has_cache_leaf_nodes = false,
 	.number_of_contexts = 1, /* software only tested with one context */
+	.num_utlbs = 32,
 	.setup_imbuscr = true,
 	.twobit_imttbcr_sl0 = false,
 	.reserved_context = false,
+	.cache_snoop = true,
+	.ctx_offset_base = 0,
+	.ctx_offset_stride = 0x40,
+	.utlb_offset_base = 0,
 };
 
 static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
 	.use_ns_alias_offset = false,
 	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
+	.num_utlbs = 48,
 	.setup_imbuscr = false,
 	.twobit_imttbcr_sl0 = true,
 	.reserved_context = true,
+	.cache_snoop = false,
+	.ctx_offset_base = 0,
+	.ctx_offset_stride = 0x40,
+	.utlb_offset_base = 0,
 };
 
 static const struct of_device_id ipmmu_of_ids[] = {
@@ -943,10 +957,25 @@
 		.compatible = "renesas,ipmmu-vmsa",
 		.data = &ipmmu_features_default,
 	}, {
+		.compatible = "renesas,ipmmu-r8a774a1",
+		.data = &ipmmu_features_rcar_gen3,
+	}, {
+		.compatible = "renesas,ipmmu-r8a774b1",
+		.data = &ipmmu_features_rcar_gen3,
+	}, {
+		.compatible = "renesas,ipmmu-r8a774c0",
+		.data = &ipmmu_features_rcar_gen3,
+	}, {
+		.compatible = "renesas,ipmmu-r8a774e1",
+		.data = &ipmmu_features_rcar_gen3,
+	}, {
 		.compatible = "renesas,ipmmu-r8a7795",
 		.data = &ipmmu_features_rcar_gen3,
 	}, {
 		.compatible = "renesas,ipmmu-r8a7796",
+		.data = &ipmmu_features_rcar_gen3,
+	}, {
+		.compatible = "renesas,ipmmu-r8a77961",
 		.data = &ipmmu_features_rcar_gen3,
 	}, {
 		.compatible = "renesas,ipmmu-r8a77965",
@@ -955,14 +984,15 @@
 		.compatible = "renesas,ipmmu-r8a77970",
 		.data = &ipmmu_features_rcar_gen3,
 	}, {
+		.compatible = "renesas,ipmmu-r8a77990",
+		.data = &ipmmu_features_rcar_gen3,
+	}, {
 		.compatible = "renesas,ipmmu-r8a77995",
 		.data = &ipmmu_features_rcar_gen3,
 	}, {
 		/* Terminator */
 	},
 };
-
-MODULE_DEVICE_TABLE(of, ipmmu_of_ids);
 
 static int ipmmu_probe(struct platform_device *pdev)
 {
@@ -978,11 +1008,13 @@
 	}
 
 	mmu->dev = &pdev->dev;
-	mmu->num_utlbs = 48;
 	spin_lock_init(&mmu->lock);
 	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
 	mmu->features = of_device_get_match_data(&pdev->dev);
-	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret)
+		return ret;
 
 	/* Map I/O memory and request IRQ. */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1005,10 +1037,7 @@
 	if (mmu->features->use_ns_alias_offset)
 		mmu->base += IM_NS_ALIAS_OFFSET;
 
-	mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
-			     mmu->features->number_of_contexts);
-
-	irq = platform_get_irq(pdev, 0);
+	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
 
 	/*
 	 * Determine if this IPMMU instance is a root device by checking for
@@ -1028,10 +1057,9 @@
 
 	/* Root devices have mandatory IRQs */
 	if (ipmmu_is_root(mmu)) {
-		if (irq < 0) {
-			dev_err(&pdev->dev, "no IRQ found\n");
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0)
 			return irq;
-		}
 
 		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
@@ -1098,10 +1126,48 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int ipmmu_resume_noirq(struct device *dev)
+{
+	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
+	unsigned int i;
+
+	/* Reset root MMU and restore contexts */
+	if (ipmmu_is_root(mmu)) {
+		ipmmu_device_reset(mmu);
+
+		for (i = 0; i < mmu->num_ctx; i++) {
+			if (!mmu->domains[i])
+				continue;
+
+			ipmmu_domain_setup_context(mmu->domains[i]);
+		}
+	}
+
+	/* Re-enable active micro-TLBs */
+	for (i = 0; i < mmu->features->num_utlbs; i++) {
+		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
+			continue;
+
+		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops ipmmu_pm = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
+};
+#define DEV_PM_OPS &ipmmu_pm
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver ipmmu_driver = {
 	.driver = {
 		.name = "ipmmu-vmsa",
 		.of_match_table = of_match_ptr(ipmmu_of_ids),
+		.pm = DEV_PM_OPS,
 	},
 	.probe = ipmmu_probe,
 	.remove = ipmmu_remove,
@@ -1134,15 +1200,4 @@
 	setup_done = true;
 	return 0;
 }
-
-static void __exit ipmmu_exit(void)
-{
-	return platform_driver_unregister(&ipmmu_driver);
-}
-
 subsys_initcall(ipmmu_init);
-module_exit(ipmmu_exit);
-
-MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
-MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
-MODULE_LICENSE("GPL v2");
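
Reference sketch (not part of the patch above): the new ipmmu_features fields ctx_offset_base, ctx_offset_stride and utlb_offset_base replace the fixed IM_CTX_SIZE multiplier, and ipmmu_ctx_reg()/ipmmu_utlb_reg() in the diff compute MMIO offsets from them. The small standalone C program below only mirrors that arithmetic with the values shown in the ipmmu_features_rcar_gen3 table; the struct name, main() and printed output are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Per-SoC layout parameters, copied from ipmmu_features_rcar_gen3 in the diff. */
struct layout {
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

static const struct layout rcar_gen3 = {
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

/* Register offsets taken from the patched register block. */
#define IMCTR 0x0000
#define IMUCTR0(n) (0x0300 + ((n) * 16))

/* Same computation as ipmmu_ctx_reg(): base + context_id * stride + register. */
static unsigned int ctx_reg(const struct layout *l, unsigned int context_id,
			    unsigned int reg)
{
	return l->ctx_offset_base + context_id * l->ctx_offset_stride + reg;
}

int main(void)
{
	/* Context 2's IMCTR: 0 + 2 * 0x40 + 0x0000 = 0x80. */
	printf("IMCTR, context 2: 0x%x\n", ctx_reg(&rcar_gen3, 2, IMCTR));
	/* uTLB 5's IMUCTR: utlb_offset_base + IMUCTR0(5) = 0x350. */
	printf("IMUCTR, uTLB 5:   0x%x\n", rcar_gen3.utlb_offset_base + IMUCTR0(5));
	return 0;
}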