2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/drivers/iommu/mtk_iommu.c
+++ b/kernel/drivers/iommu/mtk_iommu.c
@@ -1,21 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2015-2016 MediaTek Inc.
  * Author: Yong Wu <yong.wu@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
 */
-#include <linux/bootmem.h>
+#include <linux/bitfield.h>
 #include <linux/bug.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/device.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-iommu.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -23,19 +16,24 @@
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/list.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of_address.h>
 #include <linux/of_iommu.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/soc/mediatek/infracfg.h>
 #include <asm/barrier.h>
 #include <soc/mediatek/smi.h>
 
 #include "mtk_iommu.h"
 
 #define REG_MMU_PT_BASE_ADDR			0x000
+#define MMU_PT_ADDR_MASK			GENMASK(31, 7)
 
 #define REG_MMU_INVALIDATE			0x020
 #define F_ALL_INVLD				0x2
@@ -44,20 +42,23 @@
 #define REG_MMU_INVLD_START_A			0x024
 #define REG_MMU_INVLD_END_A			0x028
 
-#define REG_MMU_INV_SEL				0x038
+#define REG_MMU_INV_SEL_GEN2			0x02c
+#define REG_MMU_INV_SEL_GEN1			0x038
 #define F_INVLD_EN0				BIT(0)
 #define F_INVLD_EN1				BIT(1)
 
-#define REG_MMU_STANDARD_AXI_MODE		0x048
+#define REG_MMU_MISC_CTRL			0x048
+#define F_MMU_IN_ORDER_WR_EN_MASK		(BIT(1) | BIT(17))
+#define F_MMU_STANDARD_AXI_MODE_MASK		(BIT(3) | BIT(19))
+
 #define REG_MMU_DCM_DIS				0x050
+#define REG_MMU_WR_LEN_CTRL			0x054
+#define F_MMU_WR_THROT_DIS_MASK			(BIT(5) | BIT(21))
 
 #define REG_MMU_CTRL_REG			0x110
+#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
 #define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
-#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
-	((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
-/* It's named by F_MMU_TF_PROT_SEL in mt2712. */
-#define F_MMU_TF_PROTECT_SEL(prot, data) \
-	(((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
+#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)
 
 #define REG_MMU_IVRP_PADDR			0x114
 
@@ -74,46 +75,70 @@
 #define F_INT_CLR_BIT				BIT(12)
 
 #define REG_MMU_INT_MAIN_CONTROL		0x124
-#define F_INT_TRANSLATION_FAULT			BIT(0)
-#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
-#define F_INT_INVALID_PA_FAULT			BIT(2)
-#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
-#define F_INT_TLB_MISS_FAULT			BIT(4)
-#define F_INT_MISS_TRANSACTION_FIFO_FAULT	BIT(5)
-#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	BIT(6)
+						/* mmu0 | mmu1 */
+#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
+#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
+#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
+#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
+#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
+#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
+#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))
 
 #define REG_MMU_CPE_DONE			0x12C
 
 #define REG_MMU_FAULT_ST1			0x134
+#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
+#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)
 
-#define REG_MMU_FAULT_VA			0x13c
+#define REG_MMU0_FAULT_VA			0x13c
+#define F_MMU_INVAL_VA_31_12_MASK		GENMASK(31, 12)
+#define F_MMU_INVAL_VA_34_32_MASK		GENMASK(11, 9)
+#define F_MMU_INVAL_PA_34_32_MASK		GENMASK(8, 6)
 #define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
 #define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)
 
-#define REG_MMU_INVLD_PA			0x140
-#define REG_MMU_INT_ID				0x150
-#define F_MMU0_INT_ID_LARB_ID(a)		(((a) >> 7) & 0x7)
-#define F_MMU0_INT_ID_PORT_ID(a)		(((a) >> 2) & 0x1f)
+#define REG_MMU0_INVLD_PA			0x140
+#define REG_MMU1_FAULT_VA			0x144
+#define REG_MMU1_INVLD_PA			0x148
+#define REG_MMU0_INT_ID				0x150
+#define REG_MMU1_INT_ID				0x154
+#define F_MMU_INT_ID_COMM_ID(a)			(((a) >> 9) & 0x7)
+#define F_MMU_INT_ID_SUB_COMM_ID(a)		(((a) >> 7) & 0x3)
+#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
+#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
 
-#define MTK_PROTECT_PA_ALIGN			128
+#define MTK_PROTECT_PA_ALIGN			256
 
-/*
- * Get the local arbiter ID and the portid within the larb arbiter
- * from mtk_m4u_id which is defined by MTK_M4U_ID.
- */
-#define MTK_M4U_TO_LARB(id)			(((id) >> 5) & 0xf)
-#define MTK_M4U_TO_PORT(id)			((id) & 0x1f)
+#define HAS_4GB_MODE			BIT(0)
+/* HW will use the EMI clock if there isn't the "bclk". */
+#define HAS_BCLK			BIT(1)
+#define HAS_VLD_PA_RNG			BIT(2)
+#define RESET_AXI			BIT(3)
+#define OUT_ORDER_WR_EN			BIT(4)
+#define HAS_SUB_COMM			BIT(5)
+#define WR_THROT_EN			BIT(6)
+#define HAS_LEGACY_IVRP_PADDR		BIT(7)
+#define IOVA_34_EN			BIT(8)
+
+#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
+		((((pdata)->flags) & (_x)) == (_x))
 
 struct mtk_iommu_domain {
-	spinlock_t			pgtlock; /* lock for page table */
-
 	struct io_pgtable_cfg		cfg;
 	struct io_pgtable_ops		*iop;
 
+	struct mtk_iommu_data		*data;
 	struct iommu_domain		domain;
 };
 
-static struct iommu_ops mtk_iommu_ops;
+static const struct iommu_ops mtk_iommu_ops;
+
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data);
+
+#define MTK_IOMMU_TLB_ADDR(iova) ({					\
+	dma_addr_t _addr = iova;					\
+	((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\
+})
 
 /*
  * In M4U 4GB mode, the physical address is remapped as below:
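
Two helpers added above do a lot of quiet work. MTK_IOMMU_HAS_FLAG() tests that every
queried flag bit is set, not just any of them, and MTK_IOMMU_TLB_ADDR() packs a
page-aligned IOVA that can exceed 32 bits into one 32-bit invalidate register: bits
[31:12] stay in place and IOVA[34:32] reuses the low bits freed by the 4 KiB alignment.
A stand-alone C sketch of both, with made-up values and plain stdint types in place of
the kernel helpers:

    #include <stdint.h>
    #include <stdio.h>

    #define HAS_4GB_MODE        (1u << 0)
    #define HAS_BCLK            (1u << 1)
    /* All requested bits must be present, mirroring MTK_IOMMU_HAS_FLAG(). */
    #define HAS_FLAG(flags, x)  (((flags) & (x)) == (x))

    static uint32_t tlb_addr(uint64_t iova)
    {
        /* Keep IOVA[31:12]; fold IOVA[34:32] into the low bits. */
        return ((uint32_t)iova & 0xfffff000u) | (uint32_t)(iova >> 32);
    }

    int main(void)
    {
        uint32_t flags = HAS_4GB_MODE;

        /* Prints 0: HAS_BCLK is missing, so the combined test fails. */
        printf("%d\n", HAS_FLAG(flags, HAS_4GB_MODE | HAS_BCLK));
        /* Prints 0xd4321002: IOVA[34:32] (0x2) ends up in the low bits. */
        printf("0x%x\n", tlb_addr(0x2d4321000ULL));
        return 0;
    }
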
@@ -137,11 +162,30 @@
  * 'E', the CPU physical address keep as is.
  * Additionally, The iommu consumers always use the CPU phyiscal address.
  */
-#define MTK_IOMMU_4GB_MODE_REMAP_BASE	0x40000000
+#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL
 
 static LIST_HEAD(m4ulist);	/* List all the M4U HWs */
 
 #define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)
+
+struct mtk_iommu_iova_region {
+	dma_addr_t		iova_base;
+	unsigned long long	size;
+};
+
+static const struct mtk_iommu_iova_region single_domain[] = {
+	{.iova_base = 0,	.size = SZ_4G},
+};
+
+static const struct mtk_iommu_iova_region mt8192_multi_dom[] = {
+	{ .iova_base = 0x0,		.size = SZ_4G},		/* disp: 0 ~ 4G */
+	#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+	{ .iova_base = SZ_4G,		.size = SZ_4G},		/* vdec: 4G ~ 8G */
+	{ .iova_base = SZ_4G * 2,	.size = SZ_4G},		/* CAM/MDP: 8G ~ 12G */
+	{ .iova_base = 0x240000000ULL,	.size = 0x4000000},	/* CCU0 */
+	{ .iova_base = 0x244000000ULL,	.size = 0x4000000},	/* CCU1 */
+	#endif
+};
 
 /*
  * There may be 1 or 2 M4U HWs, But we always expect they are in the same domain
@@ -165,90 +209,108 @@
 	return container_of(dom, struct mtk_iommu_domain, domain);
 }
 
-static void mtk_iommu_tlb_flush_all(void *cookie)
+static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
 {
-	struct mtk_iommu_data *data = cookie;
-
 	for_each_m4u(data) {
+		if (pm_runtime_get_if_in_use(data->dev) <= 0)
+			continue;
+
 		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
-			       data->base + REG_MMU_INV_SEL);
+			       data->base + data->plat_data->inv_sel_reg);
 		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
 		wmb(); /* Make sure the tlb flush all done */
+
+		pm_runtime_put(data->dev);
 	}
 }
 
-static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
-					   size_t granule, bool leaf,
-					   void *cookie)
+static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
+					   size_t granule,
+					   struct mtk_iommu_data *data)
 {
-	struct mtk_iommu_data *data = cookie;
-
-	for_each_m4u(data) {
-		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
-			       data->base + REG_MMU_INV_SEL);
-
-		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
-		writel_relaxed(iova + size - 1,
-			       data->base + REG_MMU_INVLD_END_A);
-		writel_relaxed(F_MMU_INV_RANGE,
-			       data->base + REG_MMU_INVALIDATE);
-		data->tlb_flush_active = true;
-	}
-}
-
-static void mtk_iommu_tlb_sync(void *cookie)
-{
-	struct mtk_iommu_data *data = cookie;
+	bool has_pm = !!data->dev->pm_domain;
+	unsigned long flags;
 	int ret;
 	u32 tmp;
 
 	for_each_m4u(data) {
-		/* Avoid timing out if there's nothing to wait for */
-		if (!data->tlb_flush_active)
-			return;
+		if (has_pm) {
+			if (pm_runtime_get_if_in_use(data->dev) <= 0)
+				continue;
+		}
 
+		spin_lock_irqsave(&data->tlb_lock, flags);
+		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
+			       data->base + data->plat_data->inv_sel_reg);
+
+		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova),
+			       data->base + REG_MMU_INVLD_START_A);
+		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
+			       data->base + REG_MMU_INVLD_END_A);
+		writel_relaxed(F_MMU_INV_RANGE,
+			       data->base + REG_MMU_INVALIDATE);
+
+		/* tlb sync */
 		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
-						tmp, tmp != 0, 10, 100000);
+						tmp, tmp != 0, 10, 1000);
 		if (ret) {
 			dev_warn(data->dev,
 				 "Partial TLB flush timed out, falling back to full flush\n");
-			mtk_iommu_tlb_flush_all(cookie);
+			mtk_iommu_tlb_flush_all(data);
 		}
 		/* Clear the CPE status */
 		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
-		data->tlb_flush_active = false;
+		spin_unlock_irqrestore(&data->tlb_lock, flags);
+
+		if (has_pm)
+			pm_runtime_put(data->dev);
 	}
 }
-
-static const struct iommu_gather_ops mtk_iommu_gather_ops = {
-	.tlb_flush_all = mtk_iommu_tlb_flush_all,
-	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
-	.tlb_sync = mtk_iommu_tlb_sync,
-};
 
 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
 {
 	struct mtk_iommu_data *data = dev_id;
 	struct mtk_iommu_domain *dom = data->m4u_dom;
-	u32 int_state, regval, fault_iova, fault_pa;
-	unsigned int fault_larb, fault_port;
+	unsigned int fault_larb, fault_port, sub_comm = 0;
+	u32 int_state, regval, va34_32, pa34_32;
+	u64 fault_iova, fault_pa;
 	bool layer, write;
 
 	/* Read error info from registers */
 	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
-	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
+	if (int_state & F_REG_MMU0_FAULT_MASK) {
+		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
+		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
+		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
+	} else {
+		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
+		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
+		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
+	}
 	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
 	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
-	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
-	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
-	fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
-	fault_port = F_MMU0_INT_ID_PORT_ID(regval);
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) {
+		va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
+		pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
+		fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
+		fault_iova |= (u64)va34_32 << 32;
+		fault_pa |= (u64)pa34_32 << 32;
+	}
+
+	fault_port = F_MMU_INT_ID_PORT_ID(regval);
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
+		fault_larb = F_MMU_INT_ID_COMM_ID(regval);
+		sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
+	} else {
+		fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+	}
+	fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
 
 	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
 			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
 		dev_err_ratelimited(
 			data->dev,
-			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
+			"fault type=0x%x iova=0x%llx pa=0x%llx larb=%d port=%d layer=%d %s\n",
 			int_state, fault_iova, fault_pa, fault_larb, fault_port,
 			layer, write ? "write" : "read");
 	}
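
With IOVA_34_EN the 32-bit fault register can no longer hold the whole address, so the
hardware stashes VA[34:32] in bits [11:9] and PA[34:32] in bits [8:6] of the fault-VA
register, and the handler above stitches the 64-bit values back together. A user-space
sketch of that decode, with invented register values and FIELD_GET() open-coded as
shifts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t fault_va_reg = 0xd4321244;  /* made-up REG_MMU0_FAULT_VA value */
        uint32_t fault_pa_reg = 0x00456000;  /* made-up REG_MMU0_INVLD_PA value */

        uint64_t va34_32 = (fault_va_reg >> 9) & 0x7;  /* F_MMU_INVAL_VA_34_32_MASK */
        uint64_t pa34_32 = (fault_va_reg >> 6) & 0x7;  /* F_MMU_INVAL_PA_34_32_MASK */

        uint64_t fault_iova = (fault_va_reg & 0xfffff000u) | (va34_32 << 32);
        uint64_t fault_pa = (uint64_t)fault_pa_reg | (pa34_32 << 32);

        printf("iova=0x%llx pa=0x%llx\n",
               (unsigned long long)fault_iova, (unsigned long long)fault_pa);
        return 0;
    }
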
@@ -263,21 +325,57 @@
 	return IRQ_HANDLED;
 }
 
-static void mtk_iommu_config(struct mtk_iommu_data *data,
-			     struct device *dev, bool enable)
+static int mtk_iommu_get_domain_id(struct device *dev,
+				   const struct mtk_iommu_plat_data *plat_data)
+{
+	const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
+	const struct bus_dma_region *dma_rgn = dev->dma_range_map;
+	int i, candidate = -1;
+	dma_addr_t dma_end;
+
+	if (!dma_rgn || plat_data->iova_region_nr == 1)
+		return 0;
+
+	dma_end = dma_rgn->dma_start + dma_rgn->size - 1;
+	for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {
+		/* Best fit. */
+		if (dma_rgn->dma_start == rgn->iova_base &&
+		    dma_end == rgn->iova_base + rgn->size - 1)
+			return i;
+		/* ok if it is inside this region. */
+		if (dma_rgn->dma_start >= rgn->iova_base &&
+		    dma_end < rgn->iova_base + rgn->size)
+			candidate = i;
+	}
+
+	if (candidate >= 0)
+		return candidate;
+	dev_err(dev, "Can NOT find the iommu domain id(%pad 0x%llx).\n",
+		&dma_rgn->dma_start, dma_rgn->size);
+	return -EINVAL;
+}
+
+static void mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
+			     bool enable, unsigned int domid)
 {
 	struct mtk_smi_larb_iommu *larb_mmu;
 	unsigned int larbid, portid;
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	const struct mtk_iommu_iova_region *region;
 	int i;
 
 	for (i = 0; i < fwspec->num_ids; ++i) {
 		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
 		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
-		larb_mmu = &data->smi_imu.larb_imu[larbid];
 
-		dev_dbg(dev, "%s iommu port: %d\n",
-			enable ? "enable" : "disable", portid);
+		larb_mmu = &data->larb_imu[larbid];
+
+		region = data->plat_data->iova_region + domid;
+		larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
+
+		dev_dbg(dev, "%s iommu for larb(%s) port %d dom %d bank %d.\n",
+			enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+			portid, domid, larb_mmu->bank[portid]);
 
 		if (enable)
 			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
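
mtk_iommu_get_domain_id() above prefers a region that matches the device's dma-range
exactly and only falls back to a region that merely contains it; on mt8192 this matters
because the small CCU windows sit inside the larger CAM/MDP region. A compilable sketch
of the same rule, with the region table taken from the patch and a hypothetical
dma-range as input:

    #include <stdint.h>
    #include <stdio.h>

    struct region { uint64_t base, size; };

    /* mt8192_multi_dom from the patch, flattened to plain integers. */
    static const struct region rgn[] = {
        { 0x0,            0x100000000ULL },  /* disp */
        { 0x100000000ULL, 0x100000000ULL },  /* vdec */
        { 0x200000000ULL, 0x100000000ULL },  /* CAM/MDP */
        { 0x240000000ULL, 0x4000000 },       /* CCU0 */
        { 0x244000000ULL, 0x4000000 },       /* CCU1 */
    };

    static int get_domain_id(uint64_t start, uint64_t size)
    {
        uint64_t end = start + size - 1;
        int i, candidate = -1;

        for (i = 0; i < (int)(sizeof(rgn) / sizeof(rgn[0])); i++) {
            if (start == rgn[i].base && end == rgn[i].base + rgn[i].size - 1)
                return i;       /* best fit: an exact match wins at once */
            if (start >= rgn[i].base && end < rgn[i].base + rgn[i].size)
                candidate = i;  /* contained: remember as a fallback */
        }
        return candidate;       /* -1: no usable region */
    }

    int main(void)
    {
        /* CCU0 lies inside CAM/MDP, but the exact match still returns 3. */
        printf("%d\n", get_domain_id(0x240000000ULL, 0x4000000));
        /* A half-covered vdec range matches only by containment: 1. */
        printf("%d\n", get_domain_id(0x100000000ULL, 0x80000000ULL));
        return 0;
    }
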
@@ -286,25 +384,33 @@
 	}
 }
 
-static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
+static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+				     struct mtk_iommu_data *data,
+				     unsigned int domid)
 {
-	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+	const struct mtk_iommu_iova_region *region;
 
-	spin_lock_init(&dom->pgtlock);
+	/* Use the exist domain as there is only one pgtable here. */
+	if (data->m4u_dom) {
+		dom->iop = data->m4u_dom->iop;
+		dom->cfg = data->m4u_dom->cfg;
+		dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap;
+		goto update_iova_region;
+	}
 
 	dom->cfg = (struct io_pgtable_cfg) {
 		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
 			IO_PGTABLE_QUIRK_NO_PERMS |
-			IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
 		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
-		.ias = 32,
-		.oas = 32,
-		.tlb = &mtk_iommu_gather_ops,
+		.ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
 		.iommu_dev = data->dev,
 	};
 
-	if (data->enable_4GB)
-		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
+		dom->cfg.oas = data->enable_4GB ? 33 : 32;
+	else
+		dom->cfg.oas = 35;
 
 	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
 	if (!dom->iop) {
@@ -314,6 +420,13 @@
 
 	/* Update our support page sizes bitmap */
 	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+
+update_iova_region:
+	/* Update the iova region for this domain */
+	region = data->plat_data->iova_region + domid;
+	dom->domain.geometry.aperture_start = region->iova_base;
+	dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
+	dom->domain.geometry.force_aperture = true;
 	return 0;
 }
 
@@ -328,30 +441,16 @@
 	if (!dom)
 		return NULL;
 
-	if (iommu_get_dma_cookie(&dom->domain))
-		goto free_dom;
-
-	if (mtk_iommu_domain_finalise(dom))
-		goto put_dma_cookie;
-
-	dom->domain.geometry.aperture_start = 0;
-	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
-	dom->domain.geometry.force_aperture = true;
+	if (iommu_get_dma_cookie(&dom->domain)) {
+		kfree(dom);
+		return NULL;
+	}
 
 	return &dom->domain;
-
-put_dma_cookie:
-	iommu_put_dma_cookie(&dom->domain);
-free_dom:
-	kfree(dom);
-	return NULL;
 }
 
 static void mtk_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-
-	free_io_pgtable_ops(dom->iop);
 	iommu_put_dma_cookie(domain);
 	kfree(to_mtk_domain(domain));
 }
@@ -359,135 +458,171 @@
 static int mtk_iommu_attach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
+	struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
+	struct device *m4udev = data->dev;
+	int ret, domid;
 
-	if (!data)
-		return -ENODEV;
+	domid = mtk_iommu_get_domain_id(dev, data->plat_data);
+	if (domid < 0)
+		return domid;
 
-	/* Update the pgtable base address register of the M4U HW */
-	if (!data->m4u_dom) {
-		data->m4u_dom = dom;
-		writel(dom->cfg.arm_v7s_cfg.ttbr[0],
-		       data->base + REG_MMU_PT_BASE_ADDR);
+	if (!dom->data) {
+		/* Data is in the frstdata in sharing pgtable case. */
+		frstdata = mtk_iommu_get_m4u_data();
+
+		if (mtk_iommu_domain_finalise(dom, frstdata, domid))
+			return -ENODEV;
+		dom->data = data;
 	}
 
-	mtk_iommu_config(data, dev, true);
+	mutex_lock(&data->mutex);
+	if (!data->m4u_dom) { /* Initialize the M4U HW */
+		ret = pm_runtime_resume_and_get(m4udev);
+		if (ret < 0)
+			goto err_unlock;
+
+		ret = mtk_iommu_hw_init(data);
+		if (ret) {
+			pm_runtime_put(m4udev);
+			goto err_unlock;
+		}
+		data->m4u_dom = dom;
+		writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
+		       data->base + REG_MMU_PT_BASE_ADDR);
+
+		pm_runtime_put(m4udev);
+	}
+	mutex_unlock(&data->mutex);
+
+	mtk_iommu_config(data, dev, true, domid);
 	return 0;
+
+err_unlock:
+	mutex_unlock(&data->mutex);
+	return ret;
 }
 
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
 				    struct device *dev)
 {
-	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
+	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
 
-	if (!data)
-		return;
-
-	mtk_iommu_config(data, dev, false);
+	mtk_iommu_config(data, dev, false, 0);
 }
 
 static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
-			 phys_addr_t paddr, size_t size, int prot)
+			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	unsigned long flags;
-	int ret;
 
-	spin_lock_irqsave(&dom->pgtlock, flags);
-	ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
-			    size, prot);
-	spin_unlock_irqrestore(&dom->pgtlock, flags);
+	/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
+	if (dom->data->enable_4GB)
+		paddr |= BIT_ULL(32);
 
-	return ret;
+	/* Synchronize with the tlb_lock */
+	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
 }
 
 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
-			      unsigned long iova, size_t size)
+			      unsigned long iova, size_t size,
+			      struct iommu_iotlb_gather *gather)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	unsigned long flags;
-	size_t unmapsz;
+	unsigned long end = iova + size - 1;
 
-	spin_lock_irqsave(&dom->pgtlock, flags);
-	unmapsz = dom->iop->unmap(dom->iop, iova, size);
-	spin_unlock_irqrestore(&dom->pgtlock, flags);
-
-	return unmapsz;
+	if (gather->start > iova)
+		gather->start = iova;
+	if (gather->end < end)
+		gather->end = end;
+	return dom->iop->unmap(dom->iop, iova, size, gather);
 }
 
-static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
+static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
-	mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
+	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+
+	mtk_iommu_tlb_flush_all(dom->data);
+}
+
+static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+				 struct iommu_iotlb_gather *gather)
+{
+	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+	size_t length = gather->end - gather->start + 1;
+
+	mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
+				       dom->data);
+}
+
+static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+			       size_t size)
+{
+	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+
+	mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data);
 }
 
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 					  dma_addr_t iova)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
-	unsigned long flags;
 	phys_addr_t pa;
 
-	spin_lock_irqsave(&dom->pgtlock, flags);
 	pa = dom->iop->iova_to_phys(dom->iop, iova);
-	spin_unlock_irqrestore(&dom->pgtlock, flags);
-
-	if (data->enable_4GB && pa < MTK_IOMMU_4GB_MODE_REMAP_BASE)
-		pa |= BIT_ULL(32);
+	if (dom->data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
+		pa &= ~BIT_ULL(32);
 
 	return pa;
 }
 
-static int mtk_iommu_add_device(struct device *dev)
+static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct mtk_iommu_data *data;
-	struct iommu_group *group;
 
-	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
-		return -ENODEV; /* Not a iommu client device */
+	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
+		return ERR_PTR(-ENODEV); /* Not a iommu client device */
 
-	data = dev->iommu_fwspec->iommu_priv;
-	iommu_device_link(&data->iommu, dev);
+	data = dev_iommu_priv_get(dev);
 
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-
-	iommu_group_put(group);
-	return 0;
+	return &data->iommu;
 }
 
-static void mtk_iommu_remove_device(struct device *dev)
+static void mtk_iommu_release_device(struct device *dev)
 {
-	struct mtk_iommu_data *data;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 
-	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
 		return;
 
-	data = dev->iommu_fwspec->iommu_priv;
-	iommu_device_unlink(&data->iommu, dev);
-
-	iommu_group_remove_device(dev);
 	iommu_fwspec_free(dev);
 }
 
 static struct iommu_group *mtk_iommu_device_group(struct device *dev)
 {
 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+	struct iommu_group *group;
+	int domid;
 
 	if (!data)
 		return ERR_PTR(-ENODEV);
 
-	/* All the client devices are in the same m4u iommu-group */
-	if (!data->m4u_group) {
-		data->m4u_group = iommu_group_alloc();
-		if (IS_ERR(data->m4u_group))
-			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+	domid = mtk_iommu_get_domain_id(dev, data->plat_data);
+	if (domid < 0)
+		return ERR_PTR(domid);
+
+	mutex_lock(&data->mutex);
+	group = data->m4u_group[domid];
+	if (!group) {
+		group = iommu_group_alloc();
+		if (!IS_ERR(group))
+			data->m4u_group[domid] = group;
 	} else {
-		iommu_group_ref_get(data->m4u_group);
+		iommu_group_ref_get(group);
 	}
-	return data->m4u_group;
+	mutex_unlock(&data->mutex);
+	return group;
 }
 
 static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
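
The map/unmap paths above no longer take a driver-private lock or flush per page;
unmap only widens the iommu_iotlb_gather window, and the later iotlb_sync issues a
single ranged invalidation for whatever accumulated. A sketch of just that window
arithmetic, with arbitrary values:

    #include <stdint.h>
    #include <stdio.h>

    struct gather { uint64_t start, end; };

    static void gather_add(struct gather *g, uint64_t iova, uint64_t size)
    {
        uint64_t end = iova + size - 1;

        if (g->start > iova)
            g->start = iova;
        if (g->end < end)
            g->end = end;
    }

    int main(void)
    {
        struct gather g = { .start = ~0ULL, .end = 0 };  /* empty window */

        gather_add(&g, 0x1000, 0x1000);
        gather_add(&g, 0x5000, 0x2000);

        /* One flush of [0x1000, 0x6fff] (length 0x6000) instead of two. */
        printf("iova=0x%llx len=0x%llx\n",
               (unsigned long long)g.start,
               (unsigned long long)(g.end - g.start + 1));
        return 0;
    }
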
@@ -500,49 +635,78 @@
 		return -EINVAL;
 	}
 
-	if (!dev->iommu_fwspec->iommu_priv) {
+	if (!dev_iommu_priv_get(dev)) {
 		/* Get the m4u device */
 		m4updev = of_find_device_by_node(args->np);
 		if (WARN_ON(!m4updev))
 			return -EINVAL;
 
-		dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
+		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
 	}
 
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
-static struct iommu_ops mtk_iommu_ops = {
+static void mtk_iommu_get_resv_regions(struct device *dev,
+				       struct list_head *head)
+{
+	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+	unsigned int domid = mtk_iommu_get_domain_id(dev, data->plat_data), i;
+	const struct mtk_iommu_iova_region *resv, *curdom;
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+
+	if ((int)domid < 0)
+		return;
+	curdom = data->plat_data->iova_region + domid;
+	for (i = 0; i < data->plat_data->iova_region_nr; i++) {
+		resv = data->plat_data->iova_region + i;
+
+		/* Only reserve when the region is inside the current domain */
+		if (resv->iova_base <= curdom->iova_base ||
+		    resv->iova_base + resv->size >= curdom->iova_base + curdom->size)
+			continue;
+
+		region = iommu_alloc_resv_region(resv->iova_base, resv->size,
+						 prot, IOMMU_RESV_RESERVED);
+		if (!region)
+			return;
+
+		list_add_tail(&region->list, head);
+	}
+}
+
+static const struct iommu_ops mtk_iommu_ops = {
 	.domain_alloc	= mtk_iommu_domain_alloc,
 	.domain_free	= mtk_iommu_domain_free,
 	.attach_dev	= mtk_iommu_attach_device,
 	.detach_dev	= mtk_iommu_detach_device,
 	.map		= mtk_iommu_map,
 	.unmap		= mtk_iommu_unmap,
-	.flush_iotlb_all = mtk_iommu_iotlb_sync,
+	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
 	.iotlb_sync	= mtk_iommu_iotlb_sync,
+	.iotlb_sync_map	= mtk_iommu_sync_map,
 	.iova_to_phys	= mtk_iommu_iova_to_phys,
-	.add_device	= mtk_iommu_add_device,
-	.remove_device	= mtk_iommu_remove_device,
+	.probe_device	= mtk_iommu_probe_device,
+	.release_device	= mtk_iommu_release_device,
 	.device_group	= mtk_iommu_device_group,
 	.of_xlate	= mtk_iommu_of_xlate,
+	.get_resv_regions = mtk_iommu_get_resv_regions,
+	.put_resv_regions = generic_iommu_put_resv_regions,
 	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
 };
 
 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
 {
 	u32 regval;
-	int ret;
 
-	ret = clk_prepare_enable(data->bclk);
-	if (ret) {
-		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
-		return ret;
+	if (data->plat_data->m4u_plat == M4U_MT8173) {
+		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
+			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
+	} else {
+		regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
+		regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
 	}
-
-	regval = F_MMU_TF_PROTECT_SEL(2, data);
-	if (data->m4u_plat == M4U_MT8173)
-		regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
 	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
 
 	regval = F_L2_MULIT_HIT_EN |
@@ -562,14 +726,15 @@
 		 F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
 	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
 
-	if (data->m4u_plat == M4U_MT8173)
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
 		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
 	else
 		regval = lower_32_bits(data->protect_base) |
 			 upper_32_bits(data->protect_base);
 	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
 
-	if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
+	if (data->enable_4GB &&
+	    MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
 		/*
 		 * If 4GB mode is enabled, the validate PA range is from
 		 * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30].
@@ -578,15 +743,27 @@
 		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
 	}
 	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
+		/* write command throttling mode */
+		regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
+		regval &= ~F_MMU_WR_THROT_DIS_MASK;
+		writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
+	}
 
-	/* It's MISC control register whose default value is ok except mt8173.*/
-	if (data->m4u_plat == M4U_MT8173)
-		writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
+		/* The register is called STANDARD_AXI_MODE in this case */
+		regval = 0;
+	} else {
+		regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
+		regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
+		if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
+			regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
+	}
+	writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
 
 	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
 			     dev_name(data->dev), (void *)data)) {
 		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
-		clk_disable_unprepare(data->bclk);
 		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
 		return -ENODEV;
 	}
@@ -603,17 +780,23 @@
 {
 	struct mtk_iommu_data *data;
 	struct device *dev = &pdev->dev;
+	struct device_node *larbnode, *smicomm_node;
+	struct platform_device *plarbdev;
+	struct device_link *link;
 	struct resource *res;
 	resource_size_t ioaddr;
 	struct component_match *match = NULL;
+	struct regmap *infracfg;
 	void *protect;
 	int i, larb_nr, ret;
+	u32 val;
+	char *p;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 	data->dev = dev;
-	data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);
+	data->plat_data = of_device_get_match_data(dev);
 
 	/* Protect memory. HW will access here while translation fault.*/
 	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
@@ -621,8 +804,28 @@
 		return -ENOMEM;
 	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
 
-	/* Whether the current dram is over 4GB */
-	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
+		switch (data->plat_data->m4u_plat) {
+		case M4U_MT2712:
+			p = "mediatek,mt2712-infracfg";
+			break;
+		case M4U_MT8173:
+			p = "mediatek,mt8173-infracfg";
+			break;
+		default:
+			p = NULL;
+		}
+
+		infracfg = syscon_regmap_lookup_by_compatible(p);
+
+		if (IS_ERR(infracfg))
+			return PTR_ERR(infracfg);
+
+		ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
+		if (ret)
+			return ret;
+		data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	data->base = devm_ioremap_resource(dev, res);
@@ -634,65 +837,104 @@
 	if (data->irq < 0)
 		return data->irq;
 
-	data->bclk = devm_clk_get(dev, "bclk");
-	if (IS_ERR(data->bclk))
-		return PTR_ERR(data->bclk);
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
+		data->bclk = devm_clk_get(dev, "bclk");
+		if (IS_ERR(data->bclk))
+			return PTR_ERR(data->bclk);
+	}
 
 	larb_nr = of_count_phandle_with_args(dev->of_node,
 					     "mediatek,larbs", NULL);
 	if (larb_nr < 0)
 		return larb_nr;
-	data->smi_imu.larb_nr = larb_nr;
 
 	for (i = 0; i < larb_nr; i++) {
-		struct device_node *larbnode;
-		struct platform_device *plarbdev;
 		u32 id;
 
 		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
 		if (!larbnode)
 			return -EINVAL;
 
-		if (!of_device_is_available(larbnode))
+		if (!of_device_is_available(larbnode)) {
+			of_node_put(larbnode);
 			continue;
+		}
 
 		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
 		if (ret)/* The id is consecutive if there is no this property */
			id = i;
 
 		plarbdev = of_find_device_by_node(larbnode);
-		if (!plarbdev)
+		if (!plarbdev) {
+			of_node_put(larbnode);
 			return -EPROBE_DEFER;
-		data->smi_imu.larb_imu[id].dev = &plarbdev->dev;
+		}
+		data->larb_imu[id].dev = &plarbdev->dev;
 
 		component_match_add_release(dev, &match, release_of,
 					    compare_of, larbnode);
 	}
 
-	platform_set_drvdata(pdev, data);
+	/* Get smi-common dev from the last larb. */
+	smicomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
+	if (!smicomm_node)
+		return -EINVAL;
 
-	ret = mtk_iommu_hw_init(data);
-	if (ret)
-		return ret;
+	plarbdev = of_find_device_by_node(smicomm_node);
+	of_node_put(smicomm_node);
+	data->smicomm_dev = &plarbdev->dev;
+
+	pm_runtime_enable(dev);
+
+	link = device_link_add(data->smicomm_dev, dev,
+			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+	if (!link) {
+		dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
+		ret = -EINVAL;
+		goto out_runtime_disable;
+	}
+
+	platform_set_drvdata(pdev, data);
+	mutex_init(&data->mutex);
 
 	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
 				     "mtk-iommu.%pa", &ioaddr);
 	if (ret)
-		return ret;
+		goto out_link_remove;
 
 	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
 	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
 
 	ret = iommu_device_register(&data->iommu);
 	if (ret)
-		return ret;
+		goto out_sysfs_remove;
 
+	spin_lock_init(&data->tlb_lock);
 	list_add_tail(&data->list, &m4ulist);
 
-	if (!iommu_present(&platform_bus_type))
-		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+	if (!iommu_present(&platform_bus_type)) {
+		ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+		if (ret)
+			goto out_list_del;
+	}
 
-	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+	ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+	if (ret)
+		goto out_bus_set_null;
+	return ret;
+
+out_bus_set_null:
+	bus_set_iommu(&platform_bus_type, NULL);
+out_list_del:
+	list_del(&data->list);
+	iommu_device_unregister(&data->iommu);
+out_sysfs_remove:
+	iommu_device_sysfs_remove(&data->iommu);
+out_link_remove:
+	device_link_remove(data->smicomm_dev, dev);
+out_runtime_disable:
+	pm_runtime_disable(dev);
+	return ret;
 }
 
 static int mtk_iommu_remove(struct platform_device *pdev)
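
The probe() rework above also gives the function real unwind ordering: each failure
jumps to the label that rolls back exactly the steps that had already succeeded, in
reverse order of acquisition. The control-flow shape, reduced to a compilable sketch
with hypothetical steps:

    #include <stdio.h>

    static int step(int ok) { return ok ? 0 : -1; }

    static int probe(void)
    {
        int ret;

        ret = step(1);          /* acquire resource A */
        if (ret)
            return ret;         /* nothing to undo yet */

        ret = step(1);          /* acquire resource B */
        if (ret)
            goto out_undo_a;

        ret = step(0);          /* acquiring C fails here */
        if (ret)
            goto out_undo_b;

        return 0;

    out_undo_b:
        puts("release B");      /* undone in reverse order */
    out_undo_a:
        puts("release A");
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }
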
@@ -702,36 +944,38 @@
 	iommu_device_sysfs_remove(&data->iommu);
 	iommu_device_unregister(&data->iommu);
 
-	if (iommu_present(&platform_bus_type))
-		bus_set_iommu(&platform_bus_type, NULL);
+	list_del(&data->list);
 
-	clk_disable_unprepare(data->bclk);
+	device_link_remove(data->smicomm_dev, &pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 	devm_free_irq(&pdev->dev, data->irq, data);
 	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
 	return 0;
 }
 
-static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev)
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
 	struct mtk_iommu_suspend_reg *reg = &data->reg;
 	void __iomem *base = data->base;
 
-	reg->standard_axi_mode = readl_relaxed(base +
-					       REG_MMU_STANDARD_AXI_MODE);
+	reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
+	reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
 	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
 	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
 	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
 	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
 	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
+	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
 	clk_disable_unprepare(data->bclk);
 	return 0;
 }
 
-static int __maybe_unused mtk_iommu_resume(struct device *dev)
+static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
 	struct mtk_iommu_suspend_reg *reg = &data->reg;
+	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
 	void __iomem *base = data->base;
 	int ret;
 
@@ -740,26 +984,96 @@
 		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
 		return ret;
 	}
-	writel_relaxed(reg->standard_axi_mode,
-		       base + REG_MMU_STANDARD_AXI_MODE);
+
+	/*
+	 * Uppon first resume, only enable the clk and return, since the values of the
+	 * registers are not yet set.
+	 */
+	if (!m4u_dom)
+		return 0;
+
+	writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
+	writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
 	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
 	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
 	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
 	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
 	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
-	if (data->m4u_dom)
-		writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
-		       base + REG_MMU_PT_BASE_ADDR);
+	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
+	writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
 	return 0;
 }
 
 static const struct dev_pm_ops mtk_iommu_pm_ops = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+	SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL)
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				     pm_runtime_force_resume)
+};
+
+static const struct mtk_iommu_plat_data mt2712_data = {
+	.m4u_plat       = M4U_MT2712,
+	.flags          = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
+	.inv_sel_reg    = REG_MMU_INV_SEL_GEN1,
+	.iova_region    = single_domain,
+	.iova_region_nr = ARRAY_SIZE(single_domain),
+	.larbid_remap   = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
+};
+
+static const struct mtk_iommu_plat_data mt6779_data = {
+	.m4u_plat       = M4U_MT6779,
+	.flags          = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
+	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
+	.iova_region    = single_domain,
+	.iova_region_nr = ARRAY_SIZE(single_domain),
+	.larbid_remap   = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
+};
+
+static const struct mtk_iommu_plat_data mt8167_data = {
+	.m4u_plat       = M4U_MT8167,
+	.flags          = RESET_AXI | HAS_LEGACY_IVRP_PADDR,
+	.inv_sel_reg    = REG_MMU_INV_SEL_GEN1,
+	.iova_region    = single_domain,
+	.iova_region_nr = ARRAY_SIZE(single_domain),
+	.larbid_remap   = {{0}, {1}, {2}}, /* Linear mapping. */
+};
+
+static const struct mtk_iommu_plat_data mt8173_data = {
+	.m4u_plat       = M4U_MT8173,
+	.flags          = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
+			  HAS_LEGACY_IVRP_PADDR,
+	.inv_sel_reg    = REG_MMU_INV_SEL_GEN1,
+	.iova_region    = single_domain,
+	.iova_region_nr = ARRAY_SIZE(single_domain),
+	.larbid_remap   = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
+};
+
+static const struct mtk_iommu_plat_data mt8183_data = {
+	.m4u_plat       = M4U_MT8183,
+	.flags          = RESET_AXI,
+	.inv_sel_reg    = REG_MMU_INV_SEL_GEN1,
+	.iova_region    = single_domain,
+	.iova_region_nr = ARRAY_SIZE(single_domain),
+	.larbid_remap   = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
+};
+
+static const struct mtk_iommu_plat_data mt8192_data = {
+	.m4u_plat       = M4U_MT8192,
+	.flags          = HAS_BCLK | HAS_SUB_COMM | OUT_ORDER_WR_EN |
+			  WR_THROT_EN | IOVA_34_EN,
+	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
+	.iova_region    = mt8192_multi_dom,
+	.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+	.larbid_remap   = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
+			   {0, 14, 16}, {0, 13, 18, 17}},
 };
 
 static const struct of_device_id mtk_iommu_of_ids[] = {
-	{ .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},
-	{ .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},
+	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
+	{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
+	{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
+	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
+	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
+	{ .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
 	{}
 };
 
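
On SoCs with HAS_SUB_COMM the interrupt registers report a (common, sub-common) pair
rather than a larb number, and the per-SoC larbid_remap[][] table above translates
that pair back to a real larb. A sketch of the decode using mt8192's table from the
patch and an invented register value:

    #include <stdio.h>

    static const unsigned char larbid_remap[8][4] = {
        {0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
        {0, 14, 16}, {0, 13, 18, 17},
    };

    int main(void)
    {
        unsigned int regval = (5u << 9) | (2u << 7) | (3u << 2);  /* made up */

        unsigned int comm = (regval >> 9) & 0x7;  /* F_MMU_INT_ID_COMM_ID */
        unsigned int sub  = (regval >> 7) & 0x3;  /* F_MMU_INT_ID_SUB_COMM_ID */
        unsigned int port = (regval >> 2) & 0x1f; /* F_MMU_INT_ID_PORT_ID */

        /* Common 5 / sub-common 2 remaps to larb 19 on mt8192. */
        printf("larb=%u port=%u\n", larbid_remap[comm][sub], port);
        return 0;
    }
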
@@ -768,7 +1082,7 @@
 	.remove	= mtk_iommu_remove,
 	.driver	= {
 		.name = "mtk-iommu",
-		.of_match_table = of_match_ptr(mtk_iommu_of_ids),
+		.of_match_table = mtk_iommu_of_ids,
 		.pm = &mtk_iommu_pm_ops,
 	}
 };