| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Copyright (c) 2015-2016 MediaTek Inc. |
|---|
| 3 | 4 | * Author: Yong Wu <yong.wu@mediatek.com> |
|---|
| 4 | | - * |
|---|
| 5 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 6 | | - * it under the terms of the GNU General Public License version 2 as |
|---|
| 7 | | - * published by the Free Software Foundation. |
|---|
| 8 | | - * |
|---|
| 9 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 10 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 11 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 12 | | - * GNU General Public License for more details. |
|---|
| 13 | 5 | */ |
|---|
| 14 | | -#include <linux/bootmem.h> |
|---|
| 6 | +#include <linux/bitfield.h> |
|---|
| 15 | 7 | #include <linux/bug.h> |
|---|
| 16 | 8 | #include <linux/clk.h> |
|---|
| 17 | 9 | #include <linux/component.h> |
|---|
| 18 | 10 | #include <linux/device.h> |
|---|
| 11 | +#include <linux/dma-direct.h> |
|---|
| 19 | 12 | #include <linux/dma-iommu.h> |
|---|
| 20 | 13 | #include <linux/err.h> |
|---|
| 21 | 14 | #include <linux/interrupt.h> |
|---|
| .. | .. |
|---|
| 23 | 16 | #include <linux/iommu.h> |
|---|
| 24 | 17 | #include <linux/iopoll.h> |
|---|
| 25 | 18 | #include <linux/list.h> |
|---|
| 19 | +#include <linux/mfd/syscon.h> |
|---|
| 26 | 20 | #include <linux/of_address.h> |
|---|
| 27 | 21 | #include <linux/of_iommu.h> |
|---|
| 28 | 22 | #include <linux/of_irq.h> |
|---|
| 29 | 23 | #include <linux/of_platform.h> |
|---|
| 30 | 24 | #include <linux/platform_device.h> |
|---|
| 25 | +#include <linux/pm_runtime.h> |
|---|
| 26 | +#include <linux/regmap.h> |
|---|
| 31 | 27 | #include <linux/slab.h> |
|---|
| 32 | 28 | #include <linux/spinlock.h> |
|---|
| 29 | +#include <linux/soc/mediatek/infracfg.h> |
|---|
| 33 | 30 | #include <asm/barrier.h> |
|---|
| 34 | 31 | #include <soc/mediatek/smi.h> |
|---|
| 35 | 32 | |
|---|
| 36 | 33 | #include "mtk_iommu.h" |
|---|
| 37 | 34 | |
|---|
| 38 | 35 | #define REG_MMU_PT_BASE_ADDR 0x000 |
|---|
| 36 | +#define MMU_PT_ADDR_MASK GENMASK(31, 7) |
|---|
| 39 | 37 | |
|---|
| 40 | 38 | #define REG_MMU_INVALIDATE 0x020 |
|---|
| 41 | 39 | #define F_ALL_INVLD 0x2 |
|---|
| .. | .. |
|---|
| 44 | 42 | #define REG_MMU_INVLD_START_A 0x024 |
|---|
| 45 | 43 | #define REG_MMU_INVLD_END_A 0x028 |
|---|
| 46 | 44 | |
|---|
| 47 | | -#define REG_MMU_INV_SEL 0x038 |
|---|
| 45 | +#define REG_MMU_INV_SEL_GEN2 0x02c |
|---|
| 46 | +#define REG_MMU_INV_SEL_GEN1 0x038 |
|---|
| 48 | 47 | #define F_INVLD_EN0 BIT(0) |
|---|
| 49 | 48 | #define F_INVLD_EN1 BIT(1) |
|---|
| 50 | 49 | |
|---|
| 51 | | -#define REG_MMU_STANDARD_AXI_MODE 0x048 |
|---|
| 50 | +#define REG_MMU_MISC_CTRL 0x048 |
|---|
| 51 | +#define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17)) |
|---|
| 52 | +#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19)) |
|---|
| 53 | + |
|---|
| 52 | 54 | #define REG_MMU_DCM_DIS 0x050 |
|---|
| 55 | +#define REG_MMU_WR_LEN_CTRL 0x054 |
|---|
| 56 | +#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21)) |
|---|
| 53 | 57 | |
|---|
| 54 | 58 | #define REG_MMU_CTRL_REG 0x110 |
|---|
| 59 | +#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4) |
|---|
| 55 | 60 | #define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4) |
|---|
| 56 | | -#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \ |
|---|
| 57 | | - ((data)->m4u_plat == M4U_MT2712 ? 4 : 5) |
|---|
| 58 | | -/* It's named by F_MMU_TF_PROT_SEL in mt2712. */ |
|---|
| 59 | | -#define F_MMU_TF_PROTECT_SEL(prot, data) \ |
|---|
| 60 | | - (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data)) |
|---|
| 61 | +#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5) |
|---|
| 61 | 62 | |
|---|
| 62 | 63 | #define REG_MMU_IVRP_PADDR 0x114 |
|---|
| 63 | 64 | |
|---|
| .. | .. |
|---|
| 74 | 75 | #define F_INT_CLR_BIT BIT(12) |
|---|
| 75 | 76 | |
|---|
| 76 | 77 | #define REG_MMU_INT_MAIN_CONTROL 0x124 |
|---|
| 77 | | -#define F_INT_TRANSLATION_FAULT BIT(0) |
|---|
| 78 | | -#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1) |
|---|
| 79 | | -#define F_INT_INVALID_PA_FAULT BIT(2) |
|---|
| 80 | | -#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3) |
|---|
| 81 | | -#define F_INT_TLB_MISS_FAULT BIT(4) |
|---|
| 82 | | -#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5) |
|---|
| 83 | | -#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6) |
|---|
| 78 | + /* mmu0 | mmu1 */ |
|---|
| 79 | +#define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7)) |
|---|
| 80 | +#define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8)) |
|---|
| 81 | +#define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9)) |
|---|
| 82 | +#define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10)) |
|---|
| 83 | +#define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11)) |
|---|
| 84 | +#define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12)) |
|---|
| 85 | +#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT (BIT(6) | BIT(13)) |
|---|
| 84 | 86 | |
|---|
| 85 | 87 | #define REG_MMU_CPE_DONE 0x12C |
|---|
| 86 | 88 | |
|---|
| 87 | 89 | #define REG_MMU_FAULT_ST1 0x134 |
|---|
| 90 | +#define F_REG_MMU0_FAULT_MASK GENMASK(6, 0) |
|---|
| 91 | +#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7) |
|---|
| 88 | 92 | |
|---|
| 89 | | -#define REG_MMU_FAULT_VA 0x13c |
|---|
| 93 | +#define REG_MMU0_FAULT_VA 0x13c |
|---|
| 94 | +#define F_MMU_INVAL_VA_31_12_MASK GENMASK(31, 12) |
|---|
| 95 | +#define F_MMU_INVAL_VA_34_32_MASK GENMASK(11, 9) |
|---|
| 96 | +#define F_MMU_INVAL_PA_34_32_MASK GENMASK(8, 6) |
|---|
| 90 | 97 | #define F_MMU_FAULT_VA_WRITE_BIT BIT(1) |
|---|
| 91 | 98 | #define F_MMU_FAULT_VA_LAYER_BIT BIT(0) |
|---|
| 92 | 99 | |
|---|
| 93 | | -#define REG_MMU_INVLD_PA 0x140 |
|---|
| 94 | | -#define REG_MMU_INT_ID 0x150 |
|---|
| 95 | | -#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) |
|---|
| 96 | | -#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) |
|---|
| 100 | +#define REG_MMU0_INVLD_PA 0x140 |
|---|
| 101 | +#define REG_MMU1_FAULT_VA 0x144 |
|---|
| 102 | +#define REG_MMU1_INVLD_PA 0x148 |
|---|
| 103 | +#define REG_MMU0_INT_ID 0x150 |
|---|
| 104 | +#define REG_MMU1_INT_ID 0x154 |
|---|
| 105 | +#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7) |
|---|
| 106 | +#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3) |
|---|
| 107 | +#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) |
|---|
| 108 | +#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) |
|---|
| 97 | 109 | |
|---|
| 98 | | -#define MTK_PROTECT_PA_ALIGN 128 |
|---|
| 110 | +#define MTK_PROTECT_PA_ALIGN 256 |
|---|
| 99 | 111 | |
|---|
| 100 | | -/* |
|---|
| 101 | | - * Get the local arbiter ID and the portid within the larb arbiter |
|---|
| 102 | | - * from mtk_m4u_id which is defined by MTK_M4U_ID. |
|---|
| 103 | | - */ |
|---|
| 104 | | -#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf) |
|---|
| 105 | | -#define MTK_M4U_TO_PORT(id) ((id) & 0x1f) |
|---|
| 112 | +#define HAS_4GB_MODE BIT(0) |
|---|
| 113 | +/* HW will use the EMI clock if there is no "bclk". */ |
|---|
| 114 | +#define HAS_BCLK BIT(1) |
|---|
| 115 | +#define HAS_VLD_PA_RNG BIT(2) |
|---|
| 116 | +#define RESET_AXI BIT(3) |
|---|
| 117 | +#define OUT_ORDER_WR_EN BIT(4) |
|---|
| 118 | +#define HAS_SUB_COMM BIT(5) |
|---|
| 119 | +#define WR_THROT_EN BIT(6) |
|---|
| 120 | +#define HAS_LEGACY_IVRP_PADDR BIT(7) |
|---|
| 121 | +#define IOVA_34_EN BIT(8) |
|---|
| 122 | + |
|---|
| 123 | +#define MTK_IOMMU_HAS_FLAG(pdata, _x) \ |
|---|
| 124 | + ((((pdata)->flags) & (_x)) == (_x)) |
|---|
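MTK_IOMMU_HAS_FLAG() requires *every* requested bit to be present in the platform flags, not just any of them. A minimal userspace sketch of that check, with the flag values copied from the defines above and a made-up combined query:

```c
#include <stdio.h>

/* Flag values copied from the defines above. */
#define HAS_BCLK	(1u << 1)
#define HAS_SUB_COMM	(1u << 5)
#define WR_THROT_EN	(1u << 6)

/* Same test as MTK_IOMMU_HAS_FLAG(): every requested bit must be set. */
#define HAS_FLAG(flags, x)	(((flags) & (x)) == (x))

int main(void)
{
	unsigned int flags = HAS_BCLK | HAS_SUB_COMM;

	printf("%d %d\n", HAS_FLAG(flags, HAS_SUB_COMM),
	       HAS_FLAG(flags, HAS_SUB_COMM | WR_THROT_EN));	/* prints "1 0" */
	return 0;
}
```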
| 106 | 125 | |
|---|
| 107 | 126 | struct mtk_iommu_domain { |
|---|
| 108 | | - spinlock_t pgtlock; /* lock for page table */ |
|---|
| 109 | | - |
|---|
| 110 | 127 | struct io_pgtable_cfg cfg; |
|---|
| 111 | 128 | struct io_pgtable_ops *iop; |
|---|
| 112 | 129 | |
|---|
| 130 | + struct mtk_iommu_data *data; |
|---|
| 113 | 131 | struct iommu_domain domain; |
|---|
| 114 | 132 | }; |
|---|
| 115 | 133 | |
|---|
| 116 | | -static struct iommu_ops mtk_iommu_ops; |
|---|
| 134 | +static const struct iommu_ops mtk_iommu_ops; |
|---|
| 135 | + |
|---|
| 136 | +static int mtk_iommu_hw_init(const struct mtk_iommu_data *data); |
|---|
| 137 | + |
|---|
| 138 | +#define MTK_IOMMU_TLB_ADDR(iova) ({ \ |
|---|
| 139 | + dma_addr_t _addr = iova; \ |
|---|
| 140 | + ((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\ |
|---|
| 141 | +}) |
|---|
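MTK_IOMMU_TLB_ADDR() packs an up-to-34-bit IOVA into the 32-bit invalidate-range registers: bits [31:12] stay in place and the bits above 32 are folded into the low bits of the register. A minimal userspace sketch of the same packing, with the kernel helpers restated locally and a hypothetical 34-bit address:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace restatements of the kernel helpers used by the macro. */
#define GENMASK32(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define lower_32_bits(x)	((uint32_t)((x) & 0xffffffffu))
#define upper_32_bits(x)	((uint32_t)((x) >> 32))

/* Same packing as MTK_IOMMU_TLB_ADDR(): IOVA[31:12] kept, IOVA[34:32] folded low. */
static uint32_t tlb_addr(uint64_t iova)
{
	return (lower_32_bits(iova) & GENMASK32(31, 12)) | upper_32_bits(iova);
}

int main(void)
{
	/* 34-bit IOVA 0x2_4000_1000 becomes register value 0x40001002. */
	printf("0x%08x\n", (unsigned int)tlb_addr(0x240001000ULL));
	return 0;
}
```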
| 117 | 142 | |
|---|
| 118 | 143 | /* |
|---|
| 119 | 144 | * In M4U 4GB mode, the physical address is remapped as below: |
|---|
| .. | .. |
|---|
| 137 | 162 | * 'E', the CPU physical address is kept as is. |
|---|
| 138 | 163 | * Additionally, the iommu consumers always use the CPU physical address. |
|---|
| 139 | 164 | */ |
|---|
| 140 | | -#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x40000000 |
|---|
| 165 | +#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL |
|---|
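With the reworked 4GB-mode handling later in this patch, mtk_iommu_map() sets BIT(32) on the CPU physical address before it enters the page table, and mtk_iommu_iova_to_phys() strips that bit again for any PA at or above MTK_IOMMU_4GB_MODE_REMAP_BASE. A minimal userspace sketch of that round trip (the helper names are local to the example):

```c
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)			(1ULL << (n))
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	0x140000000ULL

/* What mtk_iommu_map() does to the CPU PA in 4GB mode (sketch). */
static uint64_t pa_to_pgtable(uint64_t cpu_pa, int enable_4gb)
{
	return enable_4gb ? (cpu_pa | BIT_ULL(32)) : cpu_pa;
}

/* What mtk_iommu_iova_to_phys() does on the way back (sketch). */
static uint64_t pgtable_to_pa(uint64_t pa, int enable_4gb)
{
	if (enable_4gb && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);
	return pa;
}

int main(void)
{
	uint64_t stored = pa_to_pgtable(0x40000000ULL, 1);	/* 0x1_4000_0000 */

	printf("stored=%#llx cpu=%#llx\n", (unsigned long long)stored,
	       (unsigned long long)pgtable_to_pa(stored, 1));	/* back to 0x4000_0000 */
	return 0;
}
```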
| 141 | 166 | |
|---|
| 142 | 167 | static LIST_HEAD(m4ulist); /* List all the M4U HWs */ |
|---|
| 143 | 168 | |
|---|
| 144 | 169 | #define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list) |
|---|
| 170 | + |
|---|
| 171 | +struct mtk_iommu_iova_region { |
|---|
| 172 | + dma_addr_t iova_base; |
|---|
| 173 | + unsigned long long size; |
|---|
| 174 | +}; |
|---|
| 175 | + |
|---|
| 176 | +static const struct mtk_iommu_iova_region single_domain[] = { |
|---|
| 177 | + {.iova_base = 0, .size = SZ_4G}, |
|---|
| 178 | +}; |
|---|
| 179 | + |
|---|
| 180 | +static const struct mtk_iommu_iova_region mt8192_multi_dom[] = { |
|---|
| 181 | + { .iova_base = 0x0, .size = SZ_4G}, /* disp: 0 ~ 4G */ |
|---|
| 182 | + #if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) |
|---|
| 183 | + { .iova_base = SZ_4G, .size = SZ_4G}, /* vdec: 4G ~ 8G */ |
|---|
| 184 | + { .iova_base = SZ_4G * 2, .size = SZ_4G}, /* CAM/MDP: 8G ~ 12G */ |
|---|
| 185 | + { .iova_base = 0x240000000ULL, .size = 0x4000000}, /* CCU0 */ |
|---|
| 186 | + { .iova_base = 0x244000000ULL, .size = 0x4000000}, /* CCU1 */ |
|---|
| 187 | + #endif |
|---|
| 188 | +}; |
|---|
| 145 | 189 | |
|---|
| 146 | 190 | /* |
|---|
| 147 | 191 | * There may be 1 or 2 M4U HWs, but we always expect them to be in the same domain |
|---|
| .. | .. |
|---|
| 165 | 209 | return container_of(dom, struct mtk_iommu_domain, domain); |
|---|
| 166 | 210 | } |
|---|
| 167 | 211 | |
|---|
| 168 | | -static void mtk_iommu_tlb_flush_all(void *cookie) |
|---|
| 212 | +static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data) |
|---|
| 169 | 213 | { |
|---|
| 170 | | - struct mtk_iommu_data *data = cookie; |
|---|
| 171 | | - |
|---|
| 172 | 214 | for_each_m4u(data) { |
|---|
| 215 | + if (pm_runtime_get_if_in_use(data->dev) <= 0) |
|---|
| 216 | + continue; |
|---|
| 217 | + |
|---|
| 173 | 218 | writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, |
|---|
| 174 | | - data->base + REG_MMU_INV_SEL); |
|---|
| 219 | + data->base + data->plat_data->inv_sel_reg); |
|---|
| 175 | 220 | writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE); |
|---|
| 176 | 221 | wmb(); /* Make sure the tlb flush all done */ |
|---|
| 222 | + |
|---|
| 223 | + pm_runtime_put(data->dev); |
|---|
| 177 | 224 | } |
|---|
| 178 | 225 | } |
|---|
| 179 | 226 | |
|---|
| 180 | | -static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, |
|---|
| 181 | | - size_t granule, bool leaf, |
|---|
| 182 | | - void *cookie) |
|---|
| 227 | +static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, |
|---|
| 228 | + size_t granule, |
|---|
| 229 | + struct mtk_iommu_data *data) |
|---|
| 183 | 230 | { |
|---|
| 184 | | - struct mtk_iommu_data *data = cookie; |
|---|
| 185 | | - |
|---|
| 186 | | - for_each_m4u(data) { |
|---|
| 187 | | - writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, |
|---|
| 188 | | - data->base + REG_MMU_INV_SEL); |
|---|
| 189 | | - |
|---|
| 190 | | - writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); |
|---|
| 191 | | - writel_relaxed(iova + size - 1, |
|---|
| 192 | | - data->base + REG_MMU_INVLD_END_A); |
|---|
| 193 | | - writel_relaxed(F_MMU_INV_RANGE, |
|---|
| 194 | | - data->base + REG_MMU_INVALIDATE); |
|---|
| 195 | | - data->tlb_flush_active = true; |
|---|
| 196 | | - } |
|---|
| 197 | | -} |
|---|
| 198 | | - |
|---|
| 199 | | -static void mtk_iommu_tlb_sync(void *cookie) |
|---|
| 200 | | -{ |
|---|
| 201 | | - struct mtk_iommu_data *data = cookie; |
|---|
| 231 | + bool has_pm = !!data->dev->pm_domain; |
|---|
| 232 | + unsigned long flags; |
|---|
| 202 | 233 | int ret; |
|---|
| 203 | 234 | u32 tmp; |
|---|
| 204 | 235 | |
|---|
| 205 | 236 | for_each_m4u(data) { |
|---|
| 206 | | - /* Avoid timing out if there's nothing to wait for */ |
|---|
| 207 | | - if (!data->tlb_flush_active) |
|---|
| 208 | | - return; |
|---|
| 237 | + if (has_pm) { |
|---|
| 238 | + if (pm_runtime_get_if_in_use(data->dev) <= 0) |
|---|
| 239 | + continue; |
|---|
| 240 | + } |
|---|
| 209 | 241 | |
|---|
| 242 | + spin_lock_irqsave(&data->tlb_lock, flags); |
|---|
| 243 | + writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, |
|---|
| 244 | + data->base + data->plat_data->inv_sel_reg); |
|---|
| 245 | + |
|---|
| 246 | + writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), |
|---|
| 247 | + data->base + REG_MMU_INVLD_START_A); |
|---|
| 248 | + writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1), |
|---|
| 249 | + data->base + REG_MMU_INVLD_END_A); |
|---|
| 250 | + writel_relaxed(F_MMU_INV_RANGE, |
|---|
| 251 | + data->base + REG_MMU_INVALIDATE); |
|---|
| 252 | + |
|---|
| 253 | + /* tlb sync */ |
|---|
| 210 | 254 | ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, |
|---|
| 211 | | - tmp, tmp != 0, 10, 100000); |
|---|
| 255 | + tmp, tmp != 0, 10, 1000); |
|---|
| 212 | 256 | if (ret) { |
|---|
| 213 | 257 | dev_warn(data->dev, |
|---|
| 214 | 258 | "Partial TLB flush timed out, falling back to full flush\n"); |
|---|
| 215 | | - mtk_iommu_tlb_flush_all(cookie); |
|---|
| 259 | + mtk_iommu_tlb_flush_all(data); |
|---|
| 216 | 260 | } |
|---|
| 217 | 261 | /* Clear the CPE status */ |
|---|
| 218 | 262 | writel_relaxed(0, data->base + REG_MMU_CPE_DONE); |
|---|
| 219 | | - data->tlb_flush_active = false; |
|---|
| 263 | + spin_unlock_irqrestore(&data->tlb_lock, flags); |
|---|
| 264 | + |
|---|
| 265 | + if (has_pm) |
|---|
| 266 | + pm_runtime_put(data->dev); |
|---|
| 220 | 267 | } |
|---|
| 221 | 268 | } |
|---|
| 222 | | - |
|---|
| 223 | | -static const struct iommu_gather_ops mtk_iommu_gather_ops = { |
|---|
| 224 | | - .tlb_flush_all = mtk_iommu_tlb_flush_all, |
|---|
| 225 | | - .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, |
|---|
| 226 | | - .tlb_sync = mtk_iommu_tlb_sync, |
|---|
| 227 | | -}; |
|---|
| 228 | 269 | |
|---|
| 229 | 270 | static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) |
|---|
| 230 | 271 | { |
|---|
| 231 | 272 | struct mtk_iommu_data *data = dev_id; |
|---|
| 232 | 273 | struct mtk_iommu_domain *dom = data->m4u_dom; |
|---|
| 233 | | - u32 int_state, regval, fault_iova, fault_pa; |
|---|
| 234 | | - unsigned int fault_larb, fault_port; |
|---|
| 274 | + unsigned int fault_larb, fault_port, sub_comm = 0; |
|---|
| 275 | + u32 int_state, regval, va34_32, pa34_32; |
|---|
| 276 | + u64 fault_iova, fault_pa; |
|---|
| 235 | 277 | bool layer, write; |
|---|
| 236 | 278 | |
|---|
| 237 | 279 | /* Read error info from registers */ |
|---|
| 238 | 280 | int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1); |
|---|
| 239 | | - fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA); |
|---|
| 281 | + if (int_state & F_REG_MMU0_FAULT_MASK) { |
|---|
| 282 | + regval = readl_relaxed(data->base + REG_MMU0_INT_ID); |
|---|
| 283 | + fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA); |
|---|
| 284 | + fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA); |
|---|
| 285 | + } else { |
|---|
| 286 | + regval = readl_relaxed(data->base + REG_MMU1_INT_ID); |
|---|
| 287 | + fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA); |
|---|
| 288 | + fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA); |
|---|
| 289 | + } |
|---|
| 240 | 290 | layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT; |
|---|
| 241 | 291 | write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; |
|---|
| 242 | | - fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA); |
|---|
| 243 | | - regval = readl_relaxed(data->base + REG_MMU_INT_ID); |
|---|
| 244 | | - fault_larb = F_MMU0_INT_ID_LARB_ID(regval); |
|---|
| 245 | | - fault_port = F_MMU0_INT_ID_PORT_ID(regval); |
|---|
| 292 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) { |
|---|
| 293 | + va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova); |
|---|
| 294 | + pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova); |
|---|
| 295 | + fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK; |
|---|
| 296 | + fault_iova |= (u64)va34_32 << 32; |
|---|
| 297 | + fault_pa |= (u64)pa34_32 << 32; |
|---|
| 298 | + } |
|---|
| 299 | + |
|---|
| 300 | + fault_port = F_MMU_INT_ID_PORT_ID(regval); |
|---|
| 301 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) { |
|---|
| 302 | + fault_larb = F_MMU_INT_ID_COMM_ID(regval); |
|---|
| 303 | + sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval); |
|---|
| 304 | + } else { |
|---|
| 305 | + fault_larb = F_MMU_INT_ID_LARB_ID(regval); |
|---|
| 306 | + } |
|---|
| 307 | + fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm]; |
|---|
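On platforms with the HAS_SUB_COMM flag, the fault source is now reported as a common-ID plus a sub-common-ID and then remapped to a larb through larbid_remap[][]. A minimal userspace sketch of that decoding, with the field extractors copied from above, the mt8192 remap table from later in this patch, and a hypothetical INT_ID register value:

```c
#include <stdio.h>

/* Field extractors copied from the defines above. */
#define F_MMU_INT_ID_COMM_ID(a)		(((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a)	(((a) >> 7) & 0x3)
#define F_MMU_INT_ID_PORT_ID(a)		(((a) >> 2) & 0x1f)

/* mt8192 larbid_remap from the platform data later in this patch. */
static const unsigned char larbid_remap[8][4] = {
	{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20}, {0, 14, 16}, {0, 13, 18, 17},
};

int main(void)
{
	unsigned int regval = 0xa8c;	/* hypothetical REG_MMU0_INT_ID value */
	unsigned int comm = F_MMU_INT_ID_COMM_ID(regval);	/* 5 */
	unsigned int sub  = F_MMU_INT_ID_SUB_COMM_ID(regval);	/* 1 */
	unsigned int port = F_MMU_INT_ID_PORT_ID(regval);	/* 3 */

	/* Prints "larb=11 port=3". */
	printf("larb=%u port=%u\n", (unsigned int)larbid_remap[comm][sub], port);
	return 0;
}
```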
| 246 | 308 | |
|---|
| 247 | 309 | if (report_iommu_fault(&dom->domain, data->dev, fault_iova, |
|---|
| 248 | 310 | write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) { |
|---|
| 249 | 311 | dev_err_ratelimited( |
|---|
| 250 | 312 | data->dev, |
|---|
| 251 | | - "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n", |
|---|
| 313 | + "fault type=0x%x iova=0x%llx pa=0x%llx larb=%d port=%d layer=%d %s\n", |
|---|
| 252 | 314 | int_state, fault_iova, fault_pa, fault_larb, fault_port, |
|---|
| 253 | 315 | layer, write ? "write" : "read"); |
|---|
| 254 | 316 | } |
|---|
| .. | .. |
|---|
| 263 | 325 | return IRQ_HANDLED; |
|---|
| 264 | 326 | } |
|---|
| 265 | 327 | |
|---|
| 266 | | -static void mtk_iommu_config(struct mtk_iommu_data *data, |
|---|
| 267 | | - struct device *dev, bool enable) |
|---|
| 328 | +static int mtk_iommu_get_domain_id(struct device *dev, |
|---|
| 329 | + const struct mtk_iommu_plat_data *plat_data) |
|---|
| 330 | +{ |
|---|
| 331 | + const struct mtk_iommu_iova_region *rgn = plat_data->iova_region; |
|---|
| 332 | + const struct bus_dma_region *dma_rgn = dev->dma_range_map; |
|---|
| 333 | + int i, candidate = -1; |
|---|
| 334 | + dma_addr_t dma_end; |
|---|
| 335 | + |
|---|
| 336 | + if (!dma_rgn || plat_data->iova_region_nr == 1) |
|---|
| 337 | + return 0; |
|---|
| 338 | + |
|---|
| 339 | + dma_end = dma_rgn->dma_start + dma_rgn->size - 1; |
|---|
| 340 | + for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) { |
|---|
| 341 | + /* Best fit. */ |
|---|
| 342 | + if (dma_rgn->dma_start == rgn->iova_base && |
|---|
| 343 | + dma_end == rgn->iova_base + rgn->size - 1) |
|---|
| 344 | + return i; |
|---|
| 345 | + /* ok if it is inside this region. */ |
|---|
| 346 | + if (dma_rgn->dma_start >= rgn->iova_base && |
|---|
| 347 | + dma_end < rgn->iova_base + rgn->size) |
|---|
| 348 | + candidate = i; |
|---|
| 349 | + } |
|---|
| 350 | + |
|---|
| 351 | + if (candidate >= 0) |
|---|
| 352 | + return candidate; |
|---|
| 353 | + dev_err(dev, "Can NOT find the iommu domain id(%pad 0x%llx).\n", |
|---|
| 354 | + &dma_rgn->dma_start, dma_rgn->size); |
|---|
| 355 | + return -EINVAL; |
|---|
| 356 | +} |
|---|
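mtk_iommu_get_domain_id() prefers a region whose bounds exactly match the device's dma-range and otherwise remembers any region that contains the range, returning the last such match. A minimal userspace sketch of the same policy against the mt8192 region table above (the device ranges are hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

/* mt8192_multi_dom from earlier in this patch (64-bit DMA config assumed). */
static const struct region rgn[] = {
	{ 0, 1ULL << 32 }, { 1ULL << 32, 1ULL << 32 }, { 2ULL << 32, 1ULL << 32 },
	{ 0x240000000ULL, 0x4000000 }, { 0x244000000ULL, 0x4000000 },
};

/* Same exact-fit-then-containment policy as mtk_iommu_get_domain_id() (sketch). */
static int domain_id(uint64_t dma_start, uint64_t size)
{
	uint64_t dma_end = dma_start + size - 1;
	int i, candidate = -1;

	for (i = 0; i < 5; i++) {
		if (dma_start == rgn[i].base && dma_end == rgn[i].base + rgn[i].size - 1)
			return i;		/* exact fit */
		if (dma_start >= rgn[i].base && dma_end < rgn[i].base + rgn[i].size)
			candidate = i;		/* contained */
	}
	return candidate;
}

int main(void)
{
	printf("%d\n", domain_id(1ULL << 32, 1ULL << 32));	/* vdec range -> 1 */
	printf("%d\n", domain_id(0x240000000ULL, 0x1000000));	/* inside CCU0 -> 3 */
	return 0;
}
```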
| 357 | + |
|---|
| 358 | +static void mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev, |
|---|
| 359 | + bool enable, unsigned int domid) |
|---|
| 268 | 360 | { |
|---|
| 269 | 361 | struct mtk_smi_larb_iommu *larb_mmu; |
|---|
| 270 | 362 | unsigned int larbid, portid; |
|---|
| 271 | | - struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
|---|
| 363 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
|---|
| 364 | + const struct mtk_iommu_iova_region *region; |
|---|
| 272 | 365 | int i; |
|---|
| 273 | 366 | |
|---|
| 274 | 367 | for (i = 0; i < fwspec->num_ids; ++i) { |
|---|
| 275 | 368 | larbid = MTK_M4U_TO_LARB(fwspec->ids[i]); |
|---|
| 276 | 369 | portid = MTK_M4U_TO_PORT(fwspec->ids[i]); |
|---|
| 277 | | - larb_mmu = &data->smi_imu.larb_imu[larbid]; |
|---|
| 278 | 370 | |
|---|
| 279 | | - dev_dbg(dev, "%s iommu port: %d\n", |
|---|
| 280 | | - enable ? "enable" : "disable", portid); |
|---|
| 371 | + larb_mmu = &data->larb_imu[larbid]; |
|---|
| 372 | + |
|---|
| 373 | + region = data->plat_data->iova_region + domid; |
|---|
| 374 | + larb_mmu->bank[portid] = upper_32_bits(region->iova_base); |
|---|
| 375 | + |
|---|
| 376 | + dev_dbg(dev, "%s iommu for larb(%s) port %d dom %d bank %d.\n", |
|---|
| 377 | + enable ? "enable" : "disable", dev_name(larb_mmu->dev), |
|---|
| 378 | + portid, domid, larb_mmu->bank[portid]); |
|---|
| 281 | 379 | |
|---|
| 282 | 380 | if (enable) |
|---|
| 283 | 381 | larb_mmu->mmu |= MTK_SMI_MMU_EN(portid); |
|---|
| .. | .. |
|---|
| 286 | 384 | } |
|---|
| 287 | 385 | } |
|---|
| 288 | 386 | |
|---|
| 289 | | -static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) |
|---|
| 387 | +static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, |
|---|
| 388 | + struct mtk_iommu_data *data, |
|---|
| 389 | + unsigned int domid) |
|---|
| 290 | 390 | { |
|---|
| 291 | | - struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); |
|---|
| 391 | + const struct mtk_iommu_iova_region *region; |
|---|
| 292 | 392 | |
|---|
| 293 | | - spin_lock_init(&dom->pgtlock); |
|---|
| 393 | + /* Use the existing domain as there is only one pgtable here. */ |
|---|
| 394 | + if (data->m4u_dom) { |
|---|
| 395 | + dom->iop = data->m4u_dom->iop; |
|---|
| 396 | + dom->cfg = data->m4u_dom->cfg; |
|---|
| 397 | + dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap; |
|---|
| 398 | + goto update_iova_region; |
|---|
| 399 | + } |
|---|
| 294 | 400 | |
|---|
| 295 | 401 | dom->cfg = (struct io_pgtable_cfg) { |
|---|
| 296 | 402 | .quirks = IO_PGTABLE_QUIRK_ARM_NS | |
|---|
| 297 | 403 | IO_PGTABLE_QUIRK_NO_PERMS | |
|---|
| 298 | | - IO_PGTABLE_QUIRK_TLBI_ON_MAP, |
|---|
| 404 | + IO_PGTABLE_QUIRK_ARM_MTK_EXT, |
|---|
| 299 | 405 | .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, |
|---|
| 300 | | - .ias = 32, |
|---|
| 301 | | - .oas = 32, |
|---|
| 302 | | - .tlb = &mtk_iommu_gather_ops, |
|---|
| 406 | + .ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32, |
|---|
| 303 | 407 | .iommu_dev = data->dev, |
|---|
| 304 | 408 | }; |
|---|
| 305 | 409 | |
|---|
| 306 | | - if (data->enable_4GB) |
|---|
| 307 | | - dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB; |
|---|
| 410 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) |
|---|
| 411 | + dom->cfg.oas = data->enable_4GB ? 33 : 32; |
|---|
| 412 | + else |
|---|
| 413 | + dom->cfg.oas = 35; |
|---|
| 308 | 414 | |
|---|
| 309 | 415 | dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); |
|---|
| 310 | 416 | if (!dom->iop) { |
|---|
| .. | .. |
|---|
| 314 | 420 | |
|---|
| 315 | 421 | /* Update our supported page sizes bitmap */ |
|---|
| 316 | 422 | dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap; |
|---|
| 423 | + |
|---|
| 424 | +update_iova_region: |
|---|
| 425 | + /* Update the iova region for this domain */ |
|---|
| 426 | + region = data->plat_data->iova_region + domid; |
|---|
| 427 | + dom->domain.geometry.aperture_start = region->iova_base; |
|---|
| 428 | + dom->domain.geometry.aperture_end = region->iova_base + region->size - 1; |
|---|
| 429 | + dom->domain.geometry.force_aperture = true; |
|---|
| 317 | 430 | return 0; |
|---|
| 318 | 431 | } |
|---|
| 319 | 432 | |
|---|
| .. | .. |
|---|
| 328 | 441 | if (!dom) |
|---|
| 329 | 442 | return NULL; |
|---|
| 330 | 443 | |
|---|
| 331 | | - if (iommu_get_dma_cookie(&dom->domain)) |
|---|
| 332 | | - goto free_dom; |
|---|
| 333 | | - |
|---|
| 334 | | - if (mtk_iommu_domain_finalise(dom)) |
|---|
| 335 | | - goto put_dma_cookie; |
|---|
| 336 | | - |
|---|
| 337 | | - dom->domain.geometry.aperture_start = 0; |
|---|
| 338 | | - dom->domain.geometry.aperture_end = DMA_BIT_MASK(32); |
|---|
| 339 | | - dom->domain.geometry.force_aperture = true; |
|---|
| 444 | + if (iommu_get_dma_cookie(&dom->domain)) { |
|---|
| 445 | + kfree(dom); |
|---|
| 446 | + return NULL; |
|---|
| 447 | + } |
|---|
| 340 | 448 | |
|---|
| 341 | 449 | return &dom->domain; |
|---|
| 342 | | - |
|---|
| 343 | | -put_dma_cookie: |
|---|
| 344 | | - iommu_put_dma_cookie(&dom->domain); |
|---|
| 345 | | -free_dom: |
|---|
| 346 | | - kfree(dom); |
|---|
| 347 | | - return NULL; |
|---|
| 348 | 450 | } |
|---|
| 349 | 451 | |
|---|
| 350 | 452 | static void mtk_iommu_domain_free(struct iommu_domain *domain) |
|---|
| 351 | 453 | { |
|---|
| 352 | | - struct mtk_iommu_domain *dom = to_mtk_domain(domain); |
|---|
| 353 | | - |
|---|
| 354 | | - free_io_pgtable_ops(dom->iop); |
|---|
| 355 | 454 | iommu_put_dma_cookie(domain); |
|---|
| 356 | 455 | kfree(to_mtk_domain(domain)); |
|---|
| 357 | 456 | } |
|---|
| .. | .. |
|---|
| 359 | 458 | static int mtk_iommu_attach_device(struct iommu_domain *domain, |
|---|
| 360 | 459 | struct device *dev) |
|---|
| 361 | 460 | { |
|---|
| 461 | + struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata; |
|---|
| 362 | 462 | struct mtk_iommu_domain *dom = to_mtk_domain(domain); |
|---|
| 363 | | - struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv; |
|---|
| 463 | + struct device *m4udev = data->dev; |
|---|
| 464 | + int ret, domid; |
|---|
| 364 | 465 | |
|---|
| 365 | | - if (!data) |
|---|
| 366 | | - return -ENODEV; |
|---|
| 466 | + domid = mtk_iommu_get_domain_id(dev, data->plat_data); |
|---|
| 467 | + if (domid < 0) |
|---|
| 468 | + return domid; |
|---|
| 367 | 469 | |
|---|
| 368 | | - /* Update the pgtable base address register of the M4U HW */ |
|---|
| 369 | | - if (!data->m4u_dom) { |
|---|
| 370 | | - data->m4u_dom = dom; |
|---|
| 371 | | - writel(dom->cfg.arm_v7s_cfg.ttbr[0], |
|---|
| 372 | | - data->base + REG_MMU_PT_BASE_ADDR); |
|---|
| 470 | + if (!dom->data) { |
|---|
| 471 | + /* Data is in the frstdata in the sharing-pgtable case. */ |
|---|
| 472 | + frstdata = mtk_iommu_get_m4u_data(); |
|---|
| 473 | + |
|---|
| 474 | + if (mtk_iommu_domain_finalise(dom, frstdata, domid)) |
|---|
| 475 | + return -ENODEV; |
|---|
| 476 | + dom->data = data; |
|---|
| 373 | 477 | } |
|---|
| 374 | 478 | |
|---|
| 375 | | - mtk_iommu_config(data, dev, true); |
|---|
| 479 | + mutex_lock(&data->mutex); |
|---|
| 480 | + if (!data->m4u_dom) { /* Initialize the M4U HW */ |
|---|
| 481 | + ret = pm_runtime_resume_and_get(m4udev); |
|---|
| 482 | + if (ret < 0) |
|---|
| 483 | + goto err_unlock; |
|---|
| 484 | + |
|---|
| 485 | + ret = mtk_iommu_hw_init(data); |
|---|
| 486 | + if (ret) { |
|---|
| 487 | + pm_runtime_put(m4udev); |
|---|
| 488 | + goto err_unlock; |
|---|
| 489 | + } |
|---|
| 490 | + data->m4u_dom = dom; |
|---|
| 491 | + writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, |
|---|
| 492 | + data->base + REG_MMU_PT_BASE_ADDR); |
|---|
| 493 | + |
|---|
| 494 | + pm_runtime_put(m4udev); |
|---|
| 495 | + } |
|---|
| 496 | + mutex_unlock(&data->mutex); |
|---|
| 497 | + |
|---|
| 498 | + mtk_iommu_config(data, dev, true, domid); |
|---|
| 376 | 499 | return 0; |
|---|
| 500 | + |
|---|
| 501 | +err_unlock: |
|---|
| 502 | + mutex_unlock(&data->mutex); |
|---|
| 503 | + return ret; |
|---|
| 377 | 504 | } |
|---|
| 378 | 505 | |
|---|
| 379 | 506 | static void mtk_iommu_detach_device(struct iommu_domain *domain, |
|---|
| 380 | 507 | struct device *dev) |
|---|
| 381 | 508 | { |
|---|
| 382 | | - struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv; |
|---|
| 509 | + struct mtk_iommu_data *data = dev_iommu_priv_get(dev); |
|---|
| 383 | 510 | |
|---|
| 384 | | - if (!data) |
|---|
| 385 | | - return; |
|---|
| 386 | | - |
|---|
| 387 | | - mtk_iommu_config(data, dev, false); |
|---|
| 511 | + mtk_iommu_config(data, dev, false, 0); |
|---|
| 388 | 512 | } |
|---|
| 389 | 513 | |
|---|
| 390 | 514 | static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, |
|---|
| 391 | | - phys_addr_t paddr, size_t size, int prot) |
|---|
| 515 | + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
|---|
| 392 | 516 | { |
|---|
| 393 | 517 | struct mtk_iommu_domain *dom = to_mtk_domain(domain); |
|---|
| 394 | | - unsigned long flags; |
|---|
| 395 | | - int ret; |
|---|
| 396 | 518 | |
|---|
| 397 | | - spin_lock_irqsave(&dom->pgtlock, flags); |
|---|
| 398 | | - ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32), |
|---|
| 399 | | - size, prot); |
|---|
| 400 | | - spin_unlock_irqrestore(&dom->pgtlock, flags); |
|---|
| 519 | + /* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */ |
|---|
| 520 | + if (dom->data->enable_4GB) |
|---|
| 521 | + paddr |= BIT_ULL(32); |
|---|
| 401 | 522 | |
|---|
| 402 | | - return ret; |
|---|
| 523 | + /* Synchronize with the tlb_lock */ |
|---|
| 524 | + return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp); |
|---|
| 403 | 525 | } |
|---|
| 404 | 526 | |
|---|
| 405 | 527 | static size_t mtk_iommu_unmap(struct iommu_domain *domain, |
|---|
| 406 | | - unsigned long iova, size_t size) |
|---|
| 528 | + unsigned long iova, size_t size, |
|---|
| 529 | + struct iommu_iotlb_gather *gather) |
|---|
| 407 | 530 | { |
|---|
| 408 | 531 | struct mtk_iommu_domain *dom = to_mtk_domain(domain); |
|---|
| 409 | | - unsigned long flags; |
|---|
| 410 | | - size_t unmapsz; |
|---|
| 532 | + unsigned long end = iova + size - 1; |
|---|
| 411 | 533 | |
|---|
| 412 | | - spin_lock_irqsave(&dom->pgtlock, flags); |
|---|
| 413 | | - unmapsz = dom->iop->unmap(dom->iop, iova, size); |
|---|
| 414 | | - spin_unlock_irqrestore(&dom->pgtlock, flags); |
|---|
| 415 | | - |
|---|
| 416 | | - return unmapsz; |
|---|
| 534 | + if (gather->start > iova) |
|---|
| 535 | + gather->start = iova; |
|---|
| 536 | + if (gather->end < end) |
|---|
| 537 | + gather->end = end; |
|---|
| 538 | + return dom->iop->unmap(dom->iop, iova, size, gather); |
|---|
| 417 | 539 | } |
|---|
| 418 | 540 | |
|---|
| 419 | | -static void mtk_iommu_iotlb_sync(struct iommu_domain *domain) |
|---|
| 541 | +static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) |
|---|
| 420 | 542 | { |
|---|
| 421 | | - mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data()); |
|---|
| 543 | + struct mtk_iommu_domain *dom = to_mtk_domain(domain); |
|---|
| 544 | + |
|---|
| 545 | + mtk_iommu_tlb_flush_all(dom->data); |
|---|
| 546 | +} |
|---|
| 547 | + |
|---|
| 548 | +static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, |
|---|
| 549 | + struct iommu_iotlb_gather *gather) |
|---|
| 550 | +{ |
|---|
| 551 | + struct mtk_iommu_domain *dom = to_mtk_domain(domain); |
|---|
| 552 | + size_t length = gather->end - gather->start + 1; |
|---|
| 553 | + |
|---|
| 554 | + mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize, |
|---|
| 555 | + dom->data); |
|---|
| 556 | +} |
|---|
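The unmap path no longer flushes the TLB directly: mtk_iommu_unmap() only widens the iommu_iotlb_gather window, and mtk_iommu_iotlb_sync() later issues one range invalidation covering everything gathered. A minimal userspace sketch of the accumulation arithmetic (the gather struct is a local stand-in, the values are hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the start/end fields of struct iommu_iotlb_gather. */
struct gather { uint64_t start, end; };

/* Range widening as done in mtk_iommu_unmap() (sketch). */
static void gather_add(struct gather *g, uint64_t iova, uint64_t size)
{
	uint64_t end = iova + size - 1;

	if (g->start > iova)
		g->start = iova;
	if (g->end < end)
		g->end = end;
}

int main(void)
{
	struct gather g = { .start = ~0ULL, .end = 0 };

	gather_add(&g, 0x1000, 0x1000);
	gather_add(&g, 0x5000, 0x2000);
	/* iotlb_sync() would now flush one range covering both unmaps. */
	printf("flush [%#llx, %#llx] len=%#llx\n",
	       (unsigned long long)g.start, (unsigned long long)g.end,
	       (unsigned long long)(g.end - g.start + 1));
	return 0;
}
```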
| 557 | + |
|---|
| 558 | +static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, |
|---|
| 559 | + size_t size) |
|---|
| 560 | +{ |
|---|
| 561 | + struct mtk_iommu_domain *dom = to_mtk_domain(domain); |
|---|
| 562 | + |
|---|
| 563 | + mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data); |
|---|
| 422 | 564 | } |
|---|
| 423 | 565 | |
|---|
| 424 | 566 | static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, |
|---|
| 425 | 567 | dma_addr_t iova) |
|---|
| 426 | 568 | { |
|---|
| 427 | 569 | struct mtk_iommu_domain *dom = to_mtk_domain(domain); |
|---|
| 428 | | - struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); |
|---|
| 429 | | - unsigned long flags; |
|---|
| 430 | 570 | phys_addr_t pa; |
|---|
| 431 | 571 | |
|---|
| 432 | | - spin_lock_irqsave(&dom->pgtlock, flags); |
|---|
| 433 | 572 | pa = dom->iop->iova_to_phys(dom->iop, iova); |
|---|
| 434 | | - spin_unlock_irqrestore(&dom->pgtlock, flags); |
|---|
| 435 | | - |
|---|
| 436 | | - if (data->enable_4GB && pa < MTK_IOMMU_4GB_MODE_REMAP_BASE) |
|---|
| 437 | | - pa |= BIT_ULL(32); |
|---|
| 573 | + if (dom->data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE) |
|---|
| 574 | + pa &= ~BIT_ULL(32); |
|---|
| 438 | 575 | |
|---|
| 439 | 576 | return pa; |
|---|
| 440 | 577 | } |
|---|
| 441 | 578 | |
|---|
| 442 | | -static int mtk_iommu_add_device(struct device *dev) |
|---|
| 579 | +static struct iommu_device *mtk_iommu_probe_device(struct device *dev) |
|---|
| 443 | 580 | { |
|---|
| 581 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
|---|
| 444 | 582 | struct mtk_iommu_data *data; |
|---|
| 445 | | - struct iommu_group *group; |
|---|
| 446 | 583 | |
|---|
| 447 | | - if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) |
|---|
| 448 | | - return -ENODEV; /* Not a iommu client device */ |
|---|
| 584 | + if (!fwspec || fwspec->ops != &mtk_iommu_ops) |
|---|
| 585 | + return ERR_PTR(-ENODEV); /* Not an iommu client device */ |
|---|
| 449 | 586 | |
|---|
| 450 | | - data = dev->iommu_fwspec->iommu_priv; |
|---|
| 451 | | - iommu_device_link(&data->iommu, dev); |
|---|
| 587 | + data = dev_iommu_priv_get(dev); |
|---|
| 452 | 588 | |
|---|
| 453 | | - group = iommu_group_get_for_dev(dev); |
|---|
| 454 | | - if (IS_ERR(group)) |
|---|
| 455 | | - return PTR_ERR(group); |
|---|
| 456 | | - |
|---|
| 457 | | - iommu_group_put(group); |
|---|
| 458 | | - return 0; |
|---|
| 589 | + return &data->iommu; |
|---|
| 459 | 590 | } |
|---|
| 460 | 591 | |
|---|
| 461 | | -static void mtk_iommu_remove_device(struct device *dev) |
|---|
| 592 | +static void mtk_iommu_release_device(struct device *dev) |
|---|
| 462 | 593 | { |
|---|
| 463 | | - struct mtk_iommu_data *data; |
|---|
| 594 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
|---|
| 464 | 595 | |
|---|
| 465 | | - if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) |
|---|
| 596 | + if (!fwspec || fwspec->ops != &mtk_iommu_ops) |
|---|
| 466 | 597 | return; |
|---|
| 467 | 598 | |
|---|
| 468 | | - data = dev->iommu_fwspec->iommu_priv; |
|---|
| 469 | | - iommu_device_unlink(&data->iommu, dev); |
|---|
| 470 | | - |
|---|
| 471 | | - iommu_group_remove_device(dev); |
|---|
| 472 | 599 | iommu_fwspec_free(dev); |
|---|
| 473 | 600 | } |
|---|
| 474 | 601 | |
|---|
| 475 | 602 | static struct iommu_group *mtk_iommu_device_group(struct device *dev) |
|---|
| 476 | 603 | { |
|---|
| 477 | 604 | struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); |
|---|
| 605 | + struct iommu_group *group; |
|---|
| 606 | + int domid; |
|---|
| 478 | 607 | |
|---|
| 479 | 608 | if (!data) |
|---|
| 480 | 609 | return ERR_PTR(-ENODEV); |
|---|
| 481 | 610 | |
|---|
| 482 | | - /* All the client devices are in the same m4u iommu-group */ |
|---|
| 483 | | - if (!data->m4u_group) { |
|---|
| 484 | | - data->m4u_group = iommu_group_alloc(); |
|---|
| 485 | | - if (IS_ERR(data->m4u_group)) |
|---|
| 486 | | - dev_err(dev, "Failed to allocate M4U IOMMU group\n"); |
|---|
| 611 | + domid = mtk_iommu_get_domain_id(dev, data->plat_data); |
|---|
| 612 | + if (domid < 0) |
|---|
| 613 | + return ERR_PTR(domid); |
|---|
| 614 | + |
|---|
| 615 | + mutex_lock(&data->mutex); |
|---|
| 616 | + group = data->m4u_group[domid]; |
|---|
| 617 | + if (!group) { |
|---|
| 618 | + group = iommu_group_alloc(); |
|---|
| 619 | + if (!IS_ERR(group)) |
|---|
| 620 | + data->m4u_group[domid] = group; |
|---|
| 487 | 621 | } else { |
|---|
| 488 | | - iommu_group_ref_get(data->m4u_group); |
|---|
| 622 | + iommu_group_ref_get(group); |
|---|
| 489 | 623 | } |
|---|
| 490 | | - return data->m4u_group; |
|---|
| 624 | + mutex_unlock(&data->mutex); |
|---|
| 625 | + return group; |
|---|
| 491 | 626 | } |
|---|
| 492 | 627 | |
|---|
| 493 | 628 | static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) |
|---|
| .. | .. |
|---|
| 500 | 635 | return -EINVAL; |
|---|
| 501 | 636 | } |
|---|
| 502 | 637 | |
|---|
| 503 | | - if (!dev->iommu_fwspec->iommu_priv) { |
|---|
| 638 | + if (!dev_iommu_priv_get(dev)) { |
|---|
| 504 | 639 | /* Get the m4u device */ |
|---|
| 505 | 640 | m4updev = of_find_device_by_node(args->np); |
|---|
| 506 | 641 | if (WARN_ON(!m4updev)) |
|---|
| 507 | 642 | return -EINVAL; |
|---|
| 508 | 643 | |
|---|
| 509 | | - dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev); |
|---|
| 644 | + dev_iommu_priv_set(dev, platform_get_drvdata(m4updev)); |
|---|
| 510 | 645 | } |
|---|
| 511 | 646 | |
|---|
| 512 | 647 | return iommu_fwspec_add_ids(dev, args->args, 1); |
|---|
| 513 | 648 | } |
|---|
| 514 | 649 | |
|---|
| 515 | | -static struct iommu_ops mtk_iommu_ops = { |
|---|
| 650 | +static void mtk_iommu_get_resv_regions(struct device *dev, |
|---|
| 651 | + struct list_head *head) |
|---|
| 652 | +{ |
|---|
| 653 | + struct mtk_iommu_data *data = dev_iommu_priv_get(dev); |
|---|
| 654 | + unsigned int domid = mtk_iommu_get_domain_id(dev, data->plat_data), i; |
|---|
| 655 | + const struct mtk_iommu_iova_region *resv, *curdom; |
|---|
| 656 | + struct iommu_resv_region *region; |
|---|
| 657 | + int prot = IOMMU_WRITE | IOMMU_READ; |
|---|
| 658 | + |
|---|
| 659 | + if ((int)domid < 0) |
|---|
| 660 | + return; |
|---|
| 661 | + curdom = data->plat_data->iova_region + domid; |
|---|
| 662 | + for (i = 0; i < data->plat_data->iova_region_nr; i++) { |
|---|
| 663 | + resv = data->plat_data->iova_region + i; |
|---|
| 664 | + |
|---|
| 665 | + /* Only reserve when the region is inside the current domain */ |
|---|
| 666 | + if (resv->iova_base <= curdom->iova_base || |
|---|
| 667 | + resv->iova_base + resv->size >= curdom->iova_base + curdom->size) |
|---|
| 668 | + continue; |
|---|
| 669 | + |
|---|
| 670 | + region = iommu_alloc_resv_region(resv->iova_base, resv->size, |
|---|
| 671 | + prot, IOMMU_RESV_RESERVED); |
|---|
| 672 | + if (!region) |
|---|
| 673 | + return; |
|---|
| 674 | + |
|---|
| 675 | + list_add_tail(®ion->list, head); |
|---|
| 676 | + } |
|---|
| 677 | +} |
|---|
| 678 | + |
|---|
| 679 | +static const struct iommu_ops mtk_iommu_ops = { |
|---|
| 516 | 680 | .domain_alloc = mtk_iommu_domain_alloc, |
|---|
| 517 | 681 | .domain_free = mtk_iommu_domain_free, |
|---|
| 518 | 682 | .attach_dev = mtk_iommu_attach_device, |
|---|
| 519 | 683 | .detach_dev = mtk_iommu_detach_device, |
|---|
| 520 | 684 | .map = mtk_iommu_map, |
|---|
| 521 | 685 | .unmap = mtk_iommu_unmap, |
|---|
| 522 | | - .flush_iotlb_all = mtk_iommu_iotlb_sync, |
|---|
| 686 | + .flush_iotlb_all = mtk_iommu_flush_iotlb_all, |
|---|
| 523 | 687 | .iotlb_sync = mtk_iommu_iotlb_sync, |
|---|
| 688 | + .iotlb_sync_map = mtk_iommu_sync_map, |
|---|
| 524 | 689 | .iova_to_phys = mtk_iommu_iova_to_phys, |
|---|
| 525 | | - .add_device = mtk_iommu_add_device, |
|---|
| 526 | | - .remove_device = mtk_iommu_remove_device, |
|---|
| 690 | + .probe_device = mtk_iommu_probe_device, |
|---|
| 691 | + .release_device = mtk_iommu_release_device, |
|---|
| 527 | 692 | .device_group = mtk_iommu_device_group, |
|---|
| 528 | 693 | .of_xlate = mtk_iommu_of_xlate, |
|---|
| 694 | + .get_resv_regions = mtk_iommu_get_resv_regions, |
|---|
| 695 | + .put_resv_regions = generic_iommu_put_resv_regions, |
|---|
| 529 | 696 | .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, |
|---|
| 530 | 697 | }; |
|---|
| 531 | 698 | |
|---|
| 532 | 699 | static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) |
|---|
| 533 | 700 | { |
|---|
| 534 | 701 | u32 regval; |
|---|
| 535 | | - int ret; |
|---|
| 536 | 702 | |
|---|
| 537 | | - ret = clk_prepare_enable(data->bclk); |
|---|
| 538 | | - if (ret) { |
|---|
| 539 | | - dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret); |
|---|
| 540 | | - return ret; |
|---|
| 703 | + if (data->plat_data->m4u_plat == M4U_MT8173) { |
|---|
| 704 | + regval = F_MMU_PREFETCH_RT_REPLACE_MOD | |
|---|
| 705 | + F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173; |
|---|
| 706 | + } else { |
|---|
| 707 | + regval = readl_relaxed(data->base + REG_MMU_CTRL_REG); |
|---|
| 708 | + regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR; |
|---|
| 541 | 709 | } |
|---|
| 542 | | - |
|---|
| 543 | | - regval = F_MMU_TF_PROTECT_SEL(2, data); |
|---|
| 544 | | - if (data->m4u_plat == M4U_MT8173) |
|---|
| 545 | | - regval |= F_MMU_PREFETCH_RT_REPLACE_MOD; |
|---|
| 546 | 710 | writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); |
|---|
| 547 | 711 | |
|---|
| 548 | 712 | regval = F_L2_MULIT_HIT_EN | |
|---|
| .. | .. |
|---|
| 562 | 726 | F_INT_PRETETCH_TRANSATION_FIFO_FAULT; |
|---|
| 563 | 727 | writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL); |
|---|
| 564 | 728 | |
|---|
| 565 | | - if (data->m4u_plat == M4U_MT8173) |
|---|
| 729 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR)) |
|---|
| 566 | 730 | regval = (data->protect_base >> 1) | (data->enable_4GB << 31); |
|---|
| 567 | 731 | else |
|---|
| 568 | 732 | regval = lower_32_bits(data->protect_base) | |
|---|
| 569 | 733 | upper_32_bits(data->protect_base); |
|---|
| 570 | 734 | writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); |
|---|
| 571 | 735 | |
|---|
| 572 | | - if (data->enable_4GB && data->m4u_plat != M4U_MT8173) { |
|---|
| 736 | + if (data->enable_4GB && |
|---|
| 737 | + MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) { |
|---|
| 573 | 738 | /* |
|---|
| 574 | 739 | * If 4GB mode is enabled, the validate PA range is from |
|---|
| 575 | 740 | * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30]. |
|---|
| .. | .. |
|---|
| 578 | 743 | writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG); |
|---|
| 579 | 744 | } |
|---|
| 580 | 745 | writel_relaxed(0, data->base + REG_MMU_DCM_DIS); |
|---|
| 746 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) { |
|---|
| 747 | + /* write command throttling mode */ |
|---|
| 748 | + regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL); |
|---|
| 749 | + regval &= ~F_MMU_WR_THROT_DIS_MASK; |
|---|
| 750 | + writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL); |
|---|
| 751 | + } |
|---|
| 581 | 752 | |
|---|
| 582 | | - /* It's MISC control register whose default value is ok except mt8173.*/ |
|---|
| 583 | | - if (data->m4u_plat == M4U_MT8173) |
|---|
| 584 | | - writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); |
|---|
| 753 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) { |
|---|
| 754 | + /* The register is called STANDARD_AXI_MODE in this case */ |
|---|
| 755 | + regval = 0; |
|---|
| 756 | + } else { |
|---|
| 757 | + regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL); |
|---|
| 758 | + regval &= ~F_MMU_STANDARD_AXI_MODE_MASK; |
|---|
| 759 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN)) |
|---|
| 760 | + regval &= ~F_MMU_IN_ORDER_WR_EN_MASK; |
|---|
| 761 | + } |
|---|
| 762 | + writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL); |
|---|
| 585 | 763 | |
|---|
| 586 | 764 | if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, |
|---|
| 587 | 765 | dev_name(data->dev), (void *)data)) { |
|---|
| 588 | 766 | writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR); |
|---|
| 589 | | - clk_disable_unprepare(data->bclk); |
|---|
| 590 | 767 | dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq); |
|---|
| 591 | 768 | return -ENODEV; |
|---|
| 592 | 769 | } |
|---|
| .. | .. |
|---|
| 603 | 780 | { |
|---|
| 604 | 781 | struct mtk_iommu_data *data; |
|---|
| 605 | 782 | struct device *dev = &pdev->dev; |
|---|
| 783 | + struct device_node *larbnode, *smicomm_node; |
|---|
| 784 | + struct platform_device *plarbdev; |
|---|
| 785 | + struct device_link *link; |
|---|
| 606 | 786 | struct resource *res; |
|---|
| 607 | 787 | resource_size_t ioaddr; |
|---|
| 608 | 788 | struct component_match *match = NULL; |
|---|
| 789 | + struct regmap *infracfg; |
|---|
| 609 | 790 | void *protect; |
|---|
| 610 | 791 | int i, larb_nr, ret; |
|---|
| 792 | + u32 val; |
|---|
| 793 | + char *p; |
|---|
| 611 | 794 | |
|---|
| 612 | 795 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); |
|---|
| 613 | 796 | if (!data) |
|---|
| 614 | 797 | return -ENOMEM; |
|---|
| 615 | 798 | data->dev = dev; |
|---|
| 616 | | - data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev); |
|---|
| 799 | + data->plat_data = of_device_get_match_data(dev); |
|---|
| 617 | 800 | |
|---|
| 618 | 801 | /* Protect memory. HW will access here on a translation fault. */ |
|---|
| 619 | 802 | protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); |
|---|
| .. | .. |
|---|
| 621 | 804 | return -ENOMEM; |
|---|
| 622 | 805 | data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN); |
|---|
| 623 | 806 | |
|---|
| 624 | | - /* Whether the current dram is over 4GB */ |
|---|
| 625 | | - data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT)); |
|---|
| 807 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) { |
|---|
| 808 | + switch (data->plat_data->m4u_plat) { |
|---|
| 809 | + case M4U_MT2712: |
|---|
| 810 | + p = "mediatek,mt2712-infracfg"; |
|---|
| 811 | + break; |
|---|
| 812 | + case M4U_MT8173: |
|---|
| 813 | + p = "mediatek,mt8173-infracfg"; |
|---|
| 814 | + break; |
|---|
| 815 | + default: |
|---|
| 816 | + p = NULL; |
|---|
| 817 | + } |
|---|
| 818 | + |
|---|
| 819 | + infracfg = syscon_regmap_lookup_by_compatible(p); |
|---|
| 820 | + |
|---|
| 821 | + if (IS_ERR(infracfg)) |
|---|
| 822 | + return PTR_ERR(infracfg); |
|---|
| 823 | + |
|---|
| 824 | + ret = regmap_read(infracfg, REG_INFRA_MISC, &val); |
|---|
| 825 | + if (ret) |
|---|
| 826 | + return ret; |
|---|
| 827 | + data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN); |
|---|
| 828 | + } |
|---|
| 626 | 829 | |
|---|
| 627 | 830 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
|---|
| 628 | 831 | data->base = devm_ioremap_resource(dev, res); |
|---|
| .. | .. |
|---|
| 634 | 837 | if (data->irq < 0) |
|---|
| 635 | 838 | return data->irq; |
|---|
| 636 | 839 | |
|---|
| 637 | | - data->bclk = devm_clk_get(dev, "bclk"); |
|---|
| 638 | | - if (IS_ERR(data->bclk)) |
|---|
| 639 | | - return PTR_ERR(data->bclk); |
|---|
| 840 | + if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) { |
|---|
| 841 | + data->bclk = devm_clk_get(dev, "bclk"); |
|---|
| 842 | + if (IS_ERR(data->bclk)) |
|---|
| 843 | + return PTR_ERR(data->bclk); |
|---|
| 844 | + } |
|---|
| 640 | 845 | |
|---|
| 641 | 846 | larb_nr = of_count_phandle_with_args(dev->of_node, |
|---|
| 642 | 847 | "mediatek,larbs", NULL); |
|---|
| 643 | 848 | if (larb_nr < 0) |
|---|
| 644 | 849 | return larb_nr; |
|---|
| 645 | | - data->smi_imu.larb_nr = larb_nr; |
|---|
| 646 | 850 | |
|---|
| 647 | 851 | for (i = 0; i < larb_nr; i++) { |
|---|
| 648 | | - struct device_node *larbnode; |
|---|
| 649 | | - struct platform_device *plarbdev; |
|---|
| 650 | 852 | u32 id; |
|---|
| 651 | 853 | |
|---|
| 652 | 854 | larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); |
|---|
| 653 | 855 | if (!larbnode) |
|---|
| 654 | 856 | return -EINVAL; |
|---|
| 655 | 857 | |
|---|
| 656 | | - if (!of_device_is_available(larbnode)) |
|---|
| 858 | + if (!of_device_is_available(larbnode)) { |
|---|
| 859 | + of_node_put(larbnode); |
|---|
| 657 | 860 | continue; |
|---|
| 861 | + } |
|---|
| 658 | 862 | |
|---|
| 659 | 863 | ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id); |
|---|
| 660 | 864 | if (ret) /* The id is consecutive if this property is absent */ |
|---|
| 661 | 865 | id = i; |
|---|
| 662 | 866 | |
|---|
| 663 | 867 | plarbdev = of_find_device_by_node(larbnode); |
|---|
| 664 | | - if (!plarbdev) |
|---|
| 868 | + if (!plarbdev) { |
|---|
| 869 | + of_node_put(larbnode); |
|---|
| 665 | 870 | return -EPROBE_DEFER; |
|---|
| 666 | | - data->smi_imu.larb_imu[id].dev = &plarbdev->dev; |
|---|
| 871 | + } |
|---|
| 872 | + data->larb_imu[id].dev = &plarbdev->dev; |
|---|
| 667 | 873 | |
|---|
| 668 | 874 | component_match_add_release(dev, &match, release_of, |
|---|
| 669 | 875 | compare_of, larbnode); |
|---|
| 670 | 876 | } |
|---|
| 671 | 877 | |
|---|
| 672 | | - platform_set_drvdata(pdev, data); |
|---|
| 878 | + /* Get smi-common dev from the last larb. */ |
|---|
| 879 | + smicomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0); |
|---|
| 880 | + if (!smicomm_node) |
|---|
| 881 | + return -EINVAL; |
|---|
| 673 | 882 | |
|---|
| 674 | | - ret = mtk_iommu_hw_init(data); |
|---|
| 675 | | - if (ret) |
|---|
| 676 | | - return ret; |
|---|
| 883 | + plarbdev = of_find_device_by_node(smicomm_node); |
|---|
| 884 | + of_node_put(smicomm_node); |
|---|
| 885 | + data->smicomm_dev = &plarbdev->dev; |
|---|
| 886 | + |
|---|
| 887 | + pm_runtime_enable(dev); |
|---|
| 888 | + |
|---|
| 889 | + link = device_link_add(data->smicomm_dev, dev, |
|---|
| 890 | + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); |
|---|
| 891 | + if (!link) { |
|---|
| 892 | + dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev)); |
|---|
| 893 | + ret = -EINVAL; |
|---|
| 894 | + goto out_runtime_disable; |
|---|
| 895 | + } |
|---|
| 896 | + |
|---|
| 897 | + platform_set_drvdata(pdev, data); |
|---|
| 898 | + mutex_init(&data->mutex); |
|---|
| 677 | 899 | |
|---|
| 678 | 900 | ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, |
|---|
| 679 | 901 | "mtk-iommu.%pa", &ioaddr); |
|---|
| 680 | 902 | if (ret) |
|---|
| 681 | | - return ret; |
|---|
| 903 | + goto out_link_remove; |
|---|
| 682 | 904 | |
|---|
| 683 | 905 | iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); |
|---|
| 684 | 906 | iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode); |
|---|
| 685 | 907 | |
|---|
| 686 | 908 | ret = iommu_device_register(&data->iommu); |
|---|
| 687 | 909 | if (ret) |
|---|
| 688 | | - return ret; |
|---|
| 910 | + goto out_sysfs_remove; |
|---|
| 689 | 911 | |
|---|
| 912 | + spin_lock_init(&data->tlb_lock); |
|---|
| 690 | 913 | list_add_tail(&data->list, &m4ulist); |
|---|
| 691 | 914 | |
|---|
| 692 | | - if (!iommu_present(&platform_bus_type)) |
|---|
| 693 | | - bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); |
|---|
| 915 | + if (!iommu_present(&platform_bus_type)) { |
|---|
| 916 | + ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); |
|---|
| 917 | + if (ret) |
|---|
| 918 | + goto out_list_del; |
|---|
| 919 | + } |
|---|
| 694 | 920 | |
|---|
| 695 | | - return component_master_add_with_match(dev, &mtk_iommu_com_ops, match); |
|---|
| 921 | + ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); |
|---|
| 922 | + if (ret) |
|---|
| 923 | + goto out_bus_set_null; |
|---|
| 924 | + return ret; |
|---|
| 925 | + |
|---|
| 926 | +out_bus_set_null: |
|---|
| 927 | + bus_set_iommu(&platform_bus_type, NULL); |
|---|
| 928 | +out_list_del: |
|---|
| 929 | + list_del(&data->list); |
|---|
| 930 | + iommu_device_unregister(&data->iommu); |
|---|
| 931 | +out_sysfs_remove: |
|---|
| 932 | + iommu_device_sysfs_remove(&data->iommu); |
|---|
| 933 | +out_link_remove: |
|---|
| 934 | + device_link_remove(data->smicomm_dev, dev); |
|---|
| 935 | +out_runtime_disable: |
|---|
| 936 | + pm_runtime_disable(dev); |
|---|
| 937 | + return ret; |
|---|
| 696 | 938 | } |
|---|
| 697 | 939 | |
|---|
| 698 | 940 | static int mtk_iommu_remove(struct platform_device *pdev) |
|---|
| .. | .. |
|---|
| 702 | 944 | iommu_device_sysfs_remove(&data->iommu); |
|---|
| 703 | 945 | iommu_device_unregister(&data->iommu); |
|---|
| 704 | 946 | |
|---|
| 705 | | - if (iommu_present(&platform_bus_type)) |
|---|
| 706 | | - bus_set_iommu(&platform_bus_type, NULL); |
|---|
| 947 | + list_del(&data->list); |
|---|
| 707 | 948 | |
|---|
| 708 | | - clk_disable_unprepare(data->bclk); |
|---|
| 949 | + device_link_remove(data->smicomm_dev, &pdev->dev); |
|---|
| 950 | + pm_runtime_disable(&pdev->dev); |
|---|
| 709 | 951 | devm_free_irq(&pdev->dev, data->irq, data); |
|---|
| 710 | 952 | component_master_del(&pdev->dev, &mtk_iommu_com_ops); |
|---|
| 711 | 953 | return 0; |
|---|
| 712 | 954 | } |
|---|
| 713 | 955 | |
|---|
| 714 | | -static int __maybe_unused mtk_iommu_suspend(struct device *dev) |
|---|
| 956 | +static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev) |
|---|
| 715 | 957 | { |
|---|
| 716 | 958 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
|---|
| 717 | 959 | struct mtk_iommu_suspend_reg *reg = &data->reg; |
|---|
| 718 | 960 | void __iomem *base = data->base; |
|---|
| 719 | 961 | |
|---|
| 720 | | - reg->standard_axi_mode = readl_relaxed(base + |
|---|
| 721 | | - REG_MMU_STANDARD_AXI_MODE); |
|---|
| 962 | + reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL); |
|---|
| 963 | + reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL); |
|---|
| 722 | 964 | reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS); |
|---|
| 723 | 965 | reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); |
|---|
| 724 | 966 | reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); |
|---|
| 725 | 967 | reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); |
|---|
| 726 | 968 | reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR); |
|---|
| 969 | + reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG); |
|---|
| 727 | 970 | clk_disable_unprepare(data->bclk); |
|---|
| 728 | 971 | return 0; |
|---|
| 729 | 972 | } |
|---|
| 730 | 973 | |
|---|
| 731 | | -static int __maybe_unused mtk_iommu_resume(struct device *dev) |
|---|
| 974 | +static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev) |
|---|
| 732 | 975 | { |
|---|
| 733 | 976 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
|---|
| 734 | 977 | struct mtk_iommu_suspend_reg *reg = &data->reg; |
|---|
| 978 | + struct mtk_iommu_domain *m4u_dom = data->m4u_dom; |
|---|
| 735 | 979 | void __iomem *base = data->base; |
|---|
| 736 | 980 | int ret; |
|---|
| 737 | 981 | |
|---|
| .. | .. |
|---|
| 740 | 984 | dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret); |
|---|
| 741 | 985 | return ret; |
|---|
| 742 | 986 | } |
|---|
| 743 | | - writel_relaxed(reg->standard_axi_mode, |
|---|
| 744 | | - base + REG_MMU_STANDARD_AXI_MODE); |
|---|
| 987 | + |
|---|
| 988 | + /* |
|---|
| 989 | + * Uppon first resume, only enable the clk and return, since the values of the |
|---|
| 990 | + * registers are not yet set. |
|---|
| 991 | + */ |
|---|
| 992 | + if (!m4u_dom) |
|---|
| 993 | + return 0; |
|---|
| 994 | + |
|---|
| 995 | + writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL); |
|---|
| 996 | + writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL); |
|---|
| 745 | 997 | writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS); |
|---|
| 746 | 998 | writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); |
|---|
| 747 | 999 | writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); |
|---|
| 748 | 1000 | writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); |
|---|
| 749 | 1001 | writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); |
|---|
| 750 | | - if (data->m4u_dom) |
|---|
| 751 | | - writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], |
|---|
| 752 | | - base + REG_MMU_PT_BASE_ADDR); |
|---|
| 1002 | + writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG); |
|---|
| 1003 | + writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR); |
|---|
| 753 | 1004 | return 0; |
|---|
| 754 | 1005 | } |
|---|
| 755 | 1006 | |
|---|
| 756 | 1007 | static const struct dev_pm_ops mtk_iommu_pm_ops = { |
|---|
| 757 | | - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) |
|---|
| 1008 | + SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL) |
|---|
| 1009 | + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
|---|
| 1010 | + pm_runtime_force_resume) |
|---|
| 1011 | +}; |
|---|
| 1012 | + |
|---|
| 1013 | +static const struct mtk_iommu_plat_data mt2712_data = { |
|---|
| 1014 | + .m4u_plat = M4U_MT2712, |
|---|
| 1015 | + .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG, |
|---|
| 1016 | + .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
|---|
| 1017 | + .iova_region = single_domain, |
|---|
| 1018 | + .iova_region_nr = ARRAY_SIZE(single_domain), |
|---|
| 1019 | + .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}, |
|---|
| 1020 | +}; |
|---|
| 1021 | + |
|---|
| 1022 | +static const struct mtk_iommu_plat_data mt6779_data = { |
|---|
| 1023 | + .m4u_plat = M4U_MT6779, |
|---|
| 1024 | + .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN, |
|---|
| 1025 | + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
|---|
| 1026 | + .iova_region = single_domain, |
|---|
| 1027 | + .iova_region_nr = ARRAY_SIZE(single_domain), |
|---|
| 1028 | + .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}}, |
|---|
| 1029 | +}; |
|---|
| 1030 | + |
|---|
| 1031 | +static const struct mtk_iommu_plat_data mt8167_data = { |
|---|
| 1032 | + .m4u_plat = M4U_MT8167, |
|---|
| 1033 | + .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR, |
|---|
| 1034 | + .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
|---|
| 1035 | + .iova_region = single_domain, |
|---|
| 1036 | + .iova_region_nr = ARRAY_SIZE(single_domain), |
|---|
| 1037 | + .larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */ |
|---|
| 1038 | +}; |
|---|
| 1039 | + |
|---|
| 1040 | +static const struct mtk_iommu_plat_data mt8173_data = { |
|---|
| 1041 | + .m4u_plat = M4U_MT8173, |
|---|
| 1042 | + .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI | |
|---|
| 1043 | + HAS_LEGACY_IVRP_PADDR, |
|---|
| 1044 | + .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
|---|
| 1045 | + .iova_region = single_domain, |
|---|
| 1046 | + .iova_region_nr = ARRAY_SIZE(single_domain), |
|---|
| 1047 | + .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */ |
|---|
| 1048 | +}; |
|---|
| 1049 | + |
|---|
| 1050 | +static const struct mtk_iommu_plat_data mt8183_data = { |
|---|
| 1051 | + .m4u_plat = M4U_MT8183, |
|---|
| 1052 | + .flags = RESET_AXI, |
|---|
| 1053 | + .inv_sel_reg = REG_MMU_INV_SEL_GEN1, |
|---|
| 1054 | + .iova_region = single_domain, |
|---|
| 1055 | + .iova_region_nr = ARRAY_SIZE(single_domain), |
|---|
| 1056 | + .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}}, |
|---|
| 1057 | +}; |
|---|
| 1058 | + |
|---|
| 1059 | +static const struct mtk_iommu_plat_data mt8192_data = { |
|---|
| 1060 | + .m4u_plat = M4U_MT8192, |
|---|
| 1061 | + .flags = HAS_BCLK | HAS_SUB_COMM | OUT_ORDER_WR_EN | |
|---|
| 1062 | + WR_THROT_EN | IOVA_34_EN, |
|---|
| 1063 | + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, |
|---|
| 1064 | + .iova_region = mt8192_multi_dom, |
|---|
| 1065 | + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), |
|---|
| 1066 | + .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20}, |
|---|
| 1067 | + {0, 14, 16}, {0, 13, 18, 17}}, |
|---|
| 758 | 1068 | }; |
|---|
| 759 | 1069 | |
|---|
| 760 | 1070 | static const struct of_device_id mtk_iommu_of_ids[] = { |
|---|
| 761 | | - { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712}, |
|---|
| 762 | | - { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173}, |
|---|
| 1071 | + { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, |
|---|
| 1072 | + { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data}, |
|---|
| 1073 | + { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data}, |
|---|
| 1074 | + { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, |
|---|
| 1075 | + { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, |
|---|
| 1076 | + { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data}, |
|---|
| 763 | 1077 | {} |
|---|
| 764 | 1078 | }; |
|---|
| 765 | 1079 | |
|---|
| .. | .. |
|---|
| 768 | 1082 | .remove = mtk_iommu_remove, |
|---|
| 769 | 1083 | .driver = { |
|---|
| 770 | 1084 | .name = "mtk-iommu", |
|---|
| 771 | | - .of_match_table = of_match_ptr(mtk_iommu_of_ids), |
|---|
| 1085 | + .of_match_table = mtk_iommu_of_ids, |
|---|
| 772 | 1086 | .pm = &mtk_iommu_pm_ops, |
|---|
| 773 | 1087 | } |
|---|
| 774 | 1088 | }; |
|---|