From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Sat, 09 Dec 2023 07:24:11 +0000 Subject: [PATCH] iommu/rockchip: add internal ops to handle hardware variants --- kernel/drivers/iommu/rockchip-iommu.c | 824 ++++++++++++++++++++++++---------------------- 1 files changed, 340 insertions(+), 484 deletions(-) diff --git a/kernel/drivers/iommu/rockchip-iommu.c b/kernel/drivers/iommu/rockchip-iommu.c index 38e70c1..f6720d0 100644 --- a/kernel/drivers/iommu/rockchip-iommu.c +++ b/kernel/drivers/iommu/rockchip-iommu.c @@ -1,7 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. + * IOMMU API for Rockchip + * + * Module Authors: Simon Xue <xxm@rock-chips.com> + * Daniel Kurtz <djkurtz@chromium.org> */ #include <linux/clk.h> @@ -18,6 +20,7 @@ #include <linux/list.h> #include <linux/mm.h> #include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_iommu.h> #include <linux/of_platform.h> @@ -84,59 +87,45 @@ */ #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000 -#define DT_LO_MASK 0xfffff000 -#define DT_HI_MASK GENMASK_ULL(39, 32) -#define DT_SHIFT 28 - -#define DTE_BASE_HI_MASK GENMASK(11, 4) - -#define PAGE_DESC_LO_MASK 0xfffff000 -#define PAGE_DESC_HI1_LOWER 32 -#define PAGE_DESC_HI1_UPPER 35 -#define PAGE_DESC_HI2_LOWER 36 -#define PAGE_DESC_HI2_UPPER 39 -#define PAGE_DESC_HI_MASK1 GENMASK_ULL(PAGE_DESC_HI1_UPPER, PAGE_DESC_HI1_LOWER) -#define PAGE_DESC_HI_MASK2 GENMASK_ULL(PAGE_DESC_HI2_UPPER, PAGE_DESC_HI2_LOWER) -#define DTE_HI1_LOWER 8 -#define DTE_HI1_UPPER 11 -#define DTE_HI2_LOWER 4 -#define DTE_HI2_UPPER 7 -#define DTE_HI_MASK1 GENMASK(DTE_HI1_UPPER, DTE_HI1_LOWER) -#define DTE_HI_MASK2 GENMASK(DTE_HI2_UPPER, DTE_HI2_LOWER) -#define PAGE_DESC_HI_SHIFT1 (PAGE_DESC_HI1_LOWER - DTE_HI1_LOWER) -#define PAGE_DESC_HI_SHIFT2 (PAGE_DESC_HI2_LOWER - DTE_HI2_LOWER) - struct rk_iommu_domain { struct list_head iommus; u32 *dt; /* page directory table */ dma_addr_t dt_dma; spinlock_t iommus_lock; /* lock for iommus list */ spinlock_t dt_lock; /* lock for modifying page directory table */ + bool shootdown_entire; struct iommu_domain domain; }; -struct rockchip_iommu_data { - u32 version; +struct rk_iommu_ops { + phys_addr_t (*pt_address)(u32 dte); + u32 (*mk_dtentries)(dma_addr_t pt_dma); + u32 (*mk_ptentries)(phys_addr_t page, int prot); + phys_addr_t (*dte_addr_phys)(u32 addr); + u32 (*dma_addr_dte)(dma_addr_t dt_dma); + u64 dma_bit_mask; }; struct rk_iommu { struct device *dev; void __iomem **bases; int num_mmu; + int num_irq; struct clk_bulk_data *clocks; int num_clocks; bool reset_disabled; - bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */ + bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */ bool dlr_disable; /* avoid access iommu when runtime ops called */ bool cmd_retry; + bool master_handle_irq; struct iommu_device iommu; struct list_head node; /* entry in rk_iommu_domain.iommus */ struct iommu_domain *domain; /* domain to which iommu is attached */ struct iommu_group *group; - u32 version; + bool shootdown_entire; + bool iommu_enabled; + bool need_res_map; }; struct rk_iommudata { @@ -146,6 +135,10 @@ }; static struct device *dma_dev; +static const struct rk_iommu_ops *rk_ops; +static struct rk_iommu *rk_iommu_from_dev(struct device *dev); +static char reserve_range[PAGE_SIZE] __aligned(PAGE_SIZE); +static
phys_addr_t res_page; static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma, unsigned int count) @@ -204,6 +197,11 @@ #define RK_DTE_PT_ADDRESS_MASK 0xfffff000 #define RK_DTE_PT_VALID BIT(0) +static inline phys_addr_t rk_dte_pt_address(u32 dte) +{ + return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK; +} + /* * In v2: * 31:12 - PT address bit 31:0 @@ -212,20 +210,21 @@ * 3: 1 - Reserved * 0 - 1 if PT @ PT address is valid */ -#define RK_DTE_PT_ADDRESS_MASK_V2 0xfffffff0 - -static inline phys_addr_t rk_dte_pt_address(u32 dte) -{ - return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK; -} +#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4) +#define DTE_HI_MASK1 GENMASK(11, 8) +#define DTE_HI_MASK2 GENMASK(7, 4) +#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */ +#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */ +#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36) static inline phys_addr_t rk_dte_pt_address_v2(u32 dte) { u64 dte_v2 = dte; - dte_v2 = ((dte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) | - ((dte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) | - (dte_v2 & PAGE_DESC_LO_MASK); + dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) | + ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) | + (dte_v2 & RK_DTE_PT_ADDRESS_MASK); return (phys_addr_t)dte_v2; } @@ -242,9 +241,9 @@ static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma) { - pt_dma = (pt_dma & PAGE_DESC_LO_MASK) | - ((pt_dma & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) | - (pt_dma & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2; + pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) | + ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) | + (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2; return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID; } @@ -275,49 +274,25 @@ #define RK_PTE_PAGE_READABLE BIT(1) #define RK_PTE_PAGE_VALID BIT(0) -/* - * In v2: - * 31:12 - Page address bit 31:0 - * 11:9 - Page address bit 34:32 - * 8:4 - Page address bit 39:35 - * 3 - Security - * 2 - Readable - * 1 - Writable - * 0 - 1 if Page @ Page address is valid - */ -#define RK_PTE_PAGE_ADDRESS_MASK_V2 0xfffffff0 -#define RK_PTE_PAGE_FLAGS_MASK_V2 0x0000000e -#define RK_PTE_PAGE_READABLE_V2 BIT(2) -#define RK_PTE_PAGE_WRITABLE_V2 BIT(1) - -static inline phys_addr_t rk_pte_page_address(u32 pte) -{ - return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK; -} - -static inline phys_addr_t rk_pte_page_address_v2(u32 pte) -{ - u64 pte_v2 = pte; - - pte_v2 = ((pte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) | - ((pte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) | - (pte_v2 & PAGE_DESC_LO_MASK); - - return (phys_addr_t)pte_v2; -} - static inline bool rk_pte_is_page_valid(u32 pte) { return pte & RK_PTE_PAGE_VALID; +} + +#define RK_PTE_PAGE_REPRESENT BIT(3) + +static inline bool rk_pte_is_page_represent(u32 pte) +{ + return pte & RK_PTE_PAGE_REPRESENT; } /* TODO: set cache flags per prot IOMMU_CACHE */ static u32 rk_mk_pte(phys_addr_t page, int prot) { u32 flags = 0; - flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0; + flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0; page &= RK_PTE_PAGE_ADDRESS_MASK; return page | flags | RK_PTE_PAGE_VALID; } @@ -326,18 +301,21 @@ { u32 flags = 0; - flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0; - flags |= (prot & IOMMU_WRITE) ? 
RK_PTE_PAGE_WRITABLE_V2 : 0; - page = (page & PAGE_DESC_LO_MASK) | - ((page & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) | - (page & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2; - page &= RK_PTE_PAGE_ADDRESS_MASK_V2; - return page | flags | RK_PTE_PAGE_VALID; + /* If BIT(3) is set, don't fail iommu_map when BIT(0) is already set. + * That means a page which is already present can be re-updated, and we + * use this bit to re-update a pre-mapped 4G range. + */ + flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0; + + flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; + flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0; + + return rk_mk_dte_v2(page) | flags; } static u32 rk_mk_pte_invalid(u32 pte) { - return pte & ~RK_PTE_PAGE_VALID; + return pte & ~(RK_PTE_PAGE_VALID | RK_PTE_PAGE_REPRESENT); } /* @@ -578,12 +556,17 @@ return ret; } +static u32 rk_iommu_read_dte_addr(void __iomem *base) +{ + return rk_iommu_read(base, RK_MMU_DTE_ADDR); +} + static int rk_iommu_force_reset(struct rk_iommu *iommu) { int ret, i; u32 dte_addr; bool val; - u32 address_mask; + u32 dte_address_mask; if (iommu->reset_disabled) return 0; @@ -600,14 +583,13 @@ * In v2: upper 7 nybbles are read back. */ for (i = 0; i < iommu->num_mmu; i++) { - rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY); + dte_address_mask = rk_ops->pt_address(DTE_ADDR_DUMMY); + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_address_mask); - if (iommu->version >= 0x2) - address_mask = RK_DTE_PT_ADDRESS_MASK_V2; - else - address_mask = RK_DTE_PT_ADDRESS_MASK; - dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR); - if (dte_addr != (DTE_ADDR_DUMMY & address_mask)) { + ret = readx_poll_timeout(rk_iommu_read_dte_addr, iommu->bases[i], dte_addr, + dte_addr == dte_address_mask, + RK_MMU_POLL_PERIOD_US, RK_MMU_POLL_TIMEOUT_US); + if (ret) { dev_err(iommu->dev, "Error during raw reset. 
MMU_DTE_ADDR is not functioning\n"); return -EFAULT; } @@ -619,14 +601,41 @@ return 0; ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val, - val, RK_MMU_FORCE_RESET_TIMEOUT_US, - RK_MMU_POLL_TIMEOUT_US); + val, RK_MMU_POLL_TIMEOUT_US, + RK_MMU_FORCE_RESET_TIMEOUT_US); if (ret) { dev_err(iommu->dev, "FORCE_RESET command timed out\n"); return ret; } return 0; +} + +static inline phys_addr_t rk_dte_addr_phys(u32 addr) +{ + return (phys_addr_t)addr; +} + +static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma) +{ + return dt_dma; +} + +#define DT_HI_MASK GENMASK_ULL(39, 32) +#define DTE_BASE_HI_MASK GENMASK(11, 4) +#define DT_SHIFT 28 + +static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr) +{ + u64 addr64 = addr; + return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) | + ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT); +} + +static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma) +{ + return (dt_dma & RK_DTE_PT_ADDRESS_MASK) | + ((dt_dma & DT_HI_MASK) >> DT_SHIFT); } static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova) @@ -648,11 +657,7 @@ page_offset = rk_iova_page_offset(iova); mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR); - mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr; - if (iommu->version >= 0x2) { - mmu_dte_addr_phys = (mmu_dte_addr_phys & DT_LO_MASK) | - ((mmu_dte_addr_phys & DTE_BASE_HI_MASK) << DT_SHIFT); - } + mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr); dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); dte_addr = phys_to_virt(dte_addr_phys); @@ -661,20 +666,14 @@ if (!rk_dte_is_pt_valid(dte)) goto print_it; - if (iommu->version >= 0x2) - pte_addr_phys = rk_dte_pt_address_v2(dte) + (pte_index * 4); - else - pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4); + pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4); pte_addr = phys_to_virt(pte_addr_phys); pte = *pte_addr; if (!rk_pte_is_page_valid(pte)) goto print_it; - if (iommu->version >= 0x2) - page_addr_phys = rk_pte_page_address_v2(pte) + page_offset; - else - page_addr_phys = rk_pte_page_address(pte) + page_offset; + page_addr_phys = rk_ops->pt_address(pte) + page_offset; page_flags = pte & RK_PTE_PAGE_FLAGS_MASK; print_it: @@ -686,22 +685,14 @@ rk_pte_is_page_valid(pte), &page_addr_phys, page_flags); } -static irqreturn_t rk_iommu_irq(int irq, void *dev_id) +static int rk_pagefault_done(struct rk_iommu *iommu) { - struct rk_iommu *iommu = dev_id; u32 status; u32 int_status; - u32 int_mask; dma_addr_t iova; + int i; + u32 int_mask; irqreturn_t ret = IRQ_NONE; - int i, err; - - err = pm_runtime_get_if_in_use(iommu->dev); - if (WARN_ON_ONCE(err <= 0)) - return ret; - - if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) - goto out; for (i = 0; i < iommu->num_mmu; i++) { int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS); @@ -724,16 +715,18 @@ log_iova(iommu, i, iova); - /* - * Report page fault to any installed handlers. - * Ignore the return code, though, since we always zap cache - * and clear the page fault anyway. - */ - if (iommu->domain) - report_iommu_fault(iommu->domain, iommu->dev, iova, + if (!iommu->master_handle_irq) { + /* + * Report page fault to any installed handlers. + * Ignore the return code, though, since we always zap cache + * and clear the page fault anyway. 
+ */ + if (iommu->domain) + report_iommu_fault(iommu->domain, iommu->dev, iova, status); - else - dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); + else + dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); + } rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); @@ -755,6 +748,46 @@ int_status); rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status); + } + + return ret; +} + +int rockchip_pagefault_done(struct device *master_dev) +{ + struct rk_iommu *iommu = rk_iommu_from_dev(master_dev); + + return rk_pagefault_done(iommu); +} +EXPORT_SYMBOL_GPL(rockchip_pagefault_done); + +void __iomem *rockchip_get_iommu_base(struct device *master_dev, int idx) +{ + struct rk_iommu *iommu = rk_iommu_from_dev(master_dev); + + return iommu->bases[idx]; +} +EXPORT_SYMBOL_GPL(rockchip_get_iommu_base); + +static irqreturn_t rk_iommu_irq(int irq, void *dev_id) +{ + struct rk_iommu *iommu = dev_id; + irqreturn_t ret = IRQ_NONE; + int err; + + err = pm_runtime_get_if_in_use(iommu->dev); + if (WARN_ON_ONCE(err <= 0)) + return ret; + + if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) + goto out; + + /* Master must call rockchip_pagefault_done to handle pagefault */ + if (iommu->master_handle_irq) { + if (iommu->domain) + ret = report_iommu_fault(iommu->domain, iommu->dev, -1, 0x0); + } else { + ret = rk_pagefault_done(iommu); } clk_bulk_disable(iommu->num_clocks, iommu->clocks); @@ -779,41 +812,13 @@ if (!rk_dte_is_pt_valid(dte)) goto out; - pt_phys = rk_dte_pt_address(dte); + pt_phys = rk_ops->pt_address(dte); page_table = (u32 *)phys_to_virt(pt_phys); pte = page_table[rk_iova_pte_index(iova)]; if (!rk_pte_is_page_valid(pte)) goto out; - phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); -out: - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); - - return phys; -} - -static phys_addr_t rk_iommu_iova_to_phys_v2(struct iommu_domain *domain, - dma_addr_t iova) -{ - struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; - phys_addr_t pt_phys, phys = 0; - u32 dte, pte; - u32 *page_table; - - spin_lock_irqsave(&rk_domain->dt_lock, flags); - - dte = rk_domain->dt[rk_iova_dte_index(iova)]; - if (!rk_dte_is_pt_valid(dte)) - goto out; - - pt_phys = rk_dte_pt_address_v2(dte); - page_table = (u32 *)phys_to_virt(pt_phys); - pte = page_table[rk_iova_pte_index(iova)]; - if (!rk_pte_is_page_valid(pte)) - goto out; - - phys = rk_pte_page_address_v2(pte) + rk_iova_page_offset(iova); + phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova); out: spin_unlock_irqrestore(&rk_domain->dt_lock, flags); @@ -825,6 +830,10 @@ { struct list_head *pos; unsigned long flags; + + /* Do not zap tlb cache line if shootdown_entire set */ + if (rk_domain->shootdown_entire) + return; /* shootdown these iova from all iommus using this domain */ spin_lock_irqsave(&rk_domain->iommus_lock, flags); @@ -885,61 +894,23 @@ return ERR_PTR(-ENOMEM); } - dte = rk_mk_dte(pt_dma); + dte = rk_ops->mk_dtentries(pt_dma); *dte_addr = dte; - rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES); rk_table_flush(rk_domain, rk_domain->dt_dma + dte_index * sizeof(u32), 1); done: - pt_phys = rk_dte_pt_address(dte); - return (u32 *)phys_to_virt(pt_phys); -} - -static u32 *rk_dte_get_page_table_v2(struct rk_iommu_domain *rk_domain, - dma_addr_t iova) -{ - u32 *page_table, *dte_addr; - u32 dte_index, dte; - phys_addr_t pt_phys; - dma_addr_t pt_dma; - - assert_spin_locked(&rk_domain->dt_lock); - - dte_index = rk_iova_dte_index(iova); - dte_addr = 
&rk_domain->dt[dte_index]; - dte = *dte_addr; - if (rk_dte_is_pt_valid(dte)) - goto done; - - page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); - if (!page_table) - return ERR_PTR(-ENOMEM); - - pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE); - if (dma_mapping_error(dma_dev, pt_dma)) { - dev_err(dma_dev, "DMA mapping error while allocating page table\n"); - free_page((unsigned long)page_table); - return ERR_PTR(-ENOMEM); - } - - dte = rk_mk_dte_v2(pt_dma); - *dte_addr = dte; - - rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES); - rk_table_flush(rk_domain, - rk_domain->dt_dma + dte_index * sizeof(u32), 1); -done: - pt_phys = rk_dte_pt_address_v2(dte); + pt_phys = rk_ops->pt_address(dte); return (u32 *)phys_to_virt(pt_phys); } static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, dma_addr_t pte_dma, - size_t size) + size_t size, struct rk_iommu *iommu) { unsigned int pte_count; unsigned int pte_total = size / SPAGE_SIZE; + int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV; assert_spin_locked(&rk_domain->dt_lock); @@ -948,12 +919,33 @@ if (!rk_pte_is_page_valid(pte)) break; - pte_addr[pte_count] = rk_mk_pte_invalid(pte); + if (iommu && iommu->need_res_map) + pte_addr[pte_count] = rk_ops->mk_ptentries(res_page, + prot); + else + pte_addr[pte_count] = rk_mk_pte_invalid(pte); } rk_table_flush(rk_domain, pte_dma, pte_count); return pte_count * SPAGE_SIZE; +} + +static struct rk_iommu *rk_iommu_get(struct rk_iommu_domain *rk_domain) +{ + unsigned long flags; + struct list_head *pos; + struct rk_iommu *iommu = NULL; + + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_for_each(pos, &rk_domain->iommus) { + iommu = list_entry(pos, struct rk_iommu, node); + if (iommu->need_res_map) + break; + } + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + + return iommu; } static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, @@ -969,12 +961,16 @@ for (pte_count = 0; pte_count < pte_total; pte_count++) { u32 pte = pte_addr[pte_count]; - if (rk_pte_is_page_valid(pte)) + if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte)) goto unwind; - pte_addr[pte_count] = rk_mk_pte(paddr, prot); + if (prot & IOMMU_PRIV) { + pte_addr[pte_count] = rk_ops->mk_ptentries(res_page, prot); + } else { + pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot); - paddr += SPAGE_SIZE; + paddr += SPAGE_SIZE; + } } rk_table_flush(rk_domain, pte_dma, pte_total); @@ -985,67 +981,16 @@ * We only zap the first and last iova, since only they could have * dte or pte shared with an existing mapping. 
*/ - - /* Do not zap tlb cache line if IOMMU_TLB_SHOT_ENTIRE set */ - if (!(prot & IOMMU_TLB_SHOT_ENTIRE)) - rk_iommu_zap_iova_first_last(rk_domain, iova, size); + rk_iommu_zap_iova_first_last(rk_domain, iova, size); return 0; unwind: /* Unmap the range of iovas that we just mapped */ rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, - pte_count * SPAGE_SIZE); + pte_count * SPAGE_SIZE, NULL); iova += pte_count * SPAGE_SIZE; - page_phys = rk_pte_page_address(pte_addr[pte_count]); - pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", - &iova, &page_phys, &paddr, prot); - - return -EADDRINUSE; -} - -static int rk_iommu_map_iova_v2(struct rk_iommu_domain *rk_domain, u32 *pte_addr, - dma_addr_t pte_dma, dma_addr_t iova, - phys_addr_t paddr, size_t size, int prot) -{ - unsigned int pte_count; - unsigned int pte_total = size / SPAGE_SIZE; - phys_addr_t page_phys; - - assert_spin_locked(&rk_domain->dt_lock); - - for (pte_count = 0; pte_count < pte_total; pte_count++) { - u32 pte = pte_addr[pte_count]; - - if (rk_pte_is_page_valid(pte)) - goto unwind; - - pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot); - - paddr += SPAGE_SIZE; - } - - rk_table_flush(rk_domain, pte_dma, pte_total); - - /* - * Zap the first and last iova to evict from iotlb any previously - * mapped cachelines holding stale values for its dte and pte. - * We only zap the first and last iova, since only they could have - * dte or pte shared with an existing mapping. - */ - - /* Do not zap tlb cache line if IOMMU_TLB_SHOT_ENTIRE set */ - if (!(prot & IOMMU_TLB_SHOT_ENTIRE)) - rk_iommu_zap_iova_first_last(rk_domain, iova, size); - - return 0; -unwind: - /* Unmap the range of iovas that we just mapped */ - rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, - pte_count * SPAGE_SIZE); - - iova += pte_count * SPAGE_SIZE; - page_phys = rk_pte_page_address_v2(pte_addr[pte_count]); + page_phys = rk_ops->pt_address(pte_addr[pte_count]); pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", &iova, &page_phys, &paddr, prot); @@ -1053,7 +998,7 @@ } static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; @@ -1080,7 +1025,7 @@ dte = rk_domain->dt[rk_iova_dte_index(iova)]; pte_index = rk_iova_pte_index(iova); pte_addr = &page_table[pte_index]; - pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32); + pte_dma = rk_ops->pt_address(dte) + pte_index * sizeof(u32); ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, paddr, size, prot); @@ -1089,45 +1034,8 @@ return ret; } -static int rk_iommu_map_v2(struct iommu_domain *domain, unsigned long _iova, - phys_addr_t paddr, size_t size, int prot) -{ - struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; - dma_addr_t pte_dma, iova = (dma_addr_t)_iova; - u32 *page_table, *pte_addr; - u32 dte, pte_index; - int ret; - - spin_lock_irqsave(&rk_domain->dt_lock, flags); - - /* - * pgsize_bitmap specifies iova sizes that fit in one page table - * (1024 4-KiB pages = 4 MiB). - * So, size will always be 4096 <= size <= 4194304. - * Since iommu_map() guarantees that both iova and size will be - * aligned, we will always only be mapping from a single dte here. 
- */ - page_table = rk_dte_get_page_table_v2(rk_domain, iova); - if (IS_ERR(page_table)) { - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); - return PTR_ERR(page_table); - } - - dte = rk_domain->dt[rk_iova_dte_index(iova)]; - pte_index = rk_iova_pte_index(iova); - pte_addr = &page_table[pte_index]; - pte_dma = rk_dte_pt_address_v2(dte) + pte_index * sizeof(u32); - ret = rk_iommu_map_iova_v2(rk_domain, pte_addr, pte_dma, iova, - paddr, size, prot); - - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); - - return ret; -} - static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; @@ -1136,6 +1044,7 @@ u32 dte; u32 *pte_addr; size_t unmap_size; + struct rk_iommu *iommu = rk_iommu_get(rk_domain); spin_lock_irqsave(&rk_domain->dt_lock, flags); @@ -1153,50 +1062,11 @@ return 0; } - pt_phys = rk_dte_pt_address(dte); + pt_phys = rk_ops->pt_address(dte); pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32); - unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); - - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); - - /* Shootdown iotlb entries for iova range that was just unmapped */ - rk_iommu_zap_iova(rk_domain, iova, unmap_size); - - return unmap_size; -} - -static size_t rk_iommu_unmap_v2(struct iommu_domain *domain, unsigned long _iova, - size_t size) -{ - struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; - dma_addr_t pte_dma, iova = (dma_addr_t)_iova; - phys_addr_t pt_phys; - u32 dte; - u32 *pte_addr; - size_t unmap_size; - - spin_lock_irqsave(&rk_domain->dt_lock, flags); - - /* - * pgsize_bitmap specifies iova sizes that fit in one page table - * (1024 4-KiB pages = 4 MiB). - * So, size will always be 4096 <= size <= 4194304. - * Since iommu_unmap() guarantees that both iova and size will be - * aligned, we will always only be unmapping from a single dte here. - */ - dte = rk_domain->dt[rk_iova_dte_index(iova)]; - /* Just return 0 if iova is unmapped */ - if (!rk_dte_is_pt_valid(dte)) { - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); - return 0; - } - - pt_phys = rk_dte_pt_address_v2(dte); - pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); - pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32); - unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); + unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size, + iommu); spin_unlock_irqrestore(&rk_domain->dt_lock, flags); @@ -1237,7 +1107,7 @@ static struct rk_iommu *rk_iommu_from_dev(struct device *dev) { - struct rk_iommudata *data = dev->archdata.iommu; + struct rk_iommudata *data = dev_iommu_priv_get(dev); return data ? 
data->iommu : NULL; } @@ -1257,6 +1127,8 @@ } rk_iommu_disable_stall(iommu); clk_bulk_disable(iommu->num_clocks, iommu->clocks); + + iommu->iommu_enabled = false; } int rockchip_iommu_disable(struct device *dev) @@ -1279,7 +1151,6 @@ struct iommu_domain *domain = iommu->domain; struct rk_iommu_domain *rk_domain = to_rk_domain(domain); int ret, i; - u32 dt_v2; u32 auto_gate; ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks); @@ -1295,14 +1166,8 @@ goto out_disable_stall; for (i = 0; i < iommu->num_mmu; i++) { - if (iommu->version >= 0x2) { - dt_v2 = (rk_domain->dt_dma & DT_LO_MASK) | - ((rk_domain->dt_dma & DT_HI_MASK) >> DT_SHIFT); - rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dt_v2); - } else { - rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, - rk_domain->dt_dma); - } + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, + rk_ops->dma_addr_dte(rk_domain->dt_dma)); rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); @@ -1318,6 +1183,10 @@ rk_iommu_disable_stall(iommu); out_disable_clocks: clk_bulk_disable(iommu->num_clocks, iommu->clocks); + + if (!ret) + iommu->iommu_enabled = true; + return ret; } @@ -1341,9 +1210,31 @@ if (!iommu) return false; - return rk_iommu_is_paging_enabled(iommu); + return iommu->iommu_enabled; } EXPORT_SYMBOL(rockchip_iommu_is_enabled); + +int rockchip_iommu_force_reset(struct device *dev) +{ + struct rk_iommu *iommu; + int ret; + + iommu = rk_iommu_from_dev(dev); + if (!iommu) + return -ENODEV; + + ret = rk_iommu_enable_stall(iommu); + if (ret) + return ret; + + ret = rk_iommu_force_reset(iommu); + + rk_iommu_disable_stall(iommu); + + return ret; + +} +EXPORT_SYMBOL(rockchip_iommu_force_reset); static void rk_iommu_detach_device(struct iommu_domain *domain, struct device *dev) @@ -1360,8 +1251,7 @@ dev_dbg(dev, "Detaching from iommu domain\n"); - /* iommu already detached */ - if (iommu->domain != domain) + if (!iommu->domain) return; iommu->domain = NULL; @@ -1396,10 +1286,6 @@ dev_dbg(dev, "Attaching to iommu domain\n"); - /* iommu already attached */ - if (iommu->domain == domain) - return 0; - if (iommu->domain) rk_iommu_detach_device(iommu->domain, dev); @@ -1413,6 +1299,7 @@ list_add_tail(&iommu->node, &rk_domain->iommus); spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + rk_domain->shootdown_entire = iommu->shootdown_entire; ret = pm_runtime_get_if_in_use(iommu->dev); if (!ret || WARN_ON_ONCE(ret < 0)) return 0; @@ -1460,8 +1347,6 @@ goto err_free_dt; } - rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES); - spin_lock_init(&rk_domain->iommus_lock); spin_lock_init(&rk_domain->dt_lock); INIT_LIST_HEAD(&rk_domain->iommus); @@ -1493,7 +1378,7 @@ for (i = 0; i < NUM_DT_ENTRIES; i++) { u32 dte = rk_domain->dt[i]; if (rk_dte_is_pt_valid(dte)) { - phys_addr_t pt_phys = rk_dte_pt_address(dte); + phys_addr_t pt_phys = rk_ops->pt_address(dte); u32 *page_table = phys_to_virt(pt_phys); dma_unmap_single(dma_dev, pt_phys, SPAGE_SIZE, DMA_TO_DEVICE); @@ -1505,58 +1390,20 @@ SPAGE_SIZE, DMA_TO_DEVICE); free_page((unsigned long)rk_domain->dt); - if (domain->type == IOMMU_DOMAIN_DMA) - iommu_put_dma_cookie(&rk_domain->domain); kfree(rk_domain); } -static void rk_iommu_domain_free_v2(struct iommu_domain *domain) +static struct iommu_device *rk_iommu_probe_device(struct device *dev) { - struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - int i; - - WARN_ON(!list_empty(&rk_domain->iommus)); - - for (i = 0; i < NUM_DT_ENTRIES; i++) { - u32 dte = rk_domain->dt[i]; - 
- if (rk_dte_is_pt_valid(dte)) { - phys_addr_t pt_phys = rk_dte_pt_address_v2(dte); - u32 *page_table = phys_to_virt(pt_phys); - - dma_unmap_single(dma_dev, pt_phys, - SPAGE_SIZE, DMA_TO_DEVICE); - free_page((unsigned long)page_table); - } - } - - dma_unmap_single(dma_dev, rk_domain->dt_dma, - SPAGE_SIZE, DMA_TO_DEVICE); - free_page((unsigned long)rk_domain->dt); - - if (domain->type == IOMMU_DOMAIN_DMA) - iommu_put_dma_cookie(&rk_domain->domain); - kfree(rk_domain); -} - -static int rk_iommu_add_device(struct device *dev) -{ - struct iommu_group *group; - struct rk_iommu *iommu; struct rk_iommudata *data; + struct rk_iommu *iommu; - data = dev->archdata.iommu; + data = dev_iommu_priv_get(dev); if (!data) - return -ENODEV; + return ERR_PTR(-ENODEV); iommu = rk_iommu_from_dev(dev); - group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); - iommu_group_put(group); - - iommu_device_link(&iommu->iommu, dev); data->link = device_link_add(dev, iommu->dev, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); @@ -1566,26 +1413,18 @@ if (!dev->dma_parms) dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL); if (!dev->dma_parms) - return -ENOMEM; + return ERR_PTR(-ENOMEM); dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); - return 0; + return &iommu->iommu; } -static void rk_iommu_remove_device(struct device *dev) +static void rk_iommu_release_device(struct device *dev) { - struct rk_iommu *iommu; - struct rk_iommudata *data = dev->archdata.iommu; - - iommu = rk_iommu_from_dev(dev); - - kfree(dev->dma_parms); - dev->dma_parms = NULL; + struct rk_iommudata *data = dev_iommu_priv_get(dev); device_link_del(data->link); - iommu_device_unlink(&iommu->iommu, dev); - iommu_group_remove_device(dev); } static struct iommu_group *rk_iommu_device_group(struct device *dev) @@ -1598,9 +1437,9 @@ } static bool rk_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) + struct device *dev) { - struct rk_iommudata *data = dev->archdata.iommu; + struct rk_iommudata *data = dev_iommu_priv_get(dev); return data->defer_attach; } @@ -1622,14 +1461,14 @@ if (strstr(dev_name(dev), "vop")) data->defer_attach = true; - dev->archdata.iommu = data; + dev_iommu_priv_set(dev, data); platform_device_put(iommu_dev); return 0; } -void rk_iommu_mask_irq(struct device *dev) +void rockchip_iommu_mask_irq(struct device *dev) { struct rk_iommu *iommu = rk_iommu_from_dev(dev); int i; @@ -1640,9 +1479,9 @@ for (i = 0; i < iommu->num_mmu; i++) rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); } -EXPORT_SYMBOL(rk_iommu_mask_irq); +EXPORT_SYMBOL(rockchip_iommu_mask_irq); -void rk_iommu_unmask_irq(struct device *dev) +void rockchip_iommu_unmask_irq(struct device *dev) { struct rk_iommu *iommu = rk_iommu_from_dev(dev); int i; @@ -1658,7 +1497,7 @@ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE); } } -EXPORT_SYMBOL(rk_iommu_unmask_irq); +EXPORT_SYMBOL(rockchip_iommu_unmask_irq); static const struct iommu_ops rk_iommu_ops = { .domain_alloc = rk_iommu_domain_alloc, @@ -1666,11 +1505,10 @@ .attach_dev = rk_iommu_attach_device, .detach_dev = rk_iommu_detach_device, .map = rk_iommu_map, - .map_sg = default_iommu_map_sg, .unmap = rk_iommu_unmap, .flush_iotlb_all = rk_iommu_flush_tlb_all, - .add_device = rk_iommu_add_device, - .remove_device = rk_iommu_remove_device, + .probe_device = rk_iommu_probe_device, + .release_device = rk_iommu_release_device, .iova_to_phys = rk_iommu_iova_to_phys, .is_attach_deferred = rk_iommu_is_attach_deferred, .device_group = rk_iommu_device_group, @@ 
-1678,68 +1516,33 @@ .of_xlate = rk_iommu_of_xlate, }; -static const struct iommu_ops rk_iommu_ops_v2 = { - .domain_alloc = rk_iommu_domain_alloc, - .domain_free = rk_iommu_domain_free_v2, - .attach_dev = rk_iommu_attach_device, - .detach_dev = rk_iommu_detach_device, - .map = rk_iommu_map_v2, - .unmap = rk_iommu_unmap_v2, - .map_sg = default_iommu_map_sg, - .flush_iotlb_all = rk_iommu_flush_tlb_all, - .add_device = rk_iommu_add_device, - .remove_device = rk_iommu_remove_device, - .iova_to_phys = rk_iommu_iova_to_phys_v2, - .is_attach_deferred = rk_iommu_is_attach_deferred, - .device_group = rk_iommu_device_group, - .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, - .of_xlate = rk_iommu_of_xlate, -}; - -static const struct rockchip_iommu_data iommu_data_v1 = { - .version = 0x1, -}; - -static const struct rockchip_iommu_data iommu_data_v2 = { - .version = 0x2, -}; - -static const struct of_device_id rk_iommu_dt_ids[] = { - { .compatible = "rockchip,iommu", - .data = &iommu_data_v1, - }, { - .compatible = "rockchip,iommu-v2", - .data = &iommu_data_v2, - }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); - static int rk_iommu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rk_iommu *iommu; struct resource *res; + const struct rk_iommu_ops *ops; int num_res = pdev->num_resources; - int err, i, irq; - const struct of_device_id *match; - struct rockchip_iommu_data *data; + int err, i; iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); if (!iommu) return -ENOMEM; - match = of_match_device(rk_iommu_dt_ids, dev); - if (!match) - return -EINVAL; - - data = (struct rockchip_iommu_data *)match->data; - iommu->version = data->version; - dev_info(dev, "version = %x\n", iommu->version); - platform_set_drvdata(pdev, iommu); iommu->dev = dev; iommu->num_mmu = 0; + + ops = of_device_get_match_data(dev); + if (!rk_ops) + rk_ops = ops; + + /* + * That should not happen unless different versions of the + * hardware block are embedded in the same SoC + */ + if (WARN_ON(rk_ops != ops)) + return -EINVAL; iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases), GFP_KERNEL); @@ -1758,17 +1561,27 @@ if (iommu->num_mmu == 0) return PTR_ERR(iommu->bases[0]); + iommu->num_irq = platform_irq_count(pdev); + if (iommu->num_irq < 0) + return iommu->num_irq; + iommu->reset_disabled = device_property_read_bool(dev, "rockchip,disable-mmu-reset"); iommu->skip_read = device_property_read_bool(dev, "rockchip,skip-mmu-read"); iommu->dlr_disable = device_property_read_bool(dev, "rockchip,disable-device-link-resume"); - + iommu->shootdown_entire = device_property_read_bool(dev, + "rockchip,shootdown-entire"); + iommu->master_handle_irq = device_property_read_bool(dev, + "rockchip,master-handle-irq"); if (of_machine_is_compatible("rockchip,rv1126") || of_machine_is_compatible("rockchip,rv1109")) iommu->cmd_retry = device_property_read_bool(dev, "rockchip,enable-cmd-retry"); + + iommu->need_res_map = device_property_read_bool(dev, + "rockchip,reserve-map"); /* * iommu clocks should be present for all new devices and devicetrees @@ -1797,10 +1610,8 @@ if (err) goto err_put_group; - if (iommu->version >= 0x2) - iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops_v2); - else - iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); + iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); + iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode); err = iommu_device_register(&iommu->iommu); @@ -1815,18 +1626,16 @@ if (!dma_dev) dma_dev = &pdev->dev; - if (iommu->version >= 0x2) - 
bus_set_iommu(&platform_bus_type, &rk_iommu_ops_v2); - else - bus_set_iommu(&platform_bus_type, &rk_iommu_ops); + bus_set_iommu(&platform_bus_type, &rk_iommu_ops); pm_runtime_enable(dev); if (iommu->skip_read) goto skip_request_irq; - i = 0; - while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) { + for (i = 0; i < iommu->num_irq; i++) { + int irq = platform_get_irq(pdev, i); + if (irq < 0) return irq; @@ -1839,6 +1648,14 @@ } skip_request_irq: + if (!res_page && iommu->need_res_map) { + res_page = __pa_symbol(reserve_range); + + pr_info("%s,%d, res_page = %pa\n", __func__, __LINE__, &res_page); + } + + dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask); + return 0; err_remove_sysfs: iommu_device_sysfs_remove(&iommu->iommu); @@ -1852,12 +1669,20 @@ static void rk_iommu_shutdown(struct platform_device *pdev) { struct rk_iommu *iommu = platform_get_drvdata(pdev); - int i = 0, irq; + int i; - while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) + if (iommu->skip_read) + goto skip_free_irq; + + for (i = 0; i < iommu->num_irq; i++) { + int irq = platform_get_irq(pdev, i); + devm_free_irq(iommu->dev, irq, iommu); + } - pm_runtime_force_suspend(&pdev->dev); +skip_free_irq: + if (!iommu->dlr_disable) + pm_runtime_force_suspend(&pdev->dev); } static int __maybe_unused rk_iommu_suspend(struct device *dev) @@ -1893,6 +1718,37 @@ pm_runtime_force_resume) }; +static struct rk_iommu_ops iommu_data_ops_v1 = { + .pt_address = &rk_dte_pt_address, + .mk_dtentries = &rk_mk_dte, + .mk_ptentries = &rk_mk_pte, + .dte_addr_phys = &rk_dte_addr_phys, + .dma_addr_dte = &rk_dma_addr_dte, + .dma_bit_mask = DMA_BIT_MASK(32), +}; + +static struct rk_iommu_ops iommu_data_ops_v2 = { + .pt_address = &rk_dte_pt_address_v2, + .mk_dtentries = &rk_mk_dte_v2, + .mk_ptentries = &rk_mk_pte_v2, + .dte_addr_phys = &rk_dte_addr_phys_v2, + .dma_addr_dte = &rk_dma_addr_dte_v2, + .dma_bit_mask = DMA_BIT_MASK(40), +}; + +static const struct of_device_id rk_iommu_dt_ids[] = { + { .compatible = "rockchip,iommu", + .data = &iommu_data_ops_v1, + }, + { .compatible = "rockchip,iommu-v2", + .data = &iommu_data_ops_v2, + }, + { .compatible = "rockchip,rk3568-iommu", + .data = &iommu_data_ops_v2, + }, + { /* sentinel */ } +}; + static struct platform_driver rk_iommu_driver = { .probe = rk_iommu_probe, .shutdown = rk_iommu_shutdown, -- Gitblit v1.6.2
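Note on the v2 descriptor packing used by rk_mk_dte_v2()/rk_dte_pt_address_v2() in this patch: a 40-bit physical address is folded into a 32-bit descriptor by keeping PA bits 31:12 in place, moving PA bits 35:32 into descriptor bits 11:8, and PA bits 39:36 into bits 7:4 (hence the 24- and 32-bit shifts). The user-space sketch below models that round trip; GENMASK_ULL is redefined locally and the mask names mirror the patch, but this is an illustration under those assumptions, not the driver code itself.

/* Minimal user-space model of the v2 DTE/PTE address fold. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define RK_DTE_PT_ADDRESS_MASK 0xfffff000u
#define RK_DTE_PT_VALID        (1u << 0)
#define DTE_HI_MASK1           GENMASK_ULL(11, 8)
#define DTE_HI_MASK2           GENMASK_ULL(7, 4)
#define DTE_HI_SHIFT1          24 /* PA bit 32 <-> descriptor bit 8 */
#define DTE_HI_SHIFT2          32 /* PA bit 36 <-> descriptor bit 4 */
#define PAGE_DESC_HI_MASK1     GENMASK_ULL(35, 32)
#define PAGE_DESC_HI_MASK2     GENMASK_ULL(39, 36)

static uint32_t mk_dte_v2(uint64_t pa)
{
	/* fold PA 35:32 down to bits 11:8 and PA 39:36 down to bits 7:4 */
	return (uint32_t)((pa & RK_DTE_PT_ADDRESS_MASK) |
			  ((pa & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
			  ((pa & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2)) |
	       RK_DTE_PT_VALID;
}

static uint64_t pt_address_v2(uint32_t dte)
{
	uint64_t d = dte;

	/* inverse fold, as in rk_dte_pt_address_v2() above */
	return ((d & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
	       ((d & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
	       (d & RK_DTE_PT_ADDRESS_MASK);
}

int main(void)
{
	/* hypothetical 4 KiB-aligned 40-bit physical address */
	uint64_t pa = 0xab12345000ULL;
	uint32_t dte = mk_dte_v2(pa);

	printf("pa=%#llx -> dte=%#x -> pa=%#llx\n",
	       (unsigned long long)pa, dte,
	       (unsigned long long)pt_address_v2(dte));
	assert(pt_address_v2(dte) == pa);
	return 0;
}

Running it prints pa=0xab12345000 -> dte=0x12345ba1 -> pa=0xab12345000, confirming the fold is lossless for 4 KiB-aligned 40-bit addresses (the valid bit sits below the masked ranges).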
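The page-directory base register uses a different fold: rk_dma_addr_dte_v2() keeps address bits 31:12 in place but stores PA bits 39:32 in register bits 11:4, a single shift of 28, and rk_dte_addr_phys_v2() inverts it when decoding RK_MMU_DTE_ADDR. A companion sketch with the same caveats as above (user-space model, locally defined GENMASK_ULL, hypothetical sample address):

/* Minimal user-space model of the v2 DT base register fold. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define RK_DTE_PT_ADDRESS_MASK 0xfffff000u
#define DT_HI_MASK             GENMASK_ULL(39, 32)
#define DTE_BASE_HI_MASK       GENMASK_ULL(11, 4)
#define DT_SHIFT               28 /* PA bit 32 <-> register bit 4 */

static uint32_t dma_addr_dte_v2(uint64_t dt_dma)
{
	/* as in rk_dma_addr_dte_v2(): PA 39:32 lands in bits 11:4 */
	return (uint32_t)((dt_dma & RK_DTE_PT_ADDRESS_MASK) |
			  ((dt_dma & DT_HI_MASK) >> DT_SHIFT));
}

static uint64_t dte_addr_phys_v2(uint32_t addr)
{
	uint64_t a = addr;

	/* as in rk_dte_addr_phys_v2(): undo the shift when decoding */
	return (a & RK_DTE_PT_ADDRESS_MASK) |
	       ((a & DTE_BASE_HI_MASK) << DT_SHIFT);
}

int main(void)
{
	uint64_t dt_dma = 0xab12345000ULL; /* hypothetical, 4 KiB aligned */
	uint32_t reg = dma_addr_dte_v2(dt_dma);

	printf("dt_dma=%#llx -> reg=%#x -> %#llx\n",
	       (unsigned long long)dt_dma, reg,
	       (unsigned long long)dte_addr_phys_v2(reg));
	assert(dte_addr_phys_v2(reg) == dt_dma);
	return 0;
}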
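Structurally, the patch replaces the scattered iommu->version >= 0x2 branches with one rk_ops pointer bound at probe time from the of_device_id match data, so hot paths call rk_ops->pt_address() instead of branching. The stand-alone model below shows that dispatch pattern; demo_ops and demo_match are illustrative stand-ins for rk_iommu_ops and of_device_get_match_data(), not driver API.

/* Minimal model of per-variant ops selected once at probe time. */
#include <stdint.h>
#include <stdio.h>

struct demo_ops {
	uint64_t (*pt_address)(uint32_t dte);
};

static uint64_t pt_address_v1(uint32_t dte)
{
	return dte & 0xfffff000u; /* v1: PA bits 31:12 only */
}

static uint64_t pt_address_v2(uint32_t dte)
{
	uint64_t d = dte;

	/* v2: descriptor bits 11:8 -> PA 35:32, bits 7:4 -> PA 39:36 */
	return ((d & 0xf0) << 32) | ((d & 0xf00) << 24) | (d & 0xfffff000u);
}

static const struct demo_ops ops_v1 = { .pt_address = pt_address_v1 };
static const struct demo_ops ops_v2 = { .pt_address = pt_address_v2 };

/* stands in for of_device_get_match_data() resolving the compatible */
static const struct demo_ops *demo_match(int version)
{
	return version >= 2 ? &ops_v2 : &ops_v1;
}

int main(void)
{
	const struct demo_ops *rk_ops = demo_match(2);

	/* same hot-path shape as rk_ops->pt_address(dte) in the patch */
	printf("%#llx\n", (unsigned long long)rk_ops->pt_address(0x12345ba0));
	return 0;
}

Binding the table once also explains the WARN_ON(rk_ops != ops) in probe: with a single file-scope rk_ops, mixing hardware variants in one SoC would silently use the wrong helpers, so the driver refuses it outright.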