hc
2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/iommu/rockchip-iommu.c
....@@ -1,7 +1,9 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
2
- * This program is free software; you can redistribute it and/or modify
3
- * it under the terms of the GNU General Public License version 2 as
4
- * published by the Free Software Foundation.
3
+ * IOMMU API for Rockchip
4
+ *
5
+ * Module Authors: Simon Xue <xxm@rock-chips.com>
6
+ * Daniel Kurtz <djkurtz@chromium.org>
57 */
68
79 #include <linux/clk.h>
....@@ -18,6 +20,7 @@
1820 #include <linux/list.h>
1921 #include <linux/mm.h>
2022 #include <linux/module.h>
23
+#include <linux/init.h>
2124 #include <linux/of.h>
2225 #include <linux/of_iommu.h>
2326 #include <linux/of_platform.h>
....@@ -84,59 +87,45 @@
8487 */
8588 #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
8689
87
-#define DT_LO_MASK 0xfffff000
88
-#define DT_HI_MASK GENMASK_ULL(39, 32)
89
-#define DT_SHIFT 28
90
-
91
-#define DTE_BASE_HI_MASK GENMASK(11, 4)
92
-
93
-#define PAGE_DESC_LO_MASK 0xfffff000
94
-#define PAGE_DESC_HI1_LOWER 32
95
-#define PAGE_DESC_HI1_UPPER 35
96
-#define PAGE_DESC_HI2_LOWER 36
97
-#define PAGE_DESC_HI2_UPPER 39
98
-#define PAGE_DESC_HI_MASK1 GENMASK_ULL(PAGE_DESC_HI1_UPPER, PAGE_DESC_HI1_LOWER)
99
-#define PAGE_DESC_HI_MASK2 GENMASK_ULL(PAGE_DESC_HI2_UPPER, PAGE_DESC_HI2_LOWER)
100
-
101
-#define DTE_HI1_LOWER 8
102
-#define DTE_HI1_UPPER 11
103
-#define DTE_HI2_LOWER 4
104
-#define DTE_HI2_UPPER 7
105
-#define DTE_HI_MASK1 GENMASK(DTE_HI1_UPPER, DTE_HI1_LOWER)
106
-#define DTE_HI_MASK2 GENMASK(DTE_HI2_UPPER, DTE_HI2_LOWER)
107
-
108
-#define PAGE_DESC_HI_SHIFT1 (PAGE_DESC_HI1_LOWER - DTE_HI1_LOWER)
109
-#define PAGE_DESC_HI_SHIFT2 (PAGE_DESC_HI2_LOWER - DTE_HI2_LOWER)
110
-
11190 struct rk_iommu_domain {
11291 struct list_head iommus;
11392 u32 *dt; /* page directory table */
11493 dma_addr_t dt_dma;
11594 spinlock_t iommus_lock; /* lock for iommus list */
11695 spinlock_t dt_lock; /* lock for modifying page directory table */
96
+ bool shootdown_entire;
11797
11898 struct iommu_domain domain;
11999 };
120100
121
-struct rockchip_iommu_data {
122
- u32 version;
101
+struct rk_iommu_ops {
102
+ phys_addr_t (*pt_address)(u32 dte);
103
+ u32 (*mk_dtentries)(dma_addr_t pt_dma);
104
+ u32 (*mk_ptentries)(phys_addr_t page, int prot);
105
+ phys_addr_t (*dte_addr_phys)(u32 addr);
106
+ u32 (*dma_addr_dte)(dma_addr_t dt_dma);
107
+ u64 dma_bit_mask;
123108 };
124109
125110 struct rk_iommu {
126111 struct device *dev;
127112 void __iomem **bases;
128113 int num_mmu;
114
+ int num_irq;
129115 struct clk_bulk_data *clocks;
130116 int num_clocks;
131117 bool reset_disabled;
132
- bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
118
+ bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
133119 bool dlr_disable; /* avoid access iommu when runtime ops called */
134120 bool cmd_retry;
121
+ bool master_handle_irq;
135122 struct iommu_device iommu;
136123 struct list_head node; /* entry in rk_iommu_domain.iommus */
137124 struct iommu_domain *domain; /* domain to which iommu is attached */
138125 struct iommu_group *group;
139
- u32 version;
126
+ bool shootdown_entire;
127
+ bool iommu_enabled;
128
+ bool need_res_map;
140129 };
141130
142131 struct rk_iommudata {
....@@ -146,6 +135,10 @@
146135 };
147136
148137 static struct device *dma_dev;
138
+static const struct rk_iommu_ops *rk_ops;
139
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev);
140
+static char reserve_range[PAGE_SIZE] __aligned(PAGE_SIZE);
141
+static phys_addr_t res_page;
149142
150143 static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
151144 unsigned int count)
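The hunk above replaces the old iommu->version checks with a per-variant ops table: probe() looks the table up once via the OF match data and stores it in the file-scope rk_ops pointer, so every address conversion dispatches through a function pointer instead of an if/else on the version. A minimal sketch of the pattern (rk_example_pt_phys is a hypothetical helper, not part of the patch):

static phys_addr_t rk_example_pt_phys(u32 dte)
{
	/*
	 * v1 tables keep the whole page-table address in dte[31:12];
	 * v2 tables additionally fold address bits 39:32 into dte[11:4].
	 * Callers no longer branch on the IP version; they just call
	 * through whichever rk_iommu_ops table probe() selected.
	 */
	return rk_ops->pt_address(dte);
}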
....@@ -204,6 +197,11 @@
204197 #define RK_DTE_PT_ADDRESS_MASK 0xfffff000
205198 #define RK_DTE_PT_VALID BIT(0)
206199
200
+static inline phys_addr_t rk_dte_pt_address(u32 dte)
201
+{
202
+ return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
203
+}
204
+
207205 /*
208206 * In v2:
209207 * 31:12 - PT address bit 31:0
....@@ -212,20 +210,21 @@
212210 * 3: 1 - Reserved
213211 * 0 - 1 if PT @ PT address is valid
214212 */
215
-#define RK_DTE_PT_ADDRESS_MASK_V2 0xfffffff0
216
-
217
-static inline phys_addr_t rk_dte_pt_address(u32 dte)
218
-{
219
- return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
220
-}
213
+#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
214
+#define DTE_HI_MASK1 GENMASK(11, 8)
215
+#define DTE_HI_MASK2 GENMASK(7, 4)
216
+#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */
217
+#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */
218
+#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32)
219
+#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36)
221220
222221 static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
223222 {
224223 u64 dte_v2 = dte;
225224
226
- dte_v2 = ((dte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) |
227
- ((dte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
228
- (dte_v2 & PAGE_DESC_LO_MASK);
225
+ dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
226
+ ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
227
+ (dte_v2 & RK_DTE_PT_ADDRESS_MASK);
229228
230229 return (phys_addr_t)dte_v2;
231230 }
....@@ -242,9 +241,9 @@
242241
243242 static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
244243 {
245
- pt_dma = (pt_dma & PAGE_DESC_LO_MASK) |
246
- ((pt_dma & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
247
- (pt_dma & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2;
244
+ pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
245
+ ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
246
+ (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;
248247
249248 return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
250249 }
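As a worked example of the v2 packing implemented by rk_mk_dte_v2() and rk_dte_pt_address_v2() above (the address value is arbitrary, chosen only for illustration; the >>24 and >>32 correspond to DTE_HI_SHIFT1/2), this stand-alone user-space snippet round-trips a 40-bit page-table address through the 32-bit DTE encoding:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t pt_dma = 0xa345678000ULL;	/* bits 39:36 = 0xa, 35:32 = 0x3 */
	uint32_t dte;
	uint64_t back;

	/* pack, as rk_mk_dte_v2() does */
	dte = (pt_dma & 0xfffff000) |		/* bits 31:12 stay in place     */
	      ((pt_dma >> 24) & 0xf00) |	/* bits 35:32 land in dte[11:8] */
	      ((pt_dma >> 32) & 0x0f0) |	/* bits 39:36 land in dte[7:4]  */
	      0x1;				/* RK_DTE_PT_VALID              */
	assert(dte == 0x456783a1);

	/* unpack, as rk_dte_pt_address_v2() does */
	back = ((uint64_t)(dte & 0x0f0) << 32) |
	       ((uint64_t)(dte & 0xf00) << 24) |
	       (dte & 0xfffff000);
	assert(back == pt_dma);
	return 0;
}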
....@@ -275,49 +274,25 @@
275274 #define RK_PTE_PAGE_READABLE BIT(1)
276275 #define RK_PTE_PAGE_VALID BIT(0)
277276
278
-/*
279
- * In v2:
280
- * 31:12 - Page address bit 31:0
281
- * 11:9 - Page address bit 34:32
282
- * 8:4 - Page address bit 39:35
283
- * 3 - Security
284
- * 2 - Readable
285
- * 1 - Writable
286
- * 0 - 1 if Page @ Page address is valid
287
- */
288
-#define RK_PTE_PAGE_ADDRESS_MASK_V2 0xfffffff0
289
-#define RK_PTE_PAGE_FLAGS_MASK_V2 0x0000000e
290
-#define RK_PTE_PAGE_READABLE_V2 BIT(2)
291
-#define RK_PTE_PAGE_WRITABLE_V2 BIT(1)
292
-
293
-static inline phys_addr_t rk_pte_page_address(u32 pte)
294
-{
295
- return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
296
-}
297
-
298
-static inline phys_addr_t rk_pte_page_address_v2(u32 pte)
299
-{
300
- u64 pte_v2 = pte;
301
-
302
- pte_v2 = ((pte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) |
303
- ((pte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
304
- (pte_v2 & PAGE_DESC_LO_MASK);
305
-
306
- return (phys_addr_t)pte_v2;
307
-}
308
-
309277 static inline bool rk_pte_is_page_valid(u32 pte)
310278 {
311279 return pte & RK_PTE_PAGE_VALID;
280
+}
281
+
282
+#define RK_PTE_PAGE_REPRESENT BIT(3)
283
+
284
+static inline bool rk_pte_is_page_represent(u32 pte)
285
+{
286
+ return pte & RK_PTE_PAGE_REPRESENT;
312287 }
313288
314289 /* TODO: set cache flags per prot IOMMU_CACHE */
315290 static u32 rk_mk_pte(phys_addr_t page, int prot)
316291 {
317292 u32 flags = 0;
318
-
319293 flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
320294 flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
295
+ flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
321296 page &= RK_PTE_PAGE_ADDRESS_MASK;
322297 return page | flags | RK_PTE_PAGE_VALID;
323298 }
....@@ -326,18 +301,21 @@
326301 {
327302 u32 flags = 0;
328303
329
- flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
330
- flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
331
- page = (page & PAGE_DESC_LO_MASK) |
332
- ((page & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
333
- (page & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2;
334
- page &= RK_PTE_PAGE_ADDRESS_MASK_V2;
335
- return page | flags | RK_PTE_PAGE_VALID;
304
+ /* If BIT(3) is set, iommu_map() will not fail even when BIT(0) is
305
+ * already set: a page that is already present can be re-mapped. We use
306
+ * this bit to update a pre-mapped 4G range in place.
307
+ */
308
+ flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
309
+
310
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
311
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
312
+
313
+ return rk_mk_dte_v2(page) | flags;
336314 }
337315
338316 static u32 rk_mk_pte_invalid(u32 pte)
339317 {
340
- return pte & ~RK_PTE_PAGE_VALID;
318
+ return pte & ~(RK_PTE_PAGE_VALID | RK_PTE_PAGE_REPRESENT);
341319 }
342320
343321 /*
....@@ -578,12 +556,17 @@
578556 return ret;
579557 }
580558
559
+static u32 rk_iommu_read_dte_addr(void __iomem *base)
560
+{
561
+ return rk_iommu_read(base, RK_MMU_DTE_ADDR);
562
+}
563
+
581564 static int rk_iommu_force_reset(struct rk_iommu *iommu)
582565 {
583566 int ret, i;
584567 u32 dte_addr;
585568 bool val;
586
- u32 address_mask;
569
+ u32 dte_address_mask;
587570
588571 if (iommu->reset_disabled)
589572 return 0;
....@@ -600,14 +583,13 @@
600583 * In v2: upper 7 nybbles are read back.
601584 */
602585 for (i = 0; i < iommu->num_mmu; i++) {
603
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
586
+ dte_address_mask = rk_ops->pt_address(DTE_ADDR_DUMMY);
587
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_address_mask);
604588
605
- if (iommu->version >= 0x2)
606
- address_mask = RK_DTE_PT_ADDRESS_MASK_V2;
607
- else
608
- address_mask = RK_DTE_PT_ADDRESS_MASK;
609
- dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
610
- if (dte_addr != (DTE_ADDR_DUMMY & address_mask)) {
589
+ ret = readx_poll_timeout(rk_iommu_read_dte_addr, iommu->bases[i], dte_addr,
590
+ dte_addr == dte_address_mask,
591
+ RK_MMU_POLL_PERIOD_US, RK_MMU_POLL_TIMEOUT_US);
592
+ if (ret) {
611593 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
612594 return -EFAULT;
613595 }
....@@ -619,14 +601,41 @@
619601 return 0;
620602
621603 ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
622
- val, RK_MMU_FORCE_RESET_TIMEOUT_US,
623
- RK_MMU_POLL_TIMEOUT_US);
604
+ val, RK_MMU_POLL_TIMEOUT_US,
605
+ RK_MMU_FORCE_RESET_TIMEOUT_US);
624606 if (ret) {
625607 dev_err(iommu->dev, "FORCE_RESET command timed out\n");
626608 return ret;
627609 }
628610
629611 return 0;
612
+}
613
+
614
+static inline phys_addr_t rk_dte_addr_phys(u32 addr)
615
+{
616
+ return (phys_addr_t)addr;
617
+}
618
+
619
+static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
620
+{
621
+ return dt_dma;
622
+}
623
+
624
+#define DT_HI_MASK GENMASK_ULL(39, 32)
625
+#define DTE_BASE_HI_MASK GENMASK(11, 4)
626
+#define DT_SHIFT 28
627
+
628
+static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
629
+{
630
+ u64 addr64 = addr;
631
+ return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
632
+ ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
633
+}
634
+
635
+static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
636
+{
637
+ return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
638
+ ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
630639 }
631640
632641 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
....@@ -648,11 +657,7 @@
648657 page_offset = rk_iova_page_offset(iova);
649658
650659 mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
651
- mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
652
- if (iommu->version >= 0x2) {
653
- mmu_dte_addr_phys = (mmu_dte_addr_phys & DT_LO_MASK) |
654
- ((mmu_dte_addr_phys & DTE_BASE_HI_MASK) << DT_SHIFT);
655
- }
660
+ mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
656661
657662 dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
658663 dte_addr = phys_to_virt(dte_addr_phys);
....@@ -661,20 +666,14 @@
661666 if (!rk_dte_is_pt_valid(dte))
662667 goto print_it;
663668
664
- if (iommu->version >= 0x2)
665
- pte_addr_phys = rk_dte_pt_address_v2(dte) + (pte_index * 4);
666
- else
667
- pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
669
+ pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
668670 pte_addr = phys_to_virt(pte_addr_phys);
669671 pte = *pte_addr;
670672
671673 if (!rk_pte_is_page_valid(pte))
672674 goto print_it;
673675
674
- if (iommu->version >= 0x2)
675
- page_addr_phys = rk_pte_page_address_v2(pte) + page_offset;
676
- else
677
- page_addr_phys = rk_pte_page_address(pte) + page_offset;
676
+ page_addr_phys = rk_ops->pt_address(pte) + page_offset;
678677 page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
679678
680679 print_it:
....@@ -686,22 +685,14 @@
686685 rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
687686 }
688687
689
-static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
688
+static int rk_pagefault_done(struct rk_iommu *iommu)
690689 {
691
- struct rk_iommu *iommu = dev_id;
692690 u32 status;
693691 u32 int_status;
694
- u32 int_mask;
695692 dma_addr_t iova;
693
+ int i;
694
+ u32 int_mask;
696695 irqreturn_t ret = IRQ_NONE;
697
- int i, err;
698
-
699
- err = pm_runtime_get_if_in_use(iommu->dev);
700
- if (WARN_ON_ONCE(err <= 0))
701
- return ret;
702
-
703
- if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
704
- goto out;
705696
706697 for (i = 0; i < iommu->num_mmu; i++) {
707698 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
....@@ -724,16 +715,18 @@
724715
725716 log_iova(iommu, i, iova);
726717
727
- /*
728
- * Report page fault to any installed handlers.
729
- * Ignore the return code, though, since we always zap cache
730
- * and clear the page fault anyway.
731
- */
732
- if (iommu->domain)
733
- report_iommu_fault(iommu->domain, iommu->dev, iova,
718
+ if (!iommu->master_handle_irq) {
719
+ /*
720
+ * Report page fault to any installed handlers.
721
+ * Ignore the return code, though, since we always zap cache
722
+ * and clear the page fault anyway.
723
+ */
724
+ if (iommu->domain)
725
+ report_iommu_fault(iommu->domain, iommu->dev, iova,
734726 status);
735
- else
736
- dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
727
+ else
728
+ dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
729
+ }
737730
738731 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
739732
....@@ -755,6 +748,46 @@
755748 int_status);
756749
757750 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
751
+ }
752
+
753
+ return ret;
754
+}
755
+
756
+int rockchip_pagefault_done(struct device *master_dev)
757
+{
758
+ struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
759
+
760
+ return rk_pagefault_done(iommu);
761
+}
762
+EXPORT_SYMBOL_GPL(rockchip_pagefault_done);
763
+
764
+void __iomem *rockchip_get_iommu_base(struct device *master_dev, int idx)
765
+{
766
+ struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
767
+
768
+ return iommu->bases[idx];
769
+}
770
+EXPORT_SYMBOL_GPL(rockchip_get_iommu_base);
771
+
772
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
773
+{
774
+ struct rk_iommu *iommu = dev_id;
775
+ irqreturn_t ret = IRQ_NONE;
776
+ int err;
777
+
778
+ err = pm_runtime_get_if_in_use(iommu->dev);
779
+ if (WARN_ON_ONCE(err <= 0))
780
+ return ret;
781
+
782
+ if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
783
+ goto out;
784
+
785
+ /* The master must call rockchip_pagefault_done() to handle the page fault */
786
+ if (iommu->master_handle_irq) {
787
+ if (iommu->domain)
788
+ ret = report_iommu_fault(iommu->domain, iommu->dev, -1, 0x0);
789
+ } else {
790
+ ret = rk_pagefault_done(iommu);
758791 }
759792
760793 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
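With "rockchip,master-handle-irq" set, the interrupt handler above no longer clears the fault itself: it only notifies the attached domain through report_iommu_fault() (with a dummy IOVA of -1), and the master driver is expected to finish the job by calling the new rockchip_pagefault_done() export. A hedged sketch of what the master side could look like; the handler name, the use of the master device as the token, and the registration call are assumptions rather than something this patch adds (iommu_set_fault_handler() is the generic IOMMU API):

static int example_master_fault_handler(struct iommu_domain *domain,
					struct device *dev, unsigned long iova,
					int flags, void *token)
{
	struct device *master_dev = token;	/* the IOMMU's master device */

	/* ...master-specific recovery, e.g. reset the stalled DMA job... */

	/* acknowledge the fault: zaps the cache and clears INT_STATUS */
	return rockchip_pagefault_done(master_dev);
}

/* registered once, e.g. after attaching the master to its domain */
iommu_set_fault_handler(domain, example_master_fault_handler, master_dev);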
....@@ -779,41 +812,13 @@
779812 if (!rk_dte_is_pt_valid(dte))
780813 goto out;
781814
782
- pt_phys = rk_dte_pt_address(dte);
815
+ pt_phys = rk_ops->pt_address(dte);
783816 page_table = (u32 *)phys_to_virt(pt_phys);
784817 pte = page_table[rk_iova_pte_index(iova)];
785818 if (!rk_pte_is_page_valid(pte))
786819 goto out;
787820
788
- phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
789
-out:
790
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
791
-
792
- return phys;
793
-}
794
-
795
-static phys_addr_t rk_iommu_iova_to_phys_v2(struct iommu_domain *domain,
796
- dma_addr_t iova)
797
-{
798
- struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
799
- unsigned long flags;
800
- phys_addr_t pt_phys, phys = 0;
801
- u32 dte, pte;
802
- u32 *page_table;
803
-
804
- spin_lock_irqsave(&rk_domain->dt_lock, flags);
805
-
806
- dte = rk_domain->dt[rk_iova_dte_index(iova)];
807
- if (!rk_dte_is_pt_valid(dte))
808
- goto out;
809
-
810
- pt_phys = rk_dte_pt_address_v2(dte);
811
- page_table = (u32 *)phys_to_virt(pt_phys);
812
- pte = page_table[rk_iova_pte_index(iova)];
813
- if (!rk_pte_is_page_valid(pte))
814
- goto out;
815
-
816
- phys = rk_pte_page_address_v2(pte) + rk_iova_page_offset(iova);
821
+ phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
817822 out:
818823 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
819824
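For reference, the lookup above is the unchanged two-level walk; the patch only routes the address recovery through rk_ops->pt_address(). The 32-bit IOVA is split as bits 31:22 (directory index), bits 21:12 (page-table index) and bits 11:0 (offset into the 4 KiB page), matching the 1024 x 1024 x 4 KiB layout described elsewhere in this file. A hypothetical helper that just prints the split:

static void rk_example_split_iova(dma_addr_t iova)
{
	u32 dte_index = (iova & 0xffc00000) >> 22;	/* which page table  */
	u32 pte_index = (iova & 0x003ff000) >> 12;	/* which 4 KiB page  */
	u32 offset    =  iova & 0x00000fff;		/* byte within page  */

	pr_info("iova %pad -> dte %u, pte %u, offset %#x\n",
		&iova, dte_index, pte_index, offset);
}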
....@@ -825,6 +830,10 @@
825830 {
826831 struct list_head *pos;
827832 unsigned long flags;
833
+
834
+ /* Do not zap TLB cache lines if shootdown_entire is set */
835
+ if (rk_domain->shootdown_entire)
836
+ return;
828837
829838 /* shootdown these iova from all iommus using this domain */
830839 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
....@@ -885,61 +894,23 @@
885894 return ERR_PTR(-ENOMEM);
886895 }
887896
888
- dte = rk_mk_dte(pt_dma);
897
+ dte = rk_ops->mk_dtentries(pt_dma);
889898 *dte_addr = dte;
890899
891
- rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
892900 rk_table_flush(rk_domain,
893901 rk_domain->dt_dma + dte_index * sizeof(u32), 1);
894902 done:
895
- pt_phys = rk_dte_pt_address(dte);
896
- return (u32 *)phys_to_virt(pt_phys);
897
-}
898
-
899
-static u32 *rk_dte_get_page_table_v2(struct rk_iommu_domain *rk_domain,
900
- dma_addr_t iova)
901
-{
902
- u32 *page_table, *dte_addr;
903
- u32 dte_index, dte;
904
- phys_addr_t pt_phys;
905
- dma_addr_t pt_dma;
906
-
907
- assert_spin_locked(&rk_domain->dt_lock);
908
-
909
- dte_index = rk_iova_dte_index(iova);
910
- dte_addr = &rk_domain->dt[dte_index];
911
- dte = *dte_addr;
912
- if (rk_dte_is_pt_valid(dte))
913
- goto done;
914
-
915
- page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
916
- if (!page_table)
917
- return ERR_PTR(-ENOMEM);
918
-
919
- pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
920
- if (dma_mapping_error(dma_dev, pt_dma)) {
921
- dev_err(dma_dev, "DMA mapping error while allocating page table\n");
922
- free_page((unsigned long)page_table);
923
- return ERR_PTR(-ENOMEM);
924
- }
925
-
926
- dte = rk_mk_dte_v2(pt_dma);
927
- *dte_addr = dte;
928
-
929
- rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
930
- rk_table_flush(rk_domain,
931
- rk_domain->dt_dma + dte_index * sizeof(u32), 1);
932
-done:
933
- pt_phys = rk_dte_pt_address_v2(dte);
903
+ pt_phys = rk_ops->pt_address(dte);
934904 return (u32 *)phys_to_virt(pt_phys);
935905 }
936906
937907 static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
938908 u32 *pte_addr, dma_addr_t pte_dma,
939
- size_t size)
909
+ size_t size, struct rk_iommu *iommu)
940910 {
941911 unsigned int pte_count;
942912 unsigned int pte_total = size / SPAGE_SIZE;
913
+ int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV;
943914
944915 assert_spin_locked(&rk_domain->dt_lock);
945916
....@@ -948,12 +919,33 @@
948919 if (!rk_pte_is_page_valid(pte))
949920 break;
950921
951
- pte_addr[pte_count] = rk_mk_pte_invalid(pte);
922
+ if (iommu && iommu->need_res_map)
923
+ pte_addr[pte_count] = rk_ops->mk_ptentries(res_page,
924
+ prot);
925
+ else
926
+ pte_addr[pte_count] = rk_mk_pte_invalid(pte);
952927 }
953928
954929 rk_table_flush(rk_domain, pte_dma, pte_count);
955930
956931 return pte_count * SPAGE_SIZE;
932
+}
933
+
934
+static struct rk_iommu *rk_iommu_get(struct rk_iommu_domain *rk_domain)
935
+{
936
+ unsigned long flags;
937
+ struct list_head *pos;
938
+ struct rk_iommu *iommu = NULL;
939
+
940
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
941
+ list_for_each(pos, &rk_domain->iommus) {
942
+ iommu = list_entry(pos, struct rk_iommu, node);
943
+ if (iommu->need_res_map)
944
+ break;
945
+ }
946
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
947
+
948
+ return iommu;
957949 }
958950
959951 static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
....@@ -969,12 +961,16 @@
969961 for (pte_count = 0; pte_count < pte_total; pte_count++) {
970962 u32 pte = pte_addr[pte_count];
971963
972
- if (rk_pte_is_page_valid(pte))
964
+ if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
973965 goto unwind;
974966
975
- pte_addr[pte_count] = rk_mk_pte(paddr, prot);
967
+ if (prot & IOMMU_PRIV) {
968
+ pte_addr[pte_count] = rk_ops->mk_ptentries(res_page, prot);
969
+ } else {
970
+ pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);
976971
977
- paddr += SPAGE_SIZE;
972
+ paddr += SPAGE_SIZE;
973
+ }
978974 }
979975
980976 rk_table_flush(rk_domain, pte_dma, pte_total);
....@@ -985,67 +981,16 @@
985981 * We only zap the first and last iova, since only they could have
986982 * dte or pte shared with an existing mapping.
987983 */
988
-
989
- /* Do not zap tlb cache line if IOMMU_TLB_SHOT_ENTIRE set */
990
- if (!(prot & IOMMU_TLB_SHOT_ENTIRE))
991
- rk_iommu_zap_iova_first_last(rk_domain, iova, size);
984
+ rk_iommu_zap_iova_first_last(rk_domain, iova, size);
992985
993986 return 0;
994987 unwind:
995988 /* Unmap the range of iovas that we just mapped */
996989 rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
997
- pte_count * SPAGE_SIZE);
990
+ pte_count * SPAGE_SIZE, NULL);
998991
999992 iova += pte_count * SPAGE_SIZE;
1000
- page_phys = rk_pte_page_address(pte_addr[pte_count]);
1001
- pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
1002
- &iova, &page_phys, &paddr, prot);
1003
-
1004
- return -EADDRINUSE;
1005
-}
1006
-
1007
-static int rk_iommu_map_iova_v2(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
1008
- dma_addr_t pte_dma, dma_addr_t iova,
1009
- phys_addr_t paddr, size_t size, int prot)
1010
-{
1011
- unsigned int pte_count;
1012
- unsigned int pte_total = size / SPAGE_SIZE;
1013
- phys_addr_t page_phys;
1014
-
1015
- assert_spin_locked(&rk_domain->dt_lock);
1016
-
1017
- for (pte_count = 0; pte_count < pte_total; pte_count++) {
1018
- u32 pte = pte_addr[pte_count];
1019
-
1020
- if (rk_pte_is_page_valid(pte))
1021
- goto unwind;
1022
-
1023
- pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);
1024
-
1025
- paddr += SPAGE_SIZE;
1026
- }
1027
-
1028
- rk_table_flush(rk_domain, pte_dma, pte_total);
1029
-
1030
- /*
1031
- * Zap the first and last iova to evict from iotlb any previously
1032
- * mapped cachelines holding stale values for its dte and pte.
1033
- * We only zap the first and last iova, since only they could have
1034
- * dte or pte shared with an existing mapping.
1035
- */
1036
-
1037
- /* Do not zap tlb cache line if IOMMU_TLB_SHOT_ENTIRE set */
1038
- if (!(prot & IOMMU_TLB_SHOT_ENTIRE))
1039
- rk_iommu_zap_iova_first_last(rk_domain, iova, size);
1040
-
1041
- return 0;
1042
-unwind:
1043
- /* Unmap the range of iovas that we just mapped */
1044
- rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
1045
- pte_count * SPAGE_SIZE);
1046
-
1047
- iova += pte_count * SPAGE_SIZE;
1048
- page_phys = rk_pte_page_address_v2(pte_addr[pte_count]);
993
+ page_phys = rk_ops->pt_address(pte_addr[pte_count]);
1049994 pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
1050995 &iova, &page_phys, &paddr, prot);
1051996
....@@ -1053,7 +998,7 @@
1053998 }
1054999
10551000 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
1056
- phys_addr_t paddr, size_t size, int prot)
1001
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
10571002 {
10581003 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
10591004 unsigned long flags;
....@@ -1080,7 +1025,7 @@
10801025 dte = rk_domain->dt[rk_iova_dte_index(iova)];
10811026 pte_index = rk_iova_pte_index(iova);
10821027 pte_addr = &page_table[pte_index];
1083
- pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
1028
+ pte_dma = rk_ops->pt_address(dte) + pte_index * sizeof(u32);
10841029 ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
10851030 paddr, size, prot);
10861031
....@@ -1089,45 +1034,8 @@
10891034 return ret;
10901035 }
10911036
1092
-static int rk_iommu_map_v2(struct iommu_domain *domain, unsigned long _iova,
1093
- phys_addr_t paddr, size_t size, int prot)
1094
-{
1095
- struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1096
- unsigned long flags;
1097
- dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
1098
- u32 *page_table, *pte_addr;
1099
- u32 dte, pte_index;
1100
- int ret;
1101
-
1102
- spin_lock_irqsave(&rk_domain->dt_lock, flags);
1103
-
1104
- /*
1105
- * pgsize_bitmap specifies iova sizes that fit in one page table
1106
- * (1024 4-KiB pages = 4 MiB).
1107
- * So, size will always be 4096 <= size <= 4194304.
1108
- * Since iommu_map() guarantees that both iova and size will be
1109
- * aligned, we will always only be mapping from a single dte here.
1110
- */
1111
- page_table = rk_dte_get_page_table_v2(rk_domain, iova);
1112
- if (IS_ERR(page_table)) {
1113
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
1114
- return PTR_ERR(page_table);
1115
- }
1116
-
1117
- dte = rk_domain->dt[rk_iova_dte_index(iova)];
1118
- pte_index = rk_iova_pte_index(iova);
1119
- pte_addr = &page_table[pte_index];
1120
- pte_dma = rk_dte_pt_address_v2(dte) + pte_index * sizeof(u32);
1121
- ret = rk_iommu_map_iova_v2(rk_domain, pte_addr, pte_dma, iova,
1122
- paddr, size, prot);
1123
-
1124
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
1125
-
1126
- return ret;
1127
-}
1128
-
11291037 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
1130
- size_t size)
1038
+ size_t size, struct iommu_iotlb_gather *gather)
11311039 {
11321040 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
11331041 unsigned long flags;
....@@ -1136,6 +1044,7 @@
11361044 u32 dte;
11371045 u32 *pte_addr;
11381046 size_t unmap_size;
1047
+ struct rk_iommu *iommu = rk_iommu_get(rk_domain);
11391048
11401049 spin_lock_irqsave(&rk_domain->dt_lock, flags);
11411050
....@@ -1153,50 +1062,11 @@
11531062 return 0;
11541063 }
11551064
1156
- pt_phys = rk_dte_pt_address(dte);
1065
+ pt_phys = rk_ops->pt_address(dte);
11571066 pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
11581067 pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
1159
- unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
1160
-
1161
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
1162
-
1163
- /* Shootdown iotlb entries for iova range that was just unmapped */
1164
- rk_iommu_zap_iova(rk_domain, iova, unmap_size);
1165
-
1166
- return unmap_size;
1167
-}
1168
-
1169
-static size_t rk_iommu_unmap_v2(struct iommu_domain *domain, unsigned long _iova,
1170
- size_t size)
1171
-{
1172
- struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1173
- unsigned long flags;
1174
- dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
1175
- phys_addr_t pt_phys;
1176
- u32 dte;
1177
- u32 *pte_addr;
1178
- size_t unmap_size;
1179
-
1180
- spin_lock_irqsave(&rk_domain->dt_lock, flags);
1181
-
1182
- /*
1183
- * pgsize_bitmap specifies iova sizes that fit in one page table
1184
- * (1024 4-KiB pages = 4 MiB).
1185
- * So, size will always be 4096 <= size <= 4194304.
1186
- * Since iommu_unmap() guarantees that both iova and size will be
1187
- * aligned, we will always only be unmapping from a single dte here.
1188
- */
1189
- dte = rk_domain->dt[rk_iova_dte_index(iova)];
1190
- /* Just return 0 if iova is unmapped */
1191
- if (!rk_dte_is_pt_valid(dte)) {
1192
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
1193
- return 0;
1194
- }
1195
-
1196
- pt_phys = rk_dte_pt_address_v2(dte);
1197
- pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
1198
- pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
1199
- unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
1068
+ unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
1069
+ iommu);
12001070
12011071 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
12021072
....@@ -1237,7 +1107,7 @@
12371107
12381108 static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
12391109 {
1240
- struct rk_iommudata *data = dev->archdata.iommu;
1110
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
12411111
12421112 return data ? data->iommu : NULL;
12431113 }
....@@ -1257,6 +1127,8 @@
12571127 }
12581128 rk_iommu_disable_stall(iommu);
12591129 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
1130
+
1131
+ iommu->iommu_enabled = false;
12601132 }
12611133
12621134 int rockchip_iommu_disable(struct device *dev)
....@@ -1279,7 +1151,6 @@
12791151 struct iommu_domain *domain = iommu->domain;
12801152 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
12811153 int ret, i;
1282
- u32 dt_v2;
12831154 u32 auto_gate;
12841155
12851156 ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
....@@ -1295,14 +1166,8 @@
12951166 goto out_disable_stall;
12961167
12971168 for (i = 0; i < iommu->num_mmu; i++) {
1298
- if (iommu->version >= 0x2) {
1299
- dt_v2 = (rk_domain->dt_dma & DT_LO_MASK) |
1300
- ((rk_domain->dt_dma & DT_HI_MASK) >> DT_SHIFT);
1301
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dt_v2);
1302
- } else {
1303
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
1304
- rk_domain->dt_dma);
1305
- }
1169
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
1170
+ rk_ops->dma_addr_dte(rk_domain->dt_dma));
13061171 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
13071172 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
13081173
....@@ -1318,6 +1183,10 @@
13181183 rk_iommu_disable_stall(iommu);
13191184 out_disable_clocks:
13201185 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
1186
+
1187
+ if (!ret)
1188
+ iommu->iommu_enabled = true;
1189
+
13211190 return ret;
13221191 }
13231192
....@@ -1341,9 +1210,31 @@
13411210 if (!iommu)
13421211 return false;
13431212
1344
- return rk_iommu_is_paging_enabled(iommu);
1213
+ return iommu->iommu_enabled;
13451214 }
13461215 EXPORT_SYMBOL(rockchip_iommu_is_enabled);
1216
+
1217
+int rockchip_iommu_force_reset(struct device *dev)
1218
+{
1219
+ struct rk_iommu *iommu;
1220
+ int ret;
1221
+
1222
+ iommu = rk_iommu_from_dev(dev);
1223
+ if (!iommu)
1224
+ return -ENODEV;
1225
+
1226
+ ret = rk_iommu_enable_stall(iommu);
1227
+ if (ret)
1228
+ return ret;
1229
+
1230
+ ret = rk_iommu_force_reset(iommu);
1231
+
1232
+ rk_iommu_disable_stall(iommu);
1233
+
1234
+ return ret;
1235
+
1236
+}
1237
+EXPORT_SYMBOL(rockchip_iommu_force_reset);
13471238
13481239 static void rk_iommu_detach_device(struct iommu_domain *domain,
13491240 struct device *dev)
....@@ -1360,8 +1251,7 @@
13601251
13611252 dev_dbg(dev, "Detaching from iommu domain\n");
13621253
1363
- /* iommu already detached */
1364
- if (iommu->domain != domain)
1254
+ if (!iommu->domain)
13651255 return;
13661256
13671257 iommu->domain = NULL;
....@@ -1396,10 +1286,6 @@
13961286
13971287 dev_dbg(dev, "Attaching to iommu domain\n");
13981288
1399
- /* iommu already attached */
1400
- if (iommu->domain == domain)
1401
- return 0;
1402
-
14031289 if (iommu->domain)
14041290 rk_iommu_detach_device(iommu->domain, dev);
14051291
....@@ -1413,6 +1299,7 @@
14131299 list_add_tail(&iommu->node, &rk_domain->iommus);
14141300 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
14151301
1302
+ rk_domain->shootdown_entire = iommu->shootdown_entire;
14161303 ret = pm_runtime_get_if_in_use(iommu->dev);
14171304 if (!ret || WARN_ON_ONCE(ret < 0))
14181305 return 0;
....@@ -1460,8 +1347,6 @@
14601347 goto err_free_dt;
14611348 }
14621349
1463
- rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
1464
-
14651350 spin_lock_init(&rk_domain->iommus_lock);
14661351 spin_lock_init(&rk_domain->dt_lock);
14671352 INIT_LIST_HEAD(&rk_domain->iommus);
....@@ -1493,7 +1378,7 @@
14931378 for (i = 0; i < NUM_DT_ENTRIES; i++) {
14941379 u32 dte = rk_domain->dt[i];
14951380 if (rk_dte_is_pt_valid(dte)) {
1496
- phys_addr_t pt_phys = rk_dte_pt_address(dte);
1381
+ phys_addr_t pt_phys = rk_ops->pt_address(dte);
14971382 u32 *page_table = phys_to_virt(pt_phys);
14981383 dma_unmap_single(dma_dev, pt_phys,
14991384 SPAGE_SIZE, DMA_TO_DEVICE);
....@@ -1505,58 +1390,20 @@
15051390 SPAGE_SIZE, DMA_TO_DEVICE);
15061391 free_page((unsigned long)rk_domain->dt);
15071392
1508
- if (domain->type == IOMMU_DOMAIN_DMA)
1509
- iommu_put_dma_cookie(&rk_domain->domain);
15101393 kfree(rk_domain);
15111394 }
15121395
1513
-static void rk_iommu_domain_free_v2(struct iommu_domain *domain)
1396
+static struct iommu_device *rk_iommu_probe_device(struct device *dev)
15141397 {
1515
- struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1516
- int i;
1517
-
1518
- WARN_ON(!list_empty(&rk_domain->iommus));
1519
-
1520
- for (i = 0; i < NUM_DT_ENTRIES; i++) {
1521
- u32 dte = rk_domain->dt[i];
1522
-
1523
- if (rk_dte_is_pt_valid(dte)) {
1524
- phys_addr_t pt_phys = rk_dte_pt_address_v2(dte);
1525
- u32 *page_table = phys_to_virt(pt_phys);
1526
-
1527
- dma_unmap_single(dma_dev, pt_phys,
1528
- SPAGE_SIZE, DMA_TO_DEVICE);
1529
- free_page((unsigned long)page_table);
1530
- }
1531
- }
1532
-
1533
- dma_unmap_single(dma_dev, rk_domain->dt_dma,
1534
- SPAGE_SIZE, DMA_TO_DEVICE);
1535
- free_page((unsigned long)rk_domain->dt);
1536
-
1537
- if (domain->type == IOMMU_DOMAIN_DMA)
1538
- iommu_put_dma_cookie(&rk_domain->domain);
1539
- kfree(rk_domain);
1540
-}
1541
-
1542
-static int rk_iommu_add_device(struct device *dev)
1543
-{
1544
- struct iommu_group *group;
1545
- struct rk_iommu *iommu;
15461398 struct rk_iommudata *data;
1399
+ struct rk_iommu *iommu;
15471400
1548
- data = dev->archdata.iommu;
1401
+ data = dev_iommu_priv_get(dev);
15491402 if (!data)
1550
- return -ENODEV;
1403
+ return ERR_PTR(-ENODEV);
15511404
15521405 iommu = rk_iommu_from_dev(dev);
15531406
1554
- group = iommu_group_get_for_dev(dev);
1555
- if (IS_ERR(group))
1556
- return PTR_ERR(group);
1557
- iommu_group_put(group);
1558
-
1559
- iommu_device_link(&iommu->iommu, dev);
15601407 data->link = device_link_add(dev, iommu->dev,
15611408 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
15621409
....@@ -1566,26 +1413,18 @@
15661413 if (!dev->dma_parms)
15671414 dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
15681415 if (!dev->dma_parms)
1569
- return -ENOMEM;
1416
+ return ERR_PTR(-ENOMEM);
15701417
15711418 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
15721419
1573
- return 0;
1420
+ return &iommu->iommu;
15741421 }
15751422
1576
-static void rk_iommu_remove_device(struct device *dev)
1423
+static void rk_iommu_release_device(struct device *dev)
15771424 {
1578
- struct rk_iommu *iommu;
1579
- struct rk_iommudata *data = dev->archdata.iommu;
1580
-
1581
- iommu = rk_iommu_from_dev(dev);
1582
-
1583
- kfree(dev->dma_parms);
1584
- dev->dma_parms = NULL;
1425
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
15851426
15861427 device_link_del(data->link);
1587
- iommu_device_unlink(&iommu->iommu, dev);
1588
- iommu_group_remove_device(dev);
15891428 }
15901429
15911430 static struct iommu_group *rk_iommu_device_group(struct device *dev)
....@@ -1598,9 +1437,9 @@
15981437 }
15991438
16001439 static bool rk_iommu_is_attach_deferred(struct iommu_domain *domain,
1601
- struct device *dev)
1440
+ struct device *dev)
16021441 {
1603
- struct rk_iommudata *data = dev->archdata.iommu;
1442
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
16041443
16051444 return data->defer_attach;
16061445 }
....@@ -1622,14 +1461,14 @@
16221461 if (strstr(dev_name(dev), "vop"))
16231462 data->defer_attach = true;
16241463
1625
- dev->archdata.iommu = data;
1464
+ dev_iommu_priv_set(dev, data);
16261465
16271466 platform_device_put(iommu_dev);
16281467
16291468 return 0;
16301469 }
16311470
1632
-void rk_iommu_mask_irq(struct device *dev)
1471
+void rockchip_iommu_mask_irq(struct device *dev)
16331472 {
16341473 struct rk_iommu *iommu = rk_iommu_from_dev(dev);
16351474 int i;
....@@ -1640,9 +1479,9 @@
16401479 for (i = 0; i < iommu->num_mmu; i++)
16411480 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
16421481 }
1643
-EXPORT_SYMBOL(rk_iommu_mask_irq);
1482
+EXPORT_SYMBOL(rockchip_iommu_mask_irq);
16441483
1645
-void rk_iommu_unmask_irq(struct device *dev)
1484
+void rockchip_iommu_unmask_irq(struct device *dev)
16461485 {
16471486 struct rk_iommu *iommu = rk_iommu_from_dev(dev);
16481487 int i;
....@@ -1658,7 +1497,7 @@
16581497 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
16591498 }
16601499 }
1661
-EXPORT_SYMBOL(rk_iommu_unmask_irq);
1500
+EXPORT_SYMBOL(rockchip_iommu_unmask_irq);
16621501
16631502 static const struct iommu_ops rk_iommu_ops = {
16641503 .domain_alloc = rk_iommu_domain_alloc,
....@@ -1666,11 +1505,10 @@
16661505 .attach_dev = rk_iommu_attach_device,
16671506 .detach_dev = rk_iommu_detach_device,
16681507 .map = rk_iommu_map,
1669
- .map_sg = default_iommu_map_sg,
16701508 .unmap = rk_iommu_unmap,
16711509 .flush_iotlb_all = rk_iommu_flush_tlb_all,
1672
- .add_device = rk_iommu_add_device,
1673
- .remove_device = rk_iommu_remove_device,
1510
+ .probe_device = rk_iommu_probe_device,
1511
+ .release_device = rk_iommu_release_device,
16741512 .iova_to_phys = rk_iommu_iova_to_phys,
16751513 .is_attach_deferred = rk_iommu_is_attach_deferred,
16761514 .device_group = rk_iommu_device_group,
....@@ -1678,68 +1516,33 @@
16781516 .of_xlate = rk_iommu_of_xlate,
16791517 };
16801518
1681
-static const struct iommu_ops rk_iommu_ops_v2 = {
1682
- .domain_alloc = rk_iommu_domain_alloc,
1683
- .domain_free = rk_iommu_domain_free_v2,
1684
- .attach_dev = rk_iommu_attach_device,
1685
- .detach_dev = rk_iommu_detach_device,
1686
- .map = rk_iommu_map_v2,
1687
- .unmap = rk_iommu_unmap_v2,
1688
- .map_sg = default_iommu_map_sg,
1689
- .flush_iotlb_all = rk_iommu_flush_tlb_all,
1690
- .add_device = rk_iommu_add_device,
1691
- .remove_device = rk_iommu_remove_device,
1692
- .iova_to_phys = rk_iommu_iova_to_phys_v2,
1693
- .is_attach_deferred = rk_iommu_is_attach_deferred,
1694
- .device_group = rk_iommu_device_group,
1695
- .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
1696
- .of_xlate = rk_iommu_of_xlate,
1697
-};
1698
-
1699
-static const struct rockchip_iommu_data iommu_data_v1 = {
1700
- .version = 0x1,
1701
-};
1702
-
1703
-static const struct rockchip_iommu_data iommu_data_v2 = {
1704
- .version = 0x2,
1705
-};
1706
-
1707
-static const struct of_device_id rk_iommu_dt_ids[] = {
1708
- { .compatible = "rockchip,iommu",
1709
- .data = &iommu_data_v1,
1710
- }, {
1711
- .compatible = "rockchip,iommu-v2",
1712
- .data = &iommu_data_v2,
1713
- },
1714
- { /* sentinel */ }
1715
-};
1716
-MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
1717
-
17181519 static int rk_iommu_probe(struct platform_device *pdev)
17191520 {
17201521 struct device *dev = &pdev->dev;
17211522 struct rk_iommu *iommu;
17221523 struct resource *res;
1524
+ const struct rk_iommu_ops *ops;
17231525 int num_res = pdev->num_resources;
1724
- int err, i, irq;
1725
- const struct of_device_id *match;
1726
- struct rockchip_iommu_data *data;
1526
+ int err, i;
17271527
17281528 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
17291529 if (!iommu)
17301530 return -ENOMEM;
17311531
1732
- match = of_match_device(rk_iommu_dt_ids, dev);
1733
- if (!match)
1734
- return -EINVAL;
1735
-
1736
- data = (struct rockchip_iommu_data *)match->data;
1737
- iommu->version = data->version;
1738
- dev_info(dev, "version = %x\n", iommu->version);
1739
-
17401532 platform_set_drvdata(pdev, iommu);
17411533 iommu->dev = dev;
17421534 iommu->num_mmu = 0;
1535
+
1536
+ ops = of_device_get_match_data(dev);
1537
+ if (!rk_ops)
1538
+ rk_ops = ops;
1539
+
1540
+ /*
1541
+ * This should not happen unless different versions of the
1542
+ * hardware block are embedded in the same SoC.
1543
+ */
1544
+ if (WARN_ON(rk_ops != ops))
1545
+ return -EINVAL;
17431546
17441547 iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
17451548 GFP_KERNEL);
....@@ -1758,17 +1561,27 @@
17581561 if (iommu->num_mmu == 0)
17591562 return PTR_ERR(iommu->bases[0]);
17601563
1564
+ iommu->num_irq = platform_irq_count(pdev);
1565
+ if (iommu->num_irq < 0)
1566
+ return iommu->num_irq;
1567
+
17611568 iommu->reset_disabled = device_property_read_bool(dev,
17621569 "rockchip,disable-mmu-reset");
17631570 iommu->skip_read = device_property_read_bool(dev,
17641571 "rockchip,skip-mmu-read");
17651572 iommu->dlr_disable = device_property_read_bool(dev,
17661573 "rockchip,disable-device-link-resume");
1767
-
1574
+ iommu->shootdown_entire = device_property_read_bool(dev,
1575
+ "rockchip,shootdown-entire");
1576
+ iommu->master_handle_irq = device_property_read_bool(dev,
1577
+ "rockchip,master-handle-irq");
17681578 if (of_machine_is_compatible("rockchip,rv1126") ||
17691579 of_machine_is_compatible("rockchip,rv1109"))
17701580 iommu->cmd_retry = device_property_read_bool(dev,
17711581 "rockchip,enable-cmd-retry");
1582
+
1583
+ iommu->need_res_map = device_property_read_bool(dev,
1584
+ "rockchip,reserve-map");
17721585
17731586 /*
17741587 * iommu clocks should be present for all new devices and devicetrees
....@@ -1797,10 +1610,8 @@
17971610 if (err)
17981611 goto err_put_group;
17991612
1800
- if (iommu->version >= 0x2)
1801
- iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops_v2);
1802
- else
1803
- iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
1613
+ iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
1614
+
18041615 iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
18051616
18061617 err = iommu_device_register(&iommu->iommu);
....@@ -1815,31 +1626,39 @@
18151626 if (!dma_dev)
18161627 dma_dev = &pdev->dev;
18171628
1818
- if (iommu->version >= 0x2)
1819
- bus_set_iommu(&platform_bus_type, &rk_iommu_ops_v2);
1820
- else
1821
- bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1629
+ bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
18221630
18231631 pm_runtime_enable(dev);
18241632
18251633 if (iommu->skip_read)
18261634 goto skip_request_irq;
18271635
1828
- i = 0;
1829
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
1830
- if (irq < 0)
1831
- return irq;
1636
+ for (i = 0; i < iommu->num_irq; i++) {
1637
+ int irq = platform_get_irq(pdev, i);
1638
+
1639
+ if (irq < 0) {
1640
+ err = irq;
1641
+ goto err_pm_disable;
1642
+ }
18321643
18331644 err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
18341645 IRQF_SHARED, dev_name(dev), iommu);
1835
- if (err) {
1836
- pm_runtime_disable(dev);
1837
- goto err_remove_sysfs;
1838
- }
1646
+ if (err)
1647
+ goto err_pm_disable;
18391648 }
18401649
18411650 skip_request_irq:
1651
+ if (!res_page && iommu->need_res_map) {
1652
+ res_page = __pa_symbol(reserve_range);
1653
+
1654
+ pr_info("%s,%d, res_page = 0x%pa\n", __func__, __LINE__, &res_page);
1655
+ }
1656
+
1657
+ dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
1658
+
18421659 return 0;
1660
+err_pm_disable:
1661
+ pm_runtime_disable(dev);
18431662 err_remove_sysfs:
18441663 iommu_device_sysfs_remove(&iommu->iommu);
18451664 err_put_group:
....@@ -1852,12 +1671,20 @@
18521671 static void rk_iommu_shutdown(struct platform_device *pdev)
18531672 {
18541673 struct rk_iommu *iommu = platform_get_drvdata(pdev);
1855
- int i = 0, irq;
1674
+ int i;
18561675
1857
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
1676
+ if (iommu->skip_read)
1677
+ goto skip_free_irq;
1678
+
1679
+ for (i = 0; i < iommu->num_irq; i++) {
1680
+ int irq = platform_get_irq(pdev, i);
1681
+
18581682 devm_free_irq(iommu->dev, irq, iommu);
1683
+ }
18591684
1860
- pm_runtime_force_suspend(&pdev->dev);
1685
+skip_free_irq:
1686
+ if (!iommu->dlr_disable)
1687
+ pm_runtime_force_suspend(&pdev->dev);
18611688 }
18621689
18631690 static int __maybe_unused rk_iommu_suspend(struct device *dev)
....@@ -1893,6 +1720,37 @@
18931720 pm_runtime_force_resume)
18941721 };
18951722
1723
+static struct rk_iommu_ops iommu_data_ops_v1 = {
1724
+ .pt_address = &rk_dte_pt_address,
1725
+ .mk_dtentries = &rk_mk_dte,
1726
+ .mk_ptentries = &rk_mk_pte,
1727
+ .dte_addr_phys = &rk_dte_addr_phys,
1728
+ .dma_addr_dte = &rk_dma_addr_dte,
1729
+ .dma_bit_mask = DMA_BIT_MASK(32),
1730
+};
1731
+
1732
+static struct rk_iommu_ops iommu_data_ops_v2 = {
1733
+ .pt_address = &rk_dte_pt_address_v2,
1734
+ .mk_dtentries = &rk_mk_dte_v2,
1735
+ .mk_ptentries = &rk_mk_pte_v2,
1736
+ .dte_addr_phys = &rk_dte_addr_phys_v2,
1737
+ .dma_addr_dte = &rk_dma_addr_dte_v2,
1738
+ .dma_bit_mask = DMA_BIT_MASK(40),
1739
+};
1740
+
1741
+static const struct of_device_id rk_iommu_dt_ids[] = {
1742
+ { .compatible = "rockchip,iommu",
1743
+ .data = &iommu_data_ops_v1,
1744
+ },
1745
+ { .compatible = "rockchip,iommu-v2",
1746
+ .data = &iommu_data_ops_v2,
1747
+ },
1748
+ { .compatible = "rockchip,rk3568-iommu",
1749
+ .data = &iommu_data_ops_v2,
1750
+ },
1751
+ { /* sentinel */ }
1752
+};
1753
+
18961754 static struct platform_driver rk_iommu_driver = {
18971755 .probe = rk_iommu_probe,
18981756 .shutdown = rk_iommu_shutdown,