hc
2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/iommu/rockchip-iommu.c
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * IOMMU API for Rockchip
+ *
+ * Module Authors:	Simon Xue <xxm@rock-chips.com>
+ *			Daniel Kurtz <djkurtz@chromium.org>
  */
 
 #include <linux/clk.h>
@@ -18,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_iommu.h>
 #include <linux/of_platform.h>
@@ -84,30 +87,6 @@
  */
 #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
 
-#define DT_LO_MASK 0xfffff000
-#define DT_HI_MASK GENMASK_ULL(39, 32)
-#define DT_SHIFT 28
-
-#define DTE_BASE_HI_MASK GENMASK(11, 4)
-
-#define PAGE_DESC_LO_MASK 0xfffff000
-#define PAGE_DESC_HI1_LOWER 32
-#define PAGE_DESC_HI1_UPPER 35
-#define PAGE_DESC_HI2_LOWER 36
-#define PAGE_DESC_HI2_UPPER 39
-#define PAGE_DESC_HI_MASK1 GENMASK_ULL(PAGE_DESC_HI1_UPPER, PAGE_DESC_HI1_LOWER)
-#define PAGE_DESC_HI_MASK2 GENMASK_ULL(PAGE_DESC_HI2_UPPER, PAGE_DESC_HI2_LOWER)
-
-#define DTE_HI1_LOWER 8
-#define DTE_HI1_UPPER 11
-#define DTE_HI2_LOWER 4
-#define DTE_HI2_UPPER 7
-#define DTE_HI_MASK1 GENMASK(DTE_HI1_UPPER, DTE_HI1_LOWER)
-#define DTE_HI_MASK2 GENMASK(DTE_HI2_UPPER, DTE_HI2_LOWER)
-
-#define PAGE_DESC_HI_SHIFT1 (PAGE_DESC_HI1_LOWER - DTE_HI1_LOWER)
-#define PAGE_DESC_HI_SHIFT2 (PAGE_DESC_HI2_LOWER - DTE_HI2_LOWER)
-
 struct rk_iommu_domain {
 	struct list_head iommus;
 	u32 *dt; /* page directory table */
@@ -119,18 +98,24 @@
 	struct iommu_domain domain;
 };
 
-struct rockchip_iommu_data {
-	u32 version;
+struct rk_iommu_ops {
+	phys_addr_t (*pt_address)(u32 dte);
+	u32 (*mk_dtentries)(dma_addr_t pt_dma);
+	u32 (*mk_ptentries)(phys_addr_t page, int prot);
+	phys_addr_t (*dte_addr_phys)(u32 addr);
+	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
+	u64 dma_bit_mask;
 };
 
 struct rk_iommu {
 	struct device *dev;
 	void __iomem **bases;
 	int num_mmu;
+	int num_irq;
 	struct clk_bulk_data *clocks;
 	int num_clocks;
 	bool reset_disabled;
-	bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
+	bool skip_read;	/* rk3126/rk3128 can't read vop iommu registers */
 	bool dlr_disable; /* avoid access iommu when runtime ops called */
 	bool cmd_retry;
 	bool master_handle_irq;
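Note: the rk_iommu_ops table added above replaces the per-call `iommu->version >= 0x2` checks that the rest of this patch removes. Each hardware revision supplies its own descriptor-packing helpers, and the hot paths call through a single file-scope pointer. A minimal sketch of the call pattern (illustrative only; `rk_ops` and the helper names are the ones introduced later in this patch, and the surrounding driver code is elided):

	/* Sketch, not part of the patch: rk_iommu_probe() latches the OF match
	 * data into rk_ops once, and every translation path resolves the page
	 * table layout through it instead of testing a version field.
	 */
	static const struct rk_iommu_ops *rk_ops;

	static phys_addr_t dte_to_pt_phys(u32 dte)
	{
		/* rk_dte_pt_address() on v1 hardware, rk_dte_pt_address_v2() on v2 */
		return rk_ops->pt_address(dte);
	}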
@@ -138,8 +123,8 @@
 	struct list_head node; /* entry in rk_iommu_domain.iommus */
 	struct iommu_domain *domain; /* domain to which iommu is attached */
 	struct iommu_group *group;
-	u32 version;
 	bool shootdown_entire;
+	bool iommu_enabled;
 	bool need_res_map;
 };
 
@@ -150,6 +135,7 @@
 };
 
 static struct device *dma_dev;
+static const struct rk_iommu_ops *rk_ops;
 static struct rk_iommu *rk_iommu_from_dev(struct device *dev);
 static char reserve_range[PAGE_SIZE] __aligned(PAGE_SIZE);
 static phys_addr_t res_page;
@@ -211,6 +197,11 @@
 #define RK_DTE_PT_ADDRESS_MASK 0xfffff000
 #define RK_DTE_PT_VALID BIT(0)
 
+static inline phys_addr_t rk_dte_pt_address(u32 dte)
+{
+	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
+}
+
 /*
  * In v2:
  * 31:12 - PT address bit 31:0
@@ -219,20 +210,21 @@
  * 3: 1 - Reserved
 * 0 - 1 if PT @ PT address is valid
  */
-#define RK_DTE_PT_ADDRESS_MASK_V2 0xfffffff0
-
-static inline phys_addr_t rk_dte_pt_address(u32 dte)
-{
-	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
-}
+#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
+#define DTE_HI_MASK1 GENMASK(11, 8)
+#define DTE_HI_MASK2 GENMASK(7, 4)
+#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */
+#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */
+#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32)
+#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36)
 
 static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
 {
 	u64 dte_v2 = dte;
 
-	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) |
-		 ((dte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
-		 (dte_v2 & PAGE_DESC_LO_MASK);
+	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
+		 ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
		 (dte_v2 & RK_DTE_PT_ADDRESS_MASK);
 
 	return (phys_addr_t)dte_v2;
 }
@@ -249,9 +241,9 @@
 
 static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
 {
-	pt_dma = (pt_dma & PAGE_DESC_LO_MASK) |
-		 ((pt_dma & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
-		 (pt_dma & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2;
+	pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
+		 ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
+		 (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;
 
 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
 }
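Note: the v2 packing above folds a 40-bit page-table address into a 32-bit descriptor: bits 31:12 stay in place, bits 35:32 move down by 24 into DTE bits 11:8, and bits 39:36 move down by 32 into DTE bits 7:4. The following self-contained user-space C mirrors that arithmetic with the masks written out (illustration only, not driver code; the real helpers use the GENMASK macros and additionally mask with RK_DTE_PT_ADDRESS_MASK_V2):

	#include <stdint.h>
	#include <assert.h>

	static uint32_t mk_dte_v2(uint64_t pt_dma)
	{
		return (uint32_t)((pt_dma & 0xfffff000) |		/* bits 31:12 kept   */
				  ((pt_dma & 0xf00000000ULL) >> 24) |	/* 35:32 -> DTE 11:8 */
				  ((pt_dma & 0xf000000000ULL) >> 32) |	/* 39:36 -> DTE 7:4  */
				  0x1);					/* RK_DTE_PT_VALID   */
	}

	static uint64_t dte_pt_address_v2(uint32_t dte)
	{
		return ((uint64_t)(dte & 0x0f0) << 32) |	/* DTE 7:4  -> 39:36 */
		       ((uint64_t)(dte & 0xf00) << 24) |	/* DTE 11:8 -> 35:32 */
		       (dte & 0xfffff000);
	}

	int main(void)
	{
		/* a table at physical 0xab_cdef_1000 packs to DTE 0xcdef1ba1 and back */
		assert(mk_dte_v2(0xabcdef1000ULL) == 0xcdef1ba1);
		assert(dte_pt_address_v2(0xcdef1ba1) == 0xabcdef1000ULL);
		return 0;
	}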
@@ -282,43 +274,12 @@
 #define RK_PTE_PAGE_READABLE BIT(1)
 #define RK_PTE_PAGE_VALID BIT(0)
 
-/*
- * In v2:
- * 31:12 - Page address bit 31:0
- * 11:9 - Page address bit 34:32
- * 8:4 - Page address bit 39:35
- * 3 - Security
- * 2 - Readable
- * 1 - Writable
- * 0 - 1 if Page @ Page address is valid
- */
-#define RK_PTE_PAGE_ADDRESS_MASK_V2 0xfffffff0
-#define RK_PTE_PAGE_FLAGS_MASK_V2 0x0000000e
-#define RK_PTE_PAGE_READABLE_V2 BIT(2)
-#define RK_PTE_PAGE_WRITABLE_V2 BIT(1)
-
-#define RK_PTE_PAGE_REPRESENT BIT(3)
-
-static inline phys_addr_t rk_pte_page_address(u32 pte)
-{
-	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
-}
-
-static inline phys_addr_t rk_pte_page_address_v2(u32 pte)
-{
-	u64 pte_v2 = pte;
-
-	pte_v2 = ((pte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) |
-		 ((pte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
-		 (pte_v2 & PAGE_DESC_LO_MASK);
-
-	return (phys_addr_t)pte_v2;
-}
-
 static inline bool rk_pte_is_page_valid(u32 pte)
 {
 	return pte & RK_PTE_PAGE_VALID;
 }
+
+#define RK_PTE_PAGE_REPRESENT BIT(3)
 
 static inline bool rk_pte_is_page_represent(u32 pte)
 {
@@ -329,11 +290,9 @@
 static u32 rk_mk_pte(phys_addr_t page, int prot)
 {
 	u32 flags = 0;
-
 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
 	flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
-
 	page &= RK_PTE_PAGE_ADDRESS_MASK;
 	return page | flags | RK_PTE_PAGE_VALID;
 }
@@ -342,19 +301,16 @@
 {
 	u32 flags = 0;
 
-	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
-	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
 	/* If BIT(3) set, don't break iommu_map if BIT(0) set.
 	 * Means we can reupdate a page that already presented. We can use
 	 * this bit to reupdate a pre-mapped 4G range.
 	 */
 	flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
 
-	page = (page & PAGE_DESC_LO_MASK) |
-	       ((page & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
-	       (page & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2;
-	page &= RK_PTE_PAGE_ADDRESS_MASK_V2;
-	return page | flags | RK_PTE_PAGE_VALID;
+	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+
+	return rk_mk_dte_v2(page) | flags;
 }
 
 static u32 rk_mk_pte_invalid(u32 pte)
@@ -600,12 +556,17 @@
 	return ret;
 }
 
+static u32 rk_iommu_read_dte_addr(void __iomem *base)
+{
+	return rk_iommu_read(base, RK_MMU_DTE_ADDR);
+}
+
 static int rk_iommu_force_reset(struct rk_iommu *iommu)
 {
 	int ret, i;
 	u32 dte_addr;
 	bool val;
-	u32 address_mask;
+	u32 dte_address_mask;
 
 	if (iommu->reset_disabled)
 		return 0;
@@ -622,14 +583,13 @@
 	 * In v2: upper 7 nybbles are read back.
 	 */
 	for (i = 0; i < iommu->num_mmu; i++) {
-		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+		dte_address_mask = rk_ops->pt_address(DTE_ADDR_DUMMY);
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_address_mask);
 
-		if (iommu->version >= 0x2)
-			address_mask = RK_DTE_PT_ADDRESS_MASK_V2;
-		else
-			address_mask = RK_DTE_PT_ADDRESS_MASK;
-		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
-		if (dte_addr != (DTE_ADDR_DUMMY & address_mask)) {
+		ret = readx_poll_timeout(rk_iommu_read_dte_addr, iommu->bases[i], dte_addr,
+					 dte_addr == dte_address_mask,
+					 RK_MMU_POLL_PERIOD_US, RK_MMU_POLL_TIMEOUT_US);
+		if (ret) {
 			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
 			return -EFAULT;
 		}
@@ -641,14 +601,41 @@
 		return 0;
 
 	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
-				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
-				 RK_MMU_POLL_TIMEOUT_US);
+				 val, RK_MMU_POLL_TIMEOUT_US,
+				 RK_MMU_FORCE_RESET_TIMEOUT_US);
 	if (ret) {
 		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
 		return ret;
 	}
 
 	return 0;
+}
+
+static inline phys_addr_t rk_dte_addr_phys(u32 addr)
+{
+	return (phys_addr_t)addr;
+}
+
+static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
+{
+	return dt_dma;
+}
+
+#define DT_HI_MASK GENMASK_ULL(39, 32)
+#define DTE_BASE_HI_MASK GENMASK(11, 4)
+#define DT_SHIFT 28
+
+static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
+{
+	u64 addr64 = addr;
+	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
+	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
+}
+
+static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
+{
+	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
+	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
 }
 
 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
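Note: the rk_dte_addr_phys_v2()/rk_dma_addr_dte_v2() helpers added at the end of this hunk apply the same trick to the MMU_DTE_ADDR register: bits 39:32 of the directory-table base are carried in register bits 11:4, i.e. shifted by DT_SHIFT (28). A worked example with an assumed address:

	/*
	 * Illustration only (address value assumed, not taken from the patch):
	 *
	 *   dt_dma = 0xab12345000 (40-bit directory table base)
	 *
	 *   rk_dma_addr_dte_v2(dt_dma)
	 *     = (dt_dma & RK_DTE_PT_ADDRESS_MASK) | ((dt_dma & GENMASK_ULL(39, 32)) >> 28)
	 *     = 0x12345000 | 0xab0
	 *     = 0x12345ab0           value programmed into MMU_DTE_ADDR
	 *
	 *   rk_dte_addr_phys_v2(0x12345ab0)
	 *     = (0x12345ab0 & RK_DTE_PT_ADDRESS_MASK) | ((0x12345ab0 & GENMASK(11, 4)) << 28)
	 *     = 0x12345000 | 0xab00000000
	 *     = 0xab12345000         original 40-bit address recovered
	 */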
@@ -670,11 +657,7 @@
 	page_offset = rk_iova_page_offset(iova);
 
 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
-	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
-	if (iommu->version >= 0x2) {
-		mmu_dte_addr_phys = (mmu_dte_addr_phys & DT_LO_MASK) |
-			((mmu_dte_addr_phys & DTE_BASE_HI_MASK) << DT_SHIFT);
-	}
+	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
 
 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
 	dte_addr = phys_to_virt(dte_addr_phys);
@@ -683,20 +666,14 @@
 	if (!rk_dte_is_pt_valid(dte))
 		goto print_it;
 
-	if (iommu->version >= 0x2)
-		pte_addr_phys = rk_dte_pt_address_v2(dte) + (pte_index * 4);
-	else
-		pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+	pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
 	pte_addr = phys_to_virt(pte_addr_phys);
 	pte = *pte_addr;
 
 	if (!rk_pte_is_page_valid(pte))
 		goto print_it;
 
-	if (iommu->version >= 0x2)
-		page_addr_phys = rk_pte_page_address_v2(pte) + page_offset;
-	else
-		page_addr_phys = rk_pte_page_address(pte) + page_offset;
+	page_addr_phys = rk_ops->pt_address(pte) + page_offset;
 	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
 
 print_it:
@@ -835,41 +812,13 @@
 	if (!rk_dte_is_pt_valid(dte))
 		goto out;
 
-	pt_phys = rk_dte_pt_address(dte);
+	pt_phys = rk_ops->pt_address(dte);
 	page_table = (u32 *)phys_to_virt(pt_phys);
 	pte = page_table[rk_iova_pte_index(iova)];
 	if (!rk_pte_is_page_valid(pte))
 		goto out;
 
-	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
-out:
-	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
-
-	return phys;
-}
-
-static phys_addr_t rk_iommu_iova_to_phys_v2(struct iommu_domain *domain,
-					    dma_addr_t iova)
-{
-	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
-	phys_addr_t pt_phys, phys = 0;
-	u32 dte, pte;
-	u32 *page_table;
-
-	spin_lock_irqsave(&rk_domain->dt_lock, flags);
-
-	dte = rk_domain->dt[rk_iova_dte_index(iova)];
-	if (!rk_dte_is_pt_valid(dte))
-		goto out;
-
-	pt_phys = rk_dte_pt_address_v2(dte);
-	page_table = (u32 *)phys_to_virt(pt_phys);
-	pte = page_table[rk_iova_pte_index(iova)];
-	if (!rk_pte_is_page_valid(pte))
-		goto out;
-
-	phys = rk_pte_page_address_v2(pte) + rk_iova_page_offset(iova);
+	phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
 out:
 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
 
@@ -945,52 +894,13 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	dte = rk_mk_dte(pt_dma);
+	dte = rk_ops->mk_dtentries(pt_dma);
 	*dte_addr = dte;
 
-	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
 	rk_table_flush(rk_domain,
 		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
 done:
-	pt_phys = rk_dte_pt_address(dte);
-	return (u32 *)phys_to_virt(pt_phys);
-}
-
-static u32 *rk_dte_get_page_table_v2(struct rk_iommu_domain *rk_domain,
-				     dma_addr_t iova)
-{
-	u32 *page_table, *dte_addr;
-	u32 dte_index, dte;
-	phys_addr_t pt_phys;
-	dma_addr_t pt_dma;
-
-	assert_spin_locked(&rk_domain->dt_lock);
-
-	dte_index = rk_iova_dte_index(iova);
-	dte_addr = &rk_domain->dt[dte_index];
-	dte = *dte_addr;
-	if (rk_dte_is_pt_valid(dte))
-		goto done;
-
-	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
-	if (!page_table)
-		return ERR_PTR(-ENOMEM);
-
-	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
-	if (dma_mapping_error(dma_dev, pt_dma)) {
-		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
-		free_page((unsigned long)page_table);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	dte = rk_mk_dte_v2(pt_dma);
-	*dte_addr = dte;
-
-	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
-	rk_table_flush(rk_domain,
-		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
-done:
-	pt_phys = rk_dte_pt_address_v2(dte);
+	pt_phys = rk_ops->pt_address(dte);
 	return (u32 *)phys_to_virt(pt_phys);
 }
 
@@ -1009,15 +919,11 @@
 		if (!rk_pte_is_page_valid(pte))
 			break;
 
-		if (iommu && iommu->need_res_map) {
-			if (iommu->version >= 0x2)
-				pte_addr[pte_count] = rk_mk_pte_v2(res_page,
+		if (iommu && iommu->need_res_map)
+			pte_addr[pte_count] = rk_ops->mk_ptentries(res_page,
								   prot);
-			else
-				pte_addr[pte_count] = rk_mk_pte(res_page, prot);
-		} else {
+		else
 			pte_addr[pte_count] = rk_mk_pte_invalid(pte);
-		}
 	}
 
 	rk_table_flush(rk_domain, pte_dma, pte_count);
@@ -1038,6 +944,7 @@
 			break;
 	}
 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
 	return iommu;
 }
 
@@ -1058,9 +965,10 @@
 			goto unwind;
 
 		if (prot & IOMMU_PRIV) {
-			pte_addr[pte_count] = rk_mk_pte(res_page, prot);
+			pte_addr[pte_count] = rk_ops->mk_ptentries(res_page, prot);
 		} else {
-			pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+			pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);
+
 			paddr += SPAGE_SIZE;
 		}
 	}
@@ -1082,55 +990,7 @@
 			    pte_count * SPAGE_SIZE, NULL);
 
 	iova += pte_count * SPAGE_SIZE;
-	page_phys = rk_pte_page_address(pte_addr[pte_count]);
-	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
-	       &iova, &page_phys, &paddr, prot);
-
-	return -EADDRINUSE;
-}
-
-static int rk_iommu_map_iova_v2(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
-				dma_addr_t pte_dma, dma_addr_t iova,
-				phys_addr_t paddr, size_t size, int prot)
-{
-	unsigned int pte_count;
-	unsigned int pte_total = size / SPAGE_SIZE;
-	phys_addr_t page_phys;
-
-	assert_spin_locked(&rk_domain->dt_lock);
-
-	for (pte_count = 0; pte_count < pte_total; pte_count++) {
-		u32 pte = pte_addr[pte_count];
-
-		if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
-			goto unwind;
-
-		if (prot & IOMMU_PRIV) {
-			pte_addr[pte_count] = rk_mk_pte_v2(res_page, prot);
-		} else {
-			pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);
-			paddr += SPAGE_SIZE;
-		}
-	}
-
-	rk_table_flush(rk_domain, pte_dma, pte_total);
-
-	/*
-	 * Zap the first and last iova to evict from iotlb any previously
-	 * mapped cachelines holding stale values for its dte and pte.
-	 * We only zap the first and last iova, since only they could have
-	 * dte or pte shared with an existing mapping.
-	 */
-	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
-
-	return 0;
-unwind:
-	/* Unmap the range of iovas that we just mapped */
-	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
-			    pte_count * SPAGE_SIZE, NULL);
-
-	iova += pte_count * SPAGE_SIZE;
-	page_phys = rk_pte_page_address_v2(pte_addr[pte_count]);
+	page_phys = rk_ops->pt_address(pte_addr[pte_count]);
 	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
 	       &iova, &page_phys, &paddr, prot);
 
@@ -1138,7 +998,7 @@
 }
 
 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
-			phys_addr_t paddr, size_t size, int prot)
+			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
@@ -1165,7 +1025,7 @@
 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
 	pte_index = rk_iova_pte_index(iova);
 	pte_addr = &page_table[pte_index];
-	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
+	pte_dma = rk_ops->pt_address(dte) + pte_index * sizeof(u32);
 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
 				paddr, size, prot);
 
@@ -1174,45 +1034,8 @@
 	return ret;
 }
 
-static int rk_iommu_map_v2(struct iommu_domain *domain, unsigned long _iova,
-			   phys_addr_t paddr, size_t size, int prot)
-{
-	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
-	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
-	u32 *page_table, *pte_addr;
-	u32 dte, pte_index;
-	int ret;
-
-	spin_lock_irqsave(&rk_domain->dt_lock, flags);
-
-	/*
-	 * pgsize_bitmap specifies iova sizes that fit in one page table
-	 * (1024 4-KiB pages = 4 MiB).
-	 * So, size will always be 4096 <= size <= 4194304.
-	 * Since iommu_map() guarantees that both iova and size will be
-	 * aligned, we will always only be mapping from a single dte here.
-	 */
-	page_table = rk_dte_get_page_table_v2(rk_domain, iova);
-	if (IS_ERR(page_table)) {
-		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
-		return PTR_ERR(page_table);
-	}
-
-	dte = rk_domain->dt[rk_iova_dte_index(iova)];
-	pte_index = rk_iova_pte_index(iova);
-	pte_addr = &page_table[pte_index];
-	pte_dma = rk_dte_pt_address_v2(dte) + pte_index * sizeof(u32);
-	ret = rk_iommu_map_iova_v2(rk_domain, pte_addr, pte_dma, iova,
-				   paddr, size, prot);
-
-	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
-
-	return ret;
-}
-
 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
-			     size_t size)
+			     size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
@@ -1239,49 +1062,7 @@
 		return 0;
 	}
 
-	pt_phys = rk_dte_pt_address(dte);
-	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
-	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
-	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
-					 iommu);
-
-	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
-
-	/* Shootdown iotlb entries for iova range that was just unmapped */
-	rk_iommu_zap_iova(rk_domain, iova, unmap_size);
-
-	return unmap_size;
-}
-
-static size_t rk_iommu_unmap_v2(struct iommu_domain *domain, unsigned long _iova,
-				size_t size)
-{
-	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
-	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
-	phys_addr_t pt_phys;
-	u32 dte;
-	u32 *pte_addr;
-	size_t unmap_size;
-	struct rk_iommu *iommu = rk_iommu_get(rk_domain);
-
-	spin_lock_irqsave(&rk_domain->dt_lock, flags);
-
-	/*
-	 * pgsize_bitmap specifies iova sizes that fit in one page table
-	 * (1024 4-KiB pages = 4 MiB).
-	 * So, size will always be 4096 <= size <= 4194304.
-	 * Since iommu_unmap() guarantees that both iova and size will be
-	 * aligned, we will always only be unmapping from a single dte here.
-	 */
-	dte = rk_domain->dt[rk_iova_dte_index(iova)];
-	/* Just return 0 if iova is unmapped */
-	if (!rk_dte_is_pt_valid(dte)) {
-		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
-		return 0;
-	}
-
-	pt_phys = rk_dte_pt_address_v2(dte);
+	pt_phys = rk_ops->pt_address(dte);
 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
 	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
@@ -1326,7 +1107,7 @@
 
 static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
 {
-	struct rk_iommudata *data = dev->archdata.iommu;
+	struct rk_iommudata *data = dev_iommu_priv_get(dev);
 
 	return data ? data->iommu : NULL;
 }
@@ -1346,6 +1127,8 @@
 	}
 	rk_iommu_disable_stall(iommu);
 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+
+	iommu->iommu_enabled = false;
 }
 
 int rockchip_iommu_disable(struct device *dev)
@@ -1368,7 +1151,6 @@
 	struct iommu_domain *domain = iommu->domain;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	int ret, i;
-	u32 dt_v2;
 	u32 auto_gate;
 
 	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
@@ -1384,14 +1166,8 @@
 		goto out_disable_stall;
 
 	for (i = 0; i < iommu->num_mmu; i++) {
-		if (iommu->version >= 0x2) {
-			dt_v2 = (rk_domain->dt_dma & DT_LO_MASK) |
-				((rk_domain->dt_dma & DT_HI_MASK) >> DT_SHIFT);
-			rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dt_v2);
-		} else {
-			rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
-				       rk_domain->dt_dma);
-		}
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
 
@@ -1407,6 +1183,10 @@
 	rk_iommu_disable_stall(iommu);
 out_disable_clocks:
 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+
+	if (!ret)
+		iommu->iommu_enabled = true;
+
 	return ret;
 }
 
@@ -1430,7 +1210,7 @@
 	if (!iommu)
 		return false;
 
-	return rk_iommu_is_paging_enabled(iommu);
+	return iommu->iommu_enabled;
 }
 EXPORT_SYMBOL(rockchip_iommu_is_enabled);
 
@@ -1471,8 +1251,7 @@
 
 	dev_dbg(dev, "Detaching from iommu domain\n");
 
-	/* iommu already detached */
-	if (iommu->domain != domain)
+	if (!iommu->domain)
 		return;
 
 	iommu->domain = NULL;
@@ -1506,10 +1285,6 @@
 		return 0;
 
 	dev_dbg(dev, "Attaching to iommu domain\n");
-
-	/* iommu already attached */
-	if (iommu->domain == domain)
-		return 0;
 
 	if (iommu->domain)
 		rk_iommu_detach_device(iommu->domain, dev);
@@ -1572,8 +1347,6 @@
 		goto err_free_dt;
 	}
 
-	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
-
 	spin_lock_init(&rk_domain->iommus_lock);
 	spin_lock_init(&rk_domain->dt_lock);
 	INIT_LIST_HEAD(&rk_domain->iommus);
@@ -1605,7 +1378,7 @@
 	for (i = 0; i < NUM_DT_ENTRIES; i++) {
 		u32 dte = rk_domain->dt[i];
 		if (rk_dte_is_pt_valid(dte)) {
-			phys_addr_t pt_phys = rk_dte_pt_address(dte);
+			phys_addr_t pt_phys = rk_ops->pt_address(dte);
 			u32 *page_table = phys_to_virt(pt_phys);
 			dma_unmap_single(dma_dev, pt_phys,
 					 SPAGE_SIZE, DMA_TO_DEVICE);
@@ -1617,58 +1390,20 @@
 			 SPAGE_SIZE, DMA_TO_DEVICE);
 	free_page((unsigned long)rk_domain->dt);
 
-	if (domain->type == IOMMU_DOMAIN_DMA)
-		iommu_put_dma_cookie(&rk_domain->domain);
 	kfree(rk_domain);
 }
 
-static void rk_iommu_domain_free_v2(struct iommu_domain *domain)
+static struct iommu_device *rk_iommu_probe_device(struct device *dev)
 {
-	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	int i;
-
-	WARN_ON(!list_empty(&rk_domain->iommus));
-
-	for (i = 0; i < NUM_DT_ENTRIES; i++) {
-		u32 dte = rk_domain->dt[i];
-
-		if (rk_dte_is_pt_valid(dte)) {
-			phys_addr_t pt_phys = rk_dte_pt_address_v2(dte);
-			u32 *page_table = phys_to_virt(pt_phys);
-
-			dma_unmap_single(dma_dev, pt_phys,
-					 SPAGE_SIZE, DMA_TO_DEVICE);
-			free_page((unsigned long)page_table);
-		}
-	}
-
-	dma_unmap_single(dma_dev, rk_domain->dt_dma,
-			 SPAGE_SIZE, DMA_TO_DEVICE);
-	free_page((unsigned long)rk_domain->dt);
-
-	if (domain->type == IOMMU_DOMAIN_DMA)
-		iommu_put_dma_cookie(&rk_domain->domain);
-	kfree(rk_domain);
-}
-
-static int rk_iommu_add_device(struct device *dev)
-{
-	struct iommu_group *group;
-	struct rk_iommu *iommu;
 	struct rk_iommudata *data;
+	struct rk_iommu *iommu;
 
-	data = dev->archdata.iommu;
+	data = dev_iommu_priv_get(dev);
 	if (!data)
-		return -ENODEV;
+		return ERR_PTR(-ENODEV);
 
 	iommu = rk_iommu_from_dev(dev);
 
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-	iommu_group_put(group);
-
-	iommu_device_link(&iommu->iommu, dev);
 	data->link = device_link_add(dev, iommu->dev,
 				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 
@@ -1678,26 +1413,18 @@
 	if (!dev->dma_parms)
 		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
 	if (!dev->dma_parms)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
 
-	return 0;
+	return &iommu->iommu;
 }
 
-static void rk_iommu_remove_device(struct device *dev)
+static void rk_iommu_release_device(struct device *dev)
 {
-	struct rk_iommu *iommu;
-	struct rk_iommudata *data = dev->archdata.iommu;
-
-	iommu = rk_iommu_from_dev(dev);
-
-	kfree(dev->dma_parms);
-	dev->dma_parms = NULL;
+	struct rk_iommudata *data = dev_iommu_priv_get(dev);
 
 	device_link_del(data->link);
-	iommu_device_unlink(&iommu->iommu, dev);
-	iommu_group_remove_device(dev);
 }
 
 static struct iommu_group *rk_iommu_device_group(struct device *dev)
@@ -1710,9 +1437,9 @@
 }
 
 static bool rk_iommu_is_attach_deferred(struct iommu_domain *domain,
-					 struct device *dev)
+					struct device *dev)
 {
-	struct rk_iommudata *data = dev->archdata.iommu;
+	struct rk_iommudata *data = dev_iommu_priv_get(dev);
 
 	return data->defer_attach;
 }
@@ -1734,14 +1461,14 @@
 	if (strstr(dev_name(dev), "vop"))
 		data->defer_attach = true;
 
-	dev->archdata.iommu = data;
+	dev_iommu_priv_set(dev, data);
 
 	platform_device_put(iommu_dev);
 
 	return 0;
 }
 
-void rk_iommu_mask_irq(struct device *dev)
+void rockchip_iommu_mask_irq(struct device *dev)
 {
 	struct rk_iommu *iommu = rk_iommu_from_dev(dev);
 	int i;
@@ -1752,9 +1479,9 @@
 	for (i = 0; i < iommu->num_mmu; i++)
 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
 }
-EXPORT_SYMBOL(rk_iommu_mask_irq);
+EXPORT_SYMBOL(rockchip_iommu_mask_irq);
 
-void rk_iommu_unmask_irq(struct device *dev)
+void rockchip_iommu_unmask_irq(struct device *dev)
 {
 	struct rk_iommu *iommu = rk_iommu_from_dev(dev);
 	int i;
@@ -1770,7 +1497,7 @@
 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
 	}
 }
-EXPORT_SYMBOL(rk_iommu_unmask_irq);
+EXPORT_SYMBOL(rockchip_iommu_unmask_irq);
 
 static const struct iommu_ops rk_iommu_ops = {
 	.domain_alloc = rk_iommu_domain_alloc,
@@ -1778,11 +1505,10 @@
 	.attach_dev = rk_iommu_attach_device,
 	.detach_dev = rk_iommu_detach_device,
 	.map = rk_iommu_map,
-	.map_sg = default_iommu_map_sg,
 	.unmap = rk_iommu_unmap,
 	.flush_iotlb_all = rk_iommu_flush_tlb_all,
-	.add_device = rk_iommu_add_device,
-	.remove_device = rk_iommu_remove_device,
+	.probe_device = rk_iommu_probe_device,
+	.release_device = rk_iommu_release_device,
 	.iova_to_phys = rk_iommu_iova_to_phys,
 	.is_attach_deferred = rk_iommu_is_attach_deferred,
 	.device_group = rk_iommu_device_group,
@@ -1790,68 +1516,33 @@
 	.of_xlate = rk_iommu_of_xlate,
 };
 
-static const struct iommu_ops rk_iommu_ops_v2 = {
-	.domain_alloc = rk_iommu_domain_alloc,
-	.domain_free = rk_iommu_domain_free_v2,
-	.attach_dev = rk_iommu_attach_device,
-	.detach_dev = rk_iommu_detach_device,
-	.map = rk_iommu_map_v2,
-	.unmap = rk_iommu_unmap_v2,
-	.map_sg = default_iommu_map_sg,
-	.flush_iotlb_all = rk_iommu_flush_tlb_all,
-	.add_device = rk_iommu_add_device,
-	.remove_device = rk_iommu_remove_device,
-	.iova_to_phys = rk_iommu_iova_to_phys_v2,
-	.is_attach_deferred = rk_iommu_is_attach_deferred,
-	.device_group = rk_iommu_device_group,
-	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
-	.of_xlate = rk_iommu_of_xlate,
-};
-
-static const struct rockchip_iommu_data iommu_data_v1 = {
-	.version = 0x1,
-};
-
-static const struct rockchip_iommu_data iommu_data_v2 = {
-	.version = 0x2,
-};
-
-static const struct of_device_id rk_iommu_dt_ids[] = {
-	{ .compatible = "rockchip,iommu",
-	  .data = &iommu_data_v1,
-	}, {
-	  .compatible = "rockchip,iommu-v2",
-	  .data = &iommu_data_v2,
-	},
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
-
 static int rk_iommu_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rk_iommu *iommu;
 	struct resource *res;
+	const struct rk_iommu_ops *ops;
 	int num_res = pdev->num_resources;
-	int err, i, irq;
-	const struct of_device_id *match;
-	struct rockchip_iommu_data *data;
+	int err, i;
 
 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
 		return -ENOMEM;
 
-	match = of_match_device(rk_iommu_dt_ids, dev);
-	if (!match)
-		return -EINVAL;
-
-	data = (struct rockchip_iommu_data *)match->data;
-	iommu->version = data->version;
-	dev_info(dev, "version = %x\n", iommu->version);
-
 	platform_set_drvdata(pdev, iommu);
 	iommu->dev = dev;
 	iommu->num_mmu = 0;
+
+	ops = of_device_get_match_data(dev);
+	if (!rk_ops)
+		rk_ops = ops;
+
+	/*
+	 * That should not happen unless different versions of the
	 * hardware block are embedded the same SoC
+	 */
+	if (WARN_ON(rk_ops != ops))
+		return -EINVAL;
 
 	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
 				    GFP_KERNEL);
@@ -1870,6 +1561,10 @@
 	if (iommu->num_mmu == 0)
 		return PTR_ERR(iommu->bases[0]);
 
+	iommu->num_irq = platform_irq_count(pdev);
+	if (iommu->num_irq < 0)
+		return iommu->num_irq;
+
 	iommu->reset_disabled = device_property_read_bool(dev,
 					"rockchip,disable-mmu-reset");
 	iommu->skip_read = device_property_read_bool(dev,
@@ -1884,6 +1579,7 @@
 	    of_machine_is_compatible("rockchip,rv1109"))
 		iommu->cmd_retry = device_property_read_bool(dev,
 					"rockchip,enable-cmd-retry");
+
 	iommu->need_res_map = device_property_read_bool(dev,
 					"rockchip,reserve-map");
 
@@ -1914,10 +1610,8 @@
 	if (err)
 		goto err_put_group;
 
-	if (iommu->version >= 0x2)
-		iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops_v2);
-	else
-		iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
+	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
+
 	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
 
 	err = iommu_device_register(&iommu->iommu);
@@ -1932,35 +1626,39 @@
 	if (!dma_dev)
 		dma_dev = &pdev->dev;
 
-	if (iommu->version >= 0x2)
-		bus_set_iommu(&platform_bus_type, &rk_iommu_ops_v2);
-	else
-		bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
 
 	pm_runtime_enable(dev);
 
 	if (iommu->skip_read)
 		goto skip_request_irq;
 
-	i = 0;
-	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
-		if (irq < 0)
-			return irq;
+	for (i = 0; i < iommu->num_irq; i++) {
+		int irq = platform_get_irq(pdev, i);
+
+		if (irq < 0) {
+			err = irq;
+			goto err_pm_disable;
+		}
 
 		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
 				       IRQF_SHARED, dev_name(dev), iommu);
-		if (err) {
-			pm_runtime_disable(dev);
-			goto err_remove_sysfs;
-		}
+		if (err)
+			goto err_pm_disable;
 	}
 
 skip_request_irq:
 	if (!res_page && iommu->need_res_map) {
 		res_page = __pa_symbol(reserve_range);
+
 		pr_info("%s,%d, res_page = 0x%pa\n", __func__, __LINE__, &res_page);
 	}
+
+	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
+
 	return 0;
+err_pm_disable:
+	pm_runtime_disable(dev);
 err_remove_sysfs:
 	iommu_device_sysfs_remove(&iommu->iommu);
 err_put_group:
@@ -1973,16 +1671,20 @@
 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
 	struct rk_iommu *iommu = platform_get_drvdata(pdev);
-	int i = 0, irq;
+	int i;
 
 	if (iommu->skip_read)
 		goto skip_free_irq;
 
-	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
+	for (i = 0; i < iommu->num_irq; i++) {
+		int irq = platform_get_irq(pdev, i);
+
 		devm_free_irq(iommu->dev, irq, iommu);
+	}
 
 skip_free_irq:
-	pm_runtime_force_suspend(&pdev->dev);
+	if (!iommu->dlr_disable)
+		pm_runtime_force_suspend(&pdev->dev);
 }
 
 static int __maybe_unused rk_iommu_suspend(struct device *dev)
@@ -2018,6 +1720,37 @@
 				pm_runtime_force_resume)
 };
 
+static struct rk_iommu_ops iommu_data_ops_v1 = {
+	.pt_address = &rk_dte_pt_address,
+	.mk_dtentries = &rk_mk_dte,
+	.mk_ptentries = &rk_mk_pte,
+	.dte_addr_phys = &rk_dte_addr_phys,
+	.dma_addr_dte = &rk_dma_addr_dte,
+	.dma_bit_mask = DMA_BIT_MASK(32),
+};
+
+static struct rk_iommu_ops iommu_data_ops_v2 = {
+	.pt_address = &rk_dte_pt_address_v2,
+	.mk_dtentries = &rk_mk_dte_v2,
+	.mk_ptentries = &rk_mk_pte_v2,
+	.dte_addr_phys = &rk_dte_addr_phys_v2,
+	.dma_addr_dte = &rk_dma_addr_dte_v2,
+	.dma_bit_mask = DMA_BIT_MASK(40),
+};
+
+static const struct of_device_id rk_iommu_dt_ids[] = {
+	{ .compatible = "rockchip,iommu",
+	  .data = &iommu_data_ops_v1,
+	},
+	{ .compatible = "rockchip,iommu-v2",
+	  .data = &iommu_data_ops_v2,
+	},
+	{ .compatible = "rockchip,rk3568-iommu",
+	  .data = &iommu_data_ops_v2,
+	},
+	{ /* sentinel */ }
+};
+
 static struct platform_driver rk_iommu_driver = {
 	.probe = rk_iommu_probe,
 	.shutdown = rk_iommu_shutdown,
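Note: the rockchip_iommu_mask_irq()/rockchip_iommu_unmask_irq() exports renamed earlier in this patch take the master's struct device, not the IOMMU's, so other drivers can fence MMU interrupt handling around their own updates. A hypothetical caller could look like the sketch below (illustrative only; the local prototypes and the master-driver use case are assumptions, not something this patch adds):

	#include <linux/device.h>

	void rockchip_iommu_mask_irq(struct device *dev);
	void rockchip_iommu_unmask_irq(struct device *dev);

	/* Hypothetical master-side helper: silence IOMMU page-fault IRQs while
	 * the master reprograms its DMA addresses, then re-enable them (the
	 * unmask path also issues RK_MMU_CMD_PAGE_FAULT_DONE for stalled MMUs).
	 */
	static void example_master_update(struct device *dev)
	{
		rockchip_iommu_mask_irq(dev);
		/* ... reprogram the master's buffers here ... */
		rockchip_iommu_unmask_irq(dev);
	}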