2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/drivers/iommu/rockchip-iommu.c
@@ -114,6 +114,7 @@
         dma_addr_t dt_dma;
         spinlock_t iommus_lock; /* lock for iommus list */
         spinlock_t dt_lock; /* lock for modifying page directory table */
+        bool shootdown_entire;

         struct iommu_domain domain;
 };
@@ -132,11 +133,14 @@
         bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
         bool dlr_disable; /* avoid access iommu when runtime ops called */
         bool cmd_retry;
+        bool master_handle_irq;
         struct iommu_device iommu;
         struct list_head node; /* entry in rk_iommu_domain.iommus */
         struct iommu_domain *domain; /* domain to which iommu is attached */
         struct iommu_group *group;
         u32 version;
+        bool shootdown_entire;
+        bool need_res_map;
 };

 struct rk_iommudata {
@@ -146,6 +150,9 @@
 };

 static struct device *dma_dev;
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev);
+static char reserve_range[PAGE_SIZE] __aligned(PAGE_SIZE);
+static phys_addr_t res_page;

 static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
                                   unsigned int count)
@@ -290,6 +297,8 @@
 #define RK_PTE_PAGE_READABLE_V2 BIT(2)
 #define RK_PTE_PAGE_WRITABLE_V2 BIT(1)

+#define RK_PTE_PAGE_REPRESENT BIT(3)
+
 static inline phys_addr_t rk_pte_page_address(u32 pte)
 {
         return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
@@ -311,6 +320,11 @@
         return pte & RK_PTE_PAGE_VALID;
 }

+static inline bool rk_pte_is_page_represent(u32 pte)
+{
+        return pte & RK_PTE_PAGE_REPRESENT;
+}
+
 /* TODO: set cache flags per prot IOMMU_CACHE */
 static u32 rk_mk_pte(phys_addr_t page, int prot)
 {
@@ -318,6 +332,8 @@

         flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
         flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+        flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
+
         page &= RK_PTE_PAGE_ADDRESS_MASK;
         return page | flags | RK_PTE_PAGE_VALID;
 }
@@ -328,6 +344,12 @@

         flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
         flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
+        /* If BIT(3) is set, a map does not fail on a PTE that already has
+         * BIT(0) (valid) set, so an already-present page can be re-updated.
+         * This is used to re-update a pre-mapped 4G range.
+         */
+        flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
+
         page = (page & PAGE_DESC_LO_MASK) |
                ((page & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
                (page & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2;
@@ -337,7 +359,7 @@

 static u32 rk_mk_pte_invalid(u32 pte)
 {
-        return pte & ~RK_PTE_PAGE_VALID;
+        return pte & ~(RK_PTE_PAGE_VALID | RK_PTE_PAGE_REPRESENT);
 }

 /*
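Aside (not part of the patch): the standalone C sketch below mirrors the flag macros above to show how a reserved ("represent") PTE is encoded and how rk_mk_pte_invalid() now clears BIT(3) together with BIT(0). The mirror helpers are illustrative only and compile in user space.

/* Standalone illustration of the PTE flag handling above; not driver code. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)                    (1u << (n))
#define RK_PTE_PAGE_VALID         BIT(0)
#define RK_PTE_PAGE_WRITABLE_V2   BIT(1)
#define RK_PTE_PAGE_READABLE_V2   BIT(2)
#define RK_PTE_PAGE_REPRESENT     BIT(3)

/* Mirrors rk_mk_pte_invalid(): invalidation drops both VALID and REPRESENT. */
static uint32_t mk_pte_invalid(uint32_t pte)
{
        return pte & ~(RK_PTE_PAGE_VALID | RK_PTE_PAGE_REPRESENT);
}

int main(void)
{
        /* A "reserved" PTE: readable, writable, valid, and marked REPRESENT. */
        uint32_t pte = RK_PTE_PAGE_READABLE_V2 | RK_PTE_PAGE_WRITABLE_V2 |
                       RK_PTE_PAGE_REPRESENT | RK_PTE_PAGE_VALID;

        printf("reserved pte flags: 0x%x\n", pte);                 /* 0xf */
        printf("after invalidate:   0x%x\n", mk_pte_invalid(pte)); /* 0x6 */
        return 0;
}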
@@ -686,22 +708,14 @@
                 rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
 }

-static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+static int rk_pagefault_done(struct rk_iommu *iommu)
 {
-        struct rk_iommu *iommu = dev_id;
         u32 status;
         u32 int_status;
-        u32 int_mask;
         dma_addr_t iova;
+        int i;
+        u32 int_mask;
         irqreturn_t ret = IRQ_NONE;
-        int i, err;
-
-        err = pm_runtime_get_if_in_use(iommu->dev);
-        if (WARN_ON_ONCE(err <= 0))
-                return ret;
-
-        if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
-                goto out;

         for (i = 0; i < iommu->num_mmu; i++) {
                 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
@@ -724,16 +738,18 @@

                 log_iova(iommu, i, iova);

-                /*
-                 * Report page fault to any installed handlers.
-                 * Ignore the return code, though, since we always zap cache
-                 * and clear the page fault anyway.
-                 */
-                if (iommu->domain)
-                        report_iommu_fault(iommu->domain, iommu->dev, iova,
+                if (!iommu->master_handle_irq) {
+                        /*
+                         * Report page fault to any installed handlers.
+                         * Ignore the return code, though, since we always zap cache
+                         * and clear the page fault anyway.
+                         */
+                        if (iommu->domain)
+                                report_iommu_fault(iommu->domain, iommu->dev, iova,
                                            status);
-                else
-                        dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+                        else
+                                dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+                }

                 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);

@@ -755,6 +771,46 @@
                                 int_status);

                 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
+        }
+
+        return ret;
+}
+
+int rockchip_pagefault_done(struct device *master_dev)
+{
+        struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
+
+        return rk_pagefault_done(iommu);
+}
+EXPORT_SYMBOL_GPL(rockchip_pagefault_done);
+
+void __iomem *rockchip_get_iommu_base(struct device *master_dev, int idx)
+{
+        struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
+
+        return iommu->bases[idx];
+}
+EXPORT_SYMBOL_GPL(rockchip_get_iommu_base);
+
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+{
+        struct rk_iommu *iommu = dev_id;
+        irqreturn_t ret = IRQ_NONE;
+        int err;
+
+        err = pm_runtime_get_if_in_use(iommu->dev);
+        if (WARN_ON_ONCE(err <= 0))
+                return ret;
+
+        if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
+                goto out;
+
+        /* Master must call rockchip_pagefault_done to handle pagefault */
+        if (iommu->master_handle_irq) {
+                if (iommu->domain)
+                        ret = report_iommu_fault(iommu->domain, iommu->dev, -1, 0x0);
+        } else {
+                ret = rk_pagefault_done(iommu);
         }

         clk_bulk_disable(iommu->num_clocks, iommu->clocks);
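Aside (not part of the patch): with rockchip,master-handle-irq set, the IRQ handler above only reports the fault and expects the master to finish it by calling rockchip_pagefault_done(). A hedged sketch of the master side follows; the example driver, its handler and recovery step are assumptions, while iommu_set_fault_handler() and iommu_get_domain_for_dev() are the generic kernel APIs, and the export prototypes are repeated locally for self-containment (a Rockchip header would normally provide them).

/*
 * Hedged sketch of a master driver consuming the new exports.
 * The master device, its recovery step and this handler are illustrative.
 */
#include <linux/iommu.h>
#include <linux/device.h>

int rockchip_pagefault_done(struct device *master_dev);
void __iomem *rockchip_get_iommu_base(struct device *master_dev, int idx);

static int example_master_iommu_fault(struct iommu_domain *domain,
                                      struct device *dev, unsigned long iova,
                                      int flags, void *token)
{
        struct device *master_dev = token;

        /*
         * Device-specific recovery would go here (e.g. stop the DMA job that
         * faulted) before the IOMMU is allowed to clear the fault.
         */

        /* Clear the fault and zap the TLB in the rockchip IOMMU driver. */
        return rockchip_pagefault_done(master_dev);
}

static void example_master_install_handler(struct device *master_dev)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(master_dev);

        if (domain)
                iommu_set_fault_handler(domain, example_master_iommu_fault,
                                        master_dev);
}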
@@ -825,6 +881,10 @@
 {
         struct list_head *pos;
         unsigned long flags;
+
+        /* Do not zap tlb cache line if shootdown_entire set */
+        if (rk_domain->shootdown_entire)
+                return;

         /* shootdown these iova from all iommus using this domain */
         spin_lock_irqsave(&rk_domain->iommus_lock, flags);
@@ -936,10 +996,11 @@

 static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
                                   u32 *pte_addr, dma_addr_t pte_dma,
-                                  size_t size)
+                                  size_t size, struct rk_iommu *iommu)
 {
         unsigned int pte_count;
         unsigned int pte_total = size / SPAGE_SIZE;
+        int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV;

         assert_spin_locked(&rk_domain->dt_lock);

@@ -948,12 +1009,36 @@
                 if (!rk_pte_is_page_valid(pte))
                         break;

-                pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+                if (iommu && iommu->need_res_map) {
+                        if (iommu->version >= 0x2)
+                                pte_addr[pte_count] = rk_mk_pte_v2(res_page,
+                                                                   prot);
+                        else
+                                pte_addr[pte_count] = rk_mk_pte(res_page, prot);
+                } else {
+                        pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+                }
         }

         rk_table_flush(rk_domain, pte_dma, pte_count);

         return pte_count * SPAGE_SIZE;
+}
+
+static struct rk_iommu *rk_iommu_get(struct rk_iommu_domain *rk_domain)
+{
+        unsigned long flags;
+        struct list_head *pos;
+        struct rk_iommu *iommu = NULL;
+
+        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+        list_for_each(pos, &rk_domain->iommus) {
+                iommu = list_entry(pos, struct rk_iommu, node);
+                if (iommu->need_res_map)
+                        break;
+        }
+        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+        return iommu;
 }

 static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
@@ -969,12 +1054,15 @@
         for (pte_count = 0; pte_count < pte_total; pte_count++) {
                 u32 pte = pte_addr[pte_count];

-                if (rk_pte_is_page_valid(pte))
+                if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
                         goto unwind;

-                pte_addr[pte_count] = rk_mk_pte(paddr, prot);
-
-                paddr += SPAGE_SIZE;
+                if (prot & IOMMU_PRIV) {
+                        pte_addr[pte_count] = rk_mk_pte(res_page, prot);
+                } else {
+                        pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+                        paddr += SPAGE_SIZE;
+                }
         }

         rk_table_flush(rk_domain, pte_dma, pte_total);
@@ -985,16 +1073,13 @@
          * We only zap the first and last iova, since only they could have
          * dte or pte shared with an existing mapping.
          */
-
-        /* Do not zap tlb cache line if IOMMU_TLB_SHOT_ENTIRE set */
-        if (!(prot & IOMMU_TLB_SHOT_ENTIRE))
-                rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+        rk_iommu_zap_iova_first_last(rk_domain, iova, size);

         return 0;
 unwind:
         /* Unmap the range of iovas that we just mapped */
         rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
-                            pte_count * SPAGE_SIZE);
+                            pte_count * SPAGE_SIZE, NULL);

         iova += pte_count * SPAGE_SIZE;
         page_phys = rk_pte_page_address(pte_addr[pte_count]);
@@ -1017,12 +1102,15 @@
         for (pte_count = 0; pte_count < pte_total; pte_count++) {
                 u32 pte = pte_addr[pte_count];

-                if (rk_pte_is_page_valid(pte))
+                if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
                         goto unwind;

-                pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);
-
-                paddr += SPAGE_SIZE;
+                if (prot & IOMMU_PRIV) {
+                        pte_addr[pte_count] = rk_mk_pte_v2(res_page, prot);
+                } else {
+                        pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);
+                        paddr += SPAGE_SIZE;
+                }
         }

         rk_table_flush(rk_domain, pte_dma, pte_total);
@@ -1033,16 +1121,13 @@
          * We only zap the first and last iova, since only they could have
          * dte or pte shared with an existing mapping.
          */
-
-        /* Do not zap tlb cache line if IOMMU_TLB_SHOT_ENTIRE set */
-        if (!(prot & IOMMU_TLB_SHOT_ENTIRE))
-                rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+        rk_iommu_zap_iova_first_last(rk_domain, iova, size);

         return 0;
 unwind:
         /* Unmap the range of iovas that we just mapped */
         rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
-                            pte_count * SPAGE_SIZE);
+                            pte_count * SPAGE_SIZE, NULL);

         iova += pte_count * SPAGE_SIZE;
         page_phys = rk_pte_page_address_v2(pte_addr[pte_count]);
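Aside (not part of the patch): the IOMMU_PRIV path above points every PTE at the reserved page and tolerates PTEs already marked RK_PTE_PAGE_REPRESENT, which is what allows re-updating a pre-mapped range, and unmaps on a need_res_map IOMMU fall back to the same reserved page instead of leaving invalid PTEs. The hedged sketch below shows how a caller might pre-map such a window; the domain, window base and size are assumptions, and it uses the 5-argument iommu_map() of the kernel generation this BSP appears to target (newer kernels add a gfp_t argument).

/*
 * Hedged sketch: pre-mapping an IOVA window to the reserved page with
 * IOMMU_PRIV. The window base/size and the domain are illustrative.
 */
#include <linux/iommu.h>
#include <linux/sizes.h>

#define EXAMPLE_WINDOW_IOVA     0x10000000UL
#define EXAMPLE_WINDOW_SIZE     SZ_16M

static int example_premap_reserved(struct iommu_domain *domain)
{
        /*
         * With IOMMU_PRIV set, rk_iommu_map_iova() ignores the physical
         * address (0 here) and points every PTE at res_page, marking it
         * RK_PTE_PAGE_REPRESENT so it can later be re-updated in place.
         */
        return iommu_map(domain, EXAMPLE_WINDOW_IOVA, 0, EXAMPLE_WINDOW_SIZE,
                         IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
}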
@@ -1136,6 +1221,7 @@
         u32 dte;
         u32 *pte_addr;
         size_t unmap_size;
+        struct rk_iommu *iommu = rk_iommu_get(rk_domain);

         spin_lock_irqsave(&rk_domain->dt_lock, flags);

@@ -1156,7 +1242,8 @@
         pt_phys = rk_dte_pt_address(dte);
         pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
         pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
-        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
+        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
+                                         iommu);

         spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

@@ -1176,6 +1263,7 @@
         u32 dte;
         u32 *pte_addr;
         size_t unmap_size;
+        struct rk_iommu *iommu = rk_iommu_get(rk_domain);

         spin_lock_irqsave(&rk_domain->dt_lock, flags);

@@ -1196,7 +1284,8 @@
         pt_phys = rk_dte_pt_address_v2(dte);
         pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
         pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
-        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
+        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
+                                         iommu);

         spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

@@ -1345,6 +1434,28 @@
 }
 EXPORT_SYMBOL(rockchip_iommu_is_enabled);

+int rockchip_iommu_force_reset(struct device *dev)
+{
+        struct rk_iommu *iommu;
+        int ret;
+
+        iommu = rk_iommu_from_dev(dev);
+        if (!iommu)
+                return -ENODEV;
+
+        ret = rk_iommu_enable_stall(iommu);
+        if (ret)
+                return ret;
+
+        ret = rk_iommu_force_reset(iommu);
+
+        rk_iommu_disable_stall(iommu);
+
+        return ret;
+
+}
+EXPORT_SYMBOL(rockchip_iommu_force_reset);
+
 static void rk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
 {
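Aside (not part of the patch): a master driver could call the new rockchip_iommu_force_reset() export from its error-recovery path, passing its own struct device just as with the other exports. A minimal hedged sketch, where the calling driver and the trigger condition are assumptions and the prototype is repeated locally for self-containment:

/*
 * Hedged sketch: resetting the IOMMU from a master driver's error path.
 * The master driver and its error condition are illustrative assumptions.
 */
#include <linux/device.h>

int rockchip_iommu_force_reset(struct device *dev);

static void example_master_recover(struct device *master_dev)
{
        int ret;

        /* Stalls the MMU, issues a force reset, then releases the stall. */
        ret = rockchip_iommu_force_reset(master_dev);
        if (ret)
                dev_warn(master_dev, "iommu force reset failed: %d\n", ret);
}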
@@ -1413,6 +1524,7 @@
         list_add_tail(&iommu->node, &rk_domain->iommus);
         spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

+        rk_domain->shootdown_entire = iommu->shootdown_entire;
         ret = pm_runtime_get_if_in_use(iommu->dev);
         if (!ret || WARN_ON_ONCE(ret < 0))
                 return 0;
@@ -1764,11 +1876,16 @@
                                                 "rockchip,skip-mmu-read");
         iommu->dlr_disable = device_property_read_bool(dev,
                                                 "rockchip,disable-device-link-resume");
-
+        iommu->shootdown_entire = device_property_read_bool(dev,
                                                 "rockchip,shootdown-entire");
+        iommu->master_handle_irq = device_property_read_bool(dev,
+                                                "rockchip,master-handle-irq");
         if (of_machine_is_compatible("rockchip,rv1126") ||
             of_machine_is_compatible("rockchip,rv1109"))
                 iommu->cmd_retry = device_property_read_bool(dev,
                                                 "rockchip,enable-cmd-retry");
+        iommu->need_res_map = device_property_read_bool(dev,
+                                                "rockchip,reserve-map");

         /*
          * iommu clocks should be present for all new devices and devicetrees
@@ -1839,6 +1956,10 @@
         }

 skip_request_irq:
+        if (!res_page && iommu->need_res_map) {
+                res_page = __pa_symbol(reserve_range);
+                pr_info("%s,%d, res_page = 0x%pa\n", __func__, __LINE__, &res_page);
+        }
         return 0;
 err_remove_sysfs:
         iommu_device_sysfs_remove(&iommu->iommu);
@@ -1854,9 +1975,13 @@
         struct rk_iommu *iommu = platform_get_drvdata(pdev);
         int i = 0, irq;

+        if (iommu->skip_read)
+                goto skip_free_irq;
+
         while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
                 devm_free_irq(iommu->dev, irq, iommu);

+skip_free_irq:
         pm_runtime_force_suspend(&pdev->dev);
 }
