..
 	dma_addr_t dt_dma;
 	spinlock_t iommus_lock; /* lock for iommus list */
 	spinlock_t dt_lock; /* lock for modifying page directory table */
+	bool shootdown_entire;
 
 	struct iommu_domain domain;
 };
..
 	bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
 	bool dlr_disable; /* avoid access iommu when runtime ops called */
 	bool cmd_retry;
+	bool master_handle_irq;
 	struct iommu_device iommu;
 	struct list_head node; /* entry in rk_iommu_domain.iommus */
 	struct iommu_domain *domain; /* domain to which iommu is attached */
 	struct iommu_group *group;
 	u32 version;
+	bool shootdown_entire;
+	bool need_res_map;
 };
 
 struct rk_iommudata {
..
 };
 
 static struct device *dma_dev;
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev);
+static char reserve_range[PAGE_SIZE] __aligned(PAGE_SIZE);
+static phys_addr_t res_page;
 
 static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
 				  unsigned int count)
..
 #define RK_PTE_PAGE_READABLE_V2 BIT(2)
 #define RK_PTE_PAGE_WRITABLE_V2 BIT(1)
 
+#define RK_PTE_PAGE_REPRESENT BIT(3)
+
 static inline phys_addr_t rk_pte_page_address(u32 pte)
 {
 	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
..
 	return pte & RK_PTE_PAGE_VALID;
 }
 
+static inline bool rk_pte_is_page_represent(u32 pte)
+{
+	return pte & RK_PTE_PAGE_REPRESENT;
+}
+
 /* TODO: set cache flags per prot IOMMU_CACHE */
 static u32 rk_mk_pte(phys_addr_t page, int prot)
 {
..
 
 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+	flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
+
 	page &= RK_PTE_PAGE_ADDRESS_MASK;
 	return page | flags | RK_PTE_PAGE_VALID;
 }
..
 
 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
+	/*
+	 * If BIT(3) is set, iommu_map() does not fail when BIT(0) is already
+	 * set: a page that is already present may be updated in place. This
+	 * is used to re-map a pre-mapped 4G range.
+	 */
+	flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
+
 	page = (page & PAGE_DESC_LO_MASK) |
 	       ((page & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
 	       (page & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2;
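The IOMMU_PRIV handling above, together with RK_PTE_PAGE_REPRESENT, is what allows a caller to pre-map a window and later overwrite it: the map path (further down in this patch) only rejects PTEs that are valid and not marked "represent". A minimal caller-side sketch, not part of the patch, assuming the 5-argument iommu_map() of this kernel; the example_* names are illustrative only:

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Pre-map a window: every PTE becomes a "represent" entry backed by the
 * driver's reserve page (the physical address argument is ignored for
 * IOMMU_PRIV mappings), so the range can be re-mapped later. */
static int example_premap_window(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return iommu_map(domain, iova, 0, size,
			 IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
}

/* Map a real page over one slot of the pre-mapped window; this does not
 * trip the "already valid" check because the old PTE is "represent". */
static int example_map_real_page(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr)
{
	return iommu_map(domain, iova, paddr, SZ_4K,
			 IOMMU_READ | IOMMU_WRITE);
}

This mirrors the "pre-mapped 4G range" use case mentioned in the comment above.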
..
 
 static u32 rk_mk_pte_invalid(u32 pte)
 {
-	return pte & ~RK_PTE_PAGE_VALID;
+	return pte & ~(RK_PTE_PAGE_VALID | RK_PTE_PAGE_REPRESENT);
 }
 
 /*
..
 			rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
 }
 
-static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+static int rk_pagefault_done(struct rk_iommu *iommu)
 {
-	struct rk_iommu *iommu = dev_id;
 	u32 status;
 	u32 int_status;
-	u32 int_mask;
 	dma_addr_t iova;
+	int i;
+	u32 int_mask;
 	irqreturn_t ret = IRQ_NONE;
-	int i, err;
-
-	err = pm_runtime_get_if_in_use(iommu->dev);
-	if (WARN_ON_ONCE(err <= 0))
-		return ret;
-
-	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
-		goto out;
 
 	for (i = 0; i < iommu->num_mmu; i++) {
 		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
..
 
 			log_iova(iommu, i, iova);
 
-			/*
-			 * Report page fault to any installed handlers.
-			 * Ignore the return code, though, since we always zap cache
-			 * and clear the page fault anyway.
-			 */
-			if (iommu->domain)
-				report_iommu_fault(iommu->domain, iommu->dev, iova,
+			if (!iommu->master_handle_irq) {
+				/*
+				 * Report page fault to any installed handlers.
+				 * Ignore the return code, though, since we always zap cache
+				 * and clear the page fault anyway.
+				 */
+				if (iommu->domain)
+					report_iommu_fault(iommu->domain, iommu->dev, iova,
 						   status);
-			else
-				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+				else
+					dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+			}
 
 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
 
..
 				int_status);
 
 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
+	}
+
+	return ret;
+}
+
+int rockchip_pagefault_done(struct device *master_dev)
+{
+	struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
+
+	return rk_pagefault_done(iommu);
+}
+EXPORT_SYMBOL_GPL(rockchip_pagefault_done);
+
+void __iomem *rockchip_get_iommu_base(struct device *master_dev, int idx)
+{
+	struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
+
+	return iommu->bases[idx];
+}
+EXPORT_SYMBOL_GPL(rockchip_get_iommu_base);
+
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+{
+	struct rk_iommu *iommu = dev_id;
+	irqreturn_t ret = IRQ_NONE;
+	int err;
+
+	err = pm_runtime_get_if_in_use(iommu->dev);
+	if (WARN_ON_ONCE(err <= 0))
+		return ret;
+
+	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
+		goto out;
+
+	/* Master must call rockchip_pagefault_done to handle pagefault */
+	if (iommu->master_handle_irq) {
+		if (iommu->domain)
+			ret = report_iommu_fault(iommu->domain, iommu->dev, -1, 0x0);
+	} else {
+		ret = rk_pagefault_done(iommu);
 	}
 
 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
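With "rockchip,master-handle-irq" set, rk_iommu_irq() above only reports the fault; the interrupt is finished once the master driver completes its own recovery and calls the exported rockchip_pagefault_done(). A hypothetical master-side sketch, not part of the patch; the example_* names are illustrative and the extern declaration stands in for whatever Rockchip header exports the symbol:

#include <linux/device.h>
#include <linux/iommu.h>

extern int rockchip_pagefault_done(struct device *master_dev);

/* Fault handler installed by the master driver; invoked via
 * report_iommu_fault() from rk_iommu_irq() with iova == -1. */
static int example_master_iommu_fault(struct iommu_domain *domain,
				      struct device *dev, unsigned long iova,
				      int flags, void *token)
{
	struct device *master = token;

	/* Device-specific recovery would go here (e.g. stop the DMA job). */

	/* Clear the IOMMU page-fault status and zap its TLB. */
	return rockchip_pagefault_done(master);
}

static void example_install_fault_handler(struct device *master)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(master);

	if (domain)
		iommu_set_fault_handler(domain, example_master_iommu_fault,
					master);
}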
..
 {
 	struct list_head *pos;
 	unsigned long flags;
+
+	/* Do not zap tlb cache line if shootdown_entire set */
+	if (rk_domain->shootdown_entire)
+		return;
 
 	/* shootdown these iova from all iommus using this domain */
 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
..
 
 static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
 				  u32 *pte_addr, dma_addr_t pte_dma,
-				  size_t size)
+				  size_t size, struct rk_iommu *iommu)
 {
 	unsigned int pte_count;
 	unsigned int pte_total = size / SPAGE_SIZE;
+	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV;
 
 	assert_spin_locked(&rk_domain->dt_lock);
 
..
 		if (!rk_pte_is_page_valid(pte))
 			break;
 
-		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+		if (iommu && iommu->need_res_map) {
+			if (iommu->version >= 0x2)
+				pte_addr[pte_count] = rk_mk_pte_v2(res_page,
+								   prot);
+			else
+				pte_addr[pte_count] = rk_mk_pte(res_page, prot);
+		} else {
+			pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+		}
 	}
 
 	rk_table_flush(rk_domain, pte_dma, pte_count);
 
 	return pte_count * SPAGE_SIZE;
+}
+
+static struct rk_iommu *rk_iommu_get(struct rk_iommu_domain *rk_domain)
+{
+	unsigned long flags;
+	struct list_head *pos;
+	struct rk_iommu *iommu = NULL;
+
+	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+	list_for_each(pos, &rk_domain->iommus) {
+		iommu = list_entry(pos, struct rk_iommu, node);
+		if (iommu->need_res_map)
+			break;
+	}
+	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+	return iommu;
 }
 
 static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
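The reserve-page path above changes what "unmapped" means for a master probed with "rockchip,reserve-map": instead of clearing the PTE, rk_iommu_unmap_iova() re-points it at res_page with IOMMU_PRIV, so a stray access after unmap hits the dummy page rather than raising a fault, and rk_iommu_get() finds any such master attached to the domain. A small hypothetical helper, not in the patch, just to restate the rule the map path applies:

/* A PTE slot may be (re)written if it is not valid at all, or if it is a
 * "represent" entry pointing at the reserve page. */
static bool example_slot_is_remappable(u32 pte)
{
	return !rk_pte_is_page_valid(pte) || rk_pte_is_page_represent(pte);
}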
..
 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
 		u32 pte = pte_addr[pte_count];
 
-		if (rk_pte_is_page_valid(pte))
+		if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
 			goto unwind;
 
-		pte_addr[pte_count] = rk_mk_pte(paddr, prot);
-
-		paddr += SPAGE_SIZE;
+		if (prot & IOMMU_PRIV) {
+			pte_addr[pte_count] = rk_mk_pte(res_page, prot);
+		} else {
+			pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+			paddr += SPAGE_SIZE;
+		}
 	}
 
 	rk_table_flush(rk_domain, pte_dma, pte_total);
..
 	 * We only zap the first and last iova, since only they could have
 	 * dte or pte shared with an existing mapping.
 	 */
-
-	/* Do not zap tlb cache line if IOMMU_TLB_SHOT_ENTIRE set */
-	if (!(prot & IOMMU_TLB_SHOT_ENTIRE))
-		rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
 
 	return 0;
 unwind:
 	/* Unmap the range of iovas that we just mapped */
 	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
-			    pte_count * SPAGE_SIZE);
+			    pte_count * SPAGE_SIZE, NULL);
 
 	iova += pte_count * SPAGE_SIZE;
 	page_phys = rk_pte_page_address(pte_addr[pte_count]);
..
 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
 		u32 pte = pte_addr[pte_count];
 
-		if (rk_pte_is_page_valid(pte))
+		if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
 			goto unwind;
 
-		pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);
-
-		paddr += SPAGE_SIZE;
+		if (prot & IOMMU_PRIV) {
+			pte_addr[pte_count] = rk_mk_pte_v2(res_page, prot);
+		} else {
+			pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);
+			paddr += SPAGE_SIZE;
+		}
 	}
 
 	rk_table_flush(rk_domain, pte_dma, pte_total);
..
 	 * We only zap the first and last iova, since only they could have
 	 * dte or pte shared with an existing mapping.
 	 */
-
-	/* Do not zap tlb cache line if IOMMU_TLB_SHOT_ENTIRE set */
-	if (!(prot & IOMMU_TLB_SHOT_ENTIRE))
-		rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
 
 	return 0;
 unwind:
 	/* Unmap the range of iovas that we just mapped */
 	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
-			    pte_count * SPAGE_SIZE);
+			    pte_count * SPAGE_SIZE, NULL);
 
 	iova += pte_count * SPAGE_SIZE;
 	page_phys = rk_pte_page_address_v2(pte_addr[pte_count]);
..
 	u32 dte;
 	u32 *pte_addr;
 	size_t unmap_size;
+	struct rk_iommu *iommu = rk_iommu_get(rk_domain);
 
 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
..
 	pt_phys = rk_dte_pt_address(dte);
 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
-	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
+	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
+					 iommu);
 
 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
 
..
 	u32 dte;
 	u32 *pte_addr;
 	size_t unmap_size;
+	struct rk_iommu *iommu = rk_iommu_get(rk_domain);
 
 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
..
 	pt_phys = rk_dte_pt_address_v2(dte);
 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
-	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
+	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
+					 iommu);
 
 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
 
..
 }
 EXPORT_SYMBOL(rockchip_iommu_is_enabled);
 
+int rockchip_iommu_force_reset(struct device *dev)
+{
+	struct rk_iommu *iommu;
+	int ret;
+
+	iommu = rk_iommu_from_dev(dev);
+	if (!iommu)
+		return -ENODEV;
+
+	ret = rk_iommu_enable_stall(iommu);
+	if (ret)
+		return ret;
+
+	ret = rk_iommu_force_reset(iommu);
+
+	rk_iommu_disable_stall(iommu);
+
+	return ret;
+
+}
+EXPORT_SYMBOL(rockchip_iommu_force_reset);
+
 static void rk_iommu_detach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
..
 	list_add_tail(&iommu->node, &rk_domain->iommus);
 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 
+	rk_domain->shootdown_entire = iommu->shootdown_entire;
 	ret = pm_runtime_get_if_in_use(iommu->dev);
 	if (!ret || WARN_ON_ONCE(ret < 0))
 		return 0;
..
 					"rockchip,skip-mmu-read");
 	iommu->dlr_disable = device_property_read_bool(dev,
 					"rockchip,disable-device-link-resume");
-
+	iommu->shootdown_entire = device_property_read_bool(dev,
+					"rockchip,shootdown-entire");
+	iommu->master_handle_irq = device_property_read_bool(dev,
+					"rockchip,master-handle-irq");
 	if (of_machine_is_compatible("rockchip,rv1126") ||
 	    of_machine_is_compatible("rockchip,rv1109"))
 		iommu->cmd_retry = device_property_read_bool(dev,
 					"rockchip,enable-cmd-retry");
+	iommu->need_res_map = device_property_read_bool(dev,
+					"rockchip,reserve-map");
 
 	/*
 	 * iommu clocks should be present for all new devices and devicetrees
..
 	}
 
 skip_request_irq:
+	if (!res_page && iommu->need_res_map) {
+		res_page = __pa_symbol(reserve_range);
+		pr_info("%s,%d, res_page = 0x%pa\n", __func__, __LINE__, &res_page);
+	}
 	return 0;
 err_remove_sysfs:
 	iommu_device_sysfs_remove(&iommu->iommu);
..
 	struct rk_iommu *iommu = platform_get_drvdata(pdev);
 	int i = 0, irq;
 
+	if (iommu->skip_read)
+		goto skip_free_irq;
+
 	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
 		devm_free_irq(iommu->dev, irq, iommu);
 
+skip_free_irq:
 	pm_runtime_force_suspend(&pdev->dev);
 }
 