@@ -1080,7 +1080,7 @@
 			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-			io_mapping_unmap_local((void __iomem *)vaddr);
+			io_mapping_unmap_atomic((void __iomem *)vaddr);
 
 			if (drm_mm_node_allocated(&cache->node)) {
 				ggtt->vm.clear_range(&ggtt->vm,
@@ -1146,7 +1146,7 @@
 
 	if (cache->vaddr) {
 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-		io_mapping_unmap_local((void __force __iomem *) unmask_page(cache->vaddr));
+		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
 	} else {
 		struct i915_vma *vma;
 		int err;
@@ -1194,7 +1194,8 @@
 		offset += page << PAGE_SHIFT;
 	}
 
-	vaddr = (void __force *)io_mapping_map_local_wc(&ggtt->iomap, offset);
+	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
+							 offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
 
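For context, a minimal sketch (not part of the patch) contrasting the two io-mapping APIs these hunks switch between; the helper names poke_bar_atomic/poke_bar_local and the mapping/offset/value parameters are hypothetical, only the io_mapping_* and writel() calls are real kernel APIs:

#include <linux/io-mapping.h>
#include <linux/io.h>

/* Hypothetical helper: atomic variant of a single MMIO write. */
static void poke_bar_atomic(struct io_mapping *mapping,
			    unsigned long offset, u32 value)
{
	void __iomem *vaddr;

	/* Disables preemption and page faults: no sleeping until unmap. */
	vaddr = io_mapping_map_atomic_wc(mapping, offset);
	writel(value, vaddr);
	io_mapping_unmap_atomic(vaddr);
}

/* Hypothetical helper: kmap_local-style variant of the same write. */
static void poke_bar_local(struct io_mapping *mapping,
			   unsigned long offset, u32 value)
{
	void __iomem *vaddr;

	/*
	 * Only disables migration: the section stays preemptible and
	 * mappings may nest, but unmaps must come in reverse map order.
	 */
	vaddr = io_mapping_map_local_wc(mapping, offset);
	writel(value, vaddr);
	io_mapping_unmap_local(vaddr);
}

The practical difference is that the _atomic pair turns the mapped region into a non-sleeping critical section, while the _local pair keeps it preemptible; the diff above moves this code back onto the atomic variants.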