2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/mm/iomap_32.c
@@ -44,7 +44,28 @@
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+{
+	unsigned long vaddr;
+	int idx, type;
+
+	preempt_disable();
+	pagefault_disable();
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR * smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+	arch_flush_lazy_mmu_mode();
+
+	return (void *)vaddr;
+}
+
+/*
+ * Map 'pfn' using protections 'prot'
+ */
+void __iomem *
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,6 +81,36 @@
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
+	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
+EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
+
+void
+iounmap_atomic(void __iomem *kvaddr)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+		kmap_atomic_idx_pop();
+	}
+
+	pagefault_enable();
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(iounmap_atomic);
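
For reference, the restored helpers are used as a map/access/unmap triple. Below is a minimal sketch of a hypothetical caller, not part of this diff: example_poke(), its pfn/offset/val arguments, and the write-combining protection choice are illustrative assumptions. The key constraint it demonstrates is that everything between iomap_atomic_prot_pfn() and iounmap_atomic() runs with preemption and page faults disabled, so the access window must be short and must not sleep.

#include <linux/io.h>		/* iowrite32() */
#include <asm/iomap.h>		/* iomap_atomic_prot_pfn(), iounmap_atomic() */

/*
 * Hypothetical caller (illustration only): map one page of a device
 * aperture write-combined, write a single 32-bit register, and drop
 * the mapping. The mapping uses a per-CPU fixmap slot and is atomic,
 * so nothing in between may sleep or fault.
 */
static void example_poke(unsigned long pfn, unsigned int offset, u32 val)
{
	void __iomem *vaddr;

	vaddr = iomap_atomic_prot_pfn(pfn, pgprot_writecombine(PAGE_KERNEL));
	iowrite32(val, vaddr + offset);
	iounmap_atomic(vaddr);
}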