forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/arch/x86/mm/iomap_32.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright © 2008 Ingo Molnar
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  */
 
 #include <asm/iomap.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
 
@@ -39,7 +26,7 @@
 	if (!is_io_mapping_possible(base, size))
 		return -EINVAL;
 
-	ret = io_reserve_memtype(base, base + size, &pcm);
+	ret = memtype_reserve_io(base, base + size, &pcm);
 	if (ret)
 		return ret;
 
@@ -53,32 +40,11 @@
 
 void iomap_free(resource_size_t base, unsigned long size)
 {
-	io_free_memtype(base, base + size);
+	memtype_free_io(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	preempt_disable();
-	pagefault_disable();
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-
-/*
- * Map 'pfn' using protections 'prot'
- */
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -94,36 +60,6 @@
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
+	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
-
-void
-iounmap_atomic(void __iomem *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-	}
-
-	pagefault_enable();
-	preempt_enable();
-}
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
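Note (context, not part of the diff): this change picks up the mainline kmap_local conversion of iomap_32.c, plus the PAT API rename (io_reserve_memtype/io_free_memtype to memtype_reserve_io/memtype_free_io). The removed kmap_atomic_prot_pfn()/iomap_atomic_prot_pfn()/iounmap_atomic() entry points disabled preemption and pagefaults for the lifetime of the mapping; the new __iomap_local_pfn_prot() instead backs the mainline io-mapping "local" helpers, which only disable migration and require strictly nested map/unmap ordering. A minimal caller-migration sketch, assuming a struct io_mapping created with io_mapping_create_wc() as in mainline; demo_write_reg() and its parameters are hypothetical:

#include <linux/io-mapping.h>
#include <linux/io.h>

/* Hypothetical example: write one 32-bit word into a WC-mapped BAR. */
static void demo_write_reg(struct io_mapping *mapping,
			   unsigned long offset, u32 val)
{
	void __iomem *vaddr;

	/* Old API (removed above): io_mapping_map_atomic_wc(), built on
	 * kmap_atomic_prot_pfn(); sleeping was forbidden while mapped. */

	/* New API: io_mapping_map_local_wc(), built on the
	 * __iomap_local_pfn_prot() added by this change. */
	vaddr = io_mapping_map_local_wc(mapping, offset & PAGE_MASK);
	writel(val, vaddr + (offset & ~PAGE_MASK));
	io_mapping_unmap_local(vaddr);
}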