forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/arch/x86/mm/iomap_32.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright © 2008 Ingo Molnar
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  */
 
 #include <asm/iomap.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
 
@@ -39,7 +26,7 @@
 	if (!is_io_mapping_possible(base, size))
 		return -EINVAL;
 
-	ret = io_reserve_memtype(base, base + size, &pcm);
+	ret = memtype_reserve_io(base, base + size, &pcm);
 	if (ret)
 		return ret;
 
@@ -53,38 +40,11 @@
 
 void iomap_free(resource_size_t base, unsigned long size)
 {
-	io_free_memtype(base, base + size);
+	memtype_free_io(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
-	pte_t pte = pfn_pte(pfn, prot);
-	unsigned long vaddr;
-	int idx, type;
-
-	preempt_disable();
-	pagefault_disable();
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	WARN_ON(!pte_none(*(kmap_pte - idx)));
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-	current->kmap_pte[type] = pte;
-#endif
-	set_pte(kmap_pte - idx, pte);
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-
-/*
- * Map 'pfn' using protections 'prot'
- */
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -100,39 +60,6 @@
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
+	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
-
-void
-iounmap_atomic(void __iomem *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-#ifdef CONFIG_PREEMPT_RT_FULL
-		current->kmap_pte[type] = __pte(0);
-#endif
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-	}
-
-	pagefault_enable();
-	preempt_enable();
-}
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
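
Note: drivers are not expected to call __iomap_local_pfn_prot() directly; it is the x86 backend for the generic helpers in <linux/io-mapping.h>. As a minimal, hypothetical sketch of how a caller migrates from the removed iomap_atomic_prot_pfn()/iounmap_atomic() pair to the kmap_local-style API: the copy_from_bar() helper and its parameters below are illustrative only and not part of this change.

#include <linux/io.h>
#include <linux/io-mapping.h>

/*
 * Hypothetical helper: copy 'len' bytes out of a device BAR through a
 * short-lived, CPU-local write-combining mapping. Unlike the removed
 * atomic variant, io_mapping_map_local_wc() does not disable pagefaults
 * or preemption, matching the kmap_local semantics introduced above.
 */
static int copy_from_bar(resource_size_t bar_base, unsigned long bar_size,
			 unsigned long offset, void *dst, size_t len)
{
	struct io_mapping *map;
	void __iomem *vaddr;

	map = io_mapping_create_wc(bar_base, bar_size);	/* reserves the WC memtype */
	if (!map)
		return -ENOMEM;

	vaddr = io_mapping_map_local_wc(map, offset);	/* was: iomap_atomic_prot_pfn() */
	memcpy_fromio(dst, vaddr, len);
	io_mapping_unmap_local(vaddr);			/* was: iounmap_atomic() */

	io_mapping_free(map);
	return 0;
}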