2024-09-20 a36159eec6ca17402b0e146b86efaf76568dc353
kernel/arch/arm/mm/highmem.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/arm/mm/highmem.c -- ARM highmem support
+ *
+ * Author: Nicolas Pitre
+ * Created: September 8, 2008
+ * Copyright: Marvell Semiconductors Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include "mm.h"
+
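+/*
+ * Install @pte in the fixmap slot @idx and flush the stale TLB entry
+ * for that slot's virtual address on the local CPU.
+ */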
+static inline void set_fixmap_pte(int idx, pte_t pte)
+{
+        unsigned long vaddr = __fix_to_virt(idx);
+        pte_t *ptep = virt_to_kpte(vaddr);
+
+        set_pte_ext(ptep, pte, 0);
+        local_flush_tlb_kernel_page(vaddr);
+}
+
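+/* Read back the PTE currently installed at fixmap address @vaddr. */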
+static inline pte_t get_fixmap_pte(unsigned long vaddr)
+{
+        pte_t *ptep = virt_to_kpte(vaddr);
+
+        return *ptep;
+}
+
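+/*
+ * Arch hook behind kmap_atomic_prot() for highmem pages: reuse an
+ * existing permanent kmap of the page if there is one (required for
+ * VIVT cache coherency), otherwise map the page through a per-CPU
+ * fixmap slot.
+ */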
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+{
+        unsigned int idx;
+        unsigned long vaddr;
+        void *kmap;
+        int type;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+        /*
+         * There is no cache coherency issue when non-VIVT, so force the
+         * dedicated kmap usage for better debugging purposes in that case.
+         */
+        if (!cache_is_vivt())
+                kmap = NULL;
+        else
+#endif
+        kmap = kmap_high_get(page);
+        if (kmap)
+                return kmap;
+
+        type = kmap_atomic_idx_push();
+
+        idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+        vaddr = __fix_to_virt(idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+        /*
+         * With debugging enabled, kunmap_atomic forces that entry to 0.
+         * Make sure it was indeed properly unmapped.
+         */
+        BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+#endif
+        /*
+         * When debugging is off, kunmap_atomic leaves the previous mapping
+         * in place, so the contained TLB flush ensures the TLB is updated
+         * with the new mapping.
+         */
+        set_fixmap_pte(idx, mk_pte(page, prot));
+
+        return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
+
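+/*
+ * Arch hook behind kunmap_atomic(): tear down a fixmap-based atomic
+ * mapping (flushing the D-cache first on VIVT), or drop the reference
+ * taken by kmap_high_get() for addresses in the pkmap area.
+ */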
+void kunmap_atomic_high(void *kvaddr)
+{
+        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+        int idx, type;
+
+        if (kvaddr >= (void *)FIXADDR_START) {
+                type = kmap_atomic_idx();
+                idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+
+                if (cache_is_vivt())
+                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_DEBUG_HIGHMEM
+                BUG_ON(vaddr != __fix_to_virt(idx));
+                set_fixmap_pte(idx, __pte(0));
+#else
+                (void) idx;  /* to kill a warning */
+#endif
+                kmap_atomic_idx_pop();
+        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+                /* this address was obtained through kmap_high_get() */
+                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+        }
+}
+EXPORT_SYMBOL(kunmap_atomic_high);
+
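+/*
+ * Like kmap_atomic() but takes a page frame number; lowmem pages are
+ * returned directly via page_address().
+ */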
+void *kmap_atomic_pfn(unsigned long pfn)
+{
+        unsigned long vaddr;
+        int idx, type;
+        struct page *page = pfn_to_page(pfn);
+
+        preempt_disable();
+        pagefault_disable();
+        if (!PageHighMem(page))
+                return page_address(page);
+
+        type = kmap_atomic_idx_push();
+        idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+        vaddr = __fix_to_virt(idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+        BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+#endif
+        set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
+
+        return (void *)vaddr;
+}
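
Usage note: callers never invoke kmap_atomic_high_prot() or kunmap_atomic_high()
directly. In the kernel version this patch targets, the generic kmap_atomic()
and kunmap_atomic() wrappers in <linux/highmem.h> disable preemption and page
faults, return lowmem pages straight from page_address(), and only fall through
to the arch hooks above for true highmem pages. A minimal sketch of a typical
caller follows; zero_highmem_page() is a hypothetical illustration, not part of
this patch:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical example: zero one possibly-highmem page without sleeping. */
static void zero_highmem_page(struct page *page)
{
        /* For a highmem page this routes into kmap_atomic_high_prot(). */
        void *vaddr = kmap_atomic(page);

        memset(vaddr, 0, PAGE_SIZE);

        /* For a fixmap address this routes into kunmap_atomic_high(). */
        kunmap_atomic(vaddr);
}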