2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/highmem.h
@@ -11,119 +11,6 @@
 
 #include <asm/cacheflush.h>
 
-#include "highmem-internal.h"
-
-/**
- * kmap - Map a page for long term usage
- * @page: Pointer to the page to be mapped
- *
- * Returns: The virtual address of the mapping
- *
- * Can only be invoked from preemptible task context because on 32bit
- * systems with CONFIG_HIGHMEM enabled this function might sleep.
- *
- * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
- * this returns the virtual address of the direct kernel mapping.
- *
- * The returned virtual address is globally visible and valid up to the
- * point where it is unmapped via kunmap(). The pointer can be handed to
- * other contexts.
- *
- * For highmem pages on 32bit systems this can be slow as the mapping space
- * is limited and protected by a global lock. In case that there is no
- * mapping slot available the function blocks until a slot is released via
- * kunmap().
- */
-static inline void *kmap(struct page *page);
-
-/**
- * kunmap - Unmap the virtual address mapped by kmap()
- * @page: Pointer to the page which was mapped via kmap()
- *
- * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
- * pages in the low memory area.
- */
-static inline void kunmap(struct page *page);
-
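[Editor's note: a hedged usage sketch of the kmap()/kunmap() pair documented above; the helper name and length handling are illustrative, not from this header.]

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy @len bytes (len <= PAGE_SIZE) out of @page
 * from preemptible task context. */
static void copy_out_of_page(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap(page);	/* may sleep until a map slot is free */

	memcpy(dst, vaddr, len);
	kunmap(page);			/* counterpart takes the page, not the address */
}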
-/**
- * kmap_to_page - Get the page for a kmap'ed address
- * @addr: The address to look up
- *
- * Returns: The page which is mapped to @addr.
- */
-static inline struct page *kmap_to_page(void *addr);
-
-/**
- * kmap_flush_unused - Flush all unused kmap mappings in order to
- * remove stray mappings
- */
-static inline void kmap_flush_unused(void);
-
-/**
- * kmap_local_page - Map a page for temporary usage
- * @page: Pointer to the page to be mapped
- *
- * Returns: The virtual address of the mapping
- *
- * Can be invoked from any context.
- *
- * Requires careful handling when nesting multiple mappings because the map
- * management is stack based. The unmap has to be in the reverse order of
- * the map operation:
- *
- * addr1 = kmap_local_page(page1);
- * addr2 = kmap_local_page(page2);
- * ...
- * kunmap_local(addr2);
- * kunmap_local(addr1);
- *
- * Unmapping addr1 before addr2 is invalid and causes malfunction.
- *
- * Contrary to kmap() mappings the mapping is only valid in the context of
- * the caller and cannot be handed to other contexts.
- *
- * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
- * virtual address of the direct mapping. Only real highmem pages are
- * temporarily mapped.
- *
- * While it is significantly faster than kmap() for the highmem case it
- * comes with restrictions about the pointer validity. Only use when really
- * necessary.
- *
- * On HIGHMEM enabled systems mapping a highmem page has the side effect of
- * disabling migration in order to keep the virtual address stable across
- * preemption. No caller of kmap_local_page() can rely on this side effect.
- */
-static inline void *kmap_local_page(struct page *page);
-
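[Editor's note: to make the stack ordering rule above concrete, a hedged sketch of a hypothetical helper that maps two pages and unmaps them in reverse order.]

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy one page's contents into another. */
static void copy_between_pages(struct page *dst_page, struct page *src_page)
{
	void *src = kmap_local_page(src_page);
	void *dst = kmap_local_page(dst_page);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_local(dst);	/* last mapped, first unmapped */
	kunmap_local(src);
}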
-/**
- * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
- * @page: Pointer to the page to be mapped
- *
- * Returns: The virtual address of the mapping
- *
- * Effectively a wrapper around kmap_local_page() which disables pagefaults
- * and preemption.
- *
- * Do not use in new code. Use kmap_local_page() instead.
- */
-static inline void *kmap_atomic(struct page *page);
-
-/**
- * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
- * @addr: Virtual address to be unmapped
- *
- * Counterpart to kmap_atomic().
- *
- * Effectively a wrapper around kunmap_local() which additionally undoes
- * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
- * preemption.
- */
-
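[Editor's note: since the comments above deprecate kmap_atomic() in favor of kmap_local_page(), a hedged before/after sketch of the usual one-for-one conversion; both functions are hypothetical.]

#include <linux/highmem.h>
#include <linux/string.h>

/* Deprecated style: pagefaults and preemption are disabled implicitly. */
static void zero_page_deprecated(struct page *page)
{
	void *addr = kmap_atomic(page);

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr);
}

/* Preferred style: same mapping, no implicit side effects. */
static void zero_page_preferred(struct page *page)
{
	void *addr = kmap_local_page(page);

	memset(addr, 0, PAGE_SIZE);
	kunmap_local(addr);
}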
-/* Highmem related interfaces for management code */
-static inline unsigned int nr_free_highpages(void);
-static inline unsigned long totalhigh_pages(void);
-
 #ifndef ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
@@ -142,6 +29,199 @@
 }
 #endif
 
+#include <asm/kmap_types.h>
+
+#ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+extern void kunmap_atomic_high(void *kvaddr);
+#include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+static inline void *kmap(struct page *page)
+{
+	void *addr;
+
+	might_sleep();
+	if (!PageHighMem(page))
+		addr = page_address(page);
+	else
+		addr = kmap_high(page);
+	kmap_flush_tlb((unsigned long)addr);
+	return addr;
+}
+
+void kunmap_high(struct page *page);
+
+static inline void kunmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_atomic_high_prot(page, prot);
+}
+#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
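[Editor's note: a hedged sketch of the calling convention this wrapper enforces — the section between map and unmap must not sleep, since preemption and pagefaults are disabled. The helper is hypothetical.]

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: fill a possibly-highmem page without sleeping. */
static void fill_page_atomic(struct page *page, int val)
{
	char *addr = kmap_atomic(page);	/* kmap_atomic_prot(page, kmap_prot) */

	memset(addr, val, PAGE_SIZE);	/* bounded work; no sleeping allowed here */
	kunmap_atomic(addr);		/* re-enables pagefaults, then preemption */
}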
+
+/* declarations for linux/mm/highmem.c */
+unsigned int nr_free_highpages(void);
+extern atomic_long_t _totalhigh_pages;
+static inline unsigned long totalhigh_pages(void)
+{
+	return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_inc(void)
+{
+	atomic_long_inc(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_dec(void)
+{
+	atomic_long_dec(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+	atomic_long_add(count, &_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_set(long val)
+{
+	atomic_long_set(&_totalhigh_pages, val);
+}
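[Editor's note: for context on how these counters are driven, a sketch loosely modeled on free_highmem_page() in mm/page_alloc.c of this era; reproduced from memory, so treat the details as an approximation.]

/* Loosely modeled on free_highmem_page(): hand a reserved highmem
 * page to the page allocator and account for it. */
void give_highmem_page_to_allocator(struct page *page)
{
	__free_reserved_page(page);	/* release a reserved boot-time page */
	totalram_pages_inc();		/* it now counts toward total RAM */
	totalhigh_pages_inc();		/* ...and toward total highmem */
}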
+
+void kmap_flush_unused(void);
+
+struct page *kmap_to_page(void *addr);
+
+#else /* CONFIG_HIGHMEM */
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+
+static inline struct page *kmap_to_page(void *addr)
+{
+	return virt_to_page(addr);
+}
+
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
+
+static inline void *kmap(struct page *page)
+{
+	might_sleep();
+	return page_address(page);
+}
+
+static inline void kunmap_high(struct page *page)
+{
+}
+
+static inline void kunmap(struct page *page)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(page_address(page));
+#endif
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+	preempt_disable();
+	pagefault_disable();
+	return page_address(page);
+}
+#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
+
+static inline void kunmap_atomic_high(void *addr)
+{
+	/*
+	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+	 * handles re-enabling faults + preemption
+	 */
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
+#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
+
+#define kmap_flush_unused()	do {} while(0)
+
+#endif /* CONFIG_HIGHMEM */
+
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+	WARN_ON_ONCE(in_irq() && !irqs_disabled());
+	BUG_ON(idx >= KM_TYPE_NR);
+#endif
+	return idx;
+}
+
+static inline int kmap_atomic_idx(void)
+{
+	return __this_cpu_read(__kmap_atomic_idx) - 1;
+}
+
+static inline void kmap_atomic_idx_pop(void)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
+	BUG_ON(idx < 0);
+#else
+	__this_cpu_dec(__kmap_atomic_idx);
+#endif
+}
+
+#endif
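[Editor's note: these per-CPU index helpers exist so each architecture's atomic-kmap implementation can claim a per-CPU fixmap slot. Below, a condensed sketch modeled from memory on the x86_32 kmap_atomic_high_prot(); details such as kmap_pte are arch-specific.]

/* Condensed from arch/x86/mm/highmem_32.c of the same era (approximate). */
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	type = kmap_atomic_idx_push();		/* grab the next stack slot */
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));	/* slot must be free */
	set_pte(kmap_pte - idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}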
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr) \
+do { \
+	BUILD_BUG_ON(__same_type((addr), struct page *)); \
+	kunmap_atomic_high(addr); \
+	pagefault_enable(); \
+	preempt_enable(); \
+} while (0)
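[Editor's note: the BUILD_BUG_ON(__same_type(...)) line turns the classic misuse into a compile-time error rather than silent corruption; a brief hedged illustration.]

void *addr = kmap_atomic(page);

/* ... access the mapping ... */
kunmap_atomic(addr);	/* correct: pass the address kmap_atomic() returned */
/* kunmap_atomic(page);   would not compile: the macro rejects struct page * */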
+
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)