2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/include/linux/highmem.h
@@ -8,9 +8,121 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
-#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
+
+#include "highmem-internal.h"
+
+/**
+ * kmap - Map a page for long term usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can only be invoked from preemptible task context because on 32bit
+ * systems with CONFIG_HIGHMEM enabled this function might sleep.
+ *
+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
+ * this returns the virtual address of the direct kernel mapping.
+ *
+ * The returned virtual address is globally visible and valid up to the
+ * point where it is unmapped via kunmap(). The pointer can be handed to
+ * other contexts.
+ *
+ * For highmem pages on 32bit systems this can be slow as the mapping space
+ * is limited and protected by a global lock. In case that there is no
+ * mapping slot available the function blocks until a slot is released via
+ * kunmap().
+ */
+static inline void *kmap(struct page *page);
+
+/**
+ * kunmap - Unmap the virtual address mapped by kmap()
+ * @page: Pointer to the page which was mapped by kmap()
+ *
+ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
+ * pages in the low memory area.
+ */
+static inline void kunmap(struct page *page);
+
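
Not part of the patch: a minimal usage sketch of the long term mapping rules documented above, assuming a task-context caller and a hypothetical src buffer:

/* Usage sketch (illustrative only): long term map of a possibly-highmem page. */
void *vaddr = kmap(page);		/* may sleep, so task context only */
memcpy(vaddr, src, PAGE_SIZE);		/* address stays valid until kunmap() */
kunmap(page);				/* release the global mapping slot */
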
+/**
+ * kmap_to_page - Get the page for a kmap'ed address
+ * @addr: The address to look up
+ *
+ * Returns: The page which is mapped to @addr.
+ */
+static inline struct page *kmap_to_page(void *addr);
+
+/**
+ * kmap_flush_unused - Flush all unused kmap mappings in order to
+ *                     remove stray mappings
+ */
+static inline void kmap_flush_unused(void);
+
+/**
+ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can be invoked from any context.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
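
Not part of the patch: a sketch of the stack discipline described above, as a hypothetical helper that copies between two possibly-highmem pages:

/* Illustrative helper: nested local mappings, unmapped in reverse order. */
static void copy_highpage_sketch(struct page *dst, struct page *src)
{
	void *d = kmap_local_page(dst);
	void *s = kmap_local_page(src);

	memcpy(d, s, PAGE_SIZE);

	kunmap_local(s);	/* last mapped, first unmapped */
	kunmap_local(d);
}
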
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
+ */
+static inline void *kmap_atomic(struct page *page);
+
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
+ * @addr: Virtual address to be unmapped
+ *
+ * Counterpart to kmap_atomic().
+ *
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * preemption.
+ */
+
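
Not part of the patch: a before/after sketch of the deprecation note, using a hypothetical page-clearing snippet:

/* Deprecated pattern: implicitly disables pagefaults and preemption. */
void *kaddr = kmap_atomic(page);
clear_page(kaddr);
kunmap_atomic(kaddr);

/* Preferred replacement per the comment above. */
kaddr = kmap_local_page(page);
clear_page(kaddr);
kunmap_local(kaddr);
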
+/* Highmem related interfaces for management code */
+static inline unsigned int nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
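
Not part of the patch: a sketch of how management code might consume these accessors, e.g. in a vmstat-style report; the pr_info() line is hypothetical:

pr_info("HighMem: %u of %lu pages free\n",
	nr_free_highpages(), totalhigh_pages());
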
 
 #ifndef ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -30,125 +142,6 @@
 }
 #endif
 
-#include <asm/kmap_types.h>
-
-#ifdef CONFIG_HIGHMEM
-#include <asm/highmem.h>
-
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-extern unsigned long totalhigh_pages;
-
-void kmap_flush_unused(void);
-
-struct page *kmap_to_page(void *addr);
-
-#else /* CONFIG_HIGHMEM */
-
-static inline unsigned int nr_free_highpages(void) { return 0; }
-
-static inline struct page *kmap_to_page(void *addr)
-{
-	return virt_to_page(addr);
-}
-
-#define totalhigh_pages 0UL
-
-#ifndef ARCH_HAS_KMAP
-static inline void *kmap(struct page *page)
-{
-	might_sleep();
-	return page_address(page);
-}
-
-static inline void kunmap(struct page *page)
-{
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
-	preempt_disable_nort();
-	pagefault_disable();
-	return page_address(page);
-}
-#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
-
-static inline void __kunmap_atomic(void *addr)
-{
-	pagefault_enable();
-	preempt_enable_nort();
-}
-
-#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
-
-#define kmap_flush_unused()	do {} while(0)
-#endif
-
-#endif /* CONFIG_HIGHMEM */
-
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-
-#ifndef CONFIG_PREEMPT_RT_FULL
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-#endif
-
-static inline int kmap_atomic_idx_push(void)
-{
-#ifndef CONFIG_PREEMPT_RT_FULL
-	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
-# ifdef CONFIG_DEBUG_HIGHMEM
-	WARN_ON_ONCE(in_irq() && !irqs_disabled());
-	BUG_ON(idx >= KM_TYPE_NR);
-# endif
-	return idx;
-#else
-	current->kmap_idx++;
-	BUG_ON(current->kmap_idx > KM_TYPE_NR);
-	return current->kmap_idx - 1;
-#endif
-}
-
-static inline int kmap_atomic_idx(void)
-{
-#ifndef CONFIG_PREEMPT_RT_FULL
-	return __this_cpu_read(__kmap_atomic_idx) - 1;
-#else
-	return current->kmap_idx - 1;
-#endif
-}
-
-static inline void kmap_atomic_idx_pop(void)
-{
-#ifndef CONFIG_PREEMPT_RT_FULL
-# ifdef CONFIG_DEBUG_HIGHMEM
-	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
-	BUG_ON(idx < 0);
-# else
-	__this_cpu_dec(__kmap_atomic_idx);
-# endif
-#else
-	current->kmap_idx--;
-# ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(current->kmap_idx < 0);
-# endif
-#endif
-}
-
-#endif
-
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
- */
-#define kunmap_atomic(addr)						\
-do {									\
-	BUILD_BUG_ON(__same_type((addr), struct page *));		\
-	__kunmap_atomic(addr);						\
-} while (0)
-
-
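
Not part of the patch: what the removed BUILD_BUG_ON(__same_type(...)) check above rejected, sketched for illustration:

void *kaddr = kmap_atomic(page);
/* kunmap_atomic(page);  <- would trip BUILD_BUG_ON at compile time:
 *                          a struct page *, not the returned address */
kunmap_atomic(kaddr);	/* correct: pass the return value of kmap_atomic() */
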
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
@@ -159,28 +152,24 @@
 }
 #endif
 
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 /**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
  * @vma: The VMA the page is to be allocated for
  * @vaddr: The virtual address the page will be inserted into
  *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or reclaimed
  *
  * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
  * implementation.
  */
 static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
-			struct vm_area_struct *vma,
-			unsigned long vaddr)
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+				   unsigned long vaddr)
 {
-	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
-			vma, vaddr);
+	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_CMA, vma, vaddr);
 
 	if (page)
 		clear_user_highpage(page, vaddr);
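
Not part of the patch: a sketch of a caller in an anonymous-fault path, with a hypothetical wrapper name:

/* Illustrative caller: get a zeroed, movable page for a user fault. */
static struct page *fault_alloc_sketch(struct vm_area_struct *vma,
				       unsigned long address)
{
	/* Page comes back already cleared via clear_user_highpage(). */
	return alloc_zeroed_user_highpage_movable(vma, address);
}
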
@@ -189,26 +178,6 @@
 }
 #endif
 
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
-				   unsigned long vaddr)
-{
-#ifndef CONFIG_CMA
-	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
-#else
-	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
-					    vaddr);
-#endif
-}
-
 static inline void clear_highpage(struct page *page)
 {
 	void *kaddr = kmap_atomic(page);
@@ -216,6 +185,14 @@
 	kunmap_atomic(kaddr);
 }
 
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
+{
+}
+
+#endif
+
 static inline void zero_user_segments(struct page *page,
 		unsigned start1, unsigned end1,
 		unsigned start2, unsigned end2)