hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/highmem.h
@@ -8,7 +8,6 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
-#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
@@ -33,11 +32,92 @@
 #include <asm/kmap_types.h>
 
 #ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+extern void kunmap_atomic_high(void *kvaddr);
 #include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+static inline void *kmap(struct page *page)
+{
+	void *addr;
+
+	might_sleep();
+	if (!PageHighMem(page))
+		addr = page_address(page);
+	else
+		addr = kmap_high(page);
+	kmap_flush_tlb((unsigned long)addr);
+	return addr;
+}
+
+void kunmap_high(struct page *page);
+
+static inline void kunmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface.  But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_atomic_high_prot(page, prot);
+}
+#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
 
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
-extern unsigned long totalhigh_pages;
+extern atomic_long_t _totalhigh_pages;
+static inline unsigned long totalhigh_pages(void)
+{
+	return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_inc(void)
+{
+	atomic_long_inc(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_dec(void)
+{
+	atomic_long_dec(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+	atomic_long_add(count, &_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_set(long val)
+{
+	atomic_long_set(&_totalhigh_pages, val);
+}
 
 void kmap_flush_unused(void);
 
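For orientation (a sketch, not part of the diff): the comment added in the hunk above spells out when to prefer the atomic interface. Assuming a sleepable caller for kmap() and a short non-sleeping path for kmap_atomic(), usage looks roughly like the fragment below; the helper names are hypothetical and <linux/highmem.h> plus <linux/string.h> are assumed.

/* Hypothetical helper: sleepable context only. */
static void fill_page_sleepable(struct page *page, const void *buf, size_t len)
{
	void *vaddr = kmap(page);	/* may sleep waiting for a pkmap slot */

	memcpy(vaddr, buf, len);
	kunmap(page);
}

/* Hypothetical helper: usable from atomic/IRQ context, must not sleep while mapped. */
static void fill_page_atomic(struct page *page, const void *buf, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* disables pagefaults and preemption */

	memcpy(vaddr, buf, len);
	kunmap_atomic(vaddr);	/* unmap by address, not by struct page * */
}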
@@ -52,87 +132,78 @@
 	return virt_to_page(addr);
 }
 
-#define totalhigh_pages 0UL
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
 
-#ifndef ARCH_HAS_KMAP
 static inline void *kmap(struct page *page)
 {
 	might_sleep();
 	return page_address(page);
 }
 
+static inline void kunmap_high(struct page *page)
+{
+}
+
 static inline void kunmap(struct page *page)
 {
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(page_address(page));
+#endif
 }
 
 static inline void *kmap_atomic(struct page *page)
 {
-	preempt_disable_nort();
+	preempt_disable();
 	pagefault_disable();
 	return page_address(page);
 }
 #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
 
-static inline void __kunmap_atomic(void *addr)
+static inline void kunmap_atomic_high(void *addr)
 {
-	pagefault_enable();
-	preempt_enable_nort();
+	/*
+	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+	 * handles re-enabling faults + preemption
+	 */
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
 }
 
 #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
 
 #define kmap_flush_unused()	do {} while(0)
-#endif
 
 #endif /* CONFIG_HIGHMEM */
 
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 DECLARE_PER_CPU(int, __kmap_atomic_idx);
-#endif
 
 static inline int kmap_atomic_idx_push(void)
 {
-#ifndef CONFIG_PREEMPT_RT_FULL
 	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
 
-# ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef CONFIG_DEBUG_HIGHMEM
 	WARN_ON_ONCE(in_irq() && !irqs_disabled());
 	BUG_ON(idx >= KM_TYPE_NR);
-# endif
-	return idx;
-#else
-	current->kmap_idx++;
-	BUG_ON(current->kmap_idx > KM_TYPE_NR);
-	return current->kmap_idx - 1;
 #endif
+	return idx;
 }
 
 static inline int kmap_atomic_idx(void)
 {
-#ifndef CONFIG_PREEMPT_RT_FULL
 	return __this_cpu_read(__kmap_atomic_idx) - 1;
-#else
-	return current->kmap_idx - 1;
-#endif
 }
 
 static inline void kmap_atomic_idx_pop(void)
 {
-#ifndef CONFIG_PREEMPT_RT_FULL
-# ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef CONFIG_DEBUG_HIGHMEM
 	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
 
 	BUG_ON(idx < 0);
-# else
-	__this_cpu_dec(__kmap_atomic_idx);
-# endif
 #else
-	current->kmap_idx--;
-# ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(current->kmap_idx < 0);
-# endif
+	__this_cpu_dec(__kmap_atomic_idx);
 #endif
 }
 
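A hedged aside on the per-CPU __kmap_atomic_idx kept in the hunk above: it behaves as a small per-CPU stack, so nested atomic kmaps on one CPU must be released in reverse order. The sketch below (hypothetical helper name, modeled on the usual copy_highpage() pattern; assumes <linux/highmem.h> and <linux/string.h>) shows the pairing; on highmem configurations each kmap_atomic() ends up pushing a slot index via kmap_atomic_idx_push() and each kunmap_atomic() pops it.

static void copy_highpage_sketch(struct page *to, struct page *from)
{
	char *vfrom = kmap_atomic(from);	/* first slot on this CPU */
	char *vto = kmap_atomic(to);		/* nested mapping: second slot */

	memcpy(vto, vfrom, PAGE_SIZE);

	kunmap_atomic(vto);			/* pop the second slot first (LIFO) */
	kunmap_atomic(vfrom);
}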
@@ -145,7 +216,9 @@
 #define kunmap_atomic(addr)                                     \
 do {                                                            \
 	BUILD_BUG_ON(__same_type((addr), struct page *));       \
-	__kunmap_atomic(addr);                                  \
+	kunmap_atomic_high(addr);                               \
+	pagefault_enable();                                     \
+	preempt_enable();                                       \
 } while (0)
 
 
@@ -159,28 +232,24 @@
 }
 #endif
 
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 /**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
  * @vma: The VMA the page is to be allocated for
  * @vaddr: The virtual address the page will be inserted into
  *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or reclaimed
  *
  * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
  * implementation.
  */
 static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
-			struct vm_area_struct *vma,
-			unsigned long vaddr)
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+				   unsigned long vaddr)
 {
-	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
-			vma, vaddr);
+	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_CMA, vma, vaddr);
 
 	if (page)
 		clear_user_highpage(page, vaddr);
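Two notes on the rewritten allocator above, both outside the diff itself: in kernels of this vintage GFP_HIGHUSER_MOVABLE is GFP_HIGHUSER | __GFP_MOVABLE, so the single alloc_page_vma() call assembles the same mask the removed CONFIG_CMA branch of __alloc_zeroed_user_highpage() used to build (see the next hunk). A caller typically looks like the hypothetical fragment below, with vma and fault_addr standing in for the usual fault-handler locals.

	struct page *page = alloc_zeroed_user_highpage_movable(vma, fault_addr);

	if (!page)
		return VM_FAULT_OOM;	/* typical anonymous-fault error path */
	/* page is already zeroed via clear_user_highpage() */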
@@ -189,26 +258,6 @@
 }
 #endif
 
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
-					unsigned long vaddr)
-{
-#ifndef CONFIG_CMA
-	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
-#else
-	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
-			vaddr);
-#endif
-}
-
 static inline void clear_highpage(struct page *page)
 {
 	void *kaddr = kmap_atomic(page);
@@ -216,6 +265,14 @@
 	kunmap_atomic(kaddr);
 }
 
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
+{
+}
+
+#endif
+
 static inline void zero_user_segments(struct page *page,
 				unsigned start1, unsigned end1,
 				unsigned start2, unsigned end2)
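zero_user_segments(), whose declaration the final context lines above show, zeroes two independent byte ranges of a page under one temporary mapping. A minimal sketch under assumed names (zero_partial_page, from and len are hypothetical) of the common "zero everything outside the written range" pattern:

/* Zero [0, from) and [from + len, PAGE_SIZE), e.g. after a short write. */
static void zero_partial_page(struct page *page, unsigned int from, unsigned int len)
{
	zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
}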