2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/highmem.h
@@ -32,11 +32,92 @@
 #include <asm/kmap_types.h>

 #ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+extern void kunmap_atomic_high(void *kvaddr);
 #include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+static inline void *kmap(struct page *page)
+{
+        void *addr;
+
+        might_sleep();
+        if (!PageHighMem(page))
+                addr = page_address(page);
+        else
+                addr = kmap_high(page);
+        kmap_flush_tlb((unsigned long)addr);
+        return addr;
+}
+
+void kunmap_high(struct page *page);
+
+static inline void kunmap(struct page *page)
+{
+        might_sleep();
+        if (!PageHighMem(page))
+                return;
+        kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+        preempt_disable();
+        pagefault_disable();
+        if (!PageHighMem(page))
+                return page_address(page);
+        return kmap_atomic_high_prot(page, prot);
+}
+#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)

 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
-extern unsigned long totalhigh_pages;
+extern atomic_long_t _totalhigh_pages;
+static inline unsigned long totalhigh_pages(void)
+{
+        return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_inc(void)
+{
+        atomic_long_inc(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_dec(void)
+{
+        atomic_long_dec(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+        atomic_long_add(count, &_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_set(long val)
+{
+        atomic_long_set(&_totalhigh_pages, val);
+}

 void kmap_flush_unused(void);

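For reference (not part of the patch), the new inline kmap()/kunmap() pair declared above is used from sleepable context roughly as follows; the function and parameter names in this sketch are made up:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Illustrative only: copy a buffer into a possibly-highmem page from a
     * context that is allowed to sleep (kmap() calls might_sleep()). */
    static void copy_buf_to_page(struct page *page, const void *buf, size_t len)
    {
            void *vaddr = kmap(page);       /* lowmem pages resolve via page_address() */

            memcpy(vaddr, buf, len);
            kunmap(page);                   /* no-op unless the page is highmem */
    }
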
@@ -51,17 +132,23 @@
         return virt_to_page(addr);
 }

-#define totalhigh_pages 0UL
+static inline unsigned long totalhigh_pages(void) { return 0UL; }

-#ifndef ARCH_HAS_KMAP
 static inline void *kmap(struct page *page)
 {
         might_sleep();
         return page_address(page);
 }

+static inline void kunmap_high(struct page *page)
+{
+}
+
 static inline void kunmap(struct page *page)
 {
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+        kunmap_flush_on_unmap(page_address(page));
+#endif
 }

 static inline void *kmap_atomic(struct page *page)
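Note for readers: totalhigh_pages is now read through a function call in both configurations, so callers compile unchanged whether CONFIG_HIGHMEM is enabled or not. A minimal sketch (the message text and function name are invented):

    #include <linux/highmem.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    static void __init report_highmem(void)
    {
            /* returns 0 when CONFIG_HIGHMEM=n thanks to the stub above */
            pr_info("HighMem: %lu pages\n", totalhigh_pages());
    }
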
@@ -72,16 +159,20 @@
 }
 #define kmap_atomic_prot(page, prot) kmap_atomic(page)

-static inline void __kunmap_atomic(void *addr)
+static inline void kunmap_atomic_high(void *addr)
 {
-        pagefault_enable();
-        preempt_enable();
+        /*
+         * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+         * handles re-enabling faults + preemption
+         */
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+        kunmap_flush_on_unmap(addr);
+#endif
 }

 #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))

 #define kmap_flush_unused() do {} while(0)
-#endif

 #endif /* CONFIG_HIGHMEM */

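The ARCH_HAS_FLUSH_ON_KUNMAP hook used above is expected to come from the architecture's own headers. A purely hypothetical sketch of what an architecture would provide (the flush helper name is invented, not a real kernel API):

    /* arch/<arch>/include/asm/... (hypothetical) */
    #define ARCH_HAS_FLUSH_ON_KUNMAP
    static inline void kunmap_flush_on_unmap(void *addr)
    {
            my_arch_flush_dcache_area(addr, PAGE_SIZE);     /* invented helper */
    }
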
@@ -125,7 +216,9 @@
 #define kunmap_atomic(addr) \
 do { \
         BUILD_BUG_ON(__same_type((addr), struct page *)); \
-        __kunmap_atomic(addr); \
+        kunmap_atomic_high(addr); \
+        pagefault_enable(); \
+        preempt_enable(); \
 } while (0)


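For comparison with the sleepable kmap() path, the atomic pair is used as below; nothing between kmap_atomic() and kunmap_atomic() may sleep because pagefaults and preemption are disabled across the mapping. Sketch only, the function name is invented:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void zero_page_atomic(struct page *page)
    {
            void *vaddr = kmap_atomic(page);        /* disables pagefaults + preemption */

            memset(vaddr, 0, PAGE_SIZE);
            kunmap_atomic(vaddr);   /* arch flush first, then faults/preemption re-enabled */
    }
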
@@ -139,28 +232,24 @@
 }
 #endif

-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 /**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
  * @vma: The VMA the page is to be allocated for
  * @vaddr: The virtual address the page will be inserted into
  *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or reclaimed
  *
  * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
  * implementation.
  */
 static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
-                        struct vm_area_struct *vma,
-                        unsigned long vaddr)
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+                                   unsigned long vaddr)
 {
-        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
-                        vma, vaddr);
+        struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_CMA, vma, vaddr);

         if (page)
                 clear_user_highpage(page, vaddr);
@@ -169,26 +258,6 @@
 }
 #endif

-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
-                        unsigned long vaddr)
-{
-#ifndef CONFIG_CMA
-        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
-#else
-        return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
-                        vaddr);
-#endif
-}
-
 static inline void clear_highpage(struct page *page)
 {
         void *kaddr = kmap_atomic(page);
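With the old wrapper removed, alloc_zeroed_user_highpage_movable() becomes the single entry point and already folds in the CMA flag. A rough sketch of a fault-path style caller (function name invented, surrounding error handling elided):

    #include <linux/highmem.h>
    #include <linux/mm.h>

    static struct page *new_zeroed_anon_page(struct vm_area_struct *vma,
                                             unsigned long addr)
    {
            /* allocates with GFP_HIGHUSER_MOVABLE | __GFP_CMA and zeroes the page */
            return alloc_zeroed_user_highpage_movable(vma, addr);
    }
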
@@ -196,6 +265,14 @@
         kunmap_atomic(kaddr);
 }

+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
+{
+}
+
+#endif
+
 static inline void zero_user_segments(struct page *page,
                 unsigned start1, unsigned end1,
                 unsigned start2, unsigned end2)