@@ -11,6 +11,119 @@
 
 #include <asm/cacheflush.h>
 
+#include "highmem-internal.h"
+
+/**
+ * kmap - Map a page for long term usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can only be invoked from preemptible task context because on 32bit
+ * systems with CONFIG_HIGHMEM enabled this function might sleep.
+ *
+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
+ * this returns the virtual address of the direct kernel mapping.
+ *
+ * The returned virtual address is globally visible and valid up to the
+ * point where it is unmapped via kunmap(). The pointer can be handed to
+ * other contexts.
+ *
+ * For highmem pages on 32bit systems this can be slow as the mapping space
+ * is limited and protected by a global lock. In case that there is no
+ * mapping slot available the function blocks until a slot is released via
+ * kunmap().
+ */
+static inline void *kmap(struct page *page);
+
+/**
+ * kunmap - Unmap the virtual address mapped by kmap()
+ * @page: Pointer to the page which was mapped by kmap()
+ *
+ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
+ * pages in the low memory area.
+ */
+static inline void kunmap(struct page *page);
+
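Usage illustration (editor's sketch, not part of the patch; page_checksum() is a hypothetical helper, crc32() is the kernel's <linux/crc32.h> interface):

	#include <linux/crc32.h>
	#include <linux/highmem.h>

	/*
	 * Hypothetical helper: checksum a possibly-highmem page. Only valid
	 * in preemptible task context, as kmap() may sleep on 32bit HIGHMEM.
	 */
	static u32 page_checksum(struct page *page)
	{
		void *vaddr = kmap(page);	/* may block on a free slot */
		u32 sum = crc32(0, vaddr, PAGE_SIZE);

		kunmap(page);			/* release the mapping slot */
		return sum;
	}

The returned address stays valid until kunmap(), so it could equally be stashed and used from another context before being released.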
+/**
+ * kmap_to_page - Get the page for a kmap'ed address
+ * @addr: The address to look up
+ *
+ * Returns: The page which is mapped to @addr.
+ */
+static inline struct page *kmap_to_page(void *addr);
+
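Illustrative round trip (editor's sketch, not part of the patch):

	void *vaddr = kmap(page);

	/* The reverse lookup returns the page that was mapped. */
	WARN_ON(kmap_to_page(vaddr) != page);
	kunmap(page);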
+/**
+ * kmap_flush_unused - Flush all unused kmap mappings in order to
+ *	remove stray mappings
+ */
+static inline void kmap_flush_unused(void);
+
+/**
+ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can be invoked from any context.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
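To make the stack discipline concrete, a minimal sketch (not part of the patch; copy_page_local() is a hypothetical helper name):

	/*
	 * Hypothetical helper: copy src into dst via short-lived local
	 * mappings. Usable from any context; unmap order is strictly LIFO.
	 */
	static void copy_page_local(struct page *dst, struct page *src)
	{
		void *d = kmap_local_page(dst);
		void *s = kmap_local_page(src);

		memcpy(d, s, PAGE_SIZE);

		kunmap_local(s);	/* mapped last, unmapped first */
		kunmap_local(d);
	}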
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
+ */
+static inline void *kmap_atomic(struct page *page);
+
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
+ * @addr: Virtual address to be unmapped
+ *
+ * Counterpart to kmap_atomic().
+ *
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * preemption.
+ */
+
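A typical conversion away from the deprecated call is mechanical when the caller never relied on the implicit side effects (illustrative sketch, not from the patch):

	/* Deprecated pattern: */
	addr = kmap_atomic(page);
	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr);

	/*
	 * Preferred replacement; add explicit pagefault_disable() /
	 * preempt_disable() around it only if the code actually depended
	 * on kmap_atomic() disabling them:
	 */
	addr = kmap_local_page(page);
	memset(addr, 0, PAGE_SIZE);
	kunmap_local(addr);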
+/* Highmem related interfaces for management code */
+static inline unsigned int nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
+
 #ifndef ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
@@ -29,106 +142,6 @@
 }
 #endif
 
-#include <asm/kmap_types.h>
-
-#ifdef CONFIG_HIGHMEM
-#include <asm/highmem.h>
-
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-extern unsigned long totalhigh_pages;
-
-void kmap_flush_unused(void);
-
-struct page *kmap_to_page(void *addr);
-
-#else /* CONFIG_HIGHMEM */
-
-static inline unsigned int nr_free_highpages(void) { return 0; }
-
-static inline struct page *kmap_to_page(void *addr)
-{
-	return virt_to_page(addr);
-}
-
-#define totalhigh_pages 0UL
-
-#ifndef ARCH_HAS_KMAP
-static inline void *kmap(struct page *page)
-{
-	might_sleep();
-	return page_address(page);
-}
-
-static inline void kunmap(struct page *page)
-{
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
-	preempt_disable();
-	pagefault_disable();
-	return page_address(page);
-}
-#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
-
-static inline void __kunmap_atomic(void *addr)
-{
-	pagefault_enable();
-	preempt_enable();
-}
-
-#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
-
-#define kmap_flush_unused()	do {} while(0)
-#endif
-
-#endif /* CONFIG_HIGHMEM */
-
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-
-static inline int kmap_atomic_idx_push(void)
-{
-	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-	WARN_ON_ONCE(in_irq() && !irqs_disabled());
-	BUG_ON(idx >= KM_TYPE_NR);
-#endif
-	return idx;
-}
-
-static inline int kmap_atomic_idx(void)
-{
-	return __this_cpu_read(__kmap_atomic_idx) - 1;
-}
-
-static inline void kmap_atomic_idx_pop(void)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
-	BUG_ON(idx < 0);
-#else
-	__this_cpu_dec(__kmap_atomic_idx);
-#endif
-}
-
-#endif
-
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
- */
-#define kunmap_atomic(addr)					\
-do {								\
-	BUILD_BUG_ON(__same_type((addr), struct page *));	\
-	__kunmap_atomic(addr);					\
-} while (0)
-
-
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
@@ -139,28 +152,24 @@
 }
 #endif
 
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 /**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
  * @vma: The VMA the page is to be allocated for
  * @vaddr: The virtual address the page will be inserted into
  *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or be reclaimed
 *
 * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
 * implementation.
 */
 static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
-			struct vm_area_struct *vma,
-			unsigned long vaddr)
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+				   unsigned long vaddr)
 {
-	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
-			vma, vaddr);
+	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_CMA, vma, vaddr);
 
 	if (page)
 		clear_user_highpage(page, vaddr);
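For context, the main user of this helper is the anonymous page fault path; a simplified sketch of that call pattern (modeled on do_anonymous_page() in mm/memory.c; the surrounding handler logic is elided):

	/*
	 * Allocate a zeroed, movable highmem page for a faulting
	 * anonymous mapping; vma and vmf come from the fault handler.
	 */
	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	if (!page)
		return VM_FAULT_OOM;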
@@ -169,26 +178,6 @@
 }
 #endif
 
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
-				   unsigned long vaddr)
-{
-#ifndef CONFIG_CMA
-	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
-#else
-	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
-					    vaddr);
-#endif
-}
-
 static inline void clear_highpage(struct page *page)
 {
	void *kaddr = kmap_atomic(page);
@@ -196,6 +185,14 @@
 	kunmap_atomic(kaddr);
 }
 
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
+{
+}
+
+#endif
+
 static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
---|