@@ -1,27 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * This file contains common generic and tag-based KASAN code.
+ * This file contains common KASAN code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some code borrowed from https://github.com/xairy/kasan-prototype by
  *        Andrey Konovalov <andreyknvl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 */
 
-#define __KASAN_INTERNAL
-
 #include <linux/export.h>
-#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
-#include <linux/kmemleak.h>
 #include <linux/linkage.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
@@ -34,60 +25,28 @@
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/vmalloc.h>
 #include <linux/bug.h>
-#include <linux/uaccess.h>
 
 #include "kasan.h"
 #include "../slab.h"
 
-static inline int in_irqentry_text(unsigned long ptr)
-{
-	return (ptr >= (unsigned long)&__irqentry_text_start &&
-		ptr < (unsigned long)&__irqentry_text_end) ||
-		(ptr >= (unsigned long)&__softirqentry_text_start &&
-		 ptr < (unsigned long)&__softirqentry_text_end);
-}
-
-static inline void filter_irq_stacks(struct stack_trace *trace)
-{
-	int i;
-
-	if (!trace->nr_entries)
-		return;
-	for (i = 0; i < trace->nr_entries; i++)
-		if (in_irqentry_text(trace->entries[i])) {
-			/* Include the irqentry function into the stack. */
-			trace->nr_entries = i + 1;
-			break;
-		}
-}
-
-static inline depot_stack_handle_t save_stack(gfp_t flags)
+depot_stack_handle_t kasan_save_stack(gfp_t flags)
 {
 	unsigned long entries[KASAN_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = KASAN_STACK_DEPTH,
-		.skip = 0
-	};
+	unsigned int nr_entries;
 
-	save_stack_trace(&trace);
-	filter_irq_stacks(&trace);
-	if (trace.nr_entries != 0 &&
-	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
-		trace.nr_entries--;
-
-	return depot_save_stack(&trace, flags);
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+	nr_entries = filter_irq_stacks(entries, nr_entries);
+	return stack_depot_save(entries, nr_entries, flags);
 }
 
-static inline void set_track(struct kasan_track *track, gfp_t flags)
+void kasan_set_track(struct kasan_track *track, gfp_t flags)
 {
 	track->pid = current->pid;
-	track->stack = save_stack(flags);
+	track->stack = kasan_save_stack(flags);
 }
 
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 void kasan_enable_current(void)
 {
 	current->kasan_depth++;
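The hunk above replaces the open-coded struct stack_trace handling with the kernel's newer stacktrace/stackdepot interfaces and drops the local filter_irq_stacks() copy in favor of the shared helper. A minimal sketch of the same capture-filter-deduplicate pattern outside KASAN (the depth bound and the demo_* names are illustrative, not kernel API):

#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define DEMO_STACK_DEPTH 64	/* illustrative bound, stands in for KASAN_STACK_DEPTH */

static depot_stack_handle_t demo_save_stack(gfp_t flags)
{
	unsigned long entries[DEMO_STACK_DEPTH];
	unsigned int nr_entries;

	/* Record the current call chain; the return value is the frame count. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* Truncate at the first irq/softirq entry frame, as the hunk does. */
	nr_entries = filter_irq_stacks(entries, nr_entries);
	/* Deduplicate into the global stack depot; returns 0 on failure. */
	return stack_depot_save(entries, nr_entries, flags);
}

static void demo_print_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	/* Fetch the deduplicated trace back out of the depot and print it. */
	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}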
@@ -97,101 +56,20 @@
 {
 	current->kasan_depth--;
 }
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
-void kasan_check_read(const volatile void *p, unsigned int size)
+void __kasan_unpoison_range(const void *address, size_t size)
 {
-	check_memory_region((unsigned long)p, size, false, _RET_IP_);
-}
-EXPORT_SYMBOL(kasan_check_read);
-
-void kasan_check_write(const volatile void *p, unsigned int size)
-{
-	check_memory_region((unsigned long)p, size, true, _RET_IP_);
-}
-EXPORT_SYMBOL(kasan_check_write);
-
-#undef memset
-void *memset(void *addr, int c, size_t len)
-{
-	check_memory_region((unsigned long)addr, len, true, _RET_IP_);
-
-	return __memset(addr, c, len);
+	kasan_unpoison(address, size, false);
 }
 
-#undef memmove
-void *memmove(void *dest, const void *src, size_t len)
-{
-	check_memory_region((unsigned long)src, len, false, _RET_IP_);
-	check_memory_region((unsigned long)dest, len, true, _RET_IP_);
-
-	return __memmove(dest, src, len);
-}
-
-#undef memcpy
-void *memcpy(void *dest, const void *src, size_t len)
-{
-	check_memory_region((unsigned long)src, len, false, _RET_IP_);
-	check_memory_region((unsigned long)dest, len, true, _RET_IP_);
-
-	return __memcpy(dest, src, len);
-}
-
-/*
- * Poisons the shadow memory for 'size' bytes starting from 'addr'.
- * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
- */
-void kasan_poison_shadow(const void *address, size_t size, u8 value)
-{
-	void *shadow_start, *shadow_end;
-
-	/*
-	 * Perform shadow offset calculation based on untagged address, as
-	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
-	 * addresses to this function.
-	 */
-	address = reset_tag(address);
-
-	shadow_start = kasan_mem_to_shadow(address);
-	shadow_end = kasan_mem_to_shadow(address + size);
-
-	__memset(shadow_start, value, shadow_end - shadow_start);
-}
-
-void kasan_unpoison_shadow(const void *address, size_t size)
-{
-	u8 tag = get_tag(address);
-
-	/*
-	 * Perform shadow offset calculation based on untagged address, as
-	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
-	 * addresses to this function.
-	 */
-	address = reset_tag(address);
-
-	kasan_poison_shadow(address, size, tag);
-
-	if (size & KASAN_SHADOW_MASK) {
-		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-
-		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-			*shadow = tag;
-		else
-			*shadow = size & KASAN_SHADOW_MASK;
-	}
-}
-
-static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
-{
-	void *base = task_stack_page(task);
-	size_t size = sp - base;
-
-	kasan_unpoison_shadow(base, size);
-}
-
+#ifdef CONFIG_KASAN_STACK
 /* Unpoison the entire stack for a task. */
 void kasan_unpoison_task_stack(struct task_struct *task)
 {
-	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
+	void *base = task_stack_page(task);
+
+	kasan_unpoison(base, THREAD_SIZE, false);
 }
 
 /* Unpoison the stack for the current task beyond a watermark sp value. */
@@ -204,25 +82,22 @@
 	 */
 	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
 
-	kasan_unpoison_shadow(base, watermark - base);
+	kasan_unpoison(base, watermark - base, false);
 }
+#endif /* CONFIG_KASAN_STACK */
 
 /*
- * Clear all poison for the region between the current SP and a provided
- * watermark value, as is sometimes required prior to hand-crafted asm function
- * returns in the middle of functions.
+ * Only allow cache merging when stack collection is disabled and no metadata
+ * is present.
  */
-void kasan_unpoison_stack_above_sp_to(const void *watermark)
+slab_flags_t __kasan_never_merge(void)
 {
-	const void *sp = __builtin_frame_address(0);
-	size_t size = watermark - sp;
-
-	if (WARN_ON(sp > watermark))
-		return;
-	kasan_unpoison_shadow(sp, size);
+	if (kasan_stack_collection_enabled())
+		return SLAB_KASAN;
+	return 0;
 }
 
-void kasan_alloc_pages(struct page *page, unsigned int order)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 {
 	u8 tag;
 	unsigned long i;
@@ -230,18 +105,17 @@
 	if (unlikely(PageHighMem(page)))
 		return;
 
-	tag = random_tag();
+	tag = kasan_random_tag();
 	for (i = 0; i < (1 << order); i++)
 		page_kasan_tag_set(page + i, tag);
-	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
-void kasan_free_pages(struct page *page, unsigned int order)
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
 {
 	if (likely(!PageHighMem(page)))
-		kasan_poison_shadow(page_address(page),
-				    PAGE_SIZE << order,
-				    KASAN_FREE_PAGE);
+		kasan_poison(page_address(page), PAGE_SIZE << order,
+			     KASAN_FREE_PAGE, init);
 }
 
 /*
@@ -250,9 +124,6 @@
 	 */
 static inline unsigned int optimal_redzone(unsigned int object_size)
 {
-	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		return 0;
-
 	return
 		object_size <= 64 - 16 ? 16 :
 		object_size <= 128 - 32 ? 32 :
@@ -263,90 +134,135 @@
 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
-			slab_flags_t *flags)
+void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+			  slab_flags_t *flags)
 {
-	unsigned int orig_size = *size;
-	unsigned int redzone_size;
-	int redzone_adjust;
+	unsigned int ok_size;
+	unsigned int optimal_size;
 
-	/* Add alloc meta. */
+	/*
+	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
+	 * KASAN. Currently this flag is used in two places:
+	 * 1. In slab_ksize() when calculating the size of the accessible
+	 *    memory within the object.
+	 * 2. In slab_common.c to prevent merging of sanitized caches.
+	 */
+	*flags |= SLAB_KASAN;
+
+	if (!kasan_stack_collection_enabled())
+		return;
+
+	ok_size = *size;
+
+	/* Add alloc meta into redzone. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
 
-	/* Add free meta. */
-	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
-	     cache->object_size < sizeof(struct kasan_free_meta))) {
-		cache->kasan_info.free_meta_offset = *size;
-		*size += sizeof(struct kasan_free_meta);
+	/*
+	 * If alloc meta doesn't fit, don't add it.
+	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
+	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
+	 * larger sizes.
+	 */
+	if (*size > KMALLOC_MAX_SIZE) {
+		cache->kasan_info.alloc_meta_offset = 0;
+		*size = ok_size;
+		/* Continue, since free meta might still fit. */
 	}
 
-	redzone_size = optimal_redzone(cache->object_size);
-	redzone_adjust = redzone_size - (*size - cache->object_size);
-	if (redzone_adjust > 0)
-		*size += redzone_adjust;
-
-	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
-			max(*size, cache->object_size + redzone_size));
-
-	/*
-	 * If the metadata doesn't fit, don't enable KASAN at all.
-	 */
-	if (*size <= cache->kasan_info.alloc_meta_offset ||
-	    *size <= cache->kasan_info.free_meta_offset) {
-		cache->kasan_info.alloc_meta_offset = 0;
-		cache->kasan_info.free_meta_offset = 0;
-		*size = orig_size;
+	/* Only the generic mode uses free meta or flexible redzones. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
 		return;
 	}
 
-	*flags |= SLAB_KASAN;
+	/*
+	 * Add free meta into redzone when it's not possible to store
+	 * it in the object. This is the case when:
+	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
+	 *    be touched after it was freed, or
+	 * 2. Object has a constructor, which means it's expected to
+	 *    retain its content until the next allocation, or
+	 * 3. Object is too small.
+	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
+	 */
+	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
+	    cache->object_size < sizeof(struct kasan_free_meta)) {
+		ok_size = *size;
+
+		cache->kasan_info.free_meta_offset = *size;
+		*size += sizeof(struct kasan_free_meta);
+
+		/* If free meta doesn't fit, don't add it. */
+		if (*size > KMALLOC_MAX_SIZE) {
+			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+			*size = ok_size;
+		}
+	}
+
+	/* Calculate size with optimal redzone. */
+	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
+	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
+	if (optimal_size > KMALLOC_MAX_SIZE)
+		optimal_size = KMALLOC_MAX_SIZE;
+	/* Use optimal size if the size with added metas is not large enough. */
+	if (*size < optimal_size)
+		*size = optimal_size;
 }
 
-size_t kasan_metadata_size(struct kmem_cache *cache)
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
 {
+	cache->kasan_info.is_kmalloc = true;
+}
+
+size_t __kasan_metadata_size(struct kmem_cache *cache)
+{
+	if (!kasan_stack_collection_enabled())
+		return 0;
 	return (cache->kasan_info.alloc_meta_offset ?
 		sizeof(struct kasan_alloc_meta) : 0) +
 		(cache->kasan_info.free_meta_offset ?
 		sizeof(struct kasan_free_meta) : 0);
 }
 
-struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
-					const void *object)
+struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
+					      const void *object)
 {
-	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
-	return (void *)object + cache->kasan_info.alloc_meta_offset;
+	if (!cache->kasan_info.alloc_meta_offset)
+		return NULL;
+	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
 }
 
-struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
-				      const void *object)
+#ifdef CONFIG_KASAN_GENERIC
+struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
+					    const void *object)
 {
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
-	return (void *)object + cache->kasan_info.free_meta_offset;
+	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
+		return NULL;
+	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
 }
+#endif
 
-void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct page *page)
 {
 	unsigned long i;
 
-	for (i = 0; i < (1 << compound_order(page)); i++)
+	for (i = 0; i < compound_nr(page); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
-			KASAN_KMALLOC_REDZONE);
+	kasan_poison(page_address(page), page_size(page),
+		     KASAN_KMALLOC_REDZONE, false);
 }
 
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 {
-	kasan_unpoison_shadow(object, cache->object_size);
+	kasan_unpoison(object, cache->object_size, false);
 }
 
-void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
 {
-	kasan_poison_shadow(object,
-			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
-			KASAN_KMALLOC_REDZONE);
+	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
+		     KASAN_KMALLOC_REDZONE, false);
 }
 
 /*
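To make the sizing logic in __kasan_cache_create() above concrete, here is a worked example for a hypothetical generic-mode cache. The 16-byte sizeof(struct kasan_alloc_meta) is an assumption for illustration; the hunk itself only guarantees that struct kasan_free_meta fits in 32 bytes (see the BUILD_BUG_ON above):

/*
 * Hypothetical: object_size = 96, no constructor, !SLAB_TYPESAFE_BY_RCU,
 * stack collection enabled, sizeof(struct kasan_alloc_meta) assumed 16.
 *
 *   alloc_meta_offset = 96, *size = 96 + 16 = 112 (fits KMALLOC_MAX_SIZE)
 *   free meta fits inside the object (96 >= sizeof(struct kasan_free_meta),
 *   which is at most 32), so free_meta_offset stays 0 and *size is unchanged
 *   optimal_size = 96 + optimal_redzone(96) = 96 + 32 = 128
 *   112 < 128, so the cache is created with *size = 128
 */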
@@ -363,24 +279,18 @@
  * based on objects indexes, so that objects that are next to each other
  * get different tags.
  */
-static u8 assign_tag(struct kmem_cache *cache, const void *object,
-			bool init, bool keep_tag)
+static inline u8 assign_tag(struct kmem_cache *cache,
+			    const void *object, bool init)
 {
-	/*
-	 * 1. When an object is kmalloc()'ed, two hooks are called:
-	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
-	 *    tag only in the first one.
-	 * 2. We reuse the same tag for krealloc'ed objects.
-	 */
-	if (keep_tag)
-		return get_tag(object);
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		return 0xff;
 
 	/*
 	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
 	 * set, assign a tag when the object is being allocated (init == false).
 	 */
 	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
-		return init ? KASAN_TAG_KERNEL : random_tag();
+		return init ? KASAN_TAG_KERNEL : kasan_random_tag();
 
 	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
 #ifdef CONFIG_SLAB
@@ -391,54 +301,39 @@
 	 * For SLUB assign a random tag during slab creation, otherwise reuse
 	 * the already assigned tag.
 	 */
-	return init ? random_tag() : get_tag(object);
+	return init ? kasan_random_tag() : get_tag(object);
 #endif
 }
 
-void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
 					const void *object)
 {
-	struct kasan_alloc_meta *alloc_info;
+	struct kasan_alloc_meta *alloc_meta;
 
-	if (!(cache->flags & SLAB_KASAN))
-		return (void *)object;
+	if (kasan_stack_collection_enabled()) {
+		alloc_meta = kasan_get_alloc_meta(cache, object);
+		if (alloc_meta)
+			__memset(alloc_meta, 0, sizeof(*alloc_meta));
+	}
 
-	alloc_info = get_alloc_info(cache, object);
-	__memset(alloc_info, 0, sizeof(*alloc_info));
-
-	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		object = set_tag(object,
-				assign_tag(cache, object, true, false));
+	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
+	object = set_tag(object, assign_tag(cache, object, true));
 
 	return (void *)object;
 }
 
-static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
+static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
+				unsigned long ip, bool quarantine, bool init)
 {
-	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
-		return shadow_byte < 0 ||
-			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
-
-	/* else CONFIG_KASAN_SW_TAGS: */
-	if ((u8)shadow_byte == KASAN_TAG_INVALID)
-		return true;
-	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
-		return true;
-
-	return false;
-}
-
-static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
-			unsigned long ip, bool quarantine)
-{
-	s8 shadow_byte;
 	u8 tag;
 	void *tagged_object;
-	unsigned long rounded_up_size;
 
 	tag = get_tag(object);
 	tagged_object = object;
-	object = reset_tag(object);
+	object = kasan_reset_tag(object);
+
+	if (is_kfence_address(object))
+		return false;
 
 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 	    object)) {
@@ -450,292 +345,241 @@
 	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
 		return false;
 
-	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
-	if (shadow_invalid(tag, shadow_byte)) {
+	if (!kasan_byte_accessible(tagged_object)) {
 		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
-	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
-	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
+		     KASAN_KMALLOC_FREE, init);
 
-	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
-	    unlikely(!(cache->flags & SLAB_KASAN)))
+	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
 		return false;
 
-	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
-	quarantine_put(get_free_info(cache, object), cache);
+	if (kasan_stack_collection_enabled())
+		kasan_set_free_info(cache, object, tag);
 
-	return IS_ENABLED(CONFIG_KASAN_GENERIC);
+	return kasan_quarantine_put(cache, object);
 }
 
-bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
+bool __kasan_slab_free(struct kmem_cache *cache, void *object,
+				unsigned long ip, bool init)
 {
-	return __kasan_slab_free(cache, object, ip, true);
+	return ____kasan_slab_free(cache, object, ip, true, init);
 }
 
-static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
-			size_t size, gfp_t flags, bool keep_tag)
+static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	unsigned long redzone_start;
-	unsigned long redzone_end;
-	u8 tag = 0xff;
+	if (ptr != page_address(virt_to_head_page(ptr))) {
+		kasan_report_invalid_free(ptr, ip);
+		return true;
+	}
+
+	if (!kasan_byte_accessible(ptr)) {
+		kasan_report_invalid_free(ptr, ip);
+		return true;
+	}
+
+	/*
+	 * The object will be poisoned by kasan_free_pages() or
+	 * kasan_slab_free_mempool().
+	 */
+
+	return false;
+}
+
+void __kasan_kfree_large(void *ptr, unsigned long ip)
+{
+	____kasan_kfree_large(ptr, ip);
+}
+
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	struct page *page;
+
+	page = virt_to_head_page(ptr);
+
+	/*
+	 * Even though this function is only called for kmem_cache_alloc and
+	 * kmalloc backed mempool allocations, those allocations can still be
+	 * !PageSlab() when the size provided to kmalloc is larger than
+	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 */
+	if (unlikely(!PageSlab(page))) {
+		if (____kasan_kfree_large(ptr, ip))
+			return;
+		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
+	} else {
+		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
+	}
+}
+
+static void set_alloc_info(struct kmem_cache *cache, void *object,
+				gfp_t flags, bool is_kmalloc)
+{
+	struct kasan_alloc_meta *alloc_meta;
+
+	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
+	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
+		return;
+
+	alloc_meta = kasan_get_alloc_meta(cache, object);
+	if (alloc_meta)
+		kasan_set_track(&alloc_meta->alloc_track, flags);
+}
+
+void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
+					void *object, gfp_t flags, bool init)
+{
+	u8 tag;
+	void *tagged_object;
 
 	if (gfpflags_allow_blocking(flags))
-		quarantine_reduce();
+		kasan_quarantine_reduce();
 
 	if (unlikely(object == NULL))
 		return NULL;
 
-	redzone_start = round_up((unsigned long)(object + size),
-				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = round_up((unsigned long)object + cache->object_size,
-				KASAN_SHADOW_SCALE_SIZE);
+	if (is_kfence_address(object))
+		return (void *)object;
 
-	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		tag = assign_tag(cache, object, false, keep_tag);
+	/*
+	 * Generate and assign random tag for tag-based modes.
+	 * Tag is ignored in set_tag() for the generic mode.
+	 */
+	tag = assign_tag(cache, object, false);
+	tagged_object = set_tag(object, tag);
 
-	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
-	kasan_unpoison_shadow(set_tag(object, tag), size);
-	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
-		KASAN_KMALLOC_REDZONE);
+	/*
+	 * Unpoison the whole object.
+	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
+	 */
+	kasan_unpoison(tagged_object, cache->object_size, init);
 
-	if (cache->flags & SLAB_KASAN)
-		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
+	/* Save alloc info (if possible) for non-kmalloc() allocations. */
+	if (kasan_stack_collection_enabled())
+		set_alloc_info(cache, (void *)object, flags, false);
 
-	return set_tag(object, tag);
+	return tagged_object;
 }
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
-					gfp_t flags)
+static inline void *____kasan_kmalloc(struct kmem_cache *cache,
+				const void *object, size_t size, gfp_t flags)
 {
-	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
-}
-
-void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
-				size_t size, gfp_t flags)
-{
-	return __kasan_kmalloc(cache, object, size, flags, true);
-}
-EXPORT_SYMBOL(kasan_kmalloc);
-
-void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
-						gfp_t flags)
-{
-	struct page *page;
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
 	if (gfpflags_allow_blocking(flags))
-		quarantine_reduce();
+		kasan_quarantine_reduce();
+
+	if (unlikely(object == NULL))
+		return NULL;
+
+	if (is_kfence_address(kasan_reset_tag(object)))
+		return (void *)object;
+
+	/*
+	 * The object has already been unpoisoned by kasan_slab_alloc() for
+	 * kmalloc() or by kasan_krealloc() for krealloc().
+	 */
+
+	/*
+	 * The redzone has byte-level precision for the generic mode.
+	 * Partially poison the last object granule to cover the unaligned
+	 * part of the redzone.
+	 */
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		kasan_poison_last_granule((void *)object, size);
+
+	/* Poison the aligned part of the redzone. */
+	redzone_start = round_up((unsigned long)(object + size),
+				KASAN_GRANULE_SIZE);
+	redzone_end = round_up((unsigned long)(object + cache->object_size),
+				KASAN_GRANULE_SIZE);
+	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
+		     KASAN_KMALLOC_REDZONE, false);
+
+	/*
+	 * Save alloc info (if possible) for kmalloc() allocations.
+	 * This also rewrites the alloc info when called from kasan_krealloc().
+	 */
+	if (kasan_stack_collection_enabled())
+		set_alloc_info(cache, (void *)object, flags, true);
+
+	/* Keep the tag that was set by kasan_slab_alloc(). */
+	return (void *)object;
+}
+
+void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
+					size_t size, gfp_t flags)
+{
+	return ____kasan_kmalloc(cache, object, size, flags);
+}
+EXPORT_SYMBOL(__kasan_kmalloc);
+
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+						gfp_t flags)
+{
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (gfpflags_allow_blocking(flags))
+		kasan_quarantine_reduce();
 
 	if (unlikely(ptr == NULL))
 		return NULL;
 
-	page = virt_to_page(ptr);
-	redzone_start = round_up((unsigned long)(ptr + size),
-				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	/*
+	 * The object has already been unpoisoned by kasan_alloc_pages() for
+	 * alloc_pages() or by kasan_krealloc() for krealloc().
+	 */
 
-	kasan_unpoison_shadow(ptr, size);
-	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
-		KASAN_PAGE_REDZONE);
+	/*
+	 * The redzone has byte-level precision for the generic mode.
+	 * Partially poison the last object granule to cover the unaligned
+	 * part of the redzone.
	 */
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		kasan_poison_last_granule(ptr, size);
+
+	/* Poison the aligned part of the redzone. */
+	redzone_start = round_up((unsigned long)(ptr + size),
+				KASAN_GRANULE_SIZE);
+	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
+	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
+		     KASAN_PAGE_REDZONE, false);
 
 	return (void *)ptr;
 }
 
-void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
+void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
+	/*
+	 * Unpoison the object's data.
+	 * Part of it might already have been unpoisoned, but it's unknown
+	 * how big that part is.
+	 */
+	kasan_unpoison(object, size, false);
+
 	page = virt_to_head_page(object);
 
+	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
 	if (unlikely(!PageSlab(page)))
-		return kasan_kmalloc_large(object, size, flags);
+		return __kasan_kmalloc_large(object, size, flags);
 	else
-		return __kasan_kmalloc(page->slab_cache, object, size,
-						flags, true);
+		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
-void kasan_poison_kfree(void *ptr, unsigned long ip)
+bool __kasan_check_byte(const void *address, unsigned long ip)
 {
-	struct page *page;
-
-	page = virt_to_head_page(ptr);
-
-	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
-			kasan_report_invalid_free(ptr, ip);
-			return;
-		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
-	} else {
-		__kasan_slab_free(page->slab_cache, ptr, ip, false);
-	}
-}
-
-void kasan_kfree_large(void *ptr, unsigned long ip)
-{
-	if (ptr != page_address(virt_to_head_page(ptr)))
-		kasan_report_invalid_free(ptr, ip);
-	/* The object will be poisoned by page_alloc. */
-}
-
-int kasan_module_alloc(void *addr, size_t size)
-{
-	void *ret;
-	size_t scaled_size;
-	size_t shadow_size;
-	unsigned long shadow_start;
-
-	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
-	shadow_size = round_up(scaled_size, PAGE_SIZE);
-
-	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
-		return -EINVAL;
-
-	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
-			shadow_start + shadow_size,
-			GFP_KERNEL,
-			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
-			__builtin_return_address(0));
-
-	if (ret) {
-		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
-		find_vm_area(addr)->flags |= VM_KASAN;
-		kmemleak_ignore(ret);
-		return 0;
-	}
-
-	return -ENOMEM;
-}
-
-void kasan_free_shadow(const struct vm_struct *vm)
-{
-	if (vm->flags & VM_KASAN)
-		vfree(kasan_mem_to_shadow(vm->addr));
-}
-
-extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
-
-void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
-{
-	unsigned long flags = user_access_save();
-	__kasan_report(addr, size, is_write, ip);
-	user_access_restore(flags);
-}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-static bool shadow_mapped(unsigned long addr)
-{
-	pgd_t *pgd = pgd_offset_k(addr);
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	if (pgd_none(*pgd))
+	if (!kasan_byte_accessible(address)) {
+		kasan_report((unsigned long)address, 1, false, ip);
 		return false;
-	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d))
-		return false;
-	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
-		return false;
-
-	/*
-	 * We can't use pud_large() or pud_huge(), the first one is
-	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
-	 * pud_bad(), if pud is bad then it's bad because it's huge.
-	 */
-	if (pud_bad(*pud))
-		return true;
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
-		return false;
-
-	if (pmd_bad(*pmd))
-		return true;
-	pte = pte_offset_kernel(pmd, addr);
-	return !pte_none(*pte);
-}
-
-static int __meminit kasan_mem_notifier(struct notifier_block *nb,
-			unsigned long action, void *data)
-{
-	struct memory_notify *mem_data = data;
-	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
-	unsigned long shadow_end, shadow_size;
-
-	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
-	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
-	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
-	shadow_size = nr_shadow_pages << PAGE_SHIFT;
-	shadow_end = shadow_start + shadow_size;
-
-	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
-	    WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
-		return NOTIFY_BAD;
-
-	switch (action) {
-	case MEM_GOING_ONLINE: {
-		void *ret;
-
-		/*
-		 * If shadow is mapped already than it must have been mapped
-		 * during the boot. This could happen if we onlining previously
-		 * offlined memory.
-		 */
-		if (shadow_mapped(shadow_start))
-			return NOTIFY_OK;
-
-		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
-					shadow_end, GFP_KERNEL,
-					PAGE_KERNEL, VM_NO_GUARD,
-					pfn_to_nid(mem_data->start_pfn),
-					__builtin_return_address(0));
-		if (!ret)
-			return NOTIFY_BAD;
-
-		kmemleak_ignore(ret);
-		return NOTIFY_OK;
 	}
-	case MEM_CANCEL_ONLINE:
-	case MEM_OFFLINE: {
-		struct vm_struct *vm;
-
-		/*
-		 * shadow_start was either mapped during boot by kasan_init()
-		 * or during memory online by __vmalloc_node_range().
-		 * In the latter case we can use vfree() to free shadow.
-		 * Non-NULL result of the find_vm_area() will tell us if
-		 * that was the second case.
-		 *
-		 * Currently it's not possible to free shadow mapped
-		 * during boot by kasan_init(). It's because the code
-		 * to do that hasn't been written yet. So we'll just
-		 * leak the memory.
-		 */
-		vm = find_vm_area((void *)shadow_start);
-		if (vm)
-			vfree((void *)shadow_start);
-	}
-	}
-
-	return NOTIFY_OK;
+	return true;
 }
-
-static int __init kasan_memhotplug_init(void)
-{
-	hotplug_memory_notifier(kasan_mem_notifier, 0);
-
-	return 0;
-}
-
-core_initcall(kasan_memhotplug_init);
-#endif
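The new __kasan_check_byte() at the end of the diff gives callers a way to validate an address before bulk-unpoisoning it. A hedged sketch of the intended usage pattern, modelled on ksize(): the demo_ksize() function and its exact body are illustrative, not the kernel's implementation, and it assumes the non-underscored kasan_check_byte()/kasan_unpoison_range() wrappers that pair with the __-prefixed functions added above:

#include <linux/kasan.h>
#include <linux/slab.h>

static size_t demo_ksize(const void *objp)
{
	size_t size;

	/*
	 * Check one byte first: if the object was already freed, this reports
	 * a proper use-after-free before the poison is wiped out below.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	size = __ksize(objp);
	/* Callers may use the whole allocated area, so unpoison all of it. */
	kasan_unpoison_range(objp, size);
	return size;
}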