2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/mm/kasan/generic.c
@@ -7,21 +7,14 @@
  *
  * Some code borrowed from https://github.com/xairy/kasan-prototype by
  * Andrey Konovalov <andreyknvl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DISABLE_BRANCH_PROFILING
 
 #include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
+#include <linux/kfence.h>
 #include <linux/kmemleak.h>
 #include <linux/linkage.h>
 #include <linux/memblock.h>
@@ -52,7 +45,7 @@
         s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
 
         if (unlikely(shadow_value)) {
-                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+                s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
                 return unlikely(last_accessible_byte >= shadow_value);
         }
 
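(The KASAN_SHADOW_MASK to KASAN_GRANULE_MASK rename above is purely mechanical; the check itself is unchanged. For orientation, here is a minimal standalone sketch of the generic-mode shadow encoding that all of the memory_is_poisoned_*() helpers in this file rely on. The granule size matches mm/kasan/kasan.h, but the helper below is illustrative, not the kernel's own code.)

    #include <stdbool.h>
    #include <stdint.h>

    #define KASAN_GRANULE_SIZE 8                        /* bytes tracked per shadow byte */
    #define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)

    /*
     * Generic KASAN shadow byte values:
     *   0        - all 8 bytes of the granule are accessible
     *   1..7     - only the first N bytes of the granule are accessible
     *   negative - the whole granule is poisoned (redzone, freed object, ...)
     */
    static bool byte_is_accessible(int8_t shadow, uintptr_t addr)
    {
        if (shadow == 0)
            return true;                /* fully accessible granule */
        if (shadow < 0)
            return false;               /* fully poisoned granule */
        /* Partial granule: addr's offset within it must lie below shadow. */
        return (int8_t)(addr & KASAN_GRANULE_MASK) < shadow;
    }

(This is exactly the inverse of the "last_accessible_byte >= shadow_value" test in the hunk above, which answers "is it poisoned".)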
@@ -68,7 +61,7 @@
          * Access crosses 8(shadow size)-byte boundary. Such access maps
          * into 2 shadow bytes, so we need to check them both.
          */
-        if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+        if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
                 return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
 
         return memory_is_poisoned_1(addr + size - 1);
@@ -79,7 +72,7 @@
         u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
         /* Unaligned 16-bytes access maps into 3 shadow bytes. */
-        if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+        if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
                 return *shadow_addr || memory_is_poisoned_1(addr + 15);
 
         return *shadow_addr;
@@ -140,7 +133,7 @@
                 s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
 
                 if (unlikely(ret != (unsigned long)last_shadow ||
-                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+                        ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
                         return true;
         }
         return false;
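(Every helper in this group reaches the shadow through kasan_mem_to_shadow(): one shadow byte per 8 bytes of memory, shifted and offset per architecture. The offset below is the x86_64 value and is quoted only as a worked example.)

    /*
     * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
     *
     * With KASAN_SHADOW_SCALE_SHIFT = 3 and KASAN_SHADOW_OFFSET =
     * 0xdffffc0000000000 (x86_64):
     *
     *   addr   = 0xffff888000001005
     *   shadow = (0xffff888000001005 >> 3) + 0xdffffc0000000000
     *          = 0xffffed1000000200
     *
     * An N-byte access therefore has to test every shadow byte between
     * mem_to_shadow(addr) and mem_to_shadow(addr + N - 1), applying the
     * partial-granule rule only to the final byte. That is what the
     * ret/last_shadow comparison in memory_is_poisoned_n() above does.
     */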
@@ -166,51 +159,60 @@
         return memory_is_poisoned_n(addr, size);
 }
 
-static __always_inline void check_memory_region_inline(unsigned long addr,
+static __always_inline bool check_region_inline(unsigned long addr,
                                                 size_t size, bool write,
                                                 unsigned long ret_ip)
 {
         if (unlikely(size == 0))
-                return;
+                return true;
+
+        if (unlikely(addr + size < addr))
+                return !kasan_report(addr, size, write, ret_ip);
 
         if (unlikely((void *)addr <
                 kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
-                kasan_report(addr, size, write, ret_ip);
-                return;
+                return !kasan_report(addr, size, write, ret_ip);
         }
 
         if (likely(!memory_is_poisoned(addr, size)))
-                return;
+                return true;
 
-        kasan_report(addr, size, write, ret_ip);
+        return !kasan_report(addr, size, write, ret_ip);
 }
 
-void check_memory_region(unsigned long addr, size_t size, bool write,
-                        unsigned long ret_ip)
+bool kasan_check_range(unsigned long addr, size_t size, bool write,
+                                        unsigned long ret_ip)
 {
-        check_memory_region_inline(addr, size, write, ret_ip);
+        return check_region_inline(addr, size, write, ret_ip);
+}
+
+bool kasan_byte_accessible(const void *addr)
+{
+        s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
+
+        return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
 }
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
-        quarantine_remove_cache(cache);
+        kasan_quarantine_remove_cache(cache);
 }
 
 void kasan_cache_shutdown(struct kmem_cache *cache)
 {
         if (!__kmem_cache_empty(cache))
-                quarantine_remove_cache(cache);
+                kasan_quarantine_remove_cache(cache);
 }
 
 static void register_global(struct kasan_global *global)
 {
-        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+        size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
 
-        kasan_unpoison_shadow(global->beg, global->size);
+        kasan_unpoison(global->beg, global->size, false);
 
-        kasan_poison_shadow(global->beg + aligned_size,
-                        global->size_with_redzone - aligned_size,
-                        KASAN_GLOBAL_REDZONE);
+        kasan_poison(global->beg + aligned_size,
+                     global->size_with_redzone - aligned_size,
+                     KASAN_GLOBAL_REDZONE, false);
 }
 
 void __asan_register_globals(struct kasan_global *globals, size_t size)
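(The substantive change in this hunk is the signature: check_memory_region() returned void, while kasan_check_range() and check_region_inline() return true when the access is valid. kasan_report() returns true if it actually produced a report, so "return !kasan_report(...)" reads as "report, then signal the access as bad". The point of the bool is that software interceptors can skip a known-bad access instead of performing it. A sketch along the lines of the kernel's memcpy() interceptor, with details assumed:)

    void *memcpy(void *dest, const void *src, size_t len)
    {
        /* Check both operands; bail out rather than corrupt memory. */
        if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
            !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
            return NULL;

        return __memcpy(dest, src, len);
    }

(The new kasan_byte_accessible() is the report-free single-byte probe: per the shadow encoding, it treats the byte as accessible when its granule's shadow value lies in [0, KASAN_GRANULE_SIZE).)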
@@ -230,7 +232,7 @@
 #define DEFINE_ASAN_LOAD_STORE(size) \
         void __asan_load##size(unsigned long addr) \
         { \
-                check_memory_region_inline(addr, size, false, _RET_IP_); \
+                check_region_inline(addr, size, false, _RET_IP_); \
         } \
         EXPORT_SYMBOL(__asan_load##size); \
         __alias(__asan_load##size) \
@@ -238,7 +240,7 @@
         EXPORT_SYMBOL(__asan_load##size##_noabort); \
         void __asan_store##size(unsigned long addr) \
         { \
-                check_memory_region_inline(addr, size, true, _RET_IP_); \
+                check_region_inline(addr, size, true, _RET_IP_); \
         } \
         EXPORT_SYMBOL(__asan_store##size); \
         __alias(__asan_store##size) \
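(For readers untangling the token pasting: DEFINE_ASAN_LOAD_STORE(4) expands to roughly the following, with the EXPORT_SYMBOL() lines elided. Under outline instrumentation the compiler emits calls to the _noabort aliases for every 4-byte load and store.)

    void __asan_load4(unsigned long addr)
    {
        check_region_inline(addr, 4, false, _RET_IP_);
    }
    __alias(__asan_load4)
    void __asan_load4_noabort(unsigned long addr);

    void __asan_store4(unsigned long addr)
    {
        check_region_inline(addr, 4, true, _RET_IP_);
    }
    __alias(__asan_store4)
    void __asan_store4_noabort(unsigned long addr);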
@@ -253,7 +255,7 @@
 
 void __asan_loadN(unsigned long addr, size_t size)
 {
-        check_memory_region(addr, size, false, _RET_IP_);
+        kasan_check_range(addr, size, false, _RET_IP_);
 }
 EXPORT_SYMBOL(__asan_loadN);
 
@@ -263,7 +265,7 @@
 
 void __asan_storeN(unsigned long addr, size_t size)
 {
-        check_memory_region(addr, size, true, _RET_IP_);
+        kasan_check_range(addr, size, true, _RET_IP_);
 }
 EXPORT_SYMBOL(__asan_storeN);
 
@@ -278,10 +280,10 @@
 /* Emitted by compiler to poison alloca()ed objects. */
 void __asan_alloca_poison(unsigned long addr, size_t size)
 {
-        size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+        size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
         size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                         rounded_up_size;
-        size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+        size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
 
         const void *left_redzone = (const void *)(addr -
                         KASAN_ALLOCA_REDZONE_SIZE);
@@ -289,13 +291,12 @@
 
         WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
 
-        kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
-                        size - rounded_down_size);
-        kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
-                        KASAN_ALLOCA_LEFT);
-        kasan_poison_shadow(right_redzone,
-                        padding_size + KASAN_ALLOCA_REDZONE_SIZE,
-                        KASAN_ALLOCA_RIGHT);
+        kasan_unpoison((const void *)(addr + rounded_down_size),
+                       size - rounded_down_size, false);
+        kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
+                     KASAN_ALLOCA_LEFT, false);
+        kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
+                     KASAN_ALLOCA_RIGHT, false);
 }
 EXPORT_SYMBOL(__asan_alloca_poison);
 
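(Only the helper names and the granule constant change here; the alloca redzone geometry is untouched. A worked example, assuming KASAN_ALLOCA_REDZONE_SIZE = 32 as in mm/kasan/kasan.h, for size = 20:)

    /*
     *   rounded_up_size   = round_up(20, 8)       = 24
     *   padding_size      = round_up(20, 32) - 24 = 8
     *   rounded_down_size = round_down(20, 8)     = 16
     *
     *   [addr-32 ... addr)     left redzone  -> KASAN_ALLOCA_LEFT
     *   [addr    ... addr+20)  the object itself
     *   [addr+16 ... addr+20)  tail granule  -> kasan_unpoison(addr+16, 4, ...)
     *                          (its shadow byte becomes 4: "first 4 bytes valid")
     *   [addr+24 ... addr+64)  padding + right redzone -> KASAN_ALLOCA_RIGHT
     */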
@@ -305,7 +306,7 @@
         if (unlikely(!stack_top || stack_top > stack_bottom))
                 return;
 
-        kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
+        kasan_unpoison(stack_top, stack_bottom - stack_top, false);
 }
 EXPORT_SYMBOL(__asan_allocas_unpoison);
 
@@ -323,3 +324,46 @@
 DEFINE_ASAN_SET_SHADOW(f3);
 DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
+
+void kasan_record_aux_stack(void *addr)
+{
+        struct page *page = kasan_addr_to_page(addr);
+        struct kmem_cache *cache;
+        struct kasan_alloc_meta *alloc_meta;
+        void *object;
+
+        if (is_kfence_address(addr) || !(page && PageSlab(page)))
+                return;
+
+        cache = page->slab_cache;
+        object = nearest_obj(cache, page, addr);
+        alloc_meta = kasan_get_alloc_meta(cache, object);
+        if (!alloc_meta)
+                return;
+
+        alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
+        alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
+}
+
+void kasan_set_free_info(struct kmem_cache *cache,
+                                void *object, u8 tag)
+{
+        struct kasan_free_meta *free_meta;
+
+        free_meta = kasan_get_free_meta(cache, object);
+        if (!free_meta)
+                return;
+
+        kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
+        /* The object was freed and has free track set. */
+        *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
+}
+
+struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
+                                void *object, u8 tag)
+{
+        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
+                return NULL;
+        /* Free meta must be present with KASAN_KMALLOC_FREETRACK. */
+        return &kasan_get_free_meta(cache, object)->free_track;
+}
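(kasan_set_free_info() and kasan_get_free_track() form a handshake through the shadow: generic KASAN stores free_meta inside the freed object itself, so the object's first shadow byte is stamped with KASAN_KMALLOC_FREETRACK to record that free_meta->free_track is currently valid, and the getter refuses to read it otherwise. kasan_record_aux_stack() keeps a two-deep history by demoting the previous auxiliary stack to aux_stack[1], so a report can show the last two relevant call sites, e.g. call_rcu() or queue_work(). A hypothetical report-side consumer, purely to show the intended flow; the printing helpers are assumptions, not this patch's API:)

    static void print_free_stack(struct kmem_cache *cache, void *object)
    {
        /* NULL unless the shadow still carries KASAN_KMALLOC_FREETRACK. */
        struct kasan_track *track = kasan_get_free_track(cache, object, 0);

        if (!track)
            return;
        pr_err("Freed by task %u:\n", track->pid);
        stack_depot_print(track->stack);    /* assumed print helper */
    }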