commit 102a0743326a03cd1a1202ceda21e175b7d3575c (2024-02-20)
kernel/include/linux/kasan.h
@@ -2,6 +2,10 @@
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/bug.h>
+#include <linux/kasan-enabled.h>
+#include <linux/kernel.h>
+#include <linux/static_key.h>
 #include <linux/types.h>
 
 struct kmem_cache;
@@ -11,11 +15,36 @@
 
 #ifdef CONFIG_KASAN
 
+#include <linux/linkage.h>
 #include <asm/kasan.h>
-#include <asm/pgtable.h>
+
+/* kasan_data struct is used in KUnit tests for KASAN expected failures */
+struct kunit_kasan_expectation {
+	bool report_expected;
+	bool report_found;
+};
+
+#endif
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+#include <linux/pgtable.h>
+
+/* Software KASAN implementations use shadow memory. */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+/* This matches KASAN_TAG_INVALID. */
+#define KASAN_SHADOW_INIT 0xFE
+#else
+#define KASAN_SHADOW_INIT 0
+#endif
+
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
 
 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
+extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
 extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
 extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
 extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
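
The hunk above gives the software KASAN modes their shadow-memory plumbing: every aligned granule of kernel memory (8 bytes in generic mode) is backed by one shadow byte, and KASAN_SHADOW_INIT is the value those shadow bytes start out with. Below is a minimal userspace sketch of the shadow-address arithmetic, whose tail (`+ KASAN_SHADOW_OFFSET;`) appears as context at the top of the next hunk; the shift and offset here are illustrative stand-ins, not any architecture's real values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the kernel picks these per architecture. */
#define SHADOW_SCALE_SHIFT	3			/* 8-byte granule, as in generic KASAN */
#define SHADOW_OFFSET		0xdffffc0000000000ULL	/* hypothetical shadow placement */

/* Same arithmetic as kasan_mem_to_shadow(): scale the address, then relocate. */
static uint64_t mem_to_shadow(uint64_t addr)
{
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	uint64_t addr = 0xffff888000000000ULL;	/* hypothetical kernel address */

	/* Addresses in the same 8-byte granule map to the same shadow byte. */
	printf("shadow(%#llx) = %#llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)mem_to_shadow(addr));
	printf("shadow(%#llx) = %#llx\n",
	       (unsigned long long)(addr + 7),
	       (unsigned long long)mem_to_shadow(addr + 7));
	return 0;
}

The 0xFE chosen under CONFIG_KASAN_SW_TAGS is deliberate: as the comment in the hunk notes, it matches KASAN_TAG_INVALID, so freshly mapped shadow marks memory as inaccessible until it is explicitly unpoisoned.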
@@ -29,77 +58,250 @@
 		+ KASAN_SHADOW_OFFSET;
 }
 
+int kasan_add_zero_shadow(void *start, unsigned long size);
+void kasan_remove_zero_shadow(void *start, unsigned long size);
+
 /* Enable reporting bugs after kasan_disable_current() */
 extern void kasan_enable_current(void);
 
 /* Disable reporting bugs for current task */
 extern void kasan_disable_current(void);
 
-void kasan_unpoison_shadow(const void *address, size_t size);
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
-void kasan_unpoison_task_stack(struct task_struct *task);
-void kasan_unpoison_stack_above_sp_to(const void *watermark);
+static inline int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+	return 0;
+}
+static inline void kasan_remove_zero_shadow(void *start,
+					unsigned long size)
+{}
 
-void kasan_alloc_pages(struct page *page, unsigned int order);
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
-			slab_flags_t *flags);
+#else /* CONFIG_KASAN_HW_TAGS */
 
-void kasan_poison_slab(struct page *page);
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
-					const void *object);
+static __always_inline void kasan_alloc_pages(struct page *page,
+					      unsigned int order, gfp_t flags)
+{
+	/* Only available for integrated init. */
+	BUILD_BUG();
+}
 
-void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
-					gfp_t flags);
-void kasan_kfree_large(void *ptr, unsigned long ip);
-void kasan_poison_kfree(void *ptr, unsigned long ip);
-void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
-					size_t size, gfp_t flags);
-void * __must_check kasan_krealloc(const void *object, size_t new_size,
-					gfp_t flags);
+static __always_inline void kasan_free_pages(struct page *page,
+					     unsigned int order)
+{
+	/* Only available for integrated init. */
+	BUILD_BUG();
+}
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
-					gfp_t flags);
-bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+static inline bool kasan_has_integrated_init(void)
+{
+	return kasan_hw_tags_enabled();
+}
+
+#ifdef CONFIG_KASAN
 
 struct kasan_cache {
 	int alloc_meta_offset;
 	int free_meta_offset;
+	bool is_kmalloc;
 };
 
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
+slab_flags_t __kasan_never_merge(void);
+static __always_inline slab_flags_t kasan_never_merge(void)
+{
+	if (kasan_enabled())
+		return __kasan_never_merge();
+	return 0;
+}
 
-int kasan_add_zero_shadow(void *start, unsigned long size);
-void kasan_remove_zero_shadow(void *start, unsigned long size);
+void __kasan_unpoison_range(const void *addr, size_t size);
+static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
+{
+	if (kasan_enabled())
+		__kasan_unpoison_range(addr, size);
+}
 
-size_t ksize(const void *);
-static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
-size_t kasan_metadata_size(struct kmem_cache *cache);
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
+						unsigned int order, bool init)
+{
+	if (kasan_enabled())
+		__kasan_poison_pages(page, order, init);
+}
+
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_unpoison_pages(struct page *page,
+						 unsigned int order, bool init)
+{
+	if (kasan_enabled())
+		__kasan_unpoison_pages(page, order, init);
+}
+
+void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+				slab_flags_t *flags);
+static __always_inline void kasan_cache_create(struct kmem_cache *cache,
+				unsigned int *size, slab_flags_t *flags)
+{
+	if (kasan_enabled())
+		__kasan_cache_create(cache, size, flags);
+}
+
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
+static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+	if (kasan_enabled())
+		__kasan_cache_create_kmalloc(cache);
+}
+
+size_t __kasan_metadata_size(struct kmem_cache *cache);
+static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	if (kasan_enabled())
+		return __kasan_metadata_size(cache);
+	return 0;
+}
+
+void __kasan_poison_slab(struct page *page);
+static __always_inline void kasan_poison_slab(struct page *page)
+{
+	if (kasan_enabled())
+		__kasan_poison_slab(page);
+}
+
+void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+							void *object)
+{
+	if (kasan_enabled())
+		__kasan_unpoison_object_data(cache, object);
+}
+
+void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
+static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+							void *object)
+{
+	if (kasan_enabled())
+		__kasan_poison_object_data(cache, object);
+}
+
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
+					  const void *object);
+static __always_inline void * __must_check kasan_init_slab_obj(
+				struct kmem_cache *cache, const void *object)
+{
+	if (kasan_enabled())
+		return __kasan_init_slab_obj(cache, object);
+	return (void *)object;
+}
+
+bool __kasan_slab_free(struct kmem_cache *s, void *object,
+			unsigned long ip, bool init);
+static __always_inline bool kasan_slab_free(struct kmem_cache *s,
+						void *object, bool init)
+{
+	if (kasan_enabled())
+		return __kasan_slab_free(s, object, _RET_IP_, init);
+	return false;
+}
+
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
+{
+	if (kasan_enabled())
+		__kasan_kfree_large(ptr, _RET_IP_);
+}
+
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
+static __always_inline void kasan_slab_free_mempool(void *ptr)
+{
+	if (kasan_enabled())
+		__kasan_slab_free_mempool(ptr, _RET_IP_);
+}
+
+void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
+				       void *object, gfp_t flags, bool init);
+static __always_inline void * __must_check kasan_slab_alloc(
+		struct kmem_cache *s, void *object, gfp_t flags, bool init)
+{
+	if (kasan_enabled())
+		return __kasan_slab_alloc(s, object, flags, init);
+	return object;
+}
+
+void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
+				    size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
+				const void *object, size_t size, gfp_t flags)
+{
+	if (kasan_enabled())
+		return __kasan_kmalloc(s, object, size, flags);
+	return (void *)object;
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr,
+					  size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
+							size_t size, gfp_t flags)
+{
+	if (kasan_enabled())
+		return __kasan_kmalloc_large(ptr, size, flags);
+	return (void *)ptr;
+}
+
+void * __must_check __kasan_krealloc(const void *object,
+				     size_t new_size, gfp_t flags);
+static __always_inline void * __must_check kasan_krealloc(const void *object,
+						size_t new_size, gfp_t flags)
+{
+	if (kasan_enabled())
+		return __kasan_krealloc(object, new_size, flags);
+	return (void *)object;
+}
+
+/*
+ * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
+ * the hardware tag-based mode that doesn't rely on compiler instrumentation.
+ */
+bool __kasan_check_byte(const void *addr, unsigned long ip);
+static __always_inline bool kasan_check_byte(const void *addr)
+{
+	if (kasan_enabled())
+		return __kasan_check_byte(addr, _RET_IP_);
+	return true;
+}
+
 
 bool kasan_save_enable_multi_shot(void);
 void kasan_restore_multi_shot(bool enabled);
 
 #else /* CONFIG_KASAN */
 
-static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
-
-static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
-static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
-
-static inline void kasan_enable_current(void) {}
-static inline void kasan_disable_current(void) {}
-
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
-
+static inline slab_flags_t kasan_never_merge(void)
+{
+	return 0;
+}
+static inline void kasan_unpoison_range(const void *address, size_t size) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+				      bool init) {}
+static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
+					bool init) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
 				      slab_flags_t *flags) {}
-
+static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
+static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					      void *object) {}
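
Note the shape of the !CONFIG_KASAN_HW_TAGS stubs for kasan_alloc_pages() and kasan_free_pages() in the hunk above: instead of empty bodies they contain BUILD_BUG(), so the stubs compile only when the optimizer can prove every call site dead, and a call that survives on a non-HW_TAGS build fails at build time rather than silently doing nothing. A rough userspace model of that idiom, assuming GCC and an optimizing build (-O1 or higher); the names are illustrative, not the kernel's:

#include <stdio.h>

/*
 * Models the kernel's __compiletime_error() mechanism behind BUILD_BUG():
 * if a call to this function survives dead-code elimination, the compiler
 * raises a hard error instead of emitting the call.
 */
extern void sketch_build_bug(void)
	__attribute__((__error__("this call must be optimized away")));

static inline void kasan_alloc_pages_stub(void)
{
	/* Only available for integrated init. */
	sketch_build_bug();
}

int main(void)
{
	if (0)
		kasan_alloc_pages_stub();	/* provably dead: builds cleanly at -O1+ */
	/* An unconditional call here would abort the build instead. */
	puts("stub never reached");
	return 0;
}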
@@ -110,85 +312,154 @@
 {
 	return (void *)object;
 }
-
-static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
 {
-	return ptr;
+	return false;
 }
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
+static inline void kasan_kfree_large(void *ptr) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
+static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
+				   gfp_t flags, bool init)
+{
+	return object;
+}
 static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
 				size_t size, gfp_t flags)
 {
 	return (void *)object;
+}
+static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+	return (void *)ptr;
 }
 static inline void *kasan_krealloc(const void *object, size_t new_size,
 				 gfp_t flags)
 {
 	return (void *)object;
 }
-
-static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
-				   gfp_t flags)
+static inline bool kasan_check_byte(const void *address)
 {
-	return object;
+	return true;
 }
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-				   unsigned long ip)
-{
-	return false;
-}
-
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-
-static inline int kasan_add_zero_shadow(void *start, unsigned long size)
-{
-	return 0;
-}
-static inline void kasan_remove_zero_shadow(void *start,
-					unsigned long size)
-{}
-
-static inline void kasan_unpoison_slab(const void *ptr) { }
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 
 #endif /* CONFIG_KASAN */
 
-#ifdef CONFIG_KASAN_GENERIC
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+void kasan_unpoison_task_stack(struct task_struct *task);
+#else
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+#endif
 
-#define KASAN_SHADOW_INIT 0
+#ifdef CONFIG_KASAN_GENERIC
 
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
+void kasan_record_aux_stack(void *ptr);
 
 #else /* CONFIG_KASAN_GENERIC */
 
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
+static inline void kasan_record_aux_stack(void *ptr) {}
 
 #endif /* CONFIG_KASAN_GENERIC */
 
-#ifdef CONFIG_KASAN_SW_TAGS
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
 
-#define KASAN_SHADOW_INIT 0xFF
+static inline void *kasan_reset_tag(const void *addr)
+{
+	return (void *)arch_kasan_reset_tag(addr);
+}
 
-void kasan_init_tags(void);
-
-void *kasan_reset_tag(const void *addr);
-
-void kasan_report(unsigned long addr, size_t size,
+/**
+ * kasan_report - print a report about a bad memory access detected by KASAN
+ * @addr: address of the bad access
+ * @size: size of the bad access
+ * @is_write: whether the bad access is a write or a read
+ * @ip: instruction pointer for the accessibility check or the bad access itself
+ */
+bool kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 
-#else /* CONFIG_KASAN_SW_TAGS */
-
-static inline void kasan_init_tags(void) { }
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
 
 static inline void *kasan_reset_tag(const void *addr)
 {
 	return (void *)addr;
 }
 
-#endif /* CONFIG_KASAN_SW_TAGS */
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+void kasan_report_async(void);
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+void __init kasan_init_sw_tags(void);
+#else
+static inline void kasan_init_sw_tags(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_HW_TAGS
+void kasan_init_hw_tags_cpu(void);
+void __init kasan_init_hw_tags(void);
+#else
+static inline void kasan_init_hw_tags_cpu(void) { }
+static inline void kasan_init_hw_tags(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+void kasan_poison_vmalloc(const void *start, unsigned long size);
+void kasan_unpoison_vmalloc(const void *start, unsigned long size);
+void kasan_release_vmalloc(unsigned long start, unsigned long end,
+			   unsigned long free_region_start,
+			   unsigned long free_region_end);
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_populate_vmalloc(unsigned long start,
+					unsigned long size)
+{
+	return 0;
+}
+
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
+{ }
+static inline void kasan_release_vmalloc(unsigned long start,
+					 unsigned long end,
+					 unsigned long free_region_start,
+					 unsigned long free_region_end) {}
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+	!defined(CONFIG_KASAN_VMALLOC)
+
+/*
+ * These functions provide a special case to support backing module
+ * allocations with real shadow memory. With KASAN vmalloc, the special
+ * case is unnecessary, as the work is handled in the generic case.
+ */
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_free_shadow(const struct vm_struct *vm);
+
+#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+
+#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+#ifdef CONFIG_KASAN_INLINE
+void kasan_non_canonical_hook(unsigned long addr);
+#else /* CONFIG_KASAN_INLINE */
+static inline void kasan_non_canonical_hook(unsigned long addr) { }
+#endif /* CONFIG_KASAN_INLINE */
 
 #endif /* LINUX_KASAN_H */
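
The bulk of this patch is one repeated transformation: each kasan_*() hook becomes an out-of-line __kasan_*() implementation plus an __always_inline wrapper that calls it only when kasan_enabled() is true. With CONFIG_KASAN_HW_TAGS, kasan_enabled() is a static-key branch that can be flipped at boot, so a kernel with KASAN compiled in but disabled at runtime pays only a patched-out branch in these hot paths; in the other modes it folds to a compile-time constant. A self-contained sketch of the wrapper pattern, with a plain bool standing in for the static key (the names mirror the header, but this is a model, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's static_branch_likely(&kasan_flag_enabled). */
static bool kasan_enabled_flag;

static inline bool kasan_enabled(void)
{
	return kasan_enabled_flag;
}

/* Out-of-line slow path, compiled once; reached only when KASAN is on. */
static void __kasan_poison_slab(void *page)
{
	printf("poisoning slab page %p\n", page);
}

/* Inline fast path: a single predictable branch when KASAN is off. */
static inline void kasan_poison_slab(void *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

int main(void)
{
	int page;

	kasan_poison_slab(&page);	/* KASAN disabled: no-op */
	kasan_enabled_flag = true;
	kasan_poison_slab(&page);	/* now reaches the slow path */
	return 0;
}

Keeping the wrapper __always_inline is the point of the split: the kasan_enabled() test is inlined into every caller, so the disabled case costs one branch and never a function call.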