2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/mm/kmemleak.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
@@ -26,7 +13,7 @@
 *
 * The following locks and mutexes are used by kmemleak:
 *
- * - kmemleak_lock (rwlock): protects the object_list modifications and
+ * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 * accesses to the object_tree_root. The object_list is the main list
 * holding the metadata (struct kmemleak_object) for the allocated memory
 * blocks. The object_tree_root is a red black tree used to look-up
@@ -35,13 +22,13 @@
 * object_tree_root in the create_object() function called from the
 * kmemleak_alloc() callback and removed in delete_object() called from the
 * kmemleak_free() callback
- * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
- * the metadata (e.g. count) are protected by this lock. Note that some
- * members of this structure may be protected by other means (atomic or
- * kmemleak_lock). This lock is also held when scanning the corresponding
- * memory block to avoid the kernel freeing it via the kmemleak_free()
- * callback. This is less heavyweight than holding a global lock like
- * kmemleak_lock during scanning
+ * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
+ * Accesses to the metadata (e.g. count) are protected by this lock. Note
+ * that some members of this structure may be protected by other means
+ * (atomic or kmemleak_lock). This lock is also held when scanning the
+ * corresponding memory block to avoid the kernel freeing it via the
+ * kmemleak_free() callback. This is less heavyweight than holding a global
+ * lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 * unreferenced objects at a time. The gray_list contains the objects which
 * are already referenced or marked as false positives and need to be
@@ -86,12 +73,13 @@
 #include <linux/seq_file.h>
 #include <linux/cpumask.h>
 #include <linux/spinlock.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/stacktrace.h>
 #include <linux/cache.h>
 #include <linux/percpu.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pfn.h>
 #include <linux/mmzone.h>
 #include <linux/slab.h>
@@ -109,6 +97,7 @@
 #include <linux/atomic.h>

 #include <linux/kasan.h>
+#include <linux/kfence.h>
 #include <linux/kmemleak.h>
 #include <linux/memory_hotplug.h>

@@ -147,7 +136,7 @@
 * (use_count) and freed using the RCU mechanism.
 */
 struct kmemleak_object {
- spinlock_t lock;
+ raw_spinlock_t lock;
 unsigned int flags; /* object status flags */
 struct list_head object_list;
 struct list_head gray_list;
@@ -180,7 +169,10 @@
 #define OBJECT_REPORTED (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN (1 << 2)
+/* flag set to fully scan the object when scan_area allocation failed */
+#define OBJECT_FULL_SCAN (1 << 3)

+#define HEX_PREFIX " "
 /* number of bytes to print per line; must be 16 or 32 */
 #define HEX_ROW_SIZE 16
 /* number of bytes to print at a time (1, 2, 4, 8) */
@@ -194,23 +186,25 @@
 static LIST_HEAD(object_list);
 /* the list of gray-colored objects (see color_gray comment below) */
 static LIST_HEAD(gray_list);
+/* memory pool allocation */
+static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
+static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
+static LIST_HEAD(mem_pool_free_list);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
-/* rw_lock protecting the access to object_list and object_tree_root */
-static DEFINE_RWLOCK(kmemleak_lock);
+/* protecting the access to object_list and object_tree_root */
+static DEFINE_RAW_SPINLOCK(kmemleak_lock);

 /* allocation caches for kmemleak internal data */
 static struct kmem_cache *object_cache;
 static struct kmem_cache *scan_area_cache;

 /* set if tracing memory operations is enabled */
-static int kmemleak_enabled;
+static int kmemleak_enabled = 1;
 /* same as above but only for the kmemleak_free() callback */
-static int kmemleak_free_enabled;
+static int kmemleak_free_enabled = 1;
 /* set in the late_initcall if there were no errors */
 static int kmemleak_initialized;
-/* enables or disables early logging of the memory operations */
-static int kmemleak_early_log = 1;
 /* set if a kmemleak warning was issued */
 static int kmemleak_warning;
 /* set if a fatal kmemleak error has occurred */
@@ -235,48 +229,8 @@
 /* If there are leaks that can be reported */
 static bool kmemleak_found_leaks;

-/*
- * Early object allocation/freeing logging. Kmemleak is initialized after the
- * kernel allocator. However, both the kernel allocator and kmemleak may
- * allocate memory blocks which need to be tracked. Kmemleak defines an
- * arbitrary buffer to hold the allocation/freeing information before it is
- * fully initialized.
- */
-
-/* kmemleak operation type for early logging */
-enum {
- KMEMLEAK_ALLOC,
- KMEMLEAK_ALLOC_PERCPU,
- KMEMLEAK_FREE,
- KMEMLEAK_FREE_PART,
- KMEMLEAK_FREE_PERCPU,
- KMEMLEAK_NOT_LEAK,
- KMEMLEAK_IGNORE,
- KMEMLEAK_SCAN_AREA,
- KMEMLEAK_NO_SCAN,
- KMEMLEAK_SET_EXCESS_REF
-};
-
-/*
- * Structure holding the information passed to kmemleak callbacks during the
- * early logging.
- */
-struct early_log {
- int op_type; /* kmemleak operation type */
- int min_count; /* minimum reference count */
- const void *ptr; /* allocated/freed memory block */
- union {
- size_t size; /* memory block size */
- unsigned long excess_ref; /* surplus reference passing */
- };
- unsigned long trace[MAX_TRACE]; /* stack trace */
- unsigned int trace_len; /* stack trace length */
-};
-
-/* early logging buffer and current position */
-static struct early_log
- early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
-static int crt_early_log __initdata;
+static bool kmemleak_verbose;
+module_param_named(verbose, kmemleak_verbose, bool, 0600);

 static void kmemleak_disable(void);

@@ -299,6 +253,25 @@
 kmemleak_disable(); \
 } while (0)

+#define warn_or_seq_printf(seq, fmt, ...) do { \
+ if (seq) \
+ seq_printf(seq, fmt, ##__VA_ARGS__); \
+ else \
+ pr_warn(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
+ int rowsize, int groupsize, const void *buf,
+ size_t len, bool ascii)
+{
+ if (seq)
+ seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
+ buf, len, ascii);
+ else
+ print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
+ rowsize, groupsize, buf, len, ascii);
+}
+
 /*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
@@ -314,10 +287,10 @@
 /* limit the number of lines to HEX_MAX_LINES */
 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

- seq_printf(seq, " hex dump (first %zu bytes):\n", len);
+ warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
 kasan_disable_current();
- seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
- HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+ warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
+ HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
 kasan_enable_current();
 }

@@ -365,17 +338,17 @@
 int i;
 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

- seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
+ warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 object->pointer, object->size);
- seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+ warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
 object->comm, object->pid, object->jiffies,
 msecs_age / 1000, msecs_age % 1000);
 hex_dump_object(seq, object);
- seq_printf(seq, " backtrace:\n");
+ warn_or_seq_printf(seq, " backtrace:\n");

 for (i = 0; i < object->trace_len; i++) {
 void *ptr = (void *)object->trace[i];
- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
+ warn_or_seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
 }
 }

@@ -386,11 +359,6 @@
 */
 static void dump_object_info(struct kmemleak_object *object)
 {
- struct stack_trace trace;
-
- trace.nr_entries = object->trace_len;
- trace.entries = object->trace;
-
 pr_notice("Object 0x%08lx (size %zu):\n",
 object->pointer, object->size);
 pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
@@ -400,7 +368,7 @@
 pr_notice(" flags = 0x%x\n", object->flags);
 pr_notice(" checksum = %u\n", object->checksum);
 pr_notice(" backtrace:\n");
- print_stack_trace(&trace, 4);
+ stack_trace_print(object->trace, object->trace_len, 4);
 }

 /*
@@ -444,6 +412,54 @@
 }

 /*
+ * Memory pool allocation and freeing. kmemleak_lock must not be held.
+ */
+static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
+{
+ unsigned long flags;
+ struct kmemleak_object *object;
+
+ /* try the slab allocator first */
+ if (object_cache) {
+ object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+ if (object)
+ return object;
+ }
+
+ /* slab allocation failed, try the memory pool */
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
+ object = list_first_entry_or_null(&mem_pool_free_list,
+ typeof(*object), object_list);
+ if (object)
+ list_del(&object->object_list);
+ else if (mem_pool_free_count)
+ object = &mem_pool[--mem_pool_free_count];
+ else
+ pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
+
+ return object;
+}
+
+/*
+ * Return the object to either the slab allocator or the memory pool.
+ */
+static void mem_pool_free(struct kmemleak_object *object)
+{
+ unsigned long flags;
+
+ if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
+ kmem_cache_free(object_cache, object);
+ return;
+ }
+
+ /* add the object to the memory pool free list */
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
+ list_add(&object->object_list, &mem_pool_free_list);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
+}
+
+/*
 * RCU callback to free a kmemleak_object.
 */
 static void free_object_rcu(struct rcu_head *rcu)
@@ -461,7 +477,7 @@
 hlist_del(&area->node);
 kmem_cache_free(scan_area_cache, area);
 }
- kmem_cache_free(object_cache, object);
+ mem_pool_free(object);
 }

 /*
@@ -479,7 +495,15 @@
 /* should only get here after delete_object was called */
 WARN_ON(object->flags & OBJECT_ALLOCATED);

- call_rcu(&object->rcu, free_object_rcu);
+ /*
+ * It may be too early for the RCU callbacks, however, there is no
+ * concurrent object_list traversal when !object_cache and all objects
+ * came from the memory pool. Free the object directly.
+ */
+ if (object_cache)
+ call_rcu(&object->rcu, free_object_rcu);
+ else
+ free_object_rcu(&object->rcu);
 }

 /*
@@ -491,9 +515,9 @@
 struct kmemleak_object *object;

 rcu_read_lock();
- read_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
 object = lookup_object(ptr, alias);
- read_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

 /* check whether the object is still available */
 if (object && !get_object(object))
@@ -501,6 +525,16 @@
 rcu_read_unlock();

 return object;
+}
+
+/*
+ * Remove an object from the object_tree_root and object_list. Must be called
+ * with the kmemleak_lock held _if_ kmemleak is still enabled.
+ */
+static void __remove_object(struct kmemleak_object *object)
+{
+ rb_erase(&object->rb_node, &object_tree_root);
+ list_del_rcu(&object->object_list);
 }

 /*
@@ -513,13 +547,11 @@
 unsigned long flags;
 struct kmemleak_object *object;

- write_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
 object = lookup_object(ptr, alias);
- if (object) {
- rb_erase(&object->rb_node, &object_tree_root);
- list_del_rcu(&object->object_list);
- }
- write_unlock_irqrestore(&kmemleak_lock, flags);
+ if (object)
+ __remove_object(object);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

 return object;
 }
@@ -529,15 +561,7 @@
 */
 static int __save_stack_trace(unsigned long *trace)
 {
- struct stack_trace stack_trace;
-
- stack_trace.max_entries = MAX_TRACE;
- stack_trace.nr_entries = 0;
- stack_trace.entries = trace;
- stack_trace.skip = 2;
- save_stack_trace(&stack_trace);
-
- return stack_trace.nr_entries;
+ return stack_trace_save(trace, MAX_TRACE, 2);
 }

 /*
@@ -550,8 +574,9 @@
 unsigned long flags;
 struct kmemleak_object *object, *parent;
 struct rb_node **link, *rb_parent;
+ unsigned long untagged_ptr;

- object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+ object = mem_pool_alloc(gfp);
 if (!object) {
 pr_warn("Cannot allocate a kmemleak_object structure\n");
 kmemleak_disable();
@@ -561,11 +586,11 @@
 INIT_LIST_HEAD(&object->object_list);
 INIT_LIST_HEAD(&object->gray_list);
 INIT_HLIST_HEAD(&object->area_list);
- spin_lock_init(&object->lock);
+ raw_spin_lock_init(&object->lock);
 atomic_set(&object->use_count, 1);
 object->flags = OBJECT_ALLOCATED;
 object->pointer = ptr;
- object->size = size;
+ object->size = kfence_ksize((void *)ptr) ?: size;
 object->excess_ref = 0;
 object->min_count = min_count;
 object->count = 0; /* white color initially */
@@ -593,10 +618,11 @@
 /* kernel backtrace */
 object->trace_len = __save_stack_trace(object->trace);

- write_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);

- min_addr = min(min_addr, ptr);
- max_addr = max(max_addr, ptr + size);
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+ min_addr = min(min_addr, untagged_ptr);
+ max_addr = max(max_addr, untagged_ptr + size);
 link = &object_tree_root.rb_node;
 rb_parent = NULL;
 while (*link) {
@@ -624,7 +650,7 @@

 list_add_tail_rcu(&object->object_list, &object_list);
 out:
- write_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 return object;
 }

@@ -642,9 +668,9 @@
 * Locking here also ensures that the corresponding memory block
 * cannot be freed when it is being scanned.
 */
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 object->flags &= ~OBJECT_ALLOCATED;
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 put_object(object);
 }

@@ -689,9 +715,7 @@
 /*
 * Create one or two objects that may result from the memory block
 * split. Note that partial freeing is only done by free_bootmem() and
- * this happens before kmemleak_init() is called. The path below is
- * only executed during early log recording in kmemleak_init(), so
- * GFP_KERNEL is enough.
+ * this happens before kmemleak_init() is called.
 */
 start = object->pointer;
 end = object->pointer + object->size;
@@ -716,9 +740,9 @@
 {
 unsigned long flags;

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 __paint_it(object, color);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 }

 static void paint_ptr(unsigned long ptr, int color)
@@ -763,7 +787,9 @@
 {
 unsigned long flags;
 struct kmemleak_object *object;
- struct kmemleak_scan_area *area;
+ struct kmemleak_scan_area *area = NULL;
+ unsigned long untagged_ptr;
+ unsigned long untagged_objp;

 object = find_and_get_object(ptr, 1);
 if (!object) {
@@ -772,16 +798,22 @@
 return;
 }

- area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
- if (!area) {
- pr_warn("Cannot allocate a scan area\n");
- goto out;
- }
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+ untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

- spin_lock_irqsave(&object->lock, flags);
+ if (scan_area_cache)
+ area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
+
+ raw_spin_lock_irqsave(&object->lock, flags);
+ if (!area) {
+ pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
+ /* mark the object for full scan to avoid false positives */
+ object->flags |= OBJECT_FULL_SCAN;
+ goto out_unlock;
+ }
 if (size == SIZE_MAX) {
- size = object->pointer + object->size - ptr;
- } else if (ptr + size > object->pointer + object->size) {
+ size = untagged_objp + object->size - untagged_ptr;
+ } else if (untagged_ptr + size > untagged_objp + object->size) {
 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 dump_object_info(object);
 kmem_cache_free(scan_area_cache, area);
@@ -794,8 +826,7 @@

 hlist_add_head(&area->node, &object->area_list);
 out_unlock:
- spin_unlock_irqrestore(&object->lock, flags);
-out:
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 put_object(object);
 }

@@ -817,9 +848,9 @@
 return;
 }

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 object->excess_ref = excess_ref;
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 put_object(object);
 }

@@ -839,90 +870,10 @@
 return;
 }

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 object->flags |= OBJECT_NO_SCAN;
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 put_object(object);
-}
-
-/*
- * Log an early kmemleak_* call to the early_log buffer. These calls will be
- * processed later once kmemleak is fully initialized.
- */
-static void __init log_early(int op_type, const void *ptr, size_t size,
- int min_count)
-{
- unsigned long flags;
- struct early_log *log;
-
- if (kmemleak_error) {
- /* kmemleak stopped recording, just count the requests */
- crt_early_log++;
- return;
- }
-
- if (crt_early_log >= ARRAY_SIZE(early_log)) {
- crt_early_log++;
- kmemleak_disable();
- return;
- }
-
- /*
- * There is no need for locking since the kernel is still in UP mode
- * at this stage. Disabling the IRQs is enough.
- */
- local_irq_save(flags);
- log = &early_log[crt_early_log];
- log->op_type = op_type;
- log->ptr = ptr;
- log->size = size;
- log->min_count = min_count;
- log->trace_len = __save_stack_trace(log->trace);
- crt_early_log++;
- local_irq_restore(flags);
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc(struct early_log *log)
-{
- struct kmemleak_object *object;
- unsigned long flags;
- int i;
-
- if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
- return;
-
- /*
- * RCU locking needed to ensure object is not freed via put_object().
- */
- rcu_read_lock();
- object = create_object((unsigned long)log->ptr, log->size,
- log->min_count, GFP_ATOMIC);
- if (!object)
- goto out;
- spin_lock_irqsave(&object->lock, flags);
- for (i = 0; i < log->trace_len; i++)
- object->trace[i] = log->trace[i];
- object->trace_len = log->trace_len;
- spin_unlock_irqrestore(&object->lock, flags);
-out:
- rcu_read_unlock();
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc_percpu(struct early_log *log)
-{
- unsigned int cpu;
- const void __percpu *ptr = log->ptr;
-
- for_each_possible_cpu(cpu) {
- log->ptr = per_cpu_ptr(ptr, cpu);
- early_alloc(log);
- }
 }

 /**
@@ -946,8 +897,6 @@

 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 create_object((unsigned long)ptr, size, min_count, gfp);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);

@@ -975,8 +924,6 @@
 for_each_possible_cpu(cpu)
 create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 size, 0, gfp);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

@@ -1001,11 +948,6 @@
 create_object((unsigned long)area->addr, size, 2, gfp);
 object_set_excess_ref((unsigned long)area,
 (unsigned long)area->addr);
- } else if (kmemleak_early_log) {
- log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
- /* reusing early_log.size for storing area->addr */
- log_early(KMEMLEAK_SET_EXCESS_REF,
- area, (unsigned long)area->addr, 0);
 }
 }
 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
@@ -1023,8 +965,6 @@

 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
 delete_object_full((unsigned long)ptr);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);

@@ -1043,8 +983,6 @@

 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 delete_object_part((unsigned long)ptr, size);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);

@@ -1065,8 +1003,6 @@
 for_each_possible_cpu(cpu)
 delete_object_full((unsigned long)per_cpu_ptr(ptr,
 cpu));
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

@@ -1096,9 +1032,9 @@
 return;
 }

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 object->trace_len = __save_stack_trace(object->trace);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);

 put_object(object);
 }
@@ -1117,8 +1053,6 @@

 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 make_gray_object((unsigned long)ptr);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);

@@ -1137,8 +1071,6 @@

 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 make_black_object((unsigned long)ptr);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);

@@ -1159,8 +1091,6 @@

 if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
 add_scan_area((unsigned long)ptr, size, gfp);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);

@@ -1179,8 +1109,6 @@

 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 object_no_scan((unsigned long)ptr);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);

@@ -1247,8 +1175,10 @@
 u32 old_csum = object->checksum;

 kasan_disable_current();
- object->checksum = crc32(0, (void *)object->pointer, object->size);
+ kcsan_disable_current();
+ object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
 kasan_enable_current();
+ kcsan_enable_current();

 return object->checksum != old_csum;
 }
@@ -1309,8 +1239,9 @@
 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 unsigned long flags;
+ unsigned long untagged_ptr;

- read_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
 for (ptr = start; ptr < end; ptr++) {
 struct kmemleak_object *object;
 unsigned long pointer;
@@ -1320,10 +1251,11 @@
 break;

 kasan_disable_current();
- pointer = *ptr;
+ pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
 kasan_enable_current();

- if (pointer < min_addr || pointer >= max_addr)
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+ if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
 continue;

 /*
@@ -1344,7 +1276,7 @@
 * previously acquired in scan_object(). These locks are
 * enclosed by scan_mutex.
 */
- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 /* only pass surplus references (object already gray) */
 if (color_gray(object)) {
 excess_ref = object->excess_ref;
@@ -1353,7 +1285,7 @@
 excess_ref = 0;
 update_refs(object);
 }
- spin_unlock(&object->lock);
+ raw_spin_unlock(&object->lock);

 if (excess_ref) {
 object = lookup_object(excess_ref, 0);
@@ -1362,12 +1294,12 @@
 if (object == scanned)
 /* circular reference, ignore */
 continue;
- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 update_refs(object);
- spin_unlock(&object->lock);
+ raw_spin_unlock(&object->lock);
 }
 }
- read_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 }

 /*
@@ -1400,13 +1332,14 @@
 * Once the object->lock is acquired, the corresponding memory block
 * cannot be freed (the same lock is acquired in delete_object).
 */
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 if (object->flags & OBJECT_NO_SCAN)
 goto out;
 if (!(object->flags & OBJECT_ALLOCATED))
 /* already freed object */
 goto out;
- if (hlist_empty(&object->area_list)) {
+ if (hlist_empty(&object->area_list) ||
+ object->flags & OBJECT_FULL_SCAN) {
 void *start = (void *)object->pointer;
 void *end = (void *)(object->pointer + object->size);
 void *next;
@@ -1419,9 +1352,9 @@
 if (start >= end)
 break;

- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 cond_resched();
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 } while (object->flags & OBJECT_ALLOCATED);
 } else
 hlist_for_each_entry(area, &object->area_list, node)
@@ -1429,7 +1362,7 @@
 (void *)(area->start + area->size),
 object);
 out:
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 }

 /*
@@ -1474,7 +1407,8 @@
 {
 unsigned long flags;
 struct kmemleak_object *object;
- int i;
+ struct zone *zone;
+ int __maybe_unused i;
 int new_leaks = 0;

 jiffies_last_scan = jiffies;
@@ -1482,7 +1416,7 @@
 /* prepare the kmemleak_object's */
 rcu_read_lock();
 list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 #ifdef DEBUG
 /*
 * With a few exceptions there should be a maximum of
@@ -1499,7 +1433,7 @@
 if (color_gray(object) && get_object(object))
 list_add_tail(&object->gray_list, &gray_list);

- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 rcu_read_unlock();

@@ -1514,17 +1448,20 @@
 * Struct page scanning for each node.
 */
 get_online_mems();
- for_each_online_node(i) {
- unsigned long start_pfn = node_start_pfn(i);
- unsigned long end_pfn = node_end_pfn(i);
+ for_each_populated_zone(zone) {
+ unsigned long start_pfn = zone->zone_start_pfn;
+ unsigned long end_pfn = zone_end_pfn(zone);
 unsigned long pfn;

 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
- struct page *page;
+ struct page *page = pfn_to_online_page(pfn);

- if (!pfn_valid(pfn))
+ if (!page)
 continue;
- page = pfn_to_page(pfn);
+
+ /* only scan pages belonging to this zone */
+ if (page_zone(page) != zone)
+ continue;
 /* only scan if page is in use */
 if (page_count(page) == 0)
 continue;
@@ -1541,15 +1478,15 @@
 if (kmemleak_stack_scan) {
 struct task_struct *p, *g;

- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
+ rcu_read_lock();
+ for_each_process_thread(g, p) {
 void *stack = try_get_task_stack(p);
 if (stack) {
 scan_block(stack, stack + THREAD_SIZE, NULL);
 put_task_stack(p);
 }
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
+ }
+ rcu_read_unlock();
 }

 /*
@@ -1564,14 +1501,14 @@
 */
 rcu_read_lock();
 list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
 && update_checksum(object) && get_object(object)) {
 /* color it gray temporarily */
 object->count = object->min_count;
 list_add_tail(&object->gray_list, &gray_list);
 }
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 rcu_read_unlock();

@@ -1591,13 +1528,17 @@
 */
 rcu_read_lock();
 list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 if (unreferenced_object(object) &&
 !(object->flags & OBJECT_REPORTED)) {
 object->flags |= OBJECT_REPORTED;
+
+ if (kmemleak_verbose)
+ print_unreferenced(NULL, object);
+
 new_leaks++;
 }
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 rcu_read_unlock();

@@ -1616,7 +1557,7 @@
 */
 static int kmemleak_scan_thread(void *arg)
 {
- static int first_run = 1;
+ static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

 pr_info("Automatic memory scanning thread started\n");
 set_user_nice(current, 10);
@@ -1749,10 +1690,10 @@
 struct kmemleak_object *object = v;
 unsigned long flags;

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
 print_unreferenced(seq, object);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 return 0;
 }

@@ -1782,9 +1723,9 @@
 return -EINVAL;
 }

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 dump_object_info(object);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);

 put_object(object);
 return 0;
@@ -1803,11 +1744,11 @@

 rcu_read_lock();
 list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
 if ((object->flags & OBJECT_REPORTED) &&
 unreferenced_object(object))
 __paint_it(object, KMEMLEAK_GREY);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 rcu_read_unlock();

@@ -1857,7 +1798,7 @@
 }

 if (!kmemleak_enabled) {
- ret = -EBUSY;
+ ret = -EPERM;
 goto out;
 }

@@ -1910,12 +1851,16 @@

 static void __kmemleak_do_cleanup(void)
 {
- struct kmemleak_object *object;
+ struct kmemleak_object *object, *tmp;

- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list)
- delete_object_full(object->pointer);
- rcu_read_unlock();
+ /*
+ * Kmemleak has already been disabled, no need for RCU list traversal
+ * or kmemleak_lock held.
+ */
+ list_for_each_entry_safe(object, tmp, &object_list, object_list) {
+ __remove_object(object);
+ __delete_object(object);
+ }
 }

19211866 /*
....@@ -1984,54 +1929,26 @@
19841929 }
19851930 early_param("kmemleak", kmemleak_boot_config);
19861931
1987
-static void __init print_log_trace(struct early_log *log)
1988
-{
1989
- struct stack_trace trace;
1990
-
1991
- trace.nr_entries = log->trace_len;
1992
- trace.entries = log->trace;
1993
-
1994
- pr_notice("Early log backtrace:\n");
1995
- print_stack_trace(&trace, 2);
1996
-}
1997
-
19981932 /*
19991933 * Kmemleak initialization.
20001934 */
20011935 void __init kmemleak_init(void)
20021936 {
2003
- int i;
2004
- unsigned long flags;
2005
-
20061937 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
20071938 if (!kmemleak_skip_disable) {
2008
- kmemleak_early_log = 0;
20091939 kmemleak_disable();
20101940 return;
20111941 }
20121942 #endif
1943
+
1944
+ if (kmemleak_error)
1945
+ return;
20131946
20141947 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
20151948 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
20161949
20171950 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
20181951 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2019
-
2020
- if (crt_early_log > ARRAY_SIZE(early_log))
2021
- pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
2022
- crt_early_log);
2023
-
2024
- /* the kernel is still in UP mode, so disabling the IRQs is enough */
2025
- local_irq_save(flags);
2026
- kmemleak_early_log = 0;
2027
- if (kmemleak_error) {
2028
- local_irq_restore(flags);
2029
- return;
2030
- } else {
2031
- kmemleak_enabled = 1;
2032
- kmemleak_free_enabled = 1;
2033
- }
2034
- local_irq_restore(flags);
20351952
20361953 /* register the data/bss sections */
20371954 create_object((unsigned long)_sdata, _edata - _sdata,
@@ -2043,57 +1960,6 @@
 create_object((unsigned long)__start_ro_after_init,
 __end_ro_after_init - __start_ro_after_init,
 KMEMLEAK_GREY, GFP_ATOMIC);
-
- /*
- * This is the point where tracking allocations is safe. Automatic
- * scanning is started during the late initcall. Add the early logged
- * callbacks to the kmemleak infrastructure.
- */
- for (i = 0; i < crt_early_log; i++) {
- struct early_log *log = &early_log[i];
-
- switch (log->op_type) {
- case KMEMLEAK_ALLOC:
- early_alloc(log);
- break;
- case KMEMLEAK_ALLOC_PERCPU:
- early_alloc_percpu(log);
- break;
- case KMEMLEAK_FREE:
- kmemleak_free(log->ptr);
- break;
- case KMEMLEAK_FREE_PART:
- kmemleak_free_part(log->ptr, log->size);
- break;
- case KMEMLEAK_FREE_PERCPU:
- kmemleak_free_percpu(log->ptr);
- break;
- case KMEMLEAK_NOT_LEAK:
- kmemleak_not_leak(log->ptr);
- break;
- case KMEMLEAK_IGNORE:
- kmemleak_ignore(log->ptr);
- break;
- case KMEMLEAK_SCAN_AREA:
- kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
- break;
- case KMEMLEAK_NO_SCAN:
- kmemleak_no_scan(log->ptr);
- break;
- case KMEMLEAK_SET_EXCESS_REF:
- object_set_excess_ref((unsigned long)log->ptr,
- log->excess_ref);
- break;
- default:
- kmemleak_warn("Unknown early log operation: %d\n",
- log->op_type);
- }
-
- if (kmemleak_warning) {
- print_log_trace(log);
- kmemleak_warning = 0;
- }
- }
 }

 /*
@@ -2101,14 +1967,9 @@
 */
 static int __init kmemleak_late_init(void)
 {
- struct dentry *dentry;
-
 kmemleak_initialized = 1;

- dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
- &kmemleak_fops);
- if (!dentry)
- pr_warn("Failed to create the debugfs kmemleak file\n");
+ debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

 if (kmemleak_error) {
 /*
@@ -2121,11 +1982,14 @@
 return -ENOMEM;
 }

- mutex_lock(&scan_mutex);
- start_scan_thread();
- mutex_unlock(&scan_mutex);
+ if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
+ mutex_lock(&scan_mutex);
+ start_scan_thread();
+ mutex_unlock(&scan_mutex);
+ }

- pr_info("Kernel memory leak detector initialized\n");
+ pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
+ mem_pool_free_count);

 return 0;
 }