| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * mm/kmemleak.c |
|---|
| 3 | 4 | * |
|---|
| 4 | 5 | * Copyright (C) 2008 ARM Limited |
|---|
| 5 | 6 | * Written by Catalin Marinas <catalin.marinas@arm.com> |
|---|
| 6 | | - * |
|---|
| 7 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 8 | | - * it under the terms of the GNU General Public License version 2 as |
|---|
| 9 | | - * published by the Free Software Foundation. |
|---|
| 10 | | - * |
|---|
| 11 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 12 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 13 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 14 | | - * GNU General Public License for more details. |
|---|
| 15 | | - * |
|---|
| 16 | | - * You should have received a copy of the GNU General Public License |
|---|
| 17 | | - * along with this program; if not, write to the Free Software |
|---|
| 18 | | - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
|---|
| 19 | | - * |
|---|
| 20 | 7 | * |
|---|
| 21 | 8 | * For more information on the algorithm and kmemleak usage, please see |
|---|
| 22 | 9 | * Documentation/dev-tools/kmemleak.rst. |
|---|
| .. | .. |
|---|
| 26 | 13 | * |
|---|
| 27 | 14 | * The following locks and mutexes are used by kmemleak: |
|---|
| 28 | 15 | * |
|---|
| 29 | | - * - kmemleak_lock (raw spinlock): protects the object_list modifications and |
|---|
| 16 | + * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and |
|---|
| 30 | 17 | * accesses to the object_tree_root. The object_list is the main list |
|---|
| 31 | 18 | * holding the metadata (struct kmemleak_object) for the allocated memory |
|---|
| 32 | 19 | * blocks. The object_tree_root is a red black tree used to look-up |
|---|
| .. | .. |
|---|
| 35 | 22 | * object_tree_root in the create_object() function called from the |
|---|
| 36 | 23 | * kmemleak_alloc() callback and removed in delete_object() called from the |
|---|
| 37 | 24 | * kmemleak_free() callback |
|---|
| 38 | | - * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to |
|---|
| 39 | | - * the metadata (e.g. count) are protected by this lock. Note that some |
|---|
| 40 | | - * members of this structure may be protected by other means (atomic or |
|---|
| 41 | | - * kmemleak_lock). This lock is also held when scanning the corresponding |
|---|
| 42 | | - * memory block to avoid the kernel freeing it via the kmemleak_free() |
|---|
| 43 | | - * callback. This is less heavyweight than holding a global lock like |
|---|
| 44 | | - * kmemleak_lock during scanning |
|---|
| 25 | + * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object. |
|---|
| 26 | + * Accesses to the metadata (e.g. count) are protected by this lock. Note |
|---|
| 27 | + * that some members of this structure may be protected by other means |
|---|
| 28 | + * (atomic or kmemleak_lock). This lock is also held when scanning the |
|---|
| 29 | + * corresponding memory block to avoid the kernel freeing it via the |
|---|
| 30 | + * kmemleak_free() callback. This is less heavyweight than holding a global |
|---|
| 31 | + * lock like kmemleak_lock during scanning. |
|---|
| 45 | 32 | * - scan_mutex (mutex): ensures that only one thread may scan the memory for |
|---|
| 46 | 33 | * unreferenced objects at a time. The gray_list contains the objects which |
|---|
| 47 | 34 | * are already referenced or marked as false positives and need to be |
|---|
| .. | .. |
|---|
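The lock nesting described in the comment above can be summarised with a small sketch (illustrative only, not part of this patch): the global kmemleak_lock is held just long enough to look the object up in the rb-tree and pin it with a reference count, and the per-object raw spinlock is then taken for the metadata update. `paint_object_grey()` is a hypothetical helper name; `find_and_get_object()`, `put_object()`, `object->lock`, `min_count` and `KMEMLEAK_GREY` are the existing kmemleak internals that appear later in this file.

```c
/* Illustrative sketch of the locking rules described above. */
static void paint_object_grey(unsigned long ptr)	/* hypothetical helper */
{
	struct kmemleak_object *object;
	unsigned long flags;

	/* takes kmemleak_lock internally for the rb-tree look-up */
	object = find_and_get_object(ptr, 0);
	if (!object)
		return;

	/* per-object metadata is only touched under object->lock */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->min_count = KMEMLEAK_GREY;
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);	/* drop the reference from find_and_get_object() */
}
```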
| 86 | 73 | #include <linux/seq_file.h> |
|---|
| 87 | 74 | #include <linux/cpumask.h> |
|---|
| 88 | 75 | #include <linux/spinlock.h> |
|---|
| 76 | +#include <linux/module.h> |
|---|
| 89 | 77 | #include <linux/mutex.h> |
|---|
| 90 | 78 | #include <linux/rcupdate.h> |
|---|
| 91 | 79 | #include <linux/stacktrace.h> |
|---|
| 92 | 80 | #include <linux/cache.h> |
|---|
| 93 | 81 | #include <linux/percpu.h> |
|---|
| 94 | | -#include <linux/bootmem.h> |
|---|
| 82 | +#include <linux/memblock.h> |
|---|
| 95 | 83 | #include <linux/pfn.h> |
|---|
| 96 | 84 | #include <linux/mmzone.h> |
|---|
| 97 | 85 | #include <linux/slab.h> |
|---|
| .. | .. |
|---|
| 109 | 97 | #include <linux/atomic.h> |
|---|
| 110 | 98 | |
|---|
| 111 | 99 | #include <linux/kasan.h> |
|---|
| 100 | +#include <linux/kfence.h> |
|---|
| 112 | 101 | #include <linux/kmemleak.h> |
|---|
| 113 | 102 | #include <linux/memory_hotplug.h> |
|---|
| 114 | 103 | |
|---|
| .. | .. |
|---|
| 180 | 169 | #define OBJECT_REPORTED (1 << 1) |
|---|
| 181 | 170 | /* flag set to not scan the object */ |
|---|
| 182 | 171 | #define OBJECT_NO_SCAN (1 << 2) |
|---|
| 172 | +/* flag set to fully scan the object when scan_area allocation failed */ |
|---|
| 173 | +#define OBJECT_FULL_SCAN (1 << 3) |
|---|
| 183 | 174 | |
|---|
| 175 | +#define HEX_PREFIX "    " |
|---|
| 184 | 176 | /* number of bytes to print per line; must be 16 or 32 */ |
|---|
| 185 | 177 | #define HEX_ROW_SIZE 16 |
|---|
| 186 | 178 | /* number of bytes to print at a time (1, 2, 4, 8) */ |
|---|
| .. | .. |
|---|
| 194 | 186 | static LIST_HEAD(object_list); |
|---|
| 195 | 187 | /* the list of gray-colored objects (see color_gray comment below) */ |
|---|
| 196 | 188 | static LIST_HEAD(gray_list); |
|---|
| 189 | +/* memory pool allocation */ |
|---|
| 190 | +static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE]; |
|---|
| 191 | +static int mem_pool_free_count = ARRAY_SIZE(mem_pool); |
|---|
| 192 | +static LIST_HEAD(mem_pool_free_list); |
|---|
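This static pool is what replaces the early-log buffer removed further down in this patch: objects created before kmemleak_init() has set up the slab caches are carved directly out of mem_pool[] (see mem_pool_alloc() below), so the allocation/freeing callbacks no longer need to be recorded and replayed, and kmemleak_enabled can default to 1.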
| 197 | 193 | /* search tree for object boundaries */ |
|---|
| 198 | 194 | static struct rb_root object_tree_root = RB_ROOT; |
|---|
| 199 | | -/* rw_lock protecting the access to object_list and object_tree_root */ |
|---|
| 195 | +/* protecting the access to object_list and object_tree_root */ |
|---|
| 200 | 196 | static DEFINE_RAW_SPINLOCK(kmemleak_lock); |
|---|
| 201 | 197 | |
|---|
| 202 | 198 | /* allocation caches for kmemleak internal data */ |
|---|
| .. | .. |
|---|
| 204 | 200 | static struct kmem_cache *scan_area_cache; |
|---|
| 205 | 201 | |
|---|
| 206 | 202 | /* set if tracing memory operations is enabled */ |
|---|
| 207 | | -static int kmemleak_enabled; |
|---|
| 203 | +static int kmemleak_enabled = 1; |
|---|
| 208 | 204 | /* same as above but only for the kmemleak_free() callback */ |
|---|
| 209 | | -static int kmemleak_free_enabled; |
|---|
| 205 | +static int kmemleak_free_enabled = 1; |
|---|
| 210 | 206 | /* set in the late_initcall if there were no errors */ |
|---|
| 211 | 207 | static int kmemleak_initialized; |
|---|
| 212 | | -/* enables or disables early logging of the memory operations */ |
|---|
| 213 | | -static int kmemleak_early_log = 1; |
|---|
| 214 | 208 | /* set if a kmemleak warning was issued */ |
|---|
| 215 | 209 | static int kmemleak_warning; |
|---|
| 216 | 210 | /* set if a fatal kmemleak error has occurred */ |
|---|
| .. | .. |
|---|
| 235 | 229 | /* If there are leaks that can be reported */ |
|---|
| 236 | 230 | static bool kmemleak_found_leaks; |
|---|
| 237 | 231 | |
|---|
| 238 | | -/* |
|---|
| 239 | | - * Early object allocation/freeing logging. Kmemleak is initialized after the |
|---|
| 240 | | - * kernel allocator. However, both the kernel allocator and kmemleak may |
|---|
| 241 | | - * allocate memory blocks which need to be tracked. Kmemleak defines an |
|---|
| 242 | | - * arbitrary buffer to hold the allocation/freeing information before it is |
|---|
| 243 | | - * fully initialized. |
|---|
| 244 | | - */ |
|---|
| 245 | | - |
|---|
| 246 | | -/* kmemleak operation type for early logging */ |
|---|
| 247 | | -enum { |
|---|
| 248 | | - KMEMLEAK_ALLOC, |
|---|
| 249 | | - KMEMLEAK_ALLOC_PERCPU, |
|---|
| 250 | | - KMEMLEAK_FREE, |
|---|
| 251 | | - KMEMLEAK_FREE_PART, |
|---|
| 252 | | - KMEMLEAK_FREE_PERCPU, |
|---|
| 253 | | - KMEMLEAK_NOT_LEAK, |
|---|
| 254 | | - KMEMLEAK_IGNORE, |
|---|
| 255 | | - KMEMLEAK_SCAN_AREA, |
|---|
| 256 | | - KMEMLEAK_NO_SCAN, |
|---|
| 257 | | - KMEMLEAK_SET_EXCESS_REF |
|---|
| 258 | | -}; |
|---|
| 259 | | - |
|---|
| 260 | | -/* |
|---|
| 261 | | - * Structure holding the information passed to kmemleak callbacks during the |
|---|
| 262 | | - * early logging. |
|---|
| 263 | | - */ |
|---|
| 264 | | -struct early_log { |
|---|
| 265 | | - int op_type; /* kmemleak operation type */ |
|---|
| 266 | | - int min_count; /* minimum reference count */ |
|---|
| 267 | | - const void *ptr; /* allocated/freed memory block */ |
|---|
| 268 | | - union { |
|---|
| 269 | | - size_t size; /* memory block size */ |
|---|
| 270 | | - unsigned long excess_ref; /* surplus reference passing */ |
|---|
| 271 | | - }; |
|---|
| 272 | | - unsigned long trace[MAX_TRACE]; /* stack trace */ |
|---|
| 273 | | - unsigned int trace_len; /* stack trace length */ |
|---|
| 274 | | -}; |
|---|
| 275 | | - |
|---|
| 276 | | -/* early logging buffer and current position */ |
|---|
| 277 | | -static struct early_log |
|---|
| 278 | | - early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata; |
|---|
| 279 | | -static int crt_early_log __initdata; |
|---|
| 232 | +static bool kmemleak_verbose; |
|---|
| 233 | +module_param_named(verbose, kmemleak_verbose, bool, 0600); |
|---|
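Being a module parameter with mode 0600, this knob can be set at boot time with `kmemleak.verbose=1` or toggled at run time via `/sys/module/kmemleak/parameters/verbose`; when enabled, newly found unreferenced objects are printed straight to the kernel log through `print_unreferenced(NULL, object)` during the scan (see further down), in addition to being readable from the debugfs file.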
| 280 | 234 | |
|---|
| 281 | 235 | static void kmemleak_disable(void); |
|---|
| 282 | 236 | |
|---|
| .. | .. |
|---|
| 299 | 253 | kmemleak_disable(); \ |
|---|
| 300 | 254 | } while (0) |
|---|
| 301 | 255 | |
|---|
| 256 | +#define warn_or_seq_printf(seq, fmt, ...) do { \ |
|---|
| 257 | + if (seq) \ |
|---|
| 258 | + seq_printf(seq, fmt, ##__VA_ARGS__); \ |
|---|
| 259 | + else \ |
|---|
| 260 | + pr_warn(fmt, ##__VA_ARGS__); \ |
|---|
| 261 | +} while (0) |
|---|
| 262 | + |
|---|
| 263 | +static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type, |
|---|
| 264 | + int rowsize, int groupsize, const void *buf, |
|---|
| 265 | + size_t len, bool ascii) |
|---|
| 266 | +{ |
|---|
| 267 | + if (seq) |
|---|
| 268 | + seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize, |
|---|
| 269 | + buf, len, ascii); |
|---|
| 270 | + else |
|---|
| 271 | + print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type, |
|---|
| 272 | + rowsize, groupsize, buf, len, ascii); |
|---|
| 273 | +} |
|---|
| 274 | + |
|---|
| 302 | 275 | /* |
|---|
| 303 | 276 | * Printing of the objects hex dump to the seq file. The number of lines to be |
|---|
| 304 | 277 | * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The |
|---|
| .. | .. |
|---|
| 314 | 287 | /* limit the number of lines to HEX_MAX_LINES */ |
|---|
| 315 | 288 | len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); |
|---|
| 316 | 289 | |
|---|
| 317 | | - seq_printf(seq, " hex dump (first %zu bytes):\n", len); |
|---|
| 290 | + warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len); |
|---|
| 318 | 291 | kasan_disable_current(); |
|---|
| 319 | | - seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE, |
|---|
| 320 | | - HEX_GROUP_SIZE, ptr, len, HEX_ASCII); |
|---|
| 292 | + warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE, |
|---|
| 293 | + HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII); |
|---|
| 321 | 294 | kasan_enable_current(); |
|---|
| 322 | 295 | } |
|---|
| 323 | 296 | |
|---|
| .. | .. |
|---|
| 365 | 338 | int i; |
|---|
| 366 | 339 | unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies); |
|---|
| 367 | 340 | |
|---|
| 368 | | - seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", |
|---|
| 341 | + warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", |
|---|
| 369 | 342 | object->pointer, object->size); |
|---|
| 370 | | - seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n", |
|---|
| 343 | + warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n", |
|---|
| 371 | 344 | object->comm, object->pid, object->jiffies, |
|---|
| 372 | 345 | msecs_age / 1000, msecs_age % 1000); |
|---|
| 373 | 346 | hex_dump_object(seq, object); |
|---|
| 374 | | - seq_printf(seq, " backtrace:\n"); |
|---|
| 347 | + warn_or_seq_printf(seq, " backtrace:\n"); |
|---|
| 375 | 348 | |
|---|
| 376 | 349 | for (i = 0; i < object->trace_len; i++) { |
|---|
| 377 | 350 | void *ptr = (void *)object->trace[i]; |
|---|
| 378 | | - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); |
|---|
| 351 | + warn_or_seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); |
|---|
| 379 | 352 | } |
|---|
| 380 | 353 | } |
|---|
| 381 | 354 | |
|---|
| .. | .. |
|---|
| 386 | 359 | */ |
|---|
| 387 | 360 | static void dump_object_info(struct kmemleak_object *object) |
|---|
| 388 | 361 | { |
|---|
| 389 | | - struct stack_trace trace; |
|---|
| 390 | | - |
|---|
| 391 | | - trace.nr_entries = object->trace_len; |
|---|
| 392 | | - trace.entries = object->trace; |
|---|
| 393 | | - |
|---|
| 394 | 362 | pr_notice("Object 0x%08lx (size %zu):\n", |
|---|
| 395 | 363 | object->pointer, object->size); |
|---|
| 396 | 364 | pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", |
|---|
| .. | .. |
|---|
| 400 | 368 | pr_notice(" flags = 0x%x\n", object->flags); |
|---|
| 401 | 369 | pr_notice(" checksum = %u\n", object->checksum); |
|---|
| 402 | 370 | pr_notice(" backtrace:\n"); |
|---|
| 403 | | - print_stack_trace(&trace, 4); |
|---|
| 371 | + stack_trace_print(object->trace, object->trace_len, 4); |
|---|
| 404 | 372 | } |
|---|
| 405 | 373 | |
|---|
| 406 | 374 | /* |
|---|
| .. | .. |
|---|
| 444 | 412 | } |
|---|
| 445 | 413 | |
|---|
| 446 | 414 | /* |
|---|
| 415 | + * Memory pool allocation and freeing. kmemleak_lock must not be held. |
|---|
| 416 | + */ |
|---|
| 417 | +static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) |
|---|
| 418 | +{ |
|---|
| 419 | + unsigned long flags; |
|---|
| 420 | + struct kmemleak_object *object; |
|---|
| 421 | + |
|---|
| 422 | + /* try the slab allocator first */ |
|---|
| 423 | + if (object_cache) { |
|---|
| 424 | + object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); |
|---|
| 425 | + if (object) |
|---|
| 426 | + return object; |
|---|
| 427 | + } |
|---|
| 428 | + |
|---|
| 429 | + /* slab allocation failed, try the memory pool */ |
|---|
| 430 | + raw_spin_lock_irqsave(&kmemleak_lock, flags); |
|---|
| 431 | + object = list_first_entry_or_null(&mem_pool_free_list, |
|---|
| 432 | + typeof(*object), object_list); |
|---|
| 433 | + if (object) |
|---|
| 434 | + list_del(&object->object_list); |
|---|
| 435 | + else if (mem_pool_free_count) |
|---|
| 436 | + object = &mem_pool[--mem_pool_free_count]; |
|---|
| 437 | + else |
|---|
| 438 | + pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); |
|---|
| 439 | + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); |
|---|
| 440 | + |
|---|
| 441 | + return object; |
|---|
| 442 | +} |
|---|
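Taken together with create_object() below, the fallback order here is: the slab cache once object_cache has been set up, then recycled entries on mem_pool_free_list, then fresh slots from the static mem_pool[] array. When the pool is exhausted the warning above fires once and the subsequent NULL return makes create_object() call kmemleak_disable(), so on systems with many early allocations the usual remedy is a larger CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE.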
| 443 | + |
|---|
| 444 | +/* |
|---|
| 445 | + * Return the object to either the slab allocator or the memory pool. |
|---|
| 446 | + */ |
|---|
| 447 | +static void mem_pool_free(struct kmemleak_object *object) |
|---|
| 448 | +{ |
|---|
| 449 | + unsigned long flags; |
|---|
| 450 | + |
|---|
| 451 | + if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) { |
|---|
| 452 | + kmem_cache_free(object_cache, object); |
|---|
| 453 | + return; |
|---|
| 454 | + } |
|---|
| 455 | + |
|---|
| 456 | + /* add the object to the memory pool free list */ |
|---|
| 457 | + raw_spin_lock_irqsave(&kmemleak_lock, flags); |
|---|
| 458 | + list_add(&object->object_list, &mem_pool_free_list); |
|---|
| 459 | + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); |
|---|
| 460 | +} |
|---|
| 461 | + |
|---|
| 462 | +/* |
|---|
| 447 | 463 | * RCU callback to free a kmemleak_object. |
|---|
| 448 | 464 | */ |
|---|
| 449 | 465 | static void free_object_rcu(struct rcu_head *rcu) |
|---|
| .. | .. |
|---|
| 461 | 477 | hlist_del(&area->node); |
|---|
| 462 | 478 | kmem_cache_free(scan_area_cache, area); |
|---|
| 463 | 479 | } |
|---|
| 464 | | - kmem_cache_free(object_cache, object); |
|---|
| 480 | + mem_pool_free(object); |
|---|
| 465 | 481 | } |
|---|
| 466 | 482 | |
|---|
| 467 | 483 | /* |
|---|
| .. | .. |
|---|
| 479 | 495 | /* should only get here after delete_object was called */ |
|---|
| 480 | 496 | WARN_ON(object->flags & OBJECT_ALLOCATED); |
|---|
| 481 | 497 | |
|---|
| 482 | | - call_rcu(&object->rcu, free_object_rcu); |
|---|
| 498 | + /* |
|---|
| 499 | + * It may be too early for the RCU callbacks, however, there is no |
|---|
| 500 | + * concurrent object_list traversal when !object_cache and all objects |
|---|
| 501 | + * came from the memory pool. Free the object directly. |
|---|
| 502 | + */ |
|---|
| 503 | + if (object_cache) |
|---|
| 504 | + call_rcu(&object->rcu, free_object_rcu); |
|---|
| 505 | + else |
|---|
| 506 | + free_object_rcu(&object->rcu); |
|---|
| 483 | 507 | } |
|---|
| 484 | 508 | |
|---|
| 485 | 509 | /* |
|---|
| .. | .. |
|---|
| 504 | 528 | } |
|---|
| 505 | 529 | |
|---|
| 506 | 530 | /* |
|---|
| 531 | + * Remove an object from the object_tree_root and object_list. Must be called |
|---|
| 532 | + * with the kmemleak_lock held _if_ kmemleak is still enabled. |
|---|
| 533 | + */ |
|---|
| 534 | +static void __remove_object(struct kmemleak_object *object) |
|---|
| 535 | +{ |
|---|
| 536 | + rb_erase(&object->rb_node, &object_tree_root); |
|---|
| 537 | + list_del_rcu(&object->object_list); |
|---|
| 538 | +} |
|---|
| 539 | + |
|---|
| 540 | +/* |
|---|
| 507 | 541 | * Look up an object in the object search tree and remove it from both |
|---|
| 508 | 542 | * object_tree_root and object_list. The returned object's use_count should be |
|---|
| 509 | 543 | * at least 1, as initially set by create_object(). |
|---|
| .. | .. |
|---|
| 515 | 549 | |
|---|
| 516 | 550 | raw_spin_lock_irqsave(&kmemleak_lock, flags); |
|---|
| 517 | 551 | object = lookup_object(ptr, alias); |
|---|
| 518 | | - if (object) { |
|---|
| 519 | | - rb_erase(&object->rb_node, &object_tree_root); |
|---|
| 520 | | - list_del_rcu(&object->object_list); |
|---|
| 521 | | - } |
|---|
| 552 | + if (object) |
|---|
| 553 | + __remove_object(object); |
|---|
| 522 | 554 | raw_spin_unlock_irqrestore(&kmemleak_lock, flags); |
|---|
| 523 | 555 | |
|---|
| 524 | 556 | return object; |
|---|
| .. | .. |
|---|
| 529 | 561 | */ |
|---|
| 530 | 562 | static int __save_stack_trace(unsigned long *trace) |
|---|
| 531 | 563 | { |
|---|
| 532 | | - struct stack_trace stack_trace; |
|---|
| 533 | | - |
|---|
| 534 | | - stack_trace.max_entries = MAX_TRACE; |
|---|
| 535 | | - stack_trace.nr_entries = 0; |
|---|
| 536 | | - stack_trace.entries = trace; |
|---|
| 537 | | - stack_trace.skip = 2; |
|---|
| 538 | | - save_stack_trace(&stack_trace); |
|---|
| 539 | | - |
|---|
| 540 | | - return stack_trace.nr_entries; |
|---|
| 564 | + return stack_trace_save(trace, MAX_TRACE, 2); |
|---|
| 541 | 565 | } |
|---|
| 542 | 566 | |
|---|
| 543 | 567 | /* |
|---|
| .. | .. |
|---|
| 550 | 574 | unsigned long flags; |
|---|
| 551 | 575 | struct kmemleak_object *object, *parent; |
|---|
| 552 | 576 | struct rb_node **link, *rb_parent; |
|---|
| 577 | + unsigned long untagged_ptr; |
|---|
| 553 | 578 | |
|---|
| 554 | | - object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); |
|---|
| 579 | + object = mem_pool_alloc(gfp); |
|---|
| 555 | 580 | if (!object) { |
|---|
| 556 | 581 | pr_warn("Cannot allocate a kmemleak_object structure\n"); |
|---|
| 557 | 582 | kmemleak_disable(); |
|---|
| .. | .. |
|---|
| 565 | 590 | atomic_set(&object->use_count, 1); |
|---|
| 566 | 591 | object->flags = OBJECT_ALLOCATED; |
|---|
| 567 | 592 | object->pointer = ptr; |
|---|
| 568 | | - object->size = size; |
|---|
| 593 | + object->size = kfence_ksize((void *)ptr) ?: size; |
|---|
| 569 | 594 | object->excess_ref = 0; |
|---|
| 570 | 595 | object->min_count = min_count; |
|---|
| 571 | 596 | object->count = 0; /* white color initially */ |
|---|
| .. | .. |
|---|
| 595 | 620 | |
|---|
| 596 | 621 | raw_spin_lock_irqsave(&kmemleak_lock, flags); |
|---|
| 597 | 622 | |
|---|
| 598 | | - min_addr = min(min_addr, ptr); |
|---|
| 599 | | - max_addr = max(max_addr, ptr + size); |
|---|
| 623 | + untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr); |
|---|
| 624 | + min_addr = min(min_addr, untagged_ptr); |
|---|
| 625 | + max_addr = max(max_addr, untagged_ptr + size); |
|---|
| 600 | 626 | link = &object_tree_root.rb_node; |
|---|
| 601 | 627 | rb_parent = NULL; |
|---|
| 602 | 628 | while (*link) { |
|---|
| .. | .. |
|---|
| 689 | 715 | /* |
|---|
| 690 | 716 | * Create one or two objects that may result from the memory block |
|---|
| 691 | 717 | * split. Note that partial freeing is only done by free_bootmem() and |
|---|
| 692 | | - * this happens before kmemleak_init() is called. The path below is |
|---|
| 693 | | - * only executed during early log recording in kmemleak_init(), so |
|---|
| 694 | | - * GFP_KERNEL is enough. |
|---|
| 718 | + * this happens before kmemleak_init() is called. |
|---|
| 695 | 719 | */ |
|---|
| 696 | 720 | start = object->pointer; |
|---|
| 697 | 721 | end = object->pointer + object->size; |
|---|
| .. | .. |
|---|
| 763 | 787 | { |
|---|
| 764 | 788 | unsigned long flags; |
|---|
| 765 | 789 | struct kmemleak_object *object; |
|---|
| 766 | | - struct kmemleak_scan_area *area; |
|---|
| 790 | + struct kmemleak_scan_area *area = NULL; |
|---|
| 791 | + unsigned long untagged_ptr; |
|---|
| 792 | + unsigned long untagged_objp; |
|---|
| 767 | 793 | |
|---|
| 768 | 794 | object = find_and_get_object(ptr, 1); |
|---|
| 769 | 795 | if (!object) { |
|---|
| .. | .. |
|---|
| 772 | 798 | return; |
|---|
| 773 | 799 | } |
|---|
| 774 | 800 | |
|---|
| 775 | | - area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); |
|---|
| 776 | | - if (!area) { |
|---|
| 777 | | - pr_warn("Cannot allocate a scan area\n"); |
|---|
| 778 | | - goto out; |
|---|
| 779 | | - } |
|---|
| 801 | + untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr); |
|---|
| 802 | + untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); |
|---|
| 803 | + |
|---|
| 804 | + if (scan_area_cache) |
|---|
| 805 | + area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); |
|---|
| 780 | 806 | |
|---|
| 781 | 807 | raw_spin_lock_irqsave(&object->lock, flags); |
|---|
| 808 | + if (!area) { |
|---|
| 809 | + pr_warn_once("Cannot allocate a scan area, scanning the full object\n"); |
|---|
| 810 | + /* mark the object for full scan to avoid false positives */ |
|---|
| 811 | + object->flags |= OBJECT_FULL_SCAN; |
|---|
| 812 | + goto out_unlock; |
|---|
| 813 | + } |
|---|
| 782 | 814 | if (size == SIZE_MAX) { |
|---|
| 783 | | - size = object->pointer + object->size - ptr; |
|---|
| 784 | | - } else if (ptr + size > object->pointer + object->size) { |
|---|
| 815 | + size = untagged_objp + object->size - untagged_ptr; |
|---|
| 816 | + } else if (untagged_ptr + size > untagged_objp + object->size) { |
|---|
| 785 | 817 | kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); |
|---|
| 786 | 818 | dump_object_info(object); |
|---|
| 787 | 819 | kmem_cache_free(scan_area_cache, area); |
|---|
| .. | .. |
|---|
| 795 | 827 | hlist_add_head(&area->node, &object->area_list); |
|---|
| 796 | 828 | out_unlock: |
|---|
| 797 | 829 | raw_spin_unlock_irqrestore(&object->lock, flags); |
|---|
| 798 | | -out: |
|---|
| 799 | 830 | put_object(object); |
|---|
| 800 | 831 | } |
|---|
| 801 | 832 | |
|---|
| .. | .. |
|---|
| 845 | 876 | put_object(object); |
|---|
| 846 | 877 | } |
|---|
| 847 | 878 | |
|---|
| 848 | | -/* |
|---|
| 849 | | - * Log an early kmemleak_* call to the early_log buffer. These calls will be |
|---|
| 850 | | - * processed later once kmemleak is fully initialized. |
|---|
| 851 | | - */ |
|---|
| 852 | | -static void __init log_early(int op_type, const void *ptr, size_t size, |
|---|
| 853 | | - int min_count) |
|---|
| 854 | | -{ |
|---|
| 855 | | - unsigned long flags; |
|---|
| 856 | | - struct early_log *log; |
|---|
| 857 | | - |
|---|
| 858 | | - if (kmemleak_error) { |
|---|
| 859 | | - /* kmemleak stopped recording, just count the requests */ |
|---|
| 860 | | - crt_early_log++; |
|---|
| 861 | | - return; |
|---|
| 862 | | - } |
|---|
| 863 | | - |
|---|
| 864 | | - if (crt_early_log >= ARRAY_SIZE(early_log)) { |
|---|
| 865 | | - crt_early_log++; |
|---|
| 866 | | - kmemleak_disable(); |
|---|
| 867 | | - return; |
|---|
| 868 | | - } |
|---|
| 869 | | - |
|---|
| 870 | | - /* |
|---|
| 871 | | - * There is no need for locking since the kernel is still in UP mode |
|---|
| 872 | | - * at this stage. Disabling the IRQs is enough. |
|---|
| 873 | | - */ |
|---|
| 874 | | - local_irq_save(flags); |
|---|
| 875 | | - log = &early_log[crt_early_log]; |
|---|
| 876 | | - log->op_type = op_type; |
|---|
| 877 | | - log->ptr = ptr; |
|---|
| 878 | | - log->size = size; |
|---|
| 879 | | - log->min_count = min_count; |
|---|
| 880 | | - log->trace_len = __save_stack_trace(log->trace); |
|---|
| 881 | | - crt_early_log++; |
|---|
| 882 | | - local_irq_restore(flags); |
|---|
| 883 | | -} |
|---|
| 884 | | - |
|---|
| 885 | | -/* |
|---|
| 886 | | - * Log an early allocated block and populate the stack trace. |
|---|
| 887 | | - */ |
|---|
| 888 | | -static void early_alloc(struct early_log *log) |
|---|
| 889 | | -{ |
|---|
| 890 | | - struct kmemleak_object *object; |
|---|
| 891 | | - unsigned long flags; |
|---|
| 892 | | - int i; |
|---|
| 893 | | - |
|---|
| 894 | | - if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr)) |
|---|
| 895 | | - return; |
|---|
| 896 | | - |
|---|
| 897 | | - /* |
|---|
| 898 | | - * RCU locking needed to ensure object is not freed via put_object(). |
|---|
| 899 | | - */ |
|---|
| 900 | | - rcu_read_lock(); |
|---|
| 901 | | - object = create_object((unsigned long)log->ptr, log->size, |
|---|
| 902 | | - log->min_count, GFP_ATOMIC); |
|---|
| 903 | | - if (!object) |
|---|
| 904 | | - goto out; |
|---|
| 905 | | - raw_spin_lock_irqsave(&object->lock, flags); |
|---|
| 906 | | - for (i = 0; i < log->trace_len; i++) |
|---|
| 907 | | - object->trace[i] = log->trace[i]; |
|---|
| 908 | | - object->trace_len = log->trace_len; |
|---|
| 909 | | - raw_spin_unlock_irqrestore(&object->lock, flags); |
|---|
| 910 | | -out: |
|---|
| 911 | | - rcu_read_unlock(); |
|---|
| 912 | | -} |
|---|
| 913 | | - |
|---|
| 914 | | -/* |
|---|
| 915 | | - * Log an early allocated block and populate the stack trace. |
|---|
| 916 | | - */ |
|---|
| 917 | | -static void early_alloc_percpu(struct early_log *log) |
|---|
| 918 | | -{ |
|---|
| 919 | | - unsigned int cpu; |
|---|
| 920 | | - const void __percpu *ptr = log->ptr; |
|---|
| 921 | | - |
|---|
| 922 | | - for_each_possible_cpu(cpu) { |
|---|
| 923 | | - log->ptr = per_cpu_ptr(ptr, cpu); |
|---|
| 924 | | - early_alloc(log); |
|---|
| 925 | | - } |
|---|
| 926 | | -} |
|---|
| 927 | | - |
|---|
| 928 | 879 | /** |
|---|
| 929 | 880 | * kmemleak_alloc - register a newly allocated object |
|---|
| 930 | 881 | * @ptr: pointer to beginning of the object |
|---|
| .. | .. |
|---|
| 946 | 897 | |
|---|
| 947 | 898 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
|---|
| 948 | 899 | create_object((unsigned long)ptr, size, min_count, gfp); |
|---|
| 949 | | - else if (kmemleak_early_log) |
|---|
| 950 | | - log_early(KMEMLEAK_ALLOC, ptr, size, min_count); |
|---|
| 951 | 900 | } |
|---|
| 952 | 901 | EXPORT_SYMBOL_GPL(kmemleak_alloc); |
|---|
| 953 | 902 | |
|---|
| .. | .. |
|---|
| 975 | 924 | for_each_possible_cpu(cpu) |
|---|
| 976 | 925 | create_object((unsigned long)per_cpu_ptr(ptr, cpu), |
|---|
| 977 | 926 | size, 0, gfp); |
|---|
| 978 | | - else if (kmemleak_early_log) |
|---|
| 979 | | - log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0); |
|---|
| 980 | 927 | } |
|---|
| 981 | 928 | EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu); |
|---|
| 982 | 929 | |
|---|
| .. | .. |
|---|
| 1001 | 948 | create_object((unsigned long)area->addr, size, 2, gfp); |
|---|
| 1002 | 949 | object_set_excess_ref((unsigned long)area, |
|---|
| 1003 | 950 | (unsigned long)area->addr); |
|---|
| 1004 | | - } else if (kmemleak_early_log) { |
|---|
| 1005 | | - log_early(KMEMLEAK_ALLOC, area->addr, size, 2); |
|---|
| 1006 | | - /* reusing early_log.size for storing area->addr */ |
|---|
| 1007 | | - log_early(KMEMLEAK_SET_EXCESS_REF, |
|---|
| 1008 | | - area, (unsigned long)area->addr, 0); |
|---|
| 1009 | 951 | } |
|---|
| 1010 | 952 | } |
|---|
| 1011 | 953 | EXPORT_SYMBOL_GPL(kmemleak_vmalloc); |
|---|
| .. | .. |
|---|
| 1023 | 965 | |
|---|
| 1024 | 966 | if (kmemleak_free_enabled && ptr && !IS_ERR(ptr)) |
|---|
| 1025 | 967 | delete_object_full((unsigned long)ptr); |
|---|
| 1026 | | - else if (kmemleak_early_log) |
|---|
| 1027 | | - log_early(KMEMLEAK_FREE, ptr, 0, 0); |
|---|
| 1028 | 968 | } |
|---|
| 1029 | 969 | EXPORT_SYMBOL_GPL(kmemleak_free); |
|---|
| 1030 | 970 | |
|---|
| .. | .. |
|---|
| 1043 | 983 | |
|---|
| 1044 | 984 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
|---|
| 1045 | 985 | delete_object_part((unsigned long)ptr, size); |
|---|
| 1046 | | - else if (kmemleak_early_log) |
|---|
| 1047 | | - log_early(KMEMLEAK_FREE_PART, ptr, size, 0); |
|---|
| 1048 | 986 | } |
|---|
| 1049 | 987 | EXPORT_SYMBOL_GPL(kmemleak_free_part); |
|---|
| 1050 | 988 | |
|---|
| .. | .. |
|---|
| 1065 | 1003 | for_each_possible_cpu(cpu) |
|---|
| 1066 | 1004 | delete_object_full((unsigned long)per_cpu_ptr(ptr, |
|---|
| 1067 | 1005 | cpu)); |
|---|
| 1068 | | - else if (kmemleak_early_log) |
|---|
| 1069 | | - log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0); |
|---|
| 1070 | 1006 | } |
|---|
| 1071 | 1007 | EXPORT_SYMBOL_GPL(kmemleak_free_percpu); |
|---|
| 1072 | 1008 | |
|---|
| .. | .. |
|---|
| 1117 | 1053 | |
|---|
| 1118 | 1054 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
|---|
| 1119 | 1055 | make_gray_object((unsigned long)ptr); |
|---|
| 1120 | | - else if (kmemleak_early_log) |
|---|
| 1121 | | - log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0); |
|---|
| 1122 | 1056 | } |
|---|
| 1123 | 1057 | EXPORT_SYMBOL(kmemleak_not_leak); |
|---|
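For context, a typical caller of this annotation looks roughly like the sketch below (illustrative only; `example_init()`, the buffer size and `example_dev_set_dma_addr()` are made-up names): the allocation is reachable only through a physical address handed to hardware, so kmemleak cannot see a pointer to it and would otherwise report it.

```c
static int __init example_init(void)		/* hypothetical caller */
{
	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/*
	 * Only the physical address is kept (programmed into a device),
	 * so no scanned memory holds a pointer to this block.  Grey it:
	 * never reported as a leak, but its contents are still scanned.
	 */
	kmemleak_not_leak(buf);
	example_dev_set_dma_addr(virt_to_phys(buf));	/* hypothetical */
	return 0;
}
```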
| 1124 | 1058 | |
|---|
| .. | .. |
|---|
| 1137 | 1071 | |
|---|
| 1138 | 1072 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
|---|
| 1139 | 1073 | make_black_object((unsigned long)ptr); |
|---|
| 1140 | | - else if (kmemleak_early_log) |
|---|
| 1141 | | - log_early(KMEMLEAK_IGNORE, ptr, 0, 0); |
|---|
| 1142 | 1074 | } |
|---|
| 1143 | 1075 | EXPORT_SYMBOL(kmemleak_ignore); |
|---|
| 1144 | 1076 | |
|---|
| .. | .. |
|---|
| 1159 | 1091 | |
|---|
| 1160 | 1092 | if (kmemleak_enabled && ptr && size && !IS_ERR(ptr)) |
|---|
| 1161 | 1093 | add_scan_area((unsigned long)ptr, size, gfp); |
|---|
| 1162 | | - else if (kmemleak_early_log) |
|---|
| 1163 | | - log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); |
|---|
| 1164 | 1094 | } |
|---|
| 1165 | 1095 | EXPORT_SYMBOL(kmemleak_scan_area); |
|---|
| 1166 | 1096 | |
|---|
| .. | .. |
|---|
| 1179 | 1109 | |
|---|
| 1180 | 1110 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
|---|
| 1181 | 1111 | object_no_scan((unsigned long)ptr); |
|---|
| 1182 | | - else if (kmemleak_early_log) |
|---|
| 1183 | | - log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0); |
|---|
| 1184 | 1112 | } |
|---|
| 1185 | 1113 | EXPORT_SYMBOL(kmemleak_no_scan); |
|---|
| 1186 | 1114 | |
|---|
| .. | .. |
|---|
| 1247 | 1175 | u32 old_csum = object->checksum; |
|---|
| 1248 | 1176 | |
|---|
| 1249 | 1177 | kasan_disable_current(); |
|---|
| 1250 | | - object->checksum = crc32(0, (void *)object->pointer, object->size); |
|---|
| 1178 | + kcsan_disable_current(); |
|---|
| 1179 | + object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size); |
|---|
| 1251 | 1180 | kasan_enable_current(); |
|---|
| 1181 | + kcsan_enable_current(); |
|---|
| 1252 | 1182 | |
|---|
| 1253 | 1183 | return object->checksum != old_csum; |
|---|
| 1254 | 1184 | } |
|---|
| .. | .. |
|---|
| 1309 | 1239 | unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); |
|---|
| 1310 | 1240 | unsigned long *end = _end - (BYTES_PER_POINTER - 1); |
|---|
| 1311 | 1241 | unsigned long flags; |
|---|
| 1242 | + unsigned long untagged_ptr; |
|---|
| 1312 | 1243 | |
|---|
| 1313 | 1244 | raw_spin_lock_irqsave(&kmemleak_lock, flags); |
|---|
| 1314 | 1245 | for (ptr = start; ptr < end; ptr++) { |
|---|
| .. | .. |
|---|
| 1320 | 1251 | break; |
|---|
| 1321 | 1252 | |
|---|
| 1322 | 1253 | kasan_disable_current(); |
|---|
| 1323 | | - pointer = *ptr; |
|---|
| 1254 | + pointer = *(unsigned long *)kasan_reset_tag((void *)ptr); |
|---|
| 1324 | 1255 | kasan_enable_current(); |
|---|
| 1325 | 1256 | |
|---|
| 1326 | | - if (pointer < min_addr || pointer >= max_addr) |
|---|
| 1257 | + untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer); |
|---|
| 1258 | + if (untagged_ptr < min_addr || untagged_ptr >= max_addr) |
|---|
| 1327 | 1259 | continue; |
|---|
| 1328 | 1260 | |
|---|
| 1329 | 1261 | /* |
|---|
| .. | .. |
|---|
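A note on the tag handling in the hunk above: with the tag-based KASAN modes the top byte of a pointer carries a tag, while min_addr and max_addr are maintained from untagged addresses in create_object() earlier in this patch, so the word is read through an untagged address and the value is untagged again before the range check; without this, valid tagged heap pointers would fall outside [min_addr, max_addr) and the objects they reference could be falsely reported as leaks.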
| 1406 | 1338 | if (!(object->flags & OBJECT_ALLOCATED)) |
|---|
| 1407 | 1339 | /* already freed object */ |
|---|
| 1408 | 1340 | goto out; |
|---|
| 1409 | | - if (hlist_empty(&object->area_list)) { |
|---|
| 1341 | + if (hlist_empty(&object->area_list) || |
|---|
| 1342 | + object->flags & OBJECT_FULL_SCAN) { |
|---|
| 1410 | 1343 | void *start = (void *)object->pointer; |
|---|
| 1411 | 1344 | void *end = (void *)(object->pointer + object->size); |
|---|
| 1412 | 1345 | void *next; |
|---|
| .. | .. |
|---|
| 1474 | 1407 | { |
|---|
| 1475 | 1408 | unsigned long flags; |
|---|
| 1476 | 1409 | struct kmemleak_object *object; |
|---|
| 1477 | | - int i; |
|---|
| 1410 | + struct zone *zone; |
|---|
| 1411 | + int __maybe_unused i; |
|---|
| 1478 | 1412 | int new_leaks = 0; |
|---|
| 1479 | 1413 | |
|---|
| 1480 | 1414 | jiffies_last_scan = jiffies; |
|---|
| .. | .. |
|---|
| 1514 | 1448 | * Struct page scanning for each node. |
|---|
| 1515 | 1449 | */ |
|---|
| 1516 | 1450 | get_online_mems(); |
|---|
| 1517 | | - for_each_online_node(i) { |
|---|
| 1518 | | - unsigned long start_pfn = node_start_pfn(i); |
|---|
| 1519 | | - unsigned long end_pfn = node_end_pfn(i); |
|---|
| 1451 | + for_each_populated_zone(zone) { |
|---|
| 1452 | + unsigned long start_pfn = zone->zone_start_pfn; |
|---|
| 1453 | + unsigned long end_pfn = zone_end_pfn(zone); |
|---|
| 1520 | 1454 | unsigned long pfn; |
|---|
| 1521 | 1455 | |
|---|
| 1522 | 1456 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
|---|
| 1523 | | - struct page *page; |
|---|
| 1457 | + struct page *page = pfn_to_online_page(pfn); |
|---|
| 1524 | 1458 | |
|---|
| 1525 | | - if (!pfn_valid(pfn)) |
|---|
| 1459 | + if (!page) |
|---|
| 1526 | 1460 | continue; |
|---|
| 1527 | | - page = pfn_to_page(pfn); |
|---|
| 1461 | + |
|---|
| 1462 | + /* only scan pages belonging to this zone */ |
|---|
| 1463 | + if (page_zone(page) != zone) |
|---|
| 1464 | + continue; |
|---|
| 1528 | 1465 | /* only scan if page is in use */ |
|---|
| 1529 | 1466 | if (page_count(page) == 0) |
|---|
| 1530 | 1467 | continue; |
|---|
| .. | .. |
|---|
| 1541 | 1478 | if (kmemleak_stack_scan) { |
|---|
| 1542 | 1479 | struct task_struct *p, *g; |
|---|
| 1543 | 1480 | |
|---|
| 1544 | | - read_lock(&tasklist_lock); |
|---|
| 1545 | | - do_each_thread(g, p) { |
|---|
| 1481 | + rcu_read_lock(); |
|---|
| 1482 | + for_each_process_thread(g, p) { |
|---|
| 1546 | 1483 | void *stack = try_get_task_stack(p); |
|---|
| 1547 | 1484 | if (stack) { |
|---|
| 1548 | 1485 | scan_block(stack, stack + THREAD_SIZE, NULL); |
|---|
| 1549 | 1486 | put_task_stack(p); |
|---|
| 1550 | 1487 | } |
|---|
| 1551 | | - } while_each_thread(g, p); |
|---|
| 1552 | | - read_unlock(&tasklist_lock); |
|---|
| 1488 | + } |
|---|
| 1489 | + rcu_read_unlock(); |
|---|
| 1553 | 1490 | } |
|---|
| 1554 | 1491 | |
|---|
| 1555 | 1492 | /* |
|---|
| .. | .. |
|---|
| 1595 | 1532 | if (unreferenced_object(object) && |
|---|
| 1596 | 1533 | !(object->flags & OBJECT_REPORTED)) { |
|---|
| 1597 | 1534 | object->flags |= OBJECT_REPORTED; |
|---|
| 1535 | + |
|---|
| 1536 | + if (kmemleak_verbose) |
|---|
| 1537 | + print_unreferenced(NULL, object); |
|---|
| 1538 | + |
|---|
| 1598 | 1539 | new_leaks++; |
|---|
| 1599 | 1540 | } |
|---|
| 1600 | 1541 | raw_spin_unlock_irqrestore(&object->lock, flags); |
|---|
| .. | .. |
|---|
| 1616 | 1557 | */ |
|---|
| 1617 | 1558 | static int kmemleak_scan_thread(void *arg) |
|---|
| 1618 | 1559 | { |
|---|
| 1619 | | - static int first_run = 1; |
|---|
| 1560 | + static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN); |
|---|
| 1620 | 1561 | |
|---|
| 1621 | 1562 | pr_info("Automatic memory scanning thread started\n"); |
|---|
| 1622 | 1563 | set_user_nice(current, 10); |
|---|
| .. | .. |
|---|
| 1857 | 1798 | } |
|---|
| 1858 | 1799 | |
|---|
| 1859 | 1800 | if (!kmemleak_enabled) { |
|---|
| 1860 | | - ret = -EBUSY; |
|---|
| 1801 | + ret = -EPERM; |
|---|
| 1861 | 1802 | goto out; |
|---|
| 1862 | 1803 | } |
|---|
| 1863 | 1804 | |
|---|
| .. | .. |
|---|
| 1910 | 1851 | |
|---|
| 1911 | 1852 | static void __kmemleak_do_cleanup(void) |
|---|
| 1912 | 1853 | { |
|---|
| 1913 | | - struct kmemleak_object *object; |
|---|
| 1854 | + struct kmemleak_object *object, *tmp; |
|---|
| 1914 | 1855 | |
|---|
| 1915 | | - rcu_read_lock(); |
|---|
| 1916 | | - list_for_each_entry_rcu(object, &object_list, object_list) |
|---|
| 1917 | | - delete_object_full(object->pointer); |
|---|
| 1918 | | - rcu_read_unlock(); |
|---|
| 1856 | + /* |
|---|
| 1857 | + * Kmemleak has already been disabled, no need for RCU list traversal |
|---|
| 1858 | + * or kmemleak_lock held. |
|---|
| 1859 | + */ |
|---|
| 1860 | + list_for_each_entry_safe(object, tmp, &object_list, object_list) { |
|---|
| 1861 | + __remove_object(object); |
|---|
| 1862 | + __delete_object(object); |
|---|
| 1863 | + } |
|---|
| 1919 | 1864 | } |
|---|
| 1920 | 1865 | |
|---|
| 1921 | 1866 | /* |
|---|
| .. | .. |
|---|
| 1984 | 1929 | } |
|---|
| 1985 | 1930 | early_param("kmemleak", kmemleak_boot_config); |
|---|
| 1986 | 1931 | |
|---|
| 1987 | | -static void __init print_log_trace(struct early_log *log) |
|---|
| 1988 | | -{ |
|---|
| 1989 | | - struct stack_trace trace; |
|---|
| 1990 | | - |
|---|
| 1991 | | - trace.nr_entries = log->trace_len; |
|---|
| 1992 | | - trace.entries = log->trace; |
|---|
| 1993 | | - |
|---|
| 1994 | | - pr_notice("Early log backtrace:\n"); |
|---|
| 1995 | | - print_stack_trace(&trace, 2); |
|---|
| 1996 | | -} |
|---|
| 1997 | | - |
|---|
| 1998 | 1932 | /* |
|---|
| 1999 | 1933 | * Kmemleak initialization. |
|---|
| 2000 | 1934 | */ |
|---|
| 2001 | 1935 | void __init kmemleak_init(void) |
|---|
| 2002 | 1936 | { |
|---|
| 2003 | | - int i; |
|---|
| 2004 | | - unsigned long flags; |
|---|
| 2005 | | - |
|---|
| 2006 | 1937 | #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF |
|---|
| 2007 | 1938 | if (!kmemleak_skip_disable) { |
|---|
| 2008 | | - kmemleak_early_log = 0; |
|---|
| 2009 | 1939 | kmemleak_disable(); |
|---|
| 2010 | 1940 | return; |
|---|
| 2011 | 1941 | } |
|---|
| 2012 | 1942 | #endif |
|---|
| 1943 | + |
|---|
| 1944 | + if (kmemleak_error) |
|---|
| 1945 | + return; |
|---|
| 2013 | 1946 | |
|---|
| 2014 | 1947 | jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); |
|---|
| 2015 | 1948 | jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); |
|---|
| 2016 | 1949 | |
|---|
| 2017 | 1950 | object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); |
|---|
| 2018 | 1951 | scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); |
|---|
| 2019 | | - |
|---|
| 2020 | | - if (crt_early_log > ARRAY_SIZE(early_log)) |
|---|
| 2021 | | - pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", |
|---|
| 2022 | | - crt_early_log); |
|---|
| 2023 | | - |
|---|
| 2024 | | - /* the kernel is still in UP mode, so disabling the IRQs is enough */ |
|---|
| 2025 | | - local_irq_save(flags); |
|---|
| 2026 | | - kmemleak_early_log = 0; |
|---|
| 2027 | | - if (kmemleak_error) { |
|---|
| 2028 | | - local_irq_restore(flags); |
|---|
| 2029 | | - return; |
|---|
| 2030 | | - } else { |
|---|
| 2031 | | - kmemleak_enabled = 1; |
|---|
| 2032 | | - kmemleak_free_enabled = 1; |
|---|
| 2033 | | - } |
|---|
| 2034 | | - local_irq_restore(flags); |
|---|
| 2035 | 1952 | |
|---|
| 2036 | 1953 | /* register the data/bss sections */ |
|---|
| 2037 | 1954 | create_object((unsigned long)_sdata, _edata - _sdata, |
|---|
| .. | .. |
|---|
| 2043 | 1960 | create_object((unsigned long)__start_ro_after_init, |
|---|
| 2044 | 1961 | __end_ro_after_init - __start_ro_after_init, |
|---|
| 2045 | 1962 | KMEMLEAK_GREY, GFP_ATOMIC); |
|---|
| 2046 | | - |
|---|
| 2047 | | - /* |
|---|
| 2048 | | - * This is the point where tracking allocations is safe. Automatic |
|---|
| 2049 | | - * scanning is started during the late initcall. Add the early logged |
|---|
| 2050 | | - * callbacks to the kmemleak infrastructure. |
|---|
| 2051 | | - */ |
|---|
| 2052 | | - for (i = 0; i < crt_early_log; i++) { |
|---|
| 2053 | | - struct early_log *log = &early_log[i]; |
|---|
| 2054 | | - |
|---|
| 2055 | | - switch (log->op_type) { |
|---|
| 2056 | | - case KMEMLEAK_ALLOC: |
|---|
| 2057 | | - early_alloc(log); |
|---|
| 2058 | | - break; |
|---|
| 2059 | | - case KMEMLEAK_ALLOC_PERCPU: |
|---|
| 2060 | | - early_alloc_percpu(log); |
|---|
| 2061 | | - break; |
|---|
| 2062 | | - case KMEMLEAK_FREE: |
|---|
| 2063 | | - kmemleak_free(log->ptr); |
|---|
| 2064 | | - break; |
|---|
| 2065 | | - case KMEMLEAK_FREE_PART: |
|---|
| 2066 | | - kmemleak_free_part(log->ptr, log->size); |
|---|
| 2067 | | - break; |
|---|
| 2068 | | - case KMEMLEAK_FREE_PERCPU: |
|---|
| 2069 | | - kmemleak_free_percpu(log->ptr); |
|---|
| 2070 | | - break; |
|---|
| 2071 | | - case KMEMLEAK_NOT_LEAK: |
|---|
| 2072 | | - kmemleak_not_leak(log->ptr); |
|---|
| 2073 | | - break; |
|---|
| 2074 | | - case KMEMLEAK_IGNORE: |
|---|
| 2075 | | - kmemleak_ignore(log->ptr); |
|---|
| 2076 | | - break; |
|---|
| 2077 | | - case KMEMLEAK_SCAN_AREA: |
|---|
| 2078 | | - kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL); |
|---|
| 2079 | | - break; |
|---|
| 2080 | | - case KMEMLEAK_NO_SCAN: |
|---|
| 2081 | | - kmemleak_no_scan(log->ptr); |
|---|
| 2082 | | - break; |
|---|
| 2083 | | - case KMEMLEAK_SET_EXCESS_REF: |
|---|
| 2084 | | - object_set_excess_ref((unsigned long)log->ptr, |
|---|
| 2085 | | - log->excess_ref); |
|---|
| 2086 | | - break; |
|---|
| 2087 | | - default: |
|---|
| 2088 | | - kmemleak_warn("Unknown early log operation: %d\n", |
|---|
| 2089 | | - log->op_type); |
|---|
| 2090 | | - } |
|---|
| 2091 | | - |
|---|
| 2092 | | - if (kmemleak_warning) { |
|---|
| 2093 | | - print_log_trace(log); |
|---|
| 2094 | | - kmemleak_warning = 0; |
|---|
| 2095 | | - } |
|---|
| 2096 | | - } |
|---|
| 2097 | 1963 | } |
|---|
| 2098 | 1964 | |
|---|
| 2099 | 1965 | /* |
|---|
| .. | .. |
|---|
| 2101 | 1967 | */ |
|---|
| 2102 | 1968 | static int __init kmemleak_late_init(void) |
|---|
| 2103 | 1969 | { |
|---|
| 2104 | | - struct dentry *dentry; |
|---|
| 2105 | | - |
|---|
| 2106 | 1970 | kmemleak_initialized = 1; |
|---|
| 2107 | 1971 | |
|---|
| 2108 | | - dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL, |
|---|
| 2109 | | - &kmemleak_fops); |
|---|
| 2110 | | - if (!dentry) |
|---|
| 2111 | | - pr_warn("Failed to create the debugfs kmemleak file\n"); |
|---|
| 1972 | + debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops); |
|---|
| 2112 | 1973 | |
|---|
| 2113 | 1974 | if (kmemleak_error) { |
|---|
| 2114 | 1975 | /* |
|---|
| .. | .. |
|---|
| 2121 | 1982 | return -ENOMEM; |
|---|
| 2122 | 1983 | } |
|---|
| 2123 | 1984 | |
|---|
| 2124 | | - mutex_lock(&scan_mutex); |
|---|
| 2125 | | - start_scan_thread(); |
|---|
| 2126 | | - mutex_unlock(&scan_mutex); |
|---|
| 1985 | + if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) { |
|---|
| 1986 | + mutex_lock(&scan_mutex); |
|---|
| 1987 | + start_scan_thread(); |
|---|
| 1988 | + mutex_unlock(&scan_mutex); |
|---|
| 1989 | + } |
|---|
| 2127 | 1990 | |
|---|
| 2128 | | - pr_info("Kernel memory leak detector initialized\n"); |
|---|
| 1991 | + pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n", |
|---|
| 1992 | + mem_pool_free_count); |
|---|
| 2129 | 1993 | |
|---|
| 2130 | 1994 | return 0; |
|---|
| 2131 | 1995 | } |
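One usage note on the CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN conditional above: when that option is disabled the scanning thread is never started here (matching the first_run initialisation in kmemleak_scan_thread() earlier), so scans have to be triggered manually, e.g. with `echo scan > /sys/kernel/debug/kmemleak`, and the results read back from the same debugfs file created just above.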
|---|