```diff
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -129,7 +129,7 @@
 
 static void fill_pool(void)
 {
-	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
 	struct debug_obj *obj;
 	unsigned long flags;
 
```
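The new mask is more than a cleanup: `GFP_ATOMIC` expands to `__GFP_HIGH | __GFP_KSWAPD_RECLAIM`, so switching to a bare `__GFP_HIGH` keeps access to the atomic reserves while no longer waking kswapd on every pool refill — relevant because `fill_pool()` can run with arbitrary locks held. `__GFP_NORETRY` can go too, since it only affects direct-reclaim retries and a mask without `__GFP_DIRECT_RECLAIM` never enters direct reclaim. A sketch of the flag relationship (the flag names are real; paraphrased from include/linux/gfp_types.h, whose exact layout varies by kernel version):

```c
/* Paraphrased from include/linux/gfp_types.h: */
#define GFP_ATOMIC	(__GFP_HIGH | __GFP_KSWAPD_RECLAIM)

/*
 * Old: GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN
 *   -> __GFP_HIGH | __GFP_KSWAPD_RECLAIM | __GFP_NORETRY | __GFP_NOWARN
 * New: __GFP_HIGH | __GFP_NOWARN
 *   -> atomic reserves only: no kswapd wakeup, no allocation warnings
 */
```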
|---|
```diff
@@ -219,10 +219,6 @@
 	return obj;
 }
 
-/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
- */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
 {
```
|---|
```diff
@@ -440,6 +436,7 @@
 	struct debug_percpu_free *percpu_pool;
 	struct hlist_node *tmp;
 	struct debug_obj *obj;
+	unsigned long flags;
 
 	/* Remote access is safe as the CPU is dead already */
 	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
```
|---|
```diff
@@ -447,6 +444,12 @@
 		hlist_del(&obj->node);
 		kmem_cache_free(obj_cache, obj);
 	}
+
+	raw_spin_lock_irqsave(&pool_lock, flags);
+	obj_pool_used -= percpu_pool->obj_free;
+	debug_objects_freed += percpu_pool->obj_free;
+	raw_spin_unlock_irqrestore(&pool_lock, flags);
+
 	percpu_pool->obj_free = 0;
 
 	return 0;
```
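This hunk closes an accounting leak in CPU offlining: objects parked in a dead CPU's per-CPU free list were returned to the slab without adjusting the global counters, which are otherwise only modified under `pool_lock` — hence the `raw_spin_lock_irqsave()` pair. A comment sketch of the invariant being restored (counter semantics inferred from their use elsewhere in this file):

```c
/*
 * Invariant (inferred from fill_pool()/free_object()):
 *
 *   obj_pool_used == objects handed out to trackers
 *                    + objects cached in per-CPU free lists
 *
 * Returning a dead CPU's cache to the slab must therefore subtract
 * percpu_pool->obj_free from obj_pool_used and credit the same count
 * to debug_objects_freed, both under pool_lock. Reading obj_free
 * without the lock is fine here: the CPU is dead, so the list is
 * stable.
 */
```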
|---|
```diff
@@ -498,6 +501,15 @@
 	const struct debug_obj_descr *descr = obj->descr;
 	static int limit;
 
+	/*
+	 * Don't report if lookup_object_or_alloc() by the current thread
+	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
+	 * concurrent thread turned off debug_objects_enabled and cleared
+	 * the hash buckets.
+	 */
+	if (!debug_objects_enabled)
+		return;
+
 	if (limit < 5 && descr != descr_test) {
 		void *hint = descr->debug_hint ?
 			descr->debug_hint(obj->object) : NULL;
```
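The guard addresses a noisy race: a concurrent thread that hits OOM disables the tracker and empties every hash bucket, which can make the current thread's lookup fail and trigger a bogus "object not tracked" report. An interleaving sketch of the scenario the comment describes (thread labels are illustrative):

```c
/*
 *  T1: alloc_object() returns NULL
 *  T1: debug_objects_enabled = 0;
 *  T1: debug_objects_oom();              // empties all hash buckets
 *            T2: lookup_object() -> NULL // entry was just wiped
 *            T2: debug_print_object(&o, ...); // false positive
 *
 * With the early return, T2 observes !debug_objects_enabled and
 * bails out silently instead of reporting.
 */
```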
|---|
```diff
@@ -548,31 +560,74 @@
 	WARN_ON(1);
 }
 
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+						const struct debug_obj_descr *descr,
+						bool onstack, bool alloc_ifstatic)
+{
+	struct debug_obj *obj = lookup_object(addr, b);
+	enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+	if (likely(obj))
+		return obj;
+
+	/*
+	 * debug_object_init() unconditionally allocates untracked
+	 * objects. It does not matter whether it is a static object or
+	 * not.
+	 *
+	 * debug_object_assert_init() and debug_object_activate() allow
+	 * allocation only if the descriptor callback confirms that the
+	 * object is static and considered initialized. For non-static
+	 * objects the allocation needs to be done from the fixup callback.
+	 */
+	if (unlikely(alloc_ifstatic)) {
+		if (!descr->is_static_object || !descr->is_static_object(addr))
+			return ERR_PTR(-ENOENT);
+		/* Statically allocated objects are considered initialized */
+		state = ODEBUG_STATE_INIT;
+	}
+
+	obj = alloc_object(addr, b, descr);
+	if (likely(obj)) {
+		obj->state = state;
+		debug_object_is_on_stack(addr, onstack);
+		return obj;
+	}
+
+	/* Out of memory. Do the cleanup outside of the locked region */
+	debug_objects_enabled = 0;
+	return NULL;
+}
+
+static void debug_objects_fill_pool(void)
+{
+	/*
+	 * On RT enabled kernels the pool refill must happen in preemptible
+	 * context:
+	 */
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+		fill_pool();
+}
+
 static void
 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
 {
 	enum debug_obj_state state;
-	bool check_stack = false;
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
 
-	fill_pool();
+	debug_objects_fill_pool();
 
 	db = get_bucket((unsigned long) addr);
 
 	raw_spin_lock_irqsave(&db->lock, flags);
 
-	obj = lookup_object(addr, db);
-	if (!obj) {
-		obj = alloc_object(addr, db, descr);
-		if (!obj) {
-			debug_objects_enabled = 0;
-			raw_spin_unlock_irqrestore(&db->lock, flags);
-			debug_objects_oom();
-			return;
-		}
-		check_stack = true;
+	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+	if (unlikely(!obj)) {
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_objects_oom();
+		return;
 	}
 
 	switch (obj->state) {
```
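Two new helpers carry the weight here. `lookup_object_or_alloc()` centralizes the "track it if it is a known static object" logic that was previously duplicated in the activate and assert_init paths, and it reports on-stack objects from inside the locked region, which is why the deferred `check_stack` dance disappears from `__debug_object_init()` below. `debug_objects_fill_pool()` gates the refill on preemptibility because `fill_pool()` allocates, which is not safe from non-preemptible context on PREEMPT_RT.

The `alloc_ifstatic` path depends entirely on the descriptor's `is_static_object()` callback. For reference, a descriptor sketch modeled on the timer code (the pattern and the `struct debug_obj_descr` fields match kernel/time/timer.c as I recall it; the `demo_` names are hypothetical):

```c
#include <linux/poison.h>	/* TIMER_ENTRY_STATIC */
#include <linux/timer.h>
#include <linux/debugobjects.h>

static bool demo_timer_is_static(void *addr)
{
	struct timer_list *timer = addr;

	/*
	 * Statically initialized timers carry a recognizable sentinel
	 * in their list entry, so the callback can answer without
	 * consulting any tracking state.
	 */
	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

static const struct debug_obj_descr demo_timer_debug_descr = {
	.name			= "timer_list",
	.is_static_object	= demo_timer_is_static,
};
```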
|---|
```diff
@@ -598,8 +653,6 @@
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
-	if (check_stack)
-		debug_object_is_on_stack(addr, onstack);
 }
 
 /**
```
|---|
```diff
@@ -639,24 +692,24 @@
  */
 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 {
+	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 	enum debug_obj_state state;
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
 	int ret;
-	struct debug_obj o = { .object = addr,
-			       .state = ODEBUG_STATE_NOTAVAILABLE,
-			       .descr = descr };
 
 	if (!debug_objects_enabled)
 		return 0;
+
+	debug_objects_fill_pool();
 
 	db = get_bucket((unsigned long) addr);
 
 	raw_spin_lock_irqsave(&db->lock, flags);
 
-	obj = lookup_object(addr, db);
-	if (obj) {
+	obj = lookup_object_or_alloc(addr, db, descr, false, true);
+	if (likely(!IS_ERR_OR_NULL(obj))) {
 		bool print_object = false;
 
 		switch (obj->state) {
```
|---|
```diff
@@ -689,24 +742,16 @@
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 
-	/*
-	 * We are here when a static object is activated. We
-	 * let the type specific code confirm whether this is
-	 * true or not. if true, we just make sure that the
-	 * static object is tracked in the object tracker. If
-	 * not, this must be a bug, so we try to fix it up.
-	 */
-	if (descr->is_static_object && descr->is_static_object(addr)) {
-		/* track this static object */
-		debug_object_init(addr, descr);
-		debug_object_activate(addr, descr);
-	} else {
-		debug_print_object(&o, "activate");
-		ret = debug_object_fixup(descr->fixup_activate, addr,
-					 ODEBUG_STATE_NOTAVAILABLE);
-		return ret ? 0 : -EINVAL;
+	/* If NULL the allocation has hit OOM */
+	if (!obj) {
+		debug_objects_oom();
+		return 0;
 	}
-	return 0;
+
+	/* Object is neither static nor tracked. It's not initialized */
+	debug_print_object(&o, "activate");
+	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+	return ret ? 0 : -EINVAL;
 }
 EXPORT_SYMBOL_GPL(debug_object_activate);
 
```
|---|
```diff
@@ -860,6 +905,7 @@
  */
 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
 {
+	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
```
|---|
```diff
@@ -867,34 +913,25 @@
 	if (!debug_objects_enabled)
 		return;
 
+	debug_objects_fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	raw_spin_lock_irqsave(&db->lock, flags);
+	obj = lookup_object_or_alloc(addr, db, descr, false, true);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (likely(!IS_ERR_OR_NULL(obj)))
+		return;
 
-	obj = lookup_object(addr, db);
+	/* If NULL the allocation has hit OOM */
 	if (!obj) {
-		struct debug_obj o = { .object = addr,
-				       .state = ODEBUG_STATE_NOTAVAILABLE,
-				       .descr = descr };
-
-		raw_spin_unlock_irqrestore(&db->lock, flags);
-		/*
-		 * Maybe the object is static, and we let the type specific
-		 * code confirm. Track this static object if true, else invoke
-		 * fixup.
-		 */
-		if (descr->is_static_object && descr->is_static_object(addr)) {
-			/* Track this static object */
-			debug_object_init(addr, descr);
-		} else {
-			debug_print_object(&o, "assert_init");
-			debug_object_fixup(descr->fixup_assert_init, addr,
-					   ODEBUG_STATE_NOTAVAILABLE);
-		}
+		debug_objects_oom();
 		return;
 	}
 
-	raw_spin_unlock_irqrestore(&db->lock, flags);
+	/* Object is neither tracked nor static. It's not initialized. */
+	debug_print_object(&o, "assert_init");
+	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
 }
 EXPORT_SYMBOL_GPL(debug_object_assert_init);
 
```
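`debug_object_assert_init()` now mirrors the activate path, with two differences: the bucket lock is dropped before the result is decoded (neither the OOM report nor the fixup needs it), and repair goes through `fixup_assert_init()`. A fixup sketch modeled on `timer_fixup_assert_init()` in kernel/time/timer.c (the shape matches that file as I recall it; the `demo_` names are hypothetical and the stub callback is defined here to keep the sketch self-contained):

```c
static void demo_stub_timer(struct timer_list *unused)
{
	/* Intentionally empty: a safe landing pad for a repaired timer */
}

static bool demo_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/* The object was never initialized: set it up as a no-op */
		timer_setup(timer, demo_stub_timer, 0);
		return true;
	default:
		return false;
	}
}
```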
|---|
```diff
@@ -1316,6 +1353,8 @@
 		hlist_add_head(&obj->node, &objects);
 	}
 
+	debug_objects_allocated += i;
+
 	/*
 	 * debug_objects_mem_init() is now called early that only one CPU is up
 	 * and interrupts have been disabled, so it is safe to replace the
```
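The `debug_objects_allocated += i;` line makes the statistics account for the objects allocated while replacing the static early-boot pool; previously those allocations never showed up in the counters, so the allocated/freed numbers could not balance. The counter is user visible through the debugfs stats file; paraphrased from `debug_stats_show()` in this file (format string from memory, may differ slightly by version):

```c
seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
```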
|---|
```diff
@@ -1384,6 +1423,7 @@
 		debug_objects_enabled = 0;
 		kmem_cache_destroy(obj_cache);
 		pr_warn("out of memory.\n");
+		return;
 	} else
 		debug_objects_selftest();
 
```
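The added `return` matters because `debug_objects_mem_init()` continues past this if/else, and in kernels of this vintage that tail registers the CPU-hotplug callback. Falling through with `obj_cache` already destroyed would arm `object_cpu_offline()` (touched in the hunks above) against a dead cache. A condensed sketch of the surrounding function, reconstructed from memory of lib/debugobjects.c — the exact condition and the hotplug wiring are assumptions:

```c
	/* Sketch: tail of debug_objects_mem_init(), reconstructed */
	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;		/* skip selftest and hotplug registration */
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	/* Runs object_cpu_offline() when a CPU dies */
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline",
				  NULL, object_cpu_offline);
#endif
```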
|---|