@@ -26,7 +26,7 @@
  *
  * The following locks and mutexes are used by kmemleak:
  *
- * - kmemleak_lock (rwlock): protects the object_list modifications and
+ * - kmemleak_lock (raw spinlock): protects the object_list modifications and
  *   accesses to the object_tree_root. The object_list is the main list
  *   holding the metadata (struct kmemleak_object) for the allocated memory
  *   blocks. The object_tree_root is a red black tree used to look-up
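On PREEMPT_RT kernels, spinlock_t and rwlock_t are substituted by sleeping, rt_mutex-based locks, whereas raw_spinlock_t always remains a true busy-waiting lock that disables preemption. kmemleak's hooks are called from allocation and freeing paths that may run with interrupts disabled or under other raw locks, i.e. contexts that must not sleep, which is why the patch converts both kmemleak_lock and object->lock to raw spinlocks (giving up the reader concurrency of the old rwlock in the bargain). A minimal sketch of the resulting pattern, using hypothetical names (demo_lock, demo_counter, demo_update()):

#include <linux/spinlock.h>

/* hypothetical lock and data, for illustration only */
static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned long demo_counter;

static void demo_update(void)
{
	unsigned long flags;

	/* stays a non-sleeping, IRQ-disabling lock even on PREEMPT_RT */
	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}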
@@ -147,7 +147,7 @@
  * (use_count) and freed using the RCU mechanism.
  */
 struct kmemleak_object {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned int flags;		/* object status flags */
 	struct list_head object_list;
 	struct list_head gray_list;
@@ -197,7 +197,7 @@
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
-/* rw_lock protecting the access to object_list and object_tree_root */
-static DEFINE_RWLOCK(kmemleak_lock);
+/* raw spinlock protecting the access to object_list and object_tree_root */
+static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 
 /* allocation caches for kmemleak internal data */
 static struct kmem_cache *object_cache;
@@ -491,9 +491,9 @@
 	struct kmemleak_object *object;
 
 	rcu_read_lock();
-	read_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, alias);
-	read_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	/* check whether the object is still available */
 	if (object && !get_object(object))
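The hunk above preserves kmemleak's lookup discipline: the red-black tree walk happens under kmemleak_lock, but the lock is dropped before the caller uses the object, so RCU plus the use_count reference keep the object alive across that window. A sketch of the same pattern, where demo_find() is a hypothetical wrapper and lookup_object()/get_object() are the kmemleak helpers shown above:

static struct kmemleak_object *demo_find(unsigned long ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();			/* keeps the object memory valid */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);		/* tree walk under the lock */
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	/*
	 * After the lock is dropped the object may be queued for RCU
	 * freeing; get_object() succeeds only while use_count is non-zero.
	 */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();
	return object;
}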
@@ -513,13 +513,13 @@
 	unsigned long flags;
 	struct kmemleak_object *object;
 
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, alias);
 	if (object) {
 		rb_erase(&object->rb_node, &object_tree_root);
 		list_del_rcu(&object->object_list);
 	}
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	return object;
 }
@@ -561,7 +561,7 @@
 	INIT_LIST_HEAD(&object->object_list);
 	INIT_LIST_HEAD(&object->gray_list);
 	INIT_HLIST_HEAD(&object->area_list);
-	spin_lock_init(&object->lock);
+	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
 	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
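raw_spin_lock_init() is the run-time counterpart of the static DEFINE_RAW_SPINLOCK() initializer seen at line 200: every object carved out of object_cache carries its own embedded lock, which has to be initialized before its first use. A sketch of the same idiom with hypothetical types (demo_obj, demo_alloc()):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	raw_spinlock_t lock;		/* embedded per-object lock */
	unsigned long data;
};

static struct demo_obj *demo_alloc(struct kmem_cache *cache, gfp_t gfp)
{
	struct demo_obj *obj = kmem_cache_alloc(cache, gfp);

	if (obj)
		raw_spin_lock_init(&obj->lock);	/* before any locking */
	return obj;
}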
@@ -593,7 +593,7 @@
 	/* kernel backtrace */
 	object->trace_len = __save_stack_trace(object->trace);
 
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
 	min_addr = min(min_addr, ptr);
 	max_addr = max(max_addr, ptr + size);
@@ -624,7 +624,7 @@
 
 	list_add_tail_rcu(&object->object_list, &object_list);
 out:
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 	return object;
 }
 
@@ -642,9 +642,9 @@
 	 * Locking here also ensures that the corresponding memory block
 	 * cannot be freed when it is being scanned.
 	 */
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->flags &= ~OBJECT_ALLOCATED;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -716,9 +716,9 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	__paint_it(object, color);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 static void paint_ptr(unsigned long ptr, int color)
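paint_it() follows the common kernel split between a locked wrapper and a double-underscore helper that assumes the caller already holds the lock, so call sites that already own object->lock (such as kmemleak_clear() in the last hunk below) can call __paint_it() directly. A sketch of the convention, reusing the hypothetical demo_obj above:

/* __demo_mark(): caller must hold obj->lock */
static void __demo_mark(struct demo_obj *obj)
{
	obj->data |= 1;
}

/* demo_mark(): takes the lock, then defers to the locked helper */
static void demo_mark(struct demo_obj *obj)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&obj->lock, flags);
	__demo_mark(obj);
	raw_spin_unlock_irqrestore(&obj->lock, flags);
}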
@@ -778,7 +778,7 @@
 		goto out;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if (size == SIZE_MAX) {
 		size = object->pointer + object->size - ptr;
 	} else if (ptr + size > object->pointer + object->size) {
@@ -794,7 +794,7 @@
 
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 out:
 	put_object(object);
 }
@@ -817,9 +817,9 @@
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->excess_ref = excess_ref;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -839,9 +839,9 @@
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->flags |= OBJECT_NO_SCAN;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -902,11 +902,11 @@
 			       log->min_count, GFP_ATOMIC);
 	if (!object)
 		goto out;
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	for (i = 0; i < log->trace_len; i++)
 		object->trace[i] = log->trace[i];
 	object->trace_len = log->trace_len;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 out:
 	rcu_read_unlock();
 }
@@ -1096,9 +1096,9 @@
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->trace_len = __save_stack_trace(object->trace);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
 }
@@ -1310,7 +1310,7 @@
 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 	unsigned long flags;
 
-	read_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	for (ptr = start; ptr < end; ptr++) {
 		struct kmemleak_object *object;
 		unsigned long pointer;
@@ -1344,7 +1344,7 @@
 		 * previously acquired in scan_object(). These locks are
 		 * enclosed by scan_mutex.
 		 */
-		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 		/* only pass surplus references (object already gray) */
 		if (color_gray(object)) {
 			excess_ref = object->excess_ref;
@@ -1353,7 +1353,7 @@
 			excess_ref = 0;
 			update_refs(object);
 		}
-		spin_unlock(&object->lock);
+		raw_spin_unlock(&object->lock);
 
 		if (excess_ref) {
 			object = lookup_object(excess_ref, 0);
@@ -1362,12 +1362,12 @@
 			if (object == scanned)
 				/* circular reference, ignore */
 				continue;
-			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 			update_refs(object);
-			spin_unlock(&object->lock);
+			raw_spin_unlock(&object->lock);
 		}
 	}
-	read_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 }
 
 /*
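The two hunks above keep the lockdep annotation intact: scan_block() acquires object->lock while its caller, scan_object(), already holds the lock of the object being scanned, and both locks belong to the same lock class. Without an annotation lockdep would flag this as a recursive deadlock; raw_spin_lock_nested() carries over SINGLE_DEPTH_NESTING, which tells lockdep the second acquisition is one deliberate level deeper. A minimal sketch with hypothetical locks:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_outer);
static DEFINE_RAW_SPINLOCK(demo_inner);	/* same lock class as demo_outer */

static void demo_nested(void)
{
	raw_spin_lock(&demo_outer);
	/* annotate the inner lock as a legitimate second nesting level */
	raw_spin_lock_nested(&demo_inner, SINGLE_DEPTH_NESTING);
	raw_spin_unlock(&demo_inner);
	raw_spin_unlock(&demo_outer);
}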
@@ -1400,7 +1400,7 @@
 	 * Once the object->lock is acquired, the corresponding memory block
 	 * cannot be freed (the same lock is acquired in delete_object).
 	 */
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if (object->flags & OBJECT_NO_SCAN)
 		goto out;
 	if (!(object->flags & OBJECT_ALLOCATED))
@@ -1419,9 +1419,9 @@
 			if (start >= end)
 				break;
 
-			spin_unlock_irqrestore(&object->lock, flags);
+			raw_spin_unlock_irqrestore(&object->lock, flags);
 			cond_resched();
-			spin_lock_irqsave(&object->lock, flags);
+			raw_spin_lock_irqsave(&object->lock, flags);
 		} while (object->flags & OBJECT_ALLOCATED);
 	} else
 		hlist_for_each_entry(area, &object->area_list, node)
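Because a raw spinlock section can never be preempted, keeping it short matters even more after this conversion. The scan loop above bounds latency by releasing object->lock after each bounded chunk, yielding with cond_resched(), and re-testing OBJECT_ALLOCATED on reacquisition, since the memory block may have been freed while the lock was dropped. A condensed sketch of that pattern (the real loop also advances a cursor through the block and breaks at its end):

static void demo_scan(struct kmemleak_object *object)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	do {
		/* ... scan one bounded chunk, advance the cursor ... */
		raw_spin_unlock_irqrestore(&object->lock, flags);
		cond_resched();		/* preemption point between chunks */
		raw_spin_lock_irqsave(&object->lock, flags);
		/* re-check: the block may have been freed while unlocked */
	} while (object->flags & OBJECT_ALLOCATED);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}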
@@ -1429,7 +1429,7 @@
 				   (void *)(area->start + area->size),
 				   object);
 out:
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 /*
@@ -1482,7 +1482,7 @@
 	/* prepare the kmemleak_object's */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 #ifdef DEBUG
 		/*
 		 * With a few exceptions there should be a maximum of
@@ -1499,7 +1499,7 @@
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1564,14 +1564,14 @@
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
 		    && update_checksum(object) && get_object(object)) {
 			/* color it gray temporarily */
 			object->count = object->min_count;
 			list_add_tail(&object->gray_list, &gray_list);
 		}
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1591,13 +1591,13 @@
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if (unreferenced_object(object) &&
 		    !(object->flags & OBJECT_REPORTED)) {
 			object->flags |= OBJECT_REPORTED;
 			new_leaks++;
 		}
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1749,10 +1749,10 @@
 	struct kmemleak_object *object = v;
 	unsigned long flags;
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
 		print_unreferenced(seq, object);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	return 0;
 }
 
@@ -1782,9 +1782,9 @@
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	dump_object_info(object);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
 	return 0;
@@ -1803,11 +1803,11 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if ((object->flags & OBJECT_REPORTED) &&
 		    unreferenced_object(object))
 			__paint_it(object, KMEMLEAK_GREY);
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 