2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/mm/kmemleak.c
@@ -26,7 +26,7 @@
  *
  * The following locks and mutexes are used by kmemleak:
  *
- * - kmemleak_lock (rwlock): protects the object_list modifications and
+ * - kmemleak_lock (raw spinlock): protects the object_list modifications and
  *   accesses to the object_tree_root. The object_list is the main list
  *   holding the metadata (struct kmemleak_object) for the allocated memory
  *   blocks. The object_tree_root is a red black tree used to look-up
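The rationale for this hunk: on PREEMPT_RT kernels, spinlock_t and rwlock_t are substituted with sleeping locks, while raw_spinlock_t always remains a true spinning lock. kmemleak's hooks can be called from contexts that must not sleep, for example with interrupts disabled deep inside allocator paths, so its locks have to stay raw. A minimal sketch of the pattern the patch adopts, where my_lock, my_data and my_update are illustrative names rather than kmemleak symbols:

#include <linux/spinlock.h>

/* stays a spinning lock even on PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(my_lock);
static unsigned long my_data;

static void my_update(unsigned long val)
{
        unsigned long flags;

        /* disable local interrupts and spin; usable from any context */
        raw_spin_lock_irqsave(&my_lock, flags);
        my_data = val;
        raw_spin_unlock_irqrestore(&my_lock, flags);
}

The price of the conversion is that the rwlock's shared-reader mode is gone: all lookups now serialize on kmemleak_lock.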
@@ -147,7 +147,7 @@
  * (use_count) and freed using the RCU mechanism.
  */
 struct kmemleak_object {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned int flags;		/* object status flags */
 	struct list_head object_list;
 	struct list_head gray_list;
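Context for this structure: an object's lifetime is managed by three cooperating mechanisms. The per-object lock converted in this hunk protects field updates, use_count pins the object while a CPU works on it, and the final free is deferred through RCU, as the comment above notes. A hedged sketch of such a layout, with illustrative names rather than the real kmemleak fields:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_object {
        raw_spinlock_t lock;    /* protects the fields below */
        unsigned int flags;
        struct list_head list;  /* RCU-protected list membership */
        struct rb_node rb_node; /* address-indexed lookup tree */
        atomic_t use_count;     /* pins the object while in use */
        struct rcu_head rcu;    /* for deferred freeing */
};

static void my_object_init(struct my_object *obj)
{
        raw_spin_lock_init(&obj->lock); /* mirrors the create_object() hunk below */
        INIT_LIST_HEAD(&obj->list);
        atomic_set(&obj->use_count, 1); /* initial reference held by the registry */
}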
@@ -197,7 +197,7 @@
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
 /* rw_lock protecting the access to object_list and object_tree_root */
-static DEFINE_RWLOCK(kmemleak_lock);
+static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 
 /* allocation caches for kmemleak internal data */
 static struct kmem_cache *object_cache;
@@ -491,9 +491,9 @@
 	struct kmemleak_object *object;
 
 	rcu_read_lock();
-	read_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, alias);
-	read_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	/* check whether the object is still available */
 	if (object && !get_object(object))
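find_and_get_object() above shows the lookup idiom used throughout the file: kmemleak_lock is held only for the tree walk, and the returned object stays safe to use after the unlock because the caller pins it while still inside rcu_read_lock(). The same idiom with the hypothetical names from the earlier sketch (my_lookup is assumed, not a real helper):

static struct my_object *my_lookup(unsigned long ptr);  /* hypothetical */

static struct my_object *my_find_and_get(unsigned long ptr)
{
        unsigned long flags;
        struct my_object *obj;

        rcu_read_lock();        /* the metadata cannot be freed under us */
        raw_spin_lock_irqsave(&my_lock, flags);
        obj = my_lookup(ptr);
        raw_spin_unlock_irqrestore(&my_lock, flags);

        /* pin it; fails if a concurrent teardown already dropped it to zero */
        if (obj && !atomic_inc_not_zero(&obj->use_count))
                obj = NULL;
        rcu_read_unlock();

        return obj;             /* caller drops the reference when done */
}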
@@ -513,13 +513,13 @@
 	unsigned long flags;
 	struct kmemleak_object *object;
 
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, alias);
 	if (object) {
 		rb_erase(&object->rb_node, &object_tree_root);
 		list_del_rcu(&object->object_list);
 	}
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	return object;
 }
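find_and_remove_object() is the write-side counterpart: the object is unlinked from both indexes inside one critical section, but its memory is reclaimed only after an RCU grace period, so lockless readers never dereference freed metadata. A sketch with the same hypothetical names:

static struct rb_root my_tree = RB_ROOT;
static LIST_HEAD(my_list);

static struct my_object *my_find_and_remove(unsigned long ptr)
{
        unsigned long flags;
        struct my_object *obj;

        raw_spin_lock_irqsave(&my_lock, flags);
        obj = my_lookup(ptr);
        if (obj) {
                rb_erase(&obj->rb_node, &my_tree);
                list_del_rcu(&obj->list);       /* RCU readers may still see it */
        }
        raw_spin_unlock_irqrestore(&my_lock, flags);

        return obj;     /* caller frees after a grace period, e.g. kfree_rcu() */
}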
@@ -561,7 +561,7 @@
 	INIT_LIST_HEAD(&object->object_list);
 	INIT_LIST_HEAD(&object->gray_list);
 	INIT_HLIST_HEAD(&object->area_list);
-	spin_lock_init(&object->lock);
+	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
 	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
@@ -593,7 +593,7 @@
 	/* kernel backtrace */
 	object->trace_len = __save_stack_trace(object->trace);
 
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
 	min_addr = min(min_addr, ptr);
 	max_addr = max(max_addr, ptr + size);
@@ -624,7 +624,7 @@
 
 	list_add_tail_rcu(&object->object_list, &object_list);
 out:
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 	return object;
 }
 
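Registration in create_object() is the mirror image of removal: the object is initialized completely first and only then published to the list and tree within one critical section, so a concurrent lookup can never observe a half-constructed object. A sketch:

static void my_register(struct my_object *obj)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&my_lock, flags);
        /* publish only after obj is fully initialized */
        list_add_tail_rcu(&obj->list, &my_list);
        raw_spin_unlock_irqrestore(&my_lock, flags);
}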
@@ -642,9 +642,9 @@
 	 * Locking here also ensures that the corresponding memory block
 	 * cannot be freed when it is being scanned.
 	 */
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->flags &= ~OBJECT_ALLOCATED;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -716,9 +716,9 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	__paint_it(object, color);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 static void paint_ptr(unsigned long ptr, int color)
@@ -778,7 +778,7 @@
 		goto out;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if (size == SIZE_MAX) {
 		size = object->pointer + object->size - ptr;
 	} else if (ptr + size > object->pointer + object->size) {
@@ -794,7 +794,7 @@
 
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 out:
 	put_object(object);
 }
@@ -817,9 +817,9 @@
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->excess_ref = excess_ref;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -839,9 +839,9 @@
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->flags |= OBJECT_NO_SCAN;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -902,11 +902,11 @@
 			       log->min_count, GFP_ATOMIC);
 	if (!object)
 		goto out;
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	for (i = 0; i < log->trace_len; i++)
 		object->trace[i] = log->trace[i];
 	object->trace_len = log->trace_len;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 out:
 	rcu_read_unlock();
 }
@@ -1096,9 +1096,9 @@
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->trace_len = __save_stack_trace(object->trace);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
 }
@@ -1310,7 +1310,7 @@
 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 	unsigned long flags;
 
-	read_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	for (ptr = start; ptr < end; ptr++) {
 		struct kmemleak_object *object;
 		unsigned long pointer;
@@ -1344,7 +1344,7 @@
 		 * previously acquired in scan_object(). These locks are
 		 * enclosed by scan_mutex.
 		 */
-		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 		/* only pass surplus references (object already gray) */
 		if (color_gray(object)) {
 			excess_ref = object->excess_ref;
@@ -1353,7 +1353,7 @@
 			excess_ref = 0;
 			update_refs(object);
 		}
-		spin_unlock(&object->lock);
+		raw_spin_unlock(&object->lock);
 
 		if (excess_ref) {
 			object = lookup_object(excess_ref, 0);
@@ -1362,12 +1362,12 @@
 			if (object == scanned)
 				/* circular reference, ignore */
 				continue;
-			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 			update_refs(object);
-			spin_unlock(&object->lock);
+			raw_spin_unlock(&object->lock);
 		}
 	}
-	read_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 }
 
 /*
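The scan_block() hunks above take two locks of the same lock class at once: the scanned object's lock, acquired earlier in scan_object(), and the lock of the object a pointer refers to. Lockdep would normally report that as a possible deadlock, so the inner acquisition is annotated with SINGLE_DEPTH_NESTING; real deadlock freedom comes from scan_mutex serializing all scanners, as the in-code comment says. A hedged sketch of the annotation:

static void my_scan_pair(struct my_object *outer, struct my_object *inner)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&outer->lock, flags);
        /*
         * Same lock class, nested one level: tell lockdep this is
         * intentional. Correctness relies on a higher-level mutex
         * serializing all callers of this function.
         */
        raw_spin_lock_nested(&inner->lock, SINGLE_DEPTH_NESTING);
        /* ... examine both objects ... */
        raw_spin_unlock(&inner->lock);
        raw_spin_unlock_irqrestore(&outer->lock, flags);
}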
@@ -1400,7 +1400,7 @@
 	 * Once the object->lock is acquired, the corresponding memory block
 	 * cannot be freed (the same lock is acquired in delete_object).
 	 */
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if (object->flags & OBJECT_NO_SCAN)
 		goto out;
 	if (!(object->flags & OBJECT_ALLOCATED))
@@ -1419,9 +1419,9 @@
 			if (start >= end)
 				break;
 
-			spin_unlock_irqrestore(&object->lock, flags);
+			raw_spin_unlock_irqrestore(&object->lock, flags);
 			cond_resched();
-			spin_lock_irqsave(&object->lock, flags);
+			raw_spin_lock_irqsave(&object->lock, flags);
 		} while (object->flags & OBJECT_ALLOCATED);
 	} else
 		hlist_for_each_entry(area, &object->area_list, node)
@@ -1429,7 +1429,7 @@
 				   (void *)(area->start + area->size),
 				   object);
 out:
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 /*
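scan_object() cannot keep an interrupt-disabling raw spinlock held across a large memory block, so it scans in chunks: drop the lock, cond_resched(), re-acquire, and re-check OBJECT_ALLOCATED because the block may have been freed while the lock was not held. A sketch of the pattern; MY_CHUNK, MY_ALLOCATED and my_scan_range() are illustrative stand-ins, and struct my_object is the one from the earlier sketch:

#define MY_CHUNK        4096
#define MY_ALLOCATED    0x1

static void my_scan_range(void *start, void *end);      /* hypothetical scanner */

static void my_scan_chunked(struct my_object *obj, void *start, void *end)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&obj->lock, flags);
        do {
                void *next = (end - start) > MY_CHUNK ? start + MY_CHUNK : end;

                my_scan_range(start, next);
                start = next;
                if (start >= end)
                        break;

                /* bound the IRQ-off time and let other tasks run */
                raw_spin_unlock_irqrestore(&obj->lock, flags);
                cond_resched();
                raw_spin_lock_irqsave(&obj->lock, flags);
                /* the block may have been freed while the lock was dropped */
        } while (obj->flags & MY_ALLOCATED);
        raw_spin_unlock_irqrestore(&obj->lock, flags);
}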
@@ -1482,7 +1482,7 @@
 	/* prepare the kmemleak_object's */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 #ifdef DEBUG
 		/*
 		 * With a few exceptions there should be a maximum of
@@ -1499,7 +1499,7 @@
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
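A pattern worth noting in the two hunks above and in the later list walks: the object list is traversed under rcu_read_lock(), and each object's raw lock is held, with interrupts off, only long enough to inspect and classify that one object; kmemleak moves qualifying objects onto a private gray_list so the expensive scanning happens outside these tiny critical sections. Skeleton, again with the hypothetical names (a trivial counter stands in for the classification):

#include <linux/rculist.h>

static int my_count_allocated(void)
{
        struct my_object *obj;
        unsigned long flags;
        int n = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(obj, &my_list, list) {
                raw_spin_lock_irqsave(&obj->lock, flags);
                /* keep the IRQ-off section tiny: inspect and move on */
                if (obj->flags & MY_ALLOCATED)
                        n++;
                raw_spin_unlock_irqrestore(&obj->lock, flags);
        }
        rcu_read_unlock();

        return n;
}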
@@ -1564,14 +1564,14 @@
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
 		    && update_checksum(object) && get_object(object)) {
 			/* color it gray temporarily */
 			object->count = object->min_count;
 			list_add_tail(&object->gray_list, &gray_list);
 		}
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1591,13 +1591,13 @@
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if (unreferenced_object(object) &&
 		    !(object->flags & OBJECT_REPORTED)) {
 			object->flags |= OBJECT_REPORTED;
 			new_leaks++;
 		}
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1749,10 +1749,10 @@
 	struct kmemleak_object *object = v;
 	unsigned long flags;
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
 		print_unreferenced(seq, object);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	return 0;
 }
 
@@ -1782,9 +1782,9 @@
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	dump_object_info(object);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
 	return 0;
@@ -1803,11 +1803,11 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if ((object->flags & OBJECT_REPORTED) &&
 		    unreferenced_object(object))
 			__paint_it(object, KMEMLEAK_GREY);
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 