commit 2f529f9b558ca1c1bd74be7437a84e4711743404
Date:   2024-11-01
--- a/kernel/kernel/locking/lockdep.c
+++ b/kernel/kernel/locking/lockdep.c
@@ -42,6 +42,7 @@
 #include <linux/stacktrace.h>
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
+#include <linux/irqstage.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
 #include <linux/ftrace.h>
@@ -104,9 +105,56 @@
 static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 static struct task_struct *__owner;
 
+static __always_inline bool lockdep_stage_disabled(void)
+{
+        return stage_disabled();
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+/*
+ * If LOCKDEP is enabled, we want irqs to be disabled for both stages
+ * when traversing the lockdep code for hard and mutable locks (at the
+ * expense of massive latency overhead though).
+ */
+static __always_inline unsigned long lockdep_stage_test_and_disable(int *irqsoff)
+{
+        return test_and_lock_stage(irqsoff);
+}
+
+static __always_inline unsigned long lockdep_stage_disable(void)
+{
+        return lockdep_stage_test_and_disable(NULL);
+}
+
+static __always_inline void lockdep_stage_restore(unsigned long flags)
+{
+        unlock_stage(flags);
+}
+
+#else
+
+#define lockdep_stage_test_and_disable(__irqsoff)               \
+        ({                                                      \
+                unsigned long __flags;                          \
+                raw_local_irq_save(__flags);                    \
+                *(__irqsoff) = irqs_disabled_flags(__flags);    \
+                __flags;                                        \
+        })
+
+#define lockdep_stage_disable()                                 \
+        ({                                                      \
+                unsigned long __flags;                          \
+                raw_local_irq_save(__flags);                    \
+                __flags;                                        \
+        })
+
+#define lockdep_stage_restore(__flags)  raw_local_irq_restore(__flags)
+
+#endif  /* !CONFIG_IRQ_PIPELINE */
+
 static inline void lockdep_lock(void)
 {
-        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+        DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled());
 
         __this_cpu_inc(lockdep_recursion);
         arch_spin_lock(&__lock);
@@ -115,7 +163,7 @@
 
 static inline void lockdep_unlock(void)
 {
-        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+        DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled());
 
         if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
                 return;
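
Note: every conversion in the hunks below follows the pairing introduced above. Where raw_local_irq_save()/raw_local_irq_restore() used to bracket a lockdep graph traversal (inband stage only), the stage-aware helpers now hard-disable interrupts across both pipeline stages. A minimal caller-side sketch, assuming only the helpers added above; example_graph_walk() is hypothetical:

        static void example_graph_walk(void)    /* hypothetical call site */
        {
                unsigned long flags;

                /* Hard-disables irqs for both stages under CONFIG_IRQ_PIPELINE;
                 * plain raw_local_irq_save() otherwise. */
                flags = lockdep_stage_disable();
                lockdep_lock();
                /* ... walk lockdep state, cf. lockdep_count_forward_deps() ... */
                lockdep_unlock();
                lockdep_stage_restore(flags);
        }
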
@@ -882,7 +930,7 @@
         /*
          * We do an RCU walk of the hash, see lockdep_free_key_range().
          */
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+        if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
                 return NULL;
 
         hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
@@ -1179,7 +1227,7 @@
                 return;
         hash_head = keyhashentry(key);
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         if (!graph_lock())
                 goto restore_irqs;
         hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
@@ -1190,7 +1238,7 @@
 out_unlock:
         graph_unlock();
 restore_irqs:
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_register_key);
 
@@ -1239,7 +1287,7 @@
         struct lock_class *class;
         int idx;
 
-        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+        DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled());
 
         class = look_up_lock_class(lock, subclass);
         if (likely(class))
@@ -2035,11 +2083,11 @@
 
         __bfs_init_root(&this, class);
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_lock();
         ret = __lockdep_count_forward_deps(&this);
         lockdep_unlock();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 
         return ret;
 }
@@ -2061,11 +2109,11 @@
 
         __bfs_init_root(&this, class);
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_lock();
         ret = __lockdep_count_backward_deps(&this);
         lockdep_unlock();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 
         return ret;
 }
@@ -4170,6 +4218,8 @@
  */
 void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
+        unsigned long flags;
+
         if (unlikely(!debug_locks))
                 return;
 
@@ -4192,38 +4242,43 @@
                 return;
         }
 
+        flags = hard_cond_local_irq_save();
+
         /*
          * We're enabling irqs and according to our state above irqs weren't
          * already enabled, yet we find the hardware thinks they are in fact
          * enabled.. someone messed up their IRQ state tracing.
          */
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-                return;
+        if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+                goto out;
 
         /*
          * See the fine text that goes along with this variable definition.
          */
         if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
-                return;
+                goto out;
 
         /*
          * Can't allow enabling interrupts while in an interrupt handler,
          * that's general bad form and such. Recursion, limited stack etc..
          */
-        if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
-                return;
+        if (DEBUG_LOCKS_WARN_ON(running_inband() && lockdep_hardirq_context()))
+                goto out;
 
         current->hardirq_chain_key = current->curr_chain_key;
 
         lockdep_recursion_inc();
         __trace_hardirqs_on_caller();
         lockdep_recursion_finish();
+out:
+        hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
 
 void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
         struct irqtrace_events *trace = &current->irqtrace;
+        unsigned long flags;
 
         if (unlikely(!debug_locks))
                 return;
@@ -4261,13 +4316,15 @@
                 return;
         }
 
+        flags = hard_cond_local_irq_save();
+
         /*
          * We're enabling irqs and according to our state above irqs weren't
          * already enabled, yet we find the hardware thinks they are in fact
          * enabled.. someone messed up their IRQ state tracing.
          */
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-                return;
+        if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+                goto out;
 
         /*
          * Ensure the lock stack remained unchanged between
@@ -4282,6 +4339,8 @@
         trace->hardirq_enable_ip = ip;
         trace->hardirq_enable_event = ++trace->irq_events;
         debug_atomic_inc(hardirqs_on_events);
+out:
+        hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
 
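
Note: the four irq-trace hooks (lockdep_hardirqs_on/off, lockdep_softirqs_on/off) all get the same bracketing in this patch: conditionally hard-disable interrupts on entry, then funnel every early bail-out through a single restore label instead of returning directly. A sketch of that control-flow shape; example_irqtrace_hook() is hypothetical:

        void example_irqtrace_hook(void)        /* hypothetical hook */
        {
                unsigned long flags;

                /* No-op if hard irqs are already off; save-and-disable otherwise. */
                flags = hard_cond_local_irq_save();

                if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
                        goto out;       /* was a bare 'return' before this patch */

                /* ... update current->irqtrace and per-task lockdep state ... */
        out:
                hard_cond_local_irq_restore(flags);
        }
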
@@ -4290,6 +4349,8 @@
  */
 void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
+        unsigned long flags;
+
         if (unlikely(!debug_locks))
                 return;
 
@@ -4304,12 +4365,14 @@
         } else if (__this_cpu_read(lockdep_recursion))
                 return;
 
+        flags = hard_cond_local_irq_save();
+
         /*
          * So we're supposed to get called after you mask local IRQs, but for
          * some reason the hardware doesn't quite think you did a proper job.
          */
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-                return;
+        if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+                goto out;
 
         if (lockdep_hardirqs_enabled()) {
                 struct irqtrace_events *trace = &current->irqtrace;
@@ -4324,6 +4387,8 @@
         } else {
                 debug_atomic_inc(redundant_hardirqs_off);
         }
+out:
+        hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
 
@@ -4333,20 +4398,23 @@
 void lockdep_softirqs_on(unsigned long ip)
 {
         struct irqtrace_events *trace = &current->irqtrace;
+        unsigned long flags;
 
         if (unlikely(!lockdep_enabled()))
                 return;
+
+        flags = hard_cond_local_irq_save();
 
         /*
          * We fancy IRQs being disabled here, see softirq.c, avoids
          * funny state and nesting things.
          */
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-                return;
+        if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+                goto out;
 
         if (current->softirqs_enabled) {
                 debug_atomic_inc(redundant_softirqs_on);
-                return;
+                goto out;
         }
 
         lockdep_recursion_inc();
@@ -4365,6 +4433,8 @@
         if (lockdep_hardirqs_enabled())
                 mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
         lockdep_recursion_finish();
+out:
+        hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -4372,14 +4442,18 @@
  */
 void lockdep_softirqs_off(unsigned long ip)
 {
+        unsigned long flags;
+
         if (unlikely(!lockdep_enabled()))
                 return;
+
+        flags = hard_cond_local_irq_save();
 
         /*
          * We fancy IRQs being disabled here, see softirq.c
          */
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-                return;
+        if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+                goto out;
 
         if (current->softirqs_enabled) {
                 struct irqtrace_events *trace = &current->irqtrace;
@@ -4397,6 +4471,8 @@
                 DEBUG_LOCKS_WARN_ON(!softirq_count());
         } else
                 debug_atomic_inc(redundant_softirqs_off);
+out:
+        hard_cond_local_irq_restore(flags);
 }
 
 static int
@@ -4751,11 +4827,11 @@
                 if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
                         return;
 
-                raw_local_irq_save(flags);
+                flags = lockdep_stage_disable();
                 lockdep_recursion_inc();
                 register_lock_class(lock, subclass, 1);
                 lockdep_recursion_finish();
-                raw_local_irq_restore(flags);
+                lockdep_stage_restore(flags);
         }
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map_type);
@@ -5085,7 +5161,7 @@
         struct held_lock *hlock;
         int first_idx = idx;
 
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+        if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
                 return 0;
 
         for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
@@ -5397,7 +5473,13 @@
 static noinstr void check_flags(unsigned long flags)
 {
 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
-        if (!debug_locks)
+        /*
+         * irq_pipeline: we can't and don't want to check the
+         * consistency of the irq tracer when running over the
+         * pipeline entry or oob stage contexts, since the inband
+         * stall bit does not reflect the current irq state there.
+         */
+        if (on_pipeline_entry() || running_oob() || !debug_locks)
                 return;
 
         /* Get the warning out.. */
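
Note: with this guard, check_flags() becomes a no-op outside the inband stage. All of the lock_* wrappers below still call it after lockdep_stage_disable(), so out-of-band callers keep full lock validation but skip the stall-bit consistency check, which would otherwise raise false positives there.
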
@@ -5444,13 +5526,13 @@
         if (unlikely(!lockdep_enabled()))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_recursion_inc();
         check_flags(flags);
         if (__lock_set_class(lock, name, key, subclass, ip))
                 check_chain_key(current);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
@@ -5461,13 +5543,13 @@
         if (unlikely(!lockdep_enabled()))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_recursion_inc();
         check_flags(flags);
         if (__lock_downgrade(lock, ip))
                 check_chain_key(current);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_downgrade);
 
@@ -5532,6 +5614,7 @@
                           struct lockdep_map *nest_lock, unsigned long ip)
 {
         unsigned long flags;
+        int irqsoff;
 
         trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 
@@ -5558,14 +5641,14 @@
                 return;
         }
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_test_and_disable(&irqsoff);
         check_flags(flags);
 
         lockdep_recursion_inc();
         __lock_acquire(lock, subclass, trylock, read, check,
-                       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
+                       irqsoff, nest_lock, ip, 0, 0);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
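
Note: lock_acquire() must pass the "were irqs disabled at entry?" state down to __lock_acquire(). With the pipeline, that can no longer be decoded from the flags word via irqs_disabled_flags(), so lockdep_stage_test_and_disable() reports it through its *irqsoff argument instead. For reference, the CONFIG_IRQ_PIPELINE=n fallback defined earlier expands to exactly the old behaviour:

        unsigned long flags;
        int irqsoff;

        /* flags = lockdep_stage_test_and_disable(&irqsoff) expands to: */
        raw_local_irq_save(flags);
        irqsoff = irqs_disabled_flags(flags);   /* irq state before disabling */

        /* ... __lock_acquire(..., irqsoff, ...) ... */

        lockdep_stage_restore(flags);           /* raw_local_irq_restore(flags) */
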
@@ -5578,14 +5661,14 @@
         if (unlikely(!lockdep_enabled()))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         check_flags(flags);
 
         lockdep_recursion_inc();
         if (__lock_release(lock, ip))
                 check_chain_key(current);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
@@ -5597,13 +5680,13 @@
         if (unlikely(!lockdep_enabled()))
                 return 1; /* avoid false negative lockdep_assert_held() */
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         check_flags(flags);
 
         lockdep_recursion_inc();
         ret = __lock_is_held(lock, read);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 
         return ret;
 }
@@ -5618,13 +5701,13 @@
         if (unlikely(!lockdep_enabled()))
                 return cookie;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         check_flags(flags);
 
         lockdep_recursion_inc();
         cookie = __lock_pin_lock(lock);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 
         return cookie;
 }
@@ -5637,13 +5720,13 @@
         if (unlikely(!lockdep_enabled()))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         check_flags(flags);
 
         lockdep_recursion_inc();
         __lock_repin_lock(lock, cookie);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_repin_lock);
 
@@ -5654,13 +5737,13 @@
         if (unlikely(!lockdep_enabled()))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         check_flags(flags);
 
         lockdep_recursion_inc();
         __lock_unpin_lock(lock, cookie);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_unpin_lock);
 
@@ -5790,12 +5873,12 @@
         if (unlikely(!lock_stat || !lockdep_enabled()))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         check_flags(flags);
         lockdep_recursion_inc();
         __lock_contended(lock, ip);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
@@ -5808,12 +5891,12 @@
         if (unlikely(!lock_stat || !lockdep_enabled()))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         check_flags(flags);
         lockdep_recursion_inc();
         __lock_acquired(lock, ip);
         lockdep_recursion_finish();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_acquired);
 #endif
@@ -5828,7 +5911,7 @@
         unsigned long flags;
         int i;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_init_task(current);
         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
         nr_hardirq_chains = 0;
@@ -5837,7 +5920,7 @@
         debug_locks = 1;
         for (i = 0; i < CHAINHASH_SIZE; i++)
                 INIT_HLIST_HEAD(chainhash_table + i);
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 
 /* Remove a class from a lock chain. Must be called with the graph lock held. */
@@ -6014,7 +6097,7 @@
         if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_lock();
 
         /* closed head */
@@ -6028,7 +6111,7 @@
         call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
         lockdep_unlock();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 
 /*
@@ -6071,13 +6154,13 @@
 
         init_data_structures_once();
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_lock();
         pf = get_pending_free();
         __lockdep_free_key_range(pf, start, size);
         call_rcu_zapped(pf);
         lockdep_unlock();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 
         /*
          * Wait for any possible iterators from look_up_lock_class() to pass
@@ -6097,12 +6180,12 @@
 
         init_data_structures_once();
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_lock();
         __lockdep_free_key_range(pf, start, size);
         __free_zapped_classes(pf);
         lockdep_unlock();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 
 void lockdep_free_key_range(void *start, unsigned long size)
@@ -6173,7 +6256,7 @@
         unsigned long flags;
         int locked;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         locked = graph_lock();
         if (!locked)
                 goto out_irq;
@@ -6184,7 +6267,7 @@
 
         graph_unlock();
 out_irq:
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 
 /*
@@ -6196,12 +6279,12 @@
         struct pending_free *pf = delayed_free.pf;
         unsigned long flags;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_lock();
         __lockdep_reset_lock(pf, lock);
         __free_zapped_classes(pf);
         lockdep_unlock();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
@@ -6234,7 +6317,7 @@
         if (WARN_ON_ONCE(static_obj(key)))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         lockdep_lock();
 
         hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
@@ -6251,7 +6334,7 @@
                 call_rcu_zapped(pf);
         }
         lockdep_unlock();
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 
         /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
         synchronize_rcu();
@@ -6342,7 +6425,7 @@
         if (unlikely(!debug_locks))
                 return;
 
-        raw_local_irq_save(flags);
+        flags = lockdep_stage_disable();
         for (i = 0; i < curr->lockdep_depth; i++) {
                 hlock = curr->held_locks + i;
 
@@ -6353,7 +6436,7 @@
                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
                 break;
         }
-        raw_local_irq_restore(flags);
+        lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 