2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/include/linux/lockdep.h
@@ -5,21 +5,20 @@
  * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
- * see Documentation/locking/lockdep-design.txt for more details.
+ * see Documentation/locking/lockdep-design.rst for more details.
  */
 #ifndef __LINUX_LOCKDEP_H
 #define __LINUX_LOCKDEP_H
 
+#include <linux/lockdep_types.h>
+#include <linux/smp.h>
+#include <asm/percpu.h>
+
 struct task_struct;
-struct lockdep_map;
 
 /* for sysctl */
 extern int prove_locking;
 extern int lock_stat;
-
-#define MAX_LOCKDEP_SUBCLASSES		8UL
-
-#include <linux/types.h>
 
 #ifdef CONFIG_LOCKDEP
 
@@ -27,138 +26,6 @@
 #include <linux/list.h>
 #include <linux/debug_locks.h>
 #include <linux/stacktrace.h>
-
-/*
- * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
- * the total number of states... :-(
- */
-#define XXX_LOCK_USAGE_STATES		(1+2*4)
-
-/*
- * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
- * cached in the instance of lockdep_map
- *
- * Currently main class (subclass == 0) and signle depth subclass
- * are cached in lockdep_map. This optimization is mainly targeting
- * on rq->lock. double_rq_lock() acquires this highly competitive with
- * single depth.
- */
-#define NR_LOCKDEP_CACHING_CLASSES	2
-
-/*
- * Lock-classes are keyed via unique addresses, by embedding the
- * lockclass-key into the kernel (or module) .data section. (For
- * static locks we use the lock address itself as the key.)
- */
-struct lockdep_subclass_key {
-	char __one_byte;
-} __attribute__ ((__packed__));
-
-struct lock_class_key {
-	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
-};
-
-extern struct lock_class_key __lockdep_no_validate__;
-
-#define LOCKSTAT_POINTS		4
-
-/*
- * The lock-class itself:
- */
-struct lock_class {
-	/*
-	 * class-hash:
-	 */
-	struct hlist_node		hash_entry;
-
-	/*
-	 * global list of all lock-classes:
-	 */
-	struct list_head		lock_entry;
-
-	struct lockdep_subclass_key	*key;
-	unsigned int			subclass;
-	unsigned int			dep_gen_id;
-
-	/*
-	 * IRQ/softirq usage tracking bits:
-	 */
-	unsigned long			usage_mask;
-	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
-
-	/*
-	 * These fields represent a directed graph of lock dependencies,
-	 * to every node we attach a list of "forward" and a list of
-	 * "backward" graph nodes.
-	 */
-	struct list_head		locks_after, locks_before;
-
-	/*
-	 * Generation counter, when doing certain classes of graph walking,
-	 * to ensure that we check one node only once:
-	 */
-	unsigned int			version;
-
-	/*
-	 * Statistics counter:
-	 */
-	unsigned long			ops;
-
-	const char			*name;
-	int				name_version;
-
-#ifdef CONFIG_LOCK_STAT
-	unsigned long			contention_point[LOCKSTAT_POINTS];
-	unsigned long			contending_point[LOCKSTAT_POINTS];
-#endif
-};
-
-#ifdef CONFIG_LOCK_STAT
-struct lock_time {
-	s64				min;
-	s64				max;
-	s64				total;
-	unsigned long			nr;
-};
-
-enum bounce_type {
-	bounce_acquired_write,
-	bounce_acquired_read,
-	bounce_contended_write,
-	bounce_contended_read,
-	nr_bounce_types,
-
-	bounce_acquired = bounce_acquired_write,
-	bounce_contended = bounce_contended_write,
-};
-
-struct lock_class_stats {
-	unsigned long			contention_point[LOCKSTAT_POINTS];
-	unsigned long			contending_point[LOCKSTAT_POINTS];
-	struct lock_time		read_waittime;
-	struct lock_time		write_waittime;
-	struct lock_time		read_holdtime;
-	struct lock_time		write_holdtime;
-	unsigned long			bounces[nr_bounce_types];
-};
-
-struct lock_class_stats lock_stats(struct lock_class *class);
-void clear_lock_stats(struct lock_class *class);
-#endif
-
-/*
- * Map the lock object (the lock instance) to the lock-class object.
- * This is embedded into specific lock instances:
- */
-struct lockdep_map {
-	struct lock_class_key		*key;
-	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
-	const char			*name;
-#ifdef CONFIG_LOCK_STAT
-	int				cpu;
-	unsigned long			ip;
-#endif
-};
 
 static inline void lockdep_copy_map(struct lockdep_map *to,
 				    struct lockdep_map *from)
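
The block removed above (lockdep_subclass_key, lock_class_key, lock_class, the lock-stat types and lockdep_map itself) now lives in <linux/lockdep_types.h>, which the first hunk pulls in. The keying scheme described in the removed comment is unchanged: a lock class is named by the address of a static key embedded in .data. A minimal sketch of that pattern, with illustrative names (foo_device, foo_device_lock_key) that are not part of this patch:

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative structure; any dynamically allocated object with a lock works. */
struct foo_device {
	spinlock_t lock;
};

/* One static key in .data: its address names the class shared by all foo_device locks. */
static struct lock_class_key foo_device_lock_key;

static struct foo_device *foo_device_alloc(void)
{
	struct foo_device *fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd)
		return NULL;
	spin_lock_init(&fd->lock);
	/* all instances share one class, no matter where they were allocated */
	lockdep_set_class(&fd->lock, &foo_device_lock_key);
	return fd;
}

Every lock initialised against the same key address lands in the same class, so lockdep reports one class for all foo_device instances rather than one per allocation site.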
@@ -185,8 +52,13 @@
 struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
-	struct stack_trace		trace;
-	int				distance;
+	struct lock_class		*links_to;
+	const struct lock_trace		*trace;
+	u16				distance;
+	/* bitmap of different dependencies from head to this */
+	u8				dep;
+	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
+	u8				only_xr;
 
 	/*
 	 * The parent field is used to implement breadth-first search, and the
@@ -195,11 +67,17 @@
 	struct lock_list		*parent;
 };
 
-/*
- * We record lock dependency chains, so that we can cache them:
+/**
+ * struct lock_chain - lock dependency chain record
+ *
+ * @irq_context: the same as irq_context in held_lock below
+ * @depth:       the number of held locks in this chain
+ * @base:        the index in chain_hlocks for this chain
+ * @entry:       the collided lock chains in lock_chain hash list
+ * @chain_key:   the hash key of this lock_chain
  */
 struct lock_chain {
-	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
+	/* see BUILD_BUG_ON()s in add_chain_cache() */
 	unsigned int			irq_context :  2,
 					depth       :  6,
 					base        : 24;
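
The lock_chain bitfields shown as context here pack into a single 32-bit word (2 + 6 + 24 bits), and the updated comment points at add_chain_cache() for the compile-time checks. The sketch below only illustrates the kind of invariant involved; the actual BUILD_BUG_ON()s live in kernel/locking/lockdep.c and may check more:

#include <linux/build_bug.h>
#include <linux/sched.h>	/* MAX_LOCK_DEPTH */

static inline void lock_chain_layout_sketch(void)
{
	/* irq_context:2 + depth:6 + base:24 share one 32-bit storage unit */
	BUILD_BUG_ON(2 + 6 + 24 != 32);
	/* a 6-bit depth must be able to count every held lock in a chain */
	BUILD_BUG_ON(MAX_LOCK_DEPTH > (1UL << 6) - 1);
}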
@@ -209,12 +87,8 @@
 };
 
 #define MAX_LOCKDEP_KEYS_BITS		13
-/*
- * Subtract one because we offset hlock->class_idx by 1 in order
- * to make 0 mean no class. This avoids overflowing the class_idx
- * bitfield and hitting the BUG in hlock_class().
- */
-#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY		-1
 
 struct held_lock {
 	/*
@@ -239,6 +113,11 @@
 	u64				waittime_stamp;
 	u64				holdtime_stamp;
 #endif
+	/*
+	 * class_idx is zero-indexed; it points to the element in
+	 * lock_classes this held lock instance belongs to. class_idx is in
+	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+	 */
 	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
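
class_idx keeps its 13 bits but becomes zero-based, so every value 0..8191 now names a real class and MAX_LOCKDEP_KEYS can drop the "- 1": exactly 1 << 13 = 8192 classes fit the field (consistent with the initial chain key moving off zero to -1, since 0 is now a meaningful class index). A small compile-time restatement of that arithmetic, reusing the constants from this header:

#include <linux/build_bug.h>

static inline void class_idx_range_sketch(void)
{
	/* 13-bit class_idx: values 0 .. 8191, all of them valid class indices now */
	BUILD_BUG_ON(MAX_LOCKDEP_KEYS != 8192);
	BUILD_BUG_ON(MAX_LOCKDEP_KEYS - 1 != 8191);	/* largest legal class_idx */
}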
@@ -271,9 +150,34 @@
 extern void lockdep_reset_lock(struct lockdep_map *lock);
 extern void lockdep_free_key_range(void *start, unsigned long size);
 extern asmlinkage void lockdep_sys_exit(void);
+extern void lockdep_set_selftest_task(struct task_struct *task);
 
-extern void lockdep_off(void);
-extern void lockdep_on(void);
+extern void lockdep_init_task(struct task_struct *task);
+
+/*
+ * Split the recursion counter in two to readily detect 'off' vs recursion.
+ */
+#define LOCKDEP_RECURSION_BITS	16
+#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
+#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)
+
+/*
+ * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
+ * to header dependencies.
+ */
+
+#define lockdep_off()					\
+do {							\
+	current->lockdep_recursion += LOCKDEP_OFF;	\
+} while (0)
+
+#define lockdep_on()					\
+do {							\
+	current->lockdep_recursion -= LOCKDEP_OFF;	\
+} while (0)
+
+extern void lockdep_register_key(struct lock_class_key *key);
+extern void lockdep_unregister_key(struct lock_class_key *key);
 
 /*
  * These methods are used by specific locking variants (spinlocks,
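
lockdep_off() now adds LOCKDEP_OFF (1 << 16) to current->lockdep_recursion instead of incrementing it, so the upper half of the word counts lockdep_off() nesting while the low 16 bits (LOCKDEP_RECURSION_MASK) stay free for recursion tracking, and the two conditions can be told apart from a single read. A sketch using only the constants above (the helper names are illustrative, not kernel API):

#include <linux/sched.h>	/* current, task_struct::lockdep_recursion */

static inline bool lockdep_off_pending_sketch(void)
{
	/* any outstanding lockdep_off() shows up in the upper 16 bits */
	return current->lockdep_recursion >= LOCKDEP_OFF;
}

static inline unsigned int lockdep_recursion_depth_sketch(void)
{
	/* internal recursion only, with the 'off' count masked away */
	return current->lockdep_recursion & LOCKDEP_RECURSION_MASK;
}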
@@ -281,8 +185,28 @@
  * to lockdep:
  */
 
-extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
-			     struct lock_class_key *key, int subclass);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
+
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
+}
+
+static inline void
+lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
+		      struct lock_class_key *key, int subclass, u8 inner)
+{
+	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
+}
+
+static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
+			     struct lock_class_key *key, int subclass)
+{
+	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
+}
 
 /*
  * Reinitialize a lock key - for cases where there is special locking or
@@ -290,18 +214,33 @@
  * of dependencies wrong: they are either too broad (they need a class-split)
  * or they are too narrow (they suffer from a false class-split):
  */
-#define lockdep_set_class(lock, key) \
-		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
-#define lockdep_set_class_and_name(lock, key, name) \
-		lockdep_init_map(&(lock)->dep_map, name, key, 0)
-#define lockdep_set_class_and_subclass(lock, key, sub) \
-		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
-#define lockdep_set_subclass(lock, sub) \
-		lockdep_init_map(&(lock)->dep_map, #lock, \
-				 (lock)->dep_map.key, sub)
+#define lockdep_set_class(lock, key)				\
+	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
+			      (lock)->dep_map.wait_type_inner,	\
+			      (lock)->dep_map.wait_type_outer,	\
+			      (lock)->dep_map.lock_type)
+
+#define lockdep_set_class_and_name(lock, key, name)		\
+	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
+			      (lock)->dep_map.wait_type_inner,	\
+			      (lock)->dep_map.wait_type_outer,	\
+			      (lock)->dep_map.lock_type)
+
+#define lockdep_set_class_and_subclass(lock, key, sub)		\
+	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
+			      (lock)->dep_map.wait_type_inner,	\
+			      (lock)->dep_map.wait_type_outer,	\
+			      (lock)->dep_map.lock_type)
+
+#define lockdep_set_subclass(lock, sub)					\
+	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+			      (lock)->dep_map.wait_type_inner,		\
+			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)
 
 #define lockdep_set_novalidate_class(lock) \
 	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
+
 /*
  * Compare locking classes
  */
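
lockdep_init_map_type() threads two extra pieces of information into the map: the inner/outer wait types used by wait-type checking and a lock type (LD_LOCK_NORMAL by default). The wrappers default the missing arguments, so plain lockdep_init_map() keeps its old meaning with LD_WAIT_INV ("not checked"), and the lockdep_set_class*() macros above now re-key a lock without losing whatever wait-type annotation it already carried. A sketch of how a sleeping-lock primitive might use the new API; the helper name is illustrative, and LD_WAIT_SLEEP comes from <linux/lockdep_types.h>, roughly following how sleeping locks such as mutexes are annotated:

#include <linux/lockdep.h>
#include <linux/lockdep_types.h>

/* Illustrative init helper: mark the lock as one that may sleep, so the
 * wait-type checks can flag it when it is acquired from a context that
 * only tolerates spinning locks. */
static inline void my_sleeping_lock_init_dep_map(struct lockdep_map *map,
						 const char *name,
						 struct lock_class_key *key)
{
	lockdep_init_map_wait(map, name, key, 0, LD_WAIT_SLEEP);
}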
@@ -331,8 +270,7 @@
 			int trylock, int read, int check,
 			struct lockdep_map *nest_lock, unsigned long ip);
 
-extern void lock_release(struct lockdep_map *lock, int nested,
-			 unsigned long ip);
+extern void lock_release(struct lockdep_map *lock, unsigned long ip);
 
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
@@ -359,8 +297,6 @@
 
 extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
 
-struct pin_cookie { unsigned int val; };
-
 #define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
 
 extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
@@ -373,7 +309,7 @@
 		WARN_ON(debug_locks && !lockdep_is_held(l));	\
 	} while (0)
 
-#define lockdep_assert_held_exclusive(l)	do {		\
+#define lockdep_assert_held_write(l)	do {			\
 		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
 	} while (0)
 
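
lockdep_assert_held_exclusive() becomes lockdep_assert_held_write(), pairing it with the existing lockdep_assert_held_read(): both check how a lock is held, not merely that it is held. Typical use in a hypothetical helper that must only run under the write side of an rwsem (the structure and field names are illustrative):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct foo_table {
	struct rw_semaphore sem;
	unsigned long nr_entries;
};

static void foo_table_grow(struct foo_table *t)
{
	/* must hold t->sem for write; a reader-held sem trips the warning */
	lockdep_assert_held_write(&t->sem);
	t->nr_entries++;
}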
@@ -393,6 +329,10 @@
 
 #else /* !CONFIG_LOCKDEP */
 
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
 static inline void lockdep_off(void)
 {
 }
@@ -401,12 +341,22 @@
 {
 }
 
+static inline void lockdep_set_selftest_task(struct task_struct *task)
+{
+}
+
 # define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
-# define lock_release(l, n, i)			do { } while (0)
+# define lock_release(l, i)			do { } while (0)
 # define lock_downgrade(l, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+		do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
+		do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_wait(lock, name, key, sub, inner) \
+		do { (void)(name); (void)(key); } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
 		do { (void)(name); (void)(key); } while (0)
 # define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
@@ -427,32 +377,29 @@
 # define lockdep_reset()		do { debug_locks = 1; } while (0)
 # define lockdep_free_key_range(start, size)	do { } while (0)
 # define lockdep_sys_exit() 			do { } while (0)
-/*
- * The class key takes no space if lockdep is disabled:
- */
-struct lock_class_key { };
 
-/*
- * The lockdep_map takes no space if lockdep is disabled:
- */
-struct lockdep_map { };
+static inline void lockdep_register_key(struct lock_class_key *key)
+{
+}
+
+static inline void lockdep_unregister_key(struct lock_class_key *key)
+{
+}
 
 #define lockdep_depth(tsk)	(0)
 
 #define lockdep_is_held_type(l, r)		(1)
 
 #define lockdep_assert_held(l)			do { (void)(l); } while (0)
-#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l)	do { (void)(l); } while (0)
 #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 
 #define lockdep_recursing(tsk)			(0)
 
-struct pin_cookie { };
-
 #define NIL_COOKIE (struct pin_cookie){ }
 
-#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
+#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
 #define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
 
@@ -473,7 +420,6 @@
 	{ .name = (_name), .key = (void *)(_key), }
 
 static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
 
 #ifdef CONFIG_LOCK_STAT
@@ -540,6 +486,20 @@
 }
 #endif
 
+/* Variable used to make lockdep treat read_lock() as recursive in selftests */
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+extern unsigned int force_read_lock_recursive;
+#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+#define force_read_lock_recursive 0
+#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+
+#ifdef CONFIG_LOCKDEP
+extern bool read_lock_is_recursive(void);
+#else /* CONFIG_LOCKDEP */
+/* If !LOCKDEP, the value is meaningless */
+#define read_lock_is_recursive() 0
+#endif
+
 /*
  * For trivial one-depth nesting of a lock-class, the following
  * global define can be used. (Subsystems with multiple levels
@@ -558,61 +518,121 @@
 
 #define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
-#define spin_release(l, n, i)			lock_release(l, n, i)
+#define spin_release(l, i)			lock_release(l, i)
 
 #define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
-#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define rwlock_release(l, n, i)			lock_release(l, n, i)
+#define rwlock_acquire_read(l, s, t, i)					\
+do {									\
+	if (read_lock_is_recursive())					\
+		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
+	else								\
+		lock_acquire_shared(l, s, t, NULL, i);			\
+} while (0)
+
+#define rwlock_release(l, i)			lock_release(l, i)
 
 #define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define seqcount_release(l, n, i)		lock_release(l, n, i)
+#define seqcount_release(l, i)			lock_release(l, i)
 
 #define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
-#define mutex_release(l, n, i)			lock_release(l, n, i)
+#define mutex_release(l, i)			lock_release(l, i)
 
 #define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
 #define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
-#define rwsem_release(l, n, i)			lock_release(l, n, i)
+#define rwsem_release(l, i)			lock_release(l, i)
 
 #define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
-#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l)			lock_release(l, _THIS_IP_)
 
 #ifdef CONFIG_PROVE_LOCKING
-# define might_lock(lock) 						\
+# define might_lock(lock)						\
 do {									\
 	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
 	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
-	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
+	lock_release(&(lock)->dep_map, _THIS_IP_);			\
 } while (0)
-# define might_lock_read(lock) 						\
+# define might_lock_read(lock)						\
 do {									\
 	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
 	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
-	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
+	lock_release(&(lock)->dep_map, _THIS_IP_);			\
+} while (0)
+# define might_lock_nested(lock, subclass)				\
+do {									\
+	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,	\
+		     _THIS_IP_);					\
+	lock_release(&(lock)->dep_map, _THIS_IP_);			\
 } while (0)
 
-#define lockdep_assert_irqs_enabled()	do {				\
-		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
-			  !current->hardirqs_enabled,			\
-			  "IRQs not enabled as expected\n");		\
-	} while (0)
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
 
-#define lockdep_assert_irqs_disabled()	do {				\
-		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
-			  current->hardirqs_enabled,			\
-			  "IRQs not disabled as expected\n");		\
-	} while (0)
+#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))
+
+#define lockdep_assert_irqs_enabled()					\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_irqs_disabled()					\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_in_irq()						\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
+} while (0)
+
+#define lockdep_assert_preemption_enabled()				\
+do {									\
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
+		     __lockdep_enabled			&&		\
+		     (preempt_count() != 0		||		\
+		      !this_cpu_read(hardirqs_enabled)));		\
+} while (0)
+
+#define lockdep_assert_preemption_disabled()				\
+do {									\
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
+		     __lockdep_enabled			&&		\
+		     (preempt_count() == 0		&&		\
+		      this_cpu_read(hardirqs_enabled)));		\
+} while (0)
 
 #else
 # define might_lock(lock) do { } while (0)
 # define might_lock_read(lock) do { } while (0)
+# define might_lock_nested(lock, subclass) do { } while (0)
+
 # define lockdep_assert_irqs_enabled() do { } while (0)
 # define lockdep_assert_irqs_disabled() do { } while (0)
+# define lockdep_assert_in_irq() do { } while (0)
+
+# define lockdep_assert_preemption_enabled() do { } while (0)
+# define lockdep_assert_preemption_disabled() do { } while (0)
+#endif
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+
+# define lockdep_assert_RT_in_threaded_ctx() do {			\
+		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
+			  lockdep_hardirq_context() &&			\
+			  !(current->hardirq_threaded || current->irq_config),	\
+			  "Not in threaded context on PREEMPT_RT as expected\n");	\
+} while (0)
+
+#else
+
+# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)
+
 #endif
 
 #ifdef CONFIG_LOCKDEP
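
The IRQ and preemption assertions now read per-CPU lockdep state (hardirqs_enabled, hardirq_context, lockdep_recursion) rather than fields of current, and __lockdep_enabled keeps them quiet while lockdep itself is disabled or recursing. A sketch of typical callers, with an illustrative per-CPU counter that is not part of this patch:

#include <linux/lockdep.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, foo_events);	/* illustrative */

static void foo_count_event(void)
{
	/* callers must have preemption disabled so the CPU cannot change under us */
	lockdep_assert_preemption_disabled();
	__this_cpu_inc(foo_events);
}

static void foo_poll_hardware(void)
{
	/* this path must never be entered with interrupts disabled */
	lockdep_assert_irqs_enabled();
}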