.. | .. |
5 | 5 | * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
6 | 6 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra |
7 | 7 | * |
8 | | - * see Documentation/locking/lockdep-design.txt for more details. |
| 8 | + * see Documentation/locking/lockdep-design.rst for more details. |
9 | 9 | */ |
10 | 10 | #ifndef __LINUX_LOCKDEP_H |
11 | 11 | #define __LINUX_LOCKDEP_H |
12 | 12 | |
| 13 | +#include <linux/lockdep_types.h> |
| 14 | +#include <linux/smp.h> |
| 15 | +#include <asm/percpu.h> |
| 16 | + |
13 | 17 | struct task_struct; |
14 | | -struct lockdep_map; |
15 | 18 | |
16 | 19 | /* for sysctl */ |
17 | 20 | extern int prove_locking; |
18 | 21 | extern int lock_stat; |
19 | | - |
20 | | -#define MAX_LOCKDEP_SUBCLASSES 8UL |
21 | | - |
22 | | -#include <linux/types.h> |
23 | 22 | |
24 | 23 | #ifdef CONFIG_LOCKDEP |
25 | 24 | |
.. | .. |
27 | 26 | #include <linux/list.h> |
28 | 27 | #include <linux/debug_locks.h> |
29 | 28 | #include <linux/stacktrace.h> |
30 | | - |
31 | | -/* |
32 | | - * We'd rather not expose kernel/lockdep_states.h this wide, but we do need |
33 | | - * the total number of states... :-( |
34 | | - */ |
35 | | -#define XXX_LOCK_USAGE_STATES (1+2*4) |
36 | | - |
37 | | -/* |
38 | | - * NR_LOCKDEP_CACHING_CLASSES ... Number of classes |
39 | | - * cached in the instance of lockdep_map |
40 | | - * |
41 | | - * Currently main class (subclass == 0) and signle depth subclass |
42 | | - * are cached in lockdep_map. This optimization is mainly targeting |
43 | | - * on rq->lock. double_rq_lock() acquires this highly competitive with |
44 | | - * single depth. |
45 | | - */ |
46 | | -#define NR_LOCKDEP_CACHING_CLASSES 2 |
47 | | - |
48 | | -/* |
49 | | - * Lock-classes are keyed via unique addresses, by embedding the |
50 | | - * lockclass-key into the kernel (or module) .data section. (For |
51 | | - * static locks we use the lock address itself as the key.) |
52 | | - */ |
53 | | -struct lockdep_subclass_key { |
54 | | - char __one_byte; |
55 | | -} __attribute__ ((__packed__)); |
56 | | - |
57 | | -struct lock_class_key { |
58 | | - struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; |
59 | | -}; |
60 | | - |
61 | | -extern struct lock_class_key __lockdep_no_validate__; |
62 | | - |
63 | | -#define LOCKSTAT_POINTS 4 |
64 | | - |
65 | | -/* |
66 | | - * The lock-class itself: |
67 | | - */ |
68 | | -struct lock_class { |
69 | | - /* |
70 | | - * class-hash: |
71 | | - */ |
72 | | - struct hlist_node hash_entry; |
73 | | - |
74 | | - /* |
75 | | - * global list of all lock-classes: |
76 | | - */ |
77 | | - struct list_head lock_entry; |
78 | | - |
79 | | - struct lockdep_subclass_key *key; |
80 | | - unsigned int subclass; |
81 | | - unsigned int dep_gen_id; |
82 | | - |
83 | | - /* |
84 | | - * IRQ/softirq usage tracking bits: |
85 | | - */ |
86 | | - unsigned long usage_mask; |
87 | | - struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; |
88 | | - |
89 | | - /* |
90 | | - * These fields represent a directed graph of lock dependencies, |
91 | | - * to every node we attach a list of "forward" and a list of |
92 | | - * "backward" graph nodes. |
93 | | - */ |
94 | | - struct list_head locks_after, locks_before; |
95 | | - |
96 | | - /* |
97 | | - * Generation counter, when doing certain classes of graph walking, |
98 | | - * to ensure that we check one node only once: |
99 | | - */ |
100 | | - unsigned int version; |
101 | | - |
102 | | - /* |
103 | | - * Statistics counter: |
104 | | - */ |
105 | | - unsigned long ops; |
106 | | - |
107 | | - const char *name; |
108 | | - int name_version; |
109 | | - |
110 | | -#ifdef CONFIG_LOCK_STAT |
111 | | - unsigned long contention_point[LOCKSTAT_POINTS]; |
112 | | - unsigned long contending_point[LOCKSTAT_POINTS]; |
113 | | -#endif |
114 | | -}; |
115 | | - |
116 | | -#ifdef CONFIG_LOCK_STAT |
117 | | -struct lock_time { |
118 | | - s64 min; |
119 | | - s64 max; |
120 | | - s64 total; |
121 | | - unsigned long nr; |
122 | | -}; |
123 | | - |
124 | | -enum bounce_type { |
125 | | - bounce_acquired_write, |
126 | | - bounce_acquired_read, |
127 | | - bounce_contended_write, |
128 | | - bounce_contended_read, |
129 | | - nr_bounce_types, |
130 | | - |
131 | | - bounce_acquired = bounce_acquired_write, |
132 | | - bounce_contended = bounce_contended_write, |
133 | | -}; |
134 | | - |
135 | | -struct lock_class_stats { |
136 | | - unsigned long contention_point[LOCKSTAT_POINTS]; |
137 | | - unsigned long contending_point[LOCKSTAT_POINTS]; |
138 | | - struct lock_time read_waittime; |
139 | | - struct lock_time write_waittime; |
140 | | - struct lock_time read_holdtime; |
141 | | - struct lock_time write_holdtime; |
142 | | - unsigned long bounces[nr_bounce_types]; |
143 | | -}; |
144 | | - |
145 | | -struct lock_class_stats lock_stats(struct lock_class *class); |
146 | | -void clear_lock_stats(struct lock_class *class); |
147 | | -#endif |
148 | | - |
149 | | -/* |
150 | | - * Map the lock object (the lock instance) to the lock-class object. |
151 | | - * This is embedded into specific lock instances: |
152 | | - */ |
153 | | -struct lockdep_map { |
154 | | - struct lock_class_key *key; |
155 | | - struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; |
156 | | - const char *name; |
157 | | -#ifdef CONFIG_LOCK_STAT |
158 | | - int cpu; |
159 | | - unsigned long ip; |
160 | | -#endif |
161 | | -}; |
162 | 29 | |
163 | 30 | static inline void lockdep_copy_map(struct lockdep_map *to, |
164 | 31 | struct lockdep_map *from) |
.. | .. |
185 | 52 | struct lock_list { |
186 | 53 | struct list_head entry; |
187 | 54 | struct lock_class *class; |
188 | | - struct stack_trace trace; |
189 | | - int distance; |
| 55 | + struct lock_class *links_to; |
| 56 | + const struct lock_trace *trace; |
| 57 | + u16 distance; |
| 58 | + /* bitmap of different dependencies from head to this */ |
| 59 | + u8 dep; |
| 60 | + /* used by BFS to record whether "prev -> this" only has -(*R)-> */ |
| 61 | + u8 only_xr; |
190 | 62 | |
191 | 63 | /* |
192 | 64 | * The parent field is used to implement breadth-first search, and the |
.. | .. |
195 | 67 | struct lock_list *parent; |
196 | 68 | }; |
197 | 69 | |
198 | | -/* |
199 | | - * We record lock dependency chains, so that we can cache them: |
| 70 | +/** |
| 71 | + * struct lock_chain - lock dependency chain record |
| 72 | + * |
| 73 | + * @irq_context: the same as irq_context in held_lock below |
| 74 | + * @depth: the number of held locks in this chain |
| 75 | + * @base: the index in chain_hlocks for this chain |
| 76 | + * @entry: the collided lock chains in lock_chain hash list |
| 77 | + * @chain_key: the hash key of this lock_chain |
200 | 78 | */ |
201 | 79 | struct lock_chain { |
202 | | - /* see BUILD_BUG_ON()s in lookup_chain_cache() */ |
| 80 | + /* see BUILD_BUG_ON()s in add_chain_cache() */ |
203 | 81 | unsigned int irq_context : 2, |
204 | 82 | depth : 6, |
205 | 83 | base : 24; |
.. | .. |
209 | 87 | }; |
210 | 88 | |
211 | 89 | #define MAX_LOCKDEP_KEYS_BITS 13 |
212 | | -/* |
213 | | - * Subtract one because we offset hlock->class_idx by 1 in order |
214 | | - * to make 0 mean no class. This avoids overflowing the class_idx |
215 | | - * bitfield and hitting the BUG in hlock_class(). |
216 | | - */ |
217 | | -#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) |
| 90 | +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) |
| 91 | +#define INITIAL_CHAIN_KEY -1 |
218 | 92 | |
219 | 93 | struct held_lock { |
220 | 94 | /* |
.. | .. |
239 | 113 | u64 waittime_stamp; |
240 | 114 | u64 holdtime_stamp; |
241 | 115 | #endif |
| 116 | + /* |
| 117 | + * class_idx is zero-indexed; it points to the element in |
| 118 | + * lock_classes this held lock instance belongs to. class_idx is in |
| 119 | + * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. |
| 120 | + */ |
242 | 121 | unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; |
243 | 122 | /* |
244 | 123 | * The lock-stack is unified in that the lock chains of interrupt |
.. | .. |
271 | 150 | extern void lockdep_reset_lock(struct lockdep_map *lock); |
272 | 151 | extern void lockdep_free_key_range(void *start, unsigned long size); |
273 | 152 | extern asmlinkage void lockdep_sys_exit(void); |
| 153 | +extern void lockdep_set_selftest_task(struct task_struct *task); |
274 | 154 | |
275 | | -extern void lockdep_off(void); |
276 | | -extern void lockdep_on(void); |
| 155 | +extern void lockdep_init_task(struct task_struct *task); |
| 156 | + |
| 157 | +/* |
| 158 | + * Split the recursion counter in two to readily detect 'off' vs recursion. |
| 159 | + */ |
| 160 | +#define LOCKDEP_RECURSION_BITS 16 |
| 161 | +#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS) |
| 162 | +#define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1) |
| 163 | + |
| 164 | +/* |
| 165 | + * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due |
| 166 | + * to header dependencies. |
| 167 | + */ |
| 168 | + |
| 169 | +#define lockdep_off() \ |
| 170 | +do { \ |
| 171 | + current->lockdep_recursion += LOCKDEP_OFF; \ |
| 172 | +} while (0) |
| 173 | + |
| 174 | +#define lockdep_on() \ |
| 175 | +do { \ |
| 176 | + current->lockdep_recursion -= LOCKDEP_OFF; \ |
| 177 | +} while (0) |
| 178 | + |
| 179 | +extern void lockdep_register_key(struct lock_class_key *key); |
| 180 | +extern void lockdep_unregister_key(struct lock_class_key *key); |
277 | 181 | |
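Note on the new register/unregister interface: lockdep_register_key() and lockdep_unregister_key() exist for lock_class_key objects that live in dynamically allocated memory rather than in static storage. A minimal sketch of the intended pairing follows; struct foo, foo_alloc() and foo_free() are hypothetical names used only for illustration, the register-before-use and unregister-before-free ordering is the point.

	struct foo {
		spinlock_t lock;
		struct lock_class_key key;
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		/* register the dynamic key before using it as a class key */
		lockdep_register_key(&f->key);
		spin_lock_init(&f->lock);
		lockdep_set_class(&f->lock, &f->key);
		return f;
	}

	static void foo_free(struct foo *f)
	{
		/* unregister before the memory holding the key is freed */
		lockdep_unregister_key(&f->key);
		kfree(f);
	}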
278 | 182 | /* |
279 | 183 | * These methods are used by specific locking variants (spinlocks, |
.. | .. |
281 | 185 | * to lockdep: |
282 | 186 | */ |
283 | 187 | |
284 | | -extern void lockdep_init_map(struct lockdep_map *lock, const char *name, |
285 | | - struct lock_class_key *key, int subclass); |
| 188 | +extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name, |
| 189 | + struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type); |
| 190 | + |
| 191 | +static inline void |
| 192 | +lockdep_init_map_waits(struct lockdep_map *lock, const char *name, |
| 193 | + struct lock_class_key *key, int subclass, u8 inner, u8 outer) |
| 194 | +{ |
| 195 | + lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL); |
| 196 | +} |
| 197 | + |
| 198 | +static inline void |
| 199 | +lockdep_init_map_wait(struct lockdep_map *lock, const char *name, |
| 200 | + struct lock_class_key *key, int subclass, u8 inner) |
| 201 | +{ |
| 202 | + lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV); |
| 203 | +} |
| 204 | + |
| 205 | +static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, |
| 206 | + struct lock_class_key *key, int subclass) |
| 207 | +{ |
| 208 | + lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV); |
| 209 | +} |
| 209 | +} |
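These wrappers layer onto lockdep_init_map_type(): the _waits variant fills in LD_LOCK_NORMAL, the _wait variant leaves the outer wait type invalid, and plain lockdep_init_map() leaves both wait types invalid. A rough sketch of how a spinning-lock primitive might wire this up; struct my_lock and my_lock_init() are hypothetical, and the LD_WAIT_* / LD_LOCK_* constants come from <linux/lockdep_types.h> included above.

	struct my_lock {
		arch_spinlock_t		raw;
		struct lockdep_map	dep_map;
	};

	static inline void my_lock_init(struct my_lock *l, const char *name,
					struct lock_class_key *key)
	{
		/* a spinning primitive reports LD_WAIT_SPIN as its inner wait type */
		lockdep_init_map_wait(&l->dep_map, name, key, 0, LD_WAIT_SPIN);
		l->raw = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	}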
286 | 210 | |
287 | 211 | /* |
288 | 212 | * Reinitialize a lock key - for cases where there is special locking or |
.. | .. |
290 | 214 | * of dependencies wrong: they are either too broad (they need a class-split) |
291 | 215 | * or they are too narrow (they suffer from a false class-split): |
292 | 216 | */ |
293 | | -#define lockdep_set_class(lock, key) \ |
294 | | - lockdep_init_map(&(lock)->dep_map, #key, key, 0) |
295 | | -#define lockdep_set_class_and_name(lock, key, name) \ |
296 | | - lockdep_init_map(&(lock)->dep_map, name, key, 0) |
297 | | -#define lockdep_set_class_and_subclass(lock, key, sub) \ |
298 | | - lockdep_init_map(&(lock)->dep_map, #key, key, sub) |
299 | | -#define lockdep_set_subclass(lock, sub) \ |
300 | | - lockdep_init_map(&(lock)->dep_map, #lock, \ |
301 | | - (lock)->dep_map.key, sub) |
| 217 | +#define lockdep_set_class(lock, key) \ |
| 218 | + lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \ |
| 219 | + (lock)->dep_map.wait_type_inner, \ |
| 220 | + (lock)->dep_map.wait_type_outer, \ |
| 221 | + (lock)->dep_map.lock_type) |
| 222 | + |
| 223 | +#define lockdep_set_class_and_name(lock, key, name) \ |
| 224 | + lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \ |
| 225 | + (lock)->dep_map.wait_type_inner, \ |
| 226 | + (lock)->dep_map.wait_type_outer, \ |
| 227 | + (lock)->dep_map.lock_type) |
| 228 | + |
| 229 | +#define lockdep_set_class_and_subclass(lock, key, sub) \ |
| 230 | + lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \ |
| 231 | + (lock)->dep_map.wait_type_inner, \ |
| 232 | + (lock)->dep_map.wait_type_outer, \ |
| 233 | + (lock)->dep_map.lock_type) |
| 234 | + |
| 235 | +#define lockdep_set_subclass(lock, sub) \ |
| 236 | + lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ |
| 237 | + (lock)->dep_map.wait_type_inner, \ |
| 238 | + (lock)->dep_map.wait_type_outer, \ |
| 239 | + (lock)->dep_map.lock_type) |
302 | 240 | |
303 | 241 | #define lockdep_set_novalidate_class(lock) \ |
304 | 242 | lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) |
| 243 | + |
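A typical use of these re-keying macros is to split what would otherwise be a single lock class into several, so that locks with genuinely different nesting rules are validated separately. A minimal sketch; the device structure and all foo_* names are hypothetical.

	static struct lock_class_key foo_rx_key;
	static struct lock_class_key foo_tx_key;

	static void foo_init_queues(struct foo_dev *dev)
	{
		spin_lock_init(&dev->rx_lock);
		spin_lock_init(&dev->tx_lock);
		/*
		 * Give RX and TX their own classes so RX-inside-TX nesting is
		 * not mistaken for recursive locking of one class.
		 */
		lockdep_set_class_and_name(&dev->rx_lock, &foo_rx_key, "foo_rx");
		lockdep_set_class_and_name(&dev->tx_lock, &foo_tx_key, "foo_tx");
	}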
305 | 244 | /* |
306 | 245 | * Compare locking classes |
307 | 246 | */ |
.. | .. |
331 | 270 | int trylock, int read, int check, |
332 | 271 | struct lockdep_map *nest_lock, unsigned long ip); |
333 | 272 | |
334 | | -extern void lock_release(struct lockdep_map *lock, int nested, |
335 | | - unsigned long ip); |
| 273 | +extern void lock_release(struct lockdep_map *lock, unsigned long ip); |
336 | 274 | |
337 | 275 | /* |
338 | 276 | * Same "read" as for lock_acquire(), except -1 means any. |
.. | .. |
359 | 297 | |
360 | 298 | extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip); |
361 | 299 | |
362 | | -struct pin_cookie { unsigned int val; }; |
363 | | - |
364 | 300 | #define NIL_COOKIE (struct pin_cookie){ .val = 0U, } |
365 | 301 | |
366 | 302 | extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock); |
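lock_pin_lock() and its companions let a lock holder assert that the lock is not dropped and re-taken behind its back over a code region; the returned cookie must be handed back when unpinning. Callers normally go through the lockdep_pin_lock()/lockdep_unpin_lock() wrappers (defined further down in this header, outside the visible hunk). A sketch with a hypothetical queue object:

	static void foo_drain(struct foo_queue *q)
	{
		struct pin_cookie cookie;

		spin_lock(&q->lock);
		cookie = lockdep_pin_lock(&q->lock);

		foo_process_all(q);	/* must not drop q->lock */

		lockdep_unpin_lock(&q->lock, cookie);
		spin_unlock(&q->lock);
	}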
.. | .. |
373 | 309 | WARN_ON(debug_locks && !lockdep_is_held(l)); \ |
374 | 310 | } while (0) |
375 | 311 | |
376 | | -#define lockdep_assert_held_exclusive(l) do { \ |
| 312 | +#define lockdep_assert_held_write(l) do { \ |
377 | 313 | WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ |
378 | 314 | } while (0) |
379 | 315 | |
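lockdep_assert_held_write() (the rename of lockdep_assert_held_exclusive()) is meant for helpers whose contract is "caller holds the lock for writing". A minimal sketch; struct foo and foo_del_locked() are hypothetical.

	static void foo_del_locked(struct foo *f)
	{
		/* caller must hold f->rwsem for writing */
		lockdep_assert_held_write(&f->rwsem);
		list_del(&f->node);
	}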
.. | .. |
393 | 329 | |
394 | 330 | #else /* !CONFIG_LOCKDEP */ |
395 | 331 | |
| 332 | +static inline void lockdep_init_task(struct task_struct *task) |
| 333 | +{ |
| 334 | +} |
| 335 | + |
396 | 336 | static inline void lockdep_off(void) |
397 | 337 | { |
398 | 338 | } |
.. | .. |
401 | 341 | { |
402 | 342 | } |
403 | 343 | |
| 344 | +static inline void lockdep_set_selftest_task(struct task_struct *task) |
| 345 | +{ |
| 346 | +} |
| 347 | + |
404 | 348 | # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) |
405 | | -# define lock_release(l, n, i) do { } while (0) |
| 349 | +# define lock_release(l, i) do { } while (0) |
406 | 350 | # define lock_downgrade(l, i) do { } while (0) |
407 | 351 | # define lock_set_class(l, n, k, s, i) do { } while (0) |
408 | 352 | # define lock_set_subclass(l, s, i) do { } while (0) |
409 | 353 | # define lockdep_init() do { } while (0) |
| 354 | +# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \ |
| 355 | + do { (void)(name); (void)(key); } while (0) |
| 356 | +# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \ |
| 357 | + do { (void)(name); (void)(key); } while (0) |
| 358 | +# define lockdep_init_map_wait(lock, name, key, sub, inner) \ |
| 359 | + do { (void)(name); (void)(key); } while (0) |
410 | 360 | # define lockdep_init_map(lock, name, key, sub) \ |
411 | 361 | do { (void)(name); (void)(key); } while (0) |
412 | 362 | # define lockdep_set_class(lock, key) do { (void)(key); } while (0) |
.. | .. |
427 | 377 | # define lockdep_reset() do { debug_locks = 1; } while (0) |
428 | 378 | # define lockdep_free_key_range(start, size) do { } while (0) |
429 | 379 | # define lockdep_sys_exit() do { } while (0) |
430 | | -/* |
431 | | - * The class key takes no space if lockdep is disabled: |
432 | | - */ |
433 | | -struct lock_class_key { }; |
434 | 380 | |
435 | | -/* |
436 | | - * The lockdep_map takes no space if lockdep is disabled: |
437 | | - */ |
438 | | -struct lockdep_map { }; |
| 381 | +static inline void lockdep_register_key(struct lock_class_key *key) |
| 382 | +{ |
| 383 | +} |
| 384 | + |
| 385 | +static inline void lockdep_unregister_key(struct lock_class_key *key) |
| 386 | +{ |
| 387 | +} |
439 | 388 | |
440 | 389 | #define lockdep_depth(tsk) (0) |
441 | 390 | |
442 | 391 | #define lockdep_is_held_type(l, r) (1) |
443 | 392 | |
444 | 393 | #define lockdep_assert_held(l) do { (void)(l); } while (0) |
445 | | -#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0) |
| 394 | +#define lockdep_assert_held_write(l) do { (void)(l); } while (0) |
446 | 395 | #define lockdep_assert_held_read(l) do { (void)(l); } while (0) |
447 | 396 | #define lockdep_assert_held_once(l) do { (void)(l); } while (0) |
448 | 397 | |
449 | 398 | #define lockdep_recursing(tsk) (0) |
450 | 399 | |
451 | | -struct pin_cookie { }; |
452 | | - |
453 | 400 | #define NIL_COOKIE (struct pin_cookie){ } |
454 | 401 | |
455 | | -#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; }) |
| 402 | +#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) |
456 | 403 | #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) |
457 | 404 | #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) |
458 | 405 | |
.. | .. |
473 | 420 | { .name = (_name), .key = (void *)(_key), } |
474 | 421 | |
475 | 422 | static inline void lockdep_invariant_state(bool force) {} |
476 | | -static inline void lockdep_init_task(struct task_struct *task) {} |
477 | 423 | static inline void lockdep_free_task(struct task_struct *task) {} |
478 | 424 | |
479 | 425 | #ifdef CONFIG_LOCK_STAT |
.. | .. |
540 | 486 | } |
541 | 487 | #endif |
542 | 488 | |
| 489 | +/* Variable used to make lockdep treat read_lock() as recursive in selftests */ |
| 490 | +#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS |
| 491 | +extern unsigned int force_read_lock_recursive; |
| 492 | +#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */ |
| 493 | +#define force_read_lock_recursive 0 |
| 494 | +#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */ |
| 495 | + |
| 496 | +#ifdef CONFIG_LOCKDEP |
| 497 | +extern bool read_lock_is_recursive(void); |
| 498 | +#else /* CONFIG_LOCKDEP */ |
| 499 | +/* If !LOCKDEP, the value is meaningless */ |
| 500 | +#define read_lock_is_recursive() 0 |
| 501 | +#endif |
| 502 | + |
543 | 503 | /* |
544 | 504 | * For trivial one-depth nesting of a lock-class, the following |
545 | 505 | * global define can be used. (Subsystems with multiple levels |
.. | .. |
558 | 518 | |
559 | 519 | #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
560 | 520 | #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
561 | | -#define spin_release(l, n, i) lock_release(l, n, i) |
| 521 | +#define spin_release(l, i) lock_release(l, i) |
562 | 522 | |
563 | 523 | #define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
564 | | -#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) |
565 | | -#define rwlock_release(l, n, i) lock_release(l, n, i) |
| 524 | +#define rwlock_acquire_read(l, s, t, i) \ |
| 525 | +do { \ |
| 526 | + if (read_lock_is_recursive()) \ |
| 527 | + lock_acquire_shared_recursive(l, s, t, NULL, i); \ |
| 528 | + else \ |
| 529 | + lock_acquire_shared(l, s, t, NULL, i); \ |
| 530 | +} while (0) |
| 531 | + |
| 532 | +#define rwlock_release(l, i) lock_release(l, i) |
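With read_lock_is_recursive(), rwlock_acquire_read() now chooses between the recursive-reader and plain shared annotation at acquisition time; outside the locking selftests this reflects whether read_lock() is treated as recursive by lockdep. Roughly how an rwlock read-lock path feeds this hook; a simplified sketch of the pattern in the rwlock API, with a hypothetical function name rather than a verbatim copy of the kernel's implementation:

	static inline void __my_read_lock(rwlock_t *lock)
	{
		preempt_disable();
		/* annotate the read acquisition before taking the lock */
		rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
		do_raw_read_lock(lock);
	}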
566 | 533 | |
567 | 534 | #define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
568 | 535 | #define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) |
569 | | -#define seqcount_release(l, n, i) lock_release(l, n, i) |
| 536 | +#define seqcount_release(l, i) lock_release(l, i) |
570 | 537 | |
571 | 538 | #define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
572 | 539 | #define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
573 | | -#define mutex_release(l, n, i) lock_release(l, n, i) |
| 540 | +#define mutex_release(l, i) lock_release(l, i) |
574 | 541 | |
575 | 542 | #define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
576 | 543 | #define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
577 | 544 | #define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) |
578 | | -#define rwsem_release(l, n, i) lock_release(l, n, i) |
| 545 | +#define rwsem_release(l, i) lock_release(l, i) |
579 | 546 | |
580 | 547 | #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) |
581 | 548 | #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) |
582 | 549 | #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) |
583 | | -#define lock_map_release(l) lock_release(l, 1, _THIS_IP_) |
| 550 | +#define lock_map_release(l) lock_release(l, _THIS_IP_) |
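lock_map_acquire()/lock_map_release() annotate pseudo-locks: code paths that behave like taking and releasing a lock (for example "this may wait for a worker") so lockdep can fold them into its dependency graph. A sketch using the STATIC_LOCKDEP_MAP_INIT() initializer defined elsewhere in this header; foo_flush and foo_flush_map are hypothetical names.

	static struct lockdep_map foo_flush_map =
		STATIC_LOCKDEP_MAP_INIT("foo_flush", &foo_flush_map);

	static void foo_flush(void)
	{
		/* record "callers of foo_flush() may wait on foo work" */
		lock_map_acquire(&foo_flush_map);
		lock_map_release(&foo_flush_map);

		/* ... then actually wait for outstanding work (omitted) ... */
	}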
584 | 551 | |
585 | 552 | #ifdef CONFIG_PROVE_LOCKING |
586 | | -# define might_lock(lock) \ |
| 553 | +# define might_lock(lock) \ |
587 | 554 | do { \ |
588 | 555 | typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
589 | 556 | lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ |
590 | | - lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
| 557 | + lock_release(&(lock)->dep_map, _THIS_IP_); \ |
591 | 558 | } while (0) |
592 | | -# define might_lock_read(lock) \ |
| 559 | +# define might_lock_read(lock) \ |
593 | 560 | do { \ |
594 | 561 | typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
595 | 562 | lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ |
596 | | - lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
| 563 | + lock_release(&(lock)->dep_map, _THIS_IP_); \ |
| 564 | +} while (0) |
| 565 | +# define might_lock_nested(lock, subclass) \ |
| 566 | +do { \ |
| 567 | + typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
| 568 | + lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \ |
| 569 | + _THIS_IP_); \ |
| 570 | + lock_release(&(lock)->dep_map, _THIS_IP_); \ |
597 | 571 | } while (0) |
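might_lock(), might_lock_read() and the new might_lock_nested() let a function that only sometimes takes a lock declare the potential acquisition up front, so lockdep validates the dependency even on paths that never actually block. A sketch of a fastpath/slowpath lookup; all foo_* names are hypothetical.

	static int foo_lookup(struct foo *f, unsigned long idx)
	{
		int ret;

		/* the slowpath may take f->rwsem for reading */
		might_lock_read(&f->rwsem);

		ret = foo_lookup_fast(f, idx);
		if (ret != -EAGAIN)
			return ret;

		down_read(&f->rwsem);
		ret = foo_lookup_slow(f, idx);
		up_read(&f->rwsem);
		return ret;
	}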
598 | 572 | |
599 | | -#define lockdep_assert_irqs_enabled() do { \ |
600 | | - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ |
601 | | - !current->hardirqs_enabled, \ |
602 | | - "IRQs not enabled as expected\n"); \ |
603 | | - } while (0) |
| 573 | +DECLARE_PER_CPU(int, hardirqs_enabled); |
| 574 | +DECLARE_PER_CPU(int, hardirq_context); |
| 575 | +DECLARE_PER_CPU(unsigned int, lockdep_recursion); |
604 | 576 | |
605 | | -#define lockdep_assert_irqs_disabled() do { \ |
606 | | - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ |
607 | | - current->hardirqs_enabled, \ |
608 | | - "IRQs not disabled as expected\n"); \ |
609 | | - } while (0) |
| 577 | +#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion)) |
| 578 | + |
| 579 | +#define lockdep_assert_irqs_enabled() \ |
| 580 | +do { \ |
| 581 | + WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \ |
| 582 | +} while (0) |
| 583 | + |
| 584 | +#define lockdep_assert_irqs_disabled() \ |
| 585 | +do { \ |
| 586 | + WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \ |
| 587 | +} while (0) |
| 588 | + |
| 589 | +#define lockdep_assert_in_irq() \ |
| 590 | +do { \ |
| 591 | + WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \ |
| 592 | +} while (0) |
| 593 | + |
| 594 | +#define lockdep_assert_preemption_enabled() \ |
| 595 | +do { \ |
| 596 | + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ |
| 597 | + __lockdep_enabled && \ |
| 598 | + (preempt_count() != 0 || \ |
| 599 | + !this_cpu_read(hardirqs_enabled))); \ |
| 600 | +} while (0) |
| 601 | + |
| 602 | +#define lockdep_assert_preemption_disabled() \ |
| 603 | +do { \ |
| 604 | + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ |
| 605 | + __lockdep_enabled && \ |
| 606 | + (preempt_count() == 0 && \ |
| 607 | + this_cpu_read(hardirqs_enabled))); \ |
| 608 | +} while (0) |
610 | 609 | |
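The assertions now key off per-CPU state (hardirqs_enabled, hardirq_context, lockdep_recursion) instead of fields in current, and the new preemption variants check preempt_count() together with the IRQ state. Typical use is to document a context requirement at the top of a helper; a minimal sketch, where foo_events and foo_account_event() are hypothetical:

	static DEFINE_PER_CPU(unsigned long, foo_events);

	static void foo_account_event(void)
	{
		/* caller must have preemption disabled for the per-CPU update */
		lockdep_assert_preemption_disabled();
		__this_cpu_inc(foo_events);
	}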
611 | 610 | #else |
612 | 611 | # define might_lock(lock) do { } while (0) |
613 | 612 | # define might_lock_read(lock) do { } while (0) |
| 613 | +# define might_lock_nested(lock, subclass) do { } while (0) |
| 614 | + |
614 | 615 | # define lockdep_assert_irqs_enabled() do { } while (0) |
615 | 616 | # define lockdep_assert_irqs_disabled() do { } while (0) |
| 617 | +# define lockdep_assert_in_irq() do { } while (0) |
| 618 | + |
| 619 | +# define lockdep_assert_preemption_enabled() do { } while (0) |
| 620 | +# define lockdep_assert_preemption_disabled() do { } while (0) |
| 621 | +#endif |
| 622 | + |
| 623 | +#ifdef CONFIG_PROVE_RAW_LOCK_NESTING |
| 624 | + |
| 625 | +# define lockdep_assert_RT_in_threaded_ctx() do { \ |
| 626 | + WARN_ONCE(debug_locks && !current->lockdep_recursion && \ |
| 627 | + lockdep_hardirq_context() && \ |
| 628 | + !(current->hardirq_threaded || current->irq_config), \ |
| 629 | + "Not in threaded context on PREEMPT_RT as expected\n"); \ |
| 630 | +} while (0) |
| 631 | + |
| 632 | +#else |
| 633 | + |
| 634 | +# define lockdep_assert_RT_in_threaded_ctx() do { } while (0) |
| 635 | + |
616 | 636 | #endif |
617 | 637 | |
618 | 638 | #ifdef CONFIG_LOCKDEP |