.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * kernel/lockdep.c |
---|
3 | 4 | * |
---|
.. | .. |
---|
45 | 46 | #include <linux/hash.h> |
---|
46 | 47 | #include <linux/ftrace.h> |
---|
47 | 48 | #include <linux/stringify.h> |
---|
| 49 | +#include <linux/bitmap.h> |
---|
48 | 50 | #include <linux/bitops.h> |
---|
49 | 51 | #include <linux/gfp.h> |
---|
50 | 52 | #include <linux/random.h> |
---|
51 | 53 | #include <linux/jhash.h> |
---|
52 | 54 | #include <linux/nmi.h> |
---|
| 55 | +#include <linux/rcupdate.h> |
---|
| 56 | +#include <linux/kprobes.h> |
---|
53 | 57 | |
---|
54 | 58 | #include <asm/sections.h> |
---|
55 | 59 | |
---|
.. | .. |
---|
72 | 76 | #define lock_stat 0 |
---|
73 | 77 | #endif |
---|
74 | 78 | |
---|
| 79 | +DEFINE_PER_CPU(unsigned int, lockdep_recursion); |
---|
| 80 | +EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion); |
---|
| 81 | + |
---|
| 82 | +static __always_inline bool lockdep_enabled(void) |
---|
| 83 | +{ |
---|
| 84 | + if (!debug_locks) |
---|
| 85 | + return false; |
---|
| 86 | + |
---|
| 87 | + if (this_cpu_read(lockdep_recursion)) |
---|
| 88 | + return false; |
---|
| 89 | + |
---|
| 90 | + if (current->lockdep_recursion) |
---|
| 91 | + return false; |
---|
| 92 | + |
---|
| 93 | + return true; |
---|
| 94 | +} |
---|
| 95 | + |
---|
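The guard added above layers three checks: the global `debug_locks` switch, a per-CPU recursion counter and the per-task counter. A minimal userspace sketch of the same gate is shown below; the thread-local variables are stand-ins for `this_cpu_read(lockdep_recursion)` and `current->lockdep_recursion` and are assumptions of the model, not the kernel API.

```c
#include <stdbool.h>

/* Stand-ins for the kernel's per-CPU and per-task recursion counters. */
static _Thread_local unsigned int percpu_recursion;
static _Thread_local unsigned int task_recursion;
static bool debug_locks = true;

/* Mirror of lockdep_enabled(): refuse to run if lockdep is off or re-entered. */
static bool lockdep_enabled_model(void)
{
	if (!debug_locks)
		return false;
	if (percpu_recursion)		/* re-entry from lockdep itself on this CPU */
		return false;
	if (task_recursion)		/* re-entry flagged on the current task */
		return false;
	return true;
}

int main(void)
{
	percpu_recursion++;			/* simulate entering lockdep code */
	return lockdep_enabled_model();		/* now reports false (0) */
}
```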
75 | 96 | /* |
---|
76 | 97 | * lockdep_lock: protects the lockdep graph, the hashes and the |
---|
77 | 98 | * class/list/hash allocators. |
---|
.. | .. |
---|
80 | 101 | * to use a raw spinlock - we really dont want the spinlock |
---|
81 | 102 | * code to recurse back into the lockdep code... |
---|
82 | 103 | */ |
---|
83 | | -static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
---|
| 104 | +static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
---|
| 105 | +static struct task_struct *__owner; |
---|
| 106 | + |
---|
| 107 | +static inline void lockdep_lock(void) |
---|
| 108 | +{ |
---|
| 109 | + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); |
---|
| 110 | + |
---|
| 111 | + __this_cpu_inc(lockdep_recursion); |
---|
| 112 | + arch_spin_lock(&__lock); |
---|
| 113 | + __owner = current; |
---|
| 114 | +} |
---|
| 115 | + |
---|
| 116 | +static inline void lockdep_unlock(void) |
---|
| 117 | +{ |
---|
| 118 | + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); |
---|
| 119 | + |
---|
| 120 | + if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current)) |
---|
| 121 | + return; |
---|
| 122 | + |
---|
| 123 | + __owner = NULL; |
---|
| 124 | + arch_spin_unlock(&__lock); |
---|
| 125 | + __this_cpu_dec(lockdep_recursion); |
---|
| 126 | +} |
---|
| 127 | + |
---|
| 128 | +static inline bool lockdep_assert_locked(void) |
---|
| 129 | +{ |
---|
| 130 | + return DEBUG_LOCKS_WARN_ON(__owner != current); |
---|
| 131 | +} |
---|
| 132 | + |
---|
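The new `lockdep_lock()`/`lockdep_unlock()` pair records the owning task so that misuse can be caught and `lockdep_assert_locked()` can verify "held by me". The pattern is easy to reproduce with pthreads; this is a userspace sketch under those assumptions, not the kernel's `arch_spin_lock()`-based implementation.

```c
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t graph_lock_ = PTHREAD_MUTEX_INITIALIZER;
static pthread_t graph_owner;
static bool graph_owned;

static void graph_lock_acquire(void)
{
	pthread_mutex_lock(&graph_lock_);
	graph_owner = pthread_self();		/* record the owner, like __owner = current */
	graph_owned = true;
}

static void graph_lock_release(void)
{
	/* Catch an unlock from a thread that never took the lock. */
	assert(graph_owned && pthread_equal(graph_owner, pthread_self()));
	graph_owned = false;
	pthread_mutex_unlock(&graph_lock_);
}

static void graph_assert_held(void)
{
	assert(graph_owned && pthread_equal(graph_owner, pthread_self()));
}

int main(void)
{
	graph_lock_acquire();
	graph_assert_held();	/* fine: we are the owner */
	graph_lock_release();
	return 0;
}
```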
| 133 | +static struct task_struct *lockdep_selftest_task_struct; |
---|
| 134 | + |
---|
84 | 135 | |
---|
85 | 136 | static int graph_lock(void) |
---|
86 | 137 | { |
---|
87 | | - arch_spin_lock(&lockdep_lock); |
---|
| 138 | + lockdep_lock(); |
---|
88 | 139 | /* |
---|
89 | 140 | * Make sure that if another CPU detected a bug while |
---|
90 | 141 | * walking the graph we dont change it (while the other |
---|
.. | .. |
---|
92 | 143 | * dropped already) |
---|
93 | 144 | */ |
---|
94 | 145 | if (!debug_locks) { |
---|
95 | | - arch_spin_unlock(&lockdep_lock); |
---|
| 146 | + lockdep_unlock(); |
---|
96 | 147 | return 0; |
---|
97 | 148 | } |
---|
98 | | - /* prevent any recursions within lockdep from causing deadlocks */ |
---|
99 | | - current->lockdep_recursion++; |
---|
100 | 149 | return 1; |
---|
101 | 150 | } |
---|
102 | 151 | |
---|
103 | | -static inline int graph_unlock(void) |
---|
| 152 | +static inline void graph_unlock(void) |
---|
104 | 153 | { |
---|
105 | | - if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) { |
---|
106 | | - /* |
---|
107 | | - * The lockdep graph lock isn't locked while we expect it to |
---|
108 | | - * be, we're confused now, bye! |
---|
109 | | - */ |
---|
110 | | - return DEBUG_LOCKS_WARN_ON(1); |
---|
111 | | - } |
---|
112 | | - |
---|
113 | | - current->lockdep_recursion--; |
---|
114 | | - arch_spin_unlock(&lockdep_lock); |
---|
115 | | - return 0; |
---|
| 154 | + lockdep_unlock(); |
---|
116 | 155 | } |
---|
117 | 156 | |
---|
118 | 157 | /* |
---|
.. | .. |
---|
123 | 162 | { |
---|
124 | 163 | int ret = debug_locks_off(); |
---|
125 | 164 | |
---|
126 | | - arch_spin_unlock(&lockdep_lock); |
---|
| 165 | + lockdep_unlock(); |
---|
127 | 166 | |
---|
128 | 167 | return ret; |
---|
129 | 168 | } |
---|
130 | 169 | |
---|
131 | 170 | unsigned long nr_list_entries; |
---|
132 | 171 | static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; |
---|
| 172 | +static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES); |
---|
133 | 173 | |
---|
134 | 174 | /* |
---|
135 | 175 | * All data structures here are protected by the global debug_lock. |
---|
136 | 176 | * |
---|
137 | | - * Mutex key structs only get allocated, once during bootup, and never |
---|
138 | | - * get freed - this significantly simplifies the debugging code. |
---|
| 177 | + * nr_lock_classes is the number of elements of lock_classes[] that is |
---|
| 178 | + * in use. |
---|
139 | 179 | */ |
---|
| 180 | +#define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) |
---|
| 181 | +#define KEYHASH_SIZE (1UL << KEYHASH_BITS) |
---|
| 182 | +static struct hlist_head lock_keys_hash[KEYHASH_SIZE]; |
---|
140 | 183 | unsigned long nr_lock_classes; |
---|
141 | | -static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; |
---|
| 184 | +unsigned long nr_zapped_classes; |
---|
| 185 | +unsigned long max_lock_class_idx; |
---|
| 186 | +struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; |
---|
| 187 | +DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS); |
---|
142 | 188 | |
---|
143 | 189 | static inline struct lock_class *hlock_class(struct held_lock *hlock) |
---|
144 | 190 | { |
---|
145 | | - if (!hlock->class_idx) { |
---|
| 191 | + unsigned int class_idx = hlock->class_idx; |
---|
| 192 | + |
---|
| 193 | + /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */ |
---|
| 194 | + barrier(); |
---|
| 195 | + |
---|
| 196 | + if (!test_bit(class_idx, lock_classes_in_use)) { |
---|
146 | 197 | /* |
---|
147 | 198 | * Someone passed in garbage, we give up. |
---|
148 | 199 | */ |
---|
149 | 200 | DEBUG_LOCKS_WARN_ON(1); |
---|
150 | 201 | return NULL; |
---|
151 | 202 | } |
---|
152 | | - return lock_classes + hlock->class_idx - 1; |
---|
| 203 | + |
---|
| 204 | + /* |
---|
| 205 | + * At this point, if the passed hlock->class_idx is still garbage, |
---|
| 206 | + * we just have to live with it |
---|
| 207 | + */ |
---|
| 208 | + return lock_classes + class_idx; |
---|
153 | 209 | } |
---|
154 | 210 | |
---|
155 | 211 | #ifdef CONFIG_LOCK_STAT |
---|
.. | .. |
---|
274 | 330 | #endif |
---|
275 | 331 | |
---|
276 | 332 | /* |
---|
277 | | - * We keep a global list of all lock classes. The list only grows, |
---|
278 | | - * never shrinks. The list is only accessed with the lockdep |
---|
279 | | - * spinlock lock held. |
---|
| 333 | + * We keep a global list of all lock classes. The list is only accessed with |
---|
| 334 | + * the lockdep spinlock lock held. free_lock_classes is a list with free |
---|
| 335 | + * elements. These elements are linked together by the lock_entry member in |
---|
| 336 | + * struct lock_class. |
---|
280 | 337 | */ |
---|
281 | | -LIST_HEAD(all_lock_classes); |
---|
| 338 | +static LIST_HEAD(all_lock_classes); |
---|
| 339 | +static LIST_HEAD(free_lock_classes); |
---|
| 340 | + |
---|
| 341 | +/** |
---|
| 342 | + * struct pending_free - information about data structures about to be freed |
---|
| 343 | + * @zapped: Head of a list with struct lock_class elements. |
---|
| 344 | + * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements |
---|
| 345 | + * are about to be freed. |
---|
| 346 | + */ |
---|
| 347 | +struct pending_free { |
---|
| 348 | + struct list_head zapped; |
---|
| 349 | + DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS); |
---|
| 350 | +}; |
---|
| 351 | + |
---|
| 352 | +/** |
---|
| 353 | + * struct delayed_free - data structures used for delayed freeing |
---|
| 354 | + * |
---|
| 355 | + * A data structure for delayed freeing of data structures that may be |
---|
| 356 | + * accessed by RCU readers at the time these were freed. |
---|
| 357 | + * |
---|
| 358 | + * @rcu_head: Used to schedule an RCU callback for freeing data structures. |
---|
| 359 | + * @index: Index of @pf to which freed data structures are added. |
---|
| 360 | + * @scheduled: Whether or not an RCU callback has been scheduled. |
---|
| 361 | + * @pf: Array with information about data structures about to be freed. |
---|
| 362 | + */ |
---|
| 363 | +static struct delayed_free { |
---|
| 364 | + struct rcu_head rcu_head; |
---|
| 365 | + int index; |
---|
| 366 | + int scheduled; |
---|
| 367 | + struct pending_free pf[2]; |
---|
| 368 | +} delayed_free; |
---|
282 | 369 | |
---|
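The `pf[]` pair above implements a "zap now, free after a grace period" scheme. Below is a minimal kernel-style sketch of that general pattern using a hypothetical `zap_batch`/`zap_obj` pair rather than this file's actual structures; only `call_rcu()` and the list helpers are real APIs.

```c
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct zap_obj {			/* hypothetical object being retired */
	struct list_head entry;
};

struct zap_batch {			/* hypothetical analogue of struct pending_free */
	struct rcu_head rcu_head;
	struct list_head zapped;
};

static void zap_batch_reclaim(struct rcu_head *rh)
{
	struct zap_batch *b = container_of(rh, struct zap_batch, rcu_head);
	struct zap_obj *o, *tmp;

	/* Runs after a grace period: no pre-existing RCU reader can still see these. */
	list_for_each_entry_safe(o, tmp, &b->zapped, entry) {
		list_del(&o->entry);
		kfree(o);
	}
	kfree(b);
}

static void zap_batch_schedule(struct zap_batch *b)
{
	/* Objects were already unlinked from the readers' view; defer the free. */
	call_rcu(&b->rcu_head, zap_batch_reclaim);
}
```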
283 | 370 | /* |
---|
284 | 371 | * The lockdep classes are in a hash-table as well, for fast lookup: |
---|
.. | .. |
---|
302 | 389 | static struct hlist_head chainhash_table[CHAINHASH_SIZE]; |
---|
303 | 390 | |
---|
304 | 391 | /* |
---|
| 392 | + * the id of held_lock |
---|
| 393 | + */ |
---|
| 394 | +static inline u16 hlock_id(struct held_lock *hlock) |
---|
| 395 | +{ |
---|
| 396 | + BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16); |
---|
| 397 | + |
---|
| 398 | + return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS)); |
---|
| 399 | +} |
---|
| 400 | + |
---|
| 401 | +static inline unsigned int chain_hlock_class_idx(u16 hlock_id) |
---|
| 402 | +{ |
---|
| 403 | + return hlock_id & (MAX_LOCKDEP_KEYS - 1); |
---|
| 404 | +} |
---|
| 405 | + |
---|
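`hlock_id()` packs the class index into the low `MAX_LOCKDEP_KEYS_BITS` bits of a u16 and the 2-bit read state above it, which is what the `BUILD_BUG_ON()` guards. A standalone sketch of the packing follows; the value 13 for the key bits is the usual Kconfig default and is assumed here for illustration.

```c
#include <assert.h>
#include <stdint.h>

#define KEYS_BITS 13			/* assumed MAX_LOCKDEP_KEYS_BITS */
#define KEYS_MAX  (1U << KEYS_BITS)	/* assumed MAX_LOCKDEP_KEYS */

static inline uint16_t pack_hlock_id(unsigned int class_idx, unsigned int read)
{
	/* class index in the low bits, the 2-bit read state above it */
	return (uint16_t)(class_idx | (read << KEYS_BITS));
}

static inline unsigned int unpack_class_idx(uint16_t id)
{
	return id & (KEYS_MAX - 1);
}

int main(void)
{
	uint16_t id = pack_hlock_id(42, 2);	/* recursive-read acquisition */

	assert(unpack_class_idx(id) == 42);
	assert((id >> KEYS_BITS) == 2);
	return 0;
}
```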
| 406 | +/* |
---|
305 | 407 | * The hash key of the lock dependency chains is a hash itself too: |
---|
306 | 408 | * it's a hash of all locks taken up to that lock, including that lock. |
---|
307 | 409 | * It's a 64-bit hash, because it's important for the keys to be |
---|
.. | .. |
---|
316 | 418 | return k0 | (u64)k1 << 32; |
---|
317 | 419 | } |
---|
318 | 420 | |
---|
319 | | -void lockdep_off(void) |
---|
| 421 | +void lockdep_init_task(struct task_struct *task) |
---|
320 | 422 | { |
---|
321 | | - current->lockdep_recursion++; |
---|
| 423 | + task->lockdep_depth = 0; /* no locks held yet */ |
---|
| 424 | + task->curr_chain_key = INITIAL_CHAIN_KEY; |
---|
| 425 | + task->lockdep_recursion = 0; |
---|
322 | 426 | } |
---|
323 | | -EXPORT_SYMBOL(lockdep_off); |
---|
324 | 427 | |
---|
325 | | -void lockdep_on(void) |
---|
| 428 | +static __always_inline void lockdep_recursion_inc(void) |
---|
326 | 429 | { |
---|
327 | | - current->lockdep_recursion--; |
---|
| 430 | + __this_cpu_inc(lockdep_recursion); |
---|
328 | 431 | } |
---|
329 | | -EXPORT_SYMBOL(lockdep_on); |
---|
| 432 | + |
---|
| 433 | +static __always_inline void lockdep_recursion_finish(void) |
---|
| 434 | +{ |
---|
| 435 | + if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion))) |
---|
| 436 | + __this_cpu_write(lockdep_recursion, 0); |
---|
| 437 | +} |
---|
| 438 | + |
---|
| 439 | +void lockdep_set_selftest_task(struct task_struct *task) |
---|
| 440 | +{ |
---|
| 441 | + lockdep_selftest_task_struct = task; |
---|
| 442 | +} |
---|
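Taken together, `lockdep_enabled()`, `lockdep_recursion_inc()` and `lockdep_recursion_finish()` give lockdep's entry points their common shape. The sketch below shows that shape in simplified form; `lock_acquire_shape()` is a hypothetical name, the internal helper call is elided, and the exact body varies per entry point.

```c
/* Sketch of how the tracking hooks wrap their internal helpers (simplified). */
void lock_acquire_shape(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	raw_local_irq_save(flags);		/* lockdep must run with IRQs off */
	if (!lockdep_enabled()) {		/* disabled, or already inside lockdep */
		raw_local_irq_restore(flags);
		return;
	}

	lockdep_recursion_inc();		/* mark "inside lockdep" on this CPU */
	/* ... call the internal __lock_acquire()-style helper here ... */
	lockdep_recursion_finish();		/* clear the marker, warn on imbalance */
	raw_local_irq_restore(flags);
}
```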
330 | 443 | |
---|
331 | 444 | /* |
---|
332 | 445 | * Debugging switches: |
---|
.. | .. |
---|
371 | 484 | return 0; |
---|
372 | 485 | } |
---|
373 | 486 | |
---|
374 | | -/* |
---|
375 | | - * Stack-trace: tightly packed array of stack backtrace |
---|
376 | | - * addresses. Protected by the graph_lock. |
---|
377 | | - */ |
---|
378 | | -unsigned long nr_stack_trace_entries; |
---|
379 | | -static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; |
---|
380 | | - |
---|
381 | 487 | static void print_lockdep_off(const char *bug_msg) |
---|
382 | 488 | { |
---|
383 | 489 | printk(KERN_DEBUG "%s\n", bug_msg); |
---|
.. | .. |
---|
387 | 493 | #endif |
---|
388 | 494 | } |
---|
389 | 495 | |
---|
390 | | -static int save_trace(struct stack_trace *trace) |
---|
| 496 | +unsigned long nr_stack_trace_entries; |
---|
| 497 | + |
---|
| 498 | +#ifdef CONFIG_PROVE_LOCKING |
---|
| 499 | +/** |
---|
| 500 | + * struct lock_trace - single stack backtrace |
---|
| 501 | + * @hash_entry: Entry in a stack_trace_hash[] list. |
---|
| 502 | + * @hash: jhash() of @entries. |
---|
| 503 | + * @nr_entries: Number of entries in @entries. |
---|
| 504 | + * @entries: Actual stack backtrace. |
---|
| 505 | + */ |
---|
| 506 | +struct lock_trace { |
---|
| 507 | + struct hlist_node hash_entry; |
---|
| 508 | + u32 hash; |
---|
| 509 | + u32 nr_entries; |
---|
| 510 | + unsigned long entries[] __aligned(sizeof(unsigned long)); |
---|
| 511 | +}; |
---|
| 512 | +#define LOCK_TRACE_SIZE_IN_LONGS \ |
---|
| 513 | + (sizeof(struct lock_trace) / sizeof(unsigned long)) |
---|
| 514 | +/* |
---|
| 515 | + * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock. |
---|
| 516 | + */ |
---|
| 517 | +static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; |
---|
| 518 | +static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE]; |
---|
| 519 | + |
---|
| 520 | +static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2) |
---|
391 | 521 | { |
---|
392 | | - trace->nr_entries = 0; |
---|
393 | | - trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; |
---|
394 | | - trace->entries = stack_trace + nr_stack_trace_entries; |
---|
| 522 | + return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries && |
---|
| 523 | + memcmp(t1->entries, t2->entries, |
---|
| 524 | + t1->nr_entries * sizeof(t1->entries[0])) == 0; |
---|
| 525 | +} |
---|
395 | 526 | |
---|
396 | | - trace->skip = 3; |
---|
| 527 | +static struct lock_trace *save_trace(void) |
---|
| 528 | +{ |
---|
| 529 | + struct lock_trace *trace, *t2; |
---|
| 530 | + struct hlist_head *hash_head; |
---|
| 531 | + u32 hash; |
---|
| 532 | + int max_entries; |
---|
397 | 533 | |
---|
398 | | - save_stack_trace(trace); |
---|
| 534 | + BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE); |
---|
| 535 | + BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES); |
---|
399 | 536 | |
---|
400 | | - /* |
---|
401 | | - * Some daft arches put -1 at the end to indicate its a full trace. |
---|
402 | | - * |
---|
403 | | - * <rant> this is buggy anyway, since it takes a whole extra entry so a |
---|
404 | | - * complete trace that maxes out the entries provided will be reported |
---|
405 | | - * as incomplete, friggin useless </rant> |
---|
406 | | - */ |
---|
407 | | - if (trace->nr_entries != 0 && |
---|
408 | | - trace->entries[trace->nr_entries-1] == ULONG_MAX) |
---|
409 | | - trace->nr_entries--; |
---|
| 537 | + trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries); |
---|
| 538 | + max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries - |
---|
| 539 | + LOCK_TRACE_SIZE_IN_LONGS; |
---|
410 | 540 | |
---|
411 | | - trace->max_entries = trace->nr_entries; |
---|
412 | | - |
---|
413 | | - nr_stack_trace_entries += trace->nr_entries; |
---|
414 | | - |
---|
415 | | - if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { |
---|
| 541 | + if (max_entries <= 0) { |
---|
416 | 542 | if (!debug_locks_off_graph_unlock()) |
---|
417 | | - return 0; |
---|
| 543 | + return NULL; |
---|
418 | 544 | |
---|
419 | 545 | print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!"); |
---|
420 | 546 | dump_stack(); |
---|
421 | 547 | |
---|
422 | | - return 0; |
---|
| 548 | + return NULL; |
---|
| 549 | + } |
---|
| 550 | + trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); |
---|
| 551 | + |
---|
| 552 | + hash = jhash(trace->entries, trace->nr_entries * |
---|
| 553 | + sizeof(trace->entries[0]), 0); |
---|
| 554 | + trace->hash = hash; |
---|
| 555 | + hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1)); |
---|
| 556 | + hlist_for_each_entry(t2, hash_head, hash_entry) { |
---|
| 557 | + if (traces_identical(trace, t2)) |
---|
| 558 | + return t2; |
---|
| 559 | + } |
---|
| 560 | + nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries; |
---|
| 561 | + hlist_add_head(&trace->hash_entry, hash_head); |
---|
| 562 | + |
---|
| 563 | + return trace; |
---|
| 564 | +} |
---|
| 565 | + |
---|
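The rewritten `save_trace()` deduplicates stack traces: hash the entries, look up the hash bucket, and reuse an identical trace instead of storing it twice. The userspace model below shows only that idea; the FNV-1a hash, bucket count and fixed pool are arbitrary stand-ins for `jhash()`, `STACK_TRACE_HASH_SIZE` and the `stack_trace[]` array.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NBUCKETS 64

struct trace {
	struct trace *next;		/* hash-bucket chaining */
	uint32_t hash;
	uint32_t nr;
	unsigned long entries[8];
};

static struct trace *buckets[NBUCKETS];
static struct trace pool[128];
static unsigned int used;

static uint32_t hash_entries(const unsigned long *e, uint32_t nr)
{
	uint32_t h = 2166136261u;	/* FNV-1a, standing in for jhash() */

	for (uint32_t i = 0; i < nr * sizeof(*e); i++)
		h = (h ^ ((const unsigned char *)e)[i]) * 16777619u;
	return h;
}

static struct trace *save_trace_model(const unsigned long *e, uint32_t nr)
{
	uint32_t h = hash_entries(e, nr);
	struct trace *t;

	for (t = buckets[h % NBUCKETS]; t; t = t->next)
		if (t->hash == h && t->nr == nr &&
		    !memcmp(t->entries, e, nr * sizeof(*e)))
			return t;	/* identical trace already stored */

	t = &pool[used++];		/* no exhaustion check: illustration only */
	t->hash = h;
	t->nr = nr;
	memcpy(t->entries, e, nr * sizeof(*e));
	t->next = buckets[h % NBUCKETS];
	buckets[h % NBUCKETS] = t;
	return t;
}

int main(void)
{
	unsigned long a[3] = { 1, 2, 3 };
	struct trace *t1 = save_trace_model(a, 3);
	struct trace *t2 = save_trace_model(a, 3);

	printf("deduplicated: %s\n", t1 == t2 ? "yes" : "no");
	return 0;
}
```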
| 566 | +/* Return the number of stack traces in the stack_trace[] array. */ |
---|
| 567 | +u64 lockdep_stack_trace_count(void) |
---|
| 568 | +{ |
---|
| 569 | + struct lock_trace *trace; |
---|
| 570 | + u64 c = 0; |
---|
| 571 | + int i; |
---|
| 572 | + |
---|
| 573 | + for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) { |
---|
| 574 | + hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) { |
---|
| 575 | + c++; |
---|
| 576 | + } |
---|
423 | 577 | } |
---|
424 | 578 | |
---|
425 | | - return 1; |
---|
| 579 | + return c; |
---|
426 | 580 | } |
---|
| 581 | + |
---|
| 582 | +/* Return the number of stack hash chains that have at least one stack trace. */ |
---|
| 583 | +u64 lockdep_stack_hash_count(void) |
---|
| 584 | +{ |
---|
| 585 | + u64 c = 0; |
---|
| 586 | + int i; |
---|
| 587 | + |
---|
| 588 | + for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) |
---|
| 589 | + if (!hlist_empty(&stack_trace_hash[i])) |
---|
| 590 | + c++; |
---|
| 591 | + |
---|
| 592 | + return c; |
---|
| 593 | +} |
---|
| 594 | +#endif |
---|
427 | 595 | |
---|
428 | 596 | unsigned int nr_hardirq_chains; |
---|
429 | 597 | unsigned int nr_softirq_chains; |
---|
.. | .. |
---|
437 | 605 | DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); |
---|
438 | 606 | #endif |
---|
439 | 607 | |
---|
| 608 | +#ifdef CONFIG_PROVE_LOCKING |
---|
440 | 609 | /* |
---|
441 | 610 | * Locking printouts: |
---|
442 | 611 | */ |
---|
.. | .. |
---|
453 | 622 | #include "lockdep_states.h" |
---|
454 | 623 | #undef LOCKDEP_STATE |
---|
455 | 624 | [LOCK_USED] = "INITIAL USE", |
---|
| 625 | + [LOCK_USED_READ] = "INITIAL READ USE", |
---|
| 626 | + /* abused as string storage for verify_lock_unused() */ |
---|
| 627 | + [LOCK_USAGE_STATES] = "IN-NMI", |
---|
456 | 628 | }; |
---|
| 629 | +#endif |
---|
457 | 630 | |
---|
458 | | -const char * __get_key_name(struct lockdep_subclass_key *key, char *str) |
---|
| 631 | +const char *__get_key_name(const struct lockdep_subclass_key *key, char *str) |
---|
459 | 632 | { |
---|
460 | 633 | return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); |
---|
461 | 634 | } |
---|
.. | .. |
---|
467 | 640 | |
---|
468 | 641 | static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit) |
---|
469 | 642 | { |
---|
| 643 | + /* |
---|
| 644 | + * The usage character defaults to '.' (i.e., irqs disabled and not in |
---|
| 645 | + * irq context), which is the safest usage category. |
---|
| 646 | + */ |
---|
470 | 647 | char c = '.'; |
---|
471 | 648 | |
---|
472 | | - if (class->usage_mask & lock_flag(bit + 2)) |
---|
| 649 | + /* |
---|
| 650 | + * The order of the following usage checks matters, which will |
---|
| 651 | + * result in the outcome character as follows: |
---|
| 652 | + * |
---|
| 653 | + * - '+': irq is enabled and not in irq context |
---|
| 654 | + * - '-': in irq context and irq is disabled |
---|
| 655 | + * - '?': in irq context and irq is enabled |
---|
| 656 | + */ |
---|
| 657 | + if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) { |
---|
473 | 658 | c = '+'; |
---|
474 | | - if (class->usage_mask & lock_flag(bit)) { |
---|
475 | | - c = '-'; |
---|
476 | | - if (class->usage_mask & lock_flag(bit + 2)) |
---|
| 659 | + if (class->usage_mask & lock_flag(bit)) |
---|
477 | 660 | c = '?'; |
---|
478 | | - } |
---|
| 661 | + } else if (class->usage_mask & lock_flag(bit)) |
---|
| 662 | + c = '-'; |
---|
479 | 663 | |
---|
480 | 664 | return c; |
---|
481 | 665 | } |
---|
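The reworked `get_usage_char()` can be restated as a four-way decision. In the sketch below, `ever_enabled` models the `lock_flag(bit + LOCK_USAGE_DIR_MASK)` test (lock taken with that irq type enabled) and `ever_in_irq` models `lock_flag(bit)` (lock taken in that irq context); the parameter names are illustrative.

```c
#include <assert.h>
#include <stdbool.h>

static char usage_char(bool ever_enabled, bool ever_in_irq)
{
	if (ever_enabled)
		return ever_in_irq ? '?' : '+';
	return ever_in_irq ? '-' : '.';
}

int main(void)
{
	assert(usage_char(false, false) == '.');	/* safest category */
	assert(usage_char(true,  false) == '+');	/* enabled, never in irq */
	assert(usage_char(false, true)  == '-');	/* in irq, never enabled */
	assert(usage_char(true,  true)  == '?');	/* both: the dangerous case */
	return 0;
}
```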
.. | .. |
---|
519 | 703 | |
---|
520 | 704 | printk(KERN_CONT " ("); |
---|
521 | 705 | __print_lock_name(class); |
---|
522 | | - printk(KERN_CONT "){%s}", usage); |
---|
| 706 | + printk(KERN_CONT "){%s}-{%d:%d}", usage, |
---|
| 707 | + class->wait_type_outer ?: class->wait_type_inner, |
---|
| 708 | + class->wait_type_inner); |
---|
523 | 709 | } |
---|
524 | 710 | |
---|
525 | 711 | static void print_lockdep_cache(struct lockdep_map *lock) |
---|
.. | .. |
---|
539 | 725 | /* |
---|
540 | 726 | * We can be called locklessly through debug_show_all_locks() so be |
---|
541 | 727 | * extra careful, the hlock might have been released and cleared. |
---|
| 728 | + * |
---|
| 729 | + * If this indeed happens, lets pretend it does not hurt to continue |
---|
| 730 | + * to print the lock unless the hlock class_idx does not point to a |
---|
| 731 | + * registered class. The rationale here is: since we don't attempt |
---|
| 732 | + * to distinguish whether we are in this situation, if it just |
---|
| 733 | + * happened we can't count on class_idx to tell either. |
---|
542 | 734 | */ |
---|
543 | | - unsigned int class_idx = hlock->class_idx; |
---|
| 735 | + struct lock_class *lock = hlock_class(hlock); |
---|
544 | 736 | |
---|
545 | | - /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */ |
---|
546 | | - barrier(); |
---|
547 | | - |
---|
548 | | - if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) { |
---|
| 737 | + if (!lock) { |
---|
549 | 738 | printk(KERN_CONT "<RELEASED>\n"); |
---|
550 | 739 | return; |
---|
551 | 740 | } |
---|
552 | 741 | |
---|
553 | | - printk(KERN_CONT "%p", hlock->instance); |
---|
554 | | - print_lock_name(lock_classes + class_idx - 1); |
---|
| 742 | + printk(KERN_CONT "%px", hlock->instance); |
---|
| 743 | + print_lock_name(lock); |
---|
555 | 744 | printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip); |
---|
556 | 745 | } |
---|
557 | 746 | |
---|
.. | .. |
---|
596 | 785 | * Is this the address of a static object: |
---|
597 | 786 | */ |
---|
598 | 787 | #ifdef __KERNEL__ |
---|
599 | | -static int static_obj(void *obj) |
---|
| 788 | +static int static_obj(const void *obj) |
---|
600 | 789 | { |
---|
601 | 790 | unsigned long start = (unsigned long) &_stext, |
---|
602 | 791 | end = (unsigned long) &_end, |
---|
603 | 792 | addr = (unsigned long) obj; |
---|
| 793 | + |
---|
| 794 | + if (arch_is_kernel_initmem_freed(addr)) |
---|
| 795 | + return 0; |
---|
604 | 796 | |
---|
605 | 797 | /* |
---|
606 | 798 | * static variable? |
---|
.. | .. |
---|
626 | 818 | |
---|
627 | 819 | /* |
---|
628 | 820 | * To make lock name printouts unique, we calculate a unique |
---|
629 | | - * class->name_version generation counter: |
---|
| 821 | + * class->name_version generation counter. The caller must hold the graph |
---|
| 822 | + * lock. |
---|
630 | 823 | */ |
---|
631 | 824 | static int count_matching_names(struct lock_class *new_class) |
---|
632 | 825 | { |
---|
.. | .. |
---|
636 | 829 | if (!new_class->name) |
---|
637 | 830 | return 0; |
---|
638 | 831 | |
---|
639 | | - list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { |
---|
| 832 | + list_for_each_entry(class, &all_lock_classes, lock_entry) { |
---|
640 | 833 | if (new_class->key - new_class->subclass == class->key) |
---|
641 | 834 | return class->name_version; |
---|
642 | 835 | if (class->name && !strcmp(class->name, new_class->name)) |
---|
.. | .. |
---|
646 | 839 | return count + 1; |
---|
647 | 840 | } |
---|
648 | 841 | |
---|
649 | | -static inline struct lock_class * |
---|
| 842 | +/* used from NMI context -- must be lockless */ |
---|
| 843 | +static noinstr struct lock_class * |
---|
650 | 844 | look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass) |
---|
651 | 845 | { |
---|
652 | 846 | struct lockdep_subclass_key *key; |
---|
.. | .. |
---|
654 | 848 | struct lock_class *class; |
---|
655 | 849 | |
---|
656 | 850 | if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { |
---|
| 851 | + instrumentation_begin(); |
---|
657 | 852 | debug_locks_off(); |
---|
658 | 853 | printk(KERN_ERR |
---|
659 | 854 | "BUG: looking up invalid subclass: %u\n", subclass); |
---|
660 | 855 | printk(KERN_ERR |
---|
661 | 856 | "turning off the locking correctness validator.\n"); |
---|
662 | 857 | dump_stack(); |
---|
| 858 | + instrumentation_end(); |
---|
663 | 859 | return NULL; |
---|
664 | 860 | } |
---|
665 | 861 | |
---|
.. | .. |
---|
695 | 891 | * Huh! same key, different name? Did someone trample |
---|
696 | 892 | * on some memory? We're most confused. |
---|
697 | 893 | */ |
---|
698 | | - WARN_ON_ONCE(class->name != lock->name); |
---|
| 894 | + WARN_ON_ONCE(class->name != lock->name && |
---|
| 895 | + lock->key != &__lockdep_no_validate__); |
---|
699 | 896 | return class; |
---|
700 | 897 | } |
---|
701 | 898 | } |
---|
.. | .. |
---|
711 | 908 | static bool assign_lock_key(struct lockdep_map *lock) |
---|
712 | 909 | { |
---|
713 | 910 | unsigned long can_addr, addr = (unsigned long)lock; |
---|
| 911 | + |
---|
| 912 | +#ifdef __KERNEL__ |
---|
| 913 | + /* |
---|
| 914 | + * lockdep_free_key_range() assumes that struct lock_class_key |
---|
| 915 | + * objects do not overlap. Since we use the address of lock |
---|
| 916 | + * objects as class key for static objects, check whether the |
---|
| 917 | + * size of lock_class_key objects does not exceed the size of |
---|
| 918 | + * the smallest lock object. |
---|
| 919 | + */ |
---|
| 920 | + BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t)); |
---|
| 921 | +#endif |
---|
714 | 922 | |
---|
715 | 923 | if (__is_kernel_percpu_address(addr, &can_addr)) |
---|
716 | 924 | lock->key = (void *)can_addr; |
---|
.. | .. |
---|
732 | 940 | return true; |
---|
733 | 941 | } |
---|
734 | 942 | |
---|
| 943 | +#ifdef CONFIG_DEBUG_LOCKDEP |
---|
| 944 | + |
---|
| 945 | +/* Check whether element @e occurs in list @h */ |
---|
| 946 | +static bool in_list(struct list_head *e, struct list_head *h) |
---|
| 947 | +{ |
---|
| 948 | + struct list_head *f; |
---|
| 949 | + |
---|
| 950 | + list_for_each(f, h) { |
---|
| 951 | + if (e == f) |
---|
| 952 | + return true; |
---|
| 953 | + } |
---|
| 954 | + |
---|
| 955 | + return false; |
---|
| 956 | +} |
---|
| 957 | + |
---|
| 958 | +/* |
---|
| 959 | + * Check whether entry @e occurs in any of the locks_after or locks_before |
---|
| 960 | + * lists. |
---|
| 961 | + */ |
---|
| 962 | +static bool in_any_class_list(struct list_head *e) |
---|
| 963 | +{ |
---|
| 964 | + struct lock_class *class; |
---|
| 965 | + int i; |
---|
| 966 | + |
---|
| 967 | + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { |
---|
| 968 | + class = &lock_classes[i]; |
---|
| 969 | + if (in_list(e, &class->locks_after) || |
---|
| 970 | + in_list(e, &class->locks_before)) |
---|
| 971 | + return true; |
---|
| 972 | + } |
---|
| 973 | + return false; |
---|
| 974 | +} |
---|
| 975 | + |
---|
| 976 | +static bool class_lock_list_valid(struct lock_class *c, struct list_head *h) |
---|
| 977 | +{ |
---|
| 978 | + struct lock_list *e; |
---|
| 979 | + |
---|
| 980 | + list_for_each_entry(e, h, entry) { |
---|
| 981 | + if (e->links_to != c) { |
---|
| 982 | + printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s", |
---|
| 983 | + c->name ? : "(?)", |
---|
| 984 | + (unsigned long)(e - list_entries), |
---|
| 985 | + e->links_to && e->links_to->name ? |
---|
| 986 | + e->links_to->name : "(?)", |
---|
| 987 | + e->class && e->class->name ? e->class->name : |
---|
| 988 | + "(?)"); |
---|
| 989 | + return false; |
---|
| 990 | + } |
---|
| 991 | + } |
---|
| 992 | + return true; |
---|
| 993 | +} |
---|
| 994 | + |
---|
| 995 | +#ifdef CONFIG_PROVE_LOCKING |
---|
| 996 | +static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; |
---|
| 997 | +#endif |
---|
| 998 | + |
---|
| 999 | +static bool check_lock_chain_key(struct lock_chain *chain) |
---|
| 1000 | +{ |
---|
| 1001 | +#ifdef CONFIG_PROVE_LOCKING |
---|
| 1002 | + u64 chain_key = INITIAL_CHAIN_KEY; |
---|
| 1003 | + int i; |
---|
| 1004 | + |
---|
| 1005 | + for (i = chain->base; i < chain->base + chain->depth; i++) |
---|
| 1006 | + chain_key = iterate_chain_key(chain_key, chain_hlocks[i]); |
---|
| 1007 | + /* |
---|
| 1008 | + * The 'unsigned long long' casts avoid that a compiler warning |
---|
| 1009 | + * is reported when building tools/lib/lockdep. |
---|
| 1010 | + */ |
---|
| 1011 | + if (chain->chain_key != chain_key) { |
---|
| 1012 | + printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n", |
---|
| 1013 | + (unsigned long long)(chain - lock_chains), |
---|
| 1014 | + (unsigned long long)chain->chain_key, |
---|
| 1015 | + (unsigned long long)chain_key); |
---|
| 1016 | + return false; |
---|
| 1017 | + } |
---|
| 1018 | +#endif |
---|
| 1019 | + return true; |
---|
| 1020 | +} |
---|
| 1021 | + |
---|
| 1022 | +static bool in_any_zapped_class_list(struct lock_class *class) |
---|
| 1023 | +{ |
---|
| 1024 | + struct pending_free *pf; |
---|
| 1025 | + int i; |
---|
| 1026 | + |
---|
| 1027 | + for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) { |
---|
| 1028 | + if (in_list(&class->lock_entry, &pf->zapped)) |
---|
| 1029 | + return true; |
---|
| 1030 | + } |
---|
| 1031 | + |
---|
| 1032 | + return false; |
---|
| 1033 | +} |
---|
| 1034 | + |
---|
| 1035 | +static bool __check_data_structures(void) |
---|
| 1036 | +{ |
---|
| 1037 | + struct lock_class *class; |
---|
| 1038 | + struct lock_chain *chain; |
---|
| 1039 | + struct hlist_head *head; |
---|
| 1040 | + struct lock_list *e; |
---|
| 1041 | + int i; |
---|
| 1042 | + |
---|
| 1043 | + /* Check whether all classes occur in a lock list. */ |
---|
| 1044 | + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { |
---|
| 1045 | + class = &lock_classes[i]; |
---|
| 1046 | + if (!in_list(&class->lock_entry, &all_lock_classes) && |
---|
| 1047 | + !in_list(&class->lock_entry, &free_lock_classes) && |
---|
| 1048 | + !in_any_zapped_class_list(class)) { |
---|
| 1049 | + printk(KERN_INFO "class %px/%s is not in any class list\n", |
---|
| 1050 | + class, class->name ? : "(?)"); |
---|
| 1051 | + return false; |
---|
| 1052 | + } |
---|
| 1053 | + } |
---|
| 1054 | + |
---|
| 1055 | + /* Check whether all classes have valid lock lists. */ |
---|
| 1056 | + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { |
---|
| 1057 | + class = &lock_classes[i]; |
---|
| 1058 | + if (!class_lock_list_valid(class, &class->locks_before)) |
---|
| 1059 | + return false; |
---|
| 1060 | + if (!class_lock_list_valid(class, &class->locks_after)) |
---|
| 1061 | + return false; |
---|
| 1062 | + } |
---|
| 1063 | + |
---|
| 1064 | + /* Check the chain_key of all lock chains. */ |
---|
| 1065 | + for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { |
---|
| 1066 | + head = chainhash_table + i; |
---|
| 1067 | + hlist_for_each_entry_rcu(chain, head, entry) { |
---|
| 1068 | + if (!check_lock_chain_key(chain)) |
---|
| 1069 | + return false; |
---|
| 1070 | + } |
---|
| 1071 | + } |
---|
| 1072 | + |
---|
| 1073 | + /* |
---|
| 1074 | + * Check whether all list entries that are in use occur in a class |
---|
| 1075 | + * lock list. |
---|
| 1076 | + */ |
---|
| 1077 | + for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { |
---|
| 1078 | + e = list_entries + i; |
---|
| 1079 | + if (!in_any_class_list(&e->entry)) { |
---|
| 1080 | + printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n", |
---|
| 1081 | + (unsigned int)(e - list_entries), |
---|
| 1082 | + e->class->name ? : "(?)", |
---|
| 1083 | + e->links_to->name ? : "(?)"); |
---|
| 1084 | + return false; |
---|
| 1085 | + } |
---|
| 1086 | + } |
---|
| 1087 | + |
---|
| 1088 | + /* |
---|
| 1089 | + * Check whether all list entries that are not in use do not occur in |
---|
| 1090 | + * a class lock list. |
---|
| 1091 | + */ |
---|
| 1092 | + for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { |
---|
| 1093 | + e = list_entries + i; |
---|
| 1094 | + if (in_any_class_list(&e->entry)) { |
---|
| 1095 | + printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n", |
---|
| 1096 | + (unsigned int)(e - list_entries), |
---|
| 1097 | + e->class && e->class->name ? e->class->name : |
---|
| 1098 | + "(?)", |
---|
| 1099 | + e->links_to && e->links_to->name ? |
---|
| 1100 | + e->links_to->name : "(?)"); |
---|
| 1101 | + return false; |
---|
| 1102 | + } |
---|
| 1103 | + } |
---|
| 1104 | + |
---|
| 1105 | + return true; |
---|
| 1106 | +} |
---|
| 1107 | + |
---|
| 1108 | +int check_consistency = 0; |
---|
| 1109 | +module_param(check_consistency, int, 0644); |
---|
| 1110 | + |
---|
| 1111 | +static void check_data_structures(void) |
---|
| 1112 | +{ |
---|
| 1113 | + static bool once = false; |
---|
| 1114 | + |
---|
| 1115 | + if (check_consistency && !once) { |
---|
| 1116 | + if (!__check_data_structures()) { |
---|
| 1117 | + once = true; |
---|
| 1118 | + WARN_ON(once); |
---|
| 1119 | + } |
---|
| 1120 | + } |
---|
| 1121 | +} |
---|
| 1122 | + |
---|
| 1123 | +#else /* CONFIG_DEBUG_LOCKDEP */ |
---|
| 1124 | + |
---|
| 1125 | +static inline void check_data_structures(void) { } |
---|
| 1126 | + |
---|
| 1127 | +#endif /* CONFIG_DEBUG_LOCKDEP */ |
---|
| 1128 | + |
---|
| 1129 | +static void init_chain_block_buckets(void); |
---|
| 1130 | + |
---|
| 1131 | +/* |
---|
| 1132 | + * Initialize the lock_classes[] array elements, the free_lock_classes list |
---|
| 1133 | + * and also the delayed_free structure. |
---|
| 1134 | + */ |
---|
| 1135 | +static void init_data_structures_once(void) |
---|
| 1136 | +{ |
---|
| 1137 | + static bool __read_mostly ds_initialized, rcu_head_initialized; |
---|
| 1138 | + int i; |
---|
| 1139 | + |
---|
| 1140 | + if (likely(rcu_head_initialized)) |
---|
| 1141 | + return; |
---|
| 1142 | + |
---|
| 1143 | + if (system_state >= SYSTEM_SCHEDULING) { |
---|
| 1144 | + init_rcu_head(&delayed_free.rcu_head); |
---|
| 1145 | + rcu_head_initialized = true; |
---|
| 1146 | + } |
---|
| 1147 | + |
---|
| 1148 | + if (ds_initialized) |
---|
| 1149 | + return; |
---|
| 1150 | + |
---|
| 1151 | + ds_initialized = true; |
---|
| 1152 | + |
---|
| 1153 | + INIT_LIST_HEAD(&delayed_free.pf[0].zapped); |
---|
| 1154 | + INIT_LIST_HEAD(&delayed_free.pf[1].zapped); |
---|
| 1155 | + |
---|
| 1156 | + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { |
---|
| 1157 | + list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes); |
---|
| 1158 | + INIT_LIST_HEAD(&lock_classes[i].locks_after); |
---|
| 1159 | + INIT_LIST_HEAD(&lock_classes[i].locks_before); |
---|
| 1160 | + } |
---|
| 1161 | + init_chain_block_buckets(); |
---|
| 1162 | +} |
---|
| 1163 | + |
---|
| 1164 | +static inline struct hlist_head *keyhashentry(const struct lock_class_key *key) |
---|
| 1165 | +{ |
---|
| 1166 | + unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS); |
---|
| 1167 | + |
---|
| 1168 | + return lock_keys_hash + hash; |
---|
| 1169 | +} |
---|
| 1170 | + |
---|
| 1171 | +/* Register a dynamically allocated key. */ |
---|
| 1172 | +void lockdep_register_key(struct lock_class_key *key) |
---|
| 1173 | +{ |
---|
| 1174 | + struct hlist_head *hash_head; |
---|
| 1175 | + struct lock_class_key *k; |
---|
| 1176 | + unsigned long flags; |
---|
| 1177 | + |
---|
| 1178 | + if (WARN_ON_ONCE(static_obj(key))) |
---|
| 1179 | + return; |
---|
| 1180 | + hash_head = keyhashentry(key); |
---|
| 1181 | + |
---|
| 1182 | + raw_local_irq_save(flags); |
---|
| 1183 | + if (!graph_lock()) |
---|
| 1184 | + goto restore_irqs; |
---|
| 1185 | + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { |
---|
| 1186 | + if (WARN_ON_ONCE(k == key)) |
---|
| 1187 | + goto out_unlock; |
---|
| 1188 | + } |
---|
| 1189 | + hlist_add_head_rcu(&key->hash_entry, hash_head); |
---|
| 1190 | +out_unlock: |
---|
| 1191 | + graph_unlock(); |
---|
| 1192 | +restore_irqs: |
---|
| 1193 | + raw_local_irq_restore(flags); |
---|
| 1194 | +} |
---|
| 1195 | +EXPORT_SYMBOL_GPL(lockdep_register_key); |
---|
| 1196 | + |
---|
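A typical consumer of the dynamic-key API looks roughly like the sketch below: a driver that allocates both its data structure and its `lock_class_key` at runtime registers the key before using it and unregisters it before freeing the memory. The `my_dev` structure and helper names are hypothetical; `lockdep_register_key()`, `lockdep_unregister_key()` and `lockdep_set_class()` are real interfaces.

```c
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_dev {				/* hypothetical example structure */
	spinlock_t lock;
	struct lock_class_key key;
};

static struct my_dev *my_dev_alloc(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	lockdep_register_key(&dev->key);	/* make the key known to lockdep */
	spin_lock_init(&dev->lock);
	lockdep_set_class(&dev->lock, &dev->key);
	return dev;
}

static void my_dev_free(struct my_dev *dev)
{
	/* Must precede kfree(): lockdep may still reference the key. */
	lockdep_unregister_key(&dev->key);
	kfree(dev);
}
```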
| 1197 | +/* Check whether a key has been registered as a dynamic key. */ |
---|
| 1198 | +static bool is_dynamic_key(const struct lock_class_key *key) |
---|
| 1199 | +{ |
---|
| 1200 | + struct hlist_head *hash_head; |
---|
| 1201 | + struct lock_class_key *k; |
---|
| 1202 | + bool found = false; |
---|
| 1203 | + |
---|
| 1204 | + if (WARN_ON_ONCE(static_obj(key))) |
---|
| 1205 | + return false; |
---|
| 1206 | + |
---|
| 1207 | + /* |
---|
| 1208 | + * If lock debugging is disabled lock_keys_hash[] may contain |
---|
| 1209 | + * pointers to memory that has already been freed. Avoid triggering |
---|
| 1210 | + * a use-after-free in that case by returning early. |
---|
| 1211 | + */ |
---|
| 1212 | + if (!debug_locks) |
---|
| 1213 | + return true; |
---|
| 1214 | + |
---|
| 1215 | + hash_head = keyhashentry(key); |
---|
| 1216 | + |
---|
| 1217 | + rcu_read_lock(); |
---|
| 1218 | + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { |
---|
| 1219 | + if (k == key) { |
---|
| 1220 | + found = true; |
---|
| 1221 | + break; |
---|
| 1222 | + } |
---|
| 1223 | + } |
---|
| 1224 | + rcu_read_unlock(); |
---|
| 1225 | + |
---|
| 1226 | + return found; |
---|
| 1227 | +} |
---|
| 1228 | + |
---|
735 | 1229 | /* |
---|
736 | 1230 | * Register a lock's class in the hash-table, if the class is not present |
---|
737 | 1231 | * yet. Otherwise we look it up. We cache the result in the lock object |
---|
.. | .. |
---|
743 | 1237 | struct lockdep_subclass_key *key; |
---|
744 | 1238 | struct hlist_head *hash_head; |
---|
745 | 1239 | struct lock_class *class; |
---|
| 1240 | + int idx; |
---|
746 | 1241 | |
---|
747 | 1242 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); |
---|
748 | 1243 | |
---|
.. | .. |
---|
753 | 1248 | if (!lock->key) { |
---|
754 | 1249 | if (!assign_lock_key(lock)) |
---|
755 | 1250 | return NULL; |
---|
756 | | - } else if (!static_obj(lock->key)) { |
---|
| 1251 | + } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) { |
---|
757 | 1252 | return NULL; |
---|
758 | 1253 | } |
---|
759 | 1254 | |
---|
.. | .. |
---|
772 | 1267 | goto out_unlock_set; |
---|
773 | 1268 | } |
---|
774 | 1269 | |
---|
775 | | - /* |
---|
776 | | - * Allocate a new key from the static array, and add it to |
---|
777 | | - * the hash: |
---|
778 | | - */ |
---|
779 | | - if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { |
---|
| 1270 | + init_data_structures_once(); |
---|
| 1271 | + |
---|
| 1272 | + /* Allocate a new lock class and add it to the hash. */ |
---|
| 1273 | + class = list_first_entry_or_null(&free_lock_classes, typeof(*class), |
---|
| 1274 | + lock_entry); |
---|
| 1275 | + if (!class) { |
---|
780 | 1276 | if (!debug_locks_off_graph_unlock()) { |
---|
781 | 1277 | return NULL; |
---|
782 | 1278 | } |
---|
.. | .. |
---|
785 | 1281 | dump_stack(); |
---|
786 | 1282 | return NULL; |
---|
787 | 1283 | } |
---|
788 | | - class = lock_classes + nr_lock_classes++; |
---|
| 1284 | + nr_lock_classes++; |
---|
| 1285 | + __set_bit(class - lock_classes, lock_classes_in_use); |
---|
789 | 1286 | debug_atomic_inc(nr_unused_locks); |
---|
790 | 1287 | class->key = key; |
---|
791 | 1288 | class->name = lock->name; |
---|
792 | 1289 | class->subclass = subclass; |
---|
793 | | - INIT_LIST_HEAD(&class->lock_entry); |
---|
794 | | - INIT_LIST_HEAD(&class->locks_before); |
---|
795 | | - INIT_LIST_HEAD(&class->locks_after); |
---|
| 1290 | + WARN_ON_ONCE(!list_empty(&class->locks_before)); |
---|
| 1291 | + WARN_ON_ONCE(!list_empty(&class->locks_after)); |
---|
796 | 1292 | class->name_version = count_matching_names(class); |
---|
| 1293 | + class->wait_type_inner = lock->wait_type_inner; |
---|
| 1294 | + class->wait_type_outer = lock->wait_type_outer; |
---|
| 1295 | + class->lock_type = lock->lock_type; |
---|
797 | 1296 | /* |
---|
798 | 1297 | * We use RCU's safe list-add method to make |
---|
799 | 1298 | * parallel walking of the hash-list safe: |
---|
800 | 1299 | */ |
---|
801 | 1300 | hlist_add_head_rcu(&class->hash_entry, hash_head); |
---|
802 | 1301 | /* |
---|
803 | | - * Add it to the global list of classes: |
---|
| 1302 | + * Remove the class from the free list and add it to the global list |
---|
| 1303 | + * of classes. |
---|
804 | 1304 | */ |
---|
805 | | - list_add_tail_rcu(&class->lock_entry, &all_lock_classes); |
---|
| 1305 | + list_move_tail(&class->lock_entry, &all_lock_classes); |
---|
| 1306 | + idx = class - lock_classes; |
---|
| 1307 | + if (idx > max_lock_class_idx) |
---|
| 1308 | + max_lock_class_idx = idx; |
---|
806 | 1309 | |
---|
807 | 1310 | if (verbose(class)) { |
---|
808 | 1311 | graph_unlock(); |
---|
.. | .. |
---|
843 | 1346 | */ |
---|
844 | 1347 | static struct lock_list *alloc_list_entry(void) |
---|
845 | 1348 | { |
---|
846 | | - if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { |
---|
| 1349 | + int idx = find_first_zero_bit(list_entries_in_use, |
---|
| 1350 | + ARRAY_SIZE(list_entries)); |
---|
| 1351 | + |
---|
| 1352 | + if (idx >= ARRAY_SIZE(list_entries)) { |
---|
847 | 1353 | if (!debug_locks_off_graph_unlock()) |
---|
848 | 1354 | return NULL; |
---|
849 | 1355 | |
---|
.. | .. |
---|
851 | 1357 | dump_stack(); |
---|
852 | 1358 | return NULL; |
---|
853 | 1359 | } |
---|
854 | | - return list_entries + nr_list_entries++; |
---|
| 1360 | + nr_list_entries++; |
---|
| 1361 | + __set_bit(idx, list_entries_in_use); |
---|
| 1362 | + return list_entries + idx; |
---|
855 | 1363 | } |
---|
856 | 1364 | |
---|
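Switching `alloc_list_entry()` to `list_entries_in_use` turns the array into a reusable slot allocator: find the first clear bit, set it, hand out that slot; freeing (the zapping path) just clears the bit. A small userspace model of the pattern, with a plain 64-bit word standing in for the bitmap helpers:

```c
#include <stdio.h>

#define NR_SLOTS 64

static unsigned long long in_use;	/* one bit per slot, like list_entries_in_use */

static int alloc_slot(void)
{
	for (int i = 0; i < NR_SLOTS; i++) {	/* find_first_zero_bit() */
		if (!(in_use & (1ULL << i))) {
			in_use |= 1ULL << i;	/* __set_bit() */
			return i;
		}
	}
	return -1;				/* table exhausted */
}

static void free_slot(int i)
{
	in_use &= ~(1ULL << i);			/* __clear_bit() */
}

int main(void)
{
	int a = alloc_slot(), b = alloc_slot();

	printf("allocated %d and %d\n", a, b);
	free_slot(a);
	printf("reallocated %d\n", alloc_slot());	/* reuses slot a */
	return 0;
}
```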
857 | 1365 | /* |
---|
858 | 1366 | * Add a new dependency to the head of the list: |
---|
859 | 1367 | */ |
---|
860 | | -static int add_lock_to_list(struct lock_class *this, struct list_head *head, |
---|
861 | | - unsigned long ip, int distance, |
---|
862 | | - struct stack_trace *trace) |
---|
| 1368 | +static int add_lock_to_list(struct lock_class *this, |
---|
| 1369 | + struct lock_class *links_to, struct list_head *head, |
---|
| 1370 | + unsigned long ip, u16 distance, u8 dep, |
---|
| 1371 | + const struct lock_trace *trace) |
---|
863 | 1372 | { |
---|
864 | 1373 | struct lock_list *entry; |
---|
865 | 1374 | /* |
---|
.. | .. |
---|
871 | 1380 | return 0; |
---|
872 | 1381 | |
---|
873 | 1382 | entry->class = this; |
---|
| 1383 | + entry->links_to = links_to; |
---|
| 1384 | + entry->dep = dep; |
---|
874 | 1385 | entry->distance = distance; |
---|
875 | | - entry->trace = *trace; |
---|
| 1386 | + entry->trace = trace; |
---|
876 | 1387 | /* |
---|
877 | 1388 | * Both allocation and removal are done under the graph lock; but |
---|
878 | 1389 | * iteration is under RCU-sched; see look_up_lock_class() and |
---|
.. | .. |
---|
886 | 1397 | /* |
---|
887 | 1398 | * For good efficiency of modular, we use power of 2 |
---|
888 | 1399 | */ |
---|
889 | | -#define MAX_CIRCULAR_QUEUE_SIZE 4096UL |
---|
| 1400 | +#define MAX_CIRCULAR_QUEUE_SIZE (1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS) |
---|
890 | 1401 | #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1) |
---|
891 | 1402 | |
---|
892 | 1403 | /* |
---|
893 | | - * The circular_queue and helpers is used to implement the |
---|
894 | | - * breadth-first search(BFS)algorithem, by which we can build |
---|
895 | | - * the shortest path from the next lock to be acquired to the |
---|
896 | | - * previous held lock if there is a circular between them. |
---|
| 1404 | + * The circular_queue and helpers are used to implement graph |
---|
| 1405 | + * breadth-first search (BFS) algorithm, by which we can determine |
---|
| 1406 | + * whether there is a path from a lock to another. In deadlock checks, |
---|
| 1407 | + * a path from the next lock to be acquired to a previous held lock |
---|
| 1408 | + * indicates that adding the <prev> -> <next> lock dependency will |
---|
| 1409 | + * produce a circle in the graph. Breadth-first search instead of |
---|
| 1410 | + * depth-first search is used in order to find the shortest (circular) |
---|
| 1411 | + * path. |
---|
897 | 1412 | */ |
---|
898 | 1413 | struct circular_queue { |
---|
899 | | - unsigned long element[MAX_CIRCULAR_QUEUE_SIZE]; |
---|
| 1414 | + struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE]; |
---|
900 | 1415 | unsigned int front, rear; |
---|
901 | 1416 | }; |
---|
902 | 1417 | |
---|
.. | .. |
---|
922 | 1437 | return ((cq->rear + 1) & CQ_MASK) == cq->front; |
---|
923 | 1438 | } |
---|
924 | 1439 | |
---|
925 | | -static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem) |
---|
| 1440 | +static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem) |
---|
926 | 1441 | { |
---|
927 | 1442 | if (__cq_full(cq)) |
---|
928 | 1443 | return -1; |
---|
.. | .. |
---|
932 | 1447 | return 0; |
---|
933 | 1448 | } |
---|
934 | 1449 | |
---|
935 | | -static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem) |
---|
| 1450 | +/* |
---|
| 1451 | + * Dequeue an element from the circular_queue, return a lock_list if |
---|
| 1452 | + * the queue is not empty, or NULL if otherwise. |
---|
| 1453 | + */ |
---|
| 1454 | +static inline struct lock_list * __cq_dequeue(struct circular_queue *cq) |
---|
936 | 1455 | { |
---|
937 | | - if (__cq_empty(cq)) |
---|
938 | | - return -1; |
---|
| 1456 | + struct lock_list * lock; |
---|
939 | 1457 | |
---|
940 | | - *elem = cq->element[cq->front]; |
---|
| 1458 | + if (__cq_empty(cq)) |
---|
| 1459 | + return NULL; |
---|
| 1460 | + |
---|
| 1461 | + lock = cq->element[cq->front]; |
---|
941 | 1462 | cq->front = (cq->front + 1) & CQ_MASK; |
---|
942 | | - return 0; |
---|
| 1463 | + |
---|
| 1464 | + return lock; |
---|
943 | 1465 | } |
---|
944 | 1466 | |
---|
945 | 1467 | static inline unsigned int __cq_get_elem_count(struct circular_queue *cq) |
---|
.. | .. |
---|
947 | 1469 | return (cq->rear - cq->front) & CQ_MASK; |
---|
948 | 1470 | } |
---|
949 | 1471 | |
---|
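The BFS queue above is a fixed-size ring with power-of-two capacity, masked `front`/`rear` indices, and one slot deliberately left empty so that "full" and "empty" stay distinguishable. A compact userspace model of the same structure:

```c
#include <stdio.h>

#define CQ_SIZE 8			/* must be a power of two */
#define CQ_MASK (CQ_SIZE - 1)

struct cq {
	void *elem[CQ_SIZE];
	unsigned int front, rear;
};

static int cq_enqueue(struct cq *cq, void *e)
{
	if (((cq->rear + 1) & CQ_MASK) == cq->front)
		return -1;		/* full */
	cq->elem[cq->rear] = e;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static void *cq_dequeue(struct cq *cq)
{
	void *e;

	if (cq->front == cq->rear)
		return NULL;		/* empty */
	e = cq->elem[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return e;
}

int main(void)
{
	struct cq cq = { .front = 0, .rear = 0 };
	int x = 42;

	cq_enqueue(&cq, &x);
	printf("%d\n", *(int *)cq_dequeue(&cq));	/* prints 42 */
	return 0;
}
```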
950 | | -static inline void mark_lock_accessed(struct lock_list *lock, |
---|
951 | | - struct lock_list *parent) |
---|
| 1472 | +static inline void mark_lock_accessed(struct lock_list *lock) |
---|
952 | 1473 | { |
---|
953 | | - unsigned long nr; |
---|
954 | | - |
---|
955 | | - nr = lock - list_entries; |
---|
956 | | - WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ |
---|
957 | | - lock->parent = parent; |
---|
958 | 1474 | lock->class->dep_gen_id = lockdep_dependency_gen_id; |
---|
| 1475 | +} |
---|
| 1476 | + |
---|
| 1477 | +static inline void visit_lock_entry(struct lock_list *lock, |
---|
| 1478 | + struct lock_list *parent) |
---|
| 1479 | +{ |
---|
| 1480 | + lock->parent = parent; |
---|
959 | 1481 | } |
---|
960 | 1482 | |
---|
961 | 1483 | static inline unsigned long lock_accessed(struct lock_list *lock) |
---|
962 | 1484 | { |
---|
963 | | - unsigned long nr; |
---|
964 | | - |
---|
965 | | - nr = lock - list_entries; |
---|
966 | | - WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ |
---|
967 | 1485 | return lock->class->dep_gen_id == lockdep_dependency_gen_id; |
---|
968 | 1486 | } |
---|
969 | 1487 | |
---|
.. | .. |
---|
984 | 1502 | return depth; |
---|
985 | 1503 | } |
---|
986 | 1504 | |
---|
987 | | -static int __bfs(struct lock_list *source_entry, |
---|
988 | | - void *data, |
---|
989 | | - int (*match)(struct lock_list *entry, void *data), |
---|
990 | | - struct lock_list **target_entry, |
---|
991 | | - int forward) |
---|
| 1505 | +/* |
---|
| 1506 | + * Return the forward or backward dependency list. |
---|
| 1507 | + * |
---|
| 1508 | + * @lock: the lock_list to get its class's dependency list |
---|
| 1509 | + * @offset: the offset to struct lock_class to determine whether it is |
---|
| 1510 | + * locks_after or locks_before |
---|
| 1511 | + */ |
---|
| 1512 | +static inline struct list_head *get_dep_list(struct lock_list *lock, int offset) |
---|
992 | 1513 | { |
---|
993 | | - struct lock_list *entry; |
---|
994 | | - struct list_head *head; |
---|
995 | | - struct circular_queue *cq = &lock_cq; |
---|
996 | | - int ret = 1; |
---|
| 1514 | + void *lock_class = lock->class; |
---|
997 | 1515 | |
---|
998 | | - if (match(source_entry, data)) { |
---|
999 | | - *target_entry = source_entry; |
---|
1000 | | - ret = 0; |
---|
1001 | | - goto exit; |
---|
1002 | | - } |
---|
1003 | | - |
---|
1004 | | - if (forward) |
---|
1005 | | - head = &source_entry->class->locks_after; |
---|
1006 | | - else |
---|
1007 | | - head = &source_entry->class->locks_before; |
---|
1008 | | - |
---|
1009 | | - if (list_empty(head)) |
---|
1010 | | - goto exit; |
---|
1011 | | - |
---|
1012 | | - __cq_init(cq); |
---|
1013 | | - __cq_enqueue(cq, (unsigned long)source_entry); |
---|
1014 | | - |
---|
1015 | | - while (!__cq_empty(cq)) { |
---|
1016 | | - struct lock_list *lock; |
---|
1017 | | - |
---|
1018 | | - __cq_dequeue(cq, (unsigned long *)&lock); |
---|
1019 | | - |
---|
1020 | | - if (!lock->class) { |
---|
1021 | | - ret = -2; |
---|
1022 | | - goto exit; |
---|
1023 | | - } |
---|
1024 | | - |
---|
1025 | | - if (forward) |
---|
1026 | | - head = &lock->class->locks_after; |
---|
1027 | | - else |
---|
1028 | | - head = &lock->class->locks_before; |
---|
1029 | | - |
---|
1030 | | - DEBUG_LOCKS_WARN_ON(!irqs_disabled()); |
---|
1031 | | - |
---|
1032 | | - list_for_each_entry_rcu(entry, head, entry) { |
---|
1033 | | - if (!lock_accessed(entry)) { |
---|
1034 | | - unsigned int cq_depth; |
---|
1035 | | - mark_lock_accessed(entry, lock); |
---|
1036 | | - if (match(entry, data)) { |
---|
1037 | | - *target_entry = entry; |
---|
1038 | | - ret = 0; |
---|
1039 | | - goto exit; |
---|
1040 | | - } |
---|
1041 | | - |
---|
1042 | | - if (__cq_enqueue(cq, (unsigned long)entry)) { |
---|
1043 | | - ret = -1; |
---|
1044 | | - goto exit; |
---|
1045 | | - } |
---|
1046 | | - cq_depth = __cq_get_elem_count(cq); |
---|
1047 | | - if (max_bfs_queue_depth < cq_depth) |
---|
1048 | | - max_bfs_queue_depth = cq_depth; |
---|
1049 | | - } |
---|
1050 | | - } |
---|
1051 | | - } |
---|
1052 | | -exit: |
---|
1053 | | - return ret; |
---|
| 1516 | + return lock_class + offset; |
---|
1054 | 1517 | } |
---|
| 1518 | +/* |
---|
| 1519 | + * Return values of a bfs search: |
---|
| 1520 | + * |
---|
| 1521 | + * BFS_E* indicates an error |
---|
| 1522 | + * BFS_R* indicates a result (match or not) |
---|
| 1523 | + * |
---|
| 1524 | + * BFS_EINVALIDNODE: Find an invalid node in the graph. |
---|
| 1525 | + * |
---|
| 1526 | + * BFS_EQUEUEFULL: The queue is full while doing the bfs. |
---|
| 1527 | + * |
---|
| 1528 | + * BFS_RMATCH: Find the matched node in the graph, and put that node into |
---|
| 1529 | + * *@target_entry. |
---|
| 1530 | + * |
---|
| 1531 | + * BFS_RNOMATCH: Haven't found the matched node and keep *@target_entry |
---|
| 1532 | + * _unchanged_. |
---|
| 1533 | + */ |
---|
| 1534 | +enum bfs_result { |
---|
| 1535 | + BFS_EINVALIDNODE = -2, |
---|
| 1536 | + BFS_EQUEUEFULL = -1, |
---|
| 1537 | + BFS_RMATCH = 0, |
---|
| 1538 | + BFS_RNOMATCH = 1, |
---|
| 1539 | +}; |
---|
1055 | 1540 | |
---|
1056 | | -static inline int __bfs_forwards(struct lock_list *src_entry, |
---|
1057 | | - void *data, |
---|
1058 | | - int (*match)(struct lock_list *entry, void *data), |
---|
1059 | | - struct lock_list **target_entry) |
---|
| 1541 | +/* |
---|
| 1542 | + * bfs_result < 0 means error |
---|
| 1543 | + */ |
---|
| 1544 | +static inline bool bfs_error(enum bfs_result res) |
---|
1060 | 1545 | { |
---|
1061 | | - return __bfs(src_entry, data, match, target_entry, 1); |
---|
1062 | | - |
---|
1063 | | -} |
---|
1064 | | - |
---|
1065 | | -static inline int __bfs_backwards(struct lock_list *src_entry, |
---|
1066 | | - void *data, |
---|
1067 | | - int (*match)(struct lock_list *entry, void *data), |
---|
1068 | | - struct lock_list **target_entry) |
---|
1069 | | -{ |
---|
1070 | | - return __bfs(src_entry, data, match, target_entry, 0); |
---|
1071 | | - |
---|
| 1546 | + return res < 0; |
---|
1072 | 1547 | } |
---|
1073 | 1548 | |
---|
1074 | 1549 | /* |
---|
1075 | | - * Recursive, forwards-direction lock-dependency checking, used for |
---|
1076 | | - * both noncyclic checking and for hardirq-unsafe/softirq-unsafe |
---|
1077 | | - * checking. |
---|
| 1550 | + * DEP_*_BIT in lock_list::dep |
---|
| 1551 | + * |
---|
| 1552 | + * For dependency @prev -> @next: |
---|
| 1553 | + * |
---|
| 1554 | + * SR: @prev is shared reader (->read != 0) and @next is recursive reader |
---|
| 1555 | + * (->read == 2) |
---|
| 1556 | + * ER: @prev is exclusive locker (->read == 0) and @next is recursive reader |
---|
| 1557 | + * SN: @prev is shared reader and @next is non-recursive locker (->read != 2) |
---|
| 1558 | + * EN: @prev is exclusive locker and @next is non-recursive locker |
---|
| 1559 | + * |
---|
| 1560 | + * Note that we define the value of DEP_*_BITs so that: |
---|
| 1561 | + * bit0 is prev->read == 0 |
---|
| 1562 | + * bit1 is next->read != 2 |
---|
1078 | 1563 | */ |
---|
| 1564 | +#define DEP_SR_BIT (0 + (0 << 1)) /* 0 */ |
---|
| 1565 | +#define DEP_ER_BIT (1 + (0 << 1)) /* 1 */ |
---|
| 1566 | +#define DEP_SN_BIT (0 + (1 << 1)) /* 2 */ |
---|
| 1567 | +#define DEP_EN_BIT (1 + (1 << 1)) /* 3 */ |
---|
| 1568 | + |
---|
| 1569 | +#define DEP_SR_MASK (1U << (DEP_SR_BIT)) |
---|
| 1570 | +#define DEP_ER_MASK (1U << (DEP_ER_BIT)) |
---|
| 1571 | +#define DEP_SN_MASK (1U << (DEP_SN_BIT)) |
---|
| 1572 | +#define DEP_EN_MASK (1U << (DEP_EN_BIT)) |
---|
| 1573 | + |
---|
| 1574 | +static inline unsigned int |
---|
| 1575 | +__calc_dep_bit(struct held_lock *prev, struct held_lock *next) |
---|
| 1576 | +{ |
---|
| 1577 | + return (prev->read == 0) + ((next->read != 2) << 1); |
---|
| 1578 | +} |
---|
| 1579 | + |
---|
| 1580 | +static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next) |
---|
| 1581 | +{ |
---|
| 1582 | + return 1U << __calc_dep_bit(prev, next); |
---|
| 1583 | +} |
---|
| 1584 | + |
---|
| 1585 | +/* |
---|
| 1586 | + * calculate the dep_bit for backwards edges. We care about whether @prev is |
---|
| 1587 | + * shared and whether @next is recursive. |
---|
| 1588 | + */ |
---|
| 1589 | +static inline unsigned int |
---|
| 1590 | +__calc_dep_bitb(struct held_lock *prev, struct held_lock *next) |
---|
| 1591 | +{ |
---|
| 1592 | + return (next->read != 2) + ((prev->read == 0) << 1); |
---|
| 1593 | +} |
---|
| 1594 | + |
---|
| 1595 | +static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next) |
---|
| 1596 | +{ |
---|
| 1597 | + return 1U << __calc_dep_bitb(prev, next); |
---|
| 1598 | +} |
---|
| 1599 | + |
---|
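The encoding above makes bit0 "prev is held exclusively" and bit1 "next is not a recursive reader", so the four DEP_* values fall out of simple arithmetic. The standalone check below re-derives the table; the constants mirror the macros above and the helper name is illustrative.

```c
#include <assert.h>

#define DEP_SR_BIT 0	/* shared prev    -> recursive-read next */
#define DEP_ER_BIT 1	/* exclusive prev -> recursive-read next */
#define DEP_SN_BIT 2	/* shared prev    -> non-recursive next */
#define DEP_EN_BIT 3	/* exclusive prev -> non-recursive next */

static unsigned int calc_dep_bit(int prev_read, int next_read)
{
	return (prev_read == 0) + ((next_read != 2) << 1);
}

int main(void)
{
	assert(calc_dep_bit(1, 2) == DEP_SR_BIT);	/* shared, recursive read */
	assert(calc_dep_bit(0, 2) == DEP_ER_BIT);	/* exclusive, recursive read */
	assert(calc_dep_bit(1, 0) == DEP_SN_BIT);	/* shared, non-recursive */
	assert(calc_dep_bit(0, 0) == DEP_EN_BIT);	/* exclusive, non-recursive */
	return 0;
}
```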
| 1600 | +/* |
---|
| 1601 | + * Initialize a lock_list entry @lock belonging to @class as the root for a BFS |
---|
| 1602 | + * search. |
---|
| 1603 | + */ |
---|
| 1604 | +static inline void __bfs_init_root(struct lock_list *lock, |
---|
| 1605 | + struct lock_class *class) |
---|
| 1606 | +{ |
---|
| 1607 | + lock->class = class; |
---|
| 1608 | + lock->parent = NULL; |
---|
| 1609 | + lock->only_xr = 0; |
---|
| 1610 | +} |
---|
| 1611 | + |
---|
| 1612 | +/* |
---|
| 1613 | + * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the |
---|
| 1614 | + * root for a BFS search. |
---|
| 1615 | + * |
---|
| 1616 | + * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure |
---|
| 1617 | + * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)-> |
---|
| 1618 | + * and -(S*)->. |
---|
| 1619 | + */ |
---|
| 1620 | +static inline void bfs_init_root(struct lock_list *lock, |
---|
| 1621 | + struct held_lock *hlock) |
---|
| 1622 | +{ |
---|
| 1623 | + __bfs_init_root(lock, hlock_class(hlock)); |
---|
| 1624 | + lock->only_xr = (hlock->read == 2); |
---|
| 1625 | +} |
---|
| 1626 | + |
---|
| 1627 | +/* |
---|
| 1628 | + * Similar to bfs_init_root() but initialize the root for backwards BFS. |
---|
| 1629 | + * |
---|
| 1630 | + * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure |
---|
| 1631 | + * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not |
---|
| 1632 | + * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->). |
---|
| 1633 | + */ |
---|
| 1634 | +static inline void bfs_init_rootb(struct lock_list *lock, |
---|
| 1635 | + struct held_lock *hlock) |
---|
| 1636 | +{ |
---|
| 1637 | + __bfs_init_root(lock, hlock_class(hlock)); |
---|
| 1638 | + lock->only_xr = (hlock->read != 0); |
---|
| 1639 | +} |
---|
| 1640 | + |
---|
| 1641 | +static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset) |
---|
| 1642 | +{ |
---|
| 1643 | + if (!lock || !lock->parent) |
---|
| 1644 | + return NULL; |
---|
| 1645 | + |
---|
| 1646 | + return list_next_or_null_rcu(get_dep_list(lock->parent, offset), |
---|
| 1647 | + &lock->entry, struct lock_list, entry); |
---|
| 1648 | +} |
---|
| 1649 | + |
---|
| 1650 | +/* |
---|
| 1651 | + * Breadth-First Search to find a strong path in the dependency graph. |
---|
| 1652 | + * |
---|
| 1653 | + * @source_entry: the source of the path we are searching for. |
---|
| 1654 | + * @data: data used for the second parameter of @match function |
---|
| 1655 | + * @match: match function for the search |
---|
| 1656 | + * @target_entry: pointer to the target of a matched path |
---|
| 1657 | + * @offset: the offset to struct lock_class to determine whether it is |
---|
| 1658 | + * locks_after or locks_before |
---|
| 1659 | + * |
---|
| 1660 | + * We may have multiple edges (considering different kinds of dependencies, |
---|
| 1661 | + * e.g. ER and SN) between two nodes in the dependency graph. But |
---|
| 1662 | + * only the strong dependency path in the graph is relevant to deadlocks. A |
---|
| 1663 | + * strong dependency path is a dependency path that doesn't have two adjacent |
---|
| 1664 | + * dependencies as -(*R)-> -(S*)->, please see: |
---|
| 1665 | + * |
---|
| 1666 | + * Documentation/locking/lockdep-design.rst |
---|
| 1667 | + * |
---|
| 1668 | + * for more explanation of the definition of strong dependency paths |
---|
| 1669 | + * |
---|
| 1670 | + * In __bfs(), we only traverse in the strong dependency path: |
---|
| 1671 | + * |
---|
| 1672 | + * In lock_list::only_xr, we record whether the previous dependency only |
---|
| 1673 | + * has -(*R)-> in the search, and if it does (prev only has -(*R)->), we |
---|
| 1674 | + * filter out any -(S*)-> in the current dependency and after that, the |
---|
| 1675 | + * ->only_xr is set according to whether we only have -(*R)-> left. |
---|
| 1676 | + */ |
---|
| 1677 | +static enum bfs_result __bfs(struct lock_list *source_entry, |
---|
| 1678 | + void *data, |
---|
| 1679 | + bool (*match)(struct lock_list *entry, void *data), |
---|
| 1680 | + struct lock_list **target_entry, |
---|
| 1681 | + int offset) |
---|
| 1682 | +{ |
---|
| 1683 | + struct circular_queue *cq = &lock_cq; |
---|
| 1684 | + struct lock_list *lock = NULL; |
---|
| 1685 | + struct lock_list *entry; |
---|
| 1686 | + struct list_head *head; |
---|
| 1687 | + unsigned int cq_depth; |
---|
| 1688 | + bool first; |
---|
| 1689 | + |
---|
| 1690 | + lockdep_assert_locked(); |
---|
| 1691 | + |
---|
| 1692 | + __cq_init(cq); |
---|
| 1693 | + __cq_enqueue(cq, source_entry); |
---|
| 1694 | + |
---|
| 1695 | + while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) { |
---|
| 1696 | + if (!lock->class) |
---|
| 1697 | + return BFS_EINVALIDNODE; |
---|
| 1698 | + |
---|
| 1699 | + /* |
---|
| 1700 | + * Step 1: check whether we have already finished with this one.
---|
| 1701 | + * |
---|
| 1702 | + * If we have visited all the dependencies from this @lock to |
---|
| 1703 | + * others (iow, if we have visited all lock_list entries in |
---|
| 1704 | + * @lock->class->locks_{after,before}) we skip, otherwise go |
---|
| 1705 | + * and visit all the dependencies in the list and mark this |
---|
| 1706 | + * list accessed. |
---|
| 1707 | + */ |
---|
| 1708 | + if (lock_accessed(lock)) |
---|
| 1709 | + continue; |
---|
| 1710 | + else |
---|
| 1711 | + mark_lock_accessed(lock); |
---|
| 1712 | + |
---|
| 1713 | + /* |
---|
| 1714 | + * Step 2: check whether prev dependency and this form a strong |
---|
| 1715 | + * dependency path. |
---|
| 1716 | + */ |
---|
| 1717 | + if (lock->parent) { /* Parent exists, check prev dependency */ |
---|
| 1718 | + u8 dep = lock->dep; |
---|
| 1719 | + bool prev_only_xr = lock->parent->only_xr; |
---|
| 1720 | + |
---|
| 1721 | + /* |
---|
| 1722 | + * Mask out all -(S*)-> if we only have *R in previous |
---|
| 1723 | + * step, because -(*R)-> -(S*)-> don't make up a strong |
---|
| 1724 | + * dependency. |
---|
| 1725 | + */ |
---|
| 1726 | + if (prev_only_xr) |
---|
| 1727 | + dep &= ~(DEP_SR_MASK | DEP_SN_MASK); |
---|
| 1728 | + |
---|
| 1729 | + /* If nothing left, we skip */ |
---|
| 1730 | + if (!dep) |
---|
| 1731 | + continue; |
---|
| 1732 | + |
---|
| 1733 | + /* If there are only -(*R)-> left, set that for the next step */ |
---|
| 1734 | + lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK)); |
---|
| 1735 | + } |
---|
| 1736 | + |
---|
| 1737 | + /* |
---|
| 1738 | + * Step 3: we haven't visited this and there is a strong |
---|
| 1739 | + * dependency path to this, so check with @match. |
---|
| 1740 | + */ |
---|
| 1741 | + if (match(lock, data)) { |
---|
| 1742 | + *target_entry = lock; |
---|
| 1743 | + return BFS_RMATCH; |
---|
| 1744 | + } |
---|
| 1745 | + |
---|
| 1746 | + /* |
---|
| 1747 | + * Step 4: if not match, expand the path by adding the |
---|
| 1748 | + * forward or backwards dependencies in the search
---|
| 1749 | + * |
---|
| 1750 | + */ |
---|
| 1751 | + first = true; |
---|
| 1752 | + head = get_dep_list(lock, offset); |
---|
| 1753 | + list_for_each_entry_rcu(entry, head, entry) { |
---|
| 1754 | + visit_lock_entry(entry, lock); |
---|
| 1755 | + |
---|
| 1756 | + /* |
---|
| 1757 | + * Note we only enqueue the first of the list into the |
---|
| 1758 | + * queue, because we can always find a sibling |
---|
| 1759 | + * dependency from one (see __bfs_next()), which
---|
| 1760 | + * saves queue space.
---|
| 1761 | + */ |
---|
| 1762 | + if (!first) |
---|
| 1763 | + continue; |
---|
| 1764 | + |
---|
| 1765 | + first = false; |
---|
| 1766 | + |
---|
| 1767 | + if (__cq_enqueue(cq, entry)) |
---|
| 1768 | + return BFS_EQUEUEFULL; |
---|
| 1769 | + |
---|
| 1770 | + cq_depth = __cq_get_elem_count(cq); |
---|
| 1771 | + if (max_bfs_queue_depth < cq_depth) |
---|
| 1772 | + max_bfs_queue_depth = cq_depth; |
---|
| 1773 | + } |
---|
| 1774 | + } |
---|
| 1775 | + |
---|
| 1776 | + return BFS_RNOMATCH; |
---|
| 1777 | +} |
---|
| 1778 | + |
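A compact standalone illustration of the Step 2 filtering above (illustration only; the DEP_*_MASK values below are assumptions following the SR=0, ER=1, SN=2, EN=3 layout used earlier in this file):

	#include <assert.h>

	/* Assumed bit layout: SR = 0, ER = 1, SN = 2, EN = 3. */
	#define DEP_SR_MASK (1U << 0)
	#define DEP_ER_MASK (1U << 1)
	#define DEP_SN_MASK (1U << 2)
	#define DEP_EN_MASK (1U << 3)

	int main(void)
	{
		/* The edge under inspection carries both -(SR)-> and -(EN)->. */
		unsigned char dep = DEP_SR_MASK | DEP_EN_MASK;
		int prev_only_xr = 1;		/* previous step only had -(*R)-> */
		int only_xr;

		/* Step 2 above: -(*R)-> -(S*)-> is not strong, drop the -(S*)-> kinds. */
		if (prev_only_xr)
			dep &= ~(DEP_SR_MASK | DEP_SN_MASK);

		assert(dep == DEP_EN_MASK);	/* only -(EN)-> survives */

		/* Record whether only -(*R)-> kinds are left for the next step. */
		only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
		assert(only_xr == 0);		/* -(EN)-> ends in N, so no */
		return 0;
	}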
---|
| 1779 | +static inline enum bfs_result |
---|
| 1780 | +__bfs_forwards(struct lock_list *src_entry, |
---|
| 1781 | + void *data, |
---|
| 1782 | + bool (*match)(struct lock_list *entry, void *data), |
---|
| 1783 | + struct lock_list **target_entry) |
---|
| 1784 | +{ |
---|
| 1785 | + return __bfs(src_entry, data, match, target_entry, |
---|
| 1786 | + offsetof(struct lock_class, locks_after)); |
---|
| 1787 | + |
---|
| 1788 | +} |
---|
| 1789 | + |
---|
| 1790 | +static inline enum bfs_result |
---|
| 1791 | +__bfs_backwards(struct lock_list *src_entry, |
---|
| 1792 | + void *data, |
---|
| 1793 | + bool (*match)(struct lock_list *entry, void *data), |
---|
| 1794 | + struct lock_list **target_entry) |
---|
| 1795 | +{ |
---|
| 1796 | + return __bfs(src_entry, data, match, target_entry, |
---|
| 1797 | + offsetof(struct lock_class, locks_before)); |
---|
| 1798 | + |
---|
| 1799 | +} |
---|
| 1800 | + |
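The offset argument is simply the offsetof() of one of the two list heads embedded in struct lock_class. A toy standalone sketch of the same trick, using made-up types rather than the kernel's:

	#include <stddef.h>
	#include <assert.h>

	/* Toy stand-ins, not the kernel structures. */
	struct toy_list_head { struct toy_list_head *next, *prev; };

	struct toy_lock_class {
		struct toy_list_head locks_after;
		struct toy_list_head locks_before;
	};

	/* Same idea as get_dep_list(): pick one of two embedded lists by byte offset. */
	static struct toy_list_head *toy_get_dep_list(struct toy_lock_class *class, int offset)
	{
		return (struct toy_list_head *)((char *)class + offset);
	}

	int main(void)
	{
		struct toy_lock_class c;

		assert(toy_get_dep_list(&c, offsetof(struct toy_lock_class, locks_after)) == &c.locks_after);
		assert(toy_get_dep_list(&c, offsetof(struct toy_lock_class, locks_before)) == &c.locks_before);
		return 0;
	}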
---|
| 1801 | +static void print_lock_trace(const struct lock_trace *trace, |
---|
| 1802 | + unsigned int spaces) |
---|
| 1803 | +{ |
---|
| 1804 | + stack_trace_print(trace->entries, trace->nr_entries, spaces); |
---|
| 1805 | +} |
---|
1079 | 1806 | |
---|
1080 | 1807 | /* |
---|
1081 | 1808 | * Print a dependency chain entry (this is only done when a deadlock |
---|
1082 | 1809 | * has been detected): |
---|
1083 | 1810 | */ |
---|
1084 | | -static noinline int |
---|
| 1811 | +static noinline void |
---|
1085 | 1812 | print_circular_bug_entry(struct lock_list *target, int depth) |
---|
1086 | 1813 | { |
---|
1087 | 1814 | if (debug_locks_silent) |
---|
1088 | | - return 0; |
---|
| 1815 | + return; |
---|
1089 | 1816 | printk("\n-> #%u", depth); |
---|
1090 | 1817 | print_lock_name(target->class); |
---|
1091 | 1818 | printk(KERN_CONT ":\n"); |
---|
1092 | | - print_stack_trace(&target->trace, 6); |
---|
1093 | | - |
---|
1094 | | - return 0; |
---|
| 1819 | + print_lock_trace(target->trace, 6); |
---|
1095 | 1820 | } |
---|
1096 | 1821 | |
---|
1097 | 1822 | static void |
---|
.. | .. |
---|
1148 | 1873 | * When a circular dependency is detected, print the |
---|
1149 | 1874 | * header first: |
---|
1150 | 1875 | */ |
---|
1151 | | -static noinline int |
---|
| 1876 | +static noinline void |
---|
1152 | 1877 | print_circular_bug_header(struct lock_list *entry, unsigned int depth, |
---|
1153 | 1878 | struct held_lock *check_src, |
---|
1154 | 1879 | struct held_lock *check_tgt) |
---|
.. | .. |
---|
1156 | 1881 | struct task_struct *curr = current; |
---|
1157 | 1882 | |
---|
1158 | 1883 | if (debug_locks_silent) |
---|
1159 | | - return 0; |
---|
| 1884 | + return; |
---|
1160 | 1885 | |
---|
1161 | 1886 | pr_warn("\n"); |
---|
1162 | 1887 | pr_warn("======================================================\n"); |
---|
.. | .. |
---|
1174 | 1899 | pr_warn("\nthe existing dependency chain (in reverse order) is:\n"); |
---|
1175 | 1900 | |
---|
1176 | 1901 | print_circular_bug_entry(entry, depth); |
---|
1177 | | - |
---|
1178 | | - return 0; |
---|
1179 | 1902 | } |
---|
1180 | 1903 | |
---|
1181 | | -static inline int class_equal(struct lock_list *entry, void *data) |
---|
| 1904 | +/* |
---|
| 1905 | + * We are about to add A -> B into the dependency graph, and in __bfs() a |
---|
| 1906 | + * strong dependency path A -> .. -> B is found: hlock_class equals |
---|
| 1907 | + * entry->class. |
---|
| 1908 | + * |
---|
| 1909 | + * If A -> .. -> B can replace A -> B in any __bfs() search (means the former |
---|
| 1910 | + * is _stronger_ than or equal to the latter), we consider A -> B as redundant. |
---|
| 1911 | + * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A |
---|
| 1912 | + * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the |
---|
| 1913 | + * dependency graph, as for any strong path ..-> A -> B ->.. we can get with
---|
| 1914 | + * the dependency A -> B, we could already get an equivalent path ..-> A ->
---|
| 1915 | + * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
---|
| 1916 | + * |
---|
| 1917 | + * We need to make sure both the start and the end of A -> .. -> B is not |
---|
| 1918 | + * weaker than A -> B. For the start part, please see the comment in |
---|
| 1919 | + * check_redundant(). For the end part, we need: |
---|
| 1920 | + * |
---|
| 1921 | + * Either |
---|
| 1922 | + * |
---|
| 1923 | + * a) A -> B is -(*R)-> (everything is not weaker than that) |
---|
| 1924 | + * |
---|
| 1925 | + * or |
---|
| 1926 | + * |
---|
| 1927 | + * b) A -> .. -> B is -(*N)-> (nothing is stronger than this) |
---|
| 1928 | + * |
---|
| 1929 | + */ |
---|
| 1930 | +static inline bool hlock_equal(struct lock_list *entry, void *data) |
---|
1182 | 1931 | { |
---|
1183 | | - return entry->class == data; |
---|
| 1932 | + struct held_lock *hlock = (struct held_lock *)data; |
---|
| 1933 | + |
---|
| 1934 | + return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */ |
---|
| 1935 | + (hlock->read == 2 || /* A -> B is -(*R)-> */ |
---|
| 1936 | + !entry->only_xr); /* A -> .. -> B is -(*N)-> */ |
---|
1184 | 1937 | } |
---|
1185 | 1938 | |
---|
1186 | | -static noinline int print_circular_bug(struct lock_list *this, |
---|
| 1939 | +/* |
---|
| 1940 | + * We are about to add B -> A into the dependency graph, and in __bfs() a |
---|
| 1941 | + * strong dependency path A -> .. -> B is found: hlock_class equals |
---|
| 1942 | + * entry->class. |
---|
| 1943 | + * |
---|
| 1944 | + * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong |
---|
| 1945 | + * dependency cycle, which means:
---|
| 1946 | + * |
---|
| 1947 | + * Either |
---|
| 1948 | + * |
---|
| 1949 | + * a) B -> A is -(E*)-> |
---|
| 1950 | + * |
---|
| 1951 | + * or |
---|
| 1952 | + * |
---|
| 1953 | + * b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B) |
---|
| 1954 | + * |
---|
| 1955 | + * as then we don't have -(*R)-> -(S*)-> in the cycle. |
---|
| 1956 | + */ |
---|
| 1957 | +static inline bool hlock_conflict(struct lock_list *entry, void *data) |
---|
| 1958 | +{ |
---|
| 1959 | + struct held_lock *hlock = (struct held_lock *)data; |
---|
| 1960 | + |
---|
| 1961 | + return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */ |
---|
| 1962 | + (hlock->read == 0 || /* B -> A is -(E*)-> */ |
---|
| 1963 | + !entry->only_xr); /* A -> .. -> B is -(*N)-> */ |
---|
| 1964 | +} |
---|
| 1965 | + |
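A stripped-down model of the two match predicates above (illustration only; it keeps just the read/only_xr conditions and leaves out the hlock_class() == entry->class part):

	#include <stdio.h>
	#include <stdbool.h>

	struct toy_held_lock { int read; };	/* 0 write, 1 read, 2 recursive read */
	struct toy_lock_list { bool only_xr; };	/* path only reaches B via -(*R)-> */

	/* check_redundant(): may A -> .. -> B replace A -> B? */
	static bool toy_hlock_equal(struct toy_held_lock *hlock, struct toy_lock_list *entry)
	{
		return hlock->read == 2 ||	/* A -> B is -(*R)-> */
		       !entry->only_xr;		/* A -> .. -> B is -(*N)-> */
	}

	/* check_noncircular(): does A -> .. -> B -> A close a strong cycle? */
	static bool toy_hlock_conflict(struct toy_held_lock *hlock, struct toy_lock_list *entry)
	{
		return hlock->read == 0 ||	/* B -> A is -(E*)-> */
		       !entry->only_xr;		/* A -> .. -> B is -(*N)-> */
	}

	int main(void)
	{
		struct toy_held_lock h = { .read = 1 };		/* non-recursive read */
		struct toy_lock_list strong = { .only_xr = false };
		struct toy_lock_list weak   = { .only_xr = true  };

		printf("equal:    strong=%d weak=%d\n",
		       toy_hlock_equal(&h, &strong), toy_hlock_equal(&h, &weak));
		printf("conflict: strong=%d weak=%d\n",
		       toy_hlock_conflict(&h, &strong), toy_hlock_conflict(&h, &weak));
		return 0;
	}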
---|
| 1966 | +static noinline void print_circular_bug(struct lock_list *this, |
---|
1187 | 1967 | struct lock_list *target, |
---|
1188 | 1968 | struct held_lock *check_src, |
---|
1189 | | - struct held_lock *check_tgt, |
---|
1190 | | - struct stack_trace *trace) |
---|
| 1969 | + struct held_lock *check_tgt) |
---|
1191 | 1970 | { |
---|
1192 | 1971 | struct task_struct *curr = current; |
---|
1193 | 1972 | struct lock_list *parent; |
---|
.. | .. |
---|
1195 | 1974 | int depth; |
---|
1196 | 1975 | |
---|
1197 | 1976 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
---|
1198 | | - return 0; |
---|
| 1977 | + return; |
---|
1199 | 1978 | |
---|
1200 | | - if (!save_trace(&this->trace)) |
---|
1201 | | - return 0; |
---|
| 1979 | + this->trace = save_trace(); |
---|
| 1980 | + if (!this->trace) |
---|
| 1981 | + return; |
---|
1202 | 1982 | |
---|
1203 | 1983 | depth = get_lock_depth(target); |
---|
1204 | 1984 | |
---|
.. | .. |
---|
1220 | 2000 | |
---|
1221 | 2001 | printk("\nstack backtrace:\n"); |
---|
1222 | 2002 | dump_stack(); |
---|
1223 | | - |
---|
1224 | | - return 0; |
---|
1225 | 2003 | } |
---|
1226 | 2004 | |
---|
1227 | | -static noinline int print_bfs_bug(int ret) |
---|
| 2005 | +static noinline void print_bfs_bug(int ret) |
---|
1228 | 2006 | { |
---|
1229 | 2007 | if (!debug_locks_off_graph_unlock()) |
---|
1230 | | - return 0; |
---|
| 2008 | + return; |
---|
1231 | 2009 | |
---|
1232 | 2010 | /* |
---|
1233 | 2011 | * Breadth-first-search failed, graph got corrupted? |
---|
1234 | 2012 | */ |
---|
1235 | 2013 | WARN(1, "lockdep bfs error:%d\n", ret); |
---|
1236 | | - |
---|
1237 | | - return 0; |
---|
1238 | 2014 | } |
---|
1239 | 2015 | |
---|
1240 | | -static int noop_count(struct lock_list *entry, void *data) |
---|
| 2016 | +static bool noop_count(struct lock_list *entry, void *data) |
---|
1241 | 2017 | { |
---|
1242 | 2018 | (*(unsigned long *)data)++; |
---|
1243 | | - return 0; |
---|
| 2019 | + return false; |
---|
1244 | 2020 | } |
---|
1245 | 2021 | |
---|
1246 | 2022 | static unsigned long __lockdep_count_forward_deps(struct lock_list *this) |
---|
1247 | 2023 | { |
---|
1248 | 2024 | unsigned long count = 0; |
---|
1249 | | - struct lock_list *uninitialized_var(target_entry); |
---|
| 2025 | + struct lock_list *target_entry; |
---|
1250 | 2026 | |
---|
1251 | 2027 | __bfs_forwards(this, (void *)&count, noop_count, &target_entry); |
---|
1252 | 2028 | |
---|
.. | .. |
---|
1257 | 2033 | unsigned long ret, flags; |
---|
1258 | 2034 | struct lock_list this; |
---|
1259 | 2035 | |
---|
1260 | | - this.parent = NULL; |
---|
1261 | | - this.class = class; |
---|
| 2036 | + __bfs_init_root(&this, class); |
---|
1262 | 2037 | |
---|
1263 | 2038 | raw_local_irq_save(flags); |
---|
1264 | | - current->lockdep_recursion = 1; |
---|
1265 | | - arch_spin_lock(&lockdep_lock); |
---|
| 2039 | + lockdep_lock(); |
---|
1266 | 2040 | ret = __lockdep_count_forward_deps(&this); |
---|
1267 | | - arch_spin_unlock(&lockdep_lock); |
---|
1268 | | - current->lockdep_recursion = 0; |
---|
| 2041 | + lockdep_unlock(); |
---|
1269 | 2042 | raw_local_irq_restore(flags); |
---|
1270 | 2043 | |
---|
1271 | 2044 | return ret; |
---|
.. | .. |
---|
1274 | 2047 | static unsigned long __lockdep_count_backward_deps(struct lock_list *this) |
---|
1275 | 2048 | { |
---|
1276 | 2049 | unsigned long count = 0; |
---|
1277 | | - struct lock_list *uninitialized_var(target_entry); |
---|
| 2050 | + struct lock_list *target_entry; |
---|
1278 | 2051 | |
---|
1279 | 2052 | __bfs_backwards(this, (void *)&count, noop_count, &target_entry); |
---|
1280 | 2053 | |
---|
.. | .. |
---|
1286 | 2059 | unsigned long ret, flags; |
---|
1287 | 2060 | struct lock_list this; |
---|
1288 | 2061 | |
---|
1289 | | - this.parent = NULL; |
---|
1290 | | - this.class = class; |
---|
| 2062 | + __bfs_init_root(&this, class); |
---|
1291 | 2063 | |
---|
1292 | 2064 | raw_local_irq_save(flags); |
---|
1293 | | - current->lockdep_recursion = 1; |
---|
1294 | | - arch_spin_lock(&lockdep_lock); |
---|
| 2065 | + lockdep_lock(); |
---|
1295 | 2066 | ret = __lockdep_count_backward_deps(&this); |
---|
1296 | | - arch_spin_unlock(&lockdep_lock); |
---|
1297 | | - current->lockdep_recursion = 0; |
---|
| 2067 | + lockdep_unlock(); |
---|
1298 | 2068 | raw_local_irq_restore(flags); |
---|
1299 | 2069 | |
---|
1300 | 2070 | return ret; |
---|
1301 | 2071 | } |
---|
1302 | 2072 | |
---|
1303 | 2073 | /* |
---|
1304 | | - * Prove that the dependency graph starting at <entry> can not |
---|
1305 | | - * lead to <target>. Print an error and return 0 if it does. |
---|
| 2074 | + * Check whether the dependency graph starting at <src> can lead to
---|
| 2075 | + * <target>.
---|
1306 | 2076 | */ |
---|
1307 | | -static noinline int |
---|
1308 | | -check_noncircular(struct lock_list *root, struct lock_class *target, |
---|
1309 | | - struct lock_list **target_entry) |
---|
| 2077 | +static noinline enum bfs_result |
---|
| 2078 | +check_path(struct held_lock *target, struct lock_list *src_entry, |
---|
| 2079 | + bool (*match)(struct lock_list *entry, void *data), |
---|
| 2080 | + struct lock_list **target_entry) |
---|
1310 | 2081 | { |
---|
1311 | | - int result; |
---|
| 2082 | + enum bfs_result ret; |
---|
| 2083 | + |
---|
| 2084 | + ret = __bfs_forwards(src_entry, target, match, target_entry); |
---|
| 2085 | + |
---|
| 2086 | + if (unlikely(bfs_error(ret))) |
---|
| 2087 | + print_bfs_bug(ret); |
---|
| 2088 | + |
---|
| 2089 | + return ret; |
---|
| 2090 | +} |
---|
| 2091 | + |
---|
| 2092 | +/* |
---|
| 2093 | + * Prove that the dependency graph starting at <src> can not |
---|
| 2094 | + * lead to <target>. If it can, there is a circle when adding |
---|
| 2095 | + * <target> -> <src> dependency. |
---|
| 2096 | + * |
---|
| 2097 | + * Print an error and return BFS_RMATCH if it does. |
---|
| 2098 | + */ |
---|
| 2099 | +static noinline enum bfs_result |
---|
| 2100 | +check_noncircular(struct held_lock *src, struct held_lock *target, |
---|
| 2101 | + struct lock_trace **const trace) |
---|
| 2102 | +{ |
---|
| 2103 | + enum bfs_result ret; |
---|
| 2104 | + struct lock_list *target_entry; |
---|
| 2105 | + struct lock_list src_entry; |
---|
| 2106 | + |
---|
| 2107 | + bfs_init_root(&src_entry, src); |
---|
1312 | 2108 | |
---|
1313 | 2109 | debug_atomic_inc(nr_cyclic_checks); |
---|
1314 | 2110 | |
---|
1315 | | - result = __bfs_forwards(root, target, class_equal, target_entry); |
---|
| 2111 | + ret = check_path(target, &src_entry, hlock_conflict, &target_entry); |
---|
1316 | 2112 | |
---|
1317 | | - return result; |
---|
| 2113 | + if (unlikely(ret == BFS_RMATCH)) { |
---|
| 2114 | + if (!*trace) { |
---|
| 2115 | + /* |
---|
| 2116 | + * If save_trace fails here, the printing might |
---|
| 2117 | + * trigger a WARN but because of the !nr_entries it |
---|
| 2118 | + * should not do bad things. |
---|
| 2119 | + */ |
---|
| 2120 | + *trace = save_trace(); |
---|
| 2121 | + } |
---|
| 2122 | + |
---|
| 2123 | + print_circular_bug(&src_entry, target_entry, src, target); |
---|
| 2124 | + } |
---|
| 2125 | + |
---|
| 2126 | + return ret; |
---|
1318 | 2127 | } |
---|
1319 | 2128 | |
---|
1320 | | -static noinline int |
---|
1321 | | -check_redundant(struct lock_list *root, struct lock_class *target, |
---|
1322 | | - struct lock_list **target_entry) |
---|
| 2129 | +#ifdef CONFIG_LOCKDEP_SMALL |
---|
| 2130 | +/* |
---|
| 2131 | + * Check that the dependency graph starting at <src> can lead to |
---|
| 2132 | + * <target> or not. If it can, <src> -> <target> dependency is already |
---|
| 2133 | + * in the graph. |
---|
| 2134 | + * |
---|
| 2135 | + * Return BFS_RMATCH if it does, or BFS_RNOMATCH if it does not, return BFS_E* if
---|
| 2136 | + * any error appears in the bfs search. |
---|
| 2137 | + */ |
---|
| 2138 | +static noinline enum bfs_result |
---|
| 2139 | +check_redundant(struct held_lock *src, struct held_lock *target) |
---|
1323 | 2140 | { |
---|
1324 | | - int result; |
---|
| 2141 | + enum bfs_result ret; |
---|
| 2142 | + struct lock_list *target_entry; |
---|
| 2143 | + struct lock_list src_entry; |
---|
| 2144 | + |
---|
| 2145 | + bfs_init_root(&src_entry, src); |
---|
| 2146 | + /* |
---|
| 2147 | + * Special setup for check_redundant(). |
---|
| 2148 | + * |
---|
| 2149 | + * To report redundant, we need to find a strong dependency path that |
---|
| 2150 | + * is equal to or stronger than <src> -> <target>. So if <src> is E, |
---|
| 2151 | + * we need to let __bfs() only search for a path starting at a -(E*)->, |
---|
| 2152 | + * we achieve this by setting the initial node's ->only_xr to true in |
---|
| 2153 | + * that case. And if <src> is S, we set initial ->only_xr to false
---|
| 2154 | + * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant. |
---|
| 2155 | + */ |
---|
| 2156 | + src_entry.only_xr = src->read == 0; |
---|
1325 | 2157 | |
---|
1326 | 2158 | debug_atomic_inc(nr_redundant_checks); |
---|
1327 | 2159 | |
---|
1328 | | - result = __bfs_forwards(root, target, class_equal, target_entry); |
---|
| 2160 | + ret = check_path(target, &src_entry, hlock_equal, &target_entry); |
---|
1329 | 2161 | |
---|
1330 | | - return result; |
---|
| 2162 | + if (ret == BFS_RMATCH) |
---|
| 2163 | + debug_atomic_inc(nr_redundant); |
---|
| 2164 | + |
---|
| 2165 | + return ret; |
---|
1331 | 2166 | } |
---|
| 2167 | +#endif |
---|
1332 | 2168 | |
---|
1333 | | -#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) |
---|
| 2169 | +#ifdef CONFIG_TRACE_IRQFLAGS |
---|
| 2170 | + |
---|
1334 | 2171 | /* |
---|
1335 | 2172 | * Forwards and backwards subgraph searching, for the purposes of |
---|
1336 | 2173 | * proving that two subgraphs can be connected by a new dependency |
---|
1337 | 2174 | * without creating any illegal irq-safe -> irq-unsafe lock dependency. |
---|
| 2175 | + * |
---|
| 2176 | + * An irq safe->unsafe deadlock happens with the following conditions:
---|
| 2177 | + * |
---|
| 2178 | + * 1) We have a strong dependency path A -> ... -> B |
---|
| 2179 | + * |
---|
| 2180 | + * 2) and we have ENABLED_IRQ usage of B and USED_IN_IRQ usage of A, therefore |
---|
| 2181 | + * irq can create a new dependency B -> A (consider the case that a holder |
---|
| 2182 | + * of B gets interrupted by an irq whose handler will try to acquire A). |
---|
| 2183 | + * |
---|
| 2184 | + * 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a |
---|
| 2185 | + * strong circle: |
---|
| 2186 | + * |
---|
| 2187 | + * For the usage bits of B: |
---|
| 2188 | + * a) if A -> B is -(*N)->, then B -> A could be any type, so any |
---|
| 2189 | + * ENABLED_IRQ usage suffices. |
---|
| 2190 | + * b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only |
---|
| 2191 | + * ENABLED_IRQ_*_READ usage suffices. |
---|
| 2192 | + * |
---|
| 2193 | + * For the usage bits of A: |
---|
| 2194 | + * c) if A -> B is -(E*)->, then B -> A could be any type, so any |
---|
| 2195 | + * USED_IN_IRQ usage suffices. |
---|
| 2196 | + * d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only |
---|
| 2197 | + * USED_IN_IRQ_*_READ usage suffices. |
---|
1338 | 2198 | */ |
---|
1339 | 2199 | |
---|
1340 | | -static inline int usage_match(struct lock_list *entry, void *bit) |
---|
| 2200 | +/* |
---|
| 2201 | + * There is a strong dependency path in the dependency graph: A -> B, and now |
---|
| 2202 | + * we need to decide which usage bit of A should be accumulated to detect |
---|
| 2203 | + * safe->unsafe bugs. |
---|
| 2204 | + * |
---|
| 2205 | + * Note that usage_accumulate() is used in backwards search, so ->only_xr |
---|
| 2206 | + * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true). |
---|
| 2207 | + * |
---|
| 2208 | + * As above, if only_xr is false, which means A -> B has -(E*)-> dependency |
---|
| 2209 | + * path, any usage of A should be considered. Otherwise, we should only |
---|
| 2210 | + * consider _READ usage. |
---|
| 2211 | + */ |
---|
| 2212 | +static inline bool usage_accumulate(struct lock_list *entry, void *mask) |
---|
1341 | 2213 | { |
---|
1342 | | - return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit); |
---|
| 2214 | + if (!entry->only_xr) |
---|
| 2215 | + *(unsigned long *)mask |= entry->class->usage_mask; |
---|
| 2216 | + else /* Mask out _READ usage bits */ |
---|
| 2217 | + *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ); |
---|
| 2218 | + |
---|
| 2219 | + return false; |
---|
1343 | 2220 | } |
---|
1344 | 2221 | |
---|
1345 | | - |
---|
| 2222 | +/* |
---|
| 2223 | + * There is a strong dependency path in the dependency graph: A -> B, and now |
---|
| 2224 | + * we need to decide which usage bit of B conflicts with the usage bits of A, |
---|
| 2225 | + * i.e. which usage bit of B may introduce safe->unsafe deadlocks. |
---|
| 2226 | + * |
---|
| 2227 | + * As above, if only_xr is false, which means A -> B has -(*N)-> dependency |
---|
| 2228 | + * path, any usage of B should be considered. Otherwise, we should only |
---|
| 2229 | + * consider _READ usage. |
---|
| 2230 | + */ |
---|
| 2231 | +static inline bool usage_match(struct lock_list *entry, void *mask) |
---|
| 2232 | +{ |
---|
| 2233 | + if (!entry->only_xr) |
---|
| 2234 | + return !!(entry->class->usage_mask & *(unsigned long *)mask); |
---|
| 2235 | + else /* Mask out _READ usage bits */ |
---|
| 2236 | + return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask); |
---|
| 2237 | +} |
---|
1346 | 2238 | |
---|
1347 | 2239 | /* |
---|
1348 | 2240 | * Find a node in the forwards-direction dependency sub-graph starting |
---|
1349 | 2241 | * at @root->class that matches @bit. |
---|
1350 | 2242 | * |
---|
1351 | | - * Return 0 if such a node exists in the subgraph, and put that node |
---|
| 2243 | + * Return BFS_RMATCH if such a node exists in the subgraph, and put that node
---|
1352 | 2244 | * into *@target_entry. |
---|
1353 | | - * |
---|
1354 | | - * Return 1 otherwise and keep *@target_entry unchanged. |
---|
1355 | | - * Return <0 on error. |
---|
1356 | 2245 | */ |
---|
1357 | | -static int |
---|
1358 | | -find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, |
---|
| 2246 | +static enum bfs_result |
---|
| 2247 | +find_usage_forwards(struct lock_list *root, unsigned long usage_mask, |
---|
1359 | 2248 | struct lock_list **target_entry) |
---|
1360 | 2249 | { |
---|
1361 | | - int result; |
---|
| 2250 | + enum bfs_result result; |
---|
1362 | 2251 | |
---|
1363 | 2252 | debug_atomic_inc(nr_find_usage_forwards_checks); |
---|
1364 | 2253 | |
---|
1365 | | - result = __bfs_forwards(root, (void *)bit, usage_match, target_entry); |
---|
| 2254 | + result = __bfs_forwards(root, &usage_mask, usage_match, target_entry); |
---|
1366 | 2255 | |
---|
1367 | 2256 | return result; |
---|
1368 | 2257 | } |
---|
.. | .. |
---|
1370 | 2259 | /* |
---|
1371 | 2260 | * Find a node in the backwards-direction dependency sub-graph starting |
---|
1372 | 2261 | * at @root->class that matches @bit. |
---|
1373 | | - * |
---|
1374 | | - * Return 0 if such a node exists in the subgraph, and put that node |
---|
1375 | | - * into *@target_entry. |
---|
1376 | | - * |
---|
1377 | | - * Return 1 otherwise and keep *@target_entry unchanged. |
---|
1378 | | - * Return <0 on error. |
---|
1379 | 2262 | */ |
---|
1380 | | -static int |
---|
1381 | | -find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, |
---|
| 2263 | +static enum bfs_result |
---|
| 2264 | +find_usage_backwards(struct lock_list *root, unsigned long usage_mask, |
---|
1382 | 2265 | struct lock_list **target_entry) |
---|
1383 | 2266 | { |
---|
1384 | | - int result; |
---|
| 2267 | + enum bfs_result result; |
---|
1385 | 2268 | |
---|
1386 | 2269 | debug_atomic_inc(nr_find_usage_backwards_checks); |
---|
1387 | 2270 | |
---|
1388 | | - result = __bfs_backwards(root, (void *)bit, usage_match, target_entry); |
---|
| 2271 | + result = __bfs_backwards(root, &usage_mask, usage_match, target_entry); |
---|
1389 | 2272 | |
---|
1390 | 2273 | return result; |
---|
1391 | 2274 | } |
---|
.. | .. |
---|
1396 | 2279 | |
---|
1397 | 2280 | printk("%*s->", depth, ""); |
---|
1398 | 2281 | print_lock_name(class); |
---|
1399 | | - printk(KERN_CONT " ops: %lu", class->ops); |
---|
| 2282 | +#ifdef CONFIG_DEBUG_LOCKDEP |
---|
| 2283 | + printk(KERN_CONT " ops: %lu", debug_class_ops_read(class)); |
---|
| 2284 | +#endif |
---|
1400 | 2285 | printk(KERN_CONT " {\n"); |
---|
1401 | 2286 | |
---|
1402 | | - for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { |
---|
| 2287 | + for (bit = 0; bit < LOCK_TRACE_STATES; bit++) { |
---|
1403 | 2288 | if (class->usage_mask & (1 << bit)) { |
---|
1404 | 2289 | int len = depth; |
---|
1405 | 2290 | |
---|
1406 | 2291 | len += printk("%*s %s", depth, "", usage_str[bit]); |
---|
1407 | 2292 | len += printk(KERN_CONT " at:\n"); |
---|
1408 | | - print_stack_trace(class->usage_traces + bit, len); |
---|
| 2293 | + print_lock_trace(class->usage_traces[bit], len); |
---|
1409 | 2294 | } |
---|
1410 | 2295 | } |
---|
1411 | 2296 | printk("%*s }\n", depth, ""); |
---|
.. | .. |
---|
1415 | 2300 | } |
---|
1416 | 2301 | |
---|
1417 | 2302 | /* |
---|
1418 | | - * printk the shortest lock dependencies from @start to @end in reverse order: |
---|
| 2303 | + * Dependency path printing: |
---|
| 2304 | + * |
---|
| 2305 | + * After BFS we get a lock dependency path (linked via ->parent of lock_list), |
---|
| 2306 | + * printing out each lock in the dependency path will help on understanding how |
---|
| 2307 | + * the deadlock could happen. Here are some details about dependency path |
---|
| 2308 | + * printing: |
---|
| 2309 | + * |
---|
| 2310 | + * 1) A lock_list can be either forwards or backwards for a lock dependency, |
---|
| 2311 | + * for a lock dependency A -> B, there are two lock_lists: |
---|
| 2312 | + * |
---|
| 2313 | + * a) lock_list in the ->locks_after list of A, whose ->class is B and |
---|
| 2314 | + * ->links_to is A. In this case, we can say the lock_list is |
---|
| 2315 | + * "A -> B" (forwards case). |
---|
| 2316 | + * |
---|
| 2317 | + * b) lock_list in the ->locks_before list of B, whose ->class is A |
---|
| 2318 | + * and ->links_to is B. In this case, we can say the lock_list is |
---|
| 2319 | + *     "B <- A" (backwards case).
---|
| 2320 | + * |
---|
| 2321 | + * The ->trace of both a) and b) point to the call trace where B was |
---|
| 2322 | + * acquired with A held. |
---|
| 2323 | + * |
---|
| 2324 | + * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't |
---|
| 2325 | + * represent a certain lock dependency, it only provides an initial entry |
---|
| 2326 | + * for BFS. For example, BFS may introduce a "helper" lock_list whose |
---|
| 2327 | + * ->class is A, as a result BFS will search all dependencies starting with |
---|
| 2328 | + * A, e.g. A -> B or A -> C. |
---|
| 2329 | + * |
---|
| 2330 | + * The notation of a forwards helper lock_list is like "-> A", which means |
---|
| 2331 | + * we should search the forwards dependencies starting with "A", e.g A -> B |
---|
| 2332 | + * or A -> C. |
---|
| 2333 | + * |
---|
| 2334 | + *     The notation of a backwards helper lock_list is like "<- B", which means
---|
| 2335 | + * we should search the backwards dependencies ending with "B", e.g. |
---|
| 2336 | + * B <- A or B <- C. |
---|
| 2337 | + */ |
---|
| 2338 | + |
---|
| 2339 | +/* |
---|
| 2340 | + * printk the shortest lock dependencies from @root to @leaf in reverse order. |
---|
| 2341 | + * |
---|
| 2342 | + * We have a lock dependency path as follow: |
---|
| 2343 | + * |
---|
| 2344 | + * @root @leaf |
---|
| 2345 | + * | | |
---|
| 2346 | + * V V |
---|
| 2347 | + * ->parent ->parent |
---|
| 2348 | + * | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list | |
---|
| 2349 | + * | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln| |
---|
| 2350 | + * |
---|
| 2351 | + * , so it's natural that we start from @leaf and print every ->class and |
---|
| 2352 | + * ->trace until we reach the @root. |
---|
1419 | 2353 | */ |
---|
1420 | 2354 | static void __used |
---|
1421 | 2355 | print_shortest_lock_dependencies(struct lock_list *leaf, |
---|
1422 | | - struct lock_list *root) |
---|
| 2356 | + struct lock_list *root) |
---|
1423 | 2357 | { |
---|
1424 | 2358 | struct lock_list *entry = leaf; |
---|
1425 | 2359 | int depth; |
---|
.. | .. |
---|
1430 | 2364 | do { |
---|
1431 | 2365 | print_lock_class_header(entry->class, depth); |
---|
1432 | 2366 | printk("%*s ... acquired at:\n", depth, ""); |
---|
1433 | | - print_stack_trace(&entry->trace, 2); |
---|
| 2367 | + print_lock_trace(entry->trace, 2); |
---|
1434 | 2368 | printk("\n"); |
---|
1435 | 2369 | |
---|
1436 | 2370 | if (depth == 0 && (entry != root)) { |
---|
.. | .. |
---|
1441 | 2375 | entry = get_lock_parent(entry); |
---|
1442 | 2376 | depth--; |
---|
1443 | 2377 | } while (entry && (depth >= 0)); |
---|
| 2378 | +} |
---|
1444 | 2379 | |
---|
1445 | | - return; |
---|
| 2380 | +/* |
---|
| 2381 | + * printk the shortest lock dependencies from @leaf to @root. |
---|
| 2382 | + * |
---|
| 2383 | + * We have a lock dependency path (from a backwards search) as follow: |
---|
| 2384 | + * |
---|
| 2385 | + * @leaf @root |
---|
| 2386 | + * | | |
---|
| 2387 | + * V V |
---|
| 2388 | + * ->parent ->parent |
---|
| 2389 | + * | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list | |
---|
| 2390 | + * | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln | |
---|
| 2391 | + * |
---|
| 2392 | + * , so when we iterate from @leaf to @root, we actually print the lock |
---|
| 2393 | + * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order. |
---|
| 2394 | + * |
---|
| 2395 | + * Another thing to notice here is that ->class of L2 <- L1 is L1, while the |
---|
| 2396 | + * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call |
---|
| 2397 | + * trace of L1 in the dependency path, which is alright, because most of the |
---|
| 2398 | + * time we can figure out where L1 is held from the call trace of L2. |
---|
| 2399 | + */ |
---|
| 2400 | +static void __used |
---|
| 2401 | +print_shortest_lock_dependencies_backwards(struct lock_list *leaf, |
---|
| 2402 | + struct lock_list *root) |
---|
| 2403 | +{ |
---|
| 2404 | + struct lock_list *entry = leaf; |
---|
| 2405 | + const struct lock_trace *trace = NULL; |
---|
| 2406 | + int depth; |
---|
| 2407 | + |
---|
| 2408 | + /* compute the depth from the tree generated by BFS */
---|
| 2409 | + depth = get_lock_depth(leaf); |
---|
| 2410 | + |
---|
| 2411 | + do { |
---|
| 2412 | + print_lock_class_header(entry->class, depth); |
---|
| 2413 | + if (trace) { |
---|
| 2414 | + printk("%*s ... acquired at:\n", depth, ""); |
---|
| 2415 | + print_lock_trace(trace, 2); |
---|
| 2416 | + printk("\n"); |
---|
| 2417 | + } |
---|
| 2418 | + |
---|
| 2419 | + /* |
---|
| 2420 | + * Record the pointer to the trace for the next lock_list |
---|
| 2421 | + * entry, see the comments for the function. |
---|
| 2422 | + */ |
---|
| 2423 | + trace = entry->trace; |
---|
| 2424 | + |
---|
| 2425 | + if (depth == 0 && (entry != root)) { |
---|
| 2426 | + printk("lockdep:%s bad path found in chain graph\n", __func__); |
---|
| 2427 | + break; |
---|
| 2428 | + } |
---|
| 2429 | + |
---|
| 2430 | + entry = get_lock_parent(entry); |
---|
| 2431 | + depth--; |
---|
| 2432 | + } while (entry && (depth >= 0)); |
---|
1446 | 2433 | } |
---|
1447 | 2434 | |
---|
1448 | 2435 | static void |
---|
.. | .. |
---|
1501 | 2488 | printk("\n *** DEADLOCK ***\n\n"); |
---|
1502 | 2489 | } |
---|
1503 | 2490 | |
---|
1504 | | -static int |
---|
| 2491 | +static void |
---|
1505 | 2492 | print_bad_irq_dependency(struct task_struct *curr, |
---|
1506 | 2493 | struct lock_list *prev_root, |
---|
1507 | 2494 | struct lock_list *next_root, |
---|
.. | .. |
---|
1514 | 2501 | const char *irqclass) |
---|
1515 | 2502 | { |
---|
1516 | 2503 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
---|
1517 | | - return 0; |
---|
| 2504 | + return; |
---|
1518 | 2505 | |
---|
1519 | 2506 | pr_warn("\n"); |
---|
1520 | 2507 | pr_warn("=====================================================\n"); |
---|
.. | .. |
---|
1524 | 2511 | pr_warn("-----------------------------------------------------\n"); |
---|
1525 | 2512 | pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", |
---|
1526 | 2513 | curr->comm, task_pid_nr(curr), |
---|
1527 | | - curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, |
---|
| 2514 | + lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, |
---|
1528 | 2515 | curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, |
---|
1529 | | - curr->hardirqs_enabled, |
---|
| 2516 | + lockdep_hardirqs_enabled(), |
---|
1530 | 2517 | curr->softirqs_enabled); |
---|
1531 | 2518 | print_lock(next); |
---|
1532 | 2519 | |
---|
.. | .. |
---|
1543 | 2530 | print_lock_name(backwards_entry->class); |
---|
1544 | 2531 | pr_warn("\n... which became %s-irq-safe at:\n", irqclass); |
---|
1545 | 2532 | |
---|
1546 | | - print_stack_trace(backwards_entry->class->usage_traces + bit1, 1); |
---|
| 2533 | + print_lock_trace(backwards_entry->class->usage_traces[bit1], 1); |
---|
1547 | 2534 | |
---|
1548 | 2535 | pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass); |
---|
1549 | 2536 | print_lock_name(forwards_entry->class); |
---|
1550 | 2537 | pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass); |
---|
1551 | 2538 | pr_warn("..."); |
---|
1552 | 2539 | |
---|
1553 | | - print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); |
---|
| 2540 | + print_lock_trace(forwards_entry->class->usage_traces[bit2], 1); |
---|
1554 | 2541 | |
---|
1555 | 2542 | pr_warn("\nother info that might help us debug this:\n\n"); |
---|
1556 | 2543 | print_irq_lock_scenario(backwards_entry, forwards_entry, |
---|
.. | .. |
---|
1559 | 2546 | lockdep_print_held_locks(curr); |
---|
1560 | 2547 | |
---|
1561 | 2548 | pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass); |
---|
1562 | | - if (!save_trace(&prev_root->trace)) |
---|
1563 | | - return 0; |
---|
1564 | | - print_shortest_lock_dependencies(backwards_entry, prev_root); |
---|
| 2549 | + prev_root->trace = save_trace(); |
---|
| 2550 | + if (!prev_root->trace) |
---|
| 2551 | + return; |
---|
| 2552 | + print_shortest_lock_dependencies_backwards(backwards_entry, prev_root); |
---|
1565 | 2553 | |
---|
1566 | 2554 | pr_warn("\nthe dependencies between the lock to be acquired"); |
---|
1567 | 2555 | pr_warn(" and %s-irq-unsafe lock:\n", irqclass); |
---|
1568 | | - if (!save_trace(&next_root->trace)) |
---|
1569 | | - return 0; |
---|
| 2556 | + next_root->trace = save_trace(); |
---|
| 2557 | + if (!next_root->trace) |
---|
| 2558 | + return; |
---|
1570 | 2559 | print_shortest_lock_dependencies(forwards_entry, next_root); |
---|
1571 | 2560 | |
---|
1572 | 2561 | pr_warn("\nstack backtrace:\n"); |
---|
1573 | 2562 | dump_stack(); |
---|
1574 | | - |
---|
1575 | | - return 0; |
---|
1576 | | -} |
---|
1577 | | - |
---|
1578 | | -static int |
---|
1579 | | -check_usage(struct task_struct *curr, struct held_lock *prev, |
---|
1580 | | - struct held_lock *next, enum lock_usage_bit bit_backwards, |
---|
1581 | | - enum lock_usage_bit bit_forwards, const char *irqclass) |
---|
1582 | | -{ |
---|
1583 | | - int ret; |
---|
1584 | | - struct lock_list this, that; |
---|
1585 | | - struct lock_list *uninitialized_var(target_entry); |
---|
1586 | | - struct lock_list *uninitialized_var(target_entry1); |
---|
1587 | | - |
---|
1588 | | - this.parent = NULL; |
---|
1589 | | - |
---|
1590 | | - this.class = hlock_class(prev); |
---|
1591 | | - ret = find_usage_backwards(&this, bit_backwards, &target_entry); |
---|
1592 | | - if (ret < 0) |
---|
1593 | | - return print_bfs_bug(ret); |
---|
1594 | | - if (ret == 1) |
---|
1595 | | - return ret; |
---|
1596 | | - |
---|
1597 | | - that.parent = NULL; |
---|
1598 | | - that.class = hlock_class(next); |
---|
1599 | | - ret = find_usage_forwards(&that, bit_forwards, &target_entry1); |
---|
1600 | | - if (ret < 0) |
---|
1601 | | - return print_bfs_bug(ret); |
---|
1602 | | - if (ret == 1) |
---|
1603 | | - return ret; |
---|
1604 | | - |
---|
1605 | | - return print_bad_irq_dependency(curr, &this, &that, |
---|
1606 | | - target_entry, target_entry1, |
---|
1607 | | - prev, next, |
---|
1608 | | - bit_backwards, bit_forwards, irqclass); |
---|
1609 | 2563 | } |
---|
1610 | 2564 | |
---|
1611 | 2565 | static const char *state_names[] = { |
---|
.. | .. |
---|
1624 | 2578 | |
---|
1625 | 2579 | static inline const char *state_name(enum lock_usage_bit bit) |
---|
1626 | 2580 | { |
---|
1627 | | - return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2]; |
---|
| 2581 | + if (bit & LOCK_USAGE_READ_MASK) |
---|
| 2582 | + return state_rnames[bit >> LOCK_USAGE_DIR_MASK]; |
---|
| 2583 | + else |
---|
| 2584 | + return state_names[bit >> LOCK_USAGE_DIR_MASK]; |
---|
1628 | 2585 | } |
---|
1629 | 2586 | |
---|
| 2587 | +/* |
---|
| 2588 | + * The bit number is encoded like: |
---|
| 2589 | + * |
---|
| 2590 | + * bit0: 0 exclusive, 1 read lock |
---|
| 2591 | + * bit1: 0 used in irq, 1 irq enabled |
---|
| 2592 | + * bit2-n: state |
---|
| 2593 | + */ |
---|
1630 | 2594 | static int exclusive_bit(int new_bit) |
---|
1631 | 2595 | { |
---|
1632 | | - /* |
---|
1633 | | - * USED_IN |
---|
1634 | | - * USED_IN_READ |
---|
1635 | | - * ENABLED |
---|
1636 | | - * ENABLED_READ |
---|
1637 | | - * |
---|
1638 | | - * bit 0 - write/read |
---|
1639 | | - * bit 1 - used_in/enabled |
---|
1640 | | - * bit 2+ state |
---|
1641 | | - */ |
---|
1642 | | - |
---|
1643 | | - int state = new_bit & ~3; |
---|
1644 | | - int dir = new_bit & 2; |
---|
| 2596 | + int state = new_bit & LOCK_USAGE_STATE_MASK; |
---|
| 2597 | + int dir = new_bit & LOCK_USAGE_DIR_MASK; |
---|
1645 | 2598 | |
---|
1646 | 2599 | /* |
---|
1647 | 2600 | * keep state, bit flip the direction and strip read. |
---|
1648 | 2601 | */ |
---|
1649 | | - return state | (dir ^ 2); |
---|
| 2602 | + return state | (dir ^ LOCK_USAGE_DIR_MASK); |
---|
1650 | 2603 | } |
---|
1651 | 2604 | |
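A standalone sketch of the bit flip performed by exclusive_bit() (illustration only; the LOCK_USAGE_* mask values are assumed from the encoding described in the comment above):

	#include <assert.h>

	#define LOCK_USAGE_READ_MASK  1
	#define LOCK_USAGE_DIR_MASK   2
	#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

	static int exclusive_bit(int new_bit)
	{
		int state = new_bit & LOCK_USAGE_STATE_MASK;
		int dir = new_bit & LOCK_USAGE_DIR_MASK;

		/* keep state, bit flip the direction and strip read */
		return state | (dir ^ LOCK_USAGE_DIR_MASK);
	}

	int main(void)
	{
		/* Within one state: USED_IN = 0, USED_IN_READ = 1, ENABLED = 2, ENABLED_READ = 3 */
		assert(exclusive_bit(0) == 2);	/* USED_IN_*       -> ENABLED_* */
		assert(exclusive_bit(1) == 2);	/* USED_IN_*_READ  -> ENABLED_* (read stripped) */
		assert(exclusive_bit(2) == 0);	/* ENABLED_*       -> USED_IN_* */
		assert(exclusive_bit(3) == 0);	/* ENABLED_*_READ  -> USED_IN_* */
		return 0;
	}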
---|
1652 | | -static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, |
---|
1653 | | - struct held_lock *next, enum lock_usage_bit bit) |
---|
| 2605 | +/* |
---|
| 2606 | + * Observe that when given a bitmask where each bitnr is encoded as above, a |
---|
| 2607 | + * right shift of the mask transforms the individual bitnrs as -1 and |
---|
| 2608 | + * conversely, a left shift transforms into +1 for the individual bitnrs. |
---|
| 2609 | + * |
---|
| 2610 | + * So for all bits whose number has LOCK_ENABLED_* set (bitnr1 == 1), we can
---|
| 2611 | + * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0) |
---|
| 2612 | + * instead by subtracting the bit number by 2, or shifting the mask right by 2. |
---|
| 2613 | + * |
---|
| 2614 | + * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2. |
---|
| 2615 | + * |
---|
| 2616 | + * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is |
---|
| 2617 | + * all bits set) and recompose with bitnr1 flipped. |
---|
| 2618 | + */ |
---|
| 2619 | +static unsigned long invert_dir_mask(unsigned long mask) |
---|
1654 | 2620 | { |
---|
1655 | | - /* |
---|
1656 | | - * Prove that the new dependency does not connect a hardirq-safe |
---|
1657 | | - * lock with a hardirq-unsafe lock - to achieve this we search |
---|
1658 | | - * the backwards-subgraph starting at <prev>, and the |
---|
1659 | | - * forwards-subgraph starting at <next>: |
---|
1660 | | - */ |
---|
1661 | | - if (!check_usage(curr, prev, next, bit, |
---|
1662 | | - exclusive_bit(bit), state_name(bit))) |
---|
1663 | | - return 0; |
---|
| 2621 | + unsigned long excl = 0; |
---|
1664 | 2622 | |
---|
1665 | | - bit++; /* _READ */ |
---|
| 2623 | + /* Invert dir */ |
---|
| 2624 | + excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK; |
---|
| 2625 | + excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK; |
---|
1666 | 2626 | |
---|
1667 | | - /* |
---|
1668 | | - * Prove that the new dependency does not connect a hardirq-safe-read |
---|
1669 | | - * lock with a hardirq-unsafe lock - to achieve this we search |
---|
1670 | | - * the backwards-subgraph starting at <prev>, and the |
---|
1671 | | - * forwards-subgraph starting at <next>: |
---|
1672 | | - */ |
---|
1673 | | - if (!check_usage(curr, prev, next, bit, |
---|
1674 | | - exclusive_bit(bit), state_name(bit))) |
---|
1675 | | - return 0; |
---|
1676 | | - |
---|
1677 | | - return 1; |
---|
| 2627 | + return excl; |
---|
1678 | 2628 | } |
---|
1679 | 2629 | |
---|
1680 | | -static int |
---|
1681 | | -check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, |
---|
1682 | | - struct held_lock *next) |
---|
| 2630 | +/* |
---|
| 2631 | + * Note that a LOCK_ENABLED_IRQ_*_READ usage and a LOCK_USED_IN_IRQ_*_READ |
---|
| 2632 | + * usage may cause deadlock too, for example: |
---|
| 2633 | + * |
---|
| 2634 | + * P1 P2 |
---|
| 2635 | + * <irq disabled> |
---|
| 2636 | + * write_lock(l1); <irq enabled> |
---|
| 2637 | + * read_lock(l2); |
---|
| 2638 | + * write_lock(l2); |
---|
| 2639 | + * <in irq> |
---|
| 2640 | + * read_lock(l1); |
---|
| 2641 | + * |
---|
| 2642 | + * , in the above case, l1 will be marked as LOCK_USED_IN_HARDIRQ_READ and l2
---|
| 2643 | + * will be marked as LOCK_ENABLED_HARDIRQ_READ, and this is a possible
---|
| 2644 | + * deadlock. |
---|
| 2645 | + * |
---|
| 2646 | + * In fact, all of the following cases may cause deadlocks: |
---|
| 2647 | + * |
---|
| 2648 | + * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_* |
---|
| 2649 | + * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_* |
---|
| 2650 | + * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ |
---|
| 2651 | + * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ |
---|
| 2652 | + * |
---|
| 2653 | + * As a result, to calculate the "exclusive mask", first we invert the |
---|
| 2654 | + * direction (USED_IN/ENABLED) of the original mask, and 1) for all bits with |
---|
| 2655 | + * bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*). 2) for all |
---|
| 2656 | + * bits with bitnr0 cleared (LOCK_*_READ), add those with bitnr0 set (LOCK_*). |
---|
| 2657 | + */ |
---|
| 2658 | +static unsigned long exclusive_mask(unsigned long mask) |
---|
1683 | 2659 | { |
---|
1684 | | -#define LOCKDEP_STATE(__STATE) \ |
---|
1685 | | - if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ |
---|
1686 | | - return 0; |
---|
1687 | | -#include "lockdep_states.h" |
---|
1688 | | -#undef LOCKDEP_STATE |
---|
| 2660 | + unsigned long excl = invert_dir_mask(mask); |
---|
1689 | 2661 | |
---|
1690 | | - return 1; |
---|
| 2662 | + excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK; |
---|
| 2663 | + excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK; |
---|
| 2664 | + |
---|
| 2665 | + return excl; |
---|
1691 | 2666 | } |
---|
1692 | 2667 | |
---|
1693 | | -static void inc_chains(void) |
---|
| 2668 | +/* |
---|
| 2669 | + * Retrieve the _possible_ original mask to which @mask is |
---|
| 2670 | + * exclusive. Ie: this is the opposite of exclusive_mask(). |
---|
| 2671 | + * Note that 2 possible original bits can match an exclusive |
---|
| 2672 | + * bit: one has LOCK_USAGE_READ_MASK set, the other has it |
---|
| 2673 | + * cleared. So both are returned for each exclusive bit. |
---|
| 2674 | + */ |
---|
| 2675 | +static unsigned long original_mask(unsigned long mask) |
---|
1694 | 2676 | { |
---|
1695 | | - if (current->hardirq_context) |
---|
1696 | | - nr_hardirq_chains++; |
---|
1697 | | - else { |
---|
1698 | | - if (current->softirq_context) |
---|
1699 | | - nr_softirq_chains++; |
---|
1700 | | - else |
---|
1701 | | - nr_process_chains++; |
---|
| 2677 | + unsigned long excl = invert_dir_mask(mask); |
---|
| 2678 | + |
---|
| 2679 | + /* Include read in existing usages */ |
---|
| 2680 | + excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK; |
---|
| 2681 | + excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK; |
---|
| 2682 | + |
---|
| 2683 | + return excl; |
---|
| 2684 | +} |
---|
| 2685 | + |
---|
| 2686 | +/* |
---|
| 2687 | + * Find the first pair of bit match between an original |
---|
| 2688 | + * usage mask and an exclusive usage mask. |
---|
| 2689 | + */ |
---|
| 2690 | +static int find_exclusive_match(unsigned long mask, |
---|
| 2691 | + unsigned long excl_mask, |
---|
| 2692 | + enum lock_usage_bit *bitp, |
---|
| 2693 | + enum lock_usage_bit *excl_bitp) |
---|
| 2694 | +{ |
---|
| 2695 | + int bit, excl, excl_read; |
---|
| 2696 | + |
---|
| 2697 | + for_each_set_bit(bit, &mask, LOCK_USED) { |
---|
| 2698 | + /* |
---|
| 2699 | + * exclusive_bit() strips the read bit, however, |
---|
| 2700 | + * LOCK_ENABLED_IRQ_*_READ may cause deadlocks too, so we need |
---|
| 2701 | + * to search excl | LOCK_USAGE_READ_MASK as well. |
---|
| 2702 | + */ |
---|
| 2703 | + excl = exclusive_bit(bit); |
---|
| 2704 | + excl_read = excl | LOCK_USAGE_READ_MASK; |
---|
| 2705 | + if (excl_mask & lock_flag(excl)) { |
---|
| 2706 | + *bitp = bit; |
---|
| 2707 | + *excl_bitp = excl; |
---|
| 2708 | + return 0; |
---|
| 2709 | + } else if (excl_mask & lock_flag(excl_read)) { |
---|
| 2710 | + *bitp = bit; |
---|
| 2711 | + *excl_bitp = excl_read; |
---|
| 2712 | + return 0; |
---|
| 2713 | + } |
---|
1702 | 2714 | } |
---|
| 2715 | + return -1; |
---|
| 2716 | +} |
---|
| 2717 | + |
---|
| 2718 | +/* |
---|
| 2719 | + * Prove that the new dependency does not connect a hardirq-safe(-read) |
---|
| 2720 | + * lock with a hardirq-unsafe lock - to achieve this we search |
---|
| 2721 | + * the backwards-subgraph starting at <prev>, and the |
---|
| 2722 | + * forwards-subgraph starting at <next>: |
---|
| 2723 | + */ |
---|
| 2724 | +static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, |
---|
| 2725 | + struct held_lock *next) |
---|
| 2726 | +{ |
---|
| 2727 | + unsigned long usage_mask = 0, forward_mask, backward_mask; |
---|
| 2728 | + enum lock_usage_bit forward_bit = 0, backward_bit = 0; |
---|
| 2729 | + struct lock_list *target_entry1; |
---|
| 2730 | + struct lock_list *target_entry; |
---|
| 2731 | + struct lock_list this, that; |
---|
| 2732 | + enum bfs_result ret; |
---|
| 2733 | + |
---|
| 2734 | + /* |
---|
| 2735 | + * Step 1: gather all hard/soft IRQs usages backward in an |
---|
| 2736 | + * accumulated usage mask. |
---|
| 2737 | + */ |
---|
| 2738 | + bfs_init_rootb(&this, prev); |
---|
| 2739 | + |
---|
| 2740 | + ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL); |
---|
| 2741 | + if (bfs_error(ret)) { |
---|
| 2742 | + print_bfs_bug(ret); |
---|
| 2743 | + return 0; |
---|
| 2744 | + } |
---|
| 2745 | + |
---|
| 2746 | + usage_mask &= LOCKF_USED_IN_IRQ_ALL; |
---|
| 2747 | + if (!usage_mask) |
---|
| 2748 | + return 1; |
---|
| 2749 | + |
---|
| 2750 | + /* |
---|
| 2751 | + * Step 2: find exclusive uses forward that match the previous |
---|
| 2752 | + * backward accumulated mask. |
---|
| 2753 | + */ |
---|
| 2754 | + forward_mask = exclusive_mask(usage_mask); |
---|
| 2755 | + |
---|
| 2756 | + bfs_init_root(&that, next); |
---|
| 2757 | + |
---|
| 2758 | + ret = find_usage_forwards(&that, forward_mask, &target_entry1); |
---|
| 2759 | + if (bfs_error(ret)) { |
---|
| 2760 | + print_bfs_bug(ret); |
---|
| 2761 | + return 0; |
---|
| 2762 | + } |
---|
| 2763 | + if (ret == BFS_RNOMATCH) |
---|
| 2764 | + return 1; |
---|
| 2765 | + |
---|
| 2766 | + /* |
---|
| 2767 | + * Step 3: we found a bad match! Now retrieve a lock from the backward |
---|
| 2768 | + * list whose usage mask matches the exclusive usage mask from the |
---|
| 2769 | + * lock found on the forward list. |
---|
| 2770 | + * |
---|
| 2771 | + * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering |
---|
| 2772 | + * the follow case: |
---|
| 2773 | + * |
---|
| 2774 | + * When trying to add A -> B to the graph, we find that there is a |
---|
| 2775 | + * hardirq-safe L, that L -> ... -> A, and another hardirq-unsafe M, |
---|
| 2776 | + * that B -> ... -> M. However M is **softirq-safe**, if we use exact |
---|
| 2777 | + * invert bits of M's usage_mask, we will find another lock N that is |
---|
| 2778 | + * **softirq-unsafe** and N -> ... -> A, however N -> .. -> M will not |
---|
| 2779 | + * cause a inversion deadlock. |
---|
| 2780 | + */ |
---|
| 2781 | + backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL); |
---|
| 2782 | + |
---|
| 2783 | + ret = find_usage_backwards(&this, backward_mask, &target_entry); |
---|
| 2784 | + if (bfs_error(ret)) { |
---|
| 2785 | + print_bfs_bug(ret); |
---|
| 2786 | + return 0; |
---|
| 2787 | + } |
---|
| 2788 | + if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH)) |
---|
| 2789 | + return 1; |
---|
| 2790 | + |
---|
| 2791 | + /* |
---|
| 2792 | + * Step 4: narrow down to a pair of incompatible usage bits |
---|
| 2793 | + * and report it. |
---|
| 2794 | + */ |
---|
| 2795 | + ret = find_exclusive_match(target_entry->class->usage_mask, |
---|
| 2796 | + target_entry1->class->usage_mask, |
---|
| 2797 | + &backward_bit, &forward_bit); |
---|
| 2798 | + if (DEBUG_LOCKS_WARN_ON(ret == -1)) |
---|
| 2799 | + return 1; |
---|
| 2800 | + |
---|
| 2801 | + print_bad_irq_dependency(curr, &this, &that, |
---|
| 2802 | + target_entry, target_entry1, |
---|
| 2803 | + prev, next, |
---|
| 2804 | + backward_bit, forward_bit, |
---|
| 2805 | + state_name(backward_bit)); |
---|
| 2806 | + |
---|
| 2807 | + return 0; |
---|
1703 | 2808 | } |
---|
1704 | 2809 | |
---|
1705 | 2810 | #else |
---|
1706 | 2811 | |
---|
1707 | | -static inline int |
---|
1708 | | -check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, |
---|
1709 | | - struct held_lock *next) |
---|
| 2812 | +static inline int check_irq_usage(struct task_struct *curr, |
---|
| 2813 | + struct held_lock *prev, struct held_lock *next) |
---|
1710 | 2814 | { |
---|
1711 | 2815 | return 1; |
---|
1712 | 2816 | } |
---|
| 2817 | +#endif /* CONFIG_TRACE_IRQFLAGS */ |
---|
1713 | 2818 | |
---|
1714 | | -static inline void inc_chains(void) |
---|
| 2819 | +static void inc_chains(int irq_context) |
---|
1715 | 2820 | { |
---|
1716 | | - nr_process_chains++; |
---|
| 2821 | + if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) |
---|
| 2822 | + nr_hardirq_chains++; |
---|
| 2823 | + else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) |
---|
| 2824 | + nr_softirq_chains++; |
---|
| 2825 | + else |
---|
| 2826 | + nr_process_chains++; |
---|
1717 | 2827 | } |
---|
1718 | 2828 | |
---|
1719 | | -#endif |
---|
| 2829 | +static void dec_chains(int irq_context) |
---|
| 2830 | +{ |
---|
| 2831 | + if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) |
---|
| 2832 | + nr_hardirq_chains--; |
---|
| 2833 | + else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) |
---|
| 2834 | + nr_softirq_chains--; |
---|
| 2835 | + else |
---|
| 2836 | + nr_process_chains--; |
---|
| 2837 | +} |
---|
1720 | 2838 | |
---|
1721 | 2839 | static void |
---|
1722 | | -print_deadlock_scenario(struct held_lock *nxt, |
---|
1723 | | - struct held_lock *prv) |
---|
| 2840 | +print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv) |
---|
1724 | 2841 | { |
---|
1725 | 2842 | struct lock_class *next = hlock_class(nxt); |
---|
1726 | 2843 | struct lock_class *prev = hlock_class(prv); |
---|
.. | .. |
---|
1738 | 2855 | printk(" May be due to missing lock nesting notation\n\n"); |
---|
1739 | 2856 | } |
---|
1740 | 2857 | |
---|
1741 | | -static int |
---|
| 2858 | +static void |
---|
1742 | 2859 | print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, |
---|
1743 | 2860 | struct held_lock *next) |
---|
1744 | 2861 | { |
---|
1745 | 2862 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
---|
1746 | | - return 0; |
---|
| 2863 | + return; |
---|
1747 | 2864 | |
---|
1748 | 2865 | pr_warn("\n"); |
---|
1749 | 2866 | pr_warn("============================================\n"); |
---|
.. | .. |
---|
1762 | 2879 | |
---|
1763 | 2880 | pr_warn("\nstack backtrace:\n"); |
---|
1764 | 2881 | dump_stack(); |
---|
1765 | | - |
---|
1766 | | - return 0; |
---|
1767 | 2882 | } |
---|
1768 | 2883 | |
---|
1769 | 2884 | /* |
---|
.. | .. |
---|
1772 | 2887 | * (Note that this has to be done separately, because the graph cannot |
---|
1773 | 2888 | * detect such classes of deadlocks.) |
---|
1774 | 2889 | * |
---|
1775 | | - * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read |
---|
| 2890 | + * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same |
---|
| 2891 | + * lock class is held but nest_lock is also held, i.e. we rely on the |
---|
| 2892 | + * nest_lock to avoid the deadlock. |
---|
1776 | 2893 | */ |
---|
1777 | 2894 | static int |
---|
1778 | | -check_deadlock(struct task_struct *curr, struct held_lock *next, |
---|
1779 | | - struct lockdep_map *next_instance, int read) |
---|
| 2895 | +check_deadlock(struct task_struct *curr, struct held_lock *next) |
---|
1780 | 2896 | { |
---|
1781 | 2897 | struct held_lock *prev; |
---|
1782 | 2898 | struct held_lock *nest = NULL; |
---|
.. | .. |
---|
1795 | 2911 | * Allow read-after-read recursion of the same |
---|
1796 | 2912 | * lock class (i.e. read_lock(lock)+read_lock(lock)): |
---|
1797 | 2913 | */ |
---|
1798 | | - if ((read == 2) && prev->read) |
---|
1799 | | - return 2; |
---|
| 2914 | + if ((next->read == 2) && prev->read) |
---|
| 2915 | + continue; |
---|
1800 | 2916 | |
---|
1801 | 2917 | /* |
---|
1802 | 2918 | * We're holding the nest_lock, which serializes this lock's |
---|
.. | .. |
---|
1805 | 2921 | if (nest) |
---|
1806 | 2922 | return 2; |
---|
1807 | 2923 | |
---|
1808 | | - return print_deadlock_bug(curr, prev, next); |
---|
| 2924 | + print_deadlock_bug(curr, prev, next); |
---|
| 2925 | + return 0; |
---|
1809 | 2926 | } |
---|
1810 | 2927 | return 1; |
---|
1811 | 2928 | } |
---|
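Since the return convention above (0 = deadlock, 1 = OK, 2 = covered by nest_lock or read-after-read recursion) is what the caller keys off, here is a minimal userspace sketch of that decision, assuming simplified stand-in types; `toy_hlock` and its fields are made up for illustration and are not the real `struct held_lock`:

```c
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the fields the check consults. */
struct toy_hlock {
	int class_id;	/* stands in for hlock_class() identity   */
	int read;	/* 0 = write, 1 = read, 2 = recursive read */
	int nest_lock;	/* non-zero if a nest_lock was supplied    */
};

/* Mirrors the documented convention: 0 deadlock, 1 OK, 2 nest_lock/recursion. */
static int toy_check_deadlock(const struct toy_hlock *held, int depth,
			      const struct toy_hlock *next)
{
	for (int i = 0; i < depth; i++) {
		const struct toy_hlock *prev = &held[i];

		if (prev->class_id != next->class_id)
			continue;
		/* read_lock(A) after read_lock(A) is allowed */
		if (next->read == 2 && prev->read)
			continue;
		/* same class is fine when serialized by a nest_lock */
		if (next->nest_lock)
			return 2;
		return 0;	/* would report a deadlock */
	}
	return 1;
}

int main(void)
{
	struct toy_hlock held[] = { { .class_id = 1, .read = 1 } };
	struct toy_hlock again = { .class_id = 1, .read = 2 };

	printf("%d\n", toy_check_deadlock(held, 1, &again));	/* prints 1 */
	return 0;
}
```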
1812 | 2929 | |
---|
1813 | 2930 | /* |
---|
1814 | 2931 | * There was a chain-cache miss, and we are about to add a new dependency |
---|
1815 | | - * to a previous lock. We recursively validate the following rules: |
---|
| 2932 | + * to a previous lock. We validate the following rules: |
---|
1816 | 2933 | * |
---|
1817 | 2934 | * - would the adding of the <prev> -> <next> dependency create a |
---|
1818 | 2935 | * circular dependency in the graph? [== circular deadlock] |
---|
.. | .. |
---|
1834 | 2951 | */ |
---|
1835 | 2952 | static int |
---|
1836 | 2953 | check_prev_add(struct task_struct *curr, struct held_lock *prev, |
---|
1837 | | - struct held_lock *next, int distance, struct stack_trace *trace, |
---|
1838 | | - int (*save)(struct stack_trace *trace)) |
---|
| 2954 | + struct held_lock *next, u16 distance, |
---|
| 2955 | + struct lock_trace **const trace) |
---|
1839 | 2956 | { |
---|
1840 | | - struct lock_list *uninitialized_var(target_entry); |
---|
1841 | 2957 | struct lock_list *entry; |
---|
1842 | | - struct lock_list this; |
---|
1843 | | - int ret; |
---|
| 2958 | + enum bfs_result ret; |
---|
| 2959 | + |
---|
| 2960 | + if (!hlock_class(prev)->key || !hlock_class(next)->key) { |
---|
| 2961 | + /* |
---|
| 2962 | + * The warning statements below may trigger a use-after-free |
---|
| 2963 | + * of the class name. It is better to trigger a use-after-free |
---|
| 2964 | + * and have the class name most of the time than not to have |
---|
| 2965 | + * the class name available at all. |
---|
| 2966 | + */ |
---|
| 2967 | + WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key, |
---|
| 2968 | + "Detected use-after-free of lock class %px/%s\n", |
---|
| 2969 | + hlock_class(prev), |
---|
| 2970 | + hlock_class(prev)->name); |
---|
| 2971 | + WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key, |
---|
| 2972 | + "Detected use-after-free of lock class %px/%s\n", |
---|
| 2973 | + hlock_class(next), |
---|
| 2974 | + hlock_class(next)->name); |
---|
| 2975 | + return 2; |
---|
| 2976 | + } |
---|
1844 | 2977 | |
---|
1845 | 2978 | /* |
---|
1846 | 2979 | * Prove that the new <prev> -> <next> dependency would not |
---|
1847 | 2980 | * create a circular dependency in the graph. (We do this by |
---|
1848 | | - * forward-recursing into the graph starting at <next>, and |
---|
1849 | | - * checking whether we can reach <prev>.) |
---|
| 2981 | + * a breadth-first search into the graph starting at <next>, |
---|
| 2982 | + * and check whether we can reach <prev>.) |
---|
1850 | 2983 | * |
---|
1851 | | - * We are using global variables to control the recursion, to |
---|
1852 | | - * keep the stackframe size of the recursive functions low: |
---|
| 2984 | + * The search is limited by the size of the circular queue (i.e., |
---|
| 2985 | + * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes |
---|
| 2986 | + * in the graph whose neighbours are to be checked. |
---|
1853 | 2987 | */ |
---|
1854 | | - this.class = hlock_class(next); |
---|
1855 | | - this.parent = NULL; |
---|
1856 | | - ret = check_noncircular(&this, hlock_class(prev), &target_entry); |
---|
1857 | | - if (unlikely(!ret)) { |
---|
1858 | | - if (!trace->entries) { |
---|
1859 | | - /* |
---|
1860 | | - * If @save fails here, the printing might trigger |
---|
1861 | | - * a WARN but because of the !nr_entries it should |
---|
1862 | | - * not do bad things. |
---|
1863 | | - */ |
---|
1864 | | - save(trace); |
---|
1865 | | - } |
---|
1866 | | - return print_circular_bug(&this, target_entry, next, prev, trace); |
---|
1867 | | - } |
---|
1868 | | - else if (unlikely(ret < 0)) |
---|
1869 | | - return print_bfs_bug(ret); |
---|
1870 | | - |
---|
1871 | | - if (!check_prev_add_irq(curr, prev, next)) |
---|
| 2988 | + ret = check_noncircular(next, prev, trace); |
---|
| 2989 | + if (unlikely(bfs_error(ret) || ret == BFS_RMATCH)) |
---|
1872 | 2990 | return 0; |
---|
1873 | 2991 | |
---|
1874 | | - /* |
---|
1875 | | - * For recursive read-locks we do all the dependency checks, |
---|
1876 | | - * but we dont store read-triggered dependencies (only |
---|
1877 | | - * write-triggered dependencies). This ensures that only the |
---|
1878 | | - * write-side dependencies matter, and that if for example a |
---|
1879 | | - * write-lock never takes any other locks, then the reads are |
---|
1880 | | - * equivalent to a NOP. |
---|
1881 | | - */ |
---|
1882 | | - if (next->read == 2 || prev->read == 2) |
---|
1883 | | - return 1; |
---|
| 2992 | + if (!check_irq_usage(curr, prev, next)) |
---|
| 2993 | + return 0; |
---|
| 2994 | + |
---|
1884 | 2995 | /* |
---|
1885 | 2996 | * Is the <prev> -> <next> dependency already present? |
---|
1886 | 2997 | * |
---|
.. | .. |
---|
1893 | 3004 | if (entry->class == hlock_class(next)) { |
---|
1894 | 3005 | if (distance == 1) |
---|
1895 | 3006 | entry->distance = 1; |
---|
1896 | | - return 1; |
---|
| 3007 | + entry->dep |= calc_dep(prev, next); |
---|
| 3008 | + |
---|
| 3009 | + /* |
---|
| 3010 | + * Also, update the reverse dependency in @next's |
---|
| 3011 | + * ->locks_before list. |
---|
| 3012 | + * |
---|
| 3013 | + * Here we reuse @entry as the cursor, which is fine |
---|
| 3014 | + * because we won't go to the next iteration of the |
---|
| 3015 | + * outer loop: |
---|
| 3016 | + * |
---|
| 3017 | + * For normal cases, we return in the inner loop. |
---|
| 3018 | + * |
---|
| 3019 | + * If we fail to return, we have inconsistency, i.e. |
---|
| 3020 | + * <prev>::locks_after contains <next> while |
---|
| 3021 | + * <next>::locks_before doesn't contain <prev>. In |
---|
| 3022 | + * that case, we return after the inner and indicate |
---|
| 3023 | + * something is wrong. |
---|
| 3024 | + */ |
---|
| 3025 | + list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) { |
---|
| 3026 | + if (entry->class == hlock_class(prev)) { |
---|
| 3027 | + if (distance == 1) |
---|
| 3028 | + entry->distance = 1; |
---|
| 3029 | + entry->dep |= calc_depb(prev, next); |
---|
| 3030 | + return 1; |
---|
| 3031 | + } |
---|
| 3032 | + } |
---|
| 3033 | + |
---|
| 3034 | + /* <prev> is not found in <next>::locks_before */ |
---|
| 3035 | + return 0; |
---|
1897 | 3036 | } |
---|
1898 | 3037 | } |
---|
1899 | 3038 | |
---|
| 3039 | +#ifdef CONFIG_LOCKDEP_SMALL |
---|
1900 | 3040 | /* |
---|
1901 | 3041 | * Is the <prev> -> <next> link redundant? |
---|
1902 | 3042 | */ |
---|
1903 | | - this.class = hlock_class(prev); |
---|
1904 | | - this.parent = NULL; |
---|
1905 | | - ret = check_redundant(&this, hlock_class(next), &target_entry); |
---|
1906 | | - if (!ret) { |
---|
1907 | | - debug_atomic_inc(nr_redundant); |
---|
1908 | | - return 2; |
---|
1909 | | - } |
---|
1910 | | - if (ret < 0) |
---|
1911 | | - return print_bfs_bug(ret); |
---|
1912 | | - |
---|
1913 | | - |
---|
1914 | | - if (!trace->entries && !save(trace)) |
---|
| 3043 | + ret = check_redundant(prev, next); |
---|
| 3044 | + if (bfs_error(ret)) |
---|
1915 | 3045 | return 0; |
---|
| 3046 | + else if (ret == BFS_RMATCH) |
---|
| 3047 | + return 2; |
---|
| 3048 | +#endif |
---|
| 3049 | + |
---|
| 3050 | + if (!*trace) { |
---|
| 3051 | + *trace = save_trace(); |
---|
| 3052 | + if (!*trace) |
---|
| 3053 | + return 0; |
---|
| 3054 | + } |
---|
1916 | 3055 | |
---|
1917 | 3056 | /* |
---|
1918 | 3057 | * Ok, all validations passed, add the new lock |
---|
1919 | 3058 | * to the previous lock's dependency list: |
---|
1920 | 3059 | */ |
---|
1921 | | - ret = add_lock_to_list(hlock_class(next), |
---|
| 3060 | + ret = add_lock_to_list(hlock_class(next), hlock_class(prev), |
---|
1922 | 3061 | &hlock_class(prev)->locks_after, |
---|
1923 | | - next->acquire_ip, distance, trace); |
---|
| 3062 | + next->acquire_ip, distance, |
---|
| 3063 | + calc_dep(prev, next), |
---|
| 3064 | + *trace); |
---|
1924 | 3065 | |
---|
1925 | 3066 | if (!ret) |
---|
1926 | 3067 | return 0; |
---|
1927 | 3068 | |
---|
1928 | | - ret = add_lock_to_list(hlock_class(prev), |
---|
| 3069 | + ret = add_lock_to_list(hlock_class(prev), hlock_class(next), |
---|
1929 | 3070 | &hlock_class(next)->locks_before, |
---|
1930 | | - next->acquire_ip, distance, trace); |
---|
| 3071 | + next->acquire_ip, distance, |
---|
| 3072 | + calc_depb(prev, next), |
---|
| 3073 | + *trace); |
---|
1931 | 3074 | if (!ret) |
---|
1932 | 3075 | return 0; |
---|
1933 | 3076 | |
---|
.. | .. |
---|
1943 | 3086 | static int |
---|
1944 | 3087 | check_prevs_add(struct task_struct *curr, struct held_lock *next) |
---|
1945 | 3088 | { |
---|
| 3089 | + struct lock_trace *trace = NULL; |
---|
1946 | 3090 | int depth = curr->lockdep_depth; |
---|
1947 | 3091 | struct held_lock *hlock; |
---|
1948 | | - struct stack_trace trace = { |
---|
1949 | | - .nr_entries = 0, |
---|
1950 | | - .max_entries = 0, |
---|
1951 | | - .entries = NULL, |
---|
1952 | | - .skip = 0, |
---|
1953 | | - }; |
---|
1954 | 3092 | |
---|
1955 | 3093 | /* |
---|
1956 | 3094 | * Debugging checks. |
---|
.. | .. |
---|
1968 | 3106 | goto out_bug; |
---|
1969 | 3107 | |
---|
1970 | 3108 | for (;;) { |
---|
1971 | | - int distance = curr->lockdep_depth - depth + 1; |
---|
| 3109 | + u16 distance = curr->lockdep_depth - depth + 1; |
---|
1972 | 3110 | hlock = curr->held_locks + depth - 1; |
---|
1973 | 3111 | |
---|
1974 | | - /* |
---|
1975 | | - * Only non-recursive-read entries get new dependencies |
---|
1976 | | - * added: |
---|
1977 | | - */ |
---|
1978 | | - if (hlock->read != 2 && hlock->check) { |
---|
1979 | | - int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace); |
---|
| 3112 | + if (hlock->check) { |
---|
| 3113 | + int ret = check_prev_add(curr, hlock, next, distance, &trace); |
---|
1980 | 3114 | if (!ret) |
---|
1981 | 3115 | return 0; |
---|
1982 | 3116 | |
---|
.. | .. |
---|
2018 | 3152 | return 0; |
---|
2019 | 3153 | } |
---|
2020 | 3154 | |
---|
2021 | | -unsigned long nr_lock_chains; |
---|
2022 | 3155 | struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; |
---|
2023 | | -int nr_chain_hlocks; |
---|
| 3156 | +static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS); |
---|
2024 | 3157 | static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; |
---|
| 3158 | +unsigned long nr_zapped_lock_chains; |
---|
| 3159 | +unsigned int nr_free_chain_hlocks; /* Free chain_hlocks in buckets */ |
---|
| 3160 | +unsigned int nr_lost_chain_hlocks; /* Lost chain_hlocks */ |
---|
| 3161 | +unsigned int nr_large_chain_blocks; /* size > MAX_CHAIN_BUCKETS */ |
---|
| 3162 | + |
---|
| 3163 | +/* |
---|
| 3164 | + * The first 2 chain_hlocks entries in the chain block in the bucket |
---|
| 3165 | + * list contain the following metadata: |
---|
| 3166 | + * |
---|
| 3167 | + * entry[0]: |
---|
| 3168 | + * Bit 15 - always set to 1 (it is not a class index) |
---|
| 3169 | + * Bits 0-14 - upper 15 bits of the next block index |
---|
| 3170 | + * entry[1] - lower 16 bits of next block index |
---|
| 3171 | + * |
---|
| 3172 | + * A next block index of all 1 bits means it is the end of the list. |
---|
| 3173 | + * |
---|
| 3174 | + * On the variable-size bucket (bucket-0), the 3rd and 4th entries contain |
---|
| 3175 | + * the chain block size: |
---|
| 3176 | + * |
---|
| 3177 | + * entry[2] - upper 16 bits of the chain block size |
---|
| 3178 | + * entry[3] - lower 16 bits of the chain block size |
---|
| 3179 | + */ |
---|
| 3180 | +#define MAX_CHAIN_BUCKETS 16 |
---|
| 3181 | +#define CHAIN_BLK_FLAG (1U << 15) |
---|
| 3182 | +#define CHAIN_BLK_LIST_END 0xFFFFU |
---|
| 3183 | + |
---|
| 3184 | +static int chain_block_buckets[MAX_CHAIN_BUCKETS]; |
---|
| 3185 | + |
---|
| 3186 | +static inline int size_to_bucket(int size) |
---|
| 3187 | +{ |
---|
| 3188 | + if (size > MAX_CHAIN_BUCKETS) |
---|
| 3189 | + return 0; |
---|
| 3190 | + |
---|
| 3191 | + return size - 1; |
---|
| 3192 | +} |
---|
| 3193 | + |
---|
| 3194 | +/* |
---|
| 3195 | + * Iterate all the chain blocks in a bucket. |
---|
| 3196 | + */ |
---|
| 3197 | +#define for_each_chain_block(bucket, prev, curr) \ |
---|
| 3198 | + for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \ |
---|
| 3199 | + (curr) >= 0; \ |
---|
| 3200 | + (prev) = (curr), (curr) = chain_block_next(curr)) |
---|
| 3201 | + |
---|
| 3202 | +/* |
---|
| 3203 | + * next block or -1 |
---|
| 3204 | + */ |
---|
| 3205 | +static inline int chain_block_next(int offset) |
---|
| 3206 | +{ |
---|
| 3207 | + int next = chain_hlocks[offset]; |
---|
| 3208 | + |
---|
| 3209 | + WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG)); |
---|
| 3210 | + |
---|
| 3211 | + if (next == CHAIN_BLK_LIST_END) |
---|
| 3212 | + return -1; |
---|
| 3213 | + |
---|
| 3214 | + next &= ~CHAIN_BLK_FLAG; |
---|
| 3215 | + next <<= 16; |
---|
| 3216 | + next |= chain_hlocks[offset + 1]; |
---|
| 3217 | + |
---|
| 3218 | + return next; |
---|
| 3219 | +} |
---|
| 3220 | + |
---|
| 3221 | +/* |
---|
| 3222 | + * bucket-0 only |
---|
| 3223 | + */ |
---|
| 3224 | +static inline int chain_block_size(int offset) |
---|
| 3225 | +{ |
---|
| 3226 | + return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3]; |
---|
| 3227 | +} |
---|
| 3228 | + |
---|
| 3229 | +static inline void init_chain_block(int offset, int next, int bucket, int size) |
---|
| 3230 | +{ |
---|
| 3231 | + chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG; |
---|
| 3232 | + chain_hlocks[offset + 1] = (u16)next; |
---|
| 3233 | + |
---|
| 3234 | + if (size && !bucket) { |
---|
| 3235 | + chain_hlocks[offset + 2] = size >> 16; |
---|
| 3236 | + chain_hlocks[offset + 3] = (u16)size; |
---|
| 3237 | + } |
---|
| 3238 | +} |
---|
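To make the header layout described above concrete, here is a small standalone round-trip of the same encoding (bit 15 of entry[0] as the "not a class index" flag, 15 + 16 bits of next-block index, and a 32-bit size split across entry[2]/entry[3] for the variable-size bucket). It mirrors init_chain_block(), chain_block_next() and chain_block_size() on a local array; `BLK_FLAG`, `blk[]` and the helper names are illustrative only:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BLK_FLAG	(1U << 15)

static uint16_t blk[8];	/* small stand-in for chain_hlocks[] */

static void encode(int offset, int next, int size)
{
	blk[offset]     = (next >> 16) | BLK_FLAG;
	blk[offset + 1] = (uint16_t)next;
	blk[offset + 2] = size >> 16;		/* only meaningful for bucket 0 */
	blk[offset + 3] = (uint16_t)size;
}

static int decode_next(int offset)
{
	int next = blk[offset];

	next &= ~BLK_FLAG;
	return (next << 16) | blk[offset + 1];
}

static int decode_size(int offset)
{
	return (blk[offset + 2] << 16) | blk[offset + 3];
}

int main(void)
{
	encode(0, 0x12345, 300000);
	/* entry[0] = 0x8001, entry[1] = 0x2345 */
	printf("next=0x%x size=%d\n", decode_next(0), decode_size(0));
	assert(decode_next(0) == 0x12345 && decode_size(0) == 300000);
	return 0;
}
```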
| 3239 | + |
---|
| 3240 | +static inline void add_chain_block(int offset, int size) |
---|
| 3241 | +{ |
---|
| 3242 | + int bucket = size_to_bucket(size); |
---|
| 3243 | + int next = chain_block_buckets[bucket]; |
---|
| 3244 | + int prev, curr; |
---|
| 3245 | + |
---|
| 3246 | + if (unlikely(size < 2)) { |
---|
| 3247 | + /* |
---|
| 3248 | + * We can't store single entries on the freelist. Leak them. |
---|
| 3249 | + * |
---|
| 3250 | + * One possible way out would be to uniquely mark them, other |
---|
| 3251 | + * than with CHAIN_BLK_FLAG, such that we can recover them when |
---|
| 3252 | + * the block before them is re-added. |
---|
| 3253 | + */ |
---|
| 3254 | + if (size) |
---|
| 3255 | + nr_lost_chain_hlocks++; |
---|
| 3256 | + return; |
---|
| 3257 | + } |
---|
| 3258 | + |
---|
| 3259 | + nr_free_chain_hlocks += size; |
---|
| 3260 | + if (!bucket) { |
---|
| 3261 | + nr_large_chain_blocks++; |
---|
| 3262 | + |
---|
| 3263 | + /* |
---|
| 3264 | + * Variable sized, sort large to small. |
---|
| 3265 | + */ |
---|
| 3266 | + for_each_chain_block(0, prev, curr) { |
---|
| 3267 | + if (size >= chain_block_size(curr)) |
---|
| 3268 | + break; |
---|
| 3269 | + } |
---|
| 3270 | + init_chain_block(offset, curr, 0, size); |
---|
| 3271 | + if (prev < 0) |
---|
| 3272 | + chain_block_buckets[0] = offset; |
---|
| 3273 | + else |
---|
| 3274 | + init_chain_block(prev, offset, 0, 0); |
---|
| 3275 | + return; |
---|
| 3276 | + } |
---|
| 3277 | + /* |
---|
| 3278 | + * Fixed size, add to head. |
---|
| 3279 | + */ |
---|
| 3280 | + init_chain_block(offset, next, bucket, size); |
---|
| 3281 | + chain_block_buckets[bucket] = offset; |
---|
| 3282 | +} |
---|
| 3283 | + |
---|
| 3284 | +/* |
---|
| 3285 | + * Only the first block in the list can be deleted. |
---|
| 3286 | + * |
---|
| 3287 | + * For the variable size bucket[0], the first block (the largest one) is |
---|
| 3288 | + * returned, broken up and put back into the pool. So if a chain block of |
---|
| 3289 | + * length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be |
---|
| 3290 | + * queued up after the primordial chain block and never be used until the |
---|
| 3291 | + * hlock entries in the primordial chain block are almost used up. That |
---|
| 3292 | + * causes fragmentation and reduces allocation efficiency. That can be |
---|
| 3293 | + * monitored by looking at the "large chain blocks" number in lockdep_stats. |
---|
| 3294 | + */ |
---|
| 3295 | +static inline void del_chain_block(int bucket, int size, int next) |
---|
| 3296 | +{ |
---|
| 3297 | + nr_free_chain_hlocks -= size; |
---|
| 3298 | + chain_block_buckets[bucket] = next; |
---|
| 3299 | + |
---|
| 3300 | + if (!bucket) |
---|
| 3301 | + nr_large_chain_blocks--; |
---|
| 3302 | +} |
---|
| 3303 | + |
---|
| 3304 | +static void init_chain_block_buckets(void) |
---|
| 3305 | +{ |
---|
| 3306 | + int i; |
---|
| 3307 | + |
---|
| 3308 | + for (i = 0; i < MAX_CHAIN_BUCKETS; i++) |
---|
| 3309 | + chain_block_buckets[i] = -1; |
---|
| 3310 | + |
---|
| 3311 | + add_chain_block(0, ARRAY_SIZE(chain_hlocks)); |
---|
| 3312 | +} |
---|
| 3313 | + |
---|
| 3314 | +/* |
---|
| 3315 | + * Return offset of a chain block of the right size or -1 if not found. |
---|
| 3316 | + * |
---|
| 3317 | + * Fairly simple worst-fit allocator with the addition of a number of size |
---|
| 3318 | + * specific free lists. |
---|
| 3319 | + */ |
---|
| 3320 | +static int alloc_chain_hlocks(int req) |
---|
| 3321 | +{ |
---|
| 3322 | + int bucket, curr, size; |
---|
| 3323 | + |
---|
| 3324 | + /* |
---|
| 3325 | + * We rely on the MSB to act as an escape bit to denote freelist |
---|
| 3326 | + * pointers. Make sure this bit isn't set in 'normal' class_idx usage. |
---|
| 3327 | + */ |
---|
| 3328 | + BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG); |
---|
| 3329 | + |
---|
| 3330 | + init_data_structures_once(); |
---|
| 3331 | + |
---|
| 3332 | + if (nr_free_chain_hlocks < req) |
---|
| 3333 | + return -1; |
---|
| 3334 | + |
---|
| 3335 | + /* |
---|
| 3336 | + * We require a minimum of 2 (u16) entries to encode a freelist |
---|
| 3337 | + * 'pointer'. |
---|
| 3338 | + */ |
---|
| 3339 | + req = max(req, 2); |
---|
| 3340 | + bucket = size_to_bucket(req); |
---|
| 3341 | + curr = chain_block_buckets[bucket]; |
---|
| 3342 | + |
---|
| 3343 | + if (bucket) { |
---|
| 3344 | + if (curr >= 0) { |
---|
| 3345 | + del_chain_block(bucket, req, chain_block_next(curr)); |
---|
| 3346 | + return curr; |
---|
| 3347 | + } |
---|
| 3348 | + /* Try bucket 0 */ |
---|
| 3349 | + curr = chain_block_buckets[0]; |
---|
| 3350 | + } |
---|
| 3351 | + |
---|
| 3352 | + /* |
---|
| 3353 | + * The variable sized freelist is sorted by size; the first entry is |
---|
| 3354 | + * the largest. Use it if it fits. |
---|
| 3355 | + */ |
---|
| 3356 | + if (curr >= 0) { |
---|
| 3357 | + size = chain_block_size(curr); |
---|
| 3358 | + if (likely(size >= req)) { |
---|
| 3359 | + del_chain_block(0, size, chain_block_next(curr)); |
---|
| 3360 | + add_chain_block(curr + req, size - req); |
---|
| 3361 | + return curr; |
---|
| 3362 | + } |
---|
| 3363 | + } |
---|
| 3364 | + |
---|
| 3365 | + /* |
---|
| 3366 | + * Last resort, split a block in a larger sized bucket. |
---|
| 3367 | + */ |
---|
| 3368 | + for (size = MAX_CHAIN_BUCKETS; size > req; size--) { |
---|
| 3369 | + bucket = size_to_bucket(size); |
---|
| 3370 | + curr = chain_block_buckets[bucket]; |
---|
| 3371 | + if (curr < 0) |
---|
| 3372 | + continue; |
---|
| 3373 | + |
---|
| 3374 | + del_chain_block(bucket, size, chain_block_next(curr)); |
---|
| 3375 | + add_chain_block(curr + req, size - req); |
---|
| 3376 | + return curr; |
---|
| 3377 | + } |
---|
| 3378 | + |
---|
| 3379 | + return -1; |
---|
| 3380 | +} |
---|
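The allocation order in alloc_chain_hlocks() is: try the exact-size bucket, then carve from the variable-size bucket 0, then as a last resort split a block from a larger fixed-size bucket. A hedged toy model of that ordering follows; it deliberately skips the in-band freelist encoding, models bucket 0 as a single contiguous tail rather than a largest-first list, and all `toy_*`/`fixed_*` names are invented for the sketch:

```c
#include <stdio.h>

#define MAX_BUCKETS	16
#define POOL_SIZE	64

/*
 * Toy model: bucket[s-1] holds free blocks of exactly s entries
 * (s <= 16); "bucket 0" is the variable-size pool, modelled here as
 * the unallocated tail of a notional chain_hlocks[]-like array.
 */
static int fixed_top[MAX_BUCKETS];		/* stack depth per bucket  */
static int fixed_off[MAX_BUCKETS][8];		/* free offsets per bucket */
static int tail_off = 0, tail_size = POOL_SIZE;	/* the variable-size pool  */

static int size_to_bucket(int size)
{
	return (size > MAX_BUCKETS) ? 0 : size - 1;
}

static int toy_alloc(int req)
{
	int b = size_to_bucket(req);

	/* 1) exact-size bucket */
	if (b && fixed_top[b])
		return fixed_off[b][--fixed_top[b]];

	/* 2) variable-size pool: split the request off the front */
	if (tail_size >= req) {
		int off = tail_off;

		tail_off += req;
		tail_size -= req;
		return off;
	}

	/* 3) last resort: split a block from a larger fixed-size bucket */
	for (int s = MAX_BUCKETS; s > req; s--) {
		b = size_to_bucket(s);
		if (fixed_top[b]) {
			int off = fixed_off[b][--fixed_top[b]];
			int rb = size_to_bucket(s - req);

			fixed_off[rb][fixed_top[rb]++] = off + req;
			return off;
		}
	}
	return -1;
}

static void toy_free(int off, int size)
{
	int b = size_to_bucket(size);

	fixed_off[b][fixed_top[b]++] = off;	/* fixed-size only, for brevity */
}

int main(void)
{
	int a = toy_alloc(5);			/* carved from the variable pool */

	toy_free(a, 5);				/* lands in bucket 4 (size - 1)  */
	printf("%d %d %d\n", a, toy_alloc(5), toy_alloc(3));
	return 0;
}
```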
| 3381 | + |
---|
| 3382 | +static inline void free_chain_hlocks(int base, int size) |
---|
| 3383 | +{ |
---|
| 3384 | + add_chain_block(base, max(size, 2)); |
---|
| 3385 | +} |
---|
2025 | 3386 | |
---|
2026 | 3387 | struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) |
---|
2027 | 3388 | { |
---|
2028 | | - return lock_classes + chain_hlocks[chain->base + i]; |
---|
| 3389 | + u16 chain_hlock = chain_hlocks[chain->base + i]; |
---|
| 3390 | + unsigned int class_idx = chain_hlock_class_idx(chain_hlock); |
---|
| 3391 | + |
---|
| 3392 | + return lock_classes + class_idx; |
---|
2029 | 3393 | } |
---|
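lock_chain_get_class() above relies on chain_hlock_class_idx() to undo the packing done by hlock_id(); both are defined earlier in the file and are not visible in this hunk. A plausible sketch of that packing, assuming the class index occupies the low bits and the read state sits just above it (the 13-bit width and the `TOY_*` names are assumptions here):

```c
#include <stdint.h>

#define TOY_KEY_BITS	13			/* assumed width of class_idx */
#define TOY_KEY_MASK	((1U << TOY_KEY_BITS) - 1)

/* Pack class index + read state into one u16 chain_hlocks[] entry. */
static inline uint16_t toy_hlock_id(unsigned int class_idx, unsigned int read)
{
	return class_idx | (read << TOY_KEY_BITS);
}

/* Recover just the class index, as chain_hlock_class_idx() would. */
static inline unsigned int toy_hlock_class_idx(uint16_t id)
{
	return id & TOY_KEY_MASK;
}
```

The point of the extra state is that a chain entry can then distinguish read and write acquisitions of the same class rather than recording only the class index.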
2030 | 3394 | |
---|
2031 | 3395 | /* |
---|
.. | .. |
---|
2051 | 3415 | /* |
---|
2052 | 3416 | * Returns the next chain_key iteration |
---|
2053 | 3417 | */ |
---|
2054 | | -static u64 print_chain_key_iteration(int class_idx, u64 chain_key) |
---|
| 3418 | +static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key) |
---|
2055 | 3419 | { |
---|
2056 | | - u64 new_chain_key = iterate_chain_key(chain_key, class_idx); |
---|
| 3420 | + u64 new_chain_key = iterate_chain_key(chain_key, hlock_id); |
---|
2057 | 3421 | |
---|
2058 | | - printk(" class_idx:%d -> chain_key:%016Lx", |
---|
2059 | | - class_idx, |
---|
| 3422 | + printk(" hlock_id:%d -> chain_key:%016Lx", |
---|
| 3423 | + (unsigned int)hlock_id, |
---|
2060 | 3424 | (unsigned long long)new_chain_key); |
---|
2061 | 3425 | return new_chain_key; |
---|
2062 | 3426 | } |
---|
.. | .. |
---|
2065 | 3429 | print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) |
---|
2066 | 3430 | { |
---|
2067 | 3431 | struct held_lock *hlock; |
---|
2068 | | - u64 chain_key = 0; |
---|
| 3432 | + u64 chain_key = INITIAL_CHAIN_KEY; |
---|
2069 | 3433 | int depth = curr->lockdep_depth; |
---|
2070 | | - int i; |
---|
| 3434 | + int i = get_first_held_lock(curr, hlock_next); |
---|
2071 | 3435 | |
---|
2072 | | - printk("depth: %u\n", depth + 1); |
---|
2073 | | - for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) { |
---|
| 3436 | + printk("depth: %u (irq_context %u)\n", depth - i + 1, |
---|
| 3437 | + hlock_next->irq_context); |
---|
| 3438 | + for (; i < depth; i++) { |
---|
2074 | 3439 | hlock = curr->held_locks + i; |
---|
2075 | | - chain_key = print_chain_key_iteration(hlock->class_idx, chain_key); |
---|
| 3440 | + chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key); |
---|
2076 | 3441 | |
---|
2077 | 3442 | print_lock(hlock); |
---|
2078 | 3443 | } |
---|
2079 | 3444 | |
---|
2080 | | - print_chain_key_iteration(hlock_next->class_idx, chain_key); |
---|
| 3445 | + print_chain_key_iteration(hlock_id(hlock_next), chain_key); |
---|
2081 | 3446 | print_lock(hlock_next); |
---|
2082 | 3447 | } |
---|
2083 | 3448 | |
---|
2084 | 3449 | static void print_chain_keys_chain(struct lock_chain *chain) |
---|
2085 | 3450 | { |
---|
2086 | 3451 | int i; |
---|
2087 | | - u64 chain_key = 0; |
---|
2088 | | - int class_id; |
---|
| 3452 | + u64 chain_key = INITIAL_CHAIN_KEY; |
---|
| 3453 | + u16 hlock_id; |
---|
2089 | 3454 | |
---|
2090 | 3455 | printk("depth: %u\n", chain->depth); |
---|
2091 | 3456 | for (i = 0; i < chain->depth; i++) { |
---|
2092 | | - class_id = chain_hlocks[chain->base + i]; |
---|
2093 | | - chain_key = print_chain_key_iteration(class_id + 1, chain_key); |
---|
| 3457 | + hlock_id = chain_hlocks[chain->base + i]; |
---|
| 3458 | + chain_key = print_chain_key_iteration(hlock_id, chain_key); |
---|
2094 | 3459 | |
---|
2095 | | - print_lock_name(lock_classes + class_id); |
---|
| 3460 | + print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id)); |
---|
2096 | 3461 | printk("\n"); |
---|
2097 | 3462 | } |
---|
2098 | 3463 | } |
---|
.. | .. |
---|
2141 | 3506 | } |
---|
2142 | 3507 | |
---|
2143 | 3508 | for (j = 0; j < chain->depth - 1; j++, i++) { |
---|
2144 | | - id = curr->held_locks[i].class_idx - 1; |
---|
| 3509 | + id = hlock_id(&curr->held_locks[i]); |
---|
2145 | 3510 | |
---|
2146 | 3511 | if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) { |
---|
2147 | 3512 | print_collision(curr, hlock, chain); |
---|
.. | .. |
---|
2153 | 3518 | } |
---|
2154 | 3519 | |
---|
2155 | 3520 | /* |
---|
2156 | | - * This is for building a chain between just two different classes, |
---|
2157 | | - * instead of adding a new hlock upon current, which is done by |
---|
2158 | | - * add_chain_cache(). |
---|
2159 | | - * |
---|
2160 | | - * This can be called in any context with two classes, while |
---|
2161 | | - * add_chain_cache() must be done within the lock owener's context |
---|
2162 | | - * since it uses hlock which might be racy in another context. |
---|
| 3521 | + * Given an index that is >= -1, return the index of the next lock chain. |
---|
| 3522 | + * Return -2 if there is no next lock chain. |
---|
2163 | 3523 | */ |
---|
2164 | | -static inline int add_chain_cache_classes(unsigned int prev, |
---|
2165 | | - unsigned int next, |
---|
2166 | | - unsigned int irq_context, |
---|
2167 | | - u64 chain_key) |
---|
| 3524 | +long lockdep_next_lockchain(long i) |
---|
2168 | 3525 | { |
---|
2169 | | - struct hlist_head *hash_head = chainhashentry(chain_key); |
---|
2170 | | - struct lock_chain *chain; |
---|
| 3526 | + i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1); |
---|
| 3527 | + return i < ARRAY_SIZE(lock_chains) ? i : -2; |
---|
| 3528 | +} |
---|
2171 | 3529 | |
---|
2172 | | - /* |
---|
2173 | | - * Allocate a new chain entry from the static array, and add |
---|
2174 | | - * it to the hash: |
---|
2175 | | - */ |
---|
| 3530 | +unsigned long lock_chain_count(void) |
---|
| 3531 | +{ |
---|
| 3532 | + return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains)); |
---|
| 3533 | +} |
---|
2176 | 3534 | |
---|
2177 | | - /* |
---|
2178 | | - * We might need to take the graph lock, ensure we've got IRQs |
---|
2179 | | - * disabled to make this an IRQ-safe lock.. for recursion reasons |
---|
2180 | | - * lockdep won't complain about its own locking errors. |
---|
2181 | | - */ |
---|
2182 | | - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
---|
2183 | | - return 0; |
---|
| 3535 | +/* Must be called with the graph lock held. */ |
---|
| 3536 | +static struct lock_chain *alloc_lock_chain(void) |
---|
| 3537 | +{ |
---|
| 3538 | + int idx = find_first_zero_bit(lock_chains_in_use, |
---|
| 3539 | + ARRAY_SIZE(lock_chains)); |
---|
2184 | 3540 | |
---|
2185 | | - if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { |
---|
2186 | | - if (!debug_locks_off_graph_unlock()) |
---|
2187 | | - return 0; |
---|
2188 | | - |
---|
2189 | | - print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); |
---|
2190 | | - dump_stack(); |
---|
2191 | | - return 0; |
---|
2192 | | - } |
---|
2193 | | - |
---|
2194 | | - chain = lock_chains + nr_lock_chains++; |
---|
2195 | | - chain->chain_key = chain_key; |
---|
2196 | | - chain->irq_context = irq_context; |
---|
2197 | | - chain->depth = 2; |
---|
2198 | | - if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { |
---|
2199 | | - chain->base = nr_chain_hlocks; |
---|
2200 | | - nr_chain_hlocks += chain->depth; |
---|
2201 | | - chain_hlocks[chain->base] = prev - 1; |
---|
2202 | | - chain_hlocks[chain->base + 1] = next -1; |
---|
2203 | | - } |
---|
2204 | | -#ifdef CONFIG_DEBUG_LOCKDEP |
---|
2205 | | - /* |
---|
2206 | | - * Important for check_no_collision(). |
---|
2207 | | - */ |
---|
2208 | | - else { |
---|
2209 | | - if (!debug_locks_off_graph_unlock()) |
---|
2210 | | - return 0; |
---|
2211 | | - |
---|
2212 | | - print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); |
---|
2213 | | - dump_stack(); |
---|
2214 | | - return 0; |
---|
2215 | | - } |
---|
2216 | | -#endif |
---|
2217 | | - |
---|
2218 | | - hlist_add_head_rcu(&chain->entry, hash_head); |
---|
2219 | | - debug_atomic_inc(chain_lookup_misses); |
---|
2220 | | - inc_chains(); |
---|
2221 | | - |
---|
2222 | | - return 1; |
---|
| 3541 | + if (unlikely(idx >= ARRAY_SIZE(lock_chains))) |
---|
| 3542 | + return NULL; |
---|
| 3543 | + __set_bit(idx, lock_chains_in_use); |
---|
| 3544 | + return lock_chains + idx; |
---|
2223 | 3545 | } |
---|
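With alloc_lock_chain(), lock_chains[] slots are handed out from a bitmap rather than a monotonically growing counter, which is what later allows zapped chains to give their slot back. A minimal userspace model of the allocate/iterate pattern, using one plain 64-bit word in place of the kernel's bitmap helpers (all `toy_*` names are invented):

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_NR_CHAINS	64		/* one word keeps the sketch small */

static uint64_t in_use;			/* models lock_chains_in_use */

/* find_first_zero_bit() + __set_bit(), collapsed into one helper */
static int toy_alloc_chain(void)
{
	for (int i = 0; i < TOY_NR_CHAINS; i++) {
		if (!(in_use & (1ULL << i))) {
			in_use |= 1ULL << i;
			return i;
		}
	}
	return -1;			/* table full */
}

/* models lockdep_next_lockchain(): next in-use index after @i, or -2 */
static long toy_next_chain(long i)
{
	for (i++; i < TOY_NR_CHAINS; i++)
		if (in_use & (1ULL << i))
			return i;
	return -2;
}

int main(void)
{
	toy_alloc_chain();		/* slot 0 */
	toy_alloc_chain();		/* slot 1 */
	in_use &= ~1ULL;		/* "zap" slot 0: the bit goes back */

	int reused = toy_alloc_chain();	/* finds the freed slot again */

	printf("reused=%d first=%ld\n", reused, toy_next_chain(-1));
	return 0;
}
```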
2224 | 3546 | |
---|
2225 | 3547 | /* |
---|
.. | .. |
---|
2233 | 3555 | struct held_lock *hlock, |
---|
2234 | 3556 | u64 chain_key) |
---|
2235 | 3557 | { |
---|
2236 | | - struct lock_class *class = hlock_class(hlock); |
---|
2237 | 3558 | struct hlist_head *hash_head = chainhashentry(chain_key); |
---|
2238 | 3559 | struct lock_chain *chain; |
---|
2239 | 3560 | int i, j; |
---|
2240 | 3561 | |
---|
2241 | 3562 | /* |
---|
2242 | | - * Allocate a new chain entry from the static array, and add |
---|
2243 | | - * it to the hash: |
---|
2244 | | - */ |
---|
2245 | | - |
---|
2246 | | - /* |
---|
2247 | | - * We might need to take the graph lock, ensure we've got IRQs |
---|
| 3563 | + * The caller must hold the graph lock, ensure we've got IRQs |
---|
2248 | 3564 | * disabled to make this an IRQ-safe lock.. for recursion reasons |
---|
2249 | 3565 | * lockdep won't complain about its own locking errors. |
---|
2250 | 3566 | */ |
---|
2251 | | - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
---|
| 3567 | + if (lockdep_assert_locked()) |
---|
2252 | 3568 | return 0; |
---|
2253 | 3569 | |
---|
2254 | | - if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { |
---|
| 3570 | + chain = alloc_lock_chain(); |
---|
| 3571 | + if (!chain) { |
---|
2255 | 3572 | if (!debug_locks_off_graph_unlock()) |
---|
2256 | 3573 | return 0; |
---|
2257 | 3574 | |
---|
.. | .. |
---|
2259 | 3576 | dump_stack(); |
---|
2260 | 3577 | return 0; |
---|
2261 | 3578 | } |
---|
2262 | | - chain = lock_chains + nr_lock_chains++; |
---|
2263 | 3579 | chain->chain_key = chain_key; |
---|
2264 | 3580 | chain->irq_context = hlock->irq_context; |
---|
2265 | 3581 | i = get_first_held_lock(curr, hlock); |
---|
.. | .. |
---|
2269 | 3585 | BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); |
---|
2270 | 3586 | BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); |
---|
2271 | 3587 | |
---|
2272 | | - if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { |
---|
2273 | | - chain->base = nr_chain_hlocks; |
---|
2274 | | - for (j = 0; j < chain->depth - 1; j++, i++) { |
---|
2275 | | - int lock_id = curr->held_locks[i].class_idx - 1; |
---|
2276 | | - chain_hlocks[chain->base + j] = lock_id; |
---|
2277 | | - } |
---|
2278 | | - chain_hlocks[chain->base + j] = class - lock_classes; |
---|
2279 | | - } |
---|
2280 | | - |
---|
2281 | | - if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS) |
---|
2282 | | - nr_chain_hlocks += chain->depth; |
---|
2283 | | - |
---|
2284 | | -#ifdef CONFIG_DEBUG_LOCKDEP |
---|
2285 | | - /* |
---|
2286 | | - * Important for check_no_collision(). |
---|
2287 | | - */ |
---|
2288 | | - if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) { |
---|
| 3588 | + j = alloc_chain_hlocks(chain->depth); |
---|
| 3589 | + if (j < 0) { |
---|
2289 | 3590 | if (!debug_locks_off_graph_unlock()) |
---|
2290 | 3591 | return 0; |
---|
2291 | 3592 | |
---|
.. | .. |
---|
2293 | 3594 | dump_stack(); |
---|
2294 | 3595 | return 0; |
---|
2295 | 3596 | } |
---|
2296 | | -#endif |
---|
2297 | 3597 | |
---|
| 3598 | + chain->base = j; |
---|
| 3599 | + for (j = 0; j < chain->depth - 1; j++, i++) { |
---|
| 3600 | + int lock_id = hlock_id(curr->held_locks + i); |
---|
| 3601 | + |
---|
| 3602 | + chain_hlocks[chain->base + j] = lock_id; |
---|
| 3603 | + } |
---|
| 3604 | + chain_hlocks[chain->base + j] = hlock_id(hlock); |
---|
2298 | 3605 | hlist_add_head_rcu(&chain->entry, hash_head); |
---|
2299 | 3606 | debug_atomic_inc(chain_lookup_misses); |
---|
2300 | | - inc_chains(); |
---|
| 3607 | + inc_chains(chain->irq_context); |
---|
2301 | 3608 | |
---|
2302 | 3609 | return 1; |
---|
2303 | 3610 | } |
---|
2304 | 3611 | |
---|
2305 | 3612 | /* |
---|
2306 | | - * Look up a dependency chain. |
---|
| 3613 | + * Look up a dependency chain. Must be called with either the graph lock or |
---|
| 3614 | + * the RCU read lock held. |
---|
2307 | 3615 | */ |
---|
2308 | 3616 | static inline struct lock_chain *lookup_chain_cache(u64 chain_key) |
---|
2309 | 3617 | { |
---|
2310 | 3618 | struct hlist_head *hash_head = chainhashentry(chain_key); |
---|
2311 | 3619 | struct lock_chain *chain; |
---|
2312 | 3620 | |
---|
2313 | | - /* |
---|
2314 | | - * We can walk it lock-free, because entries only get added |
---|
2315 | | - * to the hash: |
---|
2316 | | - */ |
---|
2317 | 3621 | hlist_for_each_entry_rcu(chain, hash_head, entry) { |
---|
2318 | | - if (chain->chain_key == chain_key) { |
---|
| 3622 | + if (READ_ONCE(chain->chain_key) == chain_key) { |
---|
2319 | 3623 | debug_atomic_inc(chain_lookup_hits); |
---|
2320 | 3624 | return chain; |
---|
2321 | 3625 | } |
---|
.. | .. |
---|
2374 | 3678 | return 1; |
---|
2375 | 3679 | } |
---|
2376 | 3680 | |
---|
2377 | | -static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, |
---|
2378 | | - struct held_lock *hlock, int chain_head, u64 chain_key) |
---|
| 3681 | +static int validate_chain(struct task_struct *curr, |
---|
| 3682 | + struct held_lock *hlock, |
---|
| 3683 | + int chain_head, u64 chain_key) |
---|
2379 | 3684 | { |
---|
2380 | 3685 | /* |
---|
2381 | 3686 | * Trylock needs to maintain the stack of held locks, but it |
---|
.. | .. |
---|
2396 | 3701 | * - is softirq-safe, if this lock is hardirq-unsafe |
---|
2397 | 3702 | * |
---|
2398 | 3703 | * And check whether the new lock's dependency graph |
---|
2399 | | - * could lead back to the previous lock. |
---|
| 3704 | + * could lead back to the previous lock: |
---|
2400 | 3705 | * |
---|
2401 | | - * any of these scenarios could lead to a deadlock. If |
---|
2402 | | - * All validations |
---|
| 3706 | + * - within the current held-lock stack |
---|
| 3707 | + * - across our accumulated lock dependency records |
---|
| 3708 | + * |
---|
| 3709 | + * any of these scenarios could lead to a deadlock. |
---|
2403 | 3710 | */ |
---|
2404 | | - int ret = check_deadlock(curr, hlock, lock, hlock->read); |
---|
| 3711 | + /* |
---|
| 3712 | + * The simple case: does the current task hold the same lock |
---|
| 3713 | + * already? |
---|
| 3714 | + */ |
---|
| 3715 | + int ret = check_deadlock(curr, hlock); |
---|
2405 | 3716 | |
---|
2406 | 3717 | if (!ret) |
---|
2407 | 3718 | return 0; |
---|
2408 | 3719 | /* |
---|
2409 | | - * Mark recursive read, as we jump over it when |
---|
2410 | | - * building dependencies (just like we jump over |
---|
2411 | | - * trylock entries): |
---|
2412 | | - */ |
---|
2413 | | - if (ret == 2) |
---|
2414 | | - hlock->read = 2; |
---|
2415 | | - /* |
---|
2416 | 3720 | * Add dependency only if this lock is not the head |
---|
2417 | | - * of the chain, and if it's not a secondary read-lock: |
---|
| 3721 | + * of the chain, and if the new lock introduces neither a new |
---|
| 3722 | + * lock dependency (because we already hold a lock of the |
---|
| 3723 | + * same lock class) nor a deadlock (because the nest_lock |
---|
| 3724 | + * serializes the nesting locks); see the comments for |
---|
| 3725 | + * check_deadlock(). |
---|
2418 | 3726 | */ |
---|
2419 | 3727 | if (!chain_head && ret != 2) { |
---|
2420 | 3728 | if (!check_prevs_add(curr, hlock)) |
---|
.. | .. |
---|
2432 | 3740 | } |
---|
2433 | 3741 | #else |
---|
2434 | 3742 | static inline int validate_chain(struct task_struct *curr, |
---|
2435 | | - struct lockdep_map *lock, struct held_lock *hlock, |
---|
2436 | | - int chain_head, u64 chain_key) |
---|
| 3743 | + struct held_lock *hlock, |
---|
| 3744 | + int chain_head, u64 chain_key) |
---|
2437 | 3745 | { |
---|
2438 | 3746 | return 1; |
---|
2439 | 3747 | } |
---|
2440 | | -#endif |
---|
| 3748 | + |
---|
| 3749 | +static void init_chain_block_buckets(void) { } |
---|
| 3750 | +#endif /* CONFIG_PROVE_LOCKING */ |
---|
2441 | 3751 | |
---|
2442 | 3752 | /* |
---|
2443 | 3753 | * We are building curr_chain_key incrementally, so double-check |
---|
.. | .. |
---|
2448 | 3758 | #ifdef CONFIG_DEBUG_LOCKDEP |
---|
2449 | 3759 | struct held_lock *hlock, *prev_hlock = NULL; |
---|
2450 | 3760 | unsigned int i; |
---|
2451 | | - u64 chain_key = 0; |
---|
| 3761 | + u64 chain_key = INITIAL_CHAIN_KEY; |
---|
2452 | 3762 | |
---|
2453 | 3763 | for (i = 0; i < curr->lockdep_depth; i++) { |
---|
2454 | 3764 | hlock = curr->held_locks + i; |
---|
.. | .. |
---|
2464 | 3774 | (unsigned long long)hlock->prev_chain_key); |
---|
2465 | 3775 | return; |
---|
2466 | 3776 | } |
---|
| 3777 | + |
---|
2467 | 3778 | /* |
---|
2468 | | - * Whoops ran out of static storage again? |
---|
| 3779 | + * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is |
---|
| 3780 | + * it a registered lock class index? |
---|
2469 | 3781 | */ |
---|
2470 | | - if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS)) |
---|
| 3782 | + if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use))) |
---|
2471 | 3783 | return; |
---|
2472 | 3784 | |
---|
2473 | 3785 | if (prev_hlock && (prev_hlock->irq_context != |
---|
2474 | 3786 | hlock->irq_context)) |
---|
2475 | | - chain_key = 0; |
---|
2476 | | - chain_key = iterate_chain_key(chain_key, hlock->class_idx); |
---|
| 3787 | + chain_key = INITIAL_CHAIN_KEY; |
---|
| 3788 | + chain_key = iterate_chain_key(chain_key, hlock_id(hlock)); |
---|
2477 | 3789 | prev_hlock = hlock; |
---|
2478 | 3790 | } |
---|
2479 | 3791 | if (chain_key != curr->curr_chain_key) { |
---|
.. | .. |
---|
2490 | 3802 | #endif |
---|
2491 | 3803 | } |
---|
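The check_chain_key() sanity pass above recomputes the key exactly the way the fast path builds it: start from INITIAL_CHAIN_KEY, restart whenever the irq context changes, and fold in one hlock_id() per held lock. Below is a compact standalone model of that fold; the real iterate_chain_key() is a proper hash mix, so the xor/multiply step and the all-ones sentinel are stand-ins, not the kernel's definitions:

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_INITIAL_CHAIN_KEY	((uint64_t)-1)	/* assumed sentinel value */

struct toy_hlock {
	uint16_t hlock_id;		/* what hlock_id() would return */
	unsigned int irq_context;
};

/* Stand-in for iterate_chain_key(); any decent 64-bit mix would do. */
static uint64_t toy_iterate(uint64_t key, uint16_t id)
{
	key ^= id;
	key *= 0x9e3779b97f4a7c15ULL;
	return key;
}

/* Recompute the key from scratch, the way check_chain_key() does. */
static uint64_t toy_chain_key(const struct toy_hlock *held, int depth)
{
	uint64_t key = TOY_INITIAL_CHAIN_KEY;

	for (int i = 0; i < depth; i++) {
		/* entering a new irq context starts a fresh chain */
		if (i && held[i].irq_context != held[i - 1].irq_context)
			key = TOY_INITIAL_CHAIN_KEY;
		key = toy_iterate(key, held[i].hlock_id);
	}
	return key;
}

int main(void)
{
	struct toy_hlock held[] = {
		{ .hlock_id = 3, .irq_context = 0 },
		{ .hlock_id = 7, .irq_context = 0 },
		{ .hlock_id = 9, .irq_context = 1 },	/* e.g. hardirq entry */
	};

	printf("%016llx\n", (unsigned long long)toy_chain_key(held, 3));
	return 0;
}
```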
2492 | 3804 | |
---|
2493 | | -static void |
---|
2494 | | -print_usage_bug_scenario(struct held_lock *lock) |
---|
| 3805 | +#ifdef CONFIG_PROVE_LOCKING |
---|
| 3806 | +static int mark_lock(struct task_struct *curr, struct held_lock *this, |
---|
| 3807 | + enum lock_usage_bit new_bit); |
---|
| 3808 | + |
---|
| 3809 | +static void print_usage_bug_scenario(struct held_lock *lock) |
---|
2495 | 3810 | { |
---|
2496 | 3811 | struct lock_class *class = hlock_class(lock); |
---|
2497 | 3812 | |
---|
.. | .. |
---|
2508 | 3823 | printk("\n *** DEADLOCK ***\n\n"); |
---|
2509 | 3824 | } |
---|
2510 | 3825 | |
---|
2511 | | -static int |
---|
| 3826 | +static void |
---|
2512 | 3827 | print_usage_bug(struct task_struct *curr, struct held_lock *this, |
---|
2513 | 3828 | enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) |
---|
2514 | 3829 | { |
---|
2515 | | - if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
---|
2516 | | - return 0; |
---|
| 3830 | + if (!debug_locks_off() || debug_locks_silent) |
---|
| 3831 | + return; |
---|
2517 | 3832 | |
---|
2518 | 3833 | pr_warn("\n"); |
---|
2519 | 3834 | pr_warn("================================\n"); |
---|
.. | .. |
---|
2526 | 3841 | |
---|
2527 | 3842 | pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", |
---|
2528 | 3843 | curr->comm, task_pid_nr(curr), |
---|
2529 | | - trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, |
---|
2530 | | - trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, |
---|
2531 | | - trace_hardirqs_enabled(curr), |
---|
2532 | | - trace_softirqs_enabled(curr)); |
---|
| 3844 | + lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, |
---|
| 3845 | + lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, |
---|
| 3846 | + lockdep_hardirqs_enabled(), |
---|
| 3847 | + lockdep_softirqs_enabled(curr)); |
---|
2533 | 3848 | print_lock(this); |
---|
2534 | 3849 | |
---|
2535 | 3850 | pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]); |
---|
2536 | | - print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); |
---|
| 3851 | + print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1); |
---|
2537 | 3852 | |
---|
2538 | 3853 | print_irqtrace_events(curr); |
---|
2539 | 3854 | pr_warn("\nother info that might help us debug this:\n"); |
---|
.. | .. |
---|
2543 | 3858 | |
---|
2544 | 3859 | pr_warn("\nstack backtrace:\n"); |
---|
2545 | 3860 | dump_stack(); |
---|
2546 | | - |
---|
2547 | | - return 0; |
---|
2548 | 3861 | } |
---|
2549 | 3862 | |
---|
2550 | 3863 | /* |
---|
.. | .. |
---|
2554 | 3867 | valid_state(struct task_struct *curr, struct held_lock *this, |
---|
2555 | 3868 | enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) |
---|
2556 | 3869 | { |
---|
2557 | | - if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) |
---|
2558 | | - return print_usage_bug(curr, this, bad_bit, new_bit); |
---|
| 3870 | + if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) { |
---|
| 3871 | + graph_unlock(); |
---|
| 3872 | + print_usage_bug(curr, this, bad_bit, new_bit); |
---|
| 3873 | + return 0; |
---|
| 3874 | + } |
---|
2559 | 3875 | return 1; |
---|
2560 | 3876 | } |
---|
2561 | 3877 | |
---|
2562 | | -static int mark_lock(struct task_struct *curr, struct held_lock *this, |
---|
2563 | | - enum lock_usage_bit new_bit); |
---|
2564 | | - |
---|
2565 | | -#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) |
---|
2566 | 3878 | |
---|
2567 | 3879 | /* |
---|
2568 | 3880 | * print irq inversion bug: |
---|
2569 | 3881 | */ |
---|
2570 | | -static int |
---|
| 3882 | +static void |
---|
2571 | 3883 | print_irq_inversion_bug(struct task_struct *curr, |
---|
2572 | 3884 | struct lock_list *root, struct lock_list *other, |
---|
2573 | 3885 | struct held_lock *this, int forwards, |
---|
.. | .. |
---|
2578 | 3890 | int depth; |
---|
2579 | 3891 | |
---|
2580 | 3892 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
---|
2581 | | - return 0; |
---|
| 3893 | + return; |
---|
2582 | 3894 | |
---|
2583 | 3895 | pr_warn("\n"); |
---|
2584 | 3896 | pr_warn("========================================================\n"); |
---|
.. | .. |
---|
2618 | 3930 | lockdep_print_held_locks(curr); |
---|
2619 | 3931 | |
---|
2620 | 3932 | pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); |
---|
2621 | | - if (!save_trace(&root->trace)) |
---|
2622 | | - return 0; |
---|
| 3933 | + root->trace = save_trace(); |
---|
| 3934 | + if (!root->trace) |
---|
| 3935 | + return; |
---|
2623 | 3936 | print_shortest_lock_dependencies(other, root); |
---|
2624 | 3937 | |
---|
2625 | 3938 | pr_warn("\nstack backtrace:\n"); |
---|
2626 | 3939 | dump_stack(); |
---|
2627 | | - |
---|
2628 | | - return 0; |
---|
2629 | 3940 | } |
---|
2630 | 3941 | |
---|
2631 | 3942 | /* |
---|
.. | .. |
---|
2634 | 3945 | */ |
---|
2635 | 3946 | static int |
---|
2636 | 3947 | check_usage_forwards(struct task_struct *curr, struct held_lock *this, |
---|
2637 | | - enum lock_usage_bit bit, const char *irqclass) |
---|
| 3948 | + enum lock_usage_bit bit) |
---|
2638 | 3949 | { |
---|
2639 | | - int ret; |
---|
| 3950 | + enum bfs_result ret; |
---|
2640 | 3951 | struct lock_list root; |
---|
2641 | | - struct lock_list *uninitialized_var(target_entry); |
---|
| 3952 | + struct lock_list *target_entry; |
---|
| 3953 | + enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK; |
---|
| 3954 | + unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit); |
---|
2642 | 3955 | |
---|
2643 | | - root.parent = NULL; |
---|
2644 | | - root.class = hlock_class(this); |
---|
2645 | | - ret = find_usage_forwards(&root, bit, &target_entry); |
---|
2646 | | - if (ret < 0) |
---|
2647 | | - return print_bfs_bug(ret); |
---|
2648 | | - if (ret == 1) |
---|
2649 | | - return ret; |
---|
| 3956 | + bfs_init_root(&root, this); |
---|
| 3957 | + ret = find_usage_forwards(&root, usage_mask, &target_entry); |
---|
| 3958 | + if (bfs_error(ret)) { |
---|
| 3959 | + print_bfs_bug(ret); |
---|
| 3960 | + return 0; |
---|
| 3961 | + } |
---|
| 3962 | + if (ret == BFS_RNOMATCH) |
---|
| 3963 | + return 1; |
---|
2650 | 3964 | |
---|
2651 | | - return print_irq_inversion_bug(curr, &root, target_entry, |
---|
2652 | | - this, 1, irqclass); |
---|
| 3965 | + /* Check whether write or read usage is the match */ |
---|
| 3966 | + if (target_entry->class->usage_mask & lock_flag(bit)) { |
---|
| 3967 | + print_irq_inversion_bug(curr, &root, target_entry, |
---|
| 3968 | + this, 1, state_name(bit)); |
---|
| 3969 | + } else { |
---|
| 3970 | + print_irq_inversion_bug(curr, &root, target_entry, |
---|
| 3971 | + this, 1, state_name(read_bit)); |
---|
| 3972 | + } |
---|
| 3973 | + |
---|
| 3974 | + return 0; |
---|
2653 | 3975 | } |
---|
2654 | 3976 | |
---|
2655 | 3977 | /* |
---|
.. | .. |
---|
2658 | 3980 | */ |
---|
2659 | 3981 | static int |
---|
2660 | 3982 | check_usage_backwards(struct task_struct *curr, struct held_lock *this, |
---|
2661 | | - enum lock_usage_bit bit, const char *irqclass) |
---|
| 3983 | + enum lock_usage_bit bit) |
---|
2662 | 3984 | { |
---|
2663 | | - int ret; |
---|
| 3985 | + enum bfs_result ret; |
---|
2664 | 3986 | struct lock_list root; |
---|
2665 | | - struct lock_list *uninitialized_var(target_entry); |
---|
| 3987 | + struct lock_list *target_entry; |
---|
| 3988 | + enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK; |
---|
| 3989 | + unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit); |
---|
2666 | 3990 | |
---|
2667 | | - root.parent = NULL; |
---|
2668 | | - root.class = hlock_class(this); |
---|
2669 | | - ret = find_usage_backwards(&root, bit, &target_entry); |
---|
2670 | | - if (ret < 0) |
---|
2671 | | - return print_bfs_bug(ret); |
---|
2672 | | - if (ret == 1) |
---|
2673 | | - return ret; |
---|
| 3991 | + bfs_init_rootb(&root, this); |
---|
| 3992 | + ret = find_usage_backwards(&root, usage_mask, &target_entry); |
---|
| 3993 | + if (bfs_error(ret)) { |
---|
| 3994 | + print_bfs_bug(ret); |
---|
| 3995 | + return 0; |
---|
| 3996 | + } |
---|
| 3997 | + if (ret == BFS_RNOMATCH) |
---|
| 3998 | + return 1; |
---|
2674 | 3999 | |
---|
2675 | | - return print_irq_inversion_bug(curr, &root, target_entry, |
---|
2676 | | - this, 0, irqclass); |
---|
| 4000 | + /* Check whether write or read usage is the match */ |
---|
| 4001 | + if (target_entry->class->usage_mask & lock_flag(bit)) { |
---|
| 4002 | + print_irq_inversion_bug(curr, &root, target_entry, |
---|
| 4003 | + this, 0, state_name(bit)); |
---|
| 4004 | + } else { |
---|
| 4005 | + print_irq_inversion_bug(curr, &root, target_entry, |
---|
| 4006 | + this, 0, state_name(read_bit)); |
---|
| 4007 | + } |
---|
| 4008 | + |
---|
| 4009 | + return 0; |
---|
2677 | 4010 | } |
---|
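Both direction helpers above now search for the write and the read usage bit in a single pass by OR-ing the two flags into usage_mask, and only afterwards inspect the matched class to pick the right state name for the report. A small standalone illustration of that pattern; the `TOY_*` bit positions are arbitrary:

```c
#include <stdio.h>

#define TOY_USED_IN_HARDIRQ		(1U << 0)	/* arbitrary bit positions */
#define TOY_USED_IN_HARDIRQ_READ	(1U << 1)

static const char *match_name(unsigned int found_mask)
{
	/* one search covered both bits; name whichever actually matched */
	if (found_mask & TOY_USED_IN_HARDIRQ)
		return "hardirq";
	return "hardirq-read";
}

int main(void)
{
	unsigned int usage_mask = TOY_USED_IN_HARDIRQ | TOY_USED_IN_HARDIRQ_READ;
	unsigned int class_usage = TOY_USED_IN_HARDIRQ_READ;	/* what the BFS found */

	if (class_usage & usage_mask)
		printf("inversion via %s usage\n", match_name(class_usage));
	return 0;
}
```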
2678 | 4011 | |
---|
2679 | 4012 | void print_irqtrace_events(struct task_struct *curr) |
---|
2680 | 4013 | { |
---|
2681 | | - printk("irq event stamp: %u\n", curr->irq_events); |
---|
| 4014 | + const struct irqtrace_events *trace = &curr->irqtrace; |
---|
| 4015 | + |
---|
| 4016 | + printk("irq event stamp: %u\n", trace->irq_events); |
---|
2682 | 4017 | printk("hardirqs last enabled at (%u): [<%px>] %pS\n", |
---|
2683 | | - curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip, |
---|
2684 | | - (void *)curr->hardirq_enable_ip); |
---|
| 4018 | + trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip, |
---|
| 4019 | + (void *)trace->hardirq_enable_ip); |
---|
2685 | 4020 | printk("hardirqs last disabled at (%u): [<%px>] %pS\n", |
---|
2686 | | - curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip, |
---|
2687 | | - (void *)curr->hardirq_disable_ip); |
---|
| 4021 | + trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip, |
---|
| 4022 | + (void *)trace->hardirq_disable_ip); |
---|
2688 | 4023 | printk("softirqs last enabled at (%u): [<%px>] %pS\n", |
---|
2689 | | - curr->softirq_enable_event, (void *)curr->softirq_enable_ip, |
---|
2690 | | - (void *)curr->softirq_enable_ip); |
---|
| 4024 | + trace->softirq_enable_event, (void *)trace->softirq_enable_ip, |
---|
| 4025 | + (void *)trace->softirq_enable_ip); |
---|
2691 | 4026 | printk("softirqs last disabled at (%u): [<%px>] %pS\n", |
---|
2692 | | - curr->softirq_disable_event, (void *)curr->softirq_disable_ip, |
---|
2693 | | - (void *)curr->softirq_disable_ip); |
---|
| 4027 | + trace->softirq_disable_event, (void *)trace->softirq_disable_ip, |
---|
| 4028 | + (void *)trace->softirq_disable_ip); |
---|
2694 | 4029 | } |
---|
2695 | 4030 | |
---|
2696 | 4031 | static int HARDIRQ_verbose(struct lock_class *class) |
---|
.. | .. |
---|
2709 | 4044 | return 0; |
---|
2710 | 4045 | } |
---|
2711 | 4046 | |
---|
2712 | | -#define STRICT_READ_CHECKS 1 |
---|
2713 | | - |
---|
2714 | 4047 | static int (*state_verbose_f[])(struct lock_class *class) = { |
---|
2715 | 4048 | #define LOCKDEP_STATE(__STATE) \ |
---|
2716 | 4049 | __STATE##_verbose, |
---|
.. | .. |
---|
2721 | 4054 | static inline int state_verbose(enum lock_usage_bit bit, |
---|
2722 | 4055 | struct lock_class *class) |
---|
2723 | 4056 | { |
---|
2724 | | - return state_verbose_f[bit >> 2](class); |
---|
| 4057 | + return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class); |
---|
2725 | 4058 | } |
---|
2726 | 4059 | |
---|
2727 | 4060 | typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, |
---|
.. | .. |
---|
2732 | 4065 | enum lock_usage_bit new_bit) |
---|
2733 | 4066 | { |
---|
2734 | 4067 | int excl_bit = exclusive_bit(new_bit); |
---|
2735 | | - int read = new_bit & 1; |
---|
2736 | | - int dir = new_bit & 2; |
---|
2737 | | - |
---|
2738 | | - /* |
---|
2739 | | - * mark USED_IN has to look forwards -- to ensure no dependency |
---|
2740 | | - * has ENABLED state, which would allow recursion deadlocks. |
---|
2741 | | - * |
---|
2742 | | - * mark ENABLED has to look backwards -- to ensure no dependee |
---|
2743 | | - * has USED_IN state, which, again, would allow recursion deadlocks. |
---|
2744 | | - */ |
---|
2745 | | - check_usage_f usage = dir ? |
---|
2746 | | - check_usage_backwards : check_usage_forwards; |
---|
| 4068 | + int read = new_bit & LOCK_USAGE_READ_MASK; |
---|
| 4069 | + int dir = new_bit & LOCK_USAGE_DIR_MASK; |
---|
2747 | 4070 | |
---|
2748 | 4071 | /* |
---|
2749 | 4072 | * Validate that this particular lock does not have conflicting |
---|
.. | .. |
---|
2753 | 4076 | return 0; |
---|
2754 | 4077 | |
---|
2755 | 4078 | /* |
---|
| 4079 | + * Check for read in write conflicts |
---|
| 4080 | + */ |
---|
| 4081 | + if (!read && !valid_state(curr, this, new_bit, |
---|
| 4082 | + excl_bit + LOCK_USAGE_READ_MASK)) |
---|
| 4083 | + return 0; |
---|
| 4084 | + |
---|
| 4085 | + |
---|
| 4086 | + /* |
---|
2756 | 4087 | * Validate that the lock dependencies don't have conflicting usage |
---|
2757 | 4088 | * states. |
---|
2758 | 4089 | */ |
---|
2759 | | - if ((!read || !dir || STRICT_READ_CHECKS) && |
---|
2760 | | - !usage(curr, this, excl_bit, state_name(new_bit & ~1))) |
---|
2761 | | - return 0; |
---|
2762 | | - |
---|
2763 | | - /* |
---|
2764 | | - * Check for read in write conflicts |
---|
2765 | | - */ |
---|
2766 | | - if (!read) { |
---|
2767 | | - if (!valid_state(curr, this, new_bit, excl_bit + 1)) |
---|
| 4090 | + if (dir) { |
---|
| 4091 | + /* |
---|
| 4092 | + * mark ENABLED has to look backwards -- to ensure no dependee |
---|
| 4093 | + * has USED_IN state, which, again, would allow recursion deadlocks. |
---|
| 4094 | + */ |
---|
| 4095 | + if (!check_usage_backwards(curr, this, excl_bit)) |
---|
2768 | 4096 | return 0; |
---|
2769 | | - |
---|
2770 | | - if (STRICT_READ_CHECKS && |
---|
2771 | | - !usage(curr, this, excl_bit + 1, |
---|
2772 | | - state_name(new_bit + 1))) |
---|
| 4097 | + } else { |
---|
| 4098 | + /* |
---|
| 4099 | + * mark USED_IN has to look forwards -- to ensure no dependency |
---|
| 4100 | + * has ENABLED state, which would allow recursion deadlocks. |
---|
| 4101 | + */ |
---|
| 4102 | + if (!check_usage_forwards(curr, this, excl_bit)) |
---|
2773 | 4103 | return 0; |
---|
2774 | 4104 | } |
---|
2775 | 4105 | |
---|
.. | .. |
---|
2779 | 4109 | return 1; |
---|
2780 | 4110 | } |
---|
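The direction rule above (marking USED_IN looks forwards, marking ENABLED looks backwards) is easiest to see on the classic interrupt inversion. A hedged sketch of the two-CPU scenario the two walks are designed to catch; the locks A and B are made up:

```c
/*
 * The inversion both walks are hunting, with made-up locks A and B and
 * a pre-existing dependency B -> A (A was taken while B was held):
 *
 *	CPU 0					CPU 1
 *	-----					-----
 *	spin_lock(&A);		// irqs on	spin_lock(&B);
 *						spin_lock(&A);	// waits for CPU 0
 *	<hardirq>
 *	  spin_lock(&B);	// waits for CPU 1 => deadlock
 *
 * Here B is USED_IN_HARDIRQ and A is ENABLED_HARDIRQ.  Marking B as
 * USED_IN walks *forwards* along B -> A and finds A's ENABLED usage;
 * marking A as ENABLED walks *backwards* along B -> A and finds B's
 * USED_IN usage.  Either direction catches the same inversion.
 */
```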
2781 | 4111 | |
---|
2782 | | -enum mark_type { |
---|
2783 | | -#define LOCKDEP_STATE(__STATE) __STATE, |
---|
2784 | | -#include "lockdep_states.h" |
---|
2785 | | -#undef LOCKDEP_STATE |
---|
2786 | | -}; |
---|
2787 | | - |
---|
2788 | 4112 | /* |
---|
2789 | 4113 | * Mark all held locks with a usage bit: |
---|
2790 | 4114 | */ |
---|
2791 | 4115 | static int |
---|
2792 | | -mark_held_locks(struct task_struct *curr, enum mark_type mark) |
---|
| 4116 | +mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit) |
---|
2793 | 4117 | { |
---|
2794 | | - enum lock_usage_bit usage_bit; |
---|
2795 | 4118 | struct held_lock *hlock; |
---|
2796 | 4119 | int i; |
---|
2797 | 4120 | |
---|
2798 | 4121 | for (i = 0; i < curr->lockdep_depth; i++) { |
---|
| 4122 | + enum lock_usage_bit hlock_bit = base_bit; |
---|
2799 | 4123 | hlock = curr->held_locks + i; |
---|
2800 | 4124 | |
---|
2801 | | - usage_bit = 2 + (mark << 2); /* ENABLED */ |
---|
2802 | 4125 | if (hlock->read) |
---|
2803 | | - usage_bit += 1; /* READ */ |
---|
| 4126 | + hlock_bit += LOCK_USAGE_READ_MASK; |
---|
2804 | 4127 | |
---|
2805 | | - BUG_ON(usage_bit >= LOCK_USAGE_STATES); |
---|
| 4128 | + BUG_ON(hlock_bit >= LOCK_USAGE_STATES); |
---|
2806 | 4129 | |
---|
2807 | 4130 | if (!hlock->check) |
---|
2808 | 4131 | continue; |
---|
2809 | 4132 | |
---|
2810 | | - if (!mark_lock(curr, hlock, usage_bit)) |
---|
| 4133 | + if (!mark_lock(curr, hlock, hlock_bit)) |
---|
2811 | 4134 | return 0; |
---|
2812 | 4135 | } |
---|
2813 | 4136 | |
---|
.. | .. |
---|
2817 | 4140 | /* |
---|
2818 | 4141 | * Hardirqs will be enabled: |
---|
2819 | 4142 | */ |
---|
2820 | | -static void __trace_hardirqs_on_caller(unsigned long ip) |
---|
| 4143 | +static void __trace_hardirqs_on_caller(void) |
---|
2821 | 4144 | { |
---|
2822 | 4145 | struct task_struct *curr = current; |
---|
2823 | | - |
---|
2824 | | - /* we'll do an OFF -> ON transition: */ |
---|
2825 | | - curr->hardirqs_enabled = 1; |
---|
2826 | 4146 | |
---|
2827 | 4147 | /* |
---|
2828 | 4148 | * We are going to turn hardirqs on, so set the |
---|
2829 | 4149 | * usage bit for all held locks: |
---|
2830 | 4150 | */ |
---|
2831 | | - if (!mark_held_locks(curr, HARDIRQ)) |
---|
| 4151 | + if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ)) |
---|
2832 | 4152 | return; |
---|
2833 | 4153 | /* |
---|
2834 | 4154 | * If we have softirqs enabled, then set the usage |
---|
.. | .. |
---|
2836 | 4156 | * this bit from being set before) |
---|
2837 | 4157 | */ |
---|
2838 | 4158 | if (curr->softirqs_enabled) |
---|
2839 | | - if (!mark_held_locks(curr, SOFTIRQ)) |
---|
2840 | | - return; |
---|
2841 | | - |
---|
2842 | | - curr->hardirq_enable_ip = ip; |
---|
2843 | | - curr->hardirq_enable_event = ++curr->irq_events; |
---|
2844 | | - debug_atomic_inc(hardirqs_on_events); |
---|
| 4159 | + mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); |
---|
2845 | 4160 | } |
---|
2846 | 4161 | |
---|
2847 | | -void lockdep_hardirqs_on(unsigned long ip) |
---|
| 4162 | +/** |
---|
| 4163 | + * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts |
---|
| 4164 | + * @ip: Caller address |
---|
| 4165 | + * |
---|
| 4166 | + * Invoked before a possible transition to RCU idle from exit to user or |
---|
| 4167 | + * guest mode. This ensures that all RCU operations are done before RCU |
---|
| 4168 | + * stops watching. After the RCU transition lockdep_hardirqs_on() has to be |
---|
| 4169 | + * invoked to set the final state. |
---|
| 4170 | + */ |
---|
| 4171 | +void lockdep_hardirqs_on_prepare(unsigned long ip) |
---|
2848 | 4172 | { |
---|
2849 | | - if (unlikely(!debug_locks || current->lockdep_recursion)) |
---|
| 4173 | + if (unlikely(!debug_locks)) |
---|
2850 | 4174 | return; |
---|
2851 | 4175 | |
---|
2852 | | - if (unlikely(current->hardirqs_enabled)) { |
---|
| 4176 | + /* |
---|
| 4177 | + * NMIs do not (and cannot) track lock dependencies, nothing to do. |
---|
| 4178 | + */ |
---|
| 4179 | + if (unlikely(in_nmi())) |
---|
| 4180 | + return; |
---|
| 4181 | + |
---|
| 4182 | + if (unlikely(this_cpu_read(lockdep_recursion))) |
---|
| 4183 | + return; |
---|
| 4184 | + |
---|
| 4185 | + if (unlikely(lockdep_hardirqs_enabled())) { |
---|
2853 | 4186 | /* |
---|
2854 | 4187 | * Neither irq nor preemption are disabled here |
---|
2855 | 4188 | * so this is racy by nature but losing one hit |
---|
.. | .. |
---|
2870 | 4203 | /* |
---|
2871 | 4204 | * See the fine text that goes along with this variable definition. |
---|
2872 | 4205 | */ |
---|
2873 | | - if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) |
---|
| 4206 | + if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled)) |
---|
2874 | 4207 | return; |
---|
2875 | 4208 | |
---|
2876 | 4209 | /* |
---|
2877 | 4210 | * Can't allow enabling interrupts while in an interrupt handler, |
---|
2878 | 4211 | * that's general bad form and such. Recursion, limited stack etc.. |
---|
2879 | 4212 | */ |
---|
2880 | | - if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) |
---|
| 4213 | + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context())) |
---|
2881 | 4214 | return; |
---|
2882 | 4215 | |
---|
2883 | | - current->lockdep_recursion = 1; |
---|
2884 | | - __trace_hardirqs_on_caller(ip); |
---|
2885 | | - current->lockdep_recursion = 0; |
---|
| 4216 | + current->hardirq_chain_key = current->curr_chain_key; |
---|
| 4217 | + |
---|
| 4218 | + lockdep_recursion_inc(); |
---|
| 4219 | + __trace_hardirqs_on_caller(); |
---|
| 4220 | + lockdep_recursion_finish(); |
---|
2886 | 4221 | } |
---|
| 4222 | +EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare); |
---|
| 4223 | + |
---|
| 4224 | +void noinstr lockdep_hardirqs_on(unsigned long ip) |
---|
| 4225 | +{ |
---|
| 4226 | + struct irqtrace_events *trace = &current->irqtrace;
---|
| 4227 | + |
---|
| 4228 | + if (unlikely(!debug_locks)) |
---|
| 4229 | + return; |
---|
| 4230 | + |
---|
| 4231 | + /* |
---|
| 4232 | + * NMIs can happen in the middle of local_irq_{en,dis}able() where the |
---|
| 4233 | + * tracking state and hardware state are out of sync. |
---|
| 4234 | + * |
---|
| 4235 | + * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from, |
---|
| 4236 | + * and not rely on hardware state like normal interrupts. |
---|
| 4237 | + */ |
---|
| 4238 | + if (unlikely(in_nmi())) { |
---|
| 4239 | + if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) |
---|
| 4240 | + return; |
---|
| 4241 | + |
---|
| 4242 | + /* |
---|
| 4243 | + * Skip: |
---|
| 4244 | + * - recursion check, because NMI can hit lockdep; |
---|
| 4245 | + * - hardware state check, because above; |
---|
| 4246 | + * - chain_key check, see lockdep_hardirqs_on_prepare(). |
---|
| 4247 | + */ |
---|
| 4248 | + goto skip_checks; |
---|
| 4249 | + } |
---|
| 4250 | + |
---|
| 4251 | + if (unlikely(this_cpu_read(lockdep_recursion))) |
---|
| 4252 | + return; |
---|
| 4253 | + |
---|
| 4254 | + if (lockdep_hardirqs_enabled()) { |
---|
| 4255 | + /* |
---|
| 4256 | + * Neither irq nor preemption are disabled here |
---|
| 4257 | + * so this is racy by nature but losing one hit |
---|
| 4258 | + * in a stat is not a big deal. |
---|
| 4259 | + */ |
---|
| 4260 | + __debug_atomic_inc(redundant_hardirqs_on); |
---|
| 4261 | + return; |
---|
| 4262 | + } |
---|
| 4263 | + |
---|
| 4264 | + /* |
---|
| 4265 | + * We're enabling irqs and according to our state above irqs weren't |
---|
| 4266 | + * already enabled, yet we find the hardware thinks they are in fact |
---|
| 4267 | + * enabled.. someone messed up their IRQ state tracing. |
---|
| 4268 | + */ |
---|
| 4269 | + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
---|
| 4270 | + return; |
---|
| 4271 | + |
---|
| 4272 | + /* |
---|
| 4273 | + * Ensure the lock stack remained unchanged between |
---|
| 4274 | + * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on(). |
---|
| 4275 | + */ |
---|
| 4276 | + DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key != |
---|
| 4277 | + current->curr_chain_key); |
---|
| 4278 | + |
---|
| 4279 | +skip_checks: |
---|
| 4280 | + /* we'll do an OFF -> ON transition: */ |
---|
| 4281 | + __this_cpu_write(hardirqs_enabled, 1); |
---|
| 4282 | + trace->hardirq_enable_ip = ip; |
---|
| 4283 | + trace->hardirq_enable_event = ++trace->irq_events; |
---|
| 4284 | + debug_atomic_inc(hardirqs_on_events); |
---|
| 4285 | +} |
---|
| 4286 | +EXPORT_SYMBOL_GPL(lockdep_hardirqs_on); |
---|
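/*
 * Illustrative sketch, not part of the patch above: how a caller is expected
 * to pair the two-step API. The wrapper name and exact call site are
 * assumptions; only the ordering matters - lockdep_hardirqs_on_prepare()
 * does all work that may use RCU while RCU is still watching, and
 * lockdep_hardirqs_on() commits the final software state just before the
 * hardware flag is flipped.
 */
static inline void example_local_irq_enable(void)
{
	lockdep_hardirqs_on_prepare(_THIS_IP_);	/* RCU-using work first */
	/* ...a transition towards RCU idle may happen in between... */
	lockdep_hardirqs_on(_THIS_IP_);		/* commit OFF -> ON */
	raw_local_irq_enable();			/* finally enable hard IRQs */
}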
2887 | 4287 | |
---|
2888 | 4288 | /* |
---|
2889 | 4289 | * Hardirqs were disabled: |
---|
2890 | 4290 | */ |
---|
2891 | | -void lockdep_hardirqs_off(unsigned long ip) |
---|
| 4291 | +void noinstr lockdep_hardirqs_off(unsigned long ip) |
---|
2892 | 4292 | { |
---|
2893 | | - struct task_struct *curr = current; |
---|
| 4293 | + if (unlikely(!debug_locks)) |
---|
| 4294 | + return; |
---|
2894 | 4295 | |
---|
2895 | | - if (unlikely(!debug_locks || current->lockdep_recursion)) |
---|
| 4296 | + /* |
---|
| 4297 | + * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep; |
---|
| 4298 | + * they will restore the software state. This ensures the software |
---|
| 4299 | + * state is consistent inside NMIs as well. |
---|
| 4300 | + */ |
---|
| 4301 | + if (in_nmi()) { |
---|
| 4302 | + if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) |
---|
| 4303 | + return; |
---|
| 4304 | + } else if (__this_cpu_read(lockdep_recursion)) |
---|
2896 | 4305 | return; |
---|
2897 | 4306 | |
---|
2898 | 4307 | /* |
---|
.. | .. |
---|
2902 | 4311 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
---|
2903 | 4312 | return; |
---|
2904 | 4313 | |
---|
2905 | | - if (curr->hardirqs_enabled) { |
---|
| 4314 | + if (lockdep_hardirqs_enabled()) { |
---|
| 4315 | + struct irqtrace_events *trace = &current->irqtrace;
---|
| 4316 | + |
---|
2906 | 4317 | /* |
---|
2907 | 4318 | * We have done an ON -> OFF transition: |
---|
2908 | 4319 | */ |
---|
2909 | | - curr->hardirqs_enabled = 0; |
---|
2910 | | - curr->hardirq_disable_ip = ip; |
---|
2911 | | - curr->hardirq_disable_event = ++curr->irq_events; |
---|
| 4320 | + __this_cpu_write(hardirqs_enabled, 0); |
---|
| 4321 | + trace->hardirq_disable_ip = ip; |
---|
| 4322 | + trace->hardirq_disable_event = ++trace->irq_events; |
---|
2912 | 4323 | debug_atomic_inc(hardirqs_off_events); |
---|
2913 | | - } else |
---|
| 4324 | + } else { |
---|
2914 | 4325 | debug_atomic_inc(redundant_hardirqs_off); |
---|
| 4326 | + } |
---|
2915 | 4327 | } |
---|
| 4328 | +EXPORT_SYMBOL_GPL(lockdep_hardirqs_off); |
---|
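/*
 * Illustrative sketch, not part of the patch above: the save/restore pattern
 * the NMI comments in lockdep_hardirqs_on()/_off() describe. The entry/exit
 * helper names and the local variable are assumptions; the calls themselves
 * are the interfaces from this file.
 */
static bool example_nmi_enter(void)
{
	bool irqs_were_on = lockdep_hardirqs_enabled();	/* save software state */

	lockdep_hardirqs_off(_THIS_IP_);	/* NMIs always run with IRQs off */
	return irqs_were_on;
}

static void example_nmi_exit(bool irqs_were_on)
{
	if (irqs_were_on) {			/* restore the saved state */
		lockdep_hardirqs_on_prepare(_THIS_IP_);
		lockdep_hardirqs_on(_THIS_IP_);
	}
}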
2916 | 4329 | |
---|
2917 | 4330 | /* |
---|
2918 | 4331 | * Softirqs will be enabled: |
---|
2919 | 4332 | */ |
---|
2920 | | -void trace_softirqs_on(unsigned long ip) |
---|
| 4333 | +void lockdep_softirqs_on(unsigned long ip) |
---|
2921 | 4334 | { |
---|
2922 | | - struct task_struct *curr = current; |
---|
| 4335 | + struct irqtrace_events *trace = &current->irqtrace;
---|
2923 | 4336 | |
---|
2924 | | - if (unlikely(!debug_locks || current->lockdep_recursion)) |
---|
| 4337 | + if (unlikely(!lockdep_enabled())) |
---|
2925 | 4338 | return; |
---|
2926 | 4339 | |
---|
2927 | 4340 | /* |
---|
.. | .. |
---|
2931 | 4344 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
---|
2932 | 4345 | return; |
---|
2933 | 4346 | |
---|
2934 | | - if (curr->softirqs_enabled) { |
---|
| 4347 | + if (current->softirqs_enabled) { |
---|
2935 | 4348 | debug_atomic_inc(redundant_softirqs_on); |
---|
2936 | 4349 | return; |
---|
2937 | 4350 | } |
---|
2938 | 4351 | |
---|
2939 | | - current->lockdep_recursion = 1; |
---|
| 4352 | + lockdep_recursion_inc(); |
---|
2940 | 4353 | /* |
---|
2941 | 4354 | * We'll do an OFF -> ON transition: |
---|
2942 | 4355 | */ |
---|
2943 | | - curr->softirqs_enabled = 1; |
---|
2944 | | - curr->softirq_enable_ip = ip; |
---|
2945 | | - curr->softirq_enable_event = ++curr->irq_events; |
---|
| 4356 | + current->softirqs_enabled = 1; |
---|
| 4357 | + trace->softirq_enable_ip = ip; |
---|
| 4358 | + trace->softirq_enable_event = ++trace->irq_events; |
---|
2946 | 4359 | debug_atomic_inc(softirqs_on_events); |
---|
2947 | 4360 | /* |
---|
2948 | 4361 | * We are going to turn softirqs on, so set the |
---|
2949 | 4362 | * usage bit for all held locks, if hardirqs are |
---|
2950 | 4363 | * enabled too: |
---|
2951 | 4364 | */ |
---|
2952 | | - if (curr->hardirqs_enabled) |
---|
2953 | | - mark_held_locks(curr, SOFTIRQ); |
---|
2954 | | - current->lockdep_recursion = 0; |
---|
| 4365 | + if (lockdep_hardirqs_enabled()) |
---|
| 4366 | + mark_held_locks(current, LOCK_ENABLED_SOFTIRQ); |
---|
| 4367 | + lockdep_recursion_finish(); |
---|
2955 | 4368 | } |
---|
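/*
 * Illustrative sketch, not part of the patch above: lockdep_softirqs_on() and
 * lockdep_softirqs_off() are meant to be driven from the bottom-half
 * enable/disable paths. The wrapper names below are assumptions; the point is
 * only that the annotations bracket the window in which softirqs may run.
 */
static inline void example_bh_disable(unsigned long ip)
{
	/* bump the softirq-disable count in preempt_count(), then: */
	lockdep_softirqs_off(ip);	/* record the ON -> OFF transition */
}

static inline void example_bh_enable(unsigned long ip)
{
	lockdep_softirqs_on(ip);	/* record OFF -> ON before softirqs can run */
	/* ...then drop the count and process any pending softirqs */
}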
2956 | 4369 | |
---|
2957 | 4370 | /* |
---|
2958 | 4371 | * Softirqs were disabled: |
---|
2959 | 4372 | */ |
---|
2960 | | -void trace_softirqs_off(unsigned long ip) |
---|
| 4373 | +void lockdep_softirqs_off(unsigned long ip) |
---|
2961 | 4374 | { |
---|
2962 | | - struct task_struct *curr = current; |
---|
2963 | | - |
---|
2964 | | - if (unlikely(!debug_locks || current->lockdep_recursion)) |
---|
| 4375 | + if (unlikely(!lockdep_enabled())) |
---|
2965 | 4376 | return; |
---|
2966 | 4377 | |
---|
2967 | 4378 | /* |
---|
.. | .. |
---|
2970 | 4381 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
---|
2971 | 4382 | return; |
---|
2972 | 4383 | |
---|
2973 | | - if (curr->softirqs_enabled) { |
---|
| 4384 | + if (current->softirqs_enabled) { |
---|
| 4385 | + struct irqtrace_events *trace = &current->irqtrace;
---|
| 4386 | + |
---|
2974 | 4387 | /* |
---|
2975 | 4388 | * We have done an ON -> OFF transition: |
---|
2976 | 4389 | */ |
---|
2977 | | - curr->softirqs_enabled = 0; |
---|
2978 | | - curr->softirq_disable_ip = ip; |
---|
2979 | | - curr->softirq_disable_event = ++curr->irq_events; |
---|
| 4390 | + current->softirqs_enabled = 0; |
---|
| 4391 | + trace->softirq_disable_ip = ip; |
---|
| 4392 | + trace->softirq_disable_event = ++trace->irq_events; |
---|
2980 | 4393 | debug_atomic_inc(softirqs_off_events); |
---|
2981 | 4394 | /* |
---|
2982 | 4395 | * Whoops, we wanted softirqs off, so why aren't they? |
---|
.. | .. |
---|
2986 | 4399 | debug_atomic_inc(redundant_softirqs_off); |
---|
2987 | 4400 | } |
---|
2988 | 4401 | |
---|
2989 | | -static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) |
---|
| 4402 | +static int |
---|
| 4403 | +mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) |
---|
2990 | 4404 | { |
---|
| 4405 | + if (!check) |
---|
| 4406 | + goto lock_used; |
---|
| 4407 | + |
---|
2991 | 4408 | /* |
---|
2992 | 4409 | * If non-trylock use in a hardirq or softirq context, then |
---|
2993 | 4410 | * mark the lock as used in these contexts: |
---|
2994 | 4411 | */ |
---|
2995 | 4412 | if (!hlock->trylock) { |
---|
2996 | 4413 | if (hlock->read) { |
---|
2997 | | - if (curr->hardirq_context) |
---|
| 4414 | + if (lockdep_hardirq_context()) |
---|
2998 | 4415 | if (!mark_lock(curr, hlock, |
---|
2999 | 4416 | LOCK_USED_IN_HARDIRQ_READ)) |
---|
3000 | 4417 | return 0; |
---|
.. | .. |
---|
3003 | 4420 | LOCK_USED_IN_SOFTIRQ_READ)) |
---|
3004 | 4421 | return 0; |
---|
3005 | 4422 | } else { |
---|
3006 | | - if (curr->hardirq_context) |
---|
| 4423 | + if (lockdep_hardirq_context()) |
---|
3007 | 4424 | if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) |
---|
3008 | 4425 | return 0; |
---|
3009 | 4426 | if (curr->softirq_context) |
---|
.. | .. |
---|
3031 | 4448 | } |
---|
3032 | 4449 | } |
---|
3033 | 4450 | |
---|
| 4451 | +lock_used: |
---|
| 4452 | + /* mark it as used: */ |
---|
| 4453 | + if (!mark_lock(curr, hlock, LOCK_USED)) |
---|
| 4454 | + return 0; |
---|
| 4455 | + |
---|
3034 | 4456 | return 1; |
---|
3035 | 4457 | } |
---|
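/*
 * A short worked example of the usage-bit encoding that mark_usage() and
 * mark_held_locks() rely on. The "+ LOCK_USAGE_READ_MASK gives the _READ
 * variant" relation is taken from the code above; the concrete enum layout is
 * assumed from lockdep_internals.h:
 *
 *	base bit			read variant
 *	LOCK_USED_IN_HARDIRQ		LOCK_USED_IN_HARDIRQ + LOCK_USAGE_READ_MASK
 *	LOCK_ENABLED_SOFTIRQ		LOCK_ENABLED_SOFTIRQ + LOCK_USAGE_READ_MASK
 *	LOCK_USED			LOCK_USED_READ
 *
 * Everything below LOCK_USED is an IRQ usage bit and goes through
 * mark_lock_irq(); LOCK_USED/LOCK_USED_READ merely record that the class has
 * been used at all and feed the nr_unused_locks statistic.
 */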
3036 | 4458 | |
---|
3037 | 4459 | static inline unsigned int task_irq_context(struct task_struct *task) |
---|
3038 | 4460 | { |
---|
3039 | | - return 2 * !!task->hardirq_context + !!task->softirq_context; |
---|
| 4461 | + return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() + |
---|
| 4462 | + LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; |
---|
3040 | 4463 | } |
---|
3041 | 4464 | |
---|
3042 | 4465 | static int separate_irq_context(struct task_struct *curr, |
---|
.. | .. |
---|
3062 | 4485 | return 0; |
---|
3063 | 4486 | } |
---|
3064 | 4487 | |
---|
3065 | | -#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ |
---|
3066 | | - |
---|
3067 | | -static inline |
---|
3068 | | -int mark_lock_irq(struct task_struct *curr, struct held_lock *this, |
---|
3069 | | - enum lock_usage_bit new_bit) |
---|
3070 | | -{ |
---|
3071 | | - WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */ |
---|
3072 | | - return 1; |
---|
3073 | | -} |
---|
3074 | | - |
---|
3075 | | -static inline int mark_irqflags(struct task_struct *curr, |
---|
3076 | | - struct held_lock *hlock) |
---|
3077 | | -{ |
---|
3078 | | - return 1; |
---|
3079 | | -} |
---|
3080 | | - |
---|
3081 | | -static inline unsigned int task_irq_context(struct task_struct *task) |
---|
3082 | | -{ |
---|
3083 | | - return 0; |
---|
3084 | | -} |
---|
3085 | | - |
---|
3086 | | -static inline int separate_irq_context(struct task_struct *curr, |
---|
3087 | | - struct held_lock *hlock) |
---|
3088 | | -{ |
---|
3089 | | - return 0; |
---|
3090 | | -} |
---|
3091 | | - |
---|
3092 | | -#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ |
---|
3093 | | - |
---|
3094 | 4488 | /* |
---|
3095 | 4489 | * Mark a lock with a usage bit, and validate the state transition: |
---|
3096 | 4490 | */ |
---|
3097 | 4491 | static int mark_lock(struct task_struct *curr, struct held_lock *this, |
---|
3098 | 4492 | enum lock_usage_bit new_bit) |
---|
3099 | 4493 | { |
---|
3100 | | - unsigned int new_mask = 1 << new_bit, ret = 1; |
---|
| 4494 | + unsigned int new_mask, ret = 1; |
---|
| 4495 | + |
---|
| 4496 | + if (new_bit >= LOCK_USAGE_STATES) { |
---|
| 4497 | + DEBUG_LOCKS_WARN_ON(1); |
---|
| 4498 | + return 0; |
---|
| 4499 | + } |
---|
| 4500 | + |
---|
| 4501 | + if (new_bit == LOCK_USED && this->read) |
---|
| 4502 | + new_bit = LOCK_USED_READ; |
---|
| 4503 | + |
---|
| 4504 | + new_mask = 1 << new_bit; |
---|
3101 | 4505 | |
---|
3102 | 4506 | /* |
---|
3103 | 4507 | * If already set then do not dirty the cacheline, |
---|
.. | .. |
---|
3111 | 4515 | /* |
---|
3112 | 4516 | * Make sure we didn't race: |
---|
3113 | 4517 | */ |
---|
3114 | | - if (unlikely(hlock_class(this)->usage_mask & new_mask)) { |
---|
3115 | | - graph_unlock(); |
---|
3116 | | - return 1; |
---|
3117 | | - } |
---|
| 4518 | + if (unlikely(hlock_class(this)->usage_mask & new_mask)) |
---|
| 4519 | + goto unlock; |
---|
| 4520 | + |
---|
| 4521 | + if (!hlock_class(this)->usage_mask) |
---|
| 4522 | + debug_atomic_dec(nr_unused_locks); |
---|
3118 | 4523 | |
---|
3119 | 4524 | hlock_class(this)->usage_mask |= new_mask; |
---|
3120 | 4525 | |
---|
3121 | | - if (!save_trace(hlock_class(this)->usage_traces + new_bit)) |
---|
3122 | | - return 0; |
---|
| 4526 | + if (new_bit < LOCK_TRACE_STATES) { |
---|
| 4527 | + if (!(hlock_class(this)->usage_traces[new_bit] = save_trace())) |
---|
| 4528 | + return 0; |
---|
| 4529 | + } |
---|
3123 | 4530 | |
---|
3124 | | - switch (new_bit) { |
---|
3125 | | -#define LOCKDEP_STATE(__STATE) \ |
---|
3126 | | - case LOCK_USED_IN_##__STATE: \ |
---|
3127 | | - case LOCK_USED_IN_##__STATE##_READ: \ |
---|
3128 | | - case LOCK_ENABLED_##__STATE: \ |
---|
3129 | | - case LOCK_ENABLED_##__STATE##_READ: |
---|
3130 | | -#include "lockdep_states.h" |
---|
3131 | | -#undef LOCKDEP_STATE |
---|
| 4531 | + if (new_bit < LOCK_USED) { |
---|
3132 | 4532 | ret = mark_lock_irq(curr, this, new_bit); |
---|
3133 | 4533 | if (!ret) |
---|
3134 | 4534 | return 0; |
---|
3135 | | - break; |
---|
3136 | | - case LOCK_USED: |
---|
3137 | | - debug_atomic_dec(nr_unused_locks); |
---|
3138 | | - break; |
---|
3139 | | - default: |
---|
3140 | | - if (!debug_locks_off_graph_unlock()) |
---|
3141 | | - return 0; |
---|
3142 | | - WARN_ON(1); |
---|
3143 | | - return 0; |
---|
3144 | 4535 | } |
---|
3145 | 4536 | |
---|
| 4537 | +unlock: |
---|
3146 | 4538 | graph_unlock(); |
---|
3147 | 4539 | |
---|
3148 | 4540 | /* |
---|
.. | .. |
---|
3158 | 4550 | return ret; |
---|
3159 | 4551 | } |
---|
3160 | 4552 | |
---|
| 4553 | +static inline short task_wait_context(struct task_struct *curr) |
---|
| 4554 | +{ |
---|
| 4555 | + /* |
---|
| 4556 | + * Set appropriate wait type for the context; for IRQs we have to take |
---|
| 4557 | + * into account force_irqthread as that is implied by PREEMPT_RT. |
---|
| 4558 | + */ |
---|
| 4559 | + if (lockdep_hardirq_context()) { |
---|
| 4560 | + /* |
---|
| 4561 | + * Check if force_irqthreads will run us threaded. |
---|
| 4562 | + */ |
---|
| 4563 | + if (curr->hardirq_threaded || curr->irq_config) |
---|
| 4564 | + return LD_WAIT_CONFIG; |
---|
| 4565 | + |
---|
| 4566 | + return LD_WAIT_SPIN; |
---|
| 4567 | + } else if (curr->softirq_context) { |
---|
| 4568 | + /* |
---|
| 4569 | + * Softirqs are always threaded. |
---|
| 4570 | + */ |
---|
| 4571 | + return LD_WAIT_CONFIG; |
---|
| 4572 | + } |
---|
| 4573 | + |
---|
| 4574 | + return LD_WAIT_MAX; |
---|
| 4575 | +} |
---|
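/*
 * For reference, the wait types returned above form an ordering (the enum is
 * assumed from <linux/lockdep_types.h>; the exact values are not relied on):
 *
 *	LD_WAIT_SPIN < LD_WAIT_CONFIG < LD_WAIT_SLEEP < LD_WAIT_MAX
 *
 * A non-threaded hardirq is the strictest context (only spinning locks),
 * softirqs and threaded interrupts allow locks that sleep only on PREEMPT_RT
 * (LD_WAIT_CONFIG), and plain task context (LD_WAIT_MAX) adds no restriction
 * of its own.
 */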
| 4576 | + |
---|
| 4577 | +static int |
---|
| 4578 | +print_lock_invalid_wait_context(struct task_struct *curr, |
---|
| 4579 | + struct held_lock *hlock) |
---|
| 4580 | +{ |
---|
| 4581 | + short curr_inner; |
---|
| 4582 | + |
---|
| 4583 | + if (!debug_locks_off()) |
---|
| 4584 | + return 0; |
---|
| 4585 | + if (debug_locks_silent) |
---|
| 4586 | + return 0; |
---|
| 4587 | + |
---|
| 4588 | + pr_warn("\n"); |
---|
| 4589 | + pr_warn("=============================\n"); |
---|
| 4590 | + pr_warn("[ BUG: Invalid wait context ]\n"); |
---|
| 4591 | + print_kernel_ident(); |
---|
| 4592 | + pr_warn("-----------------------------\n"); |
---|
| 4593 | + |
---|
| 4594 | + pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); |
---|
| 4595 | + print_lock(hlock); |
---|
| 4596 | + |
---|
| 4597 | + pr_warn("other info that might help us debug this:\n"); |
---|
| 4598 | + |
---|
| 4599 | + curr_inner = task_wait_context(curr); |
---|
| 4600 | + pr_warn("context-{%d:%d}\n", curr_inner, curr_inner); |
---|
| 4601 | + |
---|
| 4602 | + lockdep_print_held_locks(curr); |
---|
| 4603 | + |
---|
| 4604 | + pr_warn("stack backtrace:\n"); |
---|
| 4605 | + dump_stack(); |
---|
| 4606 | + |
---|
| 4607 | + return 0; |
---|
| 4608 | +} |
---|
| 4609 | + |
---|
| 4610 | +/* |
---|
| 4611 | + * Verify the wait_type context. |
---|
| 4612 | + * |
---|
| 4613 | + * This check validates that we take locks in the right wait-type order; that is, it
---|
| 4614 | + * ensures that we do not take mutexes inside spinlocks and do not attempt to |
---|
| 4615 | + * acquire spinlocks inside raw_spinlocks and the like.
---|
| 4616 | + * |
---|
| 4617 | + * The entire thing is slightly more complex because of RCU: RCU is a lock that
---|
| 4618 | + * can be taken from (pretty much) any context but also has constraints. |
---|
| 4619 | + * However when taken in a stricter environment the RCU lock does not loosen |
---|
| 4620 | + * the constraints. |
---|
| 4621 | + * |
---|
| 4622 | + * Therefore we must look for the strictest environment in the lock stack and |
---|
| 4623 | + * compare that to the lock we're trying to acquire. |
---|
| 4624 | + */ |
---|
| 4625 | +static int check_wait_context(struct task_struct *curr, struct held_lock *next) |
---|
| 4626 | +{ |
---|
| 4627 | + u8 next_inner = hlock_class(next)->wait_type_inner; |
---|
| 4628 | + u8 next_outer = hlock_class(next)->wait_type_outer; |
---|
| 4629 | + u8 curr_inner; |
---|
| 4630 | + int depth; |
---|
| 4631 | + |
---|
| 4632 | + if (!next_inner || next->trylock) |
---|
| 4633 | + return 0; |
---|
| 4634 | + |
---|
| 4635 | + if (!next_outer) |
---|
| 4636 | + next_outer = next_inner; |
---|
| 4637 | + |
---|
| 4638 | + /* |
---|
| 4639 | + * Find start of current irq_context.. |
---|
| 4640 | + */ |
---|
| 4641 | + for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) { |
---|
| 4642 | + struct held_lock *prev = curr->held_locks + depth; |
---|
| 4643 | + if (prev->irq_context != next->irq_context) |
---|
| 4644 | + break; |
---|
| 4645 | + } |
---|
| 4646 | + depth++; |
---|
| 4647 | + |
---|
| 4648 | + curr_inner = task_wait_context(curr); |
---|
| 4649 | + |
---|
| 4650 | + for (; depth < curr->lockdep_depth; depth++) { |
---|
| 4651 | + struct held_lock *prev = curr->held_locks + depth; |
---|
| 4652 | + u8 prev_inner = hlock_class(prev)->wait_type_inner; |
---|
| 4653 | + |
---|
| 4654 | + if (prev_inner) { |
---|
| 4655 | + /* |
---|
| 4656 | + * We can have a bigger inner than a previous one |
---|
| 4657 | + * when outer is smaller than inner, as with RCU. |
---|
| 4658 | + * |
---|
| 4659 | + * Also due to trylocks. |
---|
| 4660 | + */ |
---|
| 4661 | + curr_inner = min(curr_inner, prev_inner); |
---|
| 4662 | + } |
---|
| 4663 | + } |
---|
| 4664 | + |
---|
| 4665 | + if (next_outer > curr_inner) |
---|
| 4666 | + return print_lock_invalid_wait_context(curr, next); |
---|
| 4667 | + |
---|
| 4668 | + return 0; |
---|
| 4669 | +} |
---|
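/*
 * Illustrative sketch, not part of the patch above, of the nesting that
 * check_wait_context() rejects: acquiring a sleeping lock while holding a
 * raw spinlock. The lock names are made up; the primitives are the ordinary
 * ones from <linux/spinlock.h> and <linux/mutex.h>.
 */
static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* inner wait type: LD_WAIT_SPIN */
static DEFINE_MUTEX(example_mutex);		/* inner wait type: LD_WAIT_SLEEP */

static void example_bad_nesting(void)
{
	raw_spin_lock(&example_raw_lock);
	mutex_lock(&example_mutex);	/* next_outer > curr_inner: "Invalid wait context" */
	mutex_unlock(&example_mutex);
	raw_spin_unlock(&example_raw_lock);
}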
| 4670 | + |
---|
| 4671 | +#else /* CONFIG_PROVE_LOCKING */ |
---|
| 4672 | + |
---|
| 4673 | +static inline int |
---|
| 4674 | +mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) |
---|
| 4675 | +{ |
---|
| 4676 | + return 1; |
---|
| 4677 | +} |
---|
| 4678 | + |
---|
| 4679 | +static inline unsigned int task_irq_context(struct task_struct *task) |
---|
| 4680 | +{ |
---|
| 4681 | + return 0; |
---|
| 4682 | +} |
---|
| 4683 | + |
---|
| 4684 | +static inline int separate_irq_context(struct task_struct *curr, |
---|
| 4685 | + struct held_lock *hlock) |
---|
| 4686 | +{ |
---|
| 4687 | + return 0; |
---|
| 4688 | +} |
---|
| 4689 | + |
---|
| 4690 | +static inline int check_wait_context(struct task_struct *curr, |
---|
| 4691 | + struct held_lock *next) |
---|
| 4692 | +{ |
---|
| 4693 | + return 0; |
---|
| 4694 | +} |
---|
| 4695 | + |
---|
| 4696 | +#endif /* CONFIG_PROVE_LOCKING */ |
---|
| 4697 | + |
---|
3161 | 4698 | /* |
---|
3162 | 4699 | * Initialize a lock instance's lock-class mapping info: |
---|
3163 | 4700 | */ |
---|
3164 | | -static void __lockdep_init_map(struct lockdep_map *lock, const char *name, |
---|
3165 | | - struct lock_class_key *key, int subclass) |
---|
| 4701 | +void lockdep_init_map_type(struct lockdep_map *lock, const char *name, |
---|
| 4702 | + struct lock_class_key *key, int subclass, |
---|
| 4703 | + u8 inner, u8 outer, u8 lock_type) |
---|
3166 | 4704 | { |
---|
3167 | 4705 | int i; |
---|
3168 | 4706 | |
---|
.. | .. |
---|
3183 | 4721 | |
---|
3184 | 4722 | lock->name = name; |
---|
3185 | 4723 | |
---|
| 4724 | + lock->wait_type_outer = outer; |
---|
| 4725 | + lock->wait_type_inner = inner; |
---|
| 4726 | + lock->lock_type = lock_type; |
---|
| 4727 | + |
---|
3186 | 4728 | /* |
---|
3187 | 4729 | * No key, no joy, we need to hash something. |
---|
3188 | 4730 | */ |
---|
3189 | 4731 | if (DEBUG_LOCKS_WARN_ON(!key)) |
---|
3190 | 4732 | return; |
---|
3191 | 4733 | /* |
---|
3192 | | - * Sanity check, the lock-class key must be persistent: |
---|
| 4734 | + * Sanity check, the lock-class key must either have been allocated |
---|
| 4735 | + * statically or must have been registered as a dynamic key. |
---|
3193 | 4736 | */ |
---|
3194 | | - if (!static_obj(key)) { |
---|
3195 | | - printk("BUG: key %px not in .data!\n", key); |
---|
3196 | | - /* |
---|
3197 | | - * What it says above ^^^^^, I suggest you read it. |
---|
3198 | | - */ |
---|
| 4737 | + if (!static_obj(key) && !is_dynamic_key(key)) { |
---|
| 4738 | + if (debug_locks) |
---|
| 4739 | + printk(KERN_ERR "BUG: key %px has not been registered!\n", key); |
---|
3199 | 4740 | DEBUG_LOCKS_WARN_ON(1); |
---|
3200 | 4741 | return; |
---|
3201 | 4742 | } |
---|
.. | .. |
---|
3207 | 4748 | if (subclass) { |
---|
3208 | 4749 | unsigned long flags; |
---|
3209 | 4750 | |
---|
3210 | | - if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) |
---|
| 4751 | + if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled())) |
---|
3211 | 4752 | return; |
---|
3212 | 4753 | |
---|
3213 | 4754 | raw_local_irq_save(flags); |
---|
3214 | | - current->lockdep_recursion = 1; |
---|
| 4755 | + lockdep_recursion_inc(); |
---|
3215 | 4756 | register_lock_class(lock, subclass, 1); |
---|
3216 | | - current->lockdep_recursion = 0; |
---|
| 4757 | + lockdep_recursion_finish(); |
---|
3217 | 4758 | raw_local_irq_restore(flags); |
---|
3218 | 4759 | } |
---|
3219 | 4760 | } |
---|
3220 | | - |
---|
3221 | | -void lockdep_init_map(struct lockdep_map *lock, const char *name, |
---|
3222 | | - struct lock_class_key *key, int subclass) |
---|
3223 | | -{ |
---|
3224 | | - __lockdep_init_map(lock, name, key, subclass); |
---|
3225 | | -} |
---|
3226 | | -EXPORT_SYMBOL_GPL(lockdep_init_map); |
---|
| 4761 | +EXPORT_SYMBOL_GPL(lockdep_init_map_type); |
---|
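/*
 * Illustrative sketch, not part of the patch above: because the key may now
 * be a registered dynamic key instead of a static object, code that embeds a
 * lock_class_key in dynamically allocated memory registers it first. The
 * structure and function names are made up; lockdep_register_key(),
 * lockdep_unregister_key() and lockdep_set_class() are the real interfaces.
 */
struct example_device {
	spinlock_t		lock;
	struct lock_class_key	key;	/* lives in kmalloc()'ed memory */
};

static void example_device_init(struct example_device *dev)
{
	lockdep_register_key(&dev->key);	/* make the key known to lockdep */
	spin_lock_init(&dev->lock);
	lockdep_set_class(&dev->lock, &dev->key);
}

static void example_device_destroy(struct example_device *dev)
{
	/* only after the lock can no longer be acquired: */
	lockdep_unregister_key(&dev->key);
}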
3227 | 4762 | |
---|
3228 | 4763 | struct lock_class_key __lockdep_no_validate__; |
---|
3229 | 4764 | EXPORT_SYMBOL_GPL(__lockdep_no_validate__); |
---|
3230 | 4765 | |
---|
3231 | | -static int |
---|
| 4766 | +static void |
---|
3232 | 4767 | print_lock_nested_lock_not_held(struct task_struct *curr, |
---|
3233 | 4768 | struct held_lock *hlock, |
---|
3234 | 4769 | unsigned long ip) |
---|
3235 | 4770 | { |
---|
3236 | 4771 | if (!debug_locks_off()) |
---|
3237 | | - return 0; |
---|
| 4772 | + return; |
---|
3238 | 4773 | if (debug_locks_silent) |
---|
3239 | | - return 0; |
---|
| 4774 | + return; |
---|
3240 | 4775 | |
---|
3241 | 4776 | pr_warn("\n"); |
---|
3242 | 4777 | pr_warn("==================================\n"); |
---|
.. | .. |
---|
3258 | 4793 | |
---|
3259 | 4794 | pr_warn("\nstack backtrace:\n"); |
---|
3260 | 4795 | dump_stack(); |
---|
3261 | | - |
---|
3262 | | - return 0; |
---|
3263 | 4796 | } |
---|
3264 | 4797 | |
---|
3265 | 4798 | static int __lock_is_held(const struct lockdep_map *lock, int read); |
---|
.. | .. |
---|
3267 | 4800 | /* |
---|
3268 | 4801 | * This gets called for every mutex_lock*()/spin_lock*() operation. |
---|
3269 | 4802 | * We maintain the dependency maps and validate the locking attempt: |
---|
| 4803 | + * |
---|
| 4804 | + * The callers must make sure that IRQs are disabled before calling it, |
---|
| 4805 | + * otherwise we could get an interrupt which would want to take locks, |
---|
| 4806 | + * which would end up in lockdep again. |
---|
3270 | 4807 | */ |
---|
3271 | 4808 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
---|
3272 | 4809 | int trylock, int read, int check, int hardirqs_off, |
---|
.. | .. |
---|
3284 | 4821 | if (unlikely(!debug_locks)) |
---|
3285 | 4822 | return 0; |
---|
3286 | 4823 | |
---|
3287 | | - /* |
---|
3288 | | - * Lockdep should run with IRQs disabled, otherwise we could |
---|
3289 | | - * get an interrupt which would want to take locks, which would |
---|
3290 | | - * end up in lockdep and have you got a head-ache already? |
---|
3291 | | - */ |
---|
3292 | | - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
---|
3293 | | - return 0; |
---|
3294 | | - |
---|
3295 | 4824 | if (!prove_locking || lock->key == &__lockdep_no_validate__) |
---|
3296 | 4825 | check = 0; |
---|
3297 | 4826 | |
---|
.. | .. |
---|
3305 | 4834 | if (!class) |
---|
3306 | 4835 | return 0; |
---|
3307 | 4836 | } |
---|
3308 | | - atomic_inc((atomic_t *)&class->ops); |
---|
| 4837 | + |
---|
| 4838 | + debug_class_ops_inc(class); |
---|
| 4839 | + |
---|
3309 | 4840 | if (very_verbose(class)) { |
---|
3310 | 4841 | printk("\nacquire class [%px] %s", class->key, class->name); |
---|
3311 | 4842 | if (class->name_version > 1) |
---|
.. | .. |
---|
3326 | 4857 | if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) |
---|
3327 | 4858 | return 0; |
---|
3328 | 4859 | |
---|
3329 | | - class_idx = class - lock_classes + 1; |
---|
| 4860 | + class_idx = class - lock_classes; |
---|
3330 | 4861 | |
---|
3331 | | - if (depth) { |
---|
| 4862 | + if (depth) { /* we're holding locks */ |
---|
3332 | 4863 | hlock = curr->held_locks + depth - 1; |
---|
3333 | 4864 | if (hlock->class_idx == class_idx && nest_lock) { |
---|
3334 | 4865 | if (!references) |
---|
.. | .. |
---|
3343 | 4874 | if (DEBUG_LOCKS_WARN_ON(hlock->references < references)) |
---|
3344 | 4875 | return 0; |
---|
3345 | 4876 | |
---|
3346 | | - return 1; |
---|
| 4877 | + return 2; |
---|
3347 | 4878 | } |
---|
3348 | 4879 | } |
---|
3349 | 4880 | |
---|
.. | .. |
---|
3370 | 4901 | #endif |
---|
3371 | 4902 | hlock->pin_count = pin_count; |
---|
3372 | 4903 | |
---|
3373 | | - if (check && !mark_irqflags(curr, hlock)) |
---|
| 4904 | + if (check_wait_context(curr, hlock)) |
---|
3374 | 4905 | return 0; |
---|
3375 | 4906 | |
---|
3376 | | - /* mark it as used: */ |
---|
3377 | | - if (!mark_lock(curr, hlock, LOCK_USED)) |
---|
| 4907 | + /* Initialize the lock usage bit */ |
---|
| 4908 | + if (!mark_usage(curr, hlock, check)) |
---|
3378 | 4909 | return 0; |
---|
3379 | 4910 | |
---|
3380 | 4911 | /* |
---|
.. | .. |
---|
3388 | 4919 | * the hash, not class->key. |
---|
3389 | 4920 | */ |
---|
3390 | 4921 | /* |
---|
3391 | | - * Whoops, we did it again.. ran straight out of our static allocation. |
---|
| 4922 | + * Whoops, we did it again.. class_idx is invalid. |
---|
3392 | 4923 | */ |
---|
3393 | | - if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS)) |
---|
| 4924 | + if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use))) |
---|
3394 | 4925 | return 0; |
---|
3395 | 4926 | |
---|
3396 | 4927 | chain_key = curr->curr_chain_key; |
---|
.. | .. |
---|
3398 | 4929 | /* |
---|
3399 | 4930 | * How can we have a chain hash when we ain't got no keys?! |
---|
3400 | 4931 | */ |
---|
3401 | | - if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) |
---|
| 4932 | + if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY)) |
---|
3402 | 4933 | return 0; |
---|
3403 | 4934 | chain_head = 1; |
---|
3404 | 4935 | } |
---|
3405 | 4936 | |
---|
3406 | 4937 | hlock->prev_chain_key = chain_key; |
---|
3407 | 4938 | if (separate_irq_context(curr, hlock)) { |
---|
3408 | | - chain_key = 0; |
---|
| 4939 | + chain_key = INITIAL_CHAIN_KEY; |
---|
3409 | 4940 | chain_head = 1; |
---|
3410 | 4941 | } |
---|
3411 | | - chain_key = iterate_chain_key(chain_key, class_idx); |
---|
| 4942 | + chain_key = iterate_chain_key(chain_key, hlock_id(hlock)); |
---|
3412 | 4943 | |
---|
3413 | | - if (nest_lock && !__lock_is_held(nest_lock, -1)) |
---|
3414 | | - return print_lock_nested_lock_not_held(curr, hlock, ip); |
---|
| 4944 | + if (nest_lock && !__lock_is_held(nest_lock, -1)) { |
---|
| 4945 | + print_lock_nested_lock_not_held(curr, hlock, ip); |
---|
| 4946 | + return 0; |
---|
| 4947 | + } |
---|
3415 | 4948 | |
---|
3416 | | - if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) |
---|
| 4949 | + if (!debug_locks_silent) { |
---|
| 4950 | + WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key); |
---|
| 4951 | + WARN_ON_ONCE(!hlock_class(hlock)->key); |
---|
| 4952 | + } |
---|
| 4953 | + |
---|
| 4954 | + if (!validate_chain(curr, hlock, chain_head, chain_key)) |
---|
3417 | 4955 | return 0; |
---|
3418 | 4956 | |
---|
3419 | 4957 | curr->curr_chain_key = chain_key; |
---|
.. | .. |
---|
3442 | 4980 | return 1; |
---|
3443 | 4981 | } |
---|
3444 | 4982 | |
---|
3445 | | -static int |
---|
3446 | | -print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, |
---|
3447 | | - unsigned long ip) |
---|
| 4983 | +static void print_unlock_imbalance_bug(struct task_struct *curr, |
---|
| 4984 | + struct lockdep_map *lock, |
---|
| 4985 | + unsigned long ip) |
---|
3448 | 4986 | { |
---|
3449 | 4987 | if (!debug_locks_off()) |
---|
3450 | | - return 0; |
---|
| 4988 | + return; |
---|
3451 | 4989 | if (debug_locks_silent) |
---|
3452 | | - return 0; |
---|
| 4990 | + return; |
---|
3453 | 4991 | |
---|
3454 | 4992 | pr_warn("\n"); |
---|
3455 | 4993 | pr_warn("=====================================\n"); |
---|
.. | .. |
---|
3460 | 4998 | curr->comm, task_pid_nr(curr)); |
---|
3461 | 4999 | print_lockdep_cache(lock); |
---|
3462 | 5000 | pr_cont(") at:\n"); |
---|
3463 | | - print_ip_sym(ip); |
---|
| 5001 | + print_ip_sym(KERN_WARNING, ip); |
---|
3464 | 5002 | pr_warn("but there are no more locks to release!\n"); |
---|
3465 | 5003 | pr_warn("\nother info that might help us debug this:\n"); |
---|
3466 | 5004 | lockdep_print_held_locks(curr); |
---|
3467 | 5005 | |
---|
3468 | 5006 | pr_warn("\nstack backtrace:\n"); |
---|
3469 | 5007 | dump_stack(); |
---|
3470 | | - |
---|
3471 | | - return 0; |
---|
3472 | 5008 | } |
---|
3473 | 5009 | |
---|
3474 | | -static int match_held_lock(const struct held_lock *hlock, |
---|
3475 | | - const struct lockdep_map *lock) |
---|
| 5010 | +static noinstr int match_held_lock(const struct held_lock *hlock, |
---|
| 5011 | + const struct lockdep_map *lock) |
---|
3476 | 5012 | { |
---|
3477 | 5013 | if (hlock->instance == lock) |
---|
3478 | 5014 | return 1; |
---|
.. | .. |
---|
3500 | 5036 | if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) |
---|
3501 | 5037 | return 0; |
---|
3502 | 5038 | |
---|
3503 | | - if (hlock->class_idx == class - lock_classes + 1) |
---|
| 5039 | + if (hlock->class_idx == class - lock_classes) |
---|
3504 | 5040 | return 1; |
---|
3505 | 5041 | } |
---|
3506 | 5042 | |
---|
.. | .. |
---|
3544 | 5080 | } |
---|
3545 | 5081 | |
---|
3546 | 5082 | static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, |
---|
3547 | | - int idx) |
---|
| 5083 | + int idx, unsigned int *merged) |
---|
3548 | 5084 | { |
---|
3549 | 5085 | struct held_lock *hlock; |
---|
| 5086 | + int first_idx = idx; |
---|
| 5087 | + |
---|
| 5088 | + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
---|
| 5089 | + return 0; |
---|
3550 | 5090 | |
---|
3551 | 5091 | for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { |
---|
3552 | | - if (!__lock_acquire(hlock->instance, |
---|
| 5092 | + switch (__lock_acquire(hlock->instance, |
---|
3553 | 5093 | hlock_class(hlock)->subclass, |
---|
3554 | 5094 | hlock->trylock, |
---|
3555 | 5095 | hlock->read, hlock->check, |
---|
3556 | 5096 | hlock->hardirqs_off, |
---|
3557 | 5097 | hlock->nest_lock, hlock->acquire_ip, |
---|
3558 | | - hlock->references, hlock->pin_count)) |
---|
| 5098 | + hlock->references, hlock->pin_count)) { |
---|
| 5099 | + case 0: |
---|
3559 | 5100 | return 1; |
---|
| 5101 | + case 1: |
---|
| 5102 | + break; |
---|
| 5103 | + case 2: |
---|
| 5104 | + *merged += (idx == first_idx); |
---|
| 5105 | + break; |
---|
| 5106 | + default: |
---|
| 5107 | + WARN_ON(1); |
---|
| 5108 | + return 0; |
---|
| 5109 | + } |
---|
3560 | 5110 | } |
---|
3561 | 5111 | return 0; |
---|
3562 | 5112 | } |
---|
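/*
 * Note on the switch above: __lock_acquire() returns 0 on failure, 1 for a
 * normal acquisition and 2 when the acquisition was folded into an existing
 * nest_lock reference (the "return 2" path in __lock_acquire()). A merge of
 * the first reacquired entry is counted in *merged so that callers can adjust
 * their lockdep_depth expectations accordingly.
 */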
.. | .. |
---|
3567 | 5117 | unsigned long ip) |
---|
3568 | 5118 | { |
---|
3569 | 5119 | struct task_struct *curr = current; |
---|
| 5120 | + unsigned int depth, merged = 0; |
---|
3570 | 5121 | struct held_lock *hlock; |
---|
3571 | 5122 | struct lock_class *class; |
---|
3572 | | - unsigned int depth; |
---|
3573 | | - int i; |
---|
3574 | | - |
---|
3575 | | - depth = curr->lockdep_depth; |
---|
3576 | | - /* |
---|
3577 | | - * This function is about (re)setting the class of a held lock, |
---|
3578 | | - * yet we're not actually holding any locks. Naughty user! |
---|
3579 | | - */ |
---|
3580 | | - if (DEBUG_LOCKS_WARN_ON(!depth)) |
---|
3581 | | - return 0; |
---|
3582 | | - |
---|
3583 | | - hlock = find_held_lock(curr, lock, depth, &i); |
---|
3584 | | - if (!hlock) |
---|
3585 | | - return print_unlock_imbalance_bug(curr, lock, ip); |
---|
3586 | | - |
---|
3587 | | - lockdep_init_map(lock, name, key, 0); |
---|
3588 | | - class = register_lock_class(lock, subclass, 0); |
---|
3589 | | - hlock->class_idx = class - lock_classes + 1; |
---|
3590 | | - |
---|
3591 | | - curr->lockdep_depth = i; |
---|
3592 | | - curr->curr_chain_key = hlock->prev_chain_key; |
---|
3593 | | - |
---|
3594 | | - if (reacquire_held_locks(curr, depth, i)) |
---|
3595 | | - return 0; |
---|
3596 | | - |
---|
3597 | | - /* |
---|
3598 | | - * I took it apart and put it back together again, except now I have |
---|
3599 | | - * these 'spare' parts.. where shall I put them. |
---|
3600 | | - */ |
---|
3601 | | - if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) |
---|
3602 | | - return 0; |
---|
3603 | | - return 1; |
---|
3604 | | -} |
---|
3605 | | - |
---|
3606 | | -static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) |
---|
3607 | | -{ |
---|
3608 | | - struct task_struct *curr = current; |
---|
3609 | | - struct held_lock *hlock; |
---|
3610 | | - unsigned int depth; |
---|
3611 | 5123 | int i; |
---|
3612 | 5124 | |
---|
3613 | 5125 | if (unlikely(!debug_locks)) |
---|
.. | .. |
---|
3622 | 5134 | return 0; |
---|
3623 | 5135 | |
---|
3624 | 5136 | hlock = find_held_lock(curr, lock, depth, &i); |
---|
3625 | | - if (!hlock) |
---|
3626 | | - return print_unlock_imbalance_bug(curr, lock, ip); |
---|
| 5137 | + if (!hlock) { |
---|
| 5138 | + print_unlock_imbalance_bug(curr, lock, ip); |
---|
| 5139 | + return 0; |
---|
| 5140 | + } |
---|
| 5141 | + |
---|
| 5142 | + lockdep_init_map_type(lock, name, key, 0, |
---|
| 5143 | + lock->wait_type_inner, |
---|
| 5144 | + lock->wait_type_outer, |
---|
| 5145 | + lock->lock_type); |
---|
| 5146 | + class = register_lock_class(lock, subclass, 0); |
---|
| 5147 | + hlock->class_idx = class - lock_classes; |
---|
| 5148 | + |
---|
| 5149 | + curr->lockdep_depth = i; |
---|
| 5150 | + curr->curr_chain_key = hlock->prev_chain_key; |
---|
| 5151 | + |
---|
| 5152 | + if (reacquire_held_locks(curr, depth, i, &merged)) |
---|
| 5153 | + return 0; |
---|
| 5154 | + |
---|
| 5155 | + /* |
---|
| 5156 | + * I took it apart and put it back together again, except now I have |
---|
| 5157 | + * these 'spare' parts.. where shall I put them. |
---|
| 5158 | + */ |
---|
| 5159 | + if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged)) |
---|
| 5160 | + return 0; |
---|
| 5161 | + return 1; |
---|
| 5162 | +} |
---|
| 5163 | + |
---|
| 5164 | +static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) |
---|
| 5165 | +{ |
---|
| 5166 | + struct task_struct *curr = current; |
---|
| 5167 | + unsigned int depth, merged = 0; |
---|
| 5168 | + struct held_lock *hlock; |
---|
| 5169 | + int i; |
---|
| 5170 | + |
---|
| 5171 | + if (unlikely(!debug_locks)) |
---|
| 5172 | + return 0; |
---|
| 5173 | + |
---|
| 5174 | + depth = curr->lockdep_depth; |
---|
| 5175 | + /* |
---|
| 5176 | + * This function is about (re)setting the class of a held lock, |
---|
| 5177 | + * yet we're not actually holding any locks. Naughty user! |
---|
| 5178 | + */ |
---|
| 5179 | + if (DEBUG_LOCKS_WARN_ON(!depth)) |
---|
| 5180 | + return 0; |
---|
| 5181 | + |
---|
| 5182 | + hlock = find_held_lock(curr, lock, depth, &i); |
---|
| 5183 | + if (!hlock) { |
---|
| 5184 | + print_unlock_imbalance_bug(curr, lock, ip); |
---|
| 5185 | + return 0; |
---|
| 5186 | + } |
---|
3627 | 5187 | |
---|
3628 | 5188 | curr->lockdep_depth = i; |
---|
3629 | 5189 | curr->curr_chain_key = hlock->prev_chain_key; |
---|
.. | .. |
---|
3632 | 5192 | hlock->read = 1; |
---|
3633 | 5193 | hlock->acquire_ip = ip; |
---|
3634 | 5194 | |
---|
3635 | | - if (reacquire_held_locks(curr, depth, i)) |
---|
| 5195 | + if (reacquire_held_locks(curr, depth, i, &merged)) |
---|
| 5196 | + return 0; |
---|
| 5197 | + |
---|
| 5198 | + /* Merging can't happen with unchanged classes.. */ |
---|
| 5199 | + if (DEBUG_LOCKS_WARN_ON(merged)) |
---|
3636 | 5200 | return 0; |
---|
3637 | 5201 | |
---|
3638 | 5202 | /* |
---|
.. | .. |
---|
3641 | 5205 | */ |
---|
3642 | 5206 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) |
---|
3643 | 5207 | return 0; |
---|
| 5208 | + |
---|
3644 | 5209 | return 1; |
---|
3645 | 5210 | } |
---|
3646 | 5211 | |
---|
3647 | 5212 | /* |
---|
3648 | | - * Remove the lock to the list of currently held locks - this gets |
---|
| 5213 | + * Remove the lock from the list of currently held locks - this gets |
---|
3649 | 5214 | * called on mutex_unlock()/spin_unlock*() (or on a failed |
---|
3650 | 5215 | * mutex_lock_interruptible()). |
---|
3651 | | - * |
---|
3652 | | - * @nested is an hysterical artifact, needs a tree wide cleanup. |
---|
3653 | 5216 | */ |
---|
3654 | 5217 | static int |
---|
3655 | | -__lock_release(struct lockdep_map *lock, int nested, unsigned long ip) |
---|
| 5218 | +__lock_release(struct lockdep_map *lock, unsigned long ip) |
---|
3656 | 5219 | { |
---|
3657 | 5220 | struct task_struct *curr = current; |
---|
| 5221 | + unsigned int depth, merged = 1; |
---|
3658 | 5222 | struct held_lock *hlock; |
---|
3659 | | - unsigned int depth; |
---|
3660 | 5223 | int i; |
---|
3661 | 5224 | |
---|
3662 | 5225 | if (unlikely(!debug_locks)) |
---|
.. | .. |
---|
3667 | 5230 | * So we're all set to release this lock.. wait what lock? We don't |
---|
3668 | 5231 | * own any locks, you've been drinking again? |
---|
3669 | 5232 | */ |
---|
3670 | | - if (DEBUG_LOCKS_WARN_ON(depth <= 0)) |
---|
3671 | | - return print_unlock_imbalance_bug(curr, lock, ip); |
---|
| 5233 | + if (depth <= 0) { |
---|
| 5234 | + print_unlock_imbalance_bug(curr, lock, ip); |
---|
| 5235 | + return 0; |
---|
| 5236 | + } |
---|
3672 | 5237 | |
---|
3673 | 5238 | /* |
---|
3674 | 5239 | * Check whether the lock exists in the current stack |
---|
3675 | 5240 | * of held locks: |
---|
3676 | 5241 | */ |
---|
3677 | 5242 | hlock = find_held_lock(curr, lock, depth, &i); |
---|
3678 | | - if (!hlock) |
---|
3679 | | - return print_unlock_imbalance_bug(curr, lock, ip); |
---|
| 5243 | + if (!hlock) { |
---|
| 5244 | + print_unlock_imbalance_bug(curr, lock, ip); |
---|
| 5245 | + return 0; |
---|
| 5246 | + } |
---|
3680 | 5247 | |
---|
3681 | 5248 | if (hlock->instance == lock) |
---|
3682 | 5249 | lock_release_holdtime(hlock); |
---|
.. | .. |
---|
3704 | 5271 | curr->lockdep_depth = i; |
---|
3705 | 5272 | curr->curr_chain_key = hlock->prev_chain_key; |
---|
3706 | 5273 | |
---|
3707 | | - if (reacquire_held_locks(curr, depth, i + 1)) |
---|
| 5274 | + /* |
---|
| 5275 | + * The most likely case is when the unlock is on the innermost |
---|
| 5276 | + * lock. In this case, we are done! |
---|
| 5277 | + */ |
---|
| 5278 | + if (i == depth-1) |
---|
| 5279 | + return 1; |
---|
| 5280 | + |
---|
| 5281 | + if (reacquire_held_locks(curr, depth, i + 1, &merged)) |
---|
3708 | 5282 | return 0; |
---|
3709 | 5283 | |
---|
3710 | 5284 | /* |
---|
3711 | 5285 | * We had N bottles of beer on the wall, we drank one, but now |
---|
3712 | 5286 | * there's not N-1 bottles of beer left on the wall... |
---|
| 5287 | + * Pouring two of the bottles together is acceptable. |
---|
3713 | 5288 | */ |
---|
3714 | | - if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) |
---|
3715 | | - return 0; |
---|
| 5289 | + DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged); |
---|
3716 | 5290 | |
---|
3717 | | - return 1; |
---|
| 5291 | + /* |
---|
| 5292 | + * Since reacquire_held_locks() would have called check_chain_key() |
---|
| 5293 | + * indirectly via __lock_acquire(), we don't need to do it again |
---|
| 5294 | + * on return. |
---|
| 5295 | + */ |
---|
| 5296 | + return 0; |
---|
3718 | 5297 | } |
---|
3719 | 5298 | |
---|
3720 | | -static int __lock_is_held(const struct lockdep_map *lock, int read) |
---|
| 5299 | +static __always_inline |
---|
| 5300 | +int __lock_is_held(const struct lockdep_map *lock, int read) |
---|
3721 | 5301 | { |
---|
3722 | 5302 | struct task_struct *curr = current; |
---|
3723 | 5303 | int i; |
---|
.. | .. |
---|
3726 | 5306 | struct held_lock *hlock = curr->held_locks + i; |
---|
3727 | 5307 | |
---|
3728 | 5308 | if (match_held_lock(hlock, lock)) { |
---|
3729 | | - if (read == -1 || hlock->read == read) |
---|
| 5309 | + if (read == -1 || !!hlock->read == read) |
---|
3730 | 5310 | return 1; |
---|
3731 | 5311 | |
---|
3732 | 5312 | return 0; |
---|
.. | .. |
---|
3814 | 5394 | /* |
---|
3815 | 5395 | * Check whether we follow the irq-flags state precisely: |
---|
3816 | 5396 | */ |
---|
3817 | | -static void check_flags(unsigned long flags) |
---|
| 5397 | +static noinstr void check_flags(unsigned long flags) |
---|
3818 | 5398 | { |
---|
3819 | | -#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ |
---|
3820 | | - defined(CONFIG_TRACE_IRQFLAGS) |
---|
| 5399 | +#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) |
---|
3821 | 5400 | if (!debug_locks) |
---|
3822 | 5401 | return; |
---|
3823 | 5402 | |
---|
| 5403 | + /* Get the warning out.. */ |
---|
| 5404 | + instrumentation_begin(); |
---|
| 5405 | + |
---|
3824 | 5406 | if (irqs_disabled_flags(flags)) { |
---|
3825 | | - if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { |
---|
| 5407 | + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) { |
---|
3826 | 5408 | printk("possible reason: unannotated irqs-off.\n"); |
---|
3827 | 5409 | } |
---|
3828 | 5410 | } else { |
---|
3829 | | - if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { |
---|
| 5411 | + if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) { |
---|
3830 | 5412 | printk("possible reason: unannotated irqs-on.\n"); |
---|
3831 | 5413 | } |
---|
3832 | 5414 | } |
---|
3833 | 5415 | |
---|
3834 | | -#ifndef CONFIG_PREEMPT_RT_FULL |
---|
3835 | 5416 | /* |
---|
3836 | 5417 | * We dont accurately track softirq state in e.g. |
---|
3837 | 5418 | * hardirq contexts (such as on 4KSTACKS), so only |
---|
.. | .. |
---|
3846 | 5427 | DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); |
---|
3847 | 5428 | } |
---|
3848 | 5429 | } |
---|
3849 | | -#endif |
---|
3850 | 5430 | |
---|
3851 | 5431 | if (!debug_locks) |
---|
3852 | 5432 | print_irqtrace_events(current); |
---|
| 5433 | + |
---|
| 5434 | + instrumentation_end(); |
---|
3853 | 5435 | #endif |
---|
3854 | 5436 | } |
---|
3855 | 5437 | |
---|
.. | .. |
---|
3859 | 5441 | { |
---|
3860 | 5442 | unsigned long flags; |
---|
3861 | 5443 | |
---|
3862 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5444 | + if (unlikely(!lockdep_enabled())) |
---|
3863 | 5445 | return; |
---|
3864 | 5446 | |
---|
3865 | 5447 | raw_local_irq_save(flags); |
---|
3866 | | - current->lockdep_recursion = 1; |
---|
| 5448 | + lockdep_recursion_inc(); |
---|
3867 | 5449 | check_flags(flags); |
---|
3868 | 5450 | if (__lock_set_class(lock, name, key, subclass, ip)) |
---|
3869 | 5451 | check_chain_key(current); |
---|
3870 | | - current->lockdep_recursion = 0; |
---|
| 5452 | + lockdep_recursion_finish(); |
---|
3871 | 5453 | raw_local_irq_restore(flags); |
---|
3872 | 5454 | } |
---|
3873 | 5455 | EXPORT_SYMBOL_GPL(lock_set_class); |
---|
.. | .. |
---|
3876 | 5458 | { |
---|
3877 | 5459 | unsigned long flags; |
---|
3878 | 5460 | |
---|
3879 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5461 | + if (unlikely(!lockdep_enabled())) |
---|
3880 | 5462 | return; |
---|
3881 | 5463 | |
---|
3882 | 5464 | raw_local_irq_save(flags); |
---|
3883 | | - current->lockdep_recursion = 1; |
---|
| 5465 | + lockdep_recursion_inc(); |
---|
3884 | 5466 | check_flags(flags); |
---|
3885 | 5467 | if (__lock_downgrade(lock, ip)) |
---|
3886 | 5468 | check_chain_key(current); |
---|
3887 | | - current->lockdep_recursion = 0; |
---|
| 5469 | + lockdep_recursion_finish(); |
---|
3888 | 5470 | raw_local_irq_restore(flags); |
---|
3889 | 5471 | } |
---|
3890 | 5472 | EXPORT_SYMBOL_GPL(lock_downgrade); |
---|
| 5473 | + |
---|
| 5474 | +/* NMI context !!! */ |
---|
| 5475 | +static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass) |
---|
| 5476 | +{ |
---|
| 5477 | +#ifdef CONFIG_PROVE_LOCKING |
---|
| 5478 | + struct lock_class *class = look_up_lock_class(lock, subclass); |
---|
| 5479 | + unsigned long mask = LOCKF_USED; |
---|
| 5480 | + |
---|
| 5481 | + /* if it doesn't have a class (yet), it certainly hasn't been used yet */ |
---|
| 5482 | + if (!class) |
---|
| 5483 | + return; |
---|
| 5484 | + |
---|
| 5485 | + /* |
---|
| 5486 | + * READ locks only conflict with USED, such that if we only ever use |
---|
| 5487 | + * READ locks, there is no deadlock possible -- RCU. |
---|
| 5488 | + */ |
---|
| 5489 | + if (!hlock->read) |
---|
| 5490 | + mask |= LOCKF_USED_READ; |
---|
| 5491 | + |
---|
| 5492 | + if (!(class->usage_mask & mask)) |
---|
| 5493 | + return; |
---|
| 5494 | + |
---|
| 5495 | + hlock->class_idx = class - lock_classes; |
---|
| 5496 | + |
---|
| 5497 | + print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES); |
---|
| 5498 | +#endif |
---|
| 5499 | +} |
---|
| 5500 | + |
---|
| 5501 | +static bool lockdep_nmi(void) |
---|
| 5502 | +{ |
---|
| 5503 | + if (raw_cpu_read(lockdep_recursion)) |
---|
| 5504 | + return false; |
---|
| 5505 | + |
---|
| 5506 | + if (!in_nmi()) |
---|
| 5507 | + return false; |
---|
| 5508 | + |
---|
| 5509 | + return true; |
---|
| 5510 | +} |
---|
| 5511 | + |
---|
| 5512 | +/* |
---|
| 5513 | + * read_lock() is recursive if: |
---|
| 5514 | + * 1. We force lockdep to think this way in selftests, or
---|
| 5515 | + * 2. The implementation is not a queued read/write lock, or
---|
| 5516 | + * 3. The locker is at an in_interrupt() context. |
---|
| 5517 | + */ |
---|
| 5518 | +bool read_lock_is_recursive(void) |
---|
| 5519 | +{ |
---|
| 5520 | + return force_read_lock_recursive || |
---|
| 5521 | + !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) || |
---|
| 5522 | + in_interrupt(); |
---|
| 5523 | +} |
---|
| 5524 | +EXPORT_SYMBOL_GPL(read_lock_is_recursive); |
---|
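/*
 * Illustrative sketch, not part of the patch above, of case 3: with queued
 * rwlocks a read_lock() is only treated as recursive when the nested reader
 * runs in interrupt context, since an interrupt re-reading a lock held by the
 * task it preempted must not queue behind a waiting writer. The lock and
 * function names are made up; read_lock()/read_unlock() are the real
 * primitives (assuming <linux/rwlock.h> via <linux/spinlock.h>).
 */
static DEFINE_RWLOCK(example_stats_lock);

static void example_task_side(void)
{
	read_lock(&example_stats_lock);
	/* ...an interrupt may fire here... */
	read_unlock(&example_stats_lock);
}

static void example_irq_handler(void)
{
	read_lock(&example_stats_lock);		/* in_interrupt(): recursive read */
	read_unlock(&example_stats_lock);
}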
3891 | 5525 | |
---|
3892 | 5526 | /* |
---|
3893 | 5527 | * We are not always called with irqs disabled - do that here, |
---|
.. | .. |
---|
3899 | 5533 | { |
---|
3900 | 5534 | unsigned long flags; |
---|
3901 | 5535 | |
---|
3902 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5536 | + trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); |
---|
| 5537 | + |
---|
| 5538 | + if (!debug_locks) |
---|
3903 | 5539 | return; |
---|
| 5540 | + |
---|
| 5541 | + if (unlikely(!lockdep_enabled())) { |
---|
| 5542 | + /* XXX allow trylock from NMI ?!? */ |
---|
| 5543 | + if (lockdep_nmi() && !trylock) { |
---|
| 5544 | + struct held_lock hlock; |
---|
| 5545 | + |
---|
| 5546 | + hlock.acquire_ip = ip; |
---|
| 5547 | + hlock.instance = lock; |
---|
| 5548 | + hlock.nest_lock = nest_lock; |
---|
| 5549 | + hlock.irq_context = 2; // XXX |
---|
| 5550 | + hlock.trylock = trylock; |
---|
| 5551 | + hlock.read = read; |
---|
| 5552 | + hlock.check = check; |
---|
| 5553 | + hlock.hardirqs_off = true; |
---|
| 5554 | + hlock.references = 0; |
---|
| 5555 | + |
---|
| 5556 | + verify_lock_unused(lock, &hlock, subclass); |
---|
| 5557 | + } |
---|
| 5558 | + return; |
---|
| 5559 | + } |
---|
3904 | 5560 | |
---|
3905 | 5561 | raw_local_irq_save(flags); |
---|
3906 | 5562 | check_flags(flags); |
---|
3907 | 5563 | |
---|
3908 | | - current->lockdep_recursion = 1; |
---|
3909 | | - trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); |
---|
| 5564 | + lockdep_recursion_inc(); |
---|
3910 | 5565 | __lock_acquire(lock, subclass, trylock, read, check, |
---|
3911 | 5566 | irqs_disabled_flags(flags), nest_lock, ip, 0, 0); |
---|
3912 | | - current->lockdep_recursion = 0; |
---|
| 5567 | + lockdep_recursion_finish(); |
---|
3913 | 5568 | raw_local_irq_restore(flags); |
---|
3914 | 5569 | } |
---|
3915 | 5570 | EXPORT_SYMBOL_GPL(lock_acquire); |
---|
3916 | 5571 | |
---|
3917 | | -void lock_release(struct lockdep_map *lock, int nested, |
---|
3918 | | - unsigned long ip) |
---|
| 5572 | +void lock_release(struct lockdep_map *lock, unsigned long ip) |
---|
3919 | 5573 | { |
---|
3920 | 5574 | unsigned long flags; |
---|
3921 | 5575 | |
---|
3922 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5576 | + trace_lock_release(lock, ip); |
---|
| 5577 | + |
---|
| 5578 | + if (unlikely(!lockdep_enabled())) |
---|
3923 | 5579 | return; |
---|
3924 | 5580 | |
---|
3925 | 5581 | raw_local_irq_save(flags); |
---|
3926 | 5582 | check_flags(flags); |
---|
3927 | | - current->lockdep_recursion = 1; |
---|
3928 | | - trace_lock_release(lock, ip); |
---|
3929 | | - if (__lock_release(lock, nested, ip)) |
---|
| 5583 | + |
---|
| 5584 | + lockdep_recursion_inc(); |
---|
| 5585 | + if (__lock_release(lock, ip)) |
---|
3930 | 5586 | check_chain_key(current); |
---|
3931 | | - current->lockdep_recursion = 0; |
---|
| 5587 | + lockdep_recursion_finish(); |
---|
3932 | 5588 | raw_local_irq_restore(flags); |
---|
3933 | 5589 | } |
---|
3934 | 5590 | EXPORT_SYMBOL_GPL(lock_release); |
---|
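/*
 * Illustrative sketch, not part of the patch above: how a locking primitive
 * outside this file would feed lock_acquire()/lock_release() directly, using
 * the updated lock_release() signature (no "nested" argument). The structure
 * and helpers are made up; only the annotation calls are real, and dep_map is
 * assumed to have been set up via lockdep_init_map_type() or a wrapper.
 */
struct example_lock {
	/* low-level lock state would live here */
	struct lockdep_map	dep_map;
};

static void example_lock_acquire(struct example_lock *l)
{
	/* subclass 0, not a trylock, exclusive (read=0), full check, no nest_lock */
	lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
	/* ...take the underlying lock... */
}

static void example_lock_release(struct example_lock *l)
{
	/* ...drop the underlying lock... */
	lock_release(&l->dep_map, _RET_IP_);
}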
3935 | 5591 | |
---|
3936 | | -int lock_is_held_type(const struct lockdep_map *lock, int read) |
---|
| 5592 | +noinstr int lock_is_held_type(const struct lockdep_map *lock, int read) |
---|
3937 | 5593 | { |
---|
3938 | 5594 | unsigned long flags; |
---|
3939 | 5595 | int ret = 0; |
---|
3940 | 5596 | |
---|
3941 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5597 | + if (unlikely(!lockdep_enabled())) |
---|
3942 | 5598 | return 1; /* avoid false negative lockdep_assert_held() */ |
---|
3943 | 5599 | |
---|
3944 | 5600 | raw_local_irq_save(flags); |
---|
3945 | 5601 | check_flags(flags); |
---|
3946 | 5602 | |
---|
3947 | | - current->lockdep_recursion = 1; |
---|
| 5603 | + lockdep_recursion_inc(); |
---|
3948 | 5604 | ret = __lock_is_held(lock, read); |
---|
3949 | | - current->lockdep_recursion = 0; |
---|
| 5605 | + lockdep_recursion_finish(); |
---|
3950 | 5606 | raw_local_irq_restore(flags); |
---|
3951 | 5607 | |
---|
3952 | 5608 | return ret; |
---|
3953 | 5609 | } |
---|
3954 | 5610 | EXPORT_SYMBOL_GPL(lock_is_held_type); |
---|
| 5611 | +NOKPROBE_SYMBOL(lock_is_held_type); |
---|
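/*
 * Illustrative sketch, not part of the patch above: lock_is_held_type() is
 * what backs lockdep_is_held() and the lockdep_assert_held*() macros, the
 * usual way for code to document its locking requirements. The lock and
 * function names are made up; lockdep_assert_held() is the real macro.
 */
static DEFINE_SPINLOCK(example_state_lock);

static void example_update_state(void)
{
	lockdep_assert_held(&example_state_lock);	/* caller must hold it */
	/* ...modify data protected by example_state_lock... */
}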
3955 | 5612 | |
---|
3956 | 5613 | struct pin_cookie lock_pin_lock(struct lockdep_map *lock) |
---|
3957 | 5614 | { |
---|
3958 | 5615 | struct pin_cookie cookie = NIL_COOKIE; |
---|
3959 | 5616 | unsigned long flags; |
---|
3960 | 5617 | |
---|
3961 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5618 | + if (unlikely(!lockdep_enabled())) |
---|
3962 | 5619 | return cookie; |
---|
3963 | 5620 | |
---|
3964 | 5621 | raw_local_irq_save(flags); |
---|
3965 | 5622 | check_flags(flags); |
---|
3966 | 5623 | |
---|
3967 | | - current->lockdep_recursion = 1; |
---|
| 5624 | + lockdep_recursion_inc(); |
---|
3968 | 5625 | cookie = __lock_pin_lock(lock); |
---|
3969 | | - current->lockdep_recursion = 0; |
---|
| 5626 | + lockdep_recursion_finish(); |
---|
3970 | 5627 | raw_local_irq_restore(flags); |
---|
3971 | 5628 | |
---|
3972 | 5629 | return cookie; |
---|
.. | .. |
---|
3977 | 5634 | { |
---|
3978 | 5635 | unsigned long flags; |
---|
3979 | 5636 | |
---|
3980 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5637 | + if (unlikely(!lockdep_enabled())) |
---|
3981 | 5638 | return; |
---|
3982 | 5639 | |
---|
3983 | 5640 | raw_local_irq_save(flags); |
---|
3984 | 5641 | check_flags(flags); |
---|
3985 | 5642 | |
---|
3986 | | - current->lockdep_recursion = 1; |
---|
| 5643 | + lockdep_recursion_inc(); |
---|
3987 | 5644 | __lock_repin_lock(lock, cookie); |
---|
3988 | | - current->lockdep_recursion = 0; |
---|
| 5645 | + lockdep_recursion_finish(); |
---|
3989 | 5646 | raw_local_irq_restore(flags); |
---|
3990 | 5647 | } |
---|
3991 | 5648 | EXPORT_SYMBOL_GPL(lock_repin_lock); |
---|
.. | .. |
---|
3994 | 5651 | { |
---|
3995 | 5652 | unsigned long flags; |
---|
3996 | 5653 | |
---|
3997 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5654 | + if (unlikely(!lockdep_enabled())) |
---|
3998 | 5655 | return; |
---|
3999 | 5656 | |
---|
4000 | 5657 | raw_local_irq_save(flags); |
---|
4001 | 5658 | check_flags(flags); |
---|
4002 | 5659 | |
---|
4003 | | - current->lockdep_recursion = 1; |
---|
| 5660 | + lockdep_recursion_inc(); |
---|
4004 | 5661 | __lock_unpin_lock(lock, cookie); |
---|
4005 | | - current->lockdep_recursion = 0; |
---|
| 5662 | + lockdep_recursion_finish(); |
---|
4006 | 5663 | raw_local_irq_restore(flags); |
---|
4007 | 5664 | } |
---|
4008 | 5665 | EXPORT_SYMBOL_GPL(lock_unpin_lock); |
---|
4009 | 5666 | |
---|
4010 | 5667 | #ifdef CONFIG_LOCK_STAT |
---|
4011 | | -static int |
---|
4012 | | -print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, |
---|
4013 | | - unsigned long ip) |
---|
| 5668 | +static void print_lock_contention_bug(struct task_struct *curr, |
---|
| 5669 | + struct lockdep_map *lock, |
---|
| 5670 | + unsigned long ip) |
---|
4014 | 5671 | { |
---|
4015 | 5672 | if (!debug_locks_off()) |
---|
4016 | | - return 0; |
---|
| 5673 | + return; |
---|
4017 | 5674 | if (debug_locks_silent) |
---|
4018 | | - return 0; |
---|
| 5675 | + return; |
---|
4019 | 5676 | |
---|
4020 | 5677 | pr_warn("\n"); |
---|
4021 | 5678 | pr_warn("=================================\n"); |
---|
.. | .. |
---|
4026 | 5683 | curr->comm, task_pid_nr(curr)); |
---|
4027 | 5684 | print_lockdep_cache(lock); |
---|
4028 | 5685 | pr_cont(") at:\n"); |
---|
4029 | | - print_ip_sym(ip); |
---|
| 5686 | + print_ip_sym(KERN_WARNING, ip); |
---|
4030 | 5687 | pr_warn("but there are no locks held!\n"); |
---|
4031 | 5688 | pr_warn("\nother info that might help us debug this:\n"); |
---|
4032 | 5689 | lockdep_print_held_locks(curr); |
---|
4033 | 5690 | |
---|
4034 | 5691 | pr_warn("\nstack backtrace:\n"); |
---|
4035 | 5692 | dump_stack(); |
---|
4036 | | - |
---|
4037 | | - return 0; |
---|
4038 | 5693 | } |
---|
4039 | 5694 | |
---|
4040 | 5695 | static void |
---|
.. | .. |
---|
4112 | 5767 | hlock->holdtime_stamp = now; |
---|
4113 | 5768 | } |
---|
4114 | 5769 | |
---|
4115 | | - trace_lock_acquired(lock, ip); |
---|
4116 | | - |
---|
4117 | 5770 | stats = get_lock_stats(hlock_class(hlock)); |
---|
4118 | 5771 | if (waittime) { |
---|
4119 | 5772 | if (hlock->read) |
---|
.. | .. |
---|
4132 | 5785 | { |
---|
4133 | 5786 | unsigned long flags; |
---|
4134 | 5787 | |
---|
4135 | | - if (unlikely(!lock_stat || !debug_locks)) |
---|
4136 | | - return; |
---|
| 5788 | + trace_lock_contended(lock, ip); |
---|
4137 | 5789 | |
---|
4138 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5790 | + if (unlikely(!lock_stat || !lockdep_enabled())) |
---|
4139 | 5791 | return; |
---|
4140 | 5792 | |
---|
4141 | 5793 | raw_local_irq_save(flags); |
---|
4142 | 5794 | check_flags(flags); |
---|
4143 | | - current->lockdep_recursion = 1; |
---|
4144 | | - trace_lock_contended(lock, ip); |
---|
| 5795 | + lockdep_recursion_inc(); |
---|
4145 | 5796 | __lock_contended(lock, ip); |
---|
4146 | | - current->lockdep_recursion = 0; |
---|
| 5797 | + lockdep_recursion_finish(); |
---|
4147 | 5798 | raw_local_irq_restore(flags); |
---|
4148 | 5799 | } |
---|
4149 | 5800 | EXPORT_SYMBOL_GPL(lock_contended); |
---|
.. | .. |
---|
4152 | 5803 | { |
---|
4153 | 5804 | unsigned long flags; |
---|
4154 | 5805 | |
---|
4155 | | - if (unlikely(!lock_stat || !debug_locks)) |
---|
4156 | | - return; |
---|
| 5806 | + trace_lock_acquired(lock, ip); |
---|
4157 | 5807 | |
---|
4158 | | - if (unlikely(current->lockdep_recursion)) |
---|
| 5808 | + if (unlikely(!lock_stat || !lockdep_enabled())) |
---|
4159 | 5809 | return; |
---|
4160 | 5810 | |
---|
4161 | 5811 | raw_local_irq_save(flags); |
---|
4162 | 5812 | check_flags(flags); |
---|
4163 | | - current->lockdep_recursion = 1; |
---|
| 5813 | + lockdep_recursion_inc(); |
---|
4164 | 5814 | __lock_acquired(lock, ip); |
---|
4165 | | - current->lockdep_recursion = 0; |
---|
| 5815 | + lockdep_recursion_finish(); |
---|
4166 | 5816 | raw_local_irq_restore(flags); |
---|
4167 | 5817 | } |
---|
4168 | 5818 | EXPORT_SYMBOL_GPL(lock_acquired); |
---|
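Note that trace_lock_contended()/trace_lock_acquired() are now emitted before the lock_stat/lockdep_enabled() checks, so the tracepoints can fire even when lock statistics are disabled. A hedged sketch of how a sleeping-lock slowpath typically drives these hooks (mutexes and rwsems follow this shape); my_lock and wait_until_granted() are illustrative names only:

```c
static void my_lock_slowpath(struct my_lock *lock)
{
	lock_contended(&lock->dep_map, _RET_IP_);	/* we are about to block on the lock */

	wait_until_granted(lock);			/* hypothetical sleep/wake step */

	lock_acquired(&lock->dep_map, _RET_IP_);	/* records the wait time when lock_stat is enabled */
}
```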
.. | .. |
---|
4179 | 5829 | int i; |
---|
4180 | 5830 | |
---|
4181 | 5831 | raw_local_irq_save(flags); |
---|
4182 | | - current->curr_chain_key = 0; |
---|
4183 | | - current->lockdep_depth = 0; |
---|
4184 | | - current->lockdep_recursion = 0; |
---|
| 5832 | + lockdep_init_task(current); |
---|
4185 | 5833 | memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); |
---|
4186 | 5834 | nr_hardirq_chains = 0; |
---|
4187 | 5835 | nr_softirq_chains = 0; |
---|
.. | .. |
---|
4192 | 5840 | raw_local_irq_restore(flags); |
---|
4193 | 5841 | } |
---|
4194 | 5842 | |
---|
4195 | | -static void zap_class(struct lock_class *class) |
---|
| 5843 | +/* Remove a class from a lock chain. Must be called with the graph lock held. */ |
---|
| 5844 | +static void remove_class_from_lock_chain(struct pending_free *pf, |
---|
| 5845 | + struct lock_chain *chain, |
---|
| 5846 | + struct lock_class *class) |
---|
4196 | 5847 | { |
---|
| 5848 | +#ifdef CONFIG_PROVE_LOCKING |
---|
4197 | 5849 | int i; |
---|
| 5850 | + |
---|
| 5851 | + for (i = chain->base; i < chain->base + chain->depth; i++) { |
---|
| 5852 | + if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes) |
---|
| 5853 | + continue; |
---|
| 5854 | + /* |
---|
| 5855 | + * Each lock class occurs at most once in a lock chain, so once
---|
| 5856 | + * we find a match we can break out of this loop.
---|
| 5857 | + */ |
---|
| 5858 | + goto free_lock_chain; |
---|
| 5859 | + } |
---|
| 5860 | + /* Since the chain has not been modified, return. */ |
---|
| 5861 | + return; |
---|
| 5862 | + |
---|
| 5863 | +free_lock_chain: |
---|
| 5864 | + free_chain_hlocks(chain->base, chain->depth); |
---|
| 5865 | + /* Overwrite the chain key for concurrent RCU readers. */ |
---|
| 5866 | + WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY); |
---|
| 5867 | + dec_chains(chain->irq_context); |
---|
| 5868 | + |
---|
| 5869 | + /* |
---|
| 5870 | + * Note: calling hlist_del_rcu() from inside a |
---|
| 5871 | + * hlist_for_each_entry_rcu() loop is safe. |
---|
| 5872 | + */ |
---|
| 5873 | + hlist_del_rcu(&chain->entry); |
---|
| 5874 | + __set_bit(chain - lock_chains, pf->lock_chains_being_freed); |
---|
| 5875 | + nr_zapped_lock_chains++; |
---|
| 5876 | +#endif |
---|
| 5877 | +} |
---|
| 5878 | + |
---|
| 5879 | +/* Must be called with the graph lock held. */ |
---|
| 5880 | +static void remove_class_from_lock_chains(struct pending_free *pf, |
---|
| 5881 | + struct lock_class *class) |
---|
| 5882 | +{ |
---|
| 5883 | + struct lock_chain *chain; |
---|
| 5884 | + struct hlist_head *head; |
---|
| 5885 | + int i; |
---|
| 5886 | + |
---|
| 5887 | + for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { |
---|
| 5888 | + head = chainhash_table + i; |
---|
| 5889 | + hlist_for_each_entry_rcu(chain, head, entry) { |
---|
| 5890 | + remove_class_from_lock_chain(pf, chain, class); |
---|
| 5891 | + } |
---|
| 5892 | + } |
---|
| 5893 | +} |
---|
| 5894 | + |
---|
| 5895 | +/* |
---|
| 5896 | + * Remove all references to a lock class. The caller must hold the graph lock. |
---|
| 5897 | + */ |
---|
| 5898 | +static void zap_class(struct pending_free *pf, struct lock_class *class) |
---|
| 5899 | +{ |
---|
| 5900 | + struct lock_list *entry; |
---|
| 5901 | + int i; |
---|
| 5902 | + |
---|
| 5903 | + WARN_ON_ONCE(!class->key); |
---|
4198 | 5904 | |
---|
4199 | 5905 | /* |
---|
4200 | 5906 | * Remove all dependencies this lock is |
---|
4201 | 5907 | * involved in: |
---|
4202 | 5908 | */ |
---|
4203 | | - for (i = 0; i < nr_list_entries; i++) { |
---|
4204 | | - if (list_entries[i].class == class) |
---|
4205 | | - list_del_rcu(&list_entries[i].entry); |
---|
| 5909 | + for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { |
---|
| 5910 | + entry = list_entries + i; |
---|
| 5911 | + if (entry->class != class && entry->links_to != class) |
---|
| 5912 | + continue; |
---|
| 5913 | + __clear_bit(i, list_entries_in_use); |
---|
| 5914 | + nr_list_entries--; |
---|
| 5915 | + list_del_rcu(&entry->entry); |
---|
4206 | 5916 | } |
---|
4207 | | - /* |
---|
4208 | | - * Unhash the class and remove it from the all_lock_classes list: |
---|
4209 | | - */ |
---|
4210 | | - hlist_del_rcu(&class->hash_entry); |
---|
4211 | | - list_del_rcu(&class->lock_entry); |
---|
| 5917 | + if (list_empty(&class->locks_after) && |
---|
| 5918 | + list_empty(&class->locks_before)) { |
---|
| 5919 | + list_move_tail(&class->lock_entry, &pf->zapped); |
---|
| 5920 | + hlist_del_rcu(&class->hash_entry); |
---|
| 5921 | + WRITE_ONCE(class->key, NULL); |
---|
| 5922 | + WRITE_ONCE(class->name, NULL); |
---|
| 5923 | + nr_lock_classes--; |
---|
| 5924 | + __clear_bit(class - lock_classes, lock_classes_in_use); |
---|
| 5925 | + if (class - lock_classes == max_lock_class_idx) |
---|
| 5926 | + max_lock_class_idx--; |
---|
| 5927 | + } else { |
---|
| 5928 | + WARN_ONCE(true, "%s() failed for class %s\n", __func__, |
---|
| 5929 | + class->name); |
---|
| 5930 | + } |
---|
4212 | 5931 | |
---|
4213 | | - RCU_INIT_POINTER(class->key, NULL); |
---|
4214 | | - RCU_INIT_POINTER(class->name, NULL); |
---|
| 5932 | + remove_class_from_lock_chains(pf, class); |
---|
| 5933 | + nr_zapped_classes++; |
---|
| 5934 | +} |
---|
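zap_class() now walks only the list_entries[] slots whose bit is set in list_entries_in_use, the usual bitmap-allocator scan that replaced the old linear walk up to nr_list_entries. The same idiom in isolation, with illustrative names (slots/slots_in_use are not lockdep symbols):

```c
static unsigned long slots[64];
static DECLARE_BITMAP(slots_in_use, 64);

static void release_matching(unsigned long val)
{
	int i;

	for_each_set_bit(i, slots_in_use, ARRAY_SIZE(slots)) {
		if (slots[i] != val)
			continue;
		__clear_bit(i, slots_in_use);	/* slot becomes allocatable again */
	}
}
```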
| 5935 | + |
---|
| 5936 | +static void reinit_class(struct lock_class *class) |
---|
| 5937 | +{ |
---|
| 5938 | + void *const p = class; |
---|
| 5939 | + const unsigned int offset = offsetof(struct lock_class, key); |
---|
| 5940 | + |
---|
| 5941 | + WARN_ON_ONCE(!class->lock_entry.next); |
---|
| 5942 | + WARN_ON_ONCE(!list_empty(&class->locks_after)); |
---|
| 5943 | + WARN_ON_ONCE(!list_empty(&class->locks_before)); |
---|
| 5944 | + memset(p + offset, 0, sizeof(*class) - offset); |
---|
| 5945 | + WARN_ON_ONCE(!class->lock_entry.next); |
---|
| 5946 | + WARN_ON_ONCE(!list_empty(&class->locks_after)); |
---|
| 5947 | + WARN_ON_ONCE(!list_empty(&class->locks_before)); |
---|
4215 | 5948 | } |
---|
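reinit_class() wipes everything from ->key onward while leaving the list linkage that precedes it intact, so a zapped lock_class can go back on free_lock_classes and be handed out again. The idiom in miniature, using a made-up struct rather than the kernel's struct lock_class (needs <linux/list.h>, <linux/stddef.h> and <linux/string.h>):

```c
struct tracked_obj {
	struct list_head link;		/* must survive recycling (cf. lock_entry) */
	struct list_head deps;		/* must already be empty (cf. locks_after/locks_before) */
	void *key;			/* first field that gets wiped ... */
	unsigned long state[4];		/* ... along with everything after it */
};

static void reinit_tracked_obj(struct tracked_obj *obj)
{
	const unsigned int offset = offsetof(struct tracked_obj, key);

	memset((void *)obj + offset, 0, sizeof(*obj) - offset);
}
```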
4216 | 5949 | |
---|
4217 | 5950 | static inline int within(const void *addr, void *start, unsigned long size) |
---|
.. | .. |
---|
4219 | 5952 | return addr >= start && addr < start + size; |
---|
4220 | 5953 | } |
---|
4221 | 5954 | |
---|
| 5955 | +static bool inside_selftest(void) |
---|
| 5956 | +{ |
---|
| 5957 | + return current == lockdep_selftest_task_struct; |
---|
| 5958 | +} |
---|
| 5959 | + |
---|
| 5960 | +/* The caller must hold the graph lock. */ |
---|
| 5961 | +static struct pending_free *get_pending_free(void) |
---|
| 5962 | +{ |
---|
| 5963 | + return delayed_free.pf + delayed_free.index; |
---|
| 5964 | +} |
---|
| 5965 | + |
---|
| 5966 | +static void free_zapped_rcu(struct rcu_head *cb); |
---|
| 5967 | + |
---|
| 5968 | +/* |
---|
| 5969 | + * Schedule an RCU callback if no RCU callback is pending. Must be called with |
---|
| 5970 | + * the graph lock held. |
---|
| 5971 | + */ |
---|
| 5972 | +static void call_rcu_zapped(struct pending_free *pf) |
---|
| 5973 | +{ |
---|
| 5974 | + WARN_ON_ONCE(inside_selftest()); |
---|
| 5975 | + |
---|
| 5976 | + if (list_empty(&pf->zapped)) |
---|
| 5977 | + return; |
---|
| 5978 | + |
---|
| 5979 | + if (delayed_free.scheduled) |
---|
| 5980 | + return; |
---|
| 5981 | + |
---|
| 5982 | + delayed_free.scheduled = true; |
---|
| 5983 | + |
---|
| 5984 | + WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf); |
---|
| 5985 | + delayed_free.index ^= 1; |
---|
| 5986 | + |
---|
| 5987 | + call_rcu(&delayed_free.rcu_head, free_zapped_rcu); |
---|
| 5988 | +} |
---|
| 5989 | + |
---|
| 5990 | +/* The caller must hold the graph lock. May be called from RCU context. */ |
---|
| 5991 | +static void __free_zapped_classes(struct pending_free *pf) |
---|
| 5992 | +{ |
---|
| 5993 | + struct lock_class *class; |
---|
| 5994 | + |
---|
| 5995 | + check_data_structures(); |
---|
| 5996 | + |
---|
| 5997 | + list_for_each_entry(class, &pf->zapped, lock_entry) |
---|
| 5998 | + reinit_class(class); |
---|
| 5999 | + |
---|
| 6000 | + list_splice_init(&pf->zapped, &free_lock_classes); |
---|
| 6001 | + |
---|
| 6002 | +#ifdef CONFIG_PROVE_LOCKING |
---|
| 6003 | + bitmap_andnot(lock_chains_in_use, lock_chains_in_use, |
---|
| 6004 | + pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains)); |
---|
| 6005 | + bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains)); |
---|
| 6006 | +#endif |
---|
| 6007 | +} |
---|
| 6008 | + |
---|
| 6009 | +static void free_zapped_rcu(struct rcu_head *ch) |
---|
| 6010 | +{ |
---|
| 6011 | + struct pending_free *pf; |
---|
| 6012 | + unsigned long flags; |
---|
| 6013 | + |
---|
| 6014 | + if (WARN_ON_ONCE(ch != &delayed_free.rcu_head)) |
---|
| 6015 | + return; |
---|
| 6016 | + |
---|
| 6017 | + raw_local_irq_save(flags); |
---|
| 6018 | + lockdep_lock(); |
---|
| 6019 | + |
---|
| 6020 | + /* Drain the buffer that was closed when this callback was scheduled. */
---|
| 6021 | + pf = delayed_free.pf + (delayed_free.index ^ 1); |
---|
| 6022 | + __free_zapped_classes(pf); |
---|
| 6023 | + delayed_free.scheduled = false; |
---|
| 6024 | + |
---|
| 6025 | + /* |
---|
| 6026 | + * If there's anything on the open list, close and start a new callback. |
---|
| 6027 | + */ |
---|
| 6028 | + call_rcu_zapped(delayed_free.pf + delayed_free.index); |
---|
| 6029 | + |
---|
| 6030 | + lockdep_unlock(); |
---|
| 6031 | + raw_local_irq_restore(flags); |
---|
| 6032 | +} |
---|
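call_rcu_zapped() and free_zapped_rcu() together implement a two-buffer scheme: one pending_free is "open" for newly zapped classes, the other is "closed" and being drained by at most one in-flight RCU callback, and the index flips each time a callback is scheduled. The idiom in generic form; struct deferred_free and its fields are illustrative, not the lockdep definitions:

```c
struct deferred_free {
	struct rcu_head		rcu_head;	/* single, reused callback head */
	int			index;		/* which buffer is currently open */
	bool			scheduled;	/* is a callback already in flight? */
	struct list_head	buf[2];
};

static void drain_closed_buffer(struct rcu_head *rcu);	/* hypothetical callback */

/* Called under the lock that protects @df. */
static void schedule_drain(struct deferred_free *df)
{
	if (list_empty(&df->buf[df->index]) || df->scheduled)
		return;

	df->scheduled = true;
	df->index ^= 1;					/* close the buffer we just filled */
	call_rcu(&df->rcu_head, drain_closed_buffer);	/* callback drains buf[index ^ 1] */
}
```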
| 6033 | + |
---|
| 6034 | +/* |
---|
| 6035 | + * Remove all lock classes from the class hash table and from the |
---|
| 6036 | + * all_lock_classes list whose key or name is in the address range [start, |
---|
| 6037 | + * start + size). Move these lock classes to the zapped_classes list. Must |
---|
| 6038 | + * be called with the graph lock held. |
---|
| 6039 | + */ |
---|
| 6040 | +static void __lockdep_free_key_range(struct pending_free *pf, void *start, |
---|
| 6041 | + unsigned long size) |
---|
| 6042 | +{ |
---|
| 6043 | + struct lock_class *class; |
---|
| 6044 | + struct hlist_head *head; |
---|
| 6045 | + int i; |
---|
| 6046 | + |
---|
| 6047 | + /* Unhash all classes that were created by a module. */ |
---|
| 6048 | + for (i = 0; i < CLASSHASH_SIZE; i++) { |
---|
| 6049 | + head = classhash_table + i; |
---|
| 6050 | + hlist_for_each_entry_rcu(class, head, hash_entry) { |
---|
| 6051 | + if (!within(class->key, start, size) && |
---|
| 6052 | + !within(class->name, start, size)) |
---|
| 6053 | + continue; |
---|
| 6054 | + zap_class(pf, class); |
---|
| 6055 | + } |
---|
| 6056 | + } |
---|
| 6057 | +} |
---|
| 6058 | + |
---|
4222 | 6059 | /* |
---|
4223 | 6060 | * Used in module.c to remove lock classes from memory that is going to be |
---|
4224 | 6061 | * freed; and possibly re-used by other modules. |
---|
4225 | 6062 | * |
---|
4226 | | - * We will have had one sync_sched() before getting here, so we're guaranteed |
---|
4227 | | - * nobody will look up these exact classes -- they're properly dead but still |
---|
4228 | | - * allocated. |
---|
| 6063 | + * We will have had one synchronize_rcu() before getting here, so we're |
---|
| 6064 | + * guaranteed nobody will look up these exact classes -- they're properly dead |
---|
| 6065 | + * but still allocated. |
---|
4229 | 6066 | */ |
---|
4230 | | -void lockdep_free_key_range(void *start, unsigned long size) |
---|
| 6067 | +static void lockdep_free_key_range_reg(void *start, unsigned long size) |
---|
4231 | 6068 | { |
---|
4232 | | - struct lock_class *class; |
---|
4233 | | - struct hlist_head *head; |
---|
| 6069 | + struct pending_free *pf; |
---|
4234 | 6070 | unsigned long flags; |
---|
4235 | | - int i; |
---|
4236 | | - int locked; |
---|
| 6071 | + |
---|
| 6072 | + init_data_structures_once(); |
---|
4237 | 6073 | |
---|
4238 | 6074 | raw_local_irq_save(flags); |
---|
4239 | | - locked = graph_lock(); |
---|
4240 | | - |
---|
4241 | | - /* |
---|
4242 | | - * Unhash all classes that were created by this module: |
---|
4243 | | - */ |
---|
4244 | | - for (i = 0; i < CLASSHASH_SIZE; i++) { |
---|
4245 | | - head = classhash_table + i; |
---|
4246 | | - hlist_for_each_entry_rcu(class, head, hash_entry) { |
---|
4247 | | - if (within(class->key, start, size)) |
---|
4248 | | - zap_class(class); |
---|
4249 | | - else if (within(class->name, start, size)) |
---|
4250 | | - zap_class(class); |
---|
4251 | | - } |
---|
4252 | | - } |
---|
4253 | | - |
---|
4254 | | - if (locked) |
---|
4255 | | - graph_unlock(); |
---|
| 6075 | + lockdep_lock(); |
---|
| 6076 | + pf = get_pending_free(); |
---|
| 6077 | + __lockdep_free_key_range(pf, start, size); |
---|
| 6078 | + call_rcu_zapped(pf); |
---|
| 6079 | + lockdep_unlock(); |
---|
4256 | 6080 | raw_local_irq_restore(flags); |
---|
4257 | 6081 | |
---|
4258 | 6082 | /* |
---|
4259 | 6083 | * Wait for any possible iterators from look_up_lock_class() to pass |
---|
4260 | 6084 | * before continuing to free the memory they refer to. |
---|
4261 | | - * |
---|
4262 | | - * sync_sched() is sufficient because the read-side is IRQ disable. |
---|
4263 | 6085 | */ |
---|
4264 | | - synchronize_sched(); |
---|
4265 | | - |
---|
4266 | | - /* |
---|
4267 | | - * XXX at this point we could return the resources to the pool; |
---|
4268 | | - * instead we leak them. We would need to change to bitmap allocators |
---|
4269 | | - * instead of the linear allocators we have now. |
---|
4270 | | - */ |
---|
| 6086 | + synchronize_rcu(); |
---|
4271 | 6087 | } |
---|
4272 | 6088 | |
---|
4273 | | -void lockdep_reset_lock(struct lockdep_map *lock) |
---|
| 6089 | +/* |
---|
| 6090 | + * Free all lockdep keys in the range [start, start+size). Does not sleep. |
---|
| 6091 | + * Ignores debug_locks. Must only be used by the lockdep selftests. |
---|
| 6092 | + */ |
---|
| 6093 | +static void lockdep_free_key_range_imm(void *start, unsigned long size) |
---|
| 6094 | +{ |
---|
| 6095 | + struct pending_free *pf = delayed_free.pf; |
---|
| 6096 | + unsigned long flags; |
---|
| 6097 | + |
---|
| 6098 | + init_data_structures_once(); |
---|
| 6099 | + |
---|
| 6100 | + raw_local_irq_save(flags); |
---|
| 6101 | + lockdep_lock(); |
---|
| 6102 | + __lockdep_free_key_range(pf, start, size); |
---|
| 6103 | + __free_zapped_classes(pf); |
---|
| 6104 | + lockdep_unlock(); |
---|
| 6105 | + raw_local_irq_restore(flags); |
---|
| 6106 | +} |
---|
| 6107 | + |
---|
| 6108 | +void lockdep_free_key_range(void *start, unsigned long size) |
---|
| 6109 | +{ |
---|
| 6110 | + init_data_structures_once(); |
---|
| 6111 | + |
---|
| 6112 | + if (inside_selftest()) |
---|
| 6113 | + lockdep_free_key_range_imm(start, size); |
---|
| 6114 | + else |
---|
| 6115 | + lockdep_free_key_range_reg(start, size); |
---|
| 6116 | +} |
---|
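As the comment above says, module unload is the main caller: before a module's memory goes back to the allocator, every class whose key or name points into that image has to be retired. A hedged sketch of the caller pattern; release_code_region() and free_region() are illustrative stand-ins:

```c
static void release_code_region(void *base, unsigned long size)
{
	lockdep_free_key_range(base, size);	/* non-selftest path may block in synchronize_rcu() */
	free_region(base, size);		/* only now is it safe to reuse the memory */
}
```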
| 6117 | + |
---|
| 6118 | +/* |
---|
| 6119 | + * Check whether any element of the @lock->class_cache[] array refers to a |
---|
| 6120 | + * registered lock class. The caller must hold either the graph lock or the |
---|
| 6121 | + * RCU read lock. |
---|
| 6122 | + */ |
---|
| 6123 | +static bool lock_class_cache_is_registered(struct lockdep_map *lock) |
---|
4274 | 6124 | { |
---|
4275 | 6125 | struct lock_class *class; |
---|
4276 | 6126 | struct hlist_head *head; |
---|
4277 | | - unsigned long flags; |
---|
4278 | 6127 | int i, j; |
---|
4279 | | - int locked; |
---|
4280 | 6128 | |
---|
4281 | | - raw_local_irq_save(flags); |
---|
| 6129 | + for (i = 0; i < CLASSHASH_SIZE; i++) { |
---|
| 6130 | + head = classhash_table + i; |
---|
| 6131 | + hlist_for_each_entry_rcu(class, head, hash_entry) { |
---|
| 6132 | + for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) |
---|
| 6133 | + if (lock->class_cache[j] == class) |
---|
| 6134 | + return true; |
---|
| 6135 | + } |
---|
| 6136 | + } |
---|
| 6137 | + return false; |
---|
| 6138 | +} |
---|
| 6139 | + |
---|
| 6140 | +/* The caller must hold the graph lock. Does not sleep. */ |
---|
| 6141 | +static void __lockdep_reset_lock(struct pending_free *pf, |
---|
| 6142 | + struct lockdep_map *lock) |
---|
| 6143 | +{ |
---|
| 6144 | + struct lock_class *class; |
---|
| 6145 | + int j; |
---|
4282 | 6146 | |
---|
4283 | 6147 | /* |
---|
4284 | 6148 | * Remove all classes this lock might have: |
---|
.. | .. |
---|
4289 | 6153 | */ |
---|
4290 | 6154 | class = look_up_lock_class(lock, j); |
---|
4291 | 6155 | if (class) |
---|
4292 | | - zap_class(class); |
---|
| 6156 | + zap_class(pf, class); |
---|
4293 | 6157 | } |
---|
4294 | 6158 | /* |
---|
4295 | 6159 | * Debug check: in the end all mapped classes should |
---|
4296 | 6160 | * be gone. |
---|
4297 | 6161 | */ |
---|
| 6162 | + if (WARN_ON_ONCE(lock_class_cache_is_registered(lock))) |
---|
| 6163 | + debug_locks_off(); |
---|
| 6164 | +} |
---|
| 6165 | + |
---|
| 6166 | +/* |
---|
| 6167 | + * Remove all information lockdep has about a lock if debug_locks == 1. Free |
---|
| 6168 | + * released data structures from RCU context. |
---|
| 6169 | + */ |
---|
| 6170 | +static void lockdep_reset_lock_reg(struct lockdep_map *lock) |
---|
| 6171 | +{ |
---|
| 6172 | + struct pending_free *pf; |
---|
| 6173 | + unsigned long flags; |
---|
| 6174 | + int locked; |
---|
| 6175 | + |
---|
| 6176 | + raw_local_irq_save(flags); |
---|
4298 | 6177 | locked = graph_lock(); |
---|
4299 | | - for (i = 0; i < CLASSHASH_SIZE; i++) { |
---|
4300 | | - head = classhash_table + i; |
---|
4301 | | - hlist_for_each_entry_rcu(class, head, hash_entry) { |
---|
4302 | | - int match = 0; |
---|
| 6178 | + if (!locked) |
---|
| 6179 | + goto out_irq; |
---|
4303 | 6180 | |
---|
4304 | | - for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) |
---|
4305 | | - match |= class == lock->class_cache[j]; |
---|
| 6181 | + pf = get_pending_free(); |
---|
| 6182 | + __lockdep_reset_lock(pf, lock); |
---|
| 6183 | + call_rcu_zapped(pf); |
---|
4306 | 6184 | |
---|
4307 | | - if (unlikely(match)) { |
---|
4308 | | - if (debug_locks_off_graph_unlock()) { |
---|
4309 | | - /* |
---|
4310 | | - * We all just reset everything, how did it match? |
---|
4311 | | - */ |
---|
4312 | | - WARN_ON(1); |
---|
4313 | | - } |
---|
4314 | | - goto out_restore; |
---|
4315 | | - } |
---|
4316 | | - } |
---|
4317 | | - } |
---|
4318 | | - if (locked) |
---|
4319 | | - graph_unlock(); |
---|
4320 | | - |
---|
4321 | | -out_restore: |
---|
| 6185 | + graph_unlock(); |
---|
| 6186 | +out_irq: |
---|
4322 | 6187 | raw_local_irq_restore(flags); |
---|
4323 | 6188 | } |
---|
| 6189 | + |
---|
| 6190 | +/* |
---|
| 6191 | + * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the |
---|
| 6192 | + * lockdep selftests. |
---|
| 6193 | + */ |
---|
| 6194 | +static void lockdep_reset_lock_imm(struct lockdep_map *lock) |
---|
| 6195 | +{ |
---|
| 6196 | + struct pending_free *pf = delayed_free.pf; |
---|
| 6197 | + unsigned long flags; |
---|
| 6198 | + |
---|
| 6199 | + raw_local_irq_save(flags); |
---|
| 6200 | + lockdep_lock(); |
---|
| 6201 | + __lockdep_reset_lock(pf, lock); |
---|
| 6202 | + __free_zapped_classes(pf); |
---|
| 6203 | + lockdep_unlock(); |
---|
| 6204 | + raw_local_irq_restore(flags); |
---|
| 6205 | +} |
---|
| 6206 | + |
---|
| 6207 | +void lockdep_reset_lock(struct lockdep_map *lock) |
---|
| 6208 | +{ |
---|
| 6209 | + init_data_structures_once(); |
---|
| 6210 | + |
---|
| 6211 | + if (inside_selftest()) |
---|
| 6212 | + lockdep_reset_lock_imm(lock); |
---|
| 6213 | + else |
---|
| 6214 | + lockdep_reset_lock_reg(lock); |
---|
| 6215 | +} |
---|
| 6216 | + |
---|
| 6217 | +/* |
---|
| 6218 | + * Unregister a dynamically allocated key. |
---|
| 6219 | + * |
---|
| 6220 | + * Unlike lockdep_register_key(), a search is always done to find a matching |
---|
| 6221 | + * key, irrespective of debug_locks, to avoid a potential invalid access to
---|
| 6222 | + * freed memory in a lock_class entry.
---|
| 6223 | + */ |
---|
| 6224 | +void lockdep_unregister_key(struct lock_class_key *key) |
---|
| 6225 | +{ |
---|
| 6226 | + struct hlist_head *hash_head = keyhashentry(key); |
---|
| 6227 | + struct lock_class_key *k; |
---|
| 6228 | + struct pending_free *pf; |
---|
| 6229 | + unsigned long flags; |
---|
| 6230 | + bool found = false; |
---|
| 6231 | + |
---|
| 6232 | + might_sleep(); |
---|
| 6233 | + |
---|
| 6234 | + if (WARN_ON_ONCE(static_obj(key))) |
---|
| 6235 | + return; |
---|
| 6236 | + |
---|
| 6237 | + raw_local_irq_save(flags); |
---|
| 6238 | + lockdep_lock(); |
---|
| 6239 | + |
---|
| 6240 | + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { |
---|
| 6241 | + if (k == key) { |
---|
| 6242 | + hlist_del_rcu(&k->hash_entry); |
---|
| 6243 | + found = true; |
---|
| 6244 | + break; |
---|
| 6245 | + } |
---|
| 6246 | + } |
---|
| 6247 | + WARN_ON_ONCE(!found && debug_locks); |
---|
| 6248 | + if (found) { |
---|
| 6249 | + pf = get_pending_free(); |
---|
| 6250 | + __lockdep_free_key_range(pf, key, 1); |
---|
| 6251 | + call_rcu_zapped(pf); |
---|
| 6252 | + } |
---|
| 6253 | + lockdep_unlock(); |
---|
| 6254 | + raw_local_irq_restore(flags); |
---|
| 6255 | + |
---|
| 6256 | + /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ |
---|
| 6257 | + synchronize_rcu(); |
---|
| 6258 | +} |
---|
| 6259 | +EXPORT_SYMBOL_GPL(lockdep_unregister_key); |
---|
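lockdep_unregister_key() is the teardown half of the dynamic-key API; workqueues, for example, register a per-workqueue key at allocation time and unregister it on destruction. A hedged sketch of that lifecycle for a heap-allocated object; struct my_obj is an illustrative type, while the locking and lockdep calls are real APIs:

```c
struct my_obj {
	spinlock_t		lock;
	struct lock_class_key	key;	/* lives on the heap, hence needs registration */
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	lockdep_register_key(&obj->key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	/* No code path may take obj->lock beyond this point. */
	lockdep_unregister_key(&obj->key);	/* may sleep: ends with synchronize_rcu() */
	kfree(obj);
}
```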
4324 | 6260 | |
---|
4325 | 6261 | void __init lockdep_init(void) |
---|
4326 | 6262 | { |
---|
.. | .. |
---|
4334 | 6270 | printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); |
---|
4335 | 6271 | printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); |
---|
4336 | 6272 | |
---|
4337 | | - printk(" memory used by lock dependency info: %lu kB\n", |
---|
4338 | | - (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + |
---|
4339 | | - sizeof(struct list_head) * CLASSHASH_SIZE + |
---|
4340 | | - sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + |
---|
4341 | | - sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + |
---|
4342 | | - sizeof(struct list_head) * CHAINHASH_SIZE |
---|
| 6273 | + printk(" memory used by lock dependency info: %zu kB\n", |
---|
| 6274 | + (sizeof(lock_classes) + |
---|
| 6275 | + sizeof(lock_classes_in_use) + |
---|
| 6276 | + sizeof(classhash_table) + |
---|
| 6277 | + sizeof(list_entries) + |
---|
| 6278 | + sizeof(list_entries_in_use) + |
---|
| 6279 | + sizeof(chainhash_table) + |
---|
| 6280 | + sizeof(delayed_free) |
---|
4343 | 6281 | #ifdef CONFIG_PROVE_LOCKING |
---|
4344 | | - + sizeof(struct circular_queue) |
---|
| 6282 | + + sizeof(lock_cq) |
---|
| 6283 | + + sizeof(lock_chains) |
---|
| 6284 | + + sizeof(lock_chains_in_use) |
---|
| 6285 | + + sizeof(chain_hlocks) |
---|
4345 | 6286 | #endif |
---|
4346 | 6287 | ) / 1024 |
---|
4347 | 6288 | ); |
---|
4348 | 6289 | |
---|
4349 | | - printk(" per task-struct memory footprint: %lu bytes\n", |
---|
4350 | | - sizeof(struct held_lock) * MAX_LOCK_DEPTH); |
---|
| 6290 | +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) |
---|
| 6291 | + printk(" memory used for stack traces: %zu kB\n", |
---|
| 6292 | + (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024 |
---|
| 6293 | + ); |
---|
| 6294 | +#endif |
---|
| 6295 | + |
---|
| 6296 | + printk(" per task-struct memory footprint: %zu bytes\n", |
---|
| 6297 | + sizeof(((struct task_struct *)NULL)->held_locks)); |
---|
4351 | 6298 | } |
---|
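The per-task footprint line above uses the member-sizeof idiom: sizeof() only needs the type of its operand, so the NULL dereference is never evaluated. In isolation, with member_size() as a local illustrative macro name (current kernels also provide sizeof_field() in <linux/stddef.h> for this):

```c
#define member_size(type, member)	sizeof(((type *)NULL)->member)

/* e.g. member_size(struct task_struct, held_locks) == MAX_LOCK_DEPTH * sizeof(struct held_lock) */
```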
4352 | 6299 | |
---|
4353 | 6300 | static void |
---|
.. | .. |
---|
4515 | 6462 | pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n", |
---|
4516 | 6463 | !rcu_lockdep_current_cpu_online() |
---|
4517 | 6464 | ? "RCU used illegally from offline CPU!\n" |
---|
4518 | | - : !rcu_is_watching() |
---|
4519 | | - ? "RCU used illegally from idle CPU!\n" |
---|
4520 | | - : "", |
---|
| 6465 | + : "", |
---|
4521 | 6466 | rcu_scheduler_active, debug_locks); |
---|
4522 | 6467 | |
---|
4523 | 6468 | /* |
---|