2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/kernel/locking/lockdep.c
....@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * kernel/lockdep.c
34 *
....@@ -45,11 +46,14 @@
4546 #include <linux/hash.h>
4647 #include <linux/ftrace.h>
4748 #include <linux/stringify.h>
49
+#include <linux/bitmap.h>
4850 #include <linux/bitops.h>
4951 #include <linux/gfp.h>
5052 #include <linux/random.h>
5153 #include <linux/jhash.h>
5254 #include <linux/nmi.h>
55
+#include <linux/rcupdate.h>
56
+#include <linux/kprobes.h>
5357
5458 #include <asm/sections.h>
5559
....@@ -72,6 +76,23 @@
7276 #define lock_stat 0
7377 #endif
7478
79
+DEFINE_PER_CPU(unsigned int, lockdep_recursion);
80
+EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
81
+
82
+static __always_inline bool lockdep_enabled(void)
83
+{
84
+ if (!debug_locks)
85
+ return false;
86
+
87
+ if (this_cpu_read(lockdep_recursion))
88
+ return false;
89
+
90
+ if (current->lockdep_recursion)
91
+ return false;
92
+
93
+ return true;
94
+}
95
+
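The lockdep_enabled() helper added above gates every lockdep entry point on three things: the global debug_locks flag, the new per-CPU lockdep_recursion counter, and the existing per-task counter. A minimal user-space sketch of that guard pattern, using a thread-local counter as a stand-in for the per-CPU variable (names here are illustrative, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

static bool debug_locks = true;                    /* global kill switch */
static __thread unsigned int checker_recursion;    /* stand-in for per-CPU lockdep_recursion */

static bool checking_enabled(void)
{
        if (!debug_locks)
                return false;
        if (checker_recursion)          /* already inside the checker: do not recurse */
                return false;
        return true;
}

int main(void)
{
        printf("enabled: %d\n", checking_enabled());   /* 1 */
        checker_recursion++;
        printf("enabled: %d\n", checking_enabled());   /* 0: nested entry is refused */
        checker_recursion--;
        return 0;
}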
7596 /*
7697 * lockdep_lock: protects the lockdep graph, the hashes and the
7798 * class/list/hash allocators.
....@@ -80,11 +101,41 @@
80101 * to use a raw spinlock - we really dont want the spinlock
81102 * code to recurse back into the lockdep code...
82103 */
83
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
104
+static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
105
+static struct task_struct *__owner;
106
+
107
+static inline void lockdep_lock(void)
108
+{
109
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
110
+
111
+ __this_cpu_inc(lockdep_recursion);
112
+ arch_spin_lock(&__lock);
113
+ __owner = current;
114
+}
115
+
116
+static inline void lockdep_unlock(void)
117
+{
118
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
119
+
120
+ if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
121
+ return;
122
+
123
+ __owner = NULL;
124
+ arch_spin_unlock(&__lock);
125
+ __this_cpu_dec(lockdep_recursion);
126
+}
127
+
128
+static inline bool lockdep_assert_locked(void)
129
+{
130
+ return DEBUG_LOCKS_WARN_ON(__owner != current);
131
+}
132
+
133
+static struct task_struct *lockdep_selftest_task_struct;
134
+
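lockdep_lock()/lockdep_unlock() replace the bare arch spinlock with a wrapper that records the owning task, so unlock and lockdep_assert_locked() can catch mismatched callers. A rough user-space analogue built on a pthread spinlock, with the thread id playing the role of __owner (build with -pthread; the per-CPU recursion bump is omitted):

#include <assert.h>
#include <pthread.h>

static pthread_spinlock_t graph_lock_raw;
static pthread_t graph_owner;
static int graph_owner_valid;

static void graph_lock_acquire(void)
{
        pthread_spin_lock(&graph_lock_raw);
        graph_owner = pthread_self();           /* record the owner while holding the lock */
        graph_owner_valid = 1;
}

static void graph_lock_release(void)
{
        assert(graph_owner_valid && pthread_equal(graph_owner, pthread_self()));
        graph_owner_valid = 0;                  /* clear before dropping the raw lock */
        pthread_spin_unlock(&graph_lock_raw);
}

static void graph_assert_locked(void)
{
        assert(graph_owner_valid && pthread_equal(graph_owner, pthread_self()));
}

int main(void)
{
        pthread_spin_init(&graph_lock_raw, PTHREAD_PROCESS_PRIVATE);
        graph_lock_acquire();
        graph_assert_locked();
        graph_lock_release();
        return 0;
}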
84135
85136 static int graph_lock(void)
86137 {
87
- arch_spin_lock(&lockdep_lock);
138
+ lockdep_lock();
88139 /*
89140 * Make sure that if another CPU detected a bug while
90141 * walking the graph we dont change it (while the other
....@@ -92,27 +143,15 @@
92143 * dropped already)
93144 */
94145 if (!debug_locks) {
95
- arch_spin_unlock(&lockdep_lock);
146
+ lockdep_unlock();
96147 return 0;
97148 }
98
- /* prevent any recursions within lockdep from causing deadlocks */
99
- current->lockdep_recursion++;
100149 return 1;
101150 }
102151
103
-static inline int graph_unlock(void)
152
+static inline void graph_unlock(void)
104153 {
105
- if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
106
- /*
107
- * The lockdep graph lock isn't locked while we expect it to
108
- * be, we're confused now, bye!
109
- */
110
- return DEBUG_LOCKS_WARN_ON(1);
111
- }
112
-
113
- current->lockdep_recursion--;
114
- arch_spin_unlock(&lockdep_lock);
115
- return 0;
154
+ lockdep_unlock();
116155 }
117156
118157 /*
....@@ -123,33 +162,50 @@
123162 {
124163 int ret = debug_locks_off();
125164
126
- arch_spin_unlock(&lockdep_lock);
165
+ lockdep_unlock();
127166
128167 return ret;
129168 }
130169
131170 unsigned long nr_list_entries;
132171 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
172
+static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
133173
134174 /*
135175 * All data structures here are protected by the global debug_lock.
136176 *
137
- * Mutex key structs only get allocated, once during bootup, and never
138
- * get freed - this significantly simplifies the debugging code.
177
+ * nr_lock_classes is the number of elements of lock_classes[] that is
178
+ * in use.
139179 */
180
+#define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
181
+#define KEYHASH_SIZE (1UL << KEYHASH_BITS)
182
+static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
140183 unsigned long nr_lock_classes;
141
-static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
184
+unsigned long nr_zapped_classes;
185
+unsigned long max_lock_class_idx;
186
+struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
187
+DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
142188
143189 static inline struct lock_class *hlock_class(struct held_lock *hlock)
144190 {
145
- if (!hlock->class_idx) {
191
+ unsigned int class_idx = hlock->class_idx;
192
+
193
+ /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
194
+ barrier();
195
+
196
+ if (!test_bit(class_idx, lock_classes_in_use)) {
146197 /*
147198 * Someone passed in garbage, we give up.
148199 */
149200 DEBUG_LOCKS_WARN_ON(1);
150201 return NULL;
151202 }
152
- return lock_classes + hlock->class_idx - 1;
203
+
204
+ /*
205
+ * At this point, if the passed hlock->class_idx is still garbage,
206
+ * we just have to live with it
207
+ */
208
+ return lock_classes + class_idx;
153209 }
154210
155211 #ifdef CONFIG_LOCK_STAT
....@@ -274,11 +330,42 @@
274330 #endif
275331
276332 /*
277
- * We keep a global list of all lock classes. The list only grows,
278
- * never shrinks. The list is only accessed with the lockdep
279
- * spinlock lock held.
333
+ * We keep a global list of all lock classes. The list is only accessed with
334
+ * the lockdep spinlock lock held. free_lock_classes is a list with free
335
+ * elements. These elements are linked together by the lock_entry member in
336
+ * struct lock_class.
280337 */
281
-LIST_HEAD(all_lock_classes);
338
+static LIST_HEAD(all_lock_classes);
339
+static LIST_HEAD(free_lock_classes);
340
+
341
+/**
342
+ * struct pending_free - information about data structures about to be freed
343
+ * @zapped: Head of a list with struct lock_class elements.
344
+ * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
345
+ * are about to be freed.
346
+ */
347
+struct pending_free {
348
+ struct list_head zapped;
349
+ DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
350
+};
351
+
352
+/**
353
+ * struct delayed_free - data structures used for delayed freeing
354
+ *
355
+ * A data structure for delayed freeing of data structures that may be
356
+ * accessed by RCU readers at the time these were freed.
357
+ *
358
+ * @rcu_head: Used to schedule an RCU callback for freeing data structures.
359
+ * @index: Index of @pf to which freed data structures are added.
360
+ * @scheduled: Whether or not an RCU callback has been scheduled.
361
+ * @pf: Array with information about data structures about to be freed.
362
+ */
363
+static struct delayed_free {
364
+ struct rcu_head rcu_head;
365
+ int index;
366
+ int scheduled;
367
+ struct pending_free pf[2];
368
+} delayed_free;
282369
283370 /*
284371 * The lockdep classes are in a hash-table as well, for fast lookup:
....@@ -302,6 +389,21 @@
302389 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
303390
304391 /*
392
+ * the id of held_lock
393
+ */
394
+static inline u16 hlock_id(struct held_lock *hlock)
395
+{
396
+ BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);
397
+
398
+ return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
399
+}
400
+
401
+static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
402
+{
403
+ return hlock_id & (MAX_LOCKDEP_KEYS - 1);
404
+}
405
+
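hlock_id() packs the class index and the two-bit read state of a held lock into a u16, so chain_hlocks[] entries can carry both. A standalone sketch of the encoding, assuming MAX_LOCKDEP_KEYS_BITS is 13 as in the default configuration (the real value is Kconfig-dependent):

#include <stdio.h>
#include <stdint.h>

#define KEYS_BITS   13                      /* assumed MAX_LOCKDEP_KEYS_BITS */
#define MAX_KEYS    (1U << KEYS_BITS)

static uint16_t pack_hlock(unsigned int class_idx, unsigned int read)
{
        /* class index in the low 13 bits, read state (0..2) in the bits above */
        return class_idx | (read << KEYS_BITS);
}

static unsigned int unpack_class_idx(uint16_t id)
{
        return id & (MAX_KEYS - 1);
}

static unsigned int unpack_read(uint16_t id)
{
        return id >> KEYS_BITS;
}

int main(void)
{
        uint16_t id = pack_hlock(4211, 2);
        printf("class=%u read=%u\n", unpack_class_idx(id), unpack_read(id));
        return 0;
}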
406
+/*
305407 * The hash key of the lock dependency chains is a hash itself too:
306408 * it's a hash of all locks taken up to that lock, including that lock.
307409 * It's a 64-bit hash, because it's important for the keys to be
....@@ -316,17 +418,28 @@
316418 return k0 | (u64)k1 << 32;
317419 }
318420
319
-void lockdep_off(void)
421
+void lockdep_init_task(struct task_struct *task)
320422 {
321
- current->lockdep_recursion++;
423
+ task->lockdep_depth = 0; /* no locks held yet */
424
+ task->curr_chain_key = INITIAL_CHAIN_KEY;
425
+ task->lockdep_recursion = 0;
322426 }
323
-EXPORT_SYMBOL(lockdep_off);
324427
325
-void lockdep_on(void)
428
+static __always_inline void lockdep_recursion_inc(void)
326429 {
327
- current->lockdep_recursion--;
430
+ __this_cpu_inc(lockdep_recursion);
328431 }
329
-EXPORT_SYMBOL(lockdep_on);
432
+
433
+static __always_inline void lockdep_recursion_finish(void)
434
+{
435
+ if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
436
+ __this_cpu_write(lockdep_recursion, 0);
437
+}
438
+
439
+void lockdep_set_selftest_task(struct task_struct *task)
440
+{
441
+ lockdep_selftest_task_struct = task;
442
+}
330443
331444 /*
332445 * Debugging switches:
....@@ -371,13 +484,6 @@
371484 return 0;
372485 }
373486
374
-/*
375
- * Stack-trace: tightly packed array of stack backtrace
376
- * addresses. Protected by the graph_lock.
377
- */
378
-unsigned long nr_stack_trace_entries;
379
-static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
380
-
381487 static void print_lockdep_off(const char *bug_msg)
382488 {
383489 printk(KERN_DEBUG "%s\n", bug_msg);
....@@ -387,43 +493,105 @@
387493 #endif
388494 }
389495
390
-static int save_trace(struct stack_trace *trace)
496
+unsigned long nr_stack_trace_entries;
497
+
498
+#ifdef CONFIG_PROVE_LOCKING
499
+/**
500
+ * struct lock_trace - single stack backtrace
501
+ * @hash_entry: Entry in a stack_trace_hash[] list.
502
+ * @hash: jhash() of @entries.
503
+ * @nr_entries: Number of entries in @entries.
504
+ * @entries: Actual stack backtrace.
505
+ */
506
+struct lock_trace {
507
+ struct hlist_node hash_entry;
508
+ u32 hash;
509
+ u32 nr_entries;
510
+ unsigned long entries[] __aligned(sizeof(unsigned long));
511
+};
512
+#define LOCK_TRACE_SIZE_IN_LONGS \
513
+ (sizeof(struct lock_trace) / sizeof(unsigned long))
514
+/*
515
+ * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
516
+ */
517
+static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
518
+static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
519
+
520
+static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
391521 {
392
- trace->nr_entries = 0;
393
- trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
394
- trace->entries = stack_trace + nr_stack_trace_entries;
522
+ return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
523
+ memcmp(t1->entries, t2->entries,
524
+ t1->nr_entries * sizeof(t1->entries[0])) == 0;
525
+}
395526
396
- trace->skip = 3;
527
+static struct lock_trace *save_trace(void)
528
+{
529
+ struct lock_trace *trace, *t2;
530
+ struct hlist_head *hash_head;
531
+ u32 hash;
532
+ int max_entries;
397533
398
- save_stack_trace(trace);
534
+ BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
535
+ BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
399536
400
- /*
401
- * Some daft arches put -1 at the end to indicate its a full trace.
402
- *
403
- * <rant> this is buggy anyway, since it takes a whole extra entry so a
404
- * complete trace that maxes out the entries provided will be reported
405
- * as incomplete, friggin useless </rant>
406
- */
407
- if (trace->nr_entries != 0 &&
408
- trace->entries[trace->nr_entries-1] == ULONG_MAX)
409
- trace->nr_entries--;
537
+ trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
538
+ max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
539
+ LOCK_TRACE_SIZE_IN_LONGS;
410540
411
- trace->max_entries = trace->nr_entries;
412
-
413
- nr_stack_trace_entries += trace->nr_entries;
414
-
415
- if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
541
+ if (max_entries <= 0) {
416542 if (!debug_locks_off_graph_unlock())
417
- return 0;
543
+ return NULL;
418544
419545 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
420546 dump_stack();
421547
422
- return 0;
548
+ return NULL;
549
+ }
550
+ trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
551
+
552
+ hash = jhash(trace->entries, trace->nr_entries *
553
+ sizeof(trace->entries[0]), 0);
554
+ trace->hash = hash;
555
+ hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
556
+ hlist_for_each_entry(t2, hash_head, hash_entry) {
557
+ if (traces_identical(trace, t2))
558
+ return t2;
559
+ }
560
+ nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
561
+ hlist_add_head(&trace->hash_entry, hash_head);
562
+
563
+ return trace;
564
+}
565
+
566
+/* Return the number of stack traces in the stack_trace[] array. */
567
+u64 lockdep_stack_trace_count(void)
568
+{
569
+ struct lock_trace *trace;
570
+ u64 c = 0;
571
+ int i;
572
+
573
+ for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
574
+ hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
575
+ c++;
576
+ }
423577 }
424578
425
- return 1;
579
+ return c;
426580 }
581
+
582
+/* Return the number of stack hash chains that have at least one stack trace. */
583
+u64 lockdep_stack_hash_count(void)
584
+{
585
+ u64 c = 0;
586
+ int i;
587
+
588
+ for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
589
+ if (!hlist_empty(&stack_trace_hash[i]))
590
+ c++;
591
+
592
+ return c;
593
+}
594
+#endif
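save_trace() now deduplicates backtraces: the captured entries are hashed, the hash picks a bucket of stack_trace_hash[], and an identical existing trace is returned instead of storing a copy. A simplified user-space version of that lookup-or-insert scheme, using a trivial hash in place of jhash() and malloc() in place of the static stack_trace[] arena:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_BUCKETS 16           /* must be a power of two, as in the kernel table */

struct trace {
        struct trace *next;     /* hash-bucket chaining */
        unsigned int hash;
        unsigned int nr;        /* at most 8 entries in this toy */
        unsigned long entries[8];
};

static struct trace *buckets[NR_BUCKETS];

static unsigned int hash_entries(const unsigned long *e, unsigned int nr)
{
        unsigned int h = 0;
        for (unsigned int i = 0; i < nr; i++)
                h = h * 31 + (unsigned int)e[i];
        return h;
}

/* Return an existing identical trace, or store and return the new one. */
static struct trace *save_trace(const unsigned long *e, unsigned int nr)
{
        unsigned int hash = hash_entries(e, nr);
        struct trace **head = &buckets[hash & (NR_BUCKETS - 1)];

        for (struct trace *t = *head; t; t = t->next)
                if (t->hash == hash && t->nr == nr &&
                    !memcmp(t->entries, e, nr * sizeof(e[0])))
                        return t;

        struct trace *t = calloc(1, sizeof(*t));
        t->hash = hash;
        t->nr = nr;
        memcpy(t->entries, e, nr * sizeof(e[0]));
        t->next = *head;
        *head = t;
        return t;
}

int main(void)
{
        unsigned long bt[] = { 0x1000, 0x2000, 0x3000 };
        printf("same storage: %d\n", save_trace(bt, 3) == save_trace(bt, 3));  /* 1 */
        return 0;
}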
427595
428596 unsigned int nr_hardirq_chains;
429597 unsigned int nr_softirq_chains;
....@@ -437,6 +605,7 @@
437605 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
438606 #endif
439607
608
+#ifdef CONFIG_PROVE_LOCKING
440609 /*
441610 * Locking printouts:
442611 */
....@@ -453,9 +622,13 @@
453622 #include "lockdep_states.h"
454623 #undef LOCKDEP_STATE
455624 [LOCK_USED] = "INITIAL USE",
625
+ [LOCK_USED_READ] = "INITIAL READ USE",
626
+ /* abused as string storage for verify_lock_unused() */
627
+ [LOCK_USAGE_STATES] = "IN-NMI",
456628 };
629
+#endif
457630
458
-const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
631
+const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
459632 {
460633 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
461634 }
....@@ -467,15 +640,26 @@
467640
468641 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
469642 {
643
+ /*
644
+ * The usage character defaults to '.' (i.e., irqs disabled and not in
645
+ * irq context), which is the safest usage category.
646
+ */
470647 char c = '.';
471648
472
- if (class->usage_mask & lock_flag(bit + 2))
649
+ /*
650
+ * The order of the following usage checks matters, which will
651
+ * result in the outcome character as follows:
652
+ *
653
+ * - '+': irq is enabled and not in irq context
654
+ * - '-': in irq context and irq is disabled
655
+ * - '?': in irq context and irq is enabled
656
+ */
657
+ if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
473658 c = '+';
474
- if (class->usage_mask & lock_flag(bit)) {
475
- c = '-';
476
- if (class->usage_mask & lock_flag(bit + 2))
659
+ if (class->usage_mask & lock_flag(bit))
477660 c = '?';
478
- }
661
+ } else if (class->usage_mask & lock_flag(bit))
662
+ c = '-';
479663
480664 return c;
481665 }
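The reworked get_usage_char() maps two usage bits per irq state to one of four characters, and the reordered checks now match the table in the comment. The same mapping as a tiny table-driven sketch (bit names are descriptive, not the kernel's):

#include <stdio.h>

/* in_irq: the lock was ever acquired in irq context (USED_IN_*)
 * enabled: the lock was ever held with that irq type enabled (ENABLED_*) */
static char usage_char(int in_irq, int enabled)
{
        if (enabled)
                return in_irq ? '?' : '+';
        return in_irq ? '-' : '.';
}

int main(void)
{
        printf("%c %c %c %c\n",
               usage_char(0, 0),    /* '.' never in irq, irqs disabled while held  */
               usage_char(0, 1),    /* '+' irq enabled while held, never in irq    */
               usage_char(1, 0),    /* '-' used in irq, never with irq enabled     */
               usage_char(1, 1));   /* '?' both                                    */
        return 0;
}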
....@@ -519,7 +703,9 @@
519703
520704 printk(KERN_CONT " (");
521705 __print_lock_name(class);
522
- printk(KERN_CONT "){%s}", usage);
706
+ printk(KERN_CONT "){%s}-{%d:%d}", usage,
707
+ class->wait_type_outer ?: class->wait_type_inner,
708
+ class->wait_type_inner);
523709 }
524710
525711 static void print_lockdep_cache(struct lockdep_map *lock)
....@@ -539,19 +725,22 @@
539725 /*
540726 * We can be called locklessly through debug_show_all_locks() so be
541727 * extra careful, the hlock might have been released and cleared.
728
+ *
729
+ * If this indeed happens, let's pretend it does not hurt to continue
730
+ * to print the lock unless the hlock class_idx does not point to a
731
+ * registered class. The rationale here is: since we don't attempt
732
+ * to distinguish whether we are in this situation, if it just
733
+ * happened we can't count on class_idx to tell either.
542734 */
543
- unsigned int class_idx = hlock->class_idx;
735
+ struct lock_class *lock = hlock_class(hlock);
544736
545
- /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
546
- barrier();
547
-
548
- if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
737
+ if (!lock) {
549738 printk(KERN_CONT "<RELEASED>\n");
550739 return;
551740 }
552741
553
- printk(KERN_CONT "%p", hlock->instance);
554
- print_lock_name(lock_classes + class_idx - 1);
742
+ printk(KERN_CONT "%px", hlock->instance);
743
+ print_lock_name(lock);
555744 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
556745 }
557746
....@@ -596,11 +785,14 @@
596785 * Is this the address of a static object:
597786 */
598787 #ifdef __KERNEL__
599
-static int static_obj(void *obj)
788
+static int static_obj(const void *obj)
600789 {
601790 unsigned long start = (unsigned long) &_stext,
602791 end = (unsigned long) &_end,
603792 addr = (unsigned long) obj;
793
+
794
+ if (arch_is_kernel_initmem_freed(addr))
795
+ return 0;
604796
605797 /*
606798 * static variable?
....@@ -626,7 +818,8 @@
626818
627819 /*
628820 * To make lock name printouts unique, we calculate a unique
629
- * class->name_version generation counter:
821
+ * class->name_version generation counter. The caller must hold the graph
822
+ * lock.
630823 */
631824 static int count_matching_names(struct lock_class *new_class)
632825 {
....@@ -636,7 +829,7 @@
636829 if (!new_class->name)
637830 return 0;
638831
639
- list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
832
+ list_for_each_entry(class, &all_lock_classes, lock_entry) {
640833 if (new_class->key - new_class->subclass == class->key)
641834 return class->name_version;
642835 if (class->name && !strcmp(class->name, new_class->name))
....@@ -646,7 +839,8 @@
646839 return count + 1;
647840 }
648841
649
-static inline struct lock_class *
842
+/* used from NMI context -- must be lockless */
843
+static noinstr struct lock_class *
650844 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
651845 {
652846 struct lockdep_subclass_key *key;
....@@ -654,12 +848,14 @@
654848 struct lock_class *class;
655849
656850 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
851
+ instrumentation_begin();
657852 debug_locks_off();
658853 printk(KERN_ERR
659854 "BUG: looking up invalid subclass: %u\n", subclass);
660855 printk(KERN_ERR
661856 "turning off the locking correctness validator.\n");
662857 dump_stack();
858
+ instrumentation_end();
663859 return NULL;
664860 }
665861
....@@ -695,7 +891,8 @@
695891 * Huh! same key, different name? Did someone trample
696892 * on some memory? We're most confused.
697893 */
698
- WARN_ON_ONCE(class->name != lock->name);
894
+ WARN_ON_ONCE(class->name != lock->name &&
895
+ lock->key != &__lockdep_no_validate__);
699896 return class;
700897 }
701898 }
....@@ -711,6 +908,17 @@
711908 static bool assign_lock_key(struct lockdep_map *lock)
712909 {
713910 unsigned long can_addr, addr = (unsigned long)lock;
911
+
912
+#ifdef __KERNEL__
913
+ /*
914
+ * lockdep_free_key_range() assumes that struct lock_class_key
915
+ * objects do not overlap. Since we use the address of lock
916
+ * objects as class key for static objects, check whether the
917
+ * size of lock_class_key objects does not exceed the size of
918
+ * the smallest lock object.
919
+ */
920
+ BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
921
+#endif
714922
715923 if (__is_kernel_percpu_address(addr, &can_addr))
716924 lock->key = (void *)can_addr;
....@@ -732,6 +940,292 @@
732940 return true;
733941 }
734942
943
+#ifdef CONFIG_DEBUG_LOCKDEP
944
+
945
+/* Check whether element @e occurs in list @h */
946
+static bool in_list(struct list_head *e, struct list_head *h)
947
+{
948
+ struct list_head *f;
949
+
950
+ list_for_each(f, h) {
951
+ if (e == f)
952
+ return true;
953
+ }
954
+
955
+ return false;
956
+}
957
+
958
+/*
959
+ * Check whether entry @e occurs in any of the locks_after or locks_before
960
+ * lists.
961
+ */
962
+static bool in_any_class_list(struct list_head *e)
963
+{
964
+ struct lock_class *class;
965
+ int i;
966
+
967
+ for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
968
+ class = &lock_classes[i];
969
+ if (in_list(e, &class->locks_after) ||
970
+ in_list(e, &class->locks_before))
971
+ return true;
972
+ }
973
+ return false;
974
+}
975
+
976
+static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
977
+{
978
+ struct lock_list *e;
979
+
980
+ list_for_each_entry(e, h, entry) {
981
+ if (e->links_to != c) {
982
+ printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
983
+ c->name ? : "(?)",
984
+ (unsigned long)(e - list_entries),
985
+ e->links_to && e->links_to->name ?
986
+ e->links_to->name : "(?)",
987
+ e->class && e->class->name ? e->class->name :
988
+ "(?)");
989
+ return false;
990
+ }
991
+ }
992
+ return true;
993
+}
994
+
995
+#ifdef CONFIG_PROVE_LOCKING
996
+static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
997
+#endif
998
+
999
+static bool check_lock_chain_key(struct lock_chain *chain)
1000
+{
1001
+#ifdef CONFIG_PROVE_LOCKING
1002
+ u64 chain_key = INITIAL_CHAIN_KEY;
1003
+ int i;
1004
+
1005
+ for (i = chain->base; i < chain->base + chain->depth; i++)
1006
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
1007
+ /*
1008
+ * The 'unsigned long long' casts avoid that a compiler warning
1009
+ * is reported when building tools/lib/lockdep.
1010
+ */
1011
+ if (chain->chain_key != chain_key) {
1012
+ printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
1013
+ (unsigned long long)(chain - lock_chains),
1014
+ (unsigned long long)chain->chain_key,
1015
+ (unsigned long long)chain_key);
1016
+ return false;
1017
+ }
1018
+#endif
1019
+ return true;
1020
+}
1021
+
1022
+static bool in_any_zapped_class_list(struct lock_class *class)
1023
+{
1024
+ struct pending_free *pf;
1025
+ int i;
1026
+
1027
+ for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
1028
+ if (in_list(&class->lock_entry, &pf->zapped))
1029
+ return true;
1030
+ }
1031
+
1032
+ return false;
1033
+}
1034
+
1035
+static bool __check_data_structures(void)
1036
+{
1037
+ struct lock_class *class;
1038
+ struct lock_chain *chain;
1039
+ struct hlist_head *head;
1040
+ struct lock_list *e;
1041
+ int i;
1042
+
1043
+ /* Check whether all classes occur in a lock list. */
1044
+ for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1045
+ class = &lock_classes[i];
1046
+ if (!in_list(&class->lock_entry, &all_lock_classes) &&
1047
+ !in_list(&class->lock_entry, &free_lock_classes) &&
1048
+ !in_any_zapped_class_list(class)) {
1049
+ printk(KERN_INFO "class %px/%s is not in any class list\n",
1050
+ class, class->name ? : "(?)");
1051
+ return false;
1052
+ }
1053
+ }
1054
+
1055
+ /* Check whether all classes have valid lock lists. */
1056
+ for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1057
+ class = &lock_classes[i];
1058
+ if (!class_lock_list_valid(class, &class->locks_before))
1059
+ return false;
1060
+ if (!class_lock_list_valid(class, &class->locks_after))
1061
+ return false;
1062
+ }
1063
+
1064
+ /* Check the chain_key of all lock chains. */
1065
+ for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
1066
+ head = chainhash_table + i;
1067
+ hlist_for_each_entry_rcu(chain, head, entry) {
1068
+ if (!check_lock_chain_key(chain))
1069
+ return false;
1070
+ }
1071
+ }
1072
+
1073
+ /*
1074
+ * Check whether all list entries that are in use occur in a class
1075
+ * lock list.
1076
+ */
1077
+ for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1078
+ e = list_entries + i;
1079
+ if (!in_any_class_list(&e->entry)) {
1080
+ printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
1081
+ (unsigned int)(e - list_entries),
1082
+ e->class->name ? : "(?)",
1083
+ e->links_to->name ? : "(?)");
1084
+ return false;
1085
+ }
1086
+ }
1087
+
1088
+ /*
1089
+ * Check whether all list entries that are not in use do not occur in
1090
+ * a class lock list.
1091
+ */
1092
+ for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1093
+ e = list_entries + i;
1094
+ if (in_any_class_list(&e->entry)) {
1095
+ printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
1096
+ (unsigned int)(e - list_entries),
1097
+ e->class && e->class->name ? e->class->name :
1098
+ "(?)",
1099
+ e->links_to && e->links_to->name ?
1100
+ e->links_to->name : "(?)");
1101
+ return false;
1102
+ }
1103
+ }
1104
+
1105
+ return true;
1106
+}
1107
+
1108
+int check_consistency = 0;
1109
+module_param(check_consistency, int, 0644);
1110
+
1111
+static void check_data_structures(void)
1112
+{
1113
+ static bool once = false;
1114
+
1115
+ if (check_consistency && !once) {
1116
+ if (!__check_data_structures()) {
1117
+ once = true;
1118
+ WARN_ON(once);
1119
+ }
1120
+ }
1121
+}
1122
+
1123
+#else /* CONFIG_DEBUG_LOCKDEP */
1124
+
1125
+static inline void check_data_structures(void) { }
1126
+
1127
+#endif /* CONFIG_DEBUG_LOCKDEP */
1128
+
1129
+static void init_chain_block_buckets(void);
1130
+
1131
+/*
1132
+ * Initialize the lock_classes[] array elements, the free_lock_classes list
1133
+ * and also the delayed_free structure.
1134
+ */
1135
+static void init_data_structures_once(void)
1136
+{
1137
+ static bool __read_mostly ds_initialized, rcu_head_initialized;
1138
+ int i;
1139
+
1140
+ if (likely(rcu_head_initialized))
1141
+ return;
1142
+
1143
+ if (system_state >= SYSTEM_SCHEDULING) {
1144
+ init_rcu_head(&delayed_free.rcu_head);
1145
+ rcu_head_initialized = true;
1146
+ }
1147
+
1148
+ if (ds_initialized)
1149
+ return;
1150
+
1151
+ ds_initialized = true;
1152
+
1153
+ INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1154
+ INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1155
+
1156
+ for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1157
+ list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1158
+ INIT_LIST_HEAD(&lock_classes[i].locks_after);
1159
+ INIT_LIST_HEAD(&lock_classes[i].locks_before);
1160
+ }
1161
+ init_chain_block_buckets();
1162
+}
1163
+
1164
+static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1165
+{
1166
+ unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1167
+
1168
+ return lock_keys_hash + hash;
1169
+}
1170
+
1171
+/* Register a dynamically allocated key. */
1172
+void lockdep_register_key(struct lock_class_key *key)
1173
+{
1174
+ struct hlist_head *hash_head;
1175
+ struct lock_class_key *k;
1176
+ unsigned long flags;
1177
+
1178
+ if (WARN_ON_ONCE(static_obj(key)))
1179
+ return;
1180
+ hash_head = keyhashentry(key);
1181
+
1182
+ raw_local_irq_save(flags);
1183
+ if (!graph_lock())
1184
+ goto restore_irqs;
1185
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1186
+ if (WARN_ON_ONCE(k == key))
1187
+ goto out_unlock;
1188
+ }
1189
+ hlist_add_head_rcu(&key->hash_entry, hash_head);
1190
+out_unlock:
1191
+ graph_unlock();
1192
+restore_irqs:
1193
+ raw_local_irq_restore(flags);
1194
+}
1195
+EXPORT_SYMBOL_GPL(lockdep_register_key);
1196
+
1197
+/* Check whether a key has been registered as a dynamic key. */
1198
+static bool is_dynamic_key(const struct lock_class_key *key)
1199
+{
1200
+ struct hlist_head *hash_head;
1201
+ struct lock_class_key *k;
1202
+ bool found = false;
1203
+
1204
+ if (WARN_ON_ONCE(static_obj(key)))
1205
+ return false;
1206
+
1207
+ /*
1208
+ * If lock debugging is disabled lock_keys_hash[] may contain
1209
+ * pointers to memory that has already been freed. Avoid triggering
1210
+ * a use-after-free in that case by returning early.
1211
+ */
1212
+ if (!debug_locks)
1213
+ return true;
1214
+
1215
+ hash_head = keyhashentry(key);
1216
+
1217
+ rcu_read_lock();
1218
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1219
+ if (k == key) {
1220
+ found = true;
1221
+ break;
1222
+ }
1223
+ }
1224
+ rcu_read_unlock();
1225
+
1226
+ return found;
1227
+}
1228
+
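lockdep_register_key() and is_dynamic_key() track dynamically allocated lock_class_key objects in a small hash table keyed by the pointer value. A user-space sketch of the same registered-pointer lookup, hashing the address with a Fibonacci-style multiplier instead of the kernel's hash_long() (constants are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define KEYHASH_BITS 8
#define KEYHASH_SIZE (1UL << KEYHASH_BITS)

struct key {
        struct key *next;       /* bucket chaining, an hlist in the kernel */
};

static struct key *key_hash[KEYHASH_SIZE];

static struct key **key_bucket(const void *key)
{
        uint64_t h = (uint64_t)(uintptr_t)key * 0x9E3779B97F4A7C15ull;
        return &key_hash[h >> (64 - KEYHASH_BITS)];
}

static void register_key(struct key *k)
{
        struct key **head = key_bucket(k);
        k->next = *head;
        *head = k;
}

static bool is_registered(const struct key *k)
{
        for (struct key *p = *key_bucket(k); p; p = p->next)
                if (p == k)
                        return true;
        return false;
}

int main(void)
{
        struct key a, b;
        register_key(&a);
        printf("%d %d\n", is_registered(&a), is_registered(&b));  /* 1 0 */
        return 0;
}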
7351229 /*
7361230 * Register a lock's class in the hash-table, if the class is not present
7371231 * yet. Otherwise we look it up. We cache the result in the lock object
....@@ -743,6 +1237,7 @@
7431237 struct lockdep_subclass_key *key;
7441238 struct hlist_head *hash_head;
7451239 struct lock_class *class;
1240
+ int idx;
7461241
7471242 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
7481243
....@@ -753,7 +1248,7 @@
7531248 if (!lock->key) {
7541249 if (!assign_lock_key(lock))
7551250 return NULL;
756
- } else if (!static_obj(lock->key)) {
1251
+ } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
7571252 return NULL;
7581253 }
7591254
....@@ -772,11 +1267,12 @@
7721267 goto out_unlock_set;
7731268 }
7741269
775
- /*
776
- * Allocate a new key from the static array, and add it to
777
- * the hash:
778
- */
779
- if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1270
+ init_data_structures_once();
1271
+
1272
+ /* Allocate a new lock class and add it to the hash. */
1273
+ class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1274
+ lock_entry);
1275
+ if (!class) {
7801276 if (!debug_locks_off_graph_unlock()) {
7811277 return NULL;
7821278 }
....@@ -785,24 +1281,31 @@
7851281 dump_stack();
7861282 return NULL;
7871283 }
788
- class = lock_classes + nr_lock_classes++;
1284
+ nr_lock_classes++;
1285
+ __set_bit(class - lock_classes, lock_classes_in_use);
7891286 debug_atomic_inc(nr_unused_locks);
7901287 class->key = key;
7911288 class->name = lock->name;
7921289 class->subclass = subclass;
793
- INIT_LIST_HEAD(&class->lock_entry);
794
- INIT_LIST_HEAD(&class->locks_before);
795
- INIT_LIST_HEAD(&class->locks_after);
1290
+ WARN_ON_ONCE(!list_empty(&class->locks_before));
1291
+ WARN_ON_ONCE(!list_empty(&class->locks_after));
7961292 class->name_version = count_matching_names(class);
1293
+ class->wait_type_inner = lock->wait_type_inner;
1294
+ class->wait_type_outer = lock->wait_type_outer;
1295
+ class->lock_type = lock->lock_type;
7971296 /*
7981297 * We use RCU's safe list-add method to make
7991298 * parallel walking of the hash-list safe:
8001299 */
8011300 hlist_add_head_rcu(&class->hash_entry, hash_head);
8021301 /*
803
- * Add it to the global list of classes:
1302
+ * Remove the class from the free list and add it to the global list
1303
+ * of classes.
8041304 */
805
- list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
1305
+ list_move_tail(&class->lock_entry, &all_lock_classes);
1306
+ idx = class - lock_classes;
1307
+ if (idx > max_lock_class_idx)
1308
+ max_lock_class_idx = idx;
8061309
8071310 if (verbose(class)) {
8081311 graph_unlock();
....@@ -843,7 +1346,10 @@
8431346 */
8441347 static struct lock_list *alloc_list_entry(void)
8451348 {
846
- if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
1349
+ int idx = find_first_zero_bit(list_entries_in_use,
1350
+ ARRAY_SIZE(list_entries));
1351
+
1352
+ if (idx >= ARRAY_SIZE(list_entries)) {
8471353 if (!debug_locks_off_graph_unlock())
8481354 return NULL;
8491355
....@@ -851,15 +1357,18 @@
8511357 dump_stack();
8521358 return NULL;
8531359 }
854
- return list_entries + nr_list_entries++;
1360
+ nr_list_entries++;
1361
+ __set_bit(idx, list_entries_in_use);
1362
+ return list_entries + idx;
8551363 }
8561364
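alloc_list_entry() now pairs the static list_entries[] array with an in-use bitmap, so entries released when a class is zapped can be reused. A compact sketch of such a find-first-zero-bit allocator over a fixed array (sizes chosen arbitrarily):

#include <stdio.h>

#define NR_SLOTS 64

struct entry { int payload; };

static struct entry slots[NR_SLOTS];
static unsigned long long in_use;               /* one bit per slot */

static struct entry *alloc_slot(void)
{
        for (int i = 0; i < NR_SLOTS; i++) {    /* find_first_zero_bit() */
                if (!(in_use & (1ULL << i))) {
                        in_use |= 1ULL << i;
                        return &slots[i];
                }
        }
        return NULL;                            /* table exhausted */
}

static void free_slot(struct entry *e)
{
        in_use &= ~(1ULL << (e - slots));       /* clear the bit, slot becomes reusable */
}

int main(void)
{
        struct entry *a = alloc_slot();
        free_slot(a);
        printf("reused: %d\n", alloc_slot() == a);      /* 1 */
        return 0;
}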
8571365 /*
8581366 * Add a new dependency to the head of the list:
8591367 */
860
-static int add_lock_to_list(struct lock_class *this, struct list_head *head,
861
- unsigned long ip, int distance,
862
- struct stack_trace *trace)
1368
+static int add_lock_to_list(struct lock_class *this,
1369
+ struct lock_class *links_to, struct list_head *head,
1370
+ unsigned long ip, u16 distance, u8 dep,
1371
+ const struct lock_trace *trace)
8631372 {
8641373 struct lock_list *entry;
8651374 /*
....@@ -871,8 +1380,10 @@
8711380 return 0;
8721381
8731382 entry->class = this;
1383
+ entry->links_to = links_to;
1384
+ entry->dep = dep;
8741385 entry->distance = distance;
875
- entry->trace = *trace;
1386
+ entry->trace = trace;
8761387 /*
8771388 * Both allocation and removal are done under the graph lock; but
8781389 * iteration is under RCU-sched; see look_up_lock_class() and
....@@ -886,17 +1397,21 @@
8861397 /*
8871398 * For good efficiency of modular, we use power of 2
8881399 */
889
-#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
1400
+#define MAX_CIRCULAR_QUEUE_SIZE (1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
8901401 #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
8911402
8921403 /*
893
- * The circular_queue and helpers is used to implement the
894
- * breadth-first search(BFS)algorithem, by which we can build
895
- * the shortest path from the next lock to be acquired to the
896
- * previous held lock if there is a circular between them.
1404
+ * The circular_queue and helpers are used to implement graph
1405
+ * breadth-first search (BFS) algorithm, by which we can determine
1406
+ * whether there is a path from a lock to another. In deadlock checks,
1407
+ * a path from the next lock to be acquired to a previous held lock
1408
+ * indicates that adding the <prev> -> <next> lock dependency will
1409
+ * produce a circle in the graph. Breadth-first search instead of
1410
+ * depth-first search is used in order to find the shortest (circular)
1411
+ * path.
8971412 */
8981413 struct circular_queue {
899
- unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
1414
+ struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
9001415 unsigned int front, rear;
9011416 };
9021417
....@@ -922,7 +1437,7 @@
9221437 return ((cq->rear + 1) & CQ_MASK) == cq->front;
9231438 }
9241439
925
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
1440
+static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
9261441 {
9271442 if (__cq_full(cq))
9281443 return -1;
....@@ -932,14 +1447,21 @@
9321447 return 0;
9331448 }
9341449
935
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
1450
+/*
1451
+ * Dequeue an element from the circular_queue, return a lock_list if
1452
+ * the queue is not empty, or NULL if otherwise.
1453
+ */
1454
+static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
9361455 {
937
- if (__cq_empty(cq))
938
- return -1;
1456
+ struct lock_list * lock;
9391457
940
- *elem = cq->element[cq->front];
1458
+ if (__cq_empty(cq))
1459
+ return NULL;
1460
+
1461
+ lock = cq->element[cq->front];
9411462 cq->front = (cq->front + 1) & CQ_MASK;
942
- return 0;
1463
+
1464
+ return lock;
9431465 }
9441466
9451467 static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
....@@ -947,23 +1469,19 @@
9471469 return (cq->rear - cq->front) & CQ_MASK;
9481470 }
9491471
950
-static inline void mark_lock_accessed(struct lock_list *lock,
951
- struct lock_list *parent)
1472
+static inline void mark_lock_accessed(struct lock_list *lock)
9521473 {
953
- unsigned long nr;
954
-
955
- nr = lock - list_entries;
956
- WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
957
- lock->parent = parent;
9581474 lock->class->dep_gen_id = lockdep_dependency_gen_id;
1475
+}
1476
+
1477
+static inline void visit_lock_entry(struct lock_list *lock,
1478
+ struct lock_list *parent)
1479
+{
1480
+ lock->parent = parent;
9591481 }
9601482
9611483 static inline unsigned long lock_accessed(struct lock_list *lock)
9621484 {
963
- unsigned long nr;
964
-
965
- nr = lock - list_entries;
966
- WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
9671485 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
9681486 }
9691487
....@@ -984,114 +1502,321 @@
9841502 return depth;
9851503 }
9861504
987
-static int __bfs(struct lock_list *source_entry,
988
- void *data,
989
- int (*match)(struct lock_list *entry, void *data),
990
- struct lock_list **target_entry,
991
- int forward)
1505
+/*
1506
+ * Return the forward or backward dependency list.
1507
+ *
1508
+ * @lock: the lock_list to get its class's dependency list
1509
+ * @offset: the offset to struct lock_class to determine whether it is
1510
+ * locks_after or locks_before
1511
+ */
1512
+static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
9921513 {
993
- struct lock_list *entry;
994
- struct list_head *head;
995
- struct circular_queue *cq = &lock_cq;
996
- int ret = 1;
1514
+ void *lock_class = lock->class;
9971515
998
- if (match(source_entry, data)) {
999
- *target_entry = source_entry;
1000
- ret = 0;
1001
- goto exit;
1002
- }
1003
-
1004
- if (forward)
1005
- head = &source_entry->class->locks_after;
1006
- else
1007
- head = &source_entry->class->locks_before;
1008
-
1009
- if (list_empty(head))
1010
- goto exit;
1011
-
1012
- __cq_init(cq);
1013
- __cq_enqueue(cq, (unsigned long)source_entry);
1014
-
1015
- while (!__cq_empty(cq)) {
1016
- struct lock_list *lock;
1017
-
1018
- __cq_dequeue(cq, (unsigned long *)&lock);
1019
-
1020
- if (!lock->class) {
1021
- ret = -2;
1022
- goto exit;
1023
- }
1024
-
1025
- if (forward)
1026
- head = &lock->class->locks_after;
1027
- else
1028
- head = &lock->class->locks_before;
1029
-
1030
- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1031
-
1032
- list_for_each_entry_rcu(entry, head, entry) {
1033
- if (!lock_accessed(entry)) {
1034
- unsigned int cq_depth;
1035
- mark_lock_accessed(entry, lock);
1036
- if (match(entry, data)) {
1037
- *target_entry = entry;
1038
- ret = 0;
1039
- goto exit;
1040
- }
1041
-
1042
- if (__cq_enqueue(cq, (unsigned long)entry)) {
1043
- ret = -1;
1044
- goto exit;
1045
- }
1046
- cq_depth = __cq_get_elem_count(cq);
1047
- if (max_bfs_queue_depth < cq_depth)
1048
- max_bfs_queue_depth = cq_depth;
1049
- }
1050
- }
1051
- }
1052
-exit:
1053
- return ret;
1516
+ return lock_class + offset;
10541517 }
1518
+/*
1519
+ * Return values of a bfs search:
1520
+ *
1521
+ * BFS_E* indicates an error
1522
+ * BFS_R* indicates a result (match or not)
1523
+ *
1524
+ * BFS_EINVALIDNODE: Find an invalid node in the graph.
1525
+ *
1526
+ * BFS_EQUEUEFULL: The queue is full while doing the bfs.
1527
+ *
1528
+ * BFS_RMATCH: Find the matched node in the graph, and put that node into
1529
+ * *@target_entry.
1530
+ *
1531
+ * BFS_RNOMATCH: Haven't found the matched node and keep *@target_entry
1532
+ * _unchanged_.
1533
+ */
1534
+enum bfs_result {
1535
+ BFS_EINVALIDNODE = -2,
1536
+ BFS_EQUEUEFULL = -1,
1537
+ BFS_RMATCH = 0,
1538
+ BFS_RNOMATCH = 1,
1539
+};
10551540
1056
-static inline int __bfs_forwards(struct lock_list *src_entry,
1057
- void *data,
1058
- int (*match)(struct lock_list *entry, void *data),
1059
- struct lock_list **target_entry)
1541
+/*
1542
+ * bfs_result < 0 means error
1543
+ */
1544
+static inline bool bfs_error(enum bfs_result res)
10601545 {
1061
- return __bfs(src_entry, data, match, target_entry, 1);
1062
-
1063
-}
1064
-
1065
-static inline int __bfs_backwards(struct lock_list *src_entry,
1066
- void *data,
1067
- int (*match)(struct lock_list *entry, void *data),
1068
- struct lock_list **target_entry)
1069
-{
1070
- return __bfs(src_entry, data, match, target_entry, 0);
1071
-
1546
+ return res < 0;
10721547 }
10731548
10741549 /*
1075
- * Recursive, forwards-direction lock-dependency checking, used for
1076
- * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1077
- * checking.
1550
+ * DEP_*_BIT in lock_list::dep
1551
+ *
1552
+ * For dependency @prev -> @next:
1553
+ *
1554
+ * SR: @prev is shared reader (->read != 0) and @next is recursive reader
1555
+ * (->read == 2)
1556
+ * ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
1557
+ * SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
1558
+ * EN: @prev is exclusive locker and @next is non-recursive locker
1559
+ *
1560
+ * Note that we define the value of DEP_*_BITs so that:
1561
+ * bit0 is prev->read == 0
1562
+ * bit1 is next->read != 2
10781563 */
1564
+#define DEP_SR_BIT (0 + (0 << 1)) /* 0 */
1565
+#define DEP_ER_BIT (1 + (0 << 1)) /* 1 */
1566
+#define DEP_SN_BIT (0 + (1 << 1)) /* 2 */
1567
+#define DEP_EN_BIT (1 + (1 << 1)) /* 3 */
1568
+
1569
+#define DEP_SR_MASK (1U << (DEP_SR_BIT))
1570
+#define DEP_ER_MASK (1U << (DEP_ER_BIT))
1571
+#define DEP_SN_MASK (1U << (DEP_SN_BIT))
1572
+#define DEP_EN_MASK (1U << (DEP_EN_BIT))
1573
+
1574
+static inline unsigned int
1575
+__calc_dep_bit(struct held_lock *prev, struct held_lock *next)
1576
+{
1577
+ return (prev->read == 0) + ((next->read != 2) << 1);
1578
+}
1579
+
1580
+static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
1581
+{
1582
+ return 1U << __calc_dep_bit(prev, next);
1583
+}
1584
+
1585
+/*
1586
+ * calculate the dep_bit for backwards edges. We care about whether @prev is
1587
+ * shared and whether @next is recursive.
1588
+ */
1589
+static inline unsigned int
1590
+__calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
1591
+{
1592
+ return (next->read != 2) + ((prev->read == 0) << 1);
1593
+}
1594
+
1595
+static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
1596
+{
1597
+ return 1U << __calc_dep_bitb(prev, next);
1598
+}
1599
+
1600
+/*
1601
+ * Initialize a lock_list entry @lock belonging to @class as the root for a BFS
1602
+ * search.
1603
+ */
1604
+static inline void __bfs_init_root(struct lock_list *lock,
1605
+ struct lock_class *class)
1606
+{
1607
+ lock->class = class;
1608
+ lock->parent = NULL;
1609
+ lock->only_xr = 0;
1610
+}
1611
+
1612
+/*
1613
+ * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the
1614
+ * root for a BFS search.
1615
+ *
1616
+ * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
1617
+ * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
1618
+ * and -(S*)->.
1619
+ */
1620
+static inline void bfs_init_root(struct lock_list *lock,
1621
+ struct held_lock *hlock)
1622
+{
1623
+ __bfs_init_root(lock, hlock_class(hlock));
1624
+ lock->only_xr = (hlock->read == 2);
1625
+}
1626
+
1627
+/*
1628
+ * Similar to bfs_init_root() but initialize the root for backwards BFS.
1629
+ *
1630
+ * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
1631
+ * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
1632
+ * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->).
1633
+ */
1634
+static inline void bfs_init_rootb(struct lock_list *lock,
1635
+ struct held_lock *hlock)
1636
+{
1637
+ __bfs_init_root(lock, hlock_class(hlock));
1638
+ lock->only_xr = (hlock->read != 0);
1639
+}
1640
+
1641
+static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
1642
+{
1643
+ if (!lock || !lock->parent)
1644
+ return NULL;
1645
+
1646
+ return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
1647
+ &lock->entry, struct lock_list, entry);
1648
+}
1649
+
1650
+/*
1651
+ * Breadth-First Search to find a strong path in the dependency graph.
1652
+ *
1653
+ * @source_entry: the source of the path we are searching for.
1654
+ * @data: data used for the second parameter of @match function
1655
+ * @match: match function for the search
1656
+ * @target_entry: pointer to the target of a matched path
1657
+ * @offset: the offset to struct lock_class to determine whether it is
1658
+ * locks_after or locks_before
1659
+ *
1660
+ * We may have multiple edges (considering different kinds of dependencies,
1661
+ * e.g. ER and SN) between two nodes in the dependency graph. But
1662
+ * only the strong dependency path in the graph is relevant to deadlocks. A
1663
+ * strong dependency path is a dependency path that doesn't have two adjacent
1664
+ * dependencies as -(*R)-> -(S*)->, please see:
1665
+ *
1666
+ * Documentation/locking/lockdep-design.rst
1667
+ *
1668
+ * for more explanation of the definition of strong dependency paths
1669
+ *
1670
+ * In __bfs(), we only traverse in the strong dependency path:
1671
+ *
1672
+ * In lock_list::only_xr, we record whether the previous dependency only
1673
+ * has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
1674
+ * filter out any -(S*)-> in the current dependency and after that, the
1675
+ * ->only_xr is set according to whether we only have -(*R)-> left.
1676
+ */
1677
+static enum bfs_result __bfs(struct lock_list *source_entry,
1678
+ void *data,
1679
+ bool (*match)(struct lock_list *entry, void *data),
1680
+ struct lock_list **target_entry,
1681
+ int offset)
1682
+{
1683
+ struct circular_queue *cq = &lock_cq;
1684
+ struct lock_list *lock = NULL;
1685
+ struct lock_list *entry;
1686
+ struct list_head *head;
1687
+ unsigned int cq_depth;
1688
+ bool first;
1689
+
1690
+ lockdep_assert_locked();
1691
+
1692
+ __cq_init(cq);
1693
+ __cq_enqueue(cq, source_entry);
1694
+
1695
+ while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
1696
+ if (!lock->class)
1697
+ return BFS_EINVALIDNODE;
1698
+
1699
+ /*
1700
+ * Step 1: check whether we already finish on this one.
1701
+ *
1702
+ * If we have visited all the dependencies from this @lock to
1703
+ * others (iow, if we have visited all lock_list entries in
1704
+ * @lock->class->locks_{after,before}) we skip, otherwise go
1705
+ * and visit all the dependencies in the list and mark this
1706
+ * list accessed.
1707
+ */
1708
+ if (lock_accessed(lock))
1709
+ continue;
1710
+ else
1711
+ mark_lock_accessed(lock);
1712
+
1713
+ /*
1714
+ * Step 2: check whether prev dependency and this form a strong
1715
+ * dependency path.
1716
+ */
1717
+ if (lock->parent) { /* Parent exists, check prev dependency */
1718
+ u8 dep = lock->dep;
1719
+ bool prev_only_xr = lock->parent->only_xr;
1720
+
1721
+ /*
1722
+ * Mask out all -(S*)-> if we only have *R in previous
1723
+ * step, because -(*R)-> -(S*)-> don't make up a strong
1724
+ * dependency.
1725
+ */
1726
+ if (prev_only_xr)
1727
+ dep &= ~(DEP_SR_MASK | DEP_SN_MASK);
1728
+
1729
+ /* If nothing left, we skip */
1730
+ if (!dep)
1731
+ continue;
1732
+
1733
+ /* If there are only -(*R)-> left, set that for the next step */
1734
+ lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
1735
+ }
1736
+
1737
+ /*
1738
+ * Step 3: we haven't visited this and there is a strong
1739
+ * dependency path to this, so check with @match.
1740
+ */
1741
+ if (match(lock, data)) {
1742
+ *target_entry = lock;
1743
+ return BFS_RMATCH;
1744
+ }
1745
+
1746
+ /*
1747
+ * Step 4: if not match, expand the path by adding the
1748
+ * forward or backward dependencies in the search
1749
+ *
1750
+ */
1751
+ first = true;
1752
+ head = get_dep_list(lock, offset);
1753
+ list_for_each_entry_rcu(entry, head, entry) {
1754
+ visit_lock_entry(entry, lock);
1755
+
1756
+ /*
1757
+ * Note we only enqueue the first of the list into the
1758
+ * queue, because we can always find a sibling
1759
+ * dependency from one (see __bfs_next()), as a result
1760
+ * the space of queue is saved.
1761
+ */
1762
+ if (!first)
1763
+ continue;
1764
+
1765
+ first = false;
1766
+
1767
+ if (__cq_enqueue(cq, entry))
1768
+ return BFS_EQUEUEFULL;
1769
+
1770
+ cq_depth = __cq_get_elem_count(cq);
1771
+ if (max_bfs_queue_depth < cq_depth)
1772
+ max_bfs_queue_depth = cq_depth;
1773
+ }
1774
+ }
1775
+
1776
+ return BFS_RNOMATCH;
1777
+}
1778
+
1779
+static inline enum bfs_result
1780
+__bfs_forwards(struct lock_list *src_entry,
1781
+ void *data,
1782
+ bool (*match)(struct lock_list *entry, void *data),
1783
+ struct lock_list **target_entry)
1784
+{
1785
+ return __bfs(src_entry, data, match, target_entry,
1786
+ offsetof(struct lock_class, locks_after));
1787
+
1788
+}
1789
+
1790
+static inline enum bfs_result
1791
+__bfs_backwards(struct lock_list *src_entry,
1792
+ void *data,
1793
+ bool (*match)(struct lock_list *entry, void *data),
1794
+ struct lock_list **target_entry)
1795
+{
1796
+ return __bfs(src_entry, data, match, target_entry,
1797
+ offsetof(struct lock_class, locks_before));
1798
+
1799
+}
1800
+
1801
+static void print_lock_trace(const struct lock_trace *trace,
1802
+ unsigned int spaces)
1803
+{
1804
+ stack_trace_print(trace->entries, trace->nr_entries, spaces);
1805
+}
10791806
10801807 /*
10811808 * Print a dependency chain entry (this is only done when a deadlock
10821809 * has been detected):
10831810 */
1084
-static noinline int
1811
+static noinline void
10851812 print_circular_bug_entry(struct lock_list *target, int depth)
10861813 {
10871814 if (debug_locks_silent)
1088
- return 0;
1815
+ return;
10891816 printk("\n-> #%u", depth);
10901817 print_lock_name(target->class);
10911818 printk(KERN_CONT ":\n");
1092
- print_stack_trace(&target->trace, 6);
1093
-
1094
- return 0;
1819
+ print_lock_trace(target->trace, 6);
10951820 }
10961821
10971822 static void
....@@ -1148,7 +1873,7 @@
11481873 * When a circular dependency is detected, print the
11491874 * header first:
11501875 */
1151
-static noinline int
1876
+static noinline void
11521877 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
11531878 struct held_lock *check_src,
11541879 struct held_lock *check_tgt)
....@@ -1156,7 +1881,7 @@
11561881 struct task_struct *curr = current;
11571882
11581883 if (debug_locks_silent)
1159
- return 0;
1884
+ return;
11601885
11611886 pr_warn("\n");
11621887 pr_warn("======================================================\n");
....@@ -1174,20 +1899,74 @@
11741899 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
11751900
11761901 print_circular_bug_entry(entry, depth);
1177
-
1178
- return 0;
11791902 }
11801903
1181
-static inline int class_equal(struct lock_list *entry, void *data)
1904
+/*
1905
+ * We are about to add A -> B into the dependency graph, and in __bfs() a
1906
+ * strong dependency path A -> .. -> B is found: hlock_class equals
1907
+ * entry->class.
1908
+ *
1909
+ * If A -> .. -> B can replace A -> B in any __bfs() search (means the former
1910
+ * is _stronger_ than or equal to the latter), we consider A -> B as redundant.
1911
+ * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
1912
+ * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
1913
+ * dependency graph, as any strong path ..-> A -> B ->.. we can get with
1914
+ * having dependency A -> B, we could already get an equivalent path ..-> A ->
1915
+ * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
1916
+ *
1917
+ * We need to make sure both the start and the end of A -> .. -> B is not
1918
+ * weaker than A -> B. For the start part, please see the comment in
1919
+ * check_redundant(). For the end part, we need:
1920
+ *
1921
+ * Either
1922
+ *
1923
+ * a) A -> B is -(*R)-> (everything is not weaker than that)
1924
+ *
1925
+ * or
1926
+ *
1927
+ * b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
1928
+ *
1929
+ */
1930
+static inline bool hlock_equal(struct lock_list *entry, void *data)
11821931 {
1183
- return entry->class == data;
1932
+ struct held_lock *hlock = (struct held_lock *)data;
1933
+
1934
+ return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
1935
+ (hlock->read == 2 || /* A -> B is -(*R)-> */
1936
+ !entry->only_xr); /* A -> .. -> B is -(*N)-> */
11841937 }
11851938
1186
-static noinline int print_circular_bug(struct lock_list *this,
1939
+/*
1940
+ * We are about to add B -> A into the dependency graph, and in __bfs() a
1941
+ * strong dependency path A -> .. -> B is found: hlock_class equals
1942
+ * entry->class.
1943
+ *
1944
+ * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong
1945
+ * dependency cycle, that means:
1946
+ *
1947
+ * Either
1948
+ *
1949
+ * a) B -> A is -(E*)->
1950
+ *
1951
+ * or
1952
+ *
1953
+ * b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B)
1954
+ *
1955
+ * as then we don't have -(*R)-> -(S*)-> in the cycle.
1956
+ */
1957
+static inline bool hlock_conflict(struct lock_list *entry, void *data)
1958
+{
1959
+ struct held_lock *hlock = (struct held_lock *)data;
1960
+
1961
+ return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
1962
+ (hlock->read == 0 || /* B -> A is -(E*)-> */
1963
+ !entry->only_xr); /* A -> .. -> B is -(*N)-> */
1964
+}
1965
+
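hlock_equal() and hlock_conflict() differ only in which read state of the new lock makes the found path acceptable: read == 2 for a redundant -(*R)-> edge, read == 0 for a cycle-closing -(E*)-> edge; otherwise the path itself must not end in -(*R)-> only. The two predicates side by side over a reduced representation (field names simplified):

#include <stdio.h>
#include <stdbool.h>

struct hlock { int class_idx; int read; };      /* read: 0 exclusive, 2 recursive read */
struct path  { int class_idx; bool only_xr; };  /* only_xr: path ends in -(*R)-> only  */

/* A -> .. -> B makes a new A -> B edge redundant. */
static bool path_makes_edge_redundant(struct path *p, struct hlock *b)
{
        return p->class_idx == b->class_idx &&
               (b->read == 2 || !p->only_xr);
}

/* A -> .. -> B plus a new B -> A edge closes a strong cycle. */
static bool path_conflicts_with_edge(struct path *p, struct hlock *b)
{
        return p->class_idx == b->class_idx &&
               (b->read == 0 || !p->only_xr);
}

int main(void)
{
        struct path  p = { .class_idx = 7, .only_xr = true };
        struct hlock b = { .class_idx = 7, .read = 1 };

        /* A path ending only in -(*R)-> neither subsumes nor conflicts with a
         * shared, non-recursive use of B. */
        printf("%d %d\n", path_makes_edge_redundant(&p, &b),
               path_conflicts_with_edge(&p, &b));       /* 0 0 */
        return 0;
}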
1966
+static noinline void print_circular_bug(struct lock_list *this,
11871967 struct lock_list *target,
11881968 struct held_lock *check_src,
1189
- struct held_lock *check_tgt,
1190
- struct stack_trace *trace)
1969
+ struct held_lock *check_tgt)
11911970 {
11921971 struct task_struct *curr = current;
11931972 struct lock_list *parent;
....@@ -1195,10 +1974,11 @@
11951974 int depth;
11961975
11971976 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1198
- return 0;
1977
+ return;
11991978
1200
- if (!save_trace(&this->trace))
1201
- return 0;
1979
+ this->trace = save_trace();
1980
+ if (!this->trace)
1981
+ return;
12021982
12031983 depth = get_lock_depth(target);
12041984
....@@ -1220,33 +2000,29 @@
12202000
12212001 printk("\nstack backtrace:\n");
12222002 dump_stack();
1223
-
1224
- return 0;
12252003 }
12262004
1227
-static noinline int print_bfs_bug(int ret)
2005
+static noinline void print_bfs_bug(int ret)
12282006 {
12292007 if (!debug_locks_off_graph_unlock())
1230
- return 0;
2008
+ return;
12312009
12322010 /*
12332011 * Breadth-first-search failed, graph got corrupted?
12342012 */
12352013 WARN(1, "lockdep bfs error:%d\n", ret);
1236
-
1237
- return 0;
12382014 }
12392015
1240
-static int noop_count(struct lock_list *entry, void *data)
2016
+static bool noop_count(struct lock_list *entry, void *data)
12412017 {
12422018 (*(unsigned long *)data)++;
1243
- return 0;
2019
+ return false;
12442020 }
12452021
12462022 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
12472023 {
12482024 unsigned long count = 0;
1249
- struct lock_list *uninitialized_var(target_entry);
2025
+ struct lock_list *target_entry;
12502026
12512027 __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
12522028
....@@ -1257,15 +2033,12 @@
12572033 unsigned long ret, flags;
12582034 struct lock_list this;
12592035
1260
- this.parent = NULL;
1261
- this.class = class;
2036
+ __bfs_init_root(&this, class);
12622037
12632038 raw_local_irq_save(flags);
1264
- current->lockdep_recursion = 1;
1265
- arch_spin_lock(&lockdep_lock);
2039
+ lockdep_lock();
12662040 ret = __lockdep_count_forward_deps(&this);
1267
- arch_spin_unlock(&lockdep_lock);
1268
- current->lockdep_recursion = 0;
2041
+ lockdep_unlock();
12692042 raw_local_irq_restore(flags);
12702043
12712044 return ret;
....@@ -1274,7 +2047,7 @@
12742047 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
12752048 {
12762049 unsigned long count = 0;
1277
- struct lock_list *uninitialized_var(target_entry);
2050
+ struct lock_list *target_entry;
12782051
12792052 __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
12802053
....@@ -1286,83 +2059,199 @@
12862059 unsigned long ret, flags;
12872060 struct lock_list this;
12882061
1289
- this.parent = NULL;
1290
- this.class = class;
2062
+ __bfs_init_root(&this, class);
12912063
12922064 raw_local_irq_save(flags);
1293
- current->lockdep_recursion = 1;
1294
- arch_spin_lock(&lockdep_lock);
2065
+ lockdep_lock();
12952066 ret = __lockdep_count_backward_deps(&this);
1296
- arch_spin_unlock(&lockdep_lock);
1297
- current->lockdep_recursion = 0;
2067
+ lockdep_unlock();
12982068 raw_local_irq_restore(flags);
12992069
13002070 return ret;
13012071 }
13022072
13032073 /*
1304
- * Prove that the dependency graph starting at <entry> can not
1305
- * lead to <target>. Print an error and return 0 if it does.
2074
+ * Check that the dependency graph starting at <src> can lead to
2075
+ * <target> or not.
13062076 */
1307
-static noinline int
1308
-check_noncircular(struct lock_list *root, struct lock_class *target,
1309
- struct lock_list **target_entry)
2077
+static noinline enum bfs_result
2078
+check_path(struct held_lock *target, struct lock_list *src_entry,
2079
+ bool (*match)(struct lock_list *entry, void *data),
2080
+ struct lock_list **target_entry)
13102081 {
1311
- int result;
2082
+ enum bfs_result ret;
2083
+
2084
+ ret = __bfs_forwards(src_entry, target, match, target_entry);
2085
+
2086
+ if (unlikely(bfs_error(ret)))
2087
+ print_bfs_bug(ret);
2088
+
2089
+ return ret;
2090
+}
2091
+
2092
+/*
2093
+ * Prove that the dependency graph starting at <src> can not
2094
+ * lead to <target>. If it can, there is a circle when adding
2095
+ * <target> -> <src> dependency.
2096
+ *
2097
+ * Print an error and return BFS_RMATCH if it does.
2098
+ */
2099
+static noinline enum bfs_result
2100
+check_noncircular(struct held_lock *src, struct held_lock *target,
2101
+ struct lock_trace **const trace)
2102
+{
2103
+ enum bfs_result ret;
2104
+ struct lock_list *target_entry;
2105
+ struct lock_list src_entry;
2106
+
2107
+ bfs_init_root(&src_entry, src);
13122108
13132109 debug_atomic_inc(nr_cyclic_checks);
13142110
1315
- result = __bfs_forwards(root, target, class_equal, target_entry);
2111
+ ret = check_path(target, &src_entry, hlock_conflict, &target_entry);
13162112
1317
- return result;
2113
+ if (unlikely(ret == BFS_RMATCH)) {
2114
+ if (!*trace) {
2115
+ /*
2116
+ * If save_trace() fails here, the printing might
2117
+ * trigger a WARN but, because of the !nr_entries check, it
2118
+ * should not do bad things.
2119
+ */
2120
+ *trace = save_trace();
2121
+ }
2122
+
2123
+ print_circular_bug(&src_entry, target_entry, src, target);
2124
+ }
2125
+
2126
+ return ret;
13182127 }
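
check_path()/check_noncircular() above are, at heart, a breadth-first reachability query over the dependency graph: start from <src>'s node, follow forward edges, and report whether <target> is reached. A minimal userspace sketch of the same idea on a toy adjacency matrix (the graph, the array sizes and all names here are illustrative stand-ins, not lockdep's real structures):

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

/* toy dependency graph: adj[a][b] != 0 means a -> b */
static const int adj[MAX_NODES][MAX_NODES] = {
	[0][1] = 1, [1][2] = 1, [2][3] = 1,	/* 0 -> 1 -> 2 -> 3 */
};

/* BFS from @src; returns true if @target is reachable */
static bool reaches(int src, int target)
{
	int queue[MAX_NODES], head = 0, tail = 0;
	bool seen[MAX_NODES] = { false };

	queue[tail++] = src;
	seen[src] = true;

	while (head < tail) {
		int cur = queue[head++];

		if (cur == target)
			return true;
		for (int next = 0; next < MAX_NODES; next++) {
			if (adj[cur][next] && !seen[next]) {
				seen[next] = true;
				queue[tail++] = next;
			}
		}
	}
	return false;
}

int main(void)
{
	/* adding 3 -> 0 would close a cycle iff 0 already reaches 3 */
	printf("0 reaches 3: %d\n", reaches(0, 3));	/* 1: would be circular */
	printf("3 reaches 0: %d\n", reaches(3, 0));	/* 0: safe to add 0 -> 3 */
	return 0;
}
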
13192128
1320
-static noinline int
1321
-check_redundant(struct lock_list *root, struct lock_class *target,
1322
- struct lock_list **target_entry)
2129
+#ifdef CONFIG_LOCKDEP_SMALL
2130
+/*
2131
+ * Check that the dependency graph starting at <src> can lead to
2132
+ * <target> or not. If it can, <src> -> <target> dependency is already
2133
+ * in the graph.
2134
+ *
2135
+ * Return BFS_RMATCH if it does, or BFS_RNOMATCH if it does not, return BFS_E* if
2136
+ * any error appears in the bfs search.
2137
+ */
2138
+static noinline enum bfs_result
2139
+check_redundant(struct held_lock *src, struct held_lock *target)
13232140 {
1324
- int result;
2141
+ enum bfs_result ret;
2142
+ struct lock_list *target_entry;
2143
+ struct lock_list src_entry;
2144
+
2145
+ bfs_init_root(&src_entry, src);
2146
+ /*
2147
+ * Special setup for check_redundant().
2148
+ *
2149
+ * To report redundant, we need to find a strong dependency path that
2150
+ * is equal to or stronger than <src> -> <target>. So if <src> is E,
2151
+ * we need to let __bfs() only search for a path starting at a -(E*)->,
2152
+ * we achieve this by setting the initial node's ->only_xr to true in
2153
+ * that case. And if <src> is S, we set initial ->only_xr to false
2154
+ * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
2155
+ */
2156
+ src_entry.only_xr = src->read == 0;
13252157
13262158 debug_atomic_inc(nr_redundant_checks);
13272159
1328
- result = __bfs_forwards(root, target, class_equal, target_entry);
2160
+ ret = check_path(target, &src_entry, hlock_equal, &target_entry);
13292161
1330
- return result;
2162
+ if (ret == BFS_RMATCH)
2163
+ debug_atomic_inc(nr_redundant);
2164
+
2165
+ return ret;
13312166 }
2167
+#endif
13322168
1333
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2169
+#ifdef CONFIG_TRACE_IRQFLAGS
2170
+
13342171 /*
13352172 * Forwards and backwards subgraph searching, for the purposes of
13362173 * proving that two subgraphs can be connected by a new dependency
13372174 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
2175
+ *
2176
+ * An irq safe->unsafe deadlock happens with the following conditions:
2177
+ *
2178
+ * 1) We have a strong dependency path A -> ... -> B
2179
+ *
2180
+ * 2) and we have ENABLED_IRQ usage of B and USED_IN_IRQ usage of A, therefore
2181
+ * irq can create a new dependency B -> A (consider the case that a holder
2182
+ * of B gets interrupted by an irq whose handler will try to acquire A).
2183
+ *
2184
+ * 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a
2185
+ * strong circle:
2186
+ *
2187
+ * For the usage bits of B:
2188
+ * a) if A -> B is -(*N)->, then B -> A could be any type, so any
2189
+ * ENABLED_IRQ usage suffices.
2190
+ * b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only
2191
+ * ENABLED_IRQ_*_READ usage suffices.
2192
+ *
2193
+ * For the usage bits of A:
2194
+ * c) if A -> B is -(E*)->, then B -> A could be any type, so any
2195
+ * USED_IN_IRQ usage suffices.
2196
+ * d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only
2197
+ * USED_IN_IRQ_*_READ usage suffices.
13382198 */
13392199
1340
-static inline int usage_match(struct lock_list *entry, void *bit)
2200
+/*
2201
+ * There is a strong dependency path in the dependency graph: A -> B, and now
2202
+ * we need to decide which usage bit of A should be accumulated to detect
2203
+ * safe->unsafe bugs.
2204
+ *
2205
+ * Note that usage_accumulate() is used in backwards search, so ->only_xr
2206
+ * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true).
2207
+ *
2208
+ * As above, if only_xr is false, which means A -> B has -(E*)-> dependency
2209
+ * path, any usage of A should be considered. Otherwise, we should only
2210
+ * consider _READ usage.
2211
+ */
2212
+static inline bool usage_accumulate(struct lock_list *entry, void *mask)
13412213 {
1342
- return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
2214
+ if (!entry->only_xr)
2215
+ *(unsigned long *)mask |= entry->class->usage_mask;
2216
+ else /* Mask out _READ usage bits */
2217
+ *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);
2218
+
2219
+ return false;
13432220 }
13442221
1345
-
2222
+/*
2223
+ * There is a strong dependency path in the dependency graph: A -> B, and now
2224
+ * we need to decide which usage bit of B conflicts with the usage bits of A,
2225
+ * i.e. which usage bit of B may introduce safe->unsafe deadlocks.
2226
+ *
2227
+ * As above, if only_xr is false, which means A -> B has -(*N)-> dependency
2228
+ * path, any usage of B should be considered. Otherwise, we should only
2229
+ * consider _READ usage.
2230
+ */
2231
+static inline bool usage_match(struct lock_list *entry, void *mask)
2232
+{
2233
+ if (!entry->only_xr)
2234
+ return !!(entry->class->usage_mask & *(unsigned long *)mask);
2235
+ else /* Mask out _READ usage bits */
2236
+ return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
2237
+}
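
usage_accumulate() and usage_match() above only differ in whether they OR the usage bits into an accumulator or test them against a query mask; in both cases ->only_xr decides whether the _READ usage bits are masked out. A small standalone sketch of that masking decision, with made-up bit values standing in for the LOCKF_* constants:

#include <stdbool.h>
#include <stdio.h>

/* toy stand-ins for the LOCKF_* masks: bit0 = IRQ, bit1 = IRQ_READ */
#define TOY_LOCKF_IRQ       0x1UL
#define TOY_LOCKF_IRQ_READ  0x2UL

struct toy_entry {
	unsigned long usage_mask;
	bool only_xr;
};

/* accumulate: OR in all usage bits, or only the non-_READ ones */
static void toy_accumulate(const struct toy_entry *e, unsigned long *mask)
{
	if (!e->only_xr)
		*mask |= e->usage_mask;
	else	/* mask out _READ usage bits */
		*mask |= (e->usage_mask & TOY_LOCKF_IRQ);
}

int main(void)
{
	struct toy_entry e = { .usage_mask = TOY_LOCKF_IRQ | TOY_LOCKF_IRQ_READ };
	unsigned long mask;

	mask = 0; e.only_xr = false; toy_accumulate(&e, &mask);
	printf("only_xr=0 -> mask=%#lx\n", mask);	/* 0x3: both bits kept */

	mask = 0; e.only_xr = true; toy_accumulate(&e, &mask);
	printf("only_xr=1 -> mask=%#lx\n", mask);	/* 0x1: _READ bit dropped */
	return 0;
}
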
13462238
13472239 /*
13482240 * Find a node in the forwards-direction dependency sub-graph starting
13492241 * at @root->class that matches @bit.
13502242 *
1351
- * Return 0 if such a node exists in the subgraph, and put that node
2243
+ * Return BFS_MATCH if such a node exists in the subgraph, and put that node
13522244 * into *@target_entry.
1353
- *
1354
- * Return 1 otherwise and keep *@target_entry unchanged.
1355
- * Return <0 on error.
13562245 */
1357
-static int
1358
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
2246
+static enum bfs_result
2247
+find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
13592248 struct lock_list **target_entry)
13602249 {
1361
- int result;
2250
+ enum bfs_result result;
13622251
13632252 debug_atomic_inc(nr_find_usage_forwards_checks);
13642253
1365
- result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
2254
+ result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
13662255
13672256 return result;
13682257 }
....@@ -1370,22 +2259,16 @@
13702259 /*
13712260 * Find a node in the backwards-direction dependency sub-graph starting
13722261 * at @root->class that matches @bit.
1373
- *
1374
- * Return 0 if such a node exists in the subgraph, and put that node
1375
- * into *@target_entry.
1376
- *
1377
- * Return 1 otherwise and keep *@target_entry unchanged.
1378
- * Return <0 on error.
13792262 */
1380
-static int
1381
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
2263
+static enum bfs_result
2264
+find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
13822265 struct lock_list **target_entry)
13832266 {
1384
- int result;
2267
+ enum bfs_result result;
13852268
13862269 debug_atomic_inc(nr_find_usage_backwards_checks);
13872270
1388
- result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
2271
+ result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
13892272
13902273 return result;
13912274 }
....@@ -1396,16 +2279,18 @@
13962279
13972280 printk("%*s->", depth, "");
13982281 print_lock_name(class);
1399
- printk(KERN_CONT " ops: %lu", class->ops);
2282
+#ifdef CONFIG_DEBUG_LOCKDEP
2283
+ printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
2284
+#endif
14002285 printk(KERN_CONT " {\n");
14012286
1402
- for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
2287
+ for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
14032288 if (class->usage_mask & (1 << bit)) {
14042289 int len = depth;
14052290
14062291 len += printk("%*s %s", depth, "", usage_str[bit]);
14072292 len += printk(KERN_CONT " at:\n");
1408
- print_stack_trace(class->usage_traces + bit, len);
2293
+ print_lock_trace(class->usage_traces[bit], len);
14092294 }
14102295 }
14112296 printk("%*s }\n", depth, "");
....@@ -1415,11 +2300,60 @@
14152300 }
14162301
14172302 /*
1418
- * printk the shortest lock dependencies from @start to @end in reverse order:
2303
+ * Dependency path printing:
2304
+ *
2305
+ * After BFS we get a lock dependency path (linked via ->parent of lock_list),
2306
+ * printing out each lock in the dependency path will help in understanding how
2307
+ * the deadlock could happen. Here are some details about dependency path
2308
+ * printing:
2309
+ *
2310
+ * 1) A lock_list can be either forwards or backwards for a lock dependency,
2311
+ * for a lock dependency A -> B, there are two lock_lists:
2312
+ *
2313
+ * a) lock_list in the ->locks_after list of A, whose ->class is B and
2314
+ * ->links_to is A. In this case, we can say the lock_list is
2315
+ * "A -> B" (forwards case).
2316
+ *
2317
+ * b) lock_list in the ->locks_before list of B, whose ->class is A
2318
+ * and ->links_to is B. In this case, we can say the lock_list is
2319
+ * "B <- A" (bacwards case).
2320
+ *
2321
+ * The ->trace of both a) and b) point to the call trace where B was
2322
+ * acquired with A held.
2323
+ *
2324
+ * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
2325
+ * represent a certain lock dependency, it only provides an initial entry
2326
+ * for BFS. For example, BFS may introduce a "helper" lock_list whose
2327
+ * ->class is A, as a result BFS will search all dependencies starting with
2328
+ * A, e.g. A -> B or A -> C.
2329
+ *
2330
+ * The notation of a forwards helper lock_list is like "-> A", which means
2331
+ * we should search the forwards dependencies starting with "A", e.g. A -> B
2332
+ * or A -> C.
2333
+ *
2334
+ * The notation of a backwards helper lock_list is like "<- B", which means
2335
+ * we should search the backwards dependencies ending with "B", e.g.
2336
+ * B <- A or B <- C.
2337
+ */
2338
+
2339
+/*
2340
+ * printk the shortest lock dependencies from @root to @leaf in reverse order.
2341
+ *
2342
+ * We have a lock dependency path as follows:
2343
+ *
2344
+ *        @root                                                                @leaf
2345
+ *          |                                                                    |
2346
+ *          V                                                                    V
2347
+ *             ->parent                                   ->parent
2348
+ * | lock_list | <--------- | lock_list | ... | lock_list  | <--------- | lock_list |
2349
+ * |    -> L1  |            | L1 -> L2  | ... |Ln-2 -> Ln-1|            | Ln-1 -> Ln|
2350
+ *
2351
+ * , so it's natural that we start from @leaf and print every ->class and
2352
+ * ->trace until we reach the @root.
14192353 */
14202354 static void __used
14212355 print_shortest_lock_dependencies(struct lock_list *leaf,
1422
- struct lock_list *root)
2356
+ struct lock_list *root)
14232357 {
14242358 struct lock_list *entry = leaf;
14252359 int depth;
....@@ -1430,7 +2364,7 @@
14302364 do {
14312365 print_lock_class_header(entry->class, depth);
14322366 printk("%*s ... acquired at:\n", depth, "");
1433
- print_stack_trace(&entry->trace, 2);
2367
+ print_lock_trace(entry->trace, 2);
14342368 printk("\n");
14352369
14362370 if (depth == 0 && (entry != root)) {
....@@ -1441,8 +2375,61 @@
14412375 entry = get_lock_parent(entry);
14422376 depth--;
14432377 } while (entry && (depth >= 0));
2378
+}
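
The BFS result handed to these printing helpers is just the leaf of a ->parent-linked chain, and the loop above walks that chain from leaf to root while decrementing the precomputed depth. A tiny userspace sketch of the same traversal over a hand-built parent chain (struct and field names are illustrative only):

#include <stdio.h>

struct toy_node {
	const char *name;
	struct toy_node *parent;	/* towards the BFS root */
};

static int toy_depth(struct toy_node *leaf)
{
	int depth = 0;

	while (leaf->parent) {
		leaf = leaf->parent;
		depth++;
	}
	return depth;
}

int main(void)
{
	struct toy_node root = { "-> L1", NULL };
	struct toy_node mid  = { "L1 -> L2", &root };
	struct toy_node leaf = { "L2 -> L3", &mid };
	struct toy_node *entry = &leaf;
	int depth = toy_depth(entry);

	/* print from the leaf back to the root, decrementing the depth */
	do {
		printf("%*s%s\n", depth, "", entry->name);
		entry = entry->parent;
		depth--;
	} while (entry && depth >= 0);
	return 0;
}
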
14442379
1445
- return;
2380
+/*
2381
+ * printk the shortest lock dependencies from @leaf to @root.
2382
+ *
2383
+ * We have a lock dependency path (from a backwards search) as follows:
2384
+ *
2385
+ *        @leaf                                                                @root
2386
+ *          |                                                                    |
2387
+ *          V                                                                    V
2388
+ *             ->parent                                   ->parent
2389
+ * | lock_list | ---------> | lock_list | ... | lock_list  | ---------> | lock_list |
2390
+ * | L2 <- L1  |            | L3 <- L2  | ... | Ln <- Ln-1 |            |    <- Ln  |
2391
+ *
2392
+ * , so when we iterate from @leaf to @root, we actually print the lock
2393
+ * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
2394
+ *
2395
+ * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
2396
+ * ->trace of L2 <- L1 is the call trace of L2. In fact, we don't have the call
2397
+ * trace of L1 in the dependency path, which is alright, because most of the
2398
+ * time we can figure out where L1 is held from the call trace of L2.
2399
+ */
2400
+static void __used
2401
+print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
2402
+ struct lock_list *root)
2403
+{
2404
+ struct lock_list *entry = leaf;
2405
+ const struct lock_trace *trace = NULL;
2406
+ int depth;
2407
+
2408
+ /* compute depth from generated tree by BFS */
2409
+ depth = get_lock_depth(leaf);
2410
+
2411
+ do {
2412
+ print_lock_class_header(entry->class, depth);
2413
+ if (trace) {
2414
+ printk("%*s ... acquired at:\n", depth, "");
2415
+ print_lock_trace(trace, 2);
2416
+ printk("\n");
2417
+ }
2418
+
2419
+ /*
2420
+ * Record the pointer to the trace for the next lock_list
2421
+ * entry, see the comments for the function.
2422
+ */
2423
+ trace = entry->trace;
2424
+
2425
+ if (depth == 0 && (entry != root)) {
2426
+ printk("lockdep:%s bad path found in chain graph\n", __func__);
2427
+ break;
2428
+ }
2429
+
2430
+ entry = get_lock_parent(entry);
2431
+ depth--;
2432
+ } while (entry && (depth >= 0));
14462433 }
14472434
14482435 static void
....@@ -1501,7 +2488,7 @@
15012488 printk("\n *** DEADLOCK ***\n\n");
15022489 }
15032490
1504
-static int
2491
+static void
15052492 print_bad_irq_dependency(struct task_struct *curr,
15062493 struct lock_list *prev_root,
15072494 struct lock_list *next_root,
....@@ -1514,7 +2501,7 @@
15142501 const char *irqclass)
15152502 {
15162503 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1517
- return 0;
2504
+ return;
15182505
15192506 pr_warn("\n");
15202507 pr_warn("=====================================================\n");
....@@ -1524,9 +2511,9 @@
15242511 pr_warn("-----------------------------------------------------\n");
15252512 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
15262513 curr->comm, task_pid_nr(curr),
1527
- curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
2514
+ lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
15282515 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1529
- curr->hardirqs_enabled,
2516
+ lockdep_hardirqs_enabled(),
15302517 curr->softirqs_enabled);
15312518 print_lock(next);
15322519
....@@ -1543,14 +2530,14 @@
15432530 print_lock_name(backwards_entry->class);
15442531 pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
15452532
1546
- print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
2533
+ print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
15472534
15482535 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
15492536 print_lock_name(forwards_entry->class);
15502537 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
15512538 pr_warn("...");
15522539
1553
- print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
2540
+ print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
15542541
15552542 pr_warn("\nother info that might help us debug this:\n\n");
15562543 print_irq_lock_scenario(backwards_entry, forwards_entry,
....@@ -1559,53 +2546,20 @@
15592546 lockdep_print_held_locks(curr);
15602547
15612548 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1562
- if (!save_trace(&prev_root->trace))
1563
- return 0;
1564
- print_shortest_lock_dependencies(backwards_entry, prev_root);
2549
+ prev_root->trace = save_trace();
2550
+ if (!prev_root->trace)
2551
+ return;
2552
+ print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
15652553
15662554 pr_warn("\nthe dependencies between the lock to be acquired");
15672555 pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1568
- if (!save_trace(&next_root->trace))
1569
- return 0;
2556
+ next_root->trace = save_trace();
2557
+ if (!next_root->trace)
2558
+ return;
15702559 print_shortest_lock_dependencies(forwards_entry, next_root);
15712560
15722561 pr_warn("\nstack backtrace:\n");
15732562 dump_stack();
1574
-
1575
- return 0;
1576
-}
1577
-
1578
-static int
1579
-check_usage(struct task_struct *curr, struct held_lock *prev,
1580
- struct held_lock *next, enum lock_usage_bit bit_backwards,
1581
- enum lock_usage_bit bit_forwards, const char *irqclass)
1582
-{
1583
- int ret;
1584
- struct lock_list this, that;
1585
- struct lock_list *uninitialized_var(target_entry);
1586
- struct lock_list *uninitialized_var(target_entry1);
1587
-
1588
- this.parent = NULL;
1589
-
1590
- this.class = hlock_class(prev);
1591
- ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1592
- if (ret < 0)
1593
- return print_bfs_bug(ret);
1594
- if (ret == 1)
1595
- return ret;
1596
-
1597
- that.parent = NULL;
1598
- that.class = hlock_class(next);
1599
- ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1600
- if (ret < 0)
1601
- return print_bfs_bug(ret);
1602
- if (ret == 1)
1603
- return ret;
1604
-
1605
- return print_bad_irq_dependency(curr, &this, &that,
1606
- target_entry, target_entry1,
1607
- prev, next,
1608
- bit_backwards, bit_forwards, irqclass);
16092563 }
16102564
16112565 static const char *state_names[] = {
....@@ -1624,103 +2578,266 @@
16242578
16252579 static inline const char *state_name(enum lock_usage_bit bit)
16262580 {
1627
- return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
2581
+ if (bit & LOCK_USAGE_READ_MASK)
2582
+ return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2583
+ else
2584
+ return state_names[bit >> LOCK_USAGE_DIR_MASK];
16282585 }
16292586
2587
+/*
2588
+ * The bit number is encoded like:
2589
+ *
2590
+ * bit0: 0 exclusive, 1 read lock
2591
+ * bit1: 0 used in irq, 1 irq enabled
2592
+ * bit2-n: state
2593
+ */
16302594 static int exclusive_bit(int new_bit)
16312595 {
1632
- /*
1633
- * USED_IN
1634
- * USED_IN_READ
1635
- * ENABLED
1636
- * ENABLED_READ
1637
- *
1638
- * bit 0 - write/read
1639
- * bit 1 - used_in/enabled
1640
- * bit 2+ state
1641
- */
1642
-
1643
- int state = new_bit & ~3;
1644
- int dir = new_bit & 2;
2596
+ int state = new_bit & LOCK_USAGE_STATE_MASK;
2597
+ int dir = new_bit & LOCK_USAGE_DIR_MASK;
16452598
16462599 /*
16472600 * keep state, bit flip the direction and strip read.
16482601 */
1649
- return state | (dir ^ 2);
2602
+ return state | (dir ^ LOCK_USAGE_DIR_MASK);
16502603 }
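
The encoding above packs three fields into the usage bit number: bit0 is the read flag, bit1 is the USED_IN/ENABLED direction and the remaining bits select the state. exclusive_bit() keeps the state, flips the direction and drops the read flag. A standalone sketch with the masks spelled out (the 0x1/0x2 values mirror LOCK_USAGE_READ_MASK/LOCK_USAGE_DIR_MASK, but the TOY_* names are local stand-ins):

#include <stdio.h>

#define TOY_READ_MASK  0x1	/* bit0: read lock */
#define TOY_DIR_MASK   0x2	/* bit1: 0 used-in-irq, 1 irq-enabled */
#define TOY_STATE_MASK (~0x3)	/* bit2..n: the irq state */

/* keep the state, flip the direction, strip the read flag */
static int toy_exclusive_bit(int bit)
{
	return (bit & TOY_STATE_MASK) | ((bit & TOY_DIR_MASK) ^ TOY_DIR_MASK);
}

int main(void)
{
	/* e.g. state 1 (bit2 set), ENABLED (bit1 set), READ (bit0 set) = 0b111 */
	int bit = 0x7;

	/* exclusive bit: same state, USED_IN direction, write access = 0b100 */
	printf("bit %#x -> exclusive %#x\n", bit, toy_exclusive_bit(bit));
	return 0;
}
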
16512604
1652
-static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1653
- struct held_lock *next, enum lock_usage_bit bit)
2605
+/*
2606
+ * Observe that when given a bitmask where each bitnr is encoded as above, a
2607
+ * right shift of the mask transforms the individual bitnrs as -1 and
2608
+ * conversely, a left shift transforms into +1 for the individual bitnrs.
2609
+ *
2610
+ * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
2611
+ * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
2612
+ * instead by subtracting the bit number by 2, or shifting the mask right by 2.
2613
+ *
2614
+ * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
2615
+ *
2616
+ * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
2617
+ * all bits set) and recompose with bitnr1 flipped.
2618
+ */
2619
+static unsigned long invert_dir_mask(unsigned long mask)
16542620 {
1655
- /*
1656
- * Prove that the new dependency does not connect a hardirq-safe
1657
- * lock with a hardirq-unsafe lock - to achieve this we search
1658
- * the backwards-subgraph starting at <prev>, and the
1659
- * forwards-subgraph starting at <next>:
1660
- */
1661
- if (!check_usage(curr, prev, next, bit,
1662
- exclusive_bit(bit), state_name(bit)))
1663
- return 0;
2621
+ unsigned long excl = 0;
16642622
1665
- bit++; /* _READ */
2623
+ /* Invert dir */
2624
+ excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2625
+ excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
16662626
1667
- /*
1668
- * Prove that the new dependency does not connect a hardirq-safe-read
1669
- * lock with a hardirq-unsafe lock - to achieve this we search
1670
- * the backwards-subgraph starting at <prev>, and the
1671
- * forwards-subgraph starting at <next>:
1672
- */
1673
- if (!check_usage(curr, prev, next, bit,
1674
- exclusive_bit(bit), state_name(bit)))
1675
- return 0;
1676
-
1677
- return 1;
2627
+ return excl;
16782628 }
16792629
1680
-static int
1681
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1682
- struct held_lock *next)
2630
+/*
2631
+ * Note that a LOCK_ENABLED_IRQ_*_READ usage and a LOCK_USED_IN_IRQ_*_READ
2632
+ * usage may cause deadlock too, for example:
2633
+ *
2634
+ *          P1                            P2
2635
+ *    <irq disabled>
2636
+ *    write_lock(l1);                     <irq enabled>
2637
+ *                                        read_lock(l2);
2638
+ *    write_lock(l2);
2639
+ *                                        <in irq>
2640
+ *                                        read_lock(l1);
2641
+ *
2642
+ * , in the above case, l1 will be marked as LOCK_USED_IN_IRQ_HARDIRQ_READ and l2
2643
+ * will be marked as LOCK_ENABLE_IRQ_HARDIRQ_READ, and this is a possible
2644
+ * deadlock.
2645
+ *
2646
+ * In fact, all of the following cases may cause deadlocks:
2647
+ *
2648
+ * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*
2649
+ * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*
2650
+ * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ
2651
+ * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ
2652
+ *
2653
+ * As a result, to calculate the "exclusive mask", first we invert the
2654
+ * direction (USED_IN/ENABLED) of the original mask, and 1) for all bits with
2655
+ * bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*). 2) for all
2656
+ * bits with bitnr0 cleared (LOCK_*), add those with bitnr0 set (LOCK_*_READ).
2657
+ */
2658
+static unsigned long exclusive_mask(unsigned long mask)
16832659 {
1684
-#define LOCKDEP_STATE(__STATE) \
1685
- if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1686
- return 0;
1687
-#include "lockdep_states.h"
1688
-#undef LOCKDEP_STATE
2660
+ unsigned long excl = invert_dir_mask(mask);
16892661
1690
- return 1;
2662
+ excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2663
+ excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2664
+
2665
+ return excl;
16912666 }
16922667
1693
-static void inc_chains(void)
2668
+/*
2669
+ * Retrieve the _possible_ original mask to which @mask is
2670
+ * exclusive. Ie: this is the opposite of exclusive_mask().
2671
+ * Note that 2 possible original bits can match an exclusive
2672
+ * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2673
+ * cleared. So both are returned for each exclusive bit.
2674
+ */
2675
+static unsigned long original_mask(unsigned long mask)
16942676 {
1695
- if (current->hardirq_context)
1696
- nr_hardirq_chains++;
1697
- else {
1698
- if (current->softirq_context)
1699
- nr_softirq_chains++;
1700
- else
1701
- nr_process_chains++;
2677
+ unsigned long excl = invert_dir_mask(mask);
2678
+
2679
+ /* Include read in existing usages */
2680
+ excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2681
+ excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2682
+
2683
+ return excl;
2684
+}
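
Both helpers above are pure bit arithmetic on the usage mask: shifting by 2 moves every bit between its USED_IN and ENABLED variants, and shifting by 1 moves between the write and _READ variants. A self-contained sketch with a four-bit toy mask covering a single state (the TOY_* values are illustrative, not the kernel's LOCKF_* layout):

#include <stdio.h>

/* one-state toy layout, mirroring bit0 = read, bit1 = direction */
#define TOY_USED_IN        (1UL << 0)	/* taken while in irq          */
#define TOY_USED_IN_READ   (1UL << 1)	/* read-taken while in irq     */
#define TOY_ENABLED        (1UL << 2)	/* taken with irq enabled      */
#define TOY_ENABLED_READ   (1UL << 3)	/* read-taken with irq enabled */

#define TOY_USED_IN_ALL    (TOY_USED_IN | TOY_USED_IN_READ)
#define TOY_ENABLED_ALL    (TOY_ENABLED | TOY_ENABLED_READ)
#define TOY_NONREAD_ALL    (TOY_USED_IN | TOY_ENABLED)
#define TOY_READ_ALL       (TOY_USED_IN_READ | TOY_ENABLED_READ)

/* flip USED_IN <-> ENABLED: shift the group by the direction distance (2) */
static unsigned long toy_invert_dir(unsigned long mask)
{
	return ((mask & TOY_ENABLED_ALL) >> 2) | ((mask & TOY_USED_IN_ALL) << 2);
}

/* direction flipped, then both the read and non-read flavours are added */
static unsigned long toy_exclusive_mask(unsigned long mask)
{
	unsigned long excl = toy_invert_dir(mask);

	excl |= (excl & TOY_READ_ALL) >> 1;
	excl |= (excl & TOY_NONREAD_ALL) << 1;
	return excl;
}

int main(void)
{
	/* a lock that was read-taken in irq context ... */
	unsigned long mask = TOY_USED_IN_READ;

	/* ... conflicts with both ENABLED and ENABLED_READ usage (0xc) */
	printf("mask %#lx -> exclusive %#lx\n", mask, toy_exclusive_mask(mask));
	return 0;
}
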
2685
+
2686
+/*
2687
+ * Find the first pair of bit match between an original
2688
+ * usage mask and an exclusive usage mask.
2689
+ */
2690
+static int find_exclusive_match(unsigned long mask,
2691
+ unsigned long excl_mask,
2692
+ enum lock_usage_bit *bitp,
2693
+ enum lock_usage_bit *excl_bitp)
2694
+{
2695
+ int bit, excl, excl_read;
2696
+
2697
+ for_each_set_bit(bit, &mask, LOCK_USED) {
2698
+ /*
2699
+ * exclusive_bit() strips the read bit, however,
2700
+ * LOCK_ENABLED_IRQ_*_READ may cause deadlocks too, so we need
2701
+ * to search excl | LOCK_USAGE_READ_MASK as well.
2702
+ */
2703
+ excl = exclusive_bit(bit);
2704
+ excl_read = excl | LOCK_USAGE_READ_MASK;
2705
+ if (excl_mask & lock_flag(excl)) {
2706
+ *bitp = bit;
2707
+ *excl_bitp = excl;
2708
+ return 0;
2709
+ } else if (excl_mask & lock_flag(excl_read)) {
2710
+ *bitp = bit;
2711
+ *excl_bitp = excl_read;
2712
+ return 0;
2713
+ }
17022714 }
2715
+ return -1;
2716
+}
2717
+
2718
+/*
2719
+ * Prove that the new dependency does not connect a hardirq-safe(-read)
2720
+ * lock with a hardirq-unsafe lock - to achieve this we search
2721
+ * the backwards-subgraph starting at <prev>, and the
2722
+ * forwards-subgraph starting at <next>:
2723
+ */
2724
+static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2725
+ struct held_lock *next)
2726
+{
2727
+ unsigned long usage_mask = 0, forward_mask, backward_mask;
2728
+ enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2729
+ struct lock_list *target_entry1;
2730
+ struct lock_list *target_entry;
2731
+ struct lock_list this, that;
2732
+ enum bfs_result ret;
2733
+
2734
+ /*
2735
+ * Step 1: gather all hard/soft IRQs usages backward in an
2736
+ * accumulated usage mask.
2737
+ */
2738
+ bfs_init_rootb(&this, prev);
2739
+
2740
+ ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
2741
+ if (bfs_error(ret)) {
2742
+ print_bfs_bug(ret);
2743
+ return 0;
2744
+ }
2745
+
2746
+ usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2747
+ if (!usage_mask)
2748
+ return 1;
2749
+
2750
+ /*
2751
+ * Step 2: find exclusive uses forward that match the previous
2752
+ * backward accumulated mask.
2753
+ */
2754
+ forward_mask = exclusive_mask(usage_mask);
2755
+
2756
+ bfs_init_root(&that, next);
2757
+
2758
+ ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2759
+ if (bfs_error(ret)) {
2760
+ print_bfs_bug(ret);
2761
+ return 0;
2762
+ }
2763
+ if (ret == BFS_RNOMATCH)
2764
+ return 1;
2765
+
2766
+ /*
2767
+ * Step 3: we found a bad match! Now retrieve a lock from the backward
2768
+ * list whose usage mask matches the exclusive usage mask from the
2769
+ * lock found on the forward list.
2770
+ *
2771
+ * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
2772
+ * the following case:
2773
+ *
2774
+ * When trying to add A -> B to the graph, we find that there is a
2775
+ * hardirq-safe L, such that L -> ... -> A, and another hardirq-unsafe M,
2776
+ * such that B -> ... -> M. However, M is **softirq-safe**; if we use the exact
2777
+ * inverted bits of M's usage_mask, we will find another lock N that is
2778
+ * **softirq-unsafe** and N -> ... -> A, however N -> ... -> M will not
2779
+ * cause an inversion deadlock.
2780
+ */
2781
+ backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
2782
+
2783
+ ret = find_usage_backwards(&this, backward_mask, &target_entry);
2784
+ if (bfs_error(ret)) {
2785
+ print_bfs_bug(ret);
2786
+ return 0;
2787
+ }
2788
+ if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
2789
+ return 1;
2790
+
2791
+ /*
2792
+ * Step 4: narrow down to a pair of incompatible usage bits
2793
+ * and report it.
2794
+ */
2795
+ ret = find_exclusive_match(target_entry->class->usage_mask,
2796
+ target_entry1->class->usage_mask,
2797
+ &backward_bit, &forward_bit);
2798
+ if (DEBUG_LOCKS_WARN_ON(ret == -1))
2799
+ return 1;
2800
+
2801
+ print_bad_irq_dependency(curr, &this, &that,
2802
+ target_entry, target_entry1,
2803
+ prev, next,
2804
+ backward_bit, forward_bit,
2805
+ state_name(backward_bit));
2806
+
2807
+ return 0;
17032808 }
17042809
17052810 #else
17062811
1707
-static inline int
1708
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1709
- struct held_lock *next)
2812
+static inline int check_irq_usage(struct task_struct *curr,
2813
+ struct held_lock *prev, struct held_lock *next)
17102814 {
17112815 return 1;
17122816 }
2817
+#endif /* CONFIG_TRACE_IRQFLAGS */
17132818
1714
-static inline void inc_chains(void)
2819
+static void inc_chains(int irq_context)
17152820 {
1716
- nr_process_chains++;
2821
+ if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2822
+ nr_hardirq_chains++;
2823
+ else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2824
+ nr_softirq_chains++;
2825
+ else
2826
+ nr_process_chains++;
17172827 }
17182828
1719
-#endif
2829
+static void dec_chains(int irq_context)
2830
+{
2831
+ if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2832
+ nr_hardirq_chains--;
2833
+ else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2834
+ nr_softirq_chains--;
2835
+ else
2836
+ nr_process_chains--;
2837
+}
17202838
17212839 static void
1722
-print_deadlock_scenario(struct held_lock *nxt,
1723
- struct held_lock *prv)
2840
+print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
17242841 {
17252842 struct lock_class *next = hlock_class(nxt);
17262843 struct lock_class *prev = hlock_class(prv);
....@@ -1738,12 +2855,12 @@
17382855 printk(" May be due to missing lock nesting notation\n\n");
17392856 }
17402857
1741
-static int
2858
+static void
17422859 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
17432860 struct held_lock *next)
17442861 {
17452862 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1746
- return 0;
2863
+ return;
17472864
17482865 pr_warn("\n");
17492866 pr_warn("============================================\n");
....@@ -1762,8 +2879,6 @@
17622879
17632880 pr_warn("\nstack backtrace:\n");
17642881 dump_stack();
1765
-
1766
- return 0;
17672882 }
17682883
17692884 /*
....@@ -1772,11 +2887,12 @@
17722887 * (Note that this has to be done separately, because the graph cannot
17732888 * detect such classes of deadlocks.)
17742889 *
1775
- * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
2890
+ * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same
2891
+ * lock class is held but nest_lock is also held, i.e. we rely on the
2892
+ * nest_lock to avoid the deadlock.
17762893 */
17772894 static int
1778
-check_deadlock(struct task_struct *curr, struct held_lock *next,
1779
- struct lockdep_map *next_instance, int read)
2895
+check_deadlock(struct task_struct *curr, struct held_lock *next)
17802896 {
17812897 struct held_lock *prev;
17822898 struct held_lock *nest = NULL;
....@@ -1795,8 +2911,8 @@
17952911 * Allow read-after-read recursion of the same
17962912 * lock class (i.e. read_lock(lock)+read_lock(lock)):
17972913 */
1798
- if ((read == 2) && prev->read)
1799
- return 2;
2914
+ if ((next->read == 2) && prev->read)
2915
+ continue;
18002916
18012917 /*
18022918 * We're holding the nest_lock, which serializes this lock's
....@@ -1805,14 +2921,15 @@
18052921 if (nest)
18062922 return 2;
18072923
1808
- return print_deadlock_bug(curr, prev, next);
2924
+ print_deadlock_bug(curr, prev, next);
2925
+ return 0;
18092926 }
18102927 return 1;
18112928 }
18122929
18132930 /*
18142931 * There was a chain-cache miss, and we are about to add a new dependency
1815
- * to a previous lock. We recursively validate the following rules:
2932
+ * to a previous lock. We validate the following rules:
18162933 *
18172934 * - would the adding of the <prev> -> <next> dependency create a
18182935 * circular dependency in the graph? [== circular deadlock]
....@@ -1834,53 +2951,47 @@
18342951 */
18352952 static int
18362953 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1837
- struct held_lock *next, int distance, struct stack_trace *trace,
1838
- int (*save)(struct stack_trace *trace))
2954
+ struct held_lock *next, u16 distance,
2955
+ struct lock_trace **const trace)
18392956 {
1840
- struct lock_list *uninitialized_var(target_entry);
18412957 struct lock_list *entry;
1842
- struct lock_list this;
1843
- int ret;
2958
+ enum bfs_result ret;
2959
+
2960
+ if (!hlock_class(prev)->key || !hlock_class(next)->key) {
2961
+ /*
2962
+ * The warning statements below may trigger a use-after-free
2963
+ * of the class name. It is better to trigger a use-after free
2964
+ * and to have the class name most of the time instead of not
2965
+ * having the class name available.
2966
+ */
2967
+ WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
2968
+ "Detected use-after-free of lock class %px/%s\n",
2969
+ hlock_class(prev),
2970
+ hlock_class(prev)->name);
2971
+ WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
2972
+ "Detected use-after-free of lock class %px/%s\n",
2973
+ hlock_class(next),
2974
+ hlock_class(next)->name);
2975
+ return 2;
2976
+ }
18442977
18452978 /*
18462979 * Prove that the new <prev> -> <next> dependency would not
18472980 * create a circular dependency in the graph. (We do this by
1848
- * forward-recursing into the graph starting at <next>, and
1849
- * checking whether we can reach <prev>.)
2981
+ * a breadth-first search into the graph starting at <next>,
2982
+ * and check whether we can reach <prev>.)
18502983 *
1851
- * We are using global variables to control the recursion, to
1852
- * keep the stackframe size of the recursive functions low:
2984
+ * The search is limited by the size of the circular queue (i.e.,
2985
+ * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
2986
+ * in the graph whose neighbours are to be checked.
18532987 */
1854
- this.class = hlock_class(next);
1855
- this.parent = NULL;
1856
- ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1857
- if (unlikely(!ret)) {
1858
- if (!trace->entries) {
1859
- /*
1860
- * If @save fails here, the printing might trigger
1861
- * a WARN but because of the !nr_entries it should
1862
- * not do bad things.
1863
- */
1864
- save(trace);
1865
- }
1866
- return print_circular_bug(&this, target_entry, next, prev, trace);
1867
- }
1868
- else if (unlikely(ret < 0))
1869
- return print_bfs_bug(ret);
1870
-
1871
- if (!check_prev_add_irq(curr, prev, next))
2988
+ ret = check_noncircular(next, prev, trace);
2989
+ if (unlikely(bfs_error(ret) || ret == BFS_RMATCH))
18722990 return 0;
18732991
1874
- /*
1875
- * For recursive read-locks we do all the dependency checks,
1876
- * but we dont store read-triggered dependencies (only
1877
- * write-triggered dependencies). This ensures that only the
1878
- * write-side dependencies matter, and that if for example a
1879
- * write-lock never takes any other locks, then the reads are
1880
- * equivalent to a NOP.
1881
- */
1882
- if (next->read == 2 || prev->read == 2)
1883
- return 1;
2992
+ if (!check_irq_usage(curr, prev, next))
2993
+ return 0;
2994
+
18842995 /*
18852996 * Is the <prev> -> <next> dependency already present?
18862997 *
....@@ -1893,41 +3004,73 @@
18933004 if (entry->class == hlock_class(next)) {
18943005 if (distance == 1)
18953006 entry->distance = 1;
1896
- return 1;
3007
+ entry->dep |= calc_dep(prev, next);
3008
+
3009
+ /*
3010
+ * Also, update the reverse dependency in @next's
3011
+ * ->locks_before list.
3012
+ *
3013
+ * Here we reuse @entry as the cursor, which is fine
3014
+ * because we won't go to the next iteration of the
3015
+ * outer loop:
3016
+ *
3017
+ * For normal cases, we return in the inner loop.
3018
+ *
3019
+ * If we fail to return, we have an inconsistency, i.e.
3020
+ * <prev>::locks_after contains <next> while
3021
+ * <next>::locks_before doesn't contain <prev>. In
3022
+ * that case, we return after the inner loop and indicate
3023
+ * something is wrong.
3024
+ */
3025
+ list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) {
3026
+ if (entry->class == hlock_class(prev)) {
3027
+ if (distance == 1)
3028
+ entry->distance = 1;
3029
+ entry->dep |= calc_depb(prev, next);
3030
+ return 1;
3031
+ }
3032
+ }
3033
+
3034
+ /* <prev> is not found in <next>::locks_before */
3035
+ return 0;
18973036 }
18983037 }
18993038
3039
+#ifdef CONFIG_LOCKDEP_SMALL
19003040 /*
19013041 * Is the <prev> -> <next> link redundant?
19023042 */
1903
- this.class = hlock_class(prev);
1904
- this.parent = NULL;
1905
- ret = check_redundant(&this, hlock_class(next), &target_entry);
1906
- if (!ret) {
1907
- debug_atomic_inc(nr_redundant);
1908
- return 2;
1909
- }
1910
- if (ret < 0)
1911
- return print_bfs_bug(ret);
1912
-
1913
-
1914
- if (!trace->entries && !save(trace))
3043
+ ret = check_redundant(prev, next);
3044
+ if (bfs_error(ret))
19153045 return 0;
3046
+ else if (ret == BFS_RMATCH)
3047
+ return 2;
3048
+#endif
3049
+
3050
+ if (!*trace) {
3051
+ *trace = save_trace();
3052
+ if (!*trace)
3053
+ return 0;
3054
+ }
19163055
19173056 /*
19183057 * Ok, all validations passed, add the new lock
19193058 * to the previous lock's dependency list:
19203059 */
1921
- ret = add_lock_to_list(hlock_class(next),
3060
+ ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
19223061 &hlock_class(prev)->locks_after,
1923
- next->acquire_ip, distance, trace);
3062
+ next->acquire_ip, distance,
3063
+ calc_dep(prev, next),
3064
+ *trace);
19243065
19253066 if (!ret)
19263067 return 0;
19273068
1928
- ret = add_lock_to_list(hlock_class(prev),
3069
+ ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
19293070 &hlock_class(next)->locks_before,
1930
- next->acquire_ip, distance, trace);
3071
+ next->acquire_ip, distance,
3072
+ calc_depb(prev, next),
3073
+ *trace);
19313074 if (!ret)
19323075 return 0;
19333076
....@@ -1943,14 +3086,9 @@
19433086 static int
19443087 check_prevs_add(struct task_struct *curr, struct held_lock *next)
19453088 {
3089
+ struct lock_trace *trace = NULL;
19463090 int depth = curr->lockdep_depth;
19473091 struct held_lock *hlock;
1948
- struct stack_trace trace = {
1949
- .nr_entries = 0,
1950
- .max_entries = 0,
1951
- .entries = NULL,
1952
- .skip = 0,
1953
- };
19543092
19553093 /*
19563094 * Debugging checks.
....@@ -1968,15 +3106,11 @@
19683106 goto out_bug;
19693107
19703108 for (;;) {
1971
- int distance = curr->lockdep_depth - depth + 1;
3109
+ u16 distance = curr->lockdep_depth - depth + 1;
19723110 hlock = curr->held_locks + depth - 1;
19733111
1974
- /*
1975
- * Only non-recursive-read entries get new dependencies
1976
- * added:
1977
- */
1978
- if (hlock->read != 2 && hlock->check) {
1979
- int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
3112
+ if (hlock->check) {
3113
+ int ret = check_prev_add(curr, hlock, next, distance, &trace);
19803114 if (!ret)
19813115 return 0;
19823116
....@@ -2018,14 +3152,244 @@
20183152 return 0;
20193153 }
20203154
2021
-unsigned long nr_lock_chains;
20223155 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2023
-int nr_chain_hlocks;
3156
+static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
20243157 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
3158
+unsigned long nr_zapped_lock_chains;
3159
+unsigned int nr_free_chain_hlocks; /* Free chain_hlocks in buckets */
3160
+unsigned int nr_lost_chain_hlocks; /* Lost chain_hlocks */
3161
+unsigned int nr_large_chain_blocks; /* size > MAX_CHAIN_BUCKETS */
3162
+
3163
+/*
3164
+ * The first 2 chain_hlocks entries in the chain block in the bucket
3165
+ * list contain the following metadata:
3166
+ *
3167
+ * entry[0]:
3168
+ * Bit 15 - always set to 1 (it is not a class index)
3169
+ * Bits 0-14 - upper 15 bits of the next block index
3170
+ * entry[1] - lower 16 bits of next block index
3171
+ *
3172
+ * A next block index of all 1 bits means it is the end of the list.
3173
+ *
3174
+ * On the unsized bucket (bucket-0), the 3rd and 4th entries contain
3175
+ * the chain block size:
3176
+ *
3177
+ * entry[2] - upper 16 bits of the chain block size
3178
+ * entry[3] - lower 16 bits of the chain block size
3179
+ */
3180
+#define MAX_CHAIN_BUCKETS 16
3181
+#define CHAIN_BLK_FLAG (1U << 15)
3182
+#define CHAIN_BLK_LIST_END 0xFFFFU
3183
+
3184
+static int chain_block_buckets[MAX_CHAIN_BUCKETS];
3185
+
3186
+static inline int size_to_bucket(int size)
3187
+{
3188
+ if (size > MAX_CHAIN_BUCKETS)
3189
+ return 0;
3190
+
3191
+ return size - 1;
3192
+}
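
size_to_bucket() maps a request of 1..MAX_CHAIN_BUCKETS entries to its own fixed-size free list (bucket size-1) and anything larger to the variable-sized bucket 0. A quick standalone illustration of the mapping, with the bucket count hard-coded to 16 as above:

#include <stdio.h>

#define TOY_MAX_CHAIN_BUCKETS 16

static int toy_size_to_bucket(int size)
{
	if (size > TOY_MAX_CHAIN_BUCKETS)
		return 0;	/* oversized blocks go to the variable-sized bucket */
	return size - 1;	/* size 1..16 -> bucket size-1 */
}

int main(void)
{
	int sizes[] = { 2, 5, 16, 17, 100 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %3d -> bucket %d\n", sizes[i],
		       toy_size_to_bucket(sizes[i]));
	return 0;
}
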
3193
+
3194
+/*
3195
+ * Iterate all the chain blocks in a bucket.
3196
+ */
3197
+#define for_each_chain_block(bucket, prev, curr) \
3198
+ for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3199
+ (curr) >= 0; \
3200
+ (prev) = (curr), (curr) = chain_block_next(curr))
3201
+
3202
+/*
3203
+ * next block or -1
3204
+ */
3205
+static inline int chain_block_next(int offset)
3206
+{
3207
+ int next = chain_hlocks[offset];
3208
+
3209
+ WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
3210
+
3211
+ if (next == CHAIN_BLK_LIST_END)
3212
+ return -1;
3213
+
3214
+ next &= ~CHAIN_BLK_FLAG;
3215
+ next <<= 16;
3216
+ next |= chain_hlocks[offset + 1];
3217
+
3218
+ return next;
3219
+}
3220
+
3221
+/*
3222
+ * bucket-0 only
3223
+ */
3224
+static inline int chain_block_size(int offset)
3225
+{
3226
+ return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
3227
+}
3228
+
3229
+static inline void init_chain_block(int offset, int next, int bucket, int size)
3230
+{
3231
+ chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
3232
+ chain_hlocks[offset + 1] = (u16)next;
3233
+
3234
+ if (size && !bucket) {
3235
+ chain_hlocks[offset + 2] = size >> 16;
3236
+ chain_hlocks[offset + 3] = (u16)size;
3237
+ }
3238
+}
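
The metadata layout described above (bit 15 of entry[0] as the "not a class index" escape, the next-block index split across entry[0]/entry[1], and the block size in entry[2]/entry[3] for bucket 0) can be exercised in isolation. A standalone sketch of the same pack/unpack using a local u16 array in place of chain_hlocks[]:

#include <stdint.h>
#include <stdio.h>

#define TOY_BLK_FLAG     (1U << 15)
#define TOY_BLK_LIST_END 0xFFFFU

static uint16_t toy_hlocks[64];

/* entry[0]: flag | next>>16, entry[1]: low 16 bits, entry[2..3]: size */
static void toy_init_block(int offset, int next, int bucket, int size)
{
	toy_hlocks[offset]     = (uint16_t)((next >> 16) | TOY_BLK_FLAG);
	toy_hlocks[offset + 1] = (uint16_t)next;
	if (size && !bucket) {
		toy_hlocks[offset + 2] = (uint16_t)(size >> 16);
		toy_hlocks[offset + 3] = (uint16_t)size;
	}
}

static int toy_block_next(int offset)
{
	int next = toy_hlocks[offset];

	if (next == TOY_BLK_LIST_END)
		return -1;	/* all 1 bits: end of the free list */
	return ((next & ~TOY_BLK_FLAG) << 16) | toy_hlocks[offset + 1];
}

static int toy_block_size(int offset)
{
	return (toy_hlocks[offset + 2] << 16) | toy_hlocks[offset + 3];
}

int main(void)
{
	/* a bucket-0 block at offset 0, 70000 entries long, no successor */
	toy_init_block(0, -1, 0, 70000);
	printf("next=%d size=%d\n", toy_block_next(0), toy_block_size(0));
	return 0;
}
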
3239
+
3240
+static inline void add_chain_block(int offset, int size)
3241
+{
3242
+ int bucket = size_to_bucket(size);
3243
+ int next = chain_block_buckets[bucket];
3244
+ int prev, curr;
3245
+
3246
+ if (unlikely(size < 2)) {
3247
+ /*
3248
+ * We can't store single entries on the freelist. Leak them.
3249
+ *
3250
+ * One possible way out would be to uniquely mark them, other
3251
+ * than with CHAIN_BLK_FLAG, such that we can recover them when
3252
+ * the block before it is re-added.
3253
+ */
3254
+ if (size)
3255
+ nr_lost_chain_hlocks++;
3256
+ return;
3257
+ }
3258
+
3259
+ nr_free_chain_hlocks += size;
3260
+ if (!bucket) {
3261
+ nr_large_chain_blocks++;
3262
+
3263
+ /*
3264
+ * Variable sized, sort large to small.
3265
+ */
3266
+ for_each_chain_block(0, prev, curr) {
3267
+ if (size >= chain_block_size(curr))
3268
+ break;
3269
+ }
3270
+ init_chain_block(offset, curr, 0, size);
3271
+ if (prev < 0)
3272
+ chain_block_buckets[0] = offset;
3273
+ else
3274
+ init_chain_block(prev, offset, 0, 0);
3275
+ return;
3276
+ }
3277
+ /*
3278
+ * Fixed size, add to head.
3279
+ */
3280
+ init_chain_block(offset, next, bucket, size);
3281
+ chain_block_buckets[bucket] = offset;
3282
+}
3283
+
3284
+/*
3285
+ * Only the first block in the list can be deleted.
3286
+ *
3287
+ * For the variable size bucket[0], the first block (the largest one) is
3288
+ * returned, broken up and put back into the pool. So if a chain block of
3289
+ * length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be
3290
+ * queued up after the primordial chain block and never be used until the
3291
+ * hlock entries in the primordial chain block are almost used up. That
3292
+ * causes fragmentation and reduces allocation efficiency. That can be
3293
+ * monitored by looking at the "large chain blocks" number in lockdep_stats.
3294
+ */
3295
+static inline void del_chain_block(int bucket, int size, int next)
3296
+{
3297
+ nr_free_chain_hlocks -= size;
3298
+ chain_block_buckets[bucket] = next;
3299
+
3300
+ if (!bucket)
3301
+ nr_large_chain_blocks--;
3302
+}
3303
+
3304
+static void init_chain_block_buckets(void)
3305
+{
3306
+ int i;
3307
+
3308
+ for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
3309
+ chain_block_buckets[i] = -1;
3310
+
3311
+ add_chain_block(0, ARRAY_SIZE(chain_hlocks));
3312
+}
3313
+
3314
+/*
3315
+ * Return offset of a chain block of the right size or -1 if not found.
3316
+ *
3317
+ * Fairly simple worst-fit allocator with the addition of a number of size
3318
+ * specific free lists.
3319
+ */
3320
+static int alloc_chain_hlocks(int req)
3321
+{
3322
+ int bucket, curr, size;
3323
+
3324
+ /*
3325
+ * We rely on the MSB to act as an escape bit to denote freelist
3326
+ * pointers. Make sure this bit isn't set in 'normal' class_idx usage.
3327
+ */
3328
+ BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG);
3329
+
3330
+ init_data_structures_once();
3331
+
3332
+ if (nr_free_chain_hlocks < req)
3333
+ return -1;
3334
+
3335
+ /*
3336
+ * We require a minimum of 2 (u16) entries to encode a freelist
3337
+ * 'pointer'.
3338
+ */
3339
+ req = max(req, 2);
3340
+ bucket = size_to_bucket(req);
3341
+ curr = chain_block_buckets[bucket];
3342
+
3343
+ if (bucket) {
3344
+ if (curr >= 0) {
3345
+ del_chain_block(bucket, req, chain_block_next(curr));
3346
+ return curr;
3347
+ }
3348
+ /* Try bucket 0 */
3349
+ curr = chain_block_buckets[0];
3350
+ }
3351
+
3352
+ /*
3353
+ * The variable sized freelist is sorted by size; the first entry is
3354
+ * the largest. Use it if it fits.
3355
+ */
3356
+ if (curr >= 0) {
3357
+ size = chain_block_size(curr);
3358
+ if (likely(size >= req)) {
3359
+ del_chain_block(0, size, chain_block_next(curr));
3360
+ add_chain_block(curr + req, size - req);
3361
+ return curr;
3362
+ }
3363
+ }
3364
+
3365
+ /*
3366
+ * Last resort, split a block in a larger sized bucket.
3367
+ */
3368
+ for (size = MAX_CHAIN_BUCKETS; size > req; size--) {
3369
+ bucket = size_to_bucket(size);
3370
+ curr = chain_block_buckets[bucket];
3371
+ if (curr < 0)
3372
+ continue;
3373
+
3374
+ del_chain_block(bucket, size, chain_block_next(curr));
3375
+ add_chain_block(curr + req, size - req);
3376
+ return curr;
3377
+ }
3378
+
3379
+ return -1;
3380
+}
3381
+
3382
+static inline void free_chain_hlocks(int base, int size)
3383
+{
3384
+ add_chain_block(base, max(size, 2));
3385
+}
20253386
20263387 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
20273388 {
2028
- return lock_classes + chain_hlocks[chain->base + i];
3389
+ u16 chain_hlock = chain_hlocks[chain->base + i];
3390
+ unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
3391
+
3392
+ return lock_classes + class_idx;
20293393 }
20303394
20313395 /*
....@@ -2051,12 +3415,12 @@
20513415 /*
20523416 * Returns the next chain_key iteration
20533417 */
2054
-static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
3418
+static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key)
20553419 {
2056
- u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
3420
+ u64 new_chain_key = iterate_chain_key(chain_key, hlock_id);
20573421
2058
- printk(" class_idx:%d -> chain_key:%016Lx",
2059
- class_idx,
3422
+ printk(" hlock_id:%d -> chain_key:%016Lx",
3423
+ (unsigned int)hlock_id,
20603424 (unsigned long long)new_chain_key);
20613425 return new_chain_key;
20623426 }
....@@ -2065,34 +3429,35 @@
20653429 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
20663430 {
20673431 struct held_lock *hlock;
2068
- u64 chain_key = 0;
3432
+ u64 chain_key = INITIAL_CHAIN_KEY;
20693433 int depth = curr->lockdep_depth;
2070
- int i;
3434
+ int i = get_first_held_lock(curr, hlock_next);
20713435
2072
- printk("depth: %u\n", depth + 1);
2073
- for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
3436
+ printk("depth: %u (irq_context %u)\n", depth - i + 1,
3437
+ hlock_next->irq_context);
3438
+ for (; i < depth; i++) {
20743439 hlock = curr->held_locks + i;
2075
- chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
3440
+ chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key);
20763441
20773442 print_lock(hlock);
20783443 }
20793444
2080
- print_chain_key_iteration(hlock_next->class_idx, chain_key);
3445
+ print_chain_key_iteration(hlock_id(hlock_next), chain_key);
20813446 print_lock(hlock_next);
20823447 }
20833448
20843449 static void print_chain_keys_chain(struct lock_chain *chain)
20853450 {
20863451 int i;
2087
- u64 chain_key = 0;
2088
- int class_id;
3452
+ u64 chain_key = INITIAL_CHAIN_KEY;
3453
+ u16 hlock_id;
20893454
20903455 printk("depth: %u\n", chain->depth);
20913456 for (i = 0; i < chain->depth; i++) {
2092
- class_id = chain_hlocks[chain->base + i];
2093
- chain_key = print_chain_key_iteration(class_id + 1, chain_key);
3457
+ hlock_id = chain_hlocks[chain->base + i];
3458
+ chain_key = print_chain_key_iteration(hlock_id, chain_key);
20943459
2095
- print_lock_name(lock_classes + class_id);
3460
+ print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
20963461 printk("\n");
20973462 }
20983463 }
....@@ -2141,7 +3506,7 @@
21413506 }
21423507
21433508 for (j = 0; j < chain->depth - 1; j++, i++) {
2144
- id = curr->held_locks[i].class_idx - 1;
3509
+ id = hlock_id(&curr->held_locks[i]);
21453510
21463511 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
21473512 print_collision(curr, hlock, chain);
....@@ -2153,73 +3518,30 @@
21533518 }
21543519
21553520 /*
2156
- * This is for building a chain between just two different classes,
2157
- * instead of adding a new hlock upon current, which is done by
2158
- * add_chain_cache().
2159
- *
2160
- * This can be called in any context with two classes, while
2161
- * add_chain_cache() must be done within the lock owener's context
2162
- * since it uses hlock which might be racy in another context.
3521
+ * Given an index that is >= -1, return the index of the next lock chain.
3522
+ * Return -2 if there is no next lock chain.
21633523 */
2164
-static inline int add_chain_cache_classes(unsigned int prev,
2165
- unsigned int next,
2166
- unsigned int irq_context,
2167
- u64 chain_key)
3524
+long lockdep_next_lockchain(long i)
21683525 {
2169
- struct hlist_head *hash_head = chainhashentry(chain_key);
2170
- struct lock_chain *chain;
3526
+ i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
3527
+ return i < ARRAY_SIZE(lock_chains) ? i : -2;
3528
+}
21713529
2172
- /*
2173
- * Allocate a new chain entry from the static array, and add
2174
- * it to the hash:
2175
- */
3530
+unsigned long lock_chain_count(void)
3531
+{
3532
+ return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
3533
+}
21763534
2177
- /*
2178
- * We might need to take the graph lock, ensure we've got IRQs
2179
- * disabled to make this an IRQ-safe lock.. for recursion reasons
2180
- * lockdep won't complain about its own locking errors.
2181
- */
2182
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2183
- return 0;
3535
+/* Must be called with the graph lock held. */
3536
+static struct lock_chain *alloc_lock_chain(void)
3537
+{
3538
+ int idx = find_first_zero_bit(lock_chains_in_use,
3539
+ ARRAY_SIZE(lock_chains));
21843540
2185
- if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2186
- if (!debug_locks_off_graph_unlock())
2187
- return 0;
2188
-
2189
- print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2190
- dump_stack();
2191
- return 0;
2192
- }
2193
-
2194
- chain = lock_chains + nr_lock_chains++;
2195
- chain->chain_key = chain_key;
2196
- chain->irq_context = irq_context;
2197
- chain->depth = 2;
2198
- if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2199
- chain->base = nr_chain_hlocks;
2200
- nr_chain_hlocks += chain->depth;
2201
- chain_hlocks[chain->base] = prev - 1;
2202
- chain_hlocks[chain->base + 1] = next -1;
2203
- }
2204
-#ifdef CONFIG_DEBUG_LOCKDEP
2205
- /*
2206
- * Important for check_no_collision().
2207
- */
2208
- else {
2209
- if (!debug_locks_off_graph_unlock())
2210
- return 0;
2211
-
2212
- print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2213
- dump_stack();
2214
- return 0;
2215
- }
2216
-#endif
2217
-
2218
- hlist_add_head_rcu(&chain->entry, hash_head);
2219
- debug_atomic_inc(chain_lookup_misses);
2220
- inc_chains();
2221
-
2222
- return 1;
3541
+ if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
3542
+ return NULL;
3543
+ __set_bit(idx, lock_chains_in_use);
3544
+ return lock_chains + idx;
22233545 }
22243546
22253547 /*
....@@ -2233,25 +3555,20 @@
22333555 struct held_lock *hlock,
22343556 u64 chain_key)
22353557 {
2236
- struct lock_class *class = hlock_class(hlock);
22373558 struct hlist_head *hash_head = chainhashentry(chain_key);
22383559 struct lock_chain *chain;
22393560 int i, j;
22403561
22413562 /*
2242
- * Allocate a new chain entry from the static array, and add
2243
- * it to the hash:
2244
- */
2245
-
2246
- /*
2247
- * We might need to take the graph lock, ensure we've got IRQs
3563
+ * The caller must hold the graph lock, ensure we've got IRQs
22483564 * disabled to make this an IRQ-safe lock.. for recursion reasons
22493565 * lockdep won't complain about its own locking errors.
22503566 */
2251
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3567
+ if (lockdep_assert_locked())
22523568 return 0;
22533569
2254
- if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
3570
+ chain = alloc_lock_chain();
3571
+ if (!chain) {
22553572 if (!debug_locks_off_graph_unlock())
22563573 return 0;
22573574
....@@ -2259,7 +3576,6 @@
22593576 dump_stack();
22603577 return 0;
22613578 }
2262
- chain = lock_chains + nr_lock_chains++;
22633579 chain->chain_key = chain_key;
22643580 chain->irq_context = hlock->irq_context;
22653581 i = get_first_held_lock(curr, hlock);
....@@ -2269,23 +3585,8 @@
22693585 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
22703586 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
22713587
2272
- if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2273
- chain->base = nr_chain_hlocks;
2274
- for (j = 0; j < chain->depth - 1; j++, i++) {
2275
- int lock_id = curr->held_locks[i].class_idx - 1;
2276
- chain_hlocks[chain->base + j] = lock_id;
2277
- }
2278
- chain_hlocks[chain->base + j] = class - lock_classes;
2279
- }
2280
-
2281
- if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2282
- nr_chain_hlocks += chain->depth;
2283
-
2284
-#ifdef CONFIG_DEBUG_LOCKDEP
2285
- /*
2286
- * Important for check_no_collision().
2287
- */
2288
- if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
3588
+ j = alloc_chain_hlocks(chain->depth);
3589
+ if (j < 0) {
22893590 if (!debug_locks_off_graph_unlock())
22903591 return 0;
22913592
....@@ -2293,29 +3594,32 @@
22933594 dump_stack();
22943595 return 0;
22953596 }
2296
-#endif
22973597
3598
+ chain->base = j;
3599
+ for (j = 0; j < chain->depth - 1; j++, i++) {
3600
+ int lock_id = hlock_id(curr->held_locks + i);
3601
+
3602
+ chain_hlocks[chain->base + j] = lock_id;
3603
+ }
3604
+ chain_hlocks[chain->base + j] = hlock_id(hlock);
22983605 hlist_add_head_rcu(&chain->entry, hash_head);
22993606 debug_atomic_inc(chain_lookup_misses);
2300
- inc_chains();
3607
+ inc_chains(chain->irq_context);
23013608
23023609 return 1;
23033610 }
23043611
23053612 /*
2306
- * Look up a dependency chain.
3613
+ * Look up a dependency chain. Must be called with either the graph lock or
3614
+ * the RCU read lock held.
23073615 */
23083616 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
23093617 {
23103618 struct hlist_head *hash_head = chainhashentry(chain_key);
23113619 struct lock_chain *chain;
23123620
2313
- /*
2314
- * We can walk it lock-free, because entries only get added
2315
- * to the hash:
2316
- */
23173621 hlist_for_each_entry_rcu(chain, hash_head, entry) {
2318
- if (chain->chain_key == chain_key) {
3622
+ if (READ_ONCE(chain->chain_key) == chain_key) {
23193623 debug_atomic_inc(chain_lookup_hits);
23203624 return chain;
23213625 }
....@@ -2374,8 +3678,9 @@
23743678 return 1;
23753679 }
23763680
2377
-static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2378
- struct held_lock *hlock, int chain_head, u64 chain_key)
3681
+static int validate_chain(struct task_struct *curr,
3682
+ struct held_lock *hlock,
3683
+ int chain_head, u64 chain_key)
23793684 {
23803685 /*
23813686 * Trylock needs to maintain the stack of held locks, but it
....@@ -2396,25 +3701,28 @@
23963701 * - is softirq-safe, if this lock is hardirq-unsafe
23973702 *
23983703 * And check whether the new lock's dependency graph
2399
- * could lead back to the previous lock.
3704
+ * could lead back to the previous lock:
24003705 *
2401
- * any of these scenarios could lead to a deadlock. If
2402
- * All validations
3706
+ * - within the current held-lock stack
3707
+ * - across our accumulated lock dependency records
3708
+ *
3709
+ * any of these scenarios could lead to a deadlock.
24033710 */
2404
- int ret = check_deadlock(curr, hlock, lock, hlock->read);
3711
+ /*
3712
+ * The simple case: does the current task hold the same lock
3713
+ * already?
3714
+ */
3715
+ int ret = check_deadlock(curr, hlock);
24053716
24063717 if (!ret)
24073718 return 0;
24083719 /*
2409
- * Mark recursive read, as we jump over it when
2410
- * building dependencies (just like we jump over
2411
- * trylock entries):
2412
- */
2413
- if (ret == 2)
2414
- hlock->read = 2;
2415
- /*
24163720 * Add dependency only if this lock is not the head
2417
- * of the chain, and if it's not a secondary read-lock:
3721
+ * of the chain, and if the new lock introduces no more
3722
+ * lock dependency (because we already hold a lock with the
3723
+ * same lock class) nor deadlock (because the nest_lock
3724
+ * serializes nesting locks), see the comments for
3725
+ * check_deadlock().
24183726 */
24193727 if (!chain_head && ret != 2) {
24203728 if (!check_prevs_add(curr, hlock))
....@@ -2432,12 +3740,14 @@
24323740 }
24333741 #else
24343742 static inline int validate_chain(struct task_struct *curr,
2435
- struct lockdep_map *lock, struct held_lock *hlock,
2436
- int chain_head, u64 chain_key)
3743
+ struct held_lock *hlock,
3744
+ int chain_head, u64 chain_key)
24373745 {
24383746 return 1;
24393747 }
2440
-#endif
3748
+
3749
+static void init_chain_block_buckets(void) { }
3750
+#endif /* CONFIG_PROVE_LOCKING */
24413751
24423752 /*
24433753 * We are building curr_chain_key incrementally, so double-check
....@@ -2448,7 +3758,7 @@
24483758 #ifdef CONFIG_DEBUG_LOCKDEP
24493759 struct held_lock *hlock, *prev_hlock = NULL;
24503760 unsigned int i;
2451
- u64 chain_key = 0;
3761
+ u64 chain_key = INITIAL_CHAIN_KEY;
24523762
24533763 for (i = 0; i < curr->lockdep_depth; i++) {
24543764 hlock = curr->held_locks + i;
....@@ -2464,16 +3774,18 @@
24643774 (unsigned long long)hlock->prev_chain_key);
24653775 return;
24663776 }
3777
+
24673778 /*
2468
- * Whoops ran out of static storage again?
3779
+ * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
3780
+ * it registered lock class index?
24693781 */
2470
- if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
3782
+ if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
24713783 return;
24723784
24733785 if (prev_hlock && (prev_hlock->irq_context !=
24743786 hlock->irq_context))
2475
- chain_key = 0;
2476
- chain_key = iterate_chain_key(chain_key, hlock->class_idx);
3787
+ chain_key = INITIAL_CHAIN_KEY;
3788
+ chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
24773789 prev_hlock = hlock;
24783790 }
24793791 if (chain_key != curr->curr_chain_key) {
....@@ -2490,8 +3802,11 @@
24903802 #endif
24913803 }
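
check_chain_key() above recomputes the chain key from scratch by folding each held lock's id into a running hash, restarting from INITIAL_CHAIN_KEY whenever the irq context changes between two held locks. A simplified model of that recomputation, assuming a made-up 64-bit mix function in place of iterate_chain_key() and a sentinel initial value:

#include <stdio.h>
#include <stdint.h>

#define TOY_INITIAL_CHAIN_KEY ((uint64_t)-1)

/* Stand-in for iterate_chain_key(); the kernel uses a jhash-based mix. */
static uint64_t mix_key(uint64_t key, uint64_t id)
{
	key ^= id + 0x9e3779b97f4a7c15ULL;
	key *= 0xff51afd7ed558ccdULL;
	return key;
}

struct toy_hlock { int id; int irq_context; };

static uint64_t recompute_chain_key(const struct toy_hlock *locks, int depth)
{
	uint64_t key = TOY_INITIAL_CHAIN_KEY;
	int prev_ctx = -1;

	for (int i = 0; i < depth; i++) {
		if (prev_ctx >= 0 && locks[i].irq_context != prev_ctx)
			key = TOY_INITIAL_CHAIN_KEY;	/* context change starts a new chain */
		key = mix_key(key, locks[i].id);
		prev_ctx = locks[i].irq_context;
	}
	return key;
}

int main(void)
{
	struct toy_hlock held[] = { {3, 0}, {7, 0}, {11, 1} };

	printf("chain key: %llx\n",
	       (unsigned long long)recompute_chain_key(held, 3));
	return 0;
}
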
24923804
2493
-static void
2494
-print_usage_bug_scenario(struct held_lock *lock)
3805
+#ifdef CONFIG_PROVE_LOCKING
3806
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
3807
+ enum lock_usage_bit new_bit);
3808
+
3809
+static void print_usage_bug_scenario(struct held_lock *lock)
24953810 {
24963811 struct lock_class *class = hlock_class(lock);
24973812
....@@ -2508,12 +3823,12 @@
25083823 printk("\n *** DEADLOCK ***\n\n");
25093824 }
25103825
2511
-static int
3826
+static void
25123827 print_usage_bug(struct task_struct *curr, struct held_lock *this,
25133828 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
25143829 {
2515
- if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2516
- return 0;
3830
+ if (!debug_locks_off() || debug_locks_silent)
3831
+ return;
25173832
25183833 pr_warn("\n");
25193834 pr_warn("================================\n");
....@@ -2526,14 +3841,14 @@
25263841
25273842 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
25283843 curr->comm, task_pid_nr(curr),
2529
- trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2530
- trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2531
- trace_hardirqs_enabled(curr),
2532
- trace_softirqs_enabled(curr));
3844
+ lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
3845
+ lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3846
+ lockdep_hardirqs_enabled(),
3847
+ lockdep_softirqs_enabled(curr));
25333848 print_lock(this);
25343849
25353850 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2536
- print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
3851
+ print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
25373852
25383853 print_irqtrace_events(curr);
25393854 pr_warn("\nother info that might help us debug this:\n");
....@@ -2543,8 +3858,6 @@
25433858
25443859 pr_warn("\nstack backtrace:\n");
25453860 dump_stack();
2546
-
2547
- return 0;
25483861 }
25493862
25503863 /*
....@@ -2554,20 +3867,19 @@
25543867 valid_state(struct task_struct *curr, struct held_lock *this,
25553868 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
25563869 {
2557
- if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2558
- return print_usage_bug(curr, this, bad_bit, new_bit);
3870
+ if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3871
+ graph_unlock();
3872
+ print_usage_bug(curr, this, bad_bit, new_bit);
3873
+ return 0;
3874
+ }
25593875 return 1;
25603876 }
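
valid_state() above boils down to a single bitmask test: the new usage is rejected if the class already carries the conflicting usage bit. A small sketch of that test, assuming illustrative bit numbers rather than the real enum lock_usage_bit values:

#include <stdio.h>

/* Illustrative usage bits only; the real values come from lockdep_states.h. */
enum { USED_IN_HARDIRQ = 0, USED_IN_SOFTIRQ = 1 };

/* 1 = transition is valid, 0 = the conflicting bit is already recorded. */
static int valid_state(unsigned int usage_mask, int bad_bit)
{
	return !(usage_mask & (1U << bad_bit));
}

int main(void)
{
	unsigned int mask = 1U << USED_IN_HARDIRQ; /* class was used in hardirq */

	/* Enabling hardirqs with this lock held conflicts with USED_IN_HARDIRQ. */
	printf("hardirq-enable allowed? %d\n", valid_state(mask, USED_IN_HARDIRQ));
	/* No softirq usage was ever recorded, so that direction is still fine. */
	printf("softirq-enable allowed? %d\n", valid_state(mask, USED_IN_SOFTIRQ));
	return 0;
}
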
25613877
2562
-static int mark_lock(struct task_struct *curr, struct held_lock *this,
2563
- enum lock_usage_bit new_bit);
2564
-
2565
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
25663878
25673879 /*
25683880 * print irq inversion bug:
25693881 */
2570
-static int
3882
+static void
25713883 print_irq_inversion_bug(struct task_struct *curr,
25723884 struct lock_list *root, struct lock_list *other,
25733885 struct held_lock *this, int forwards,
....@@ -2578,7 +3890,7 @@
25783890 int depth;
25793891
25803892 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2581
- return 0;
3893
+ return;
25823894
25833895 pr_warn("\n");
25843896 pr_warn("========================================================\n");
....@@ -2618,14 +3930,13 @@
26183930 lockdep_print_held_locks(curr);
26193931
26203932 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2621
- if (!save_trace(&root->trace))
2622
- return 0;
3933
+ root->trace = save_trace();
3934
+ if (!root->trace)
3935
+ return;
26233936 print_shortest_lock_dependencies(other, root);
26243937
26253938 pr_warn("\nstack backtrace:\n");
26263939 dump_stack();
2627
-
2628
- return 0;
26293940 }
26303941
26313942 /*
....@@ -2634,22 +3945,33 @@
26343945 */
26353946 static int
26363947 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2637
- enum lock_usage_bit bit, const char *irqclass)
3948
+ enum lock_usage_bit bit)
26383949 {
2639
- int ret;
3950
+ enum bfs_result ret;
26403951 struct lock_list root;
2641
- struct lock_list *uninitialized_var(target_entry);
3952
+ struct lock_list *target_entry;
3953
+ enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
3954
+ unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
26423955
2643
- root.parent = NULL;
2644
- root.class = hlock_class(this);
2645
- ret = find_usage_forwards(&root, bit, &target_entry);
2646
- if (ret < 0)
2647
- return print_bfs_bug(ret);
2648
- if (ret == 1)
2649
- return ret;
3956
+ bfs_init_root(&root, this);
3957
+ ret = find_usage_forwards(&root, usage_mask, &target_entry);
3958
+ if (bfs_error(ret)) {
3959
+ print_bfs_bug(ret);
3960
+ return 0;
3961
+ }
3962
+ if (ret == BFS_RNOMATCH)
3963
+ return 1;
26503964
2651
- return print_irq_inversion_bug(curr, &root, target_entry,
2652
- this, 1, irqclass);
3965
+ /* Check whether write or read usage is the match */
3966
+ if (target_entry->class->usage_mask & lock_flag(bit)) {
3967
+ print_irq_inversion_bug(curr, &root, target_entry,
3968
+ this, 1, state_name(bit));
3969
+ } else {
3970
+ print_irq_inversion_bug(curr, &root, target_entry,
3971
+ this, 1, state_name(read_bit));
3972
+ }
3973
+
3974
+ return 0;
26533975 }
26543976
26553977 /*
....@@ -2658,39 +3980,52 @@
26583980 */
26593981 static int
26603982 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2661
- enum lock_usage_bit bit, const char *irqclass)
3983
+ enum lock_usage_bit bit)
26623984 {
2663
- int ret;
3985
+ enum bfs_result ret;
26643986 struct lock_list root;
2665
- struct lock_list *uninitialized_var(target_entry);
3987
+ struct lock_list *target_entry;
3988
+ enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
3989
+ unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
26663990
2667
- root.parent = NULL;
2668
- root.class = hlock_class(this);
2669
- ret = find_usage_backwards(&root, bit, &target_entry);
2670
- if (ret < 0)
2671
- return print_bfs_bug(ret);
2672
- if (ret == 1)
2673
- return ret;
3991
+ bfs_init_rootb(&root, this);
3992
+ ret = find_usage_backwards(&root, usage_mask, &target_entry);
3993
+ if (bfs_error(ret)) {
3994
+ print_bfs_bug(ret);
3995
+ return 0;
3996
+ }
3997
+ if (ret == BFS_RNOMATCH)
3998
+ return 1;
26743999
2675
- return print_irq_inversion_bug(curr, &root, target_entry,
2676
- this, 0, irqclass);
4000
+ /* Check whether write or read usage is the match */
4001
+ if (target_entry->class->usage_mask & lock_flag(bit)) {
4002
+ print_irq_inversion_bug(curr, &root, target_entry,
4003
+ this, 0, state_name(bit));
4004
+ } else {
4005
+ print_irq_inversion_bug(curr, &root, target_entry,
4006
+ this, 0, state_name(read_bit));
4007
+ }
4008
+
4009
+ return 0;
26774010 }
26784011
26794012 void print_irqtrace_events(struct task_struct *curr)
26804013 {
2681
- printk("irq event stamp: %u\n", curr->irq_events);
4014
+ const struct irqtrace_events *trace = &curr->irqtrace;
4015
+
4016
+ printk("irq event stamp: %u\n", trace->irq_events);
26824017 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
2683
- curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2684
- (void *)curr->hardirq_enable_ip);
4018
+ trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
4019
+ (void *)trace->hardirq_enable_ip);
26854020 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
2686
- curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2687
- (void *)curr->hardirq_disable_ip);
4021
+ trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
4022
+ (void *)trace->hardirq_disable_ip);
26884023 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
2689
- curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2690
- (void *)curr->softirq_enable_ip);
4024
+ trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
4025
+ (void *)trace->softirq_enable_ip);
26914026 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
2692
- curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2693
- (void *)curr->softirq_disable_ip);
4027
+ trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
4028
+ (void *)trace->softirq_disable_ip);
26944029 }
26954030
26964031 static int HARDIRQ_verbose(struct lock_class *class)
....@@ -2709,8 +4044,6 @@
27094044 return 0;
27104045 }
27114046
2712
-#define STRICT_READ_CHECKS 1
2713
-
27144047 static int (*state_verbose_f[])(struct lock_class *class) = {
27154048 #define LOCKDEP_STATE(__STATE) \
27164049 __STATE##_verbose,
....@@ -2721,7 +4054,7 @@
27214054 static inline int state_verbose(enum lock_usage_bit bit,
27224055 struct lock_class *class)
27234056 {
2724
- return state_verbose_f[bit >> 2](class);
4057
+ return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
27254058 }
27264059
27274060 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
....@@ -2732,18 +4065,8 @@
27324065 enum lock_usage_bit new_bit)
27334066 {
27344067 int excl_bit = exclusive_bit(new_bit);
2735
- int read = new_bit & 1;
2736
- int dir = new_bit & 2;
2737
-
2738
- /*
2739
- * mark USED_IN has to look forwards -- to ensure no dependency
2740
- * has ENABLED state, which would allow recursion deadlocks.
2741
- *
2742
- * mark ENABLED has to look backwards -- to ensure no dependee
2743
- * has USED_IN state, which, again, would allow recursion deadlocks.
2744
- */
2745
- check_usage_f usage = dir ?
2746
- check_usage_backwards : check_usage_forwards;
4068
+ int read = new_bit & LOCK_USAGE_READ_MASK;
4069
+ int dir = new_bit & LOCK_USAGE_DIR_MASK;
27474070
27484071 /*
27494072 * Validate that this particular lock does not have conflicting
....@@ -2753,23 +4076,30 @@
27534076 return 0;
27544077
27554078 /*
4079
+ * Check for read in write conflicts
4080
+ */
4081
+ if (!read && !valid_state(curr, this, new_bit,
4082
+ excl_bit + LOCK_USAGE_READ_MASK))
4083
+ return 0;
4084
+
4085
+
4086
+ /*
27564087 * Validate that the lock dependencies don't have conflicting usage
27574088 * states.
27584089 */
2759
- if ((!read || !dir || STRICT_READ_CHECKS) &&
2760
- !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2761
- return 0;
2762
-
2763
- /*
2764
- * Check for read in write conflicts
2765
- */
2766
- if (!read) {
2767
- if (!valid_state(curr, this, new_bit, excl_bit + 1))
4090
+ if (dir) {
4091
+ /*
4092
+ * mark ENABLED has to look backwards -- to ensure no dependee
4093
+ * has USED_IN state, which, again, would allow recursion deadlocks.
4094
+ */
4095
+ if (!check_usage_backwards(curr, this, excl_bit))
27684096 return 0;
2769
-
2770
- if (STRICT_READ_CHECKS &&
2771
- !usage(curr, this, excl_bit + 1,
2772
- state_name(new_bit + 1)))
4097
+ } else {
4098
+ /*
4099
+ * mark USED_IN has to look forwards -- to ensure no dependency
4100
+ * has ENABLED state, which would allow recursion deadlocks.
4101
+ */
4102
+ if (!check_usage_forwards(curr, this, excl_bit))
27734103 return 0;
27744104 }
27754105
....@@ -2779,35 +4109,28 @@
27794109 return 1;
27804110 }
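
mark_lock_irq() now derives the read and direction flags straight from the usage bit and chooses the BFS direction from the latter: ENABLED bits scan backwards for a USED_IN dependee, USED_IN bits scan forwards for an ENABLED dependency. A sketch of that decoding, assuming the two low mask bits are read = 1 and direction = 2 as the hunk's mask names suggest:

#include <stdio.h>

#define TOY_USAGE_READ_MASK 1
#define TOY_USAGE_DIR_MASK  2

static const char *scan_direction(int usage_bit)
{
	int read = usage_bit & TOY_USAGE_READ_MASK;
	int dir  = usage_bit & TOY_USAGE_DIR_MASK;

	(void)read;	/* read-only usages relax some of the checks in the real code */

	/* dir set means an ENABLED_* bit: look backwards for USED_IN dependees. */
	return dir ? "backwards" : "forwards";
}

int main(void)
{
	/* Illustrative encodings: 0 = USED_IN (write), 2 = ENABLED (write). */
	printf("USED_IN  -> scan %s\n", scan_direction(0));
	printf("ENABLED  -> scan %s\n", scan_direction(2));
	return 0;
}
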
27814111
2782
-enum mark_type {
2783
-#define LOCKDEP_STATE(__STATE) __STATE,
2784
-#include "lockdep_states.h"
2785
-#undef LOCKDEP_STATE
2786
-};
2787
-
27884112 /*
27894113 * Mark all held locks with a usage bit:
27904114 */
27914115 static int
2792
-mark_held_locks(struct task_struct *curr, enum mark_type mark)
4116
+mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
27934117 {
2794
- enum lock_usage_bit usage_bit;
27954118 struct held_lock *hlock;
27964119 int i;
27974120
27984121 for (i = 0; i < curr->lockdep_depth; i++) {
4122
+ enum lock_usage_bit hlock_bit = base_bit;
27994123 hlock = curr->held_locks + i;
28004124
2801
- usage_bit = 2 + (mark << 2); /* ENABLED */
28024125 if (hlock->read)
2803
- usage_bit += 1; /* READ */
4126
+ hlock_bit += LOCK_USAGE_READ_MASK;
28044127
2805
- BUG_ON(usage_bit >= LOCK_USAGE_STATES);
4128
+ BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
28064129
28074130 if (!hlock->check)
28084131 continue;
28094132
2810
- if (!mark_lock(curr, hlock, usage_bit))
4133
+ if (!mark_lock(curr, hlock, hlock_bit))
28114134 return 0;
28124135 }
28134136
....@@ -2817,18 +4140,15 @@
28174140 /*
28184141 * Hardirqs will be enabled:
28194142 */
2820
-static void __trace_hardirqs_on_caller(unsigned long ip)
4143
+static void __trace_hardirqs_on_caller(void)
28214144 {
28224145 struct task_struct *curr = current;
2823
-
2824
- /* we'll do an OFF -> ON transition: */
2825
- curr->hardirqs_enabled = 1;
28264146
28274147 /*
28284148 * We are going to turn hardirqs on, so set the
28294149 * usage bit for all held locks:
28304150 */
2831
- if (!mark_held_locks(curr, HARDIRQ))
4151
+ if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
28324152 return;
28334153 /*
28344154 * If we have softirqs enabled, then set the usage
....@@ -2836,20 +4156,33 @@
28364156 * this bit from being set before)
28374157 */
28384158 if (curr->softirqs_enabled)
2839
- if (!mark_held_locks(curr, SOFTIRQ))
2840
- return;
2841
-
2842
- curr->hardirq_enable_ip = ip;
2843
- curr->hardirq_enable_event = ++curr->irq_events;
2844
- debug_atomic_inc(hardirqs_on_events);
4159
+ mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
28454160 }
28464161
2847
-void lockdep_hardirqs_on(unsigned long ip)
4162
+/**
4163
+ * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
4164
+ * @ip: Caller address
4165
+ *
4166
+ * Invoked before a possible transition to RCU idle from exit to user or
4167
+ * guest mode. This ensures that all RCU operations are done before RCU
4168
+ * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
4169
+ * invoked to set the final state.
4170
+ */
4171
+void lockdep_hardirqs_on_prepare(unsigned long ip)
28484172 {
2849
- if (unlikely(!debug_locks || current->lockdep_recursion))
4173
+ if (unlikely(!debug_locks))
28504174 return;
28514175
2852
- if (unlikely(current->hardirqs_enabled)) {
4176
+ /*
4177
+ * NMIs do not (and cannot) track lock dependencies, nothing to do.
4178
+ */
4179
+ if (unlikely(in_nmi()))
4180
+ return;
4181
+
4182
+ if (unlikely(this_cpu_read(lockdep_recursion)))
4183
+ return;
4184
+
4185
+ if (unlikely(lockdep_hardirqs_enabled())) {
28534186 /*
28544187 * Neither irq nor preemption are disabled here
28554188 * so this is racy by nature but losing one hit
....@@ -2870,29 +4203,105 @@
28704203 /*
28714204 * See the fine text that goes along with this variable definition.
28724205 */
2873
- if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
4206
+ if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
28744207 return;
28754208
28764209 /*
28774210 * Can't allow enabling interrupts while in an interrupt handler,
28784211 * that's general bad form and such. Recursion, limited stack etc..
28794212 */
2880
- if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
4213
+ if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
28814214 return;
28824215
2883
- current->lockdep_recursion = 1;
2884
- __trace_hardirqs_on_caller(ip);
2885
- current->lockdep_recursion = 0;
4216
+ current->hardirq_chain_key = current->curr_chain_key;
4217
+
4218
+ lockdep_recursion_inc();
4219
+ __trace_hardirqs_on_caller();
4220
+ lockdep_recursion_finish();
28864221 }
4222
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
4223
+
4224
+void noinstr lockdep_hardirqs_on(unsigned long ip)
4225
+{
4226
+ struct irqtrace_events *trace = &current->irqtrace;
4227
+
4228
+ if (unlikely(!debug_locks))
4229
+ return;
4230
+
4231
+ /*
4232
+ * NMIs can happen in the middle of local_irq_{en,dis}able() where the
4233
+ * tracking state and hardware state are out of sync.
4234
+ *
4235
+ * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
4236
+ * and not rely on hardware state like normal interrupts.
4237
+ */
4238
+ if (unlikely(in_nmi())) {
4239
+ if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4240
+ return;
4241
+
4242
+ /*
4243
+ * Skip:
4244
+ * - recursion check, because NMI can hit lockdep;
4245
+ * - hardware state check, because above;
4246
+ * - chain_key check, see lockdep_hardirqs_on_prepare().
4247
+ */
4248
+ goto skip_checks;
4249
+ }
4250
+
4251
+ if (unlikely(this_cpu_read(lockdep_recursion)))
4252
+ return;
4253
+
4254
+ if (lockdep_hardirqs_enabled()) {
4255
+ /*
4256
+ * Neither irq nor preemption are disabled here
4257
+ * so this is racy by nature but losing one hit
4258
+ * in a stat is not a big deal.
4259
+ */
4260
+ __debug_atomic_inc(redundant_hardirqs_on);
4261
+ return;
4262
+ }
4263
+
4264
+ /*
4265
+ * We're enabling irqs and according to our state above irqs weren't
4266
+ * already enabled, yet we find the hardware thinks they are in fact
4267
+ * enabled.. someone messed up their IRQ state tracing.
4268
+ */
4269
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4270
+ return;
4271
+
4272
+ /*
4273
+ * Ensure the lock stack remained unchanged between
4274
+ * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
4275
+ */
4276
+ DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
4277
+ current->curr_chain_key);
4278
+
4279
+skip_checks:
4280
+ /* we'll do an OFF -> ON transition: */
4281
+ __this_cpu_write(hardirqs_enabled, 1);
4282
+ trace->hardirq_enable_ip = ip;
4283
+ trace->hardirq_enable_event = ++trace->irq_events;
4284
+ debug_atomic_inc(hardirqs_on_events);
4285
+}
4286
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
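
lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on() above split the annotation into two phases: prepare() marks the held locks and snapshots curr_chain_key into hardirq_chain_key, and the final on() warns if the held-lock stack changed in the window before flipping the per-CPU hardirqs_enabled state. A toy model of that snapshot-and-verify handshake, assuming plain globals stand in for the per-task and per-CPU state:

#include <stdio.h>
#include <stdint.h>

static uint64_t curr_chain_key = 0x1234;	/* stand-in for the per-task key */
static uint64_t hardirq_chain_key;
static int hardirqs_enabled;			/* stand-in for the per-CPU flag */

static void toy_hardirqs_on_prepare(void)
{
	hardirq_chain_key = curr_chain_key;	/* snapshot the held-lock stack */
}

static void toy_hardirqs_on(void)
{
	if (hardirq_chain_key != curr_chain_key)
		printf("WARN: lock stack changed between prepare and on\n");
	hardirqs_enabled = 1;			/* the OFF -> ON transition */
}

int main(void)
{
	toy_hardirqs_on_prepare();
	curr_chain_key ^= 0xff;			/* simulate taking a lock in the window */
	toy_hardirqs_on();
	printf("hardirqs_enabled = %d\n", hardirqs_enabled);
	return 0;
}
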
28874287
28884288 /*
28894289 * Hardirqs were disabled:
28904290 */
2891
-void lockdep_hardirqs_off(unsigned long ip)
4291
+void noinstr lockdep_hardirqs_off(unsigned long ip)
28924292 {
2893
- struct task_struct *curr = current;
4293
+ if (unlikely(!debug_locks))
4294
+ return;
28944295
2895
- if (unlikely(!debug_locks || current->lockdep_recursion))
4296
+ /*
4297
+ * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
4298
+ * they will restore the software state. This ensures the software
4299
+ * state is consistent inside NMIs as well.
4300
+ */
4301
+ if (in_nmi()) {
4302
+ if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4303
+ return;
4304
+ } else if (__this_cpu_read(lockdep_recursion))
28964305 return;
28974306
28984307 /*
....@@ -2902,26 +4311,30 @@
29024311 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
29034312 return;
29044313
2905
- if (curr->hardirqs_enabled) {
4314
+ if (lockdep_hardirqs_enabled()) {
4315
+ struct irqtrace_events *trace = &current->irqtrace;
4316
+
29064317 /*
29074318 * We have done an ON -> OFF transition:
29084319 */
2909
- curr->hardirqs_enabled = 0;
2910
- curr->hardirq_disable_ip = ip;
2911
- curr->hardirq_disable_event = ++curr->irq_events;
4320
+ __this_cpu_write(hardirqs_enabled, 0);
4321
+ trace->hardirq_disable_ip = ip;
4322
+ trace->hardirq_disable_event = ++trace->irq_events;
29124323 debug_atomic_inc(hardirqs_off_events);
2913
- } else
4324
+ } else {
29144325 debug_atomic_inc(redundant_hardirqs_off);
4326
+ }
29154327 }
4328
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
29164329
29174330 /*
29184331 * Softirqs will be enabled:
29194332 */
2920
-void trace_softirqs_on(unsigned long ip)
4333
+void lockdep_softirqs_on(unsigned long ip)
29214334 {
2922
- struct task_struct *curr = current;
4335
+ struct irqtrace_events *trace = &current->irqtrace;
29234336
2924
- if (unlikely(!debug_locks || current->lockdep_recursion))
4337
+ if (unlikely(!lockdep_enabled()))
29254338 return;
29264339
29274340 /*
....@@ -2931,37 +4344,35 @@
29314344 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
29324345 return;
29334346
2934
- if (curr->softirqs_enabled) {
4347
+ if (current->softirqs_enabled) {
29354348 debug_atomic_inc(redundant_softirqs_on);
29364349 return;
29374350 }
29384351
2939
- current->lockdep_recursion = 1;
4352
+ lockdep_recursion_inc();
29404353 /*
29414354 * We'll do an OFF -> ON transition:
29424355 */
2943
- curr->softirqs_enabled = 1;
2944
- curr->softirq_enable_ip = ip;
2945
- curr->softirq_enable_event = ++curr->irq_events;
4356
+ current->softirqs_enabled = 1;
4357
+ trace->softirq_enable_ip = ip;
4358
+ trace->softirq_enable_event = ++trace->irq_events;
29464359 debug_atomic_inc(softirqs_on_events);
29474360 /*
29484361 * We are going to turn softirqs on, so set the
29494362 * usage bit for all held locks, if hardirqs are
29504363 * enabled too:
29514364 */
2952
- if (curr->hardirqs_enabled)
2953
- mark_held_locks(curr, SOFTIRQ);
2954
- current->lockdep_recursion = 0;
4365
+ if (lockdep_hardirqs_enabled())
4366
+ mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
4367
+ lockdep_recursion_finish();
29554368 }
29564369
29574370 /*
29584371 * Softirqs were disabled:
29594372 */
2960
-void trace_softirqs_off(unsigned long ip)
4373
+void lockdep_softirqs_off(unsigned long ip)
29614374 {
2962
- struct task_struct *curr = current;
2963
-
2964
- if (unlikely(!debug_locks || current->lockdep_recursion))
4375
+ if (unlikely(!lockdep_enabled()))
29654376 return;
29664377
29674378 /*
....@@ -2970,13 +4381,15 @@
29704381 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
29714382 return;
29724383
2973
- if (curr->softirqs_enabled) {
4384
+ if (current->softirqs_enabled) {
4385
+ struct irqtrace_events *trace = &current->irqtrace;
4386
+
29744387 /*
29754388 * We have done an ON -> OFF transition:
29764389 */
2977
- curr->softirqs_enabled = 0;
2978
- curr->softirq_disable_ip = ip;
2979
- curr->softirq_disable_event = ++curr->irq_events;
4390
+ current->softirqs_enabled = 0;
4391
+ trace->softirq_disable_ip = ip;
4392
+ trace->softirq_disable_event = ++trace->irq_events;
29804393 debug_atomic_inc(softirqs_off_events);
29814394 /*
29824395 * Whoops, we wanted softirqs off, so why aren't they?
....@@ -2986,15 +4399,19 @@
29864399 debug_atomic_inc(redundant_softirqs_off);
29874400 }
29884401
2989
-static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
4402
+static int
4403
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
29904404 {
4405
+ if (!check)
4406
+ goto lock_used;
4407
+
29914408 /*
29924409 * If non-trylock use in a hardirq or softirq context, then
29934410 * mark the lock as used in these contexts:
29944411 */
29954412 if (!hlock->trylock) {
29964413 if (hlock->read) {
2997
- if (curr->hardirq_context)
4414
+ if (lockdep_hardirq_context())
29984415 if (!mark_lock(curr, hlock,
29994416 LOCK_USED_IN_HARDIRQ_READ))
30004417 return 0;
....@@ -3003,7 +4420,7 @@
30034420 LOCK_USED_IN_SOFTIRQ_READ))
30044421 return 0;
30054422 } else {
3006
- if (curr->hardirq_context)
4423
+ if (lockdep_hardirq_context())
30074424 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
30084425 return 0;
30094426 if (curr->softirq_context)
....@@ -3031,12 +4448,18 @@
30314448 }
30324449 }
30334450
4451
+lock_used:
4452
+ /* mark it as used: */
4453
+ if (!mark_lock(curr, hlock, LOCK_USED))
4454
+ return 0;
4455
+
30344456 return 1;
30354457 }
30364458
30374459 static inline unsigned int task_irq_context(struct task_struct *task)
30384460 {
3039
- return 2 * !!task->hardirq_context + !!task->softirq_context;
4461
+ return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
4462
+ LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
30404463 }
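
task_irq_context() packs the hardirq and softirq flags into the chain's irq_context value using the LOCK_CHAIN_*_CONTEXT multipliers. A sketch of that encoding, assuming multipliers of 2 and 1 to match the open-coded expression the hunk replaces:

#include <stdio.h>

#define TOY_CHAIN_HARDIRQ_CONTEXT 2	/* assumed values, for illustration only */
#define TOY_CHAIN_SOFTIRQ_CONTEXT 1

static unsigned int toy_irq_context(int in_hardirq, int in_softirq)
{
	return TOY_CHAIN_HARDIRQ_CONTEXT * !!in_hardirq +
	       TOY_CHAIN_SOFTIRQ_CONTEXT * !!in_softirq;
}

int main(void)
{
	printf("process context: %u\n", toy_irq_context(0, 0));
	printf("softirq context: %u\n", toy_irq_context(0, 1));
	printf("hardirq context: %u\n", toy_irq_context(1, 0));
	return 0;
}
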
30414464
30424465 static int separate_irq_context(struct task_struct *curr,
....@@ -3062,42 +4485,23 @@
30624485 return 0;
30634486 }
30644487
3065
-#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3066
-
3067
-static inline
3068
-int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3069
- enum lock_usage_bit new_bit)
3070
-{
3071
- WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
3072
- return 1;
3073
-}
3074
-
3075
-static inline int mark_irqflags(struct task_struct *curr,
3076
- struct held_lock *hlock)
3077
-{
3078
- return 1;
3079
-}
3080
-
3081
-static inline unsigned int task_irq_context(struct task_struct *task)
3082
-{
3083
- return 0;
3084
-}
3085
-
3086
-static inline int separate_irq_context(struct task_struct *curr,
3087
- struct held_lock *hlock)
3088
-{
3089
- return 0;
3090
-}
3091
-
3092
-#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3093
-
30944488 /*
30954489 * Mark a lock with a usage bit, and validate the state transition:
30964490 */
30974491 static int mark_lock(struct task_struct *curr, struct held_lock *this,
30984492 enum lock_usage_bit new_bit)
30994493 {
3100
- unsigned int new_mask = 1 << new_bit, ret = 1;
4494
+ unsigned int new_mask, ret = 1;
4495
+
4496
+ if (new_bit >= LOCK_USAGE_STATES) {
4497
+ DEBUG_LOCKS_WARN_ON(1);
4498
+ return 0;
4499
+ }
4500
+
4501
+ if (new_bit == LOCK_USED && this->read)
4502
+ new_bit = LOCK_USED_READ;
4503
+
4504
+ new_mask = 1 << new_bit;
31014505
31024506 /*
31034507 * If already set then do not dirty the cacheline,
....@@ -3111,38 +4515,26 @@
31114515 /*
31124516 * Make sure we didn't race:
31134517 */
3114
- if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3115
- graph_unlock();
3116
- return 1;
3117
- }
4518
+ if (unlikely(hlock_class(this)->usage_mask & new_mask))
4519
+ goto unlock;
4520
+
4521
+ if (!hlock_class(this)->usage_mask)
4522
+ debug_atomic_dec(nr_unused_locks);
31184523
31194524 hlock_class(this)->usage_mask |= new_mask;
31204525
3121
- if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3122
- return 0;
4526
+ if (new_bit < LOCK_TRACE_STATES) {
4527
+ if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
4528
+ return 0;
4529
+ }
31234530
3124
- switch (new_bit) {
3125
-#define LOCKDEP_STATE(__STATE) \
3126
- case LOCK_USED_IN_##__STATE: \
3127
- case LOCK_USED_IN_##__STATE##_READ: \
3128
- case LOCK_ENABLED_##__STATE: \
3129
- case LOCK_ENABLED_##__STATE##_READ:
3130
-#include "lockdep_states.h"
3131
-#undef LOCKDEP_STATE
4531
+ if (new_bit < LOCK_USED) {
31324532 ret = mark_lock_irq(curr, this, new_bit);
31334533 if (!ret)
31344534 return 0;
3135
- break;
3136
- case LOCK_USED:
3137
- debug_atomic_dec(nr_unused_locks);
3138
- break;
3139
- default:
3140
- if (!debug_locks_off_graph_unlock())
3141
- return 0;
3142
- WARN_ON(1);
3143
- return 0;
31444535 }
31454536
4537
+unlock:
31464538 graph_unlock();
31474539
31484540 /*
....@@ -3158,11 +4550,157 @@
31584550 return ret;
31594551 }
31604552
4553
+static inline short task_wait_context(struct task_struct *curr)
4554
+{
4555
+ /*
4556
+ * Set appropriate wait type for the context; for IRQs we have to take
4557
+ * into account force_irqthread as that is implied by PREEMPT_RT.
4558
+ */
4559
+ if (lockdep_hardirq_context()) {
4560
+ /*
4561
+ * Check if force_irqthreads will run us threaded.
4562
+ */
4563
+ if (curr->hardirq_threaded || curr->irq_config)
4564
+ return LD_WAIT_CONFIG;
4565
+
4566
+ return LD_WAIT_SPIN;
4567
+ } else if (curr->softirq_context) {
4568
+ /*
4569
+ * Softirqs are always threaded.
4570
+ */
4571
+ return LD_WAIT_CONFIG;
4572
+ }
4573
+
4574
+ return LD_WAIT_MAX;
4575
+}
4576
+
4577
+static int
4578
+print_lock_invalid_wait_context(struct task_struct *curr,
4579
+ struct held_lock *hlock)
4580
+{
4581
+ short curr_inner;
4582
+
4583
+ if (!debug_locks_off())
4584
+ return 0;
4585
+ if (debug_locks_silent)
4586
+ return 0;
4587
+
4588
+ pr_warn("\n");
4589
+ pr_warn("=============================\n");
4590
+ pr_warn("[ BUG: Invalid wait context ]\n");
4591
+ print_kernel_ident();
4592
+ pr_warn("-----------------------------\n");
4593
+
4594
+ pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4595
+ print_lock(hlock);
4596
+
4597
+ pr_warn("other info that might help us debug this:\n");
4598
+
4599
+ curr_inner = task_wait_context(curr);
4600
+ pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
4601
+
4602
+ lockdep_print_held_locks(curr);
4603
+
4604
+ pr_warn("stack backtrace:\n");
4605
+ dump_stack();
4606
+
4607
+ return 0;
4608
+}
4609
+
4610
+/*
4611
+ * Verify the wait_type context.
4612
+ *
4613
+ * This check validates that we take locks in the right wait-type order; that is, it
4614
+ * ensures that we do not take mutexes inside spinlocks and do not attempt to
4615
+ * acquire spinlocks inside raw_spinlocks, and so on.
4616
+ *
4617
+ * The entire thing is slightly more complex because of RCU: RCU is a lock that
4618
+ * can be taken from (pretty much) any context but also has constraints.
4619
+ * However when taken in a stricter environment the RCU lock does not loosen
4620
+ * the constraints.
4621
+ *
4622
+ * Therefore we must look for the strictest environment in the lock stack and
4623
+ * compare that to the lock we're trying to acquire.
4624
+ */
4625
+static int check_wait_context(struct task_struct *curr, struct held_lock *next)
4626
+{
4627
+ u8 next_inner = hlock_class(next)->wait_type_inner;
4628
+ u8 next_outer = hlock_class(next)->wait_type_outer;
4629
+ u8 curr_inner;
4630
+ int depth;
4631
+
4632
+ if (!next_inner || next->trylock)
4633
+ return 0;
4634
+
4635
+ if (!next_outer)
4636
+ next_outer = next_inner;
4637
+
4638
+ /*
4639
+ * Find start of current irq_context..
4640
+ */
4641
+ for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) {
4642
+ struct held_lock *prev = curr->held_locks + depth;
4643
+ if (prev->irq_context != next->irq_context)
4644
+ break;
4645
+ }
4646
+ depth++;
4647
+
4648
+ curr_inner = task_wait_context(curr);
4649
+
4650
+ for (; depth < curr->lockdep_depth; depth++) {
4651
+ struct held_lock *prev = curr->held_locks + depth;
4652
+ u8 prev_inner = hlock_class(prev)->wait_type_inner;
4653
+
4654
+ if (prev_inner) {
4655
+ /*
4656
+ * We can have a bigger inner than a previous one
4657
+ * when outer is smaller than inner, as with RCU.
4658
+ *
4659
+ * Also due to trylocks.
4660
+ */
4661
+ curr_inner = min(curr_inner, prev_inner);
4662
+ }
4663
+ }
4664
+
4665
+ if (next_outer > curr_inner)
4666
+ return print_lock_invalid_wait_context(curr, next);
4667
+
4668
+ return 0;
4669
+}
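
check_wait_context() scans the held locks of the current irq context, keeps the strictest (numerically smallest) inner wait type seen, and rejects the new lock if its outer wait type is larger than that. A user-space sketch of the same rule, assuming small stand-in values for the LD_WAIT_* levels where lower means stricter:

#include <stdio.h>

/* Rough stand-ins for LD_WAIT_*: smaller value = stricter context. */
enum wait_type { WAIT_SPIN = 2, WAIT_CONFIG = 3, WAIT_SLEEP = 4, WAIT_MAX = 5 };

/* Return 0 when the acquisition is allowed, -1 when it violates nesting. */
static int toy_check_wait_context(const enum wait_type *held_inner, int depth,
				  enum wait_type next_outer)
{
	enum wait_type curr_inner = WAIT_MAX;	/* plain task context: anything goes */

	for (int i = 0; i < depth; i++)
		if (held_inner[i] < curr_inner)
			curr_inner = held_inner[i];	/* strictest held lock wins */

	return next_outer > curr_inner ? -1 : 0;
}

int main(void)
{
	enum wait_type held[] = { WAIT_SPIN };	/* a raw/spin lock is held */

	printf("spinlock inside spinlock: %d\n",
	       toy_check_wait_context(held, 1, WAIT_SPIN));
	printf("mutex inside spinlock:    %d\n",
	       toy_check_wait_context(held, 1, WAIT_SLEEP));
	return 0;
}
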
4670
+
4671
+#else /* CONFIG_PROVE_LOCKING */
4672
+
4673
+static inline int
4674
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4675
+{
4676
+ return 1;
4677
+}
4678
+
4679
+static inline unsigned int task_irq_context(struct task_struct *task)
4680
+{
4681
+ return 0;
4682
+}
4683
+
4684
+static inline int separate_irq_context(struct task_struct *curr,
4685
+ struct held_lock *hlock)
4686
+{
4687
+ return 0;
4688
+}
4689
+
4690
+static inline int check_wait_context(struct task_struct *curr,
4691
+ struct held_lock *next)
4692
+{
4693
+ return 0;
4694
+}
4695
+
4696
+#endif /* CONFIG_PROVE_LOCKING */
4697
+
31614698 /*
31624699 * Initialize a lock instance's lock-class mapping info:
31634700 */
3164
-static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3165
- struct lock_class_key *key, int subclass)
4701
+void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
4702
+ struct lock_class_key *key, int subclass,
4703
+ u8 inner, u8 outer, u8 lock_type)
31664704 {
31674705 int i;
31684706
....@@ -3183,19 +4721,22 @@
31834721
31844722 lock->name = name;
31854723
4724
+ lock->wait_type_outer = outer;
4725
+ lock->wait_type_inner = inner;
4726
+ lock->lock_type = lock_type;
4727
+
31864728 /*
31874729 * No key, no joy, we need to hash something.
31884730 */
31894731 if (DEBUG_LOCKS_WARN_ON(!key))
31904732 return;
31914733 /*
3192
- * Sanity check, the lock-class key must be persistent:
4734
+ * Sanity check, the lock-class key must either have been allocated
4735
+ * statically or must have been registered as a dynamic key.
31934736 */
3194
- if (!static_obj(key)) {
3195
- printk("BUG: key %px not in .data!\n", key);
3196
- /*
3197
- * What it says above ^^^^^, I suggest you read it.
3198
- */
4737
+ if (!static_obj(key) && !is_dynamic_key(key)) {
4738
+ if (debug_locks)
4739
+ printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
31994740 DEBUG_LOCKS_WARN_ON(1);
32004741 return;
32014742 }
....@@ -3207,36 +4748,30 @@
32074748 if (subclass) {
32084749 unsigned long flags;
32094750
3210
- if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
4751
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
32114752 return;
32124753
32134754 raw_local_irq_save(flags);
3214
- current->lockdep_recursion = 1;
4755
+ lockdep_recursion_inc();
32154756 register_lock_class(lock, subclass, 1);
3216
- current->lockdep_recursion = 0;
4757
+ lockdep_recursion_finish();
32174758 raw_local_irq_restore(flags);
32184759 }
32194760 }
3220
-
3221
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
3222
- struct lock_class_key *key, int subclass)
3223
-{
3224
- __lockdep_init_map(lock, name, key, subclass);
3225
-}
3226
-EXPORT_SYMBOL_GPL(lockdep_init_map);
4761
+EXPORT_SYMBOL_GPL(lockdep_init_map_type);
32274762
32284763 struct lock_class_key __lockdep_no_validate__;
32294764 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
32304765
3231
-static int
4766
+static void
32324767 print_lock_nested_lock_not_held(struct task_struct *curr,
32334768 struct held_lock *hlock,
32344769 unsigned long ip)
32354770 {
32364771 if (!debug_locks_off())
3237
- return 0;
4772
+ return;
32384773 if (debug_locks_silent)
3239
- return 0;
4774
+ return;
32404775
32414776 pr_warn("\n");
32424777 pr_warn("==================================\n");
....@@ -3258,8 +4793,6 @@
32584793
32594794 pr_warn("\nstack backtrace:\n");
32604795 dump_stack();
3261
-
3262
- return 0;
32634796 }
32644797
32654798 static int __lock_is_held(const struct lockdep_map *lock, int read);
....@@ -3267,6 +4800,10 @@
32674800 /*
32684801 * This gets called for every mutex_lock*()/spin_lock*() operation.
32694802 * We maintain the dependency maps and validate the locking attempt:
4803
+ *
4804
+ * The callers must make sure that IRQs are disabled before calling it,
4805
+ * otherwise we could get an interrupt which would want to take locks,
4806
+ * which would end up in lockdep again.
32704807 */
32714808 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
32724809 int trylock, int read, int check, int hardirqs_off,
....@@ -3284,14 +4821,6 @@
32844821 if (unlikely(!debug_locks))
32854822 return 0;
32864823
3287
- /*
3288
- * Lockdep should run with IRQs disabled, otherwise we could
3289
- * get an interrupt which would want to take locks, which would
3290
- * end up in lockdep and have you got a head-ache already?
3291
- */
3292
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3293
- return 0;
3294
-
32954824 if (!prove_locking || lock->key == &__lockdep_no_validate__)
32964825 check = 0;
32974826
....@@ -3305,7 +4834,9 @@
33054834 if (!class)
33064835 return 0;
33074836 }
3308
- atomic_inc((atomic_t *)&class->ops);
4837
+
4838
+ debug_class_ops_inc(class);
4839
+
33094840 if (very_verbose(class)) {
33104841 printk("\nacquire class [%px] %s", class->key, class->name);
33114842 if (class->name_version > 1)
....@@ -3326,9 +4857,9 @@
33264857 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
33274858 return 0;
33284859
3329
- class_idx = class - lock_classes + 1;
4860
+ class_idx = class - lock_classes;
33304861
3331
- if (depth) {
4862
+ if (depth) { /* we're holding locks */
33324863 hlock = curr->held_locks + depth - 1;
33334864 if (hlock->class_idx == class_idx && nest_lock) {
33344865 if (!references)
....@@ -3343,7 +4874,7 @@
33434874 if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
33444875 return 0;
33454876
3346
- return 1;
4877
+ return 2;
33474878 }
33484879 }
33494880
....@@ -3370,11 +4901,11 @@
33704901 #endif
33714902 hlock->pin_count = pin_count;
33724903
3373
- if (check && !mark_irqflags(curr, hlock))
4904
+ if (check_wait_context(curr, hlock))
33744905 return 0;
33754906
3376
- /* mark it as used: */
3377
- if (!mark_lock(curr, hlock, LOCK_USED))
4907
+ /* Initialize the lock usage bit */
4908
+ if (!mark_usage(curr, hlock, check))
33784909 return 0;
33794910
33804911 /*
....@@ -3388,9 +4919,9 @@
33884919 * the hash, not class->key.
33894920 */
33904921 /*
3391
- * Whoops, we did it again.. ran straight out of our static allocation.
4922
+ * Whoops, we did it again.. class_idx is invalid.
33924923 */
3393
- if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
4924
+ if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
33944925 return 0;
33954926
33964927 chain_key = curr->curr_chain_key;
....@@ -3398,22 +4929,29 @@
33984929 /*
33994930 * How can we have a chain hash when we ain't got no keys?!
34004931 */
3401
- if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
4932
+ if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
34024933 return 0;
34034934 chain_head = 1;
34044935 }
34054936
34064937 hlock->prev_chain_key = chain_key;
34074938 if (separate_irq_context(curr, hlock)) {
3408
- chain_key = 0;
4939
+ chain_key = INITIAL_CHAIN_KEY;
34094940 chain_head = 1;
34104941 }
3411
- chain_key = iterate_chain_key(chain_key, class_idx);
4942
+ chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
34124943
3413
- if (nest_lock && !__lock_is_held(nest_lock, -1))
3414
- return print_lock_nested_lock_not_held(curr, hlock, ip);
4944
+ if (nest_lock && !__lock_is_held(nest_lock, -1)) {
4945
+ print_lock_nested_lock_not_held(curr, hlock, ip);
4946
+ return 0;
4947
+ }
34154948
3416
- if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
4949
+ if (!debug_locks_silent) {
4950
+ WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
4951
+ WARN_ON_ONCE(!hlock_class(hlock)->key);
4952
+ }
4953
+
4954
+ if (!validate_chain(curr, hlock, chain_head, chain_key))
34174955 return 0;
34184956
34194957 curr->curr_chain_key = chain_key;
....@@ -3442,14 +4980,14 @@
34424980 return 1;
34434981 }
34444982
3445
-static int
3446
-print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3447
- unsigned long ip)
4983
+static void print_unlock_imbalance_bug(struct task_struct *curr,
4984
+ struct lockdep_map *lock,
4985
+ unsigned long ip)
34484986 {
34494987 if (!debug_locks_off())
3450
- return 0;
4988
+ return;
34514989 if (debug_locks_silent)
3452
- return 0;
4990
+ return;
34534991
34544992 pr_warn("\n");
34554993 pr_warn("=====================================\n");
....@@ -3460,19 +4998,17 @@
34604998 curr->comm, task_pid_nr(curr));
34614999 print_lockdep_cache(lock);
34625000 pr_cont(") at:\n");
3463
- print_ip_sym(ip);
5001
+ print_ip_sym(KERN_WARNING, ip);
34645002 pr_warn("but there are no more locks to release!\n");
34655003 pr_warn("\nother info that might help us debug this:\n");
34665004 lockdep_print_held_locks(curr);
34675005
34685006 pr_warn("\nstack backtrace:\n");
34695007 dump_stack();
3470
-
3471
- return 0;
34725008 }
34735009
3474
-static int match_held_lock(const struct held_lock *hlock,
3475
- const struct lockdep_map *lock)
5010
+static noinstr int match_held_lock(const struct held_lock *hlock,
5011
+ const struct lockdep_map *lock)
34765012 {
34775013 if (hlock->instance == lock)
34785014 return 1;
....@@ -3500,7 +5036,7 @@
35005036 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
35015037 return 0;
35025038
3503
- if (hlock->class_idx == class - lock_classes + 1)
5039
+ if (hlock->class_idx == class - lock_classes)
35045040 return 1;
35055041 }
35065042
....@@ -3544,19 +5080,33 @@
35445080 }
35455081
35465082 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3547
- int idx)
5083
+ int idx, unsigned int *merged)
35485084 {
35495085 struct held_lock *hlock;
5086
+ int first_idx = idx;
5087
+
5088
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
5089
+ return 0;
35505090
35515091 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3552
- if (!__lock_acquire(hlock->instance,
5092
+ switch (__lock_acquire(hlock->instance,
35535093 hlock_class(hlock)->subclass,
35545094 hlock->trylock,
35555095 hlock->read, hlock->check,
35565096 hlock->hardirqs_off,
35575097 hlock->nest_lock, hlock->acquire_ip,
3558
- hlock->references, hlock->pin_count))
5098
+ hlock->references, hlock->pin_count)) {
5099
+ case 0:
35595100 return 1;
5101
+ case 1:
5102
+ break;
5103
+ case 2:
5104
+ *merged += (idx == first_idx);
5105
+ break;
5106
+ default:
5107
+ WARN_ON(1);
5108
+ return 0;
5109
+ }
35605110 }
35615111 return 0;
35625112 }
....@@ -3567,47 +5117,9 @@
35675117 unsigned long ip)
35685118 {
35695119 struct task_struct *curr = current;
5120
+ unsigned int depth, merged = 0;
35705121 struct held_lock *hlock;
35715122 struct lock_class *class;
3572
- unsigned int depth;
3573
- int i;
3574
-
3575
- depth = curr->lockdep_depth;
3576
- /*
3577
- * This function is about (re)setting the class of a held lock,
3578
- * yet we're not actually holding any locks. Naughty user!
3579
- */
3580
- if (DEBUG_LOCKS_WARN_ON(!depth))
3581
- return 0;
3582
-
3583
- hlock = find_held_lock(curr, lock, depth, &i);
3584
- if (!hlock)
3585
- return print_unlock_imbalance_bug(curr, lock, ip);
3586
-
3587
- lockdep_init_map(lock, name, key, 0);
3588
- class = register_lock_class(lock, subclass, 0);
3589
- hlock->class_idx = class - lock_classes + 1;
3590
-
3591
- curr->lockdep_depth = i;
3592
- curr->curr_chain_key = hlock->prev_chain_key;
3593
-
3594
- if (reacquire_held_locks(curr, depth, i))
3595
- return 0;
3596
-
3597
- /*
3598
- * I took it apart and put it back together again, except now I have
3599
- * these 'spare' parts.. where shall I put them.
3600
- */
3601
- if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3602
- return 0;
3603
- return 1;
3604
-}
3605
-
3606
-static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3607
-{
3608
- struct task_struct *curr = current;
3609
- struct held_lock *hlock;
3610
- unsigned int depth;
36115123 int i;
36125124
36135125 if (unlikely(!debug_locks))
....@@ -3622,8 +5134,56 @@
36225134 return 0;
36235135
36245136 hlock = find_held_lock(curr, lock, depth, &i);
3625
- if (!hlock)
3626
- return print_unlock_imbalance_bug(curr, lock, ip);
5137
+ if (!hlock) {
5138
+ print_unlock_imbalance_bug(curr, lock, ip);
5139
+ return 0;
5140
+ }
5141
+
5142
+ lockdep_init_map_type(lock, name, key, 0,
5143
+ lock->wait_type_inner,
5144
+ lock->wait_type_outer,
5145
+ lock->lock_type);
5146
+ class = register_lock_class(lock, subclass, 0);
5147
+ hlock->class_idx = class - lock_classes;
5148
+
5149
+ curr->lockdep_depth = i;
5150
+ curr->curr_chain_key = hlock->prev_chain_key;
5151
+
5152
+ if (reacquire_held_locks(curr, depth, i, &merged))
5153
+ return 0;
5154
+
5155
+ /*
5156
+ * I took it apart and put it back together again, except now I have
5157
+ * these 'spare' parts.. where shall I put them.
5158
+ */
5159
+ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
5160
+ return 0;
5161
+ return 1;
5162
+}
5163
+
5164
+static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5165
+{
5166
+ struct task_struct *curr = current;
5167
+ unsigned int depth, merged = 0;
5168
+ struct held_lock *hlock;
5169
+ int i;
5170
+
5171
+ if (unlikely(!debug_locks))
5172
+ return 0;
5173
+
5174
+ depth = curr->lockdep_depth;
5175
+ /*
5176
+ * This function is about (re)setting the class of a held lock,
5177
+ * yet we're not actually holding any locks. Naughty user!
5178
+ */
5179
+ if (DEBUG_LOCKS_WARN_ON(!depth))
5180
+ return 0;
5181
+
5182
+ hlock = find_held_lock(curr, lock, depth, &i);
5183
+ if (!hlock) {
5184
+ print_unlock_imbalance_bug(curr, lock, ip);
5185
+ return 0;
5186
+ }
36275187
36285188 curr->lockdep_depth = i;
36295189 curr->curr_chain_key = hlock->prev_chain_key;
....@@ -3632,7 +5192,11 @@
36325192 hlock->read = 1;
36335193 hlock->acquire_ip = ip;
36345194
3635
- if (reacquire_held_locks(curr, depth, i))
5195
+ if (reacquire_held_locks(curr, depth, i, &merged))
5196
+ return 0;
5197
+
5198
+ /* Merging can't happen with unchanged classes.. */
5199
+ if (DEBUG_LOCKS_WARN_ON(merged))
36365200 return 0;
36375201
36385202 /*
....@@ -3641,22 +5205,21 @@
36415205 */
36425206 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
36435207 return 0;
5208
+
36445209 return 1;
36455210 }
36465211
36475212 /*
3648
- * Remove the lock to the list of currently held locks - this gets
5213
+ * Remove the lock from the list of currently held locks - this gets
36495214 * called on mutex_unlock()/spin_unlock*() (or on a failed
36505215 * mutex_lock_interruptible()).
3651
- *
3652
- * @nested is an hysterical artifact, needs a tree wide cleanup.
36535216 */
36545217 static int
3655
-__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
5218
+__lock_release(struct lockdep_map *lock, unsigned long ip)
36565219 {
36575220 struct task_struct *curr = current;
5221
+ unsigned int depth, merged = 1;
36585222 struct held_lock *hlock;
3659
- unsigned int depth;
36605223 int i;
36615224
36625225 if (unlikely(!debug_locks))
....@@ -3667,16 +5230,20 @@
36675230 * So we're all set to release this lock.. wait what lock? We don't
36685231 * own any locks, you've been drinking again?
36695232 */
3670
- if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3671
- return print_unlock_imbalance_bug(curr, lock, ip);
5233
+ if (depth <= 0) {
5234
+ print_unlock_imbalance_bug(curr, lock, ip);
5235
+ return 0;
5236
+ }
36725237
36735238 /*
36745239 * Check whether the lock exists in the current stack
36755240 * of held locks:
36765241 */
36775242 hlock = find_held_lock(curr, lock, depth, &i);
3678
- if (!hlock)
3679
- return print_unlock_imbalance_bug(curr, lock, ip);
5243
+ if (!hlock) {
5244
+ print_unlock_imbalance_bug(curr, lock, ip);
5245
+ return 0;
5246
+ }
36805247
36815248 if (hlock->instance == lock)
36825249 lock_release_holdtime(hlock);
....@@ -3704,20 +5271,33 @@
37045271 curr->lockdep_depth = i;
37055272 curr->curr_chain_key = hlock->prev_chain_key;
37065273
3707
- if (reacquire_held_locks(curr, depth, i + 1))
5274
+ /*
5275
+ * The most likely case is when the unlock is on the innermost
5276
+ * lock. In this case, we are done!
5277
+ */
5278
+ if (i == depth-1)
5279
+ return 1;
5280
+
5281
+ if (reacquire_held_locks(curr, depth, i + 1, &merged))
37085282 return 0;
37095283
37105284 /*
37115285 * We had N bottles of beer on the wall, we drank one, but now
37125286 * there's not N-1 bottles of beer left on the wall...
5287
+ * Pouring two of the bottles together is acceptable.
37135288 */
3714
- if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3715
- return 0;
5289
+ DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
37165290
3717
- return 1;
5291
+ /*
5292
+ * Since reacquire_held_locks() would have called check_chain_key()
5293
+ * indirectly via __lock_acquire(), we don't need to do it again
5294
+ * on return.
5295
+ */
5296
+ return 0;
37185297 }
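
__lock_release() above short-circuits the common case of unlocking the innermost lock; otherwise every entry above the released one is re-acquired, and a merge of two same-class entries is tolerated through the merged count. A toy model of dropping a mid-stack entry from a held-lock array, assuming integer ids and no merging for simplicity (the kernel re-acquires instead of shifting):

#include <stdio.h>

/* Remove entry i from a held-lock id stack of size depth; return the new depth. */
static int release_held(int *held, int depth, int i)
{
	if (i == depth - 1)			/* common case: innermost lock, just pop */
		return depth - 1;

	for (int j = i; j < depth - 1; j++)	/* otherwise shift the tail down */
		held[j] = held[j + 1];
	return depth - 1;
}

int main(void)
{
	int held[] = { 3, 7, 11 };
	int depth = release_held(held, 3, 1);	/* drop the middle lock, id 7 */

	for (int j = 0; j < depth; j++)
		printf("held[%d] = %d\n", j, held[j]);
	return 0;
}
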
37195298
3720
-static int __lock_is_held(const struct lockdep_map *lock, int read)
5299
+static __always_inline
5300
+int __lock_is_held(const struct lockdep_map *lock, int read)
37215301 {
37225302 struct task_struct *curr = current;
37235303 int i;
....@@ -3726,7 +5306,7 @@
37265306 struct held_lock *hlock = curr->held_locks + i;
37275307
37285308 if (match_held_lock(hlock, lock)) {
3729
- if (read == -1 || hlock->read == read)
5309
+ if (read == -1 || !!hlock->read == read)
37305310 return 1;
37315311
37325312 return 0;
....@@ -3814,24 +5394,25 @@
38145394 /*
38155395 * Check whether we follow the irq-flags state precisely:
38165396 */
3817
-static void check_flags(unsigned long flags)
5397
+static noinstr void check_flags(unsigned long flags)
38185398 {
3819
-#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3820
- defined(CONFIG_TRACE_IRQFLAGS)
5399
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
38215400 if (!debug_locks)
38225401 return;
38235402
5403
+ /* Get the warning out.. */
5404
+ instrumentation_begin();
5405
+
38245406 if (irqs_disabled_flags(flags)) {
3825
- if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
5407
+ if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
38265408 printk("possible reason: unannotated irqs-off.\n");
38275409 }
38285410 } else {
3829
- if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
5411
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
38305412 printk("possible reason: unannotated irqs-on.\n");
38315413 }
38325414 }
38335415
3834
-#ifndef CONFIG_PREEMPT_RT_FULL
38355416 /*
38365417 * We dont accurately track softirq state in e.g.
38375418 * hardirq contexts (such as on 4KSTACKS), so only
....@@ -3846,10 +5427,11 @@
38465427 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
38475428 }
38485429 }
3849
-#endif
38505430
38515431 if (!debug_locks)
38525432 print_irqtrace_events(current);
5433
+
5434
+ instrumentation_end();
38535435 #endif
38545436 }
38555437
....@@ -3859,15 +5441,15 @@
38595441 {
38605442 unsigned long flags;
38615443
3862
- if (unlikely(current->lockdep_recursion))
5444
+ if (unlikely(!lockdep_enabled()))
38635445 return;
38645446
38655447 raw_local_irq_save(flags);
3866
- current->lockdep_recursion = 1;
5448
+ lockdep_recursion_inc();
38675449 check_flags(flags);
38685450 if (__lock_set_class(lock, name, key, subclass, ip))
38695451 check_chain_key(current);
3870
- current->lockdep_recursion = 0;
5452
+ lockdep_recursion_finish();
38715453 raw_local_irq_restore(flags);
38725454 }
38735455 EXPORT_SYMBOL_GPL(lock_set_class);
....@@ -3876,18 +5458,70 @@
38765458 {
38775459 unsigned long flags;
38785460
3879
- if (unlikely(current->lockdep_recursion))
5461
+ if (unlikely(!lockdep_enabled()))
38805462 return;
38815463
38825464 raw_local_irq_save(flags);
3883
- current->lockdep_recursion = 1;
5465
+ lockdep_recursion_inc();
38845466 check_flags(flags);
38855467 if (__lock_downgrade(lock, ip))
38865468 check_chain_key(current);
3887
- current->lockdep_recursion = 0;
5469
+ lockdep_recursion_finish();
38885470 raw_local_irq_restore(flags);
38895471 }
38905472 EXPORT_SYMBOL_GPL(lock_downgrade);
5473
+
5474
+/* NMI context !!! */
5475
+static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
5476
+{
5477
+#ifdef CONFIG_PROVE_LOCKING
5478
+ struct lock_class *class = look_up_lock_class(lock, subclass);
5479
+ unsigned long mask = LOCKF_USED;
5480
+
5481
+ /* if it doesn't have a class (yet), it certainly hasn't been used yet */
5482
+ if (!class)
5483
+ return;
5484
+
5485
+ /*
5486
+ * READ locks only conflict with USED, such that if we only ever use
5487
+ * READ locks, there is no deadlock possible -- RCU.
5488
+ */
5489
+ if (!hlock->read)
5490
+ mask |= LOCKF_USED_READ;
5491
+
5492
+ if (!(class->usage_mask & mask))
5493
+ return;
5494
+
5495
+ hlock->class_idx = class - lock_classes;
5496
+
5497
+ print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
5498
+#endif
5499
+}
5500
+
5501
+static bool lockdep_nmi(void)
5502
+{
5503
+ if (raw_cpu_read(lockdep_recursion))
5504
+ return false;
5505
+
5506
+ if (!in_nmi())
5507
+ return false;
5508
+
5509
+ return true;
5510
+}
5511
+
5512
+/*
5513
+ * read_lock() is recursive if:
5514
+ * 1. We force lockdep to think this way in selftests, or
5515
+ * 2. The implementation is not a queued read/write lock, or
5516
+ * 3. The locker is in an in_interrupt() context.
5517
+ */
5518
+bool read_lock_is_recursive(void)
5519
+{
5520
+ return force_read_lock_recursive ||
5521
+ !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
5522
+ in_interrupt();
5523
+}
5524
+EXPORT_SYMBOL_GPL(read_lock_is_recursive);
38915525
38925526 /*
38935527 * We are not always called with irqs disabled - do that here,
....@@ -3899,74 +5533,97 @@
38995533 {
39005534 unsigned long flags;
39015535
3902
- if (unlikely(current->lockdep_recursion))
5536
+ trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
5537
+
5538
+ if (!debug_locks)
39035539 return;
5540
+
5541
+ if (unlikely(!lockdep_enabled())) {
5542
+ /* XXX allow trylock from NMI ?!? */
5543
+ if (lockdep_nmi() && !trylock) {
5544
+ struct held_lock hlock;
5545
+
5546
+ hlock.acquire_ip = ip;
5547
+ hlock.instance = lock;
5548
+ hlock.nest_lock = nest_lock;
5549
+ hlock.irq_context = 2; // XXX
5550
+ hlock.trylock = trylock;
5551
+ hlock.read = read;
5552
+ hlock.check = check;
5553
+ hlock.hardirqs_off = true;
5554
+ hlock.references = 0;
5555
+
5556
+ verify_lock_unused(lock, &hlock, subclass);
5557
+ }
5558
+ return;
5559
+ }
39045560
39055561 raw_local_irq_save(flags);
39065562 check_flags(flags);
39075563
3908
- current->lockdep_recursion = 1;
3909
- trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
5564
+ lockdep_recursion_inc();
39105565 __lock_acquire(lock, subclass, trylock, read, check,
39115566 irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
3912
- current->lockdep_recursion = 0;
5567
+ lockdep_recursion_finish();
39135568 raw_local_irq_restore(flags);
39145569 }
39155570 EXPORT_SYMBOL_GPL(lock_acquire);
39165571
3917
-void lock_release(struct lockdep_map *lock, int nested,
3918
- unsigned long ip)
5572
+void lock_release(struct lockdep_map *lock, unsigned long ip)
39195573 {
39205574 unsigned long flags;
39215575
3922
- if (unlikely(current->lockdep_recursion))
5576
+ trace_lock_release(lock, ip);
5577
+
5578
+ if (unlikely(!lockdep_enabled()))
39235579 return;
39245580
39255581 raw_local_irq_save(flags);
39265582 check_flags(flags);
3927
- current->lockdep_recursion = 1;
3928
- trace_lock_release(lock, ip);
3929
- if (__lock_release(lock, nested, ip))
5583
+
5584
+ lockdep_recursion_inc();
5585
+ if (__lock_release(lock, ip))
39305586 check_chain_key(current);
3931
- current->lockdep_recursion = 0;
5587
+ lockdep_recursion_finish();
39325588 raw_local_irq_restore(flags);
39335589 }
39345590 EXPORT_SYMBOL_GPL(lock_release);
39355591
3936
-int lock_is_held_type(const struct lockdep_map *lock, int read)
5592
+noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
39375593 {
39385594 unsigned long flags;
39395595 int ret = 0;
39405596
3941
- if (unlikely(current->lockdep_recursion))
5597
+ if (unlikely(!lockdep_enabled()))
39425598 return 1; /* avoid false negative lockdep_assert_held() */
39435599
39445600 raw_local_irq_save(flags);
39455601 check_flags(flags);
39465602
3947
- current->lockdep_recursion = 1;
5603
+ lockdep_recursion_inc();
39485604 ret = __lock_is_held(lock, read);
3949
- current->lockdep_recursion = 0;
5605
+ lockdep_recursion_finish();
39505606 raw_local_irq_restore(flags);
39515607
39525608 return ret;
39535609 }
39545610 EXPORT_SYMBOL_GPL(lock_is_held_type);
5611
+NOKPROBE_SYMBOL(lock_is_held_type);
39555612
39565613 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
39575614 {
39585615 struct pin_cookie cookie = NIL_COOKIE;
39595616 unsigned long flags;
39605617
3961
- if (unlikely(current->lockdep_recursion))
5618
+ if (unlikely(!lockdep_enabled()))
39625619 return cookie;
39635620
39645621 raw_local_irq_save(flags);
39655622 check_flags(flags);
39665623
3967
- current->lockdep_recursion = 1;
5624
+ lockdep_recursion_inc();
39685625 cookie = __lock_pin_lock(lock);
3969
- current->lockdep_recursion = 0;
5626
+ lockdep_recursion_finish();
39705627 raw_local_irq_restore(flags);
39715628
39725629 return cookie;
....@@ -3977,15 +5634,15 @@
39775634 {
39785635 unsigned long flags;
39795636
3980
- if (unlikely(current->lockdep_recursion))
5637
+ if (unlikely(!lockdep_enabled()))
39815638 return;
39825639
39835640 raw_local_irq_save(flags);
39845641 check_flags(flags);
39855642
3986
- current->lockdep_recursion = 1;
5643
+ lockdep_recursion_inc();
39875644 __lock_repin_lock(lock, cookie);
3988
- current->lockdep_recursion = 0;
5645
+ lockdep_recursion_finish();
39895646 raw_local_irq_restore(flags);
39905647 }
39915648 EXPORT_SYMBOL_GPL(lock_repin_lock);
....@@ -3994,28 +5651,28 @@
39945651 {
39955652 unsigned long flags;
39965653
3997
- if (unlikely(current->lockdep_recursion))
5654
+ if (unlikely(!lockdep_enabled()))
39985655 return;
39995656
40005657 raw_local_irq_save(flags);
40015658 check_flags(flags);
40025659
4003
- current->lockdep_recursion = 1;
5660
+ lockdep_recursion_inc();
40045661 __lock_unpin_lock(lock, cookie);
4005
- current->lockdep_recursion = 0;
5662
+ lockdep_recursion_finish();
40065663 raw_local_irq_restore(flags);
40075664 }
40085665 EXPORT_SYMBOL_GPL(lock_unpin_lock);
40095666
40105667 #ifdef CONFIG_LOCK_STAT
4011
-static int
4012
-print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
4013
- unsigned long ip)
5668
+static void print_lock_contention_bug(struct task_struct *curr,
5669
+ struct lockdep_map *lock,
5670
+ unsigned long ip)
40145671 {
40155672 if (!debug_locks_off())
4016
- return 0;
5673
+ return;
40175674 if (debug_locks_silent)
4018
- return 0;
5675
+ return;
40195676
40205677 pr_warn("\n");
40215678 pr_warn("=================================\n");
....@@ -4026,15 +5683,13 @@
40265683 curr->comm, task_pid_nr(curr));
40275684 print_lockdep_cache(lock);
40285685 pr_cont(") at:\n");
4029
- print_ip_sym(ip);
5686
+ print_ip_sym(KERN_WARNING, ip);
40305687 pr_warn("but there are no locks held!\n");
40315688 pr_warn("\nother info that might help us debug this:\n");
40325689 lockdep_print_held_locks(curr);
40335690
40345691 pr_warn("\nstack backtrace:\n");
40355692 dump_stack();
4036
-
4037
- return 0;
40385693 }
40395694
40405695 static void
....@@ -4112,8 +5767,6 @@
41125767 hlock->holdtime_stamp = now;
41135768 }
41145769
4115
- trace_lock_acquired(lock, ip);
4116
-
41175770 stats = get_lock_stats(hlock_class(hlock));
41185771 if (waittime) {
41195772 if (hlock->read)
....@@ -4132,18 +5785,16 @@
41325785 {
41335786 unsigned long flags;
41345787
4135
- if (unlikely(!lock_stat || !debug_locks))
4136
- return;
5788
+ trace_lock_contended(lock, ip);
41375789
4138
- if (unlikely(current->lockdep_recursion))
5790
+ if (unlikely(!lock_stat || !lockdep_enabled()))
41395791 return;
41405792
41415793 raw_local_irq_save(flags);
41425794 check_flags(flags);
4143
- current->lockdep_recursion = 1;
4144
- trace_lock_contended(lock, ip);
5795
+ lockdep_recursion_inc();
41455796 __lock_contended(lock, ip);
4146
- current->lockdep_recursion = 0;
5797
+ lockdep_recursion_finish();
41475798 raw_local_irq_restore(flags);
41485799 }
41495800 EXPORT_SYMBOL_GPL(lock_contended);
....@@ -4152,17 +5803,16 @@
41525803 {
41535804 unsigned long flags;
41545805
4155
- if (unlikely(!lock_stat || !debug_locks))
4156
- return;
5806
+ trace_lock_acquired(lock, ip);
41575807
4158
- if (unlikely(current->lockdep_recursion))
5808
+ if (unlikely(!lock_stat || !lockdep_enabled()))
41595809 return;
41605810
41615811 raw_local_irq_save(flags);
41625812 check_flags(flags);
4163
- current->lockdep_recursion = 1;
5813
+ lockdep_recursion_inc();
41645814 __lock_acquired(lock, ip);
4165
- current->lockdep_recursion = 0;
5815
+ lockdep_recursion_finish();
41665816 raw_local_irq_restore(flags);
41675817 }
41685818 EXPORT_SYMBOL_GPL(lock_acquired);
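lock_contended() and lock_acquired() are normally reached through the contention wrappers in the lock primitives rather than called directly. A sketch of that call pattern, loosely modelled on the LOCK_CONTENDED() wrapper in <linux/lockdep.h> (the exact macro text may differ between kernel versions):

#define LOCK_CONTENDED_SKETCH(_lock, try, lock)				\
do {									\
	if (!try(_lock)) {						\
		/* slow path: record the wait before blocking */	\
		lock_contended(&(_lock)->dep_map, _RET_IP_);		\
		lock(_lock);						\
	}								\
	/* record wait/hold statistics once the lock is owned */	\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);			\
} while (0)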
....@@ -4179,9 +5829,7 @@
41795829 int i;
41805830
41815831 raw_local_irq_save(flags);
4182
- current->curr_chain_key = 0;
4183
- current->lockdep_depth = 0;
4184
- current->lockdep_recursion = 0;
5832
+ lockdep_init_task(current);
41855833 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
41865834 nr_hardirq_chains = 0;
41875835 nr_softirq_chains = 0;
....@@ -4192,26 +5840,111 @@
41925840 raw_local_irq_restore(flags);
41935841 }
41945842
4195
-static void zap_class(struct lock_class *class)
5843
+/* Remove a class from a lock chain. Must be called with the graph lock held. */
5844
+static void remove_class_from_lock_chain(struct pending_free *pf,
5845
+ struct lock_chain *chain,
5846
+ struct lock_class *class)
41965847 {
5848
+#ifdef CONFIG_PROVE_LOCKING
41975849 int i;
5850
+
5851
+ for (i = chain->base; i < chain->base + chain->depth; i++) {
5852
+ if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes)
5853
+ continue;
5854
+ /*
5855
+ * Each lock class occurs at most once in a lock chain so once
5856
+ * we find a match we can break out of this loop.
5857
+ */
5858
+ goto free_lock_chain;
5859
+ }
5860
+ /* Since the chain has not been modified, return. */
5861
+ return;
5862
+
5863
+free_lock_chain:
5864
+ free_chain_hlocks(chain->base, chain->depth);
5865
+ /* Overwrite the chain key for concurrent RCU readers. */
5866
+ WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
5867
+ dec_chains(chain->irq_context);
5868
+
5869
+ /*
5870
+ * Note: calling hlist_del_rcu() from inside a
5871
+ * hlist_for_each_entry_rcu() loop is safe.
5872
+ */
5873
+ hlist_del_rcu(&chain->entry);
5874
+ __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
5875
+ nr_zapped_lock_chains++;
5876
+#endif
5877
+}
5878
+
5879
+/* Must be called with the graph lock held. */
5880
+static void remove_class_from_lock_chains(struct pending_free *pf,
5881
+ struct lock_class *class)
5882
+{
5883
+ struct lock_chain *chain;
5884
+ struct hlist_head *head;
5885
+ int i;
5886
+
5887
+ for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
5888
+ head = chainhash_table + i;
5889
+ hlist_for_each_entry_rcu(chain, head, entry) {
5890
+ remove_class_from_lock_chain(pf, chain, class);
5891
+ }
5892
+ }
5893
+}
5894
+
5895
+/*
5896
+ * Remove all references to a lock class. The caller must hold the graph lock.
5897
+ */
5898
+static void zap_class(struct pending_free *pf, struct lock_class *class)
5899
+{
5900
+ struct lock_list *entry;
5901
+ int i;
5902
+
5903
+ WARN_ON_ONCE(!class->key);
41985904
41995905 /*
42005906 * Remove all dependencies this lock is
42015907 * involved in:
42025908 */
4203
- for (i = 0; i < nr_list_entries; i++) {
4204
- if (list_entries[i].class == class)
4205
- list_del_rcu(&list_entries[i].entry);
5909
+ for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
5910
+ entry = list_entries + i;
5911
+ if (entry->class != class && entry->links_to != class)
5912
+ continue;
5913
+ __clear_bit(i, list_entries_in_use);
5914
+ nr_list_entries--;
5915
+ list_del_rcu(&entry->entry);
42065916 }
4207
- /*
4208
- * Unhash the class and remove it from the all_lock_classes list:
4209
- */
4210
- hlist_del_rcu(&class->hash_entry);
4211
- list_del_rcu(&class->lock_entry);
5917
+ if (list_empty(&class->locks_after) &&
5918
+ list_empty(&class->locks_before)) {
5919
+ list_move_tail(&class->lock_entry, &pf->zapped);
5920
+ hlist_del_rcu(&class->hash_entry);
5921
+ WRITE_ONCE(class->key, NULL);
5922
+ WRITE_ONCE(class->name, NULL);
5923
+ nr_lock_classes--;
5924
+ __clear_bit(class - lock_classes, lock_classes_in_use);
5925
+ if (class - lock_classes == max_lock_class_idx)
5926
+ max_lock_class_idx--;
5927
+ } else {
5928
+ WARN_ONCE(true, "%s() failed for class %s\n", __func__,
5929
+ class->name);
5930
+ }
42125931
4213
- RCU_INIT_POINTER(class->key, NULL);
4214
- RCU_INIT_POINTER(class->name, NULL);
5932
+ remove_class_from_lock_chains(pf, class);
5933
+ nr_zapped_classes++;
5934
+}
5935
+
5936
+static void reinit_class(struct lock_class *class)
5937
+{
5938
+ void *const p = class;
5939
+ const unsigned int offset = offsetof(struct lock_class, key);
5940
+
5941
+ WARN_ON_ONCE(!class->lock_entry.next);
5942
+ WARN_ON_ONCE(!list_empty(&class->locks_after));
5943
+ WARN_ON_ONCE(!list_empty(&class->locks_before));
5944
+ memset(p + offset, 0, sizeof(*class) - offset);
5945
+ WARN_ON_ONCE(!class->lock_entry.next);
5946
+ WARN_ON_ONCE(!list_empty(&class->locks_after));
5947
+ WARN_ON_ONCE(!list_empty(&class->locks_before));
42155948 }
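reinit_class() clears only the tail of the class, from the key field onward. The assumption (the authoritative field order lives in the lockdep headers, not in this hunk) is that everything in front of key is linkage that must survive reuse:

/* Illustrative ordering only -- not a verbatim copy of struct lock_class. */
struct lock_class_layout_sketch {
	struct hlist_node	hash_entry;	/* kept: classhash_table linkage */
	struct list_head	lock_entry;	/* kept: now linked on a free list */
	struct list_head	locks_after;	/* kept: checked empty above */
	struct list_head	locks_before;	/* kept: checked empty above */
	const struct lockdep_subclass_key *key;	/* memset() starts here */
	/* name, usage masks, statistics, ... are all zeroed */
};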
42165949
42175950 static inline int within(const void *addr, void *start, unsigned long size)
....@@ -4219,66 +5952,197 @@
42195952 return addr >= start && addr < start + size;
42205953 }
42215954
5955
+static bool inside_selftest(void)
5956
+{
5957
+ return current == lockdep_selftest_task_struct;
5958
+}
5959
+
5960
+/* The caller must hold the graph lock. */
5961
+static struct pending_free *get_pending_free(void)
5962
+{
5963
+ return delayed_free.pf + delayed_free.index;
5964
+}
5965
+
5966
+static void free_zapped_rcu(struct rcu_head *cb);
5967
+
5968
+/*
5969
+ * Schedule an RCU callback if no RCU callback is pending. Must be called with
5970
+ * the graph lock held.
5971
+ */
5972
+static void call_rcu_zapped(struct pending_free *pf)
5973
+{
5974
+ WARN_ON_ONCE(inside_selftest());
5975
+
5976
+ if (list_empty(&pf->zapped))
5977
+ return;
5978
+
5979
+ if (delayed_free.scheduled)
5980
+ return;
5981
+
5982
+ delayed_free.scheduled = true;
5983
+
5984
+ WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
5985
+ delayed_free.index ^= 1;
5986
+
5987
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
5988
+}
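call_rcu_zapped() flips delayed_free.index so that one pending_free element stays "open" for new zap operations while the other is "closed", waiting out its RCU grace period. The structure is declared earlier in this file; a sketch of the assumed shape:

/* Assumed shape of the double-buffered free list (see the real definition
 * near the top of this file). */
static struct {
	struct rcu_head		rcu_head;	/* handed to call_rcu() above */
	int			index;		/* which pf[] entry is open */
	int			scheduled;	/* callback already in flight? */
	struct pending_free	pf[2];		/* open half + closed half */
} delayed_free_sketch;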
5989
+
5990
+/* The caller must hold the graph lock. May be called from RCU context. */
5991
+static void __free_zapped_classes(struct pending_free *pf)
5992
+{
5993
+ struct lock_class *class;
5994
+
5995
+ check_data_structures();
5996
+
5997
+ list_for_each_entry(class, &pf->zapped, lock_entry)
5998
+ reinit_class(class);
5999
+
6000
+ list_splice_init(&pf->zapped, &free_lock_classes);
6001
+
6002
+#ifdef CONFIG_PROVE_LOCKING
6003
+ bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
6004
+ pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
6005
+ bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
6006
+#endif
6007
+}
6008
+
6009
+static void free_zapped_rcu(struct rcu_head *ch)
6010
+{
6011
+ struct pending_free *pf;
6012
+ unsigned long flags;
6013
+
6014
+ if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
6015
+ return;
6016
+
6017
+ raw_local_irq_save(flags);
6018
+ lockdep_lock();
6019
+
6020
+ /* closed head: the pf entry whose RCU grace period has completed */
6021
+ pf = delayed_free.pf + (delayed_free.index ^ 1);
6022
+ __free_zapped_classes(pf);
6023
+ delayed_free.scheduled = false;
6024
+
6025
+ /*
6026
+ * If there's anything on the open list, close and start a new callback.
6027
+ */
6028
+ call_rcu_zapped(delayed_free.pf + delayed_free.index);
6029
+
6030
+ lockdep_unlock();
6031
+ raw_local_irq_restore(flags);
6032
+}
6033
+
6034
+/*
6035
+ * Remove all lock classes from the class hash table and from the
6036
+ * all_lock_classes list whose key or name is in the address range [start,
6037
+ * start + size). Move these lock classes to the pf->zapped list. Must
6038
+ * be called with the graph lock held.
6039
+ */
6040
+static void __lockdep_free_key_range(struct pending_free *pf, void *start,
6041
+ unsigned long size)
6042
+{
6043
+ struct lock_class *class;
6044
+ struct hlist_head *head;
6045
+ int i;
6046
+
6047
+ /* Unhash all classes that were created by a module. */
6048
+ for (i = 0; i < CLASSHASH_SIZE; i++) {
6049
+ head = classhash_table + i;
6050
+ hlist_for_each_entry_rcu(class, head, hash_entry) {
6051
+ if (!within(class->key, start, size) &&
6052
+ !within(class->name, start, size))
6053
+ continue;
6054
+ zap_class(pf, class);
6055
+ }
6056
+ }
6057
+}
6058
+
42226059 /*
42236060 * Used in module.c to remove lock classes from memory that is going to be
42246061 * freed; and possibly re-used by other modules.
42256062 *
4226
- * We will have had one sync_sched() before getting here, so we're guaranteed
4227
- * nobody will look up these exact classes -- they're properly dead but still
4228
- * allocated.
6063
+ * We will have had one synchronize_rcu() before getting here, so we're
6064
+ * guaranteed nobody will look up these exact classes -- they're properly dead
6065
+ * but still allocated.
42296066 */
4230
-void lockdep_free_key_range(void *start, unsigned long size)
6067
+static void lockdep_free_key_range_reg(void *start, unsigned long size)
42316068 {
4232
- struct lock_class *class;
4233
- struct hlist_head *head;
6069
+ struct pending_free *pf;
42346070 unsigned long flags;
4235
- int i;
4236
- int locked;
6071
+
6072
+ init_data_structures_once();
42376073
42386074 raw_local_irq_save(flags);
4239
- locked = graph_lock();
4240
-
4241
- /*
4242
- * Unhash all classes that were created by this module:
4243
- */
4244
- for (i = 0; i < CLASSHASH_SIZE; i++) {
4245
- head = classhash_table + i;
4246
- hlist_for_each_entry_rcu(class, head, hash_entry) {
4247
- if (within(class->key, start, size))
4248
- zap_class(class);
4249
- else if (within(class->name, start, size))
4250
- zap_class(class);
4251
- }
4252
- }
4253
-
4254
- if (locked)
4255
- graph_unlock();
6075
+ lockdep_lock();
6076
+ pf = get_pending_free();
6077
+ __lockdep_free_key_range(pf, start, size);
6078
+ call_rcu_zapped(pf);
6079
+ lockdep_unlock();
42566080 raw_local_irq_restore(flags);
42576081
42586082 /*
42596083 * Wait for any possible iterators from look_up_lock_class() to pass
42606084 * before continuing to free the memory they refer to.
4261
- *
4262
- * sync_sched() is sufficient because the read-side is IRQ disable.
42636085 */
4264
- synchronize_sched();
4265
-
4266
- /*
4267
- * XXX at this point we could return the resources to the pool;
4268
- * instead we leak them. We would need to change to bitmap allocators
4269
- * instead of the linear allocators we have now.
4270
- */
6086
+ synchronize_rcu();
42716087 }
42726088
4273
-void lockdep_reset_lock(struct lockdep_map *lock)
6089
+/*
6090
+ * Free all lockdep keys in the range [start, start+size). Does not sleep.
6091
+ * Ignores debug_locks. Must only be used by the lockdep selftests.
6092
+ */
6093
+static void lockdep_free_key_range_imm(void *start, unsigned long size)
6094
+{
6095
+ struct pending_free *pf = delayed_free.pf;
6096
+ unsigned long flags;
6097
+
6098
+ init_data_structures_once();
6099
+
6100
+ raw_local_irq_save(flags);
6101
+ lockdep_lock();
6102
+ __lockdep_free_key_range(pf, start, size);
6103
+ __free_zapped_classes(pf);
6104
+ lockdep_unlock();
6105
+ raw_local_irq_restore(flags);
6106
+}
6107
+
6108
+void lockdep_free_key_range(void *start, unsigned long size)
6109
+{
6110
+ init_data_structures_once();
6111
+
6112
+ if (inside_selftest())
6113
+ lockdep_free_key_range_imm(start, size);
6114
+ else
6115
+ lockdep_free_key_range_reg(start, size);
6116
+}
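The _reg variant is the one module unload reaches; the _imm variant exists because the lockdep selftests run with IRQs disabled and cannot wait for an RCU grace period. An illustrative caller, roughly what the module unload path in kernel/module.c does (field names vary by kernel version):

static void hypothetical_free_module_classes(struct module *mod)
{
	/* Drop every lock class whose key or name lived in the module image. */
	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
}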
6117
+
6118
+/*
6119
+ * Check whether any element of the @lock->class_cache[] array refers to a
6120
+ * registered lock class. The caller must hold either the graph lock or the
6121
+ * RCU read lock.
6122
+ */
6123
+static bool lock_class_cache_is_registered(struct lockdep_map *lock)
42746124 {
42756125 struct lock_class *class;
42766126 struct hlist_head *head;
4277
- unsigned long flags;
42786127 int i, j;
4279
- int locked;
42806128
4281
- raw_local_irq_save(flags);
6129
+ for (i = 0; i < CLASSHASH_SIZE; i++) {
6130
+ head = classhash_table + i;
6131
+ hlist_for_each_entry_rcu(class, head, hash_entry) {
6132
+ for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
6133
+ if (lock->class_cache[j] == class)
6134
+ return true;
6135
+ }
6136
+ }
6137
+ return false;
6138
+}
6139
+
6140
+/* The caller must hold the graph lock. Does not sleep. */
6141
+static void __lockdep_reset_lock(struct pending_free *pf,
6142
+ struct lockdep_map *lock)
6143
+{
6144
+ struct lock_class *class;
6145
+ int j;
42826146
42836147 /*
42846148 * Remove all classes this lock might have:
....@@ -4289,38 +6153,110 @@
42896153 */
42906154 class = look_up_lock_class(lock, j);
42916155 if (class)
4292
- zap_class(class);
6156
+ zap_class(pf, class);
42936157 }
42946158 /*
42956159 * Debug check: in the end all mapped classes should
42966160 * be gone.
42976161 */
6162
+ if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
6163
+ debug_locks_off();
6164
+}
6165
+
6166
+/*
6167
+ * Remove all information lockdep has about a lock if debug_locks == 1. Free
6168
+ * released data structures from RCU context.
6169
+ */
6170
+static void lockdep_reset_lock_reg(struct lockdep_map *lock)
6171
+{
6172
+ struct pending_free *pf;
6173
+ unsigned long flags;
6174
+ int locked;
6175
+
6176
+ raw_local_irq_save(flags);
42986177 locked = graph_lock();
4299
- for (i = 0; i < CLASSHASH_SIZE; i++) {
4300
- head = classhash_table + i;
4301
- hlist_for_each_entry_rcu(class, head, hash_entry) {
4302
- int match = 0;
6178
+ if (!locked)
6179
+ goto out_irq;
43036180
4304
- for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4305
- match |= class == lock->class_cache[j];
6181
+ pf = get_pending_free();
6182
+ __lockdep_reset_lock(pf, lock);
6183
+ call_rcu_zapped(pf);
43066184
4307
- if (unlikely(match)) {
4308
- if (debug_locks_off_graph_unlock()) {
4309
- /*
4310
- * We all just reset everything, how did it match?
4311
- */
4312
- WARN_ON(1);
4313
- }
4314
- goto out_restore;
4315
- }
4316
- }
4317
- }
4318
- if (locked)
4319
- graph_unlock();
4320
-
4321
-out_restore:
6185
+ graph_unlock();
6186
+out_irq:
43226187 raw_local_irq_restore(flags);
43236188 }
6189
+
6190
+/*
6191
+ * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
6192
+ * lockdep selftests.
6193
+ */
6194
+static void lockdep_reset_lock_imm(struct lockdep_map *lock)
6195
+{
6196
+ struct pending_free *pf = delayed_free.pf;
6197
+ unsigned long flags;
6198
+
6199
+ raw_local_irq_save(flags);
6200
+ lockdep_lock();
6201
+ __lockdep_reset_lock(pf, lock);
6202
+ __free_zapped_classes(pf);
6203
+ lockdep_unlock();
6204
+ raw_local_irq_restore(flags);
6205
+}
6206
+
6207
+void lockdep_reset_lock(struct lockdep_map *lock)
6208
+{
6209
+ init_data_structures_once();
6210
+
6211
+ if (inside_selftest())
6212
+ lockdep_reset_lock_imm(lock);
6213
+ else
6214
+ lockdep_reset_lock_reg(lock);
6215
+}
6216
+
6217
+/*
6218
+ * Unregister a dynamically allocated key.
6219
+ *
6220
+ * Unlike lockdep_register_key(), a search is always done to find a matching
6221
+ * key irrespective of debug_locks to avoid potential invalid access to freed
6222
+ * memory in a lock_class entry.
6223
+ */
6224
+void lockdep_unregister_key(struct lock_class_key *key)
6225
+{
6226
+ struct hlist_head *hash_head = keyhashentry(key);
6227
+ struct lock_class_key *k;
6228
+ struct pending_free *pf;
6229
+ unsigned long flags;
6230
+ bool found = false;
6231
+
6232
+ might_sleep();
6233
+
6234
+ if (WARN_ON_ONCE(static_obj(key)))
6235
+ return;
6236
+
6237
+ raw_local_irq_save(flags);
6238
+ lockdep_lock();
6239
+
6240
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
6241
+ if (k == key) {
6242
+ hlist_del_rcu(&k->hash_entry);
6243
+ found = true;
6244
+ break;
6245
+ }
6246
+ }
6247
+ WARN_ON_ONCE(!found && debug_locks);
6248
+ if (found) {
6249
+ pf = get_pending_free();
6250
+ __lockdep_free_key_range(pf, key, 1);
6251
+ call_rcu_zapped(pf);
6252
+ }
6253
+ lockdep_unlock();
6254
+ raw_local_irq_restore(flags);
6255
+
6256
+ /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
6257
+ synchronize_rcu();
6258
+}
6259
+EXPORT_SYMBOL_GPL(lockdep_unregister_key);
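A typical lifecycle for a dynamically allocated key, sketched with a hypothetical my_obj structure: register the key before the lock it classifies is first used, and unregister it (which may sleep, because of the final synchronize_rcu()) before freeing the memory that holds it.

struct my_obj {
	spinlock_t		lock;
	struct lock_class_key	key;
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	lockdep_register_key(&obj->key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	/* May sleep: lockdep_unregister_key() ends with synchronize_rcu(). */
	lockdep_unregister_key(&obj->key);
	kfree(obj);
}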
43246260
43256261 void __init lockdep_init(void)
43266262 {
....@@ -4334,20 +6270,31 @@
43346270 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
43356271 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
43366272
4337
- printk(" memory used by lock dependency info: %lu kB\n",
4338
- (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
4339
- sizeof(struct list_head) * CLASSHASH_SIZE +
4340
- sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
4341
- sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
4342
- sizeof(struct list_head) * CHAINHASH_SIZE
6273
+ printk(" memory used by lock dependency info: %zu kB\n",
6274
+ (sizeof(lock_classes) +
6275
+ sizeof(lock_classes_in_use) +
6276
+ sizeof(classhash_table) +
6277
+ sizeof(list_entries) +
6278
+ sizeof(list_entries_in_use) +
6279
+ sizeof(chainhash_table) +
6280
+ sizeof(delayed_free)
43436281 #ifdef CONFIG_PROVE_LOCKING
4344
- + sizeof(struct circular_queue)
6282
+ + sizeof(lock_cq)
6283
+ + sizeof(lock_chains)
6284
+ + sizeof(lock_chains_in_use)
6285
+ + sizeof(chain_hlocks)
43456286 #endif
43466287 ) / 1024
43476288 );
43486289
4349
- printk(" per task-struct memory footprint: %lu bytes\n",
4350
- sizeof(struct held_lock) * MAX_LOCK_DEPTH);
6290
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
6291
+ printk(" memory used for stack traces: %zu kB\n",
6292
+ (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
6293
+ );
6294
+#endif
6295
+
6296
+ printk(" per task-struct memory footprint: %zu bytes\n",
6297
+ sizeof(((struct task_struct *)NULL)->held_locks));
43516298 }
43526299
43536300 static void
....@@ -4515,9 +6462,7 @@
45156462 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
45166463 !rcu_lockdep_current_cpu_online()
45176464 ? "RCU used illegally from offline CPU!\n"
4518
- : !rcu_is_watching()
4519
- ? "RCU used illegally from idle CPU!\n"
4520
- : "",
6465
+ : "",
45216466 rcu_scheduler_active, debug_locks);
45226467
45236468 /*