| .. | .. |
|---|
| 19 | 19 | #include "lockdep_states.h" |
|---|
| 20 | 20 | #undef LOCKDEP_STATE |
|---|
| 21 | 21 | LOCK_USED, |
|---|
| 22 | | - LOCK_USAGE_STATES |
|---|
| 22 | + LOCK_USED_READ, |
|---|
| 23 | + LOCK_USAGE_STATES, |
|---|
| 23 | 24 | }; |
|---|
| 25 | + |
|---|
| 26 | +/* states after LOCK_USED_READ are not traced and printed */ |
|---|
| 27 | +static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES); |
|---|
| 28 | + |
|---|
| 29 | +#define LOCK_USAGE_READ_MASK 1 |
|---|
| 30 | +#define LOCK_USAGE_DIR_MASK 2 |
|---|
| 31 | +#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK)) |
|---|
| 24 | 32 | |
|---|
| 25 | 33 | /* |
|---|
| 26 | 34 | * Usage-state bitmasks: |
|---|
| .. | .. |
|---|
| 36 | 44 | #include "lockdep_states.h" |
|---|
| 37 | 45 | #undef LOCKDEP_STATE |
|---|
| 38 | 46 | __LOCKF(USED) |
|---|
| 47 | + __LOCKF(USED_READ) |
|---|
| 39 | 48 | }; |
|---|
| 40 | 49 | |
|---|
| 41 | | -#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ) |
|---|
| 42 | | -#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) |
|---|
| 50 | +#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE | |
|---|
| 51 | +static const unsigned long LOCKF_ENABLED_IRQ = |
|---|
| 52 | +#include "lockdep_states.h" |
|---|
| 53 | + 0; |
|---|
| 54 | +#undef LOCKDEP_STATE |
|---|
| 43 | 55 | |
|---|
| 44 | | -#define LOCKF_ENABLED_IRQ_READ \ |
|---|
| 45 | | - (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ) |
|---|
| 46 | | -#define LOCKF_USED_IN_IRQ_READ \ |
|---|
| 47 | | - (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) |
|---|
| 56 | +#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE | |
|---|
| 57 | +static const unsigned long LOCKF_USED_IN_IRQ = |
|---|
| 58 | +#include "lockdep_states.h" |
|---|
| 59 | + 0; |
|---|
| 60 | +#undef LOCKDEP_STATE |
|---|
| 61 | + |
|---|
| 62 | +#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE##_READ | |
|---|
| 63 | +static const unsigned long LOCKF_ENABLED_IRQ_READ = |
|---|
| 64 | +#include "lockdep_states.h" |
|---|
| 65 | + 0; |
|---|
| 66 | +#undef LOCKDEP_STATE |
|---|
| 67 | + |
|---|
| 68 | +#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE##_READ | |
|---|
| 69 | +static const unsigned long LOCKF_USED_IN_IRQ_READ = |
|---|
| 70 | +#include "lockdep_states.h" |
|---|
| 71 | + 0; |
|---|
| 72 | +#undef LOCKDEP_STATE |
|---|
| 73 | + |
|---|
| 74 | +#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ) |
|---|
| 75 | +#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ) |
|---|
| 76 | + |
|---|
| 77 | +#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ) |
|---|
| 78 | +#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ) |
|---|
| 48 | 79 | |
|---|
| 49 | 80 | /* |
|---|
| 50 | 81 | * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text, |
|---|
| .. | .. |
|---|
| 66 | 97 | #define MAX_LOCKDEP_ENTRIES 16384UL |
|---|
| 67 | 98 | #define MAX_LOCKDEP_CHAINS_BITS 15 |
|---|
| 68 | 99 | #define MAX_STACK_TRACE_ENTRIES 262144UL |
|---|
| 100 | +#define STACK_TRACE_HASH_SIZE 8192 |
|---|
| 69 | 101 | #else |
|---|
| 70 | | -#define MAX_LOCKDEP_ENTRIES 32768UL |
|---|
| 102 | +#define MAX_LOCKDEP_ENTRIES (1UL << CONFIG_LOCKDEP_BITS) |
|---|
| 71 | 103 | |
|---|
| 72 | | -#define MAX_LOCKDEP_CHAINS_BITS 16 |
|---|
| 104 | +#define MAX_LOCKDEP_CHAINS_BITS CONFIG_LOCKDEP_CHAINS_BITS |
|---|
| 73 | 105 | |
|---|
| 74 | 106 | /* |
|---|
| 75 | 107 | * Stack-trace: tightly packed array of stack backtrace |
|---|
| 76 | 108 | * addresses. Protected by the hash_lock. |
|---|
| 77 | 109 | */ |
|---|
| 78 | | -#define MAX_STACK_TRACE_ENTRIES 524288UL |
|---|
| 110 | +#define MAX_STACK_TRACE_ENTRIES (1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS) |
|---|
| 111 | +#define STACK_TRACE_HASH_SIZE (1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS) |
|---|
| 79 | 112 | #endif |
|---|
| 113 | + |
|---|
| 114 | +/* |
|---|
| 115 | + * Bit definitions for lock_chain.irq_context |
|---|
| 116 | + */ |
|---|
| 117 | +#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0) |
|---|
| 118 | +#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1) |
|---|
| 80 | 119 | |
|---|
| 81 | 120 | #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) |
|---|
| 82 | 121 | |
|---|
| 83 | 122 | #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) |
|---|
| 84 | 123 | |
|---|
| 85 | | -extern struct list_head all_lock_classes; |
|---|
| 86 | 124 | extern struct lock_chain lock_chains[]; |
|---|
| 87 | 125 | |
|---|
| 88 | | -#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2) |
|---|
| 126 | +#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1) |
|---|
| 89 | 127 | |
|---|
| 90 | 128 | extern void get_usage_chars(struct lock_class *class, |
|---|
| 91 | 129 | char usage[LOCK_USAGE_CHARS]); |
|---|
| 92 | 130 | |
|---|
| 93 | | -extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str); |
|---|
| 131 | +extern const char *__get_key_name(const struct lockdep_subclass_key *key, |
|---|
| 132 | + char *str); |
|---|
| 94 | 133 | |
|---|
| 95 | 134 | struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i); |
|---|
| 96 | 135 | |
|---|
| 97 | 136 | extern unsigned long nr_lock_classes; |
|---|
| 137 | +extern unsigned long nr_zapped_classes; |
|---|
| 138 | +extern unsigned long nr_zapped_lock_chains; |
|---|
| 98 | 139 | extern unsigned long nr_list_entries; |
|---|
| 99 | | -extern unsigned long nr_lock_chains; |
|---|
| 100 | | -extern int nr_chain_hlocks; |
|---|
| 140 | +long lockdep_next_lockchain(long i); |
|---|
| 141 | +unsigned long lock_chain_count(void); |
|---|
| 101 | 142 | extern unsigned long nr_stack_trace_entries; |
|---|
| 102 | 143 | |
|---|
| 103 | 144 | extern unsigned int nr_hardirq_chains; |
|---|
| 104 | 145 | extern unsigned int nr_softirq_chains; |
|---|
| 105 | 146 | extern unsigned int nr_process_chains; |
|---|
| 106 | | -extern unsigned int max_lockdep_depth; |
|---|
| 107 | | -extern unsigned int max_recursion_depth; |
|---|
| 147 | +extern unsigned int nr_free_chain_hlocks; |
|---|
| 148 | +extern unsigned int nr_lost_chain_hlocks; |
|---|
| 149 | +extern unsigned int nr_large_chain_blocks; |
|---|
| 108 | 150 | |
|---|
| 151 | +extern unsigned int max_lockdep_depth; |
|---|
| 109 | 152 | extern unsigned int max_bfs_queue_depth; |
|---|
| 153 | +extern unsigned long max_lock_class_idx; |
|---|
| 154 | + |
|---|
| 155 | +extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; |
|---|
| 156 | +extern unsigned long lock_classes_in_use[]; |
|---|
| 110 | 157 | |
|---|
| 111 | 158 | #ifdef CONFIG_PROVE_LOCKING |
|---|
| 112 | 159 | extern unsigned long lockdep_count_forward_deps(struct lock_class *); |
|---|
| 113 | 160 | extern unsigned long lockdep_count_backward_deps(struct lock_class *); |
|---|
| 161 | +#ifdef CONFIG_TRACE_IRQFLAGS |
|---|
| 162 | +u64 lockdep_stack_trace_count(void); |
|---|
| 163 | +u64 lockdep_stack_hash_count(void); |
|---|
| 164 | +#endif |
|---|
| 114 | 165 | #else |
|---|
| 115 | 166 | static inline unsigned long |
|---|
| 116 | 167 | lockdep_count_forward_deps(struct lock_class *class) |
|---|
| .. | .. |
|---|
| 133 | 184 | * and we want to avoid too much cache bouncing. |
|---|
| 134 | 185 | */ |
|---|
/*
 * Per-cpu lockdep statistics, folded together in the debug_atomic_read()
 * style accessors below.
 * NOTE(review): counter widths are mixed on purpose in the upstream
 * patch this comes from — high-frequency event counters were widened to
 * unsigned long, check counters stay unsigned int, and nr_unused_locks
 * remains a signed int (presumably because it is also decremented);
 * confirm against the lockdep core before changing any width.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};
|---|
| 156 | 209 | |
|---|
| 157 | 210 | DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats); |
|---|
| .. | .. |
|---|
| 179 | 232 | } \ |
|---|
| 180 | 233 | __total; \ |
|---|
| 181 | 234 | }) |
|---|
| 235 | + |
|---|
/*
 * Bump this CPU's operation count for @class.
 *
 * idx is @class's index within the static lock_classes[] array (derived
 * by pointer subtraction), used to address lockdep_stats.lock_class_ops[].
 * NOTE(review): assumes @class points into lock_classes[]; the increment
 * goes through __debug_atomic_inc(), which is a no-op when
 * CONFIG_DEBUG_LOCKDEP is not set (see the #else branch below).
 */
static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}
|---|
| 243 | + |
|---|
/*
 * Return the total operation count for @class, summed across all
 * possible CPUs' lockdep_stats.lock_class_ops[] slots.
 *
 * NOTE(review): assumes @class points into lock_classes[] (idx comes
 * from pointer subtraction). The sum is not atomic with respect to
 * concurrent increments — a momentarily stale total is acceptable for
 * statistics reporting.
 */
static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}
|---|
| 254 | + |
|---|
| 182 | 255 | #else |
|---|
| 183 | 256 | # define __debug_atomic_inc(ptr) do { } while (0) |
|---|
| 184 | 257 | # define debug_atomic_inc(ptr) do { } while (0) |
|---|
| 185 | 258 | # define debug_atomic_dec(ptr) do { } while (0) |
|---|
| 186 | 259 | # define debug_atomic_read(ptr) 0 |
|---|
| 260 | +# define debug_class_ops_inc(ptr) do { } while (0) |
|---|
| 187 | 261 | #endif |
|---|