2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/locking/lockdep_internals.h
@@ -19,8 +19,16 @@
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
 	LOCK_USED,
-	LOCK_USAGE_STATES
+	LOCK_USED_READ,
+	LOCK_USAGE_STATES,
 };
+
+/* states after LOCK_USED_READ are not traced and printed */
+static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);
+
+#define LOCK_USAGE_READ_MASK 1
+#define LOCK_USAGE_DIR_MASK 2
+#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
 
 /*
  * Usage-state bitmasks:
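The three masks decode the enum above: the per-state block generated by
lockdep_states.h lays out four consecutive bits per state (USED_IN,
USED_IN_READ, ENABLED, ENABLED_READ), so bit 0 of a usage-bit value selects
the _READ variant, bit 1 the direction, and the remaining bits the state.
A minimal decoding sketch; the helper names are illustrative, not part of
the patch:

	/* Hypothetical decoders for an enum lock_usage_bit value. */
	static inline int usage_bit_is_read(int bit)
	{
		return bit & LOCK_USAGE_READ_MASK;	/* _READ variant? */
	}

	static inline int usage_bit_is_enabled(int bit)
	{
		return bit & LOCK_USAGE_DIR_MASK;	/* ENABLED vs. USED_IN */
	}

	static inline int usage_bit_state(int bit)
	{
		/* index of the LOCKDEP_STATE the bit belongs to */
		return (bit & LOCK_USAGE_STATE_MASK) >> 2;
	}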
@@ -36,15 +44,38 @@
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
 	__LOCKF(USED)
+	__LOCKF(USED_READ)
 };
 
-#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
+static const unsigned long LOCKF_ENABLED_IRQ =
+#include "lockdep_states.h"
+	0;
+#undef LOCKDEP_STATE
 
-#define LOCKF_ENABLED_IRQ_READ \
-		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
-#define LOCKF_USED_IN_IRQ_READ \
-		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
+static const unsigned long LOCKF_USED_IN_IRQ =
+#include "lockdep_states.h"
+	0;
+#undef LOCKDEP_STATE
+
+#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
+static const unsigned long LOCKF_ENABLED_IRQ_READ =
+#include "lockdep_states.h"
+	0;
+#undef LOCKDEP_STATE
+
+#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
+static const unsigned long LOCKF_USED_IN_IRQ_READ =
+#include "lockdep_states.h"
+	0;
+#undef LOCKDEP_STATE
+
+#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
+#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)
+
+#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
+#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
 
 /*
  * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
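The open-coded masks being removed only knew about HARDIRQ and SOFTIRQ; the
replacement derives each mask from whatever states lockdep_states.h lists,
via the X-macro include trick. Assuming that file contains
LOCKDEP_STATE(HARDIRQ) and LOCKDEP_STATE(SOFTIRQ), the first block above
expands to the equivalent of:

	/* Preprocessor expansion sketch of the LOCKF_ENABLED_IRQ block. */
	static const unsigned long LOCKF_ENABLED_IRQ =
		LOCKF_ENABLED_HARDIRQ |
		LOCKF_ENABLED_SOFTIRQ |
		0;	/* the literal 0 terminates the '|' chain */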
@@ -66,51 +97,71 @@
 #define MAX_LOCKDEP_ENTRIES	16384UL
 #define MAX_LOCKDEP_CHAINS_BITS	15
 #define MAX_STACK_TRACE_ENTRIES	262144UL
+#define STACK_TRACE_HASH_SIZE	8192
 #else
-#define MAX_LOCKDEP_ENTRIES	32768UL
+#define MAX_LOCKDEP_ENTRIES	(1UL << CONFIG_LOCKDEP_BITS)
 
-#define MAX_LOCKDEP_CHAINS_BITS	16
+#define MAX_LOCKDEP_CHAINS_BITS	CONFIG_LOCKDEP_CHAINS_BITS
 
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
-#define MAX_STACK_TRACE_ENTRIES	524288UL
+#define MAX_STACK_TRACE_ENTRIES	(1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
+#define STACK_TRACE_HASH_SIZE	(1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
 #endif
+
+/*
+ * Bit definitions for lock_chain.irq_context
+ */
+#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
+#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)
 
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 #define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
 
-extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
-#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
+#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
 
 extern void get_usage_chars(struct lock_class *class,
 			    char usage[LOCK_USAGE_CHARS]);
 
-extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
+extern const char *__get_key_name(const struct lockdep_subclass_key *key,
+				  char *str);
 
 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);
 
 extern unsigned long nr_lock_classes;
+extern unsigned long nr_zapped_classes;
+extern unsigned long nr_zapped_lock_chains;
 extern unsigned long nr_list_entries;
-extern unsigned long nr_lock_chains;
-extern int nr_chain_hlocks;
+long lockdep_next_lockchain(long i);
+unsigned long lock_chain_count(void);
 extern unsigned long nr_stack_trace_entries;
 
 extern unsigned int nr_hardirq_chains;
 extern unsigned int nr_softirq_chains;
 extern unsigned int nr_process_chains;
-extern unsigned int max_lockdep_depth;
-extern unsigned int max_recursion_depth;
+extern unsigned int nr_free_chain_hlocks;
+extern unsigned int nr_lost_chain_hlocks;
+extern unsigned int nr_large_chain_blocks;
 
+extern unsigned int max_lockdep_depth;
 extern unsigned int max_bfs_queue_depth;
+extern unsigned long max_lock_class_idx;
+
+extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+extern unsigned long lock_classes_in_use[];
 
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+#ifdef CONFIG_TRACE_IRQFLAGS
+u64 lockdep_stack_trace_count(void);
+u64 lockdep_stack_hash_count(void);
+#endif
 #else
 static inline unsigned long
 lockdep_count_forward_deps(struct lock_class *class)
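The fixed table sizes become Kconfig-tunable in the non-SMALL case. Assuming
the defaults the corresponding options ship with (LOCKDEP_BITS=15,
LOCKDEP_CHAINS_BITS=16, LOCKDEP_STACK_TRACE_BITS=19), the computed limits
reproduce the constants being removed:

	/* Sanity sketch, assuming the default Kconfig values named above. */
	static_assert((1UL << 15) == 32768UL);	/* old MAX_LOCKDEP_ENTRIES */
	static_assert((1UL << 19) == 524288UL);	/* old MAX_STACK_TRACE_ENTRIES */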
@@ -133,25 +184,27 @@
  * and we want to avoid too much cache bouncing.
  */
 struct lockdep_stats {
-	int	chain_lookup_hits;
-	int	chain_lookup_misses;
-	int	hardirqs_on_events;
-	int	hardirqs_off_events;
-	int	redundant_hardirqs_on;
-	int	redundant_hardirqs_off;
-	int	softirqs_on_events;
-	int	softirqs_off_events;
-	int	redundant_softirqs_on;
-	int	redundant_softirqs_off;
-	int	nr_unused_locks;
-	int	nr_redundant_checks;
-	int	nr_redundant;
-	int	nr_cyclic_checks;
-	int	nr_cyclic_check_recursions;
-	int	nr_find_usage_forwards_checks;
-	int	nr_find_usage_forwards_recursions;
-	int	nr_find_usage_backwards_checks;
-	int	nr_find_usage_backwards_recursions;
+	unsigned long  chain_lookup_hits;
+	unsigned int   chain_lookup_misses;
+	unsigned long  hardirqs_on_events;
+	unsigned long  hardirqs_off_events;
+	unsigned long  redundant_hardirqs_on;
+	unsigned long  redundant_hardirqs_off;
+	unsigned long  softirqs_on_events;
+	unsigned long  softirqs_off_events;
+	unsigned long  redundant_softirqs_on;
+	unsigned long  redundant_softirqs_off;
+	int            nr_unused_locks;
+	unsigned int   nr_redundant_checks;
+	unsigned int   nr_redundant;
+	unsigned int   nr_cyclic_checks;
+	unsigned int   nr_find_usage_forwards_checks;
+	unsigned int   nr_find_usage_backwards_checks;
+
+	/*
+	 * Per lock class locking operation stat counts
+	 */
+	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
 };
 
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
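The per-class array is the big addition here: lockdep_stats is a per-CPU
structure, so the counters stay cache-local on the hot path at the cost of
static storage. Assuming the usual MAX_LOCKDEP_KEYS of 8192 (1 << 13) and
8-byte longs, that is 64 KiB of counter space per possible CPU:

	/* Back-of-envelope, assuming MAX_LOCKDEP_KEYS == 8192, 64-bit longs. */
	static_assert(8192 * sizeof(unsigned long) == 64 * 1024);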
@@ -179,9 +232,30 @@
 	}								\
 	__total;							\
 })
+
+static inline void debug_class_ops_inc(struct lock_class *class)
+{
+	int idx;
+
+	idx = class - lock_classes;
+	__debug_atomic_inc(lock_class_ops[idx]);
+}
+
+static inline unsigned long debug_class_ops_read(struct lock_class *class)
+{
+	int idx, cpu;
+	unsigned long ops = 0;
+
+	idx = class - lock_classes;
+	for_each_possible_cpu(cpu)
+		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
+	return ops;
+}
+
 #else
 # define __debug_atomic_inc(ptr)	do { } while (0)
 # define debug_atomic_inc(ptr)		do { } while (0)
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
+# define debug_class_ops_inc(ptr)	do { } while (0)
 #endif
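debug_class_ops_inc() piggybacks on the existing __debug_atomic_inc() helper
(unchanged by this patch), which increments the named field in the current
CPU's lockdep_stats instance; debug_class_ops_read() then folds the per-CPU
slots back into one total. A hypothetical caller, say a
/proc/lockdep_stats-style dump, might read the total like this:

	/* Usage sketch; print_class_ops() is illustrative, not in the patch. */
	static void print_class_ops(struct lock_class *class)
	{
		printk(KERN_DEBUG "%s: %lu acquisitions\n",
		       class->name, debug_class_ops_read(class));
	}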