hc
2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/include/linux/spinlock_types.h
....@@ -43,9 +43,15 @@
4343 .name = #lockname, \
4444 .wait_type_inner = LD_WAIT_CONFIG, \
4545 }
46
+# define HARD_SPIN_DEP_MAP_INIT(lockname) \
47
+ .dep_map = { \
48
+ .name = #lockname, \
49
+ .wait_type_inner = LD_WAIT_INV, \
50
+ }
4651 #else
4752 # define RAW_SPIN_DEP_MAP_INIT(lockname)
4853 # define SPIN_DEP_MAP_INIT(lockname)
54
+# define HARD_SPIN_DEP_MAP_INIT(lockname)
4955 #endif
5056
5157 #ifdef CONFIG_DEBUG_SPINLOCK
....@@ -96,6 +102,154 @@
96102
97103 #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
98104
105
+#ifdef CONFIG_IRQ_PIPELINE
106
+
107
+void __bad_spinlock_type(void);
108
+
109
+#define __RAWLOCK(x) ((struct raw_spinlock *)(x))
110
+
111
+#define LOCK_ALTERNATIVES(__lock, __base_op, __raw_form, __args...) \
112
+ do { \
113
+ if (__builtin_types_compatible_p(typeof(__lock), \
114
+ raw_spinlock_t *)) \
115
+ __raw_form; \
116
+ else if (__builtin_types_compatible_p(typeof(__lock), \
117
+ hard_spinlock_t *)) \
118
+ hard_ ## __base_op(__RAWLOCK(__lock), ##__args); \
119
+ else if (__builtin_types_compatible_p(typeof(__lock), \
120
+ hybrid_spinlock_t *)) \
121
+ hybrid_ ## __base_op(__RAWLOCK(__lock), ##__args); \
122
+ else \
123
+ __bad_spinlock_type(); \
124
+ } while (0)
125
+
126
+#define LOCK_ALTERNATIVES_RET(__lock, __base_op, __raw_form, __args...) \
127
+ ({ \
128
+ long __ret = 0; \
129
+ if (__builtin_types_compatible_p(typeof(__lock), \
130
+ raw_spinlock_t *)) \
131
+ __ret = __raw_form; \
132
+ else if (__builtin_types_compatible_p(typeof(__lock), \
133
+ hard_spinlock_t *)) \
134
+ __ret = hard_ ## __base_op(__RAWLOCK(__lock), ##__args); \
135
+ else if (__builtin_types_compatible_p(typeof(__lock), \
136
+ hybrid_spinlock_t *)) \
137
+ __ret = hybrid_ ## __base_op(__RAWLOCK(__lock), ##__args); \
138
+ else \
139
+ __bad_spinlock_type(); \
140
+ __ret; \
141
+ })
142
+
143
+#define LOCKDEP_ALT_DEPMAP(__lock) \
144
+ ({ \
145
+ struct lockdep_map *__ret; \
146
+ if (__builtin_types_compatible_p(typeof(&(__lock)->dep_map), \
147
+ struct phony_lockdep_map *)) \
148
+ __ret = &__RAWLOCK(__lock)->dep_map; \
149
+ else \
150
+ __ret = (struct lockdep_map *)(&(__lock)->dep_map); \
151
+ __ret; \
152
+ })
153
+
154
+#define LOCKDEP_HARD_DEBUG(__lock, __nodebug, __debug) \
155
+ do { \
156
+ if (__builtin_types_compatible_p(typeof(__lock), \
157
+ raw_spinlock_t *) || \
158
+ irq_pipeline_debug_locking()) { \
159
+ __debug; \
160
+ } else { \
161
+ __nodebug; \
162
+ } \
163
+ } while (0)
164
+
165
+#define LOCKDEP_HARD_DEBUG_RET(__lock, __nodebug, __debug) \
166
+ ({ \
167
+ typeof(__nodebug) __ret; \
168
+ if (__builtin_types_compatible_p(typeof(__lock), \
169
+ raw_spinlock_t *) || \
170
+ irq_pipeline_debug_locking()) { \
171
+ __ret = (__debug); \
172
+ } else { \
173
+ __ret = (__nodebug); \
174
+ } \
175
+ __ret; \
176
+ })
177
+
178
+#define __HARD_SPIN_LOCK_INITIALIZER(x) { \
179
+ .rlock = { \
180
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
181
+ SPIN_DEBUG_INIT(x) \
182
+ HARD_SPIN_DEP_MAP_INIT(x) \
183
+ } \
184
+ }
185
+
186
+#define __HARD_SPIN_LOCK_UNLOCKED(x) \
187
+ (hard_spinlock_t) __HARD_SPIN_LOCK_INITIALIZER(x)
188
+
189
+#define DEFINE_HARD_SPINLOCK(x) hard_spinlock_t x = __HARD_SPIN_LOCK_UNLOCKED(x)
190
+
191
+#define DECLARE_HARD_SPINLOCK(x) hard_spinlock_t x
192
+
193
+/*
194
+ * The presence of a phony depmap is tested by LOCKDEP_ALT_DEPMAP() to
195
+ * locate the real depmap without enumerating every spinlock type
196
+ * which may contain one.
197
+ */
198
+struct phony_lockdep_map { };
199
+
200
+typedef struct hard_spinlock {
201
+ /* XXX: offset_of(struct hard_spinlock, rlock) == 0 */
202
+ struct raw_spinlock rlock;
203
+ struct phony_lockdep_map dep_map;
204
+} hard_spinlock_t;
205
+
206
+#define DEFINE_MUTABLE_SPINLOCK(x) hybrid_spinlock_t x = { \
207
+ .rlock = __RAW_SPIN_LOCK_UNLOCKED(x), \
208
+ }
209
+
210
+#define DECLARE_MUTABLE_SPINLOCK(x) hybrid_spinlock_t x
211
+
212
+typedef struct hybrid_spinlock {
213
+ /* XXX: offset_of(struct hybrid_spinlock, rlock) == 0 */
214
+ struct raw_spinlock rlock;
215
+ unsigned long hwflags;
216
+ struct phony_lockdep_map dep_map;
217
+} hybrid_spinlock_t;
218
+
219
+#else
220
+
221
+typedef raw_spinlock_t hard_spinlock_t;
222
+
223
+typedef raw_spinlock_t hybrid_spinlock_t;
224
+
225
+#define LOCK_ALTERNATIVES(__lock, __base_op, __raw_form, __args...) \
226
+ __raw_form
227
+
228
+#define LOCK_ALTERNATIVES_RET(__lock, __base_op, __raw_form, __args...) \
229
+ __raw_form
230
+
231
+#define LOCKDEP_ALT_DEPMAP(__lock) (&(__lock)->dep_map)
232
+
233
+#define LOCKDEP_HARD_DEBUG(__lock, __nodebug, __debug) do { __debug; } while (0)
234
+
235
+#define LOCKDEP_HARD_DEBUG_RET(__lock, __nodebug, __debug) ({ __debug; })
236
+
237
+#define DEFINE_HARD_SPINLOCK(x) DEFINE_RAW_SPINLOCK(x)
238
+
239
+#define DECLARE_HARD_SPINLOCK(x) raw_spinlock_t x
240
+
241
+#define DEFINE_MUTABLE_SPINLOCK(x) DEFINE_RAW_SPINLOCK(x)
242
+
243
+#define DECLARE_MUTABLE_SPINLOCK(x) raw_spinlock_t x
244
+
245
+#define __RAWLOCK(x) (x)
246
+
247
+#define __HARD_SPIN_LOCK_UNLOCKED(__lock) __RAW_SPIN_LOCK_UNLOCKED(__lock)
248
+
249
+#define __HARD_SPIN_LOCK_INITIALIZER(__lock) __RAW_SPIN_LOCK_UNLOCKED(__lock)
250
+
251
+#endif /* CONFIG_IRQ_PIPELINE */
252
+
99253 #include <linux/rwlock_types.h>
100254
101255 #endif /* __LINUX_SPINLOCK_TYPES_H */