hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/spinlock.h
@@ -56,7 +56,9 @@
 #include <linux/kernel.h>
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
+#include <linux/lockdep.h>
 #include <asm/barrier.h>
+#include <asm/mmiowb.h>
 
 
 /*
@@ -74,7 +76,7 @@
 #define LOCK_SECTION_END \
 	".previous\n\t"
 
-#define __lockfunc __attribute__((section(".spinlock.text")))
+#define __lockfunc __section(".spinlock.text")
 
 /*
  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
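
Note: the __lockfunc hunk above swaps the open-coded GCC attribute for the kernel's __section() helper; both spellings place the lock primitives in the dedicated .spinlock.text section, which the kernel uses (e.g. via in_lock_functions()) to recognize lock code in backtraces. A minimal sketch, assuming the upstream helper from include/linux/compiler_attributes.h:

    /* Sketch of the helper the new spelling relies on: */
    #define __section(section)	__attribute__((__section__(section)))

    /* The old and new definitions of __lockfunc are thus equivalent;
     * functions marked with it land in .spinlock.text: */
    void __lockfunc _raw_spin_lock(raw_spinlock_t *lock);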
@@ -92,12 +94,13 @@
 
 #ifdef CONFIG_DEBUG_SPINLOCK
   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
-				   struct lock_class_key *key);
-# define raw_spin_lock_init(lock)				\
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	__raw_spin_lock_init((lock), #lock, &__key);		\
+				   struct lock_class_key *key, short inner);
+
+# define raw_spin_lock_init(lock)				\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);\
 } while (0)
 
 #else
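
Note: the new 'short inner' parameter in the hunk above records the lock's lockdep wait type. Raw spinlocks register as LD_WAIT_SPIN (they always spin), while the spinlock_t init further down uses LD_WAIT_CONFIG (spinning or sleeping depending on the preemption model, i.e. PREEMPT_RT). With the wait type recorded, lockdep (under CONFIG_PROVE_RAW_LOCK_NESTING) can flag a heavier lock acquired inside a raw spinlock. An illustrative case of what this is meant to catch, with invented lock names:

    static DEFINE_RAW_SPINLOCK(hw_lock);	/* inner == LD_WAIT_SPIN */
    static DEFINE_SPINLOCK(data_lock);		/* inner == LD_WAIT_CONFIG */

    static void bad_nesting(void)
    {
    	raw_spin_lock(&hw_lock);
    	/* On PREEMPT_RT a spinlock_t may sleep, so this nesting is an
    	 * invalid wait context and lockdep reports it: */
    	spin_lock(&data_lock);
    	spin_unlock(&data_lock);
    	raw_spin_unlock(&hw_lock);
    }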
@@ -178,6 +181,7 @@
 {
 	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 }
 
 #ifndef arch_spin_lock_flags
@@ -189,15 +193,22 @@
 {
 	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
+	mmiowb_spin_lock();
 }
 
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
-	return arch_spin_trylock(&(lock)->raw_lock);
+	int ret = arch_spin_trylock(&(lock)->raw_lock);
+
+	if (ret)
+		mmiowb_spin_lock();
+
+	return ret;
 }
 
 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
+	mmiowb_spin_unlock();
 	arch_spin_unlock(&lock->raw_lock);
 	__release(lock);
 }
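
Note: the hooks added in the two hunks above fold MMIO write ordering into the lock operations themselves. mmiowb_spin_lock() marks entry into a spinlocked region; if an MMIO write is performed under the lock, the architecture's I/O accessors mark it pending, and mmiowb_spin_unlock() issues the mmiowb barrier before the lock is dropped. On architectures that do not need mmiowb, both hooks compile away. The practical effect is that drivers can drop their explicit mmiowb() calls; an illustrative driver pattern, with invented names (struct mydev, REG_DOORBELL):

    static void mydev_ring_doorbell(struct mydev *dev, u32 val)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&dev->lock, flags);
    	writel(val, dev->base + REG_DOORBELL);
    	/* No explicit mmiowb() needed: do_raw_spin_unlock() issues the
    	 * barrier iff an MMIO write happened inside the critical section. */
    	spin_unlock_irqrestore(&dev->lock, flags);
    }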
@@ -205,7 +216,7 @@
 
 /*
  * Define the various spin_lock methods.  Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
  * various methods are defined as nops in the case they are not
  * required.
  */
@@ -318,12 +329,26 @@
 	return &lock->rlock;
 }
 
-#define spin_lock_init(_lock)				\
-do {							\
-	spinlock_check(_lock);				\
-	raw_spin_lock_init(&(_lock)->rlock);		\
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+# define spin_lock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__raw_spin_lock_init(spinlock_check(lock),		\
+			     #lock, &__key, LD_WAIT_CONFIG);	\
 } while (0)
 
+#else
+
+# define spin_lock_init(_lock)			\
+do {						\
+	spinlock_check(_lock);			\
+	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
+} while (0)
+
+#endif
+
 static __always_inline void spin_lock(spinlock_t *lock)
 {
 	raw_spin_lock(&lock->rlock);
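
Note: two behavioural points in the reworked spin_lock_init() above. Under CONFIG_DEBUG_SPINLOCK it now registers the lockdep class directly with LD_WAIT_CONFIG, matching spinlock_t's spin-or-sleep semantics; without it, the lock is fully reinitialized via __SPIN_LOCK_UNLOCKED() rather than merely type-checked. Call sites are unchanged; an illustrative init site, with invented names:

    struct mydev_state {
    	spinlock_t	lock;
    	unsigned int	flags;
    };

    static void mydev_state_init(struct mydev_state *s)
    {
    	/* One static lock_class_key per invocation site when
    	 * CONFIG_DEBUG_SPINLOCK is enabled; a plain re-init otherwise. */
    	spin_lock_init(&s->lock);
    	s->flags = 0;
    }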