2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/include/linux/spinlock.h
@@ -56,7 +56,9 @@
 #include <linux/kernel.h>
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
+#include <linux/lockdep.h>
 #include <asm/barrier.h>
+#include <asm/mmiowb.h>


 /*
@@ -74,7 +76,7 @@
 #define LOCK_SECTION_END \
 	".previous\n\t"

-#define __lockfunc __attribute__((section(".spinlock.text")))
+#define __lockfunc __section(".spinlock.text")

 /*
  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
@@ -92,12 +94,13 @@

 #ifdef CONFIG_DEBUG_SPINLOCK
   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
-				   struct lock_class_key *key);
-# define raw_spin_lock_init(lock)				\
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	__raw_spin_lock_init((lock), #lock, &__key);		\
+				   struct lock_class_key *key, short inner);
+
+# define raw_spin_lock_init(lock)					\
+do {									\
+	static struct lock_class_key __key;				\
+									\
+	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
 } while (0)

 #else
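
The new `short inner` argument carries a lockdep "wait type" for the lock: raw spinlocks register as LD_WAIT_SPIN, while spinlock_t (see the spin_lock_init() hunk further down) registers as LD_WAIT_CONFIG, i.e. a lock that only spins when PREEMPT_RT is disabled. A minimal sketch of the nesting this lets lockdep flag, assuming the wait-type checks are enabled in the debug configuration; the demo_* names are hypothetical:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_raw_lock);	/* inner wait type: LD_WAIT_SPIN */
static DEFINE_SPINLOCK(demo_lock);		/* inner wait type: LD_WAIT_CONFIG */

static void demo_bad_nesting(void)
{
	raw_spin_lock(&demo_raw_lock);
	/*
	 * spinlock_t becomes a sleeping lock on PREEMPT_RT, so taking it
	 * while holding a raw spinlock is invalid there; the wait-type
	 * annotations let lockdep report this nesting even on !RT kernels.
	 */
	spin_lock(&demo_lock);
	spin_unlock(&demo_lock);
	raw_spin_unlock(&demo_raw_lock);
}
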
@@ -178,6 +181,7 @@
 {
 	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 }

 #ifndef arch_spin_lock_flags
@@ -189,15 +193,22 @@
 {
 	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
+	mmiowb_spin_lock();
 }

 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
-	return arch_spin_trylock(&(lock)->raw_lock);
+	int ret = arch_spin_trylock(&(lock)->raw_lock);
+
+	if (ret)
+		mmiowb_spin_lock();
+
+	return ret;
 }

 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
+	mmiowb_spin_unlock();
 	arch_spin_unlock(&lock->raw_lock);
 	__release(lock);
 }
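
The mmiowb_spin_lock()/mmiowb_spin_unlock() hooks make the lock release order any MMIO writes issued inside the critical section on architectures that need it (asm/mmiowb.h expands to no-ops elsewhere), so drivers no longer have to issue mmiowb() by hand before unlocking. A rough driver-side sketch of what this buys, using a hypothetical device structure and register offset:

#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical device state, for illustration only. */
struct demo_hw {
	spinlock_t	reg_lock;
	void __iomem	*regs;
};

#define DEMO_REG_CTRL	0x00	/* assumed register offset */

static void demo_kick_hw(struct demo_hw *hw, u32 val)
{
	spin_lock(&hw->reg_lock);
	writel(val, hw->regs + DEMO_REG_CTRL);	/* posted MMIO write */
	/*
	 * No explicit mmiowb() before the unlock: do_raw_spin_unlock() now
	 * calls mmiowb_spin_unlock(), which orders the write against the
	 * lock handover where the architecture requires it.
	 */
	spin_unlock(&hw->reg_lock);
}
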
@@ -205,7 +216,7 @@

 /*
  * Define the various spin_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
  * various methods are defined as nops in the case they are not
  * required.
  */
@@ -298,7 +309,7 @@
 })

 /* Include rwlock functions */
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 # include <linux/rwlock_rt.h>
 #else
 # include <linux/rwlock.h>
@@ -313,9 +324,9 @@
 # include <linux/spinlock_api_up.h>
 #endif

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 # include <linux/spinlock_rt.h>
-#else /* PREEMPT_RT_FULL */
+#else /* PREEMPT_RT */

 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
@@ -326,11 +337,25 @@
 	return &lock->rlock;
 }

-#define spin_lock_init(_lock)				\
-do {							\
-	spinlock_check(_lock);				\
-	raw_spin_lock_init(&(_lock)->rlock);		\
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+# define spin_lock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__raw_spin_lock_init(spinlock_check(lock),		\
+			     #lock, &__key, LD_WAIT_CONFIG);	\
 } while (0)
+
+#else
+
+# define spin_lock_init(_lock)			\
+do {						\
+	spinlock_check(_lock);			\
+	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
+} while (0)
+
+#endif

 static __always_inline void spin_lock(spinlock_t *lock)
 {
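
With CONFIG_DEBUG_SPINLOCK, spin_lock_init() now mirrors raw_spin_lock_init(): every initialisation site gets its own static lock_class_key plus the LD_WAIT_CONFIG wait type, while the !DEBUG variant simply rewrites the lock with __SPIN_LOCK_UNLOCKED(). Callers are unchanged; a small usage sketch with hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical per-device context, for illustration only. */
struct demo_ctx {
	spinlock_t	lock;
	unsigned long	events;
};

static struct demo_ctx *demo_ctx_alloc(gfp_t gfp)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), gfp);

	if (!ctx)
		return NULL;
	/*
	 * Under CONFIG_DEBUG_SPINLOCK this registers one lock class (and the
	 * LD_WAIT_CONFIG wait type) per spin_lock_init() call site; otherwise
	 * it just stores __SPIN_LOCK_UNLOCKED() into the lock.
	 */
	spin_lock_init(&ctx->lock);
	return ctx;
}
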
@@ -437,7 +462,7 @@

 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

-#endif /* !PREEMPT_RT_FULL */
+#endif /* !PREEMPT_RT */

 /*
  * Pull the atomic_t declaration: