2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/arc/include/asm/spinlock.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifndef __ASM_SPINLOCK_H
@@ -21,8 +18,6 @@
 {
 	unsigned int val;
 
-	smp_mb();
-
 	__asm__ __volatile__(
 	"1:	llock	%[val], [%[slock]]	\n"
 	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
@@ -34,6 +29,14 @@
 	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 	: "memory", "cc");
 
+	/*
+	 * ACQUIRE barrier to ensure load/store after taking the lock
+	 * don't "bleed-up" out of the critical section (leak-in is allowed)
+	 * http://www.spinics.net/lists/kernel/msg2010409.html
+	 *
+	 * ARCv2 only has load-load, store-store and all-all barrier
+	 * thus need the full all-all barrier
+	 */
 	smp_mb();
 }
 
@@ -41,8 +44,6 @@
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int val, got_it = 0;
-
-	smp_mb();
 
 	__asm__ __volatile__(
 	"1:	llock	%[val], [%[slock]]	\n"
@@ -67,9 +68,7 @@
 {
 	smp_mb();
 
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
+	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
 }
 
 /*
@@ -80,8 +79,6 @@
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int val;
-
-	smp_mb();
 
 	/*
 	 * zero means writer holds the lock exclusively, deny Reader.
@@ -113,8 +110,6 @@
 {
 	unsigned int val, got_it = 0;
 
-	smp_mb();
-
 	__asm__ __volatile__(
 	"1:	llock	%[val], [%[rwlock]]	\n"
 	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
@@ -139,8 +134,6 @@
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned int val;
-
-	smp_mb();
 
 	/*
 	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
@@ -174,8 +167,6 @@
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val, got_it = 0;
-
-	smp_mb();
 
 	__asm__ __volatile__(
 	"1:	llock	%[val], [%[rwlock]]	\n"
@@ -217,17 +208,13 @@
 	: [val]		"=&r"	(val)
 	: [rwlock]	"r"	(&(rw->counter))
 	: "memory", "cc");
-
-	smp_mb();
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();
 
-	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-
-	smp_mb();
+	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
 }
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
@@ -237,34 +224,19 @@
 	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
 	/*
-	 * This smp_mb() is technically superfluous, we only need the one
-	 * after the lock for providing the ACQUIRE semantics.
-	 * However doing the "right" thing was regressing hackbench
-	 * so keeping this, pending further investigation
+	 * Per lkmm, smp_mb() is only required after _lock (and before_unlock)
+	 * for ACQ and REL semantics respectively. However EX based spinlocks
+	 * need the extra smp_mb to workaround a hardware quirk.
 	 */
 	smp_mb();
 
 	__asm__ __volatile__(
 	"1:	ex  %0, [%1]		\n"
-#ifdef CONFIG_EZNPS_MTM_EXT
-	"	.word %3		\n"
-#endif
 	"	breq  %0, %2, 1b	\n"
 	: "+&r" (val)
 	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
-#ifdef CONFIG_EZNPS_MTM_EXT
-	, "i"(CTOP_INST_SCHD_RW)
-#endif
 	: "memory");
 
-	/*
-	 * ACQUIRE barrier to ensure load/store after taking the lock
-	 * don't "bleed-up" out of the critical section (leak-in is allowed)
-	 * http://www.spinics.net/lists/kernel/msg2010409.html
-	 *
-	 * ARCv2 only has load-load, store-store and all-all barrier
-	 * thus need the full all-all barrier
-	 */
 	smp_mb();
 }
 
@@ -309,8 +281,7 @@
 	: "memory");
 
 	/*
-	 * superfluous, but keeping for now - see pairing version in
-	 * arch_spin_lock above
+	 * see pairing version/comment in arch_spin_lock above
 	 */
 	smp_mb();
 }
@@ -344,7 +315,6 @@
 	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
 
-	smp_mb();
 	return ret;
 }
 
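
For readers not fluent in ARC assembly, the barrier placement this diff settles on can be summarized with a short, self-contained sketch. This is an illustrative userspace approximation only, not the kernel implementation: sketch_spinlock_t, sketch_spin_lock()/sketch_spin_unlock() and the 0/1 lock values are invented for illustration, and C11 <stdatomic.h> operations stand in for the llock/scond loop, smp_mb() and WRITE_ONCE() used above.

/*
 * Minimal sketch (NOT the kernel code) of the barrier discipline the diff
 * converges on: exactly one full barrier after taking the lock (ACQUIRE)
 * and one before releasing it (RELEASE).
 */
#include <stdatomic.h>

typedef struct { atomic_uint slock; } sketch_spinlock_t;

#define SKETCH_UNLOCKED	0U
#define SKETCH_LOCKED	1U

static inline void sketch_spin_lock(sketch_spinlock_t *lock)
{
	unsigned int expected;

	do {
		/* roughly what "llock ...; breq ..., LOCKED, 1b" does: spin while held */
		while (atomic_load_explicit(&lock->slock,
					    memory_order_relaxed) == SKETCH_LOCKED)
			;
		expected = SKETCH_UNLOCKED;
		/* roughly what "scond" does: store LOCKED only if nobody raced us */
	} while (!atomic_compare_exchange_weak_explicit(&lock->slock, &expected,
							SKETCH_LOCKED,
							memory_order_relaxed,
							memory_order_relaxed));

	/*
	 * Full barrier *after* acquiring, mirroring the single smp_mb() the
	 * diff keeps: later loads/stores may not "bleed up" past this point.
	 */
	atomic_thread_fence(memory_order_seq_cst);
}

static inline void sketch_spin_unlock(sketch_spinlock_t *lock)
{
	/*
	 * Full barrier *before* releasing, mirroring smp_mb() in
	 * arch_spin_unlock(); the trailing barrier the old code had is gone.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* plain store of the unlocked value, mirroring WRITE_ONCE() above */
	atomic_store_explicit(&lock->slock, SKETCH_UNLOCKED,
			      memory_order_relaxed);
}

The point mirrored here is the one the new in-tree comments make: per the kernel memory model, only the barrier after _lock (ACQUIRE) and the one before _unlock (RELEASE) are required, which is why the leading smp_mb() calls and the trailing ones after the unlock stores are removed in this diff.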