..
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */

 #ifndef __ASM_SPINLOCK_H
..
 {
         unsigned int val;

-        smp_mb();
-
         __asm__ __volatile__(
         "1: llock %[val], [%[slock]] \n"
         "   breq  %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
..
           [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
         : "memory", "cc");

+        /*
+         * ACQUIRE barrier to ensure load/store after taking the lock
+         * don't "bleed-up" out of the critical section (leak-in is allowed)
+         * http://www.spinics.net/lists/kernel/msg2010409.html
+         *
+         * ARCv2 only has load-load, store-store and all-all barrier
+         * thus need the full all-all barrier
+         */
         smp_mb();
 }

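For context on the ACQUIRE comment added above: the barrier sits *after* the lock is taken so that accesses from inside the critical section cannot move up past it, and its RELEASE counterpart sits *before* the store in the unlock path. A minimal user-space sketch of that placement, using C11 atomics rather than the kernel's llock/scond and smp_mb() (toy_spinlock_t and the function names are made up for illustration):

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_bool locked; } toy_spinlock_t;	/* hypothetical type */

static inline void toy_spin_lock(toy_spinlock_t *l)
{
	/* spin until we are the one who flips the flag from 0 to 1 */
	while (atomic_exchange_explicit(&l->locked, true, memory_order_relaxed))
		;
	/*
	 * ACQUIRE: accesses inside the critical section may not "bleed up"
	 * above this point (leak-in from before the lock is still allowed).
	 */
	atomic_thread_fence(memory_order_acquire);
}

static inline void toy_spin_unlock(toy_spinlock_t *l)
{
	/*
	 * RELEASE: accesses inside the critical section may not sink below
	 * this point; then drop the lock with a plain relaxed store.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&l->locked, false, memory_order_relaxed);
}
```

The ARC code uses a full smp_mb() in both places because, as the new comment notes, ARCv2 has no dedicated acquire-only or release-only barrier.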
..
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         unsigned int val, got_it = 0;
-
-        smp_mb();

         __asm__ __volatile__(
         "1: llock %[val], [%[slock]] \n"
..
 {
         smp_mb();

-        lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-        smp_mb();
+        WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
 }

 /*
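The unlock side keeps the smp_mb() before the store (the RELEASE half of the pattern sketched earlier) and only changes how the store itself is written: WRITE_ONCE() tells the compiler to emit exactly one, untorn store. Roughly, and ignoring the real macro's type handling, the idea is a volatile access (sketch only, not the kernel macro):

```c
/*
 * Simplified idea behind WRITE_ONCE(): a volatile access forces the
 * compiler to emit a single store that it may not tear, fuse or elide.
 * (Sketch only; the real kernel macro is type-generic.)
 */
#define WRITE_ONCE_SKETCH(x, val) \
	(*(volatile __typeof__(x) *)&(x) = (val))
```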
..
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned int val;
-
-        smp_mb();

         /*
          * zero means writer holds the lock exclusively, deny Reader.
..
 {
         unsigned int val, got_it = 0;

-        smp_mb();
-
         __asm__ __volatile__(
         "1: llock %[val], [%[rwlock]] \n"
         "   brls  %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
..
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned int val;
-
-        smp_mb();

         /*
          * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
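The reader/writer paths above all rely on the same counter convention (see the comments about "zero means writer holds the lock exclusively" and lock < __ARCH_RW_LOCK_UNLOCKED__). A sketch of that scheme in C11 atomics, illustration only and not the ARC llock/scond code; RW_UNLOCKED and the toy_* names are stand-ins:

```c
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Convention:
 *   counter == RW_UNLOCKED      -> lock is free
 *   0 < counter < RW_UNLOCKED   -> held by (RW_UNLOCKED - counter) readers
 *   counter == 0                -> held exclusively by a writer
 */
#define RW_UNLOCKED 0x01000000u	/* hypothetical stand-in for __ARCH_RW_LOCK_UNLOCKED__ */

static bool toy_read_trylock(atomic_uint *counter)
{
	unsigned int val = atomic_load_explicit(counter, memory_order_relaxed);

	/* a reader may enter only if no writer holds the lock (val > 0) */
	return val > 0 &&
	       atomic_compare_exchange_strong_explicit(counter, &val, val - 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static bool toy_write_trylock(atomic_uint *counter)
{
	unsigned int val = RW_UNLOCKED;

	/* a writer must take the whole count: RW_UNLOCKED -> 0 in one step */
	return atomic_compare_exchange_strong_explicit(counter, &val, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}
```

Unlock simply reverses the step: a reader increments the counter, a writer stores RW_UNLOCKED back, which is what the WRITE_ONCE() store in arch_write_unlock() below does.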
..
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned int val, got_it = 0;

-        smp_mb();
-
         __asm__ __volatile__(
         "1: llock %[val], [%[rwlock]] \n"
..
         : [val] "=&r" (val)
         : [rwlock] "r" (&(rw->counter))
         : "memory", "cc");
-
-        smp_mb();
 }

 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         smp_mb();

-        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-
-        smp_mb();
+        WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
 }

 #else /* !CONFIG_ARC_HAS_LLSC */
..
         unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

         /*
-         * This smp_mb() is technically superfluous, we only need the one
-         * after the lock for providing the ACQUIRE semantics.
-         * However doing the "right" thing was regressing hackbench
-         * so keeping this, pending further investigation
+         * Per lkmm, smp_mb() is only required after _lock (and before_unlock)
+         * for ACQ and REL semantics respectively. However EX based spinlocks
+         * need the extra smp_mb to workaround a hardware quirk.
          */
         smp_mb();

         __asm__ __volatile__(
         "1: ex  %0, [%1] \n"
-#ifdef CONFIG_EZNPS_MTM_EXT
-        "   .word %3 \n"
-#endif
         "   breq  %0, %2, 1b \n"
         : "+&r" (val)
         : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
-#ifdef CONFIG_EZNPS_MTM_EXT
-        , "i"(CTOP_INST_SCHD_RW)
-#endif
         : "memory");

-        /*
-         * ACQUIRE barrier to ensure load/store after taking the lock
-         * don't "bleed-up" out of the critical section (leak-in is allowed)
-         * http://www.spinics.net/lists/kernel/msg2010409.html
-         *
-         * ARCv2 only has load-load, store-store and all-all barrier
-         * thus need the full all-all barrier
-         */
         smp_mb();
 }

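For the non-LLSC variant above: the EX instruction atomically swaps a register with memory, so the lock loop keeps exchanging LOCKED into the slot until it reads back "unlocked". A sketch of that algorithm with C11 atomics (illustration only; LOCKED and toy_ex_spin_lock are stand-ins), with full fences on both sides mirroring the two smp_mb() calls the patch keeps for this variant because of the hardware quirk mentioned in the new comment:

```c
#include <stdatomic.h>

#define LOCKED 0x0fu	/* hypothetical stand-in for __ARCH_SPIN_LOCK_LOCKED__ */

static void toy_ex_spin_lock(atomic_uint *slock)
{
	/* full fence before the exchange loop (the extra smp_mb() kept
	 * in the EX-based variant) */
	atomic_thread_fence(memory_order_seq_cst);

	/* swap LOCKED into the slot; if the old value was already LOCKED,
	 * someone else holds the lock, so swap again until it wasn't */
	while (atomic_exchange_explicit(slock, LOCKED,
					memory_order_relaxed) == LOCKED)
		;

	/* full fence after taking the lock provides the ACQUIRE半... */
	atomic_thread_fence(memory_order_seq_cst);
}
```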
..
         : "memory");

         /*
-         * superfluous, but keeping for now - see pairing version in
-         * arch_spin_lock above
+         * see pairing version/comment in arch_spin_lock above
          */
         smp_mb();
 }
..
         arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);

-        smp_mb();
         return ret;
 }
