+/* SPDX-License-Identifier: GPL-2.0-or-later */
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 #ifdef __KERNEL__
 
-/*
- * Simple spin lock operations.
- *
- * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
- *	Rework to support virtual processors
- *
- * Type of int is used as a full 64b word is not necessary.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-#include <linux/jump_label.h>
-#include <linux/irqflags.h>
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#include <asm/hvcall.h>
-#endif
-#include <asm/synch.h>
-#include <asm/ppc-opcode.h>
-#include <asm/asm-405.h>
-
-#ifdef CONFIG_PPC64
-/* use 0x800000yy when locked, where yy == CPU number */
-#ifdef __BIG_ENDIAN__
-#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
+#ifdef CONFIG_PPC_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
 #else
-#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
-#endif
-#else
-#define LOCK_TOKEN	1
+#include <asm/simple_spinlock.h>
 #endif
 
-#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
-#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
-#define SYNC_IO	do {						\
-		if (unlikely(get_paca()->io_sync)) {		\
-			mb();					\
-			get_paca()->io_sync = 0;		\
-		}						\
-	} while (0)
-#else
-#define CLEAR_IO_SYNC
-#define SYNC_IO
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void pv_spinlocks_init(void) { }
 #endif
-
-#ifdef CONFIG_PPC_PSERIES
-DECLARE_STATIC_KEY_FALSE(shared_processor);
-
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
-{
-	if (!static_branch_unlikely(&shared_processor))
-		return false;
-	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
-}
-#endif
-
-static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
-	return lock.slock == 0;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	smp_mb();
-	return !arch_spin_value_unlocked(*lock);
-}
-
-/*
- * This returns the old value in the lock, so we succeeded
- * in getting the lock if the return value is 0.
- */
-static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned long tmp, token;
-
-	token = LOCK_TOKEN;
-	__asm__ __volatile__(
-"1:	" PPC_LWARX(%0,0,%2,1) "\n\
-	cmpwi		0,%0,0\n\
-	bne-		2f\n\
-	stwcx.		%1,0,%2\n\
-	bne-		1b\n"
-	PPC_ACQUIRE_BARRIER
-"2:"
-	: "=&r" (tmp)
-	: "r" (token), "r" (&lock->slock)
-	: "cr0", "memory");
-
-	return tmp;
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	CLEAR_IO_SYNC;
-	return __arch_spin_trylock(lock) == 0;
-}
-
-/*
- * On a system with shared processors (that is, where a physical
- * processor is multiplexed between several virtual processors),
- * there is no point spinning on a lock if the holder of the lock
- * isn't currently scheduled on a physical processor. Instead
- * we detect this situation and ask the hypervisor to give the
- * rest of our timeslice to the lock holder.
- *
- * So that we can tell which virtual processor is holding a lock,
- * we put 0x80000000 | smp_processor_id() in the lock when it is
- * held. Conveniently, we have a word in the paca that holds this
- * value.
- */
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x)	barrier()
-#define __rw_yield(x)	barrier()
-#define SHARED_PROCESSOR	0
-#endif
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	CLEAR_IO_SYNC;
-	while (1) {
-		if (likely(__arch_spin_trylock(lock) == 0))
-			break;
-		do {
-			HMT_low();
-			if (SHARED_PROCESSOR)
-				__spin_yield(lock);
-		} while (unlikely(lock->slock != 0));
-		HMT_medium();
-	}
-}
-
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long flags_dis;
-
-	CLEAR_IO_SYNC;
-	while (1) {
-		if (likely(__arch_spin_trylock(lock) == 0))
-			break;
-		local_save_flags(flags_dis);
-		local_irq_restore(flags);
-		do {
-			HMT_low();
-			if (SHARED_PROCESSOR)
-				__spin_yield(lock);
-		} while (unlikely(lock->slock != 0));
-		HMT_medium();
-		local_irq_restore(flags_dis);
-	}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	SYNC_IO;
-	__asm__ __volatile__("# arch_spin_unlock\n\t"
-				PPC_RELEASE_BARRIER: : :"memory");
-	lock->slock = 0;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- */
-
-#ifdef CONFIG_PPC64
-#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
-#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
-#else
-#define __DO_SIGN_EXTEND
-#define WRLOCK_TOKEN		(-1)
-#endif
-
-/*
- * This returns the old value in the lock + 1,
- * so we got a read lock if the return value is > 0.
- */
-static inline long __arch_read_trylock(arch_rwlock_t *rw)
-{
-	long tmp;
-
-	__asm__ __volatile__(
-"1:	" PPC_LWARX(%0,0,%1,1) "\n"
-	__DO_SIGN_EXTEND
-"	addic.		%0,%0,1\n\
-	ble-		2f\n"
-	PPC405_ERR77(0,%1)
-"	stwcx.		%0,0,%1\n\
-	bne-		1b\n"
-	PPC_ACQUIRE_BARRIER
-"2:"	: "=&r" (tmp)
-	: "r" (&rw->lock)
-	: "cr0", "xer", "memory");
-
-	return tmp;
-}
-
-/*
- * This returns the old value in the lock,
- * so we got the write lock if the return value is 0.
- */
-static inline long __arch_write_trylock(arch_rwlock_t *rw)
-{
-	long tmp, token;
-
-	token = WRLOCK_TOKEN;
-	__asm__ __volatile__(
-"1:	" PPC_LWARX(%0,0,%2,1) "\n\
-	cmpwi		0,%0,0\n\
-	bne-		2f\n"
-	PPC405_ERR77(0,%1)
-"	stwcx.		%1,0,%2\n\
-	bne-		1b\n"
-	PPC_ACQUIRE_BARRIER
-"2:"	: "=&r" (tmp)
-	: "r" (token), "r" (&rw->lock)
-	: "cr0", "memory");
-
-	return tmp;
-}
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	while (1) {
-		if (likely(__arch_read_trylock(rw) > 0))
-			break;
-		do {
-			HMT_low();
-			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
-		} while (unlikely(rw->lock < 0));
-		HMT_medium();
-	}
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	while (1) {
-		if (likely(__arch_write_trylock(rw) == 0))
-			break;
-		do {
-			HMT_low();
-			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
-		} while (unlikely(rw->lock != 0));
-		HMT_medium();
-	}
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	return __arch_read_trylock(rw) > 0;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	return __arch_write_trylock(rw) == 0;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	long tmp;
-
-	__asm__ __volatile__(
-	"# read_unlock\n\t"
-	PPC_RELEASE_BARRIER
-"1:	lwarx		%0,0,%1\n\
-	addic		%0,%0,-1\n"
-	PPC405_ERR77(0,%1)
-"	stwcx.		%0,0,%1\n\
-	bne-		1b"
-	: "=&r"(tmp)
-	: "r"(&rw->lock)
-	: "cr0", "xer", "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	__asm__ __volatile__("# write_unlock\n\t"
-				PPC_RELEASE_BARRIER: : :"memory");
-	rw->lock = 0;
-}
-
-#define arch_spin_relax(lock)	__spin_yield(lock)
-#define arch_read_relax(lock)	__rw_yield(lock)
-#define arch_write_relax(lock)	__rw_yield(lock)
-
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock()	smp_mb()
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */
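For readability, here is the header as it reads once the patch is applied, reconstructed only from the added and context lines above. It is a sketch of the result, not a copy from the tree; whitespace may differ. The old simple spinlock and rwlock code is gone from this file, and the header is reduced to a config-driven dispatch plus a stub for pv_spinlocks_init() when paravirt spinlocks are disabled.

/* Sketch of the post-patch header, assembled from the diff's '+' and context lines. */
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/* Pick the lock implementation at build time. */
#ifdef CONFIG_PPC_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>
#include <asm/qrwlock.h>
#else
#include <asm/simple_spinlock.h>
#endif

/* No-op init when paravirt spinlocks are not configured. */
#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void pv_spinlocks_init(void) { }
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */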