2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/powerpc/include/asm/atomic.h
@@ -10,9 +10,6 @@
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-#include <asm/asm-405.h>
-
-#define ATOMIC_INIT(i) { (i) }

 /*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -47,7 +44,6 @@
 __asm__ __volatile__( \
 "1: lwarx %0,0,%3 # atomic_" #op "\n" \
 #asm_op " %0,%2,%0\n" \
- PPC405_ERR77(0,%3) \
 " stwcx. %0,0,%3 \n" \
 " bne- 1b\n" \
 : "=&r" (t), "+m" (v->counter) \
@@ -63,7 +59,6 @@
 __asm__ __volatile__( \
 "1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
 #asm_op " %0,%2,%0\n" \
- PPC405_ERR77(0, %3) \
 " stwcx. %0,0,%3\n" \
 " bne- 1b\n" \
 : "=&r" (t), "+m" (v->counter) \
@@ -81,7 +76,6 @@
 __asm__ __volatile__( \
 "1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
 #asm_op " %1,%3,%0\n" \
- PPC405_ERR77(0, %4) \
 " stwcx. %1,0,%4\n" \
 " bne- 1b\n" \
 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
@@ -130,7 +124,6 @@
 __asm__ __volatile__(
 "1: lwarx %0,0,%2 # atomic_inc\n\
 addic %0,%0,1\n"
- PPC405_ERR77(0,%2)
 " stwcx. %0,0,%2 \n\
 bne- 1b"
 : "=&r" (t), "+m" (v->counter)
@@ -146,7 +139,6 @@
 __asm__ __volatile__(
 "1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
 " addic %0,%0,1\n"
- PPC405_ERR77(0, %2)
 " stwcx. %0,0,%2\n"
 " bne- 1b"
 : "=&r" (t), "+m" (v->counter)
@@ -163,7 +155,6 @@
 __asm__ __volatile__(
 "1: lwarx %0,0,%2 # atomic_dec\n\
 addic %0,%0,-1\n"
- PPC405_ERR77(0,%2)\
 " stwcx. %0,0,%2\n\
 bne- 1b"
 : "=&r" (t), "+m" (v->counter)
@@ -179,7 +170,6 @@
 __asm__ __volatile__(
 "1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
 " addic %0,%0,-1\n"
- PPC405_ERR77(0, %2)
 " stwcx. %0,0,%2\n"
 " bne- 1b"
 : "=&r" (t), "+m" (v->counter)
@@ -201,6 +191,34 @@
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

+/*
+ * Don't want to override the generic atomic_try_cmpxchg_acquire, because
+ * we add a lock hint to the lwarx, which may not be wanted for the
+ * _acquire case (and is not used by the other _acquire variants so it
+ * would be a surprise).
+ */
+static __always_inline bool
+atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+
+ __asm__ __volatile__ (
+"1:\t" PPC_LWARX(%0,0,%2,1) " # atomic_try_cmpxchg_acquire \n"
+" cmpw 0,%0,%3 \n"
+" bne- 2f \n"
+" stwcx. %4,0,%2 \n"
+" bne- 1b \n"
+"\t" PPC_ACQUIRE_BARRIER " \n"
+"2: \n"
+ : "=&r" (r), "+m" (v->counter)
+ : "r" (&v->counter), "r" (o), "r" (new)
+ : "cr0", "memory");
+
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+
 /**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
@@ -220,7 +238,6 @@
 cmpw 0,%0,%3 \n\
 beq 2f \n\
 add %0,%2,%0 \n"
- PPC405_ERR77(0,%2)
 " stwcx. %0,0,%1 \n\
 bne- 1b \n"
 PPC_ATOMIC_EXIT_BARRIER
@@ -251,7 +268,6 @@
 cmpwi 0,%0,0\n\
 beq- 2f\n\
 addic %1,%0,1\n"
- PPC405_ERR77(0,%2)
 " stwcx. %1,0,%2\n\
 bne- 1b\n"
 PPC_ATOMIC_EXIT_BARRIER
@@ -280,7 +296,6 @@
 cmpwi %0,1\n\
 addi %0,%0,-1\n\
 blt- 2f\n"
- PPC405_ERR77(0,%1)
 " stwcx. %0,0,%1\n\
 bne- 1b"
 PPC_ATOMIC_EXIT_BARRIER
@@ -297,24 +312,24 @@

 #define ATOMIC64_INIT(i) { (i) }

-static __inline__ long atomic64_read(const atomic64_t *v)
+static __inline__ s64 atomic64_read(const atomic64_t *v)
 {
- long t;
+ s64 t;

 __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

 return t;
 }

-static __inline__ void atomic64_set(atomic64_t *v, long i)
+static __inline__ void atomic64_set(atomic64_t *v, s64 i)
 {
 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }

 #define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
+static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
 { \
- long t; \
+ s64 t; \
 \
 __asm__ __volatile__( \
 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
@@ -327,10 +342,10 @@
 }

 #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
-static inline long \
-atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
+static inline s64 \
+atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
 { \
- long t; \
+ s64 t; \
 \
 __asm__ __volatile__( \
 "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
@@ -345,10 +360,10 @@
 }

 #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
-static inline long \
-atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
+static inline s64 \
+atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
 { \
- long res, t; \
+ s64 res, t; \
 \
 __asm__ __volatile__( \
 "1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
@@ -396,7 +411,7 @@

 static __inline__ void atomic64_inc(atomic64_t *v)
 {
- long t;
+ s64 t;

 __asm__ __volatile__(
 "1: ldarx %0,0,%2 # atomic64_inc\n\
@@ -409,9 +424,9 @@
 }
 #define atomic64_inc atomic64_inc

-static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
 {
- long t;
+ s64 t;

 __asm__ __volatile__(
 "1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
@@ -427,7 +442,7 @@

 static __inline__ void atomic64_dec(atomic64_t *v)
 {
- long t;
+ s64 t;

 __asm__ __volatile__(
 "1: ldarx %0,0,%2 # atomic64_dec\n\
@@ -440,9 +455,9 @@
 }
 #define atomic64_dec atomic64_dec

-static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
 {
- long t;
+ s64 t;

 __asm__ __volatile__(
 "1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
@@ -463,9 +478,9 @@
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
 {
- long t;
+ s64 t;

 __asm__ __volatile__(
 PPC_ATOMIC_ENTRY_BARRIER
@@ -502,9 +517,9 @@
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
- long t;
+ s64 t;

 __asm__ __volatile__ (
 PPC_ATOMIC_ENTRY_BARRIER
@@ -534,7 +549,7 @@
 */
 static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
 {
- long t1, t2;
+ s64 t1, t2;

 __asm__ __volatile__ (
 PPC_ATOMIC_ENTRY_BARRIER
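
For reference, a minimal sketch of how the atomic_try_cmpxchg_lock() helper added above is meant to be called: the caller passes its expected value through *old, on failure the helper rewrites *old with the value it actually observed and returns false, and on success it stores new with acquire ordering (the PPC_ACQUIRE_BARRIER in the asm). The struct my_spin_lock type, my_spin_lock_acquire() and my_spin_lock_release() below are hypothetical illustrations, not part of this patch or of the kernel, and the sketch only builds on powerpc where the helper exists.

/*
 * Illustrative only: a trivial test-and-set lock built on
 * atomic_try_cmpxchg_lock(). The names are hypothetical; only the
 * helper's calling convention is taken from the patch above.
 */
#include <linux/atomic.h>
#include <asm/processor.h>	/* cpu_relax() */

struct my_spin_lock {
	atomic_t val;		/* 0 = free, 1 = held */
};

static inline void my_spin_lock_acquire(struct my_spin_lock *lock)
{
	int old = 0;

	/*
	 * Try the 0 -> 1 transition. On failure the helper has written
	 * the observed value into 'old', so wait until the lock looks
	 * free again, reset 'old' and retry.
	 */
	while (!atomic_try_cmpxchg_lock(&lock->val, &old, 1)) {
		do {
			cpu_relax();
		} while (atomic_read(&lock->val));
		old = 0;
	}
}

static inline void my_spin_lock_release(struct my_spin_lock *lock)
{
	/* Pairs with the acquire ordering in my_spin_lock_acquire(). */
	atomic_set_release(&lock->val, 0);
}

The inner read-only spin is a common design choice for this kind of lock: it avoids repeatedly failing stwcx. attempts (and bouncing the cache line) while the lock is held, and only retries the store-conditional once the lock appears free.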