...
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-#include <asm/asm-405.h>
-
-#define ATOMIC_INIT(i)		{ (i) }
 
 /*
  * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
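The dropped <asm/asm-405.h> header existed mainly to carry the PPC405_ERR77 erratum workaround that the hunks below also remove (ATOMIC_INIT() is now provided by the common headers instead of per arch). As a reminder of what that workaround did, the header's definition was roughly the following; treat the exact spelling as a recollection, not a quote:

```c
/* Rough sketch of the removed asm-405.h workaround (assumed definition). */
#ifdef CONFIG_IBM405_ERR77
/* 405 erratum #77: touch the line with dcbt before the store-conditional. */
#define PPC405_ERR77(ra, rb)	stringify_in_c(dcbt	ra, rb;)
#else
#define PPC405_ERR77(ra, rb)
#endif
```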
...
 	__asm__ __volatile__(					\
 "1:	lwarx	%0,0,%3		# atomic_" #op "\n"		\
 	#asm_op " %0,%2,%0\n"					\
-	PPC405_ERR77(0,%3)					\
 "	stwcx.	%0,0,%3 \n"					\
 "	bne-	1b\n"						\
 	: "=&r" (t), "+m" (v->counter)				\
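To make the macro easier to follow, here is approximately what ATOMIC_OP(add, add) expands to once the erratum line is gone. The constraint and clobber tail is outside this excerpt, so it is reconstructed here as a sketch rather than quoted:

```c
/* Approximate expansion of ATOMIC_OP(add, add) after the change. */
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n"		/* load-reserve v->counter */
"	add %0,%2,%0\n"					/* t += a */
"	stwcx.	%0,0,%3 \n"				/* store-conditional */
"	bne-	1b\n"					/* reservation lost: retry */
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)			/* assumed, not shown above */
	: "cc");
}
```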
...
 	__asm__ __volatile__(					\
 "1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n" \
 	#asm_op " %0,%2,%0\n"					\
-	PPC405_ERR77(0, %3)					\
 "	stwcx.	%0,0,%3\n"					\
 "	bne-	1b\n"						\
 	: "=&r" (t), "+m" (v->counter)				\
...
 	__asm__ __volatile__(					\
 "1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n" \
 	#asm_op " %1,%3,%0\n"					\
-	PPC405_ERR77(0, %4)					\
 "	stwcx.	%1,0,%4\n"					\
 "	bne-	1b\n"						\
 	: "=&r" (res), "=&r" (t), "+m" (v->counter)		\
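The fetch_* family returns the value observed before the update (%0 here) while storing the modified value (%1). A hypothetical caller, for orientation:

```c
/* Hypothetical usage: atomic_fetch_add() reports the pre-update value. */
static void my_fetch_add_demo(atomic_t *counter)
{
	int old;

	atomic_set(counter, 5);
	old = atomic_fetch_add(3, counter);	/* old == 5, counter is now 8 */
	(void)old;
}
```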
...
 	__asm__ __volatile__(
 "1:	lwarx	%0,0,%2		# atomic_inc\n\
 	addic	%0,%0,1\n"
-	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
...
 	__asm__ __volatile__(
 "1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
 "	addic	%0,%0,1\n"
-	PPC405_ERR77(0, %2)
 "	stwcx.	%0,0,%2\n"
 "	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
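Only the _relaxed form of inc_return is defined here; the fully ordered variant is composed by the generic atomic machinery. Conceptually it behaves like the simplified sketch below (the real wrappers insert powerpc's entry/exit or acquire/release barriers rather than two plain smp_mb() calls):

```c
/* Simplified sketch of how a fully ordered op is built from a _relaxed one. */
static __inline__ int my_atomic_inc_return(atomic_t *v)
{
	int ret;

	smp_mb();			/* full barrier before the RMW */
	ret = atomic_inc_return_relaxed(v);
	smp_mb();			/* full barrier after the RMW */
	return ret;
}
```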
...
 	__asm__ __volatile__(
 "1:	lwarx	%0,0,%2		# atomic_dec\n\
 	addic	%0,%0,-1\n"
-	PPC405_ERR77(0,%2)\
 "	stwcx.	%0,0,%2\n\
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
...
 	__asm__ __volatile__(
 "1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
 "	addic	%0,%0,-1\n"
-	PPC405_ERR77(0, %2)
 "	stwcx.	%0,0,%2\n"
 "	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
...
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
 
+/*
+ * Don't want to override the generic atomic_try_cmpxchg_acquire, because
+ * we add a lock hint to the lwarx, which may not be wanted for the
+ * _acquire case (and is not used by the other _acquire variants so it
+ * would be a surprise).
+ */
+static __always_inline bool
+atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
+{
+	int r, o = *old;
+
+	__asm__ __volatile__ (
+"1:\t"	PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_acquire	\n"
+"	cmpw	0,%0,%3			\n"
+"	bne-	2f			\n"
+"	stwcx.	%4,0,%2			\n"
+"	bne-	1b			\n"
+"\t"	PPC_ACQUIRE_BARRIER "		\n"
+"2:					\n"
+	: "=&r" (r), "+m" (v->counter)
+	: "r" (&v->counter), "r" (o), "r" (new)
+	: "cr0", "memory");
+
+	if (unlikely(r != o))
+		*old = r;
+	return likely(r == o);
+}
+
 /**
  * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
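The new helper follows the try_cmpxchg calling convention: on failure the observed value is written back through *old, so callers can retry without an extra atomic_read(). A hypothetical lock fast path (names invented for illustration; the real consumer is not part of this excerpt) might use it like this:

```c
/* Hypothetical caller, illustrating the try_cmpxchg-style retry convention. */
static __always_inline bool my_lock_fastpath(atomic_t *lock_word)
{
	int old = 0;	/* expect "unlocked" */

	/* Succeeds only if the word was 0; otherwise 'old' now holds what we saw. */
	return atomic_try_cmpxchg_lock(lock_word, &old, 1);
}
```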
...
 	cmpw	0,%0,%3 \n\
 	beq	2f \n\
 	add	%0,%2,%0 \n"
-	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b \n"
 	PPC_ATOMIC_EXIT_BARRIER
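For readers who find the asm dense, the operation is equivalent to the familiar cmpxchg loop below (an illustrative helper, not the code path this file uses):

```c
/* Illustrative C equivalent of atomic_fetch_add_unless(): add a unless the value is u. */
static inline int my_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	do {
		if (unlikely(c == u))
			break;			/* don't add; just report what we saw */
	} while (!atomic_try_cmpxchg(v, &c, c + a));

	return c;				/* old value in either case */
}
```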
...
 	cmpwi	0,%0,0\n\
 	beq-	2f\n\
 	addic	%1,%0,1\n"
-	PPC405_ERR77(0,%2)
 "	stwcx.	%1,0,%2\n\
 	bne-	1b\n"
 	PPC_ATOMIC_EXIT_BARRIER
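atomic_inc_not_zero() is the usual building block for "take a reference only if the object is still alive"; a hypothetical example:

```c
/* Hypothetical refcount-style usage of atomic_inc_not_zero(). */
struct my_obj {
	atomic_t refs;
};

static bool my_obj_tryget(struct my_obj *obj)
{
	/* false means refs was already 0, i.e. the object is going away. */
	return atomic_inc_not_zero(&obj->refs);
}
```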
...
 	cmpwi	%0,1\n\
 	addi	%0,%0,-1\n\
 	blt-	2f\n"
-	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
 	PPC_ATOMIC_EXIT_BARRIER
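The asm above computes old - 1 and only stores it when the old value was at least 1. In plain C the contract looks like this (illustrative helper only):

```c
/* Illustrative C equivalent of atomic_dec_if_positive(): returns old - 1,
 * and only performs the decrement when that result is not negative. */
static inline int my_dec_if_positive(atomic_t *v)
{
	int c = atomic_read(v);

	do {
		if (c <= 0)
			break;			/* would go negative: leave it alone */
	} while (!atomic_try_cmpxchg(v, &c, c - 1));

	return c - 1;
}
```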
...
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-static __inline__ long atomic64_read(const atomic64_t *v)
+static __inline__ s64 atomic64_read(const atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
 
 	return t;
 }
 
-static __inline__ void atomic64_set(atomic64_t *v, long i)
+static __inline__ void atomic64_set(atomic64_t *v, s64 i)
 {
 	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
 #define ATOMIC64_OP(op, asm_op)					\
-static __inline__ void atomic64_##op(long a, atomic64_t *v)	\
+static __inline__ void atomic64_##op(s64 a, atomic64_t *v)	\
 {								\
-	long t;							\
+	s64 t;							\
 								\
 	__asm__ __volatile__(					\
 "1:	ldarx	%0,0,%3		# atomic64_" #op "\n"		\
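For symmetry with the 32-bit macros, this is roughly what ATOMIC64_OP(add, add) expands to with the new s64 prototypes. The constraint and clobber tail is outside this excerpt, so it is reconstructed as a sketch:

```c
/* Approximate expansion of ATOMIC64_OP(add, add) after the s64 conversion. */
static __inline__ void atomic64_add(s64 a, atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n"	/* 64-bit load-reserve */
"	add %0,%2,%0\n"
"	stdcx.	%0,0,%3 \n"				/* 64-bit store-conditional */
"	bne-	1b\n"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)			/* assumed, not shown above */
	: "cc");
}
```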
...
 }
 
 #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)			\
-static inline long						\
-atomic64_##op##_return_relaxed(long a, atomic64_t *v)		\
+static inline s64						\
+atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)		\
 {								\
-	long t;							\
+	s64 t;							\
 								\
 	__asm__ __volatile__(					\
 "1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n" \
...
 }
 
 #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)			\
-static inline long						\
-atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)		\
+static inline s64						\
+atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)		\
 {								\
-	long res, t;						\
+	s64 res, t;						\
 								\
 	__asm__ __volatile__(					\
 "1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n" \
...
 
 static __inline__ void atomic64_inc(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 "1:	ldarx	%0,0,%2		# atomic64_inc\n\
...
 }
 #define atomic64_inc atomic64_inc
 
-static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 "1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
...
 
 static __inline__ void atomic64_dec(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 "1:	ldarx	%0,0,%2		# atomic64_dec\n\
...
 }
 #define atomic64_dec atomic64_dec
 
-static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 "1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
...
  * Atomically test *v and decrement if it is greater than 0.
  * The function returns the old value of *v minus 1.
  */
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 	PPC_ATOMIC_ENTRY_BARRIER
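A hypothetical caller, to show how the "old value minus 1" return is usually tested:

```c
/* Hypothetical usage: consume one credit only if any are left. */
static bool my_take_credit(atomic64_t *credits)
{
	/* >= 0 means the old value was positive and the decrement happened. */
	return atomic64_dec_if_positive(credits) >= 0;
}
```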
...
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
...
  */
 static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
 {
-	long t1, t2;
+	s64 t1, t2;
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER