@@ -16,15 +16,14 @@
 
 /*
  * To ensure dependency ordering is preserved for the _relaxed and
- * _release atomics, an smp_read_barrier_depends() is unconditionally
- * inserted into the _relaxed variants, which are used to build the
- * barriered versions. Avoid redundant back-to-back fences in the
- * _acquire and _fence versions.
+ * _release atomics, an smp_mb() is unconditionally inserted into the
+ * _relaxed variants, which are used to build the barriered versions.
+ * Avoid redundant back-to-back fences in the _acquire and _fence
+ * versions.
  */
 #define __atomic_acquire_fence()
 #define __atomic_post_full_fence()
 
-#define ATOMIC_INIT(i) { (i) }
 #define ATOMIC64_INIT(i) { (i) }
 
 #define atomic_read(v) READ_ONCE((v)->counter)
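The empty __atomic_acquire_fence() and __atomic_post_full_fence() hooks matter because the generic atomic layer composes the _acquire and fully ordered forms by wrapping the _relaxed ones in exactly these hooks. A minimal sketch of that composition, loosely modelled on the __atomic_op_acquire()/__atomic_op_fence() wrappers in include/linux/atomic.h (an approximation, not the verbatim kernel macros):

```c
/* Sketch: how a barriered atomic is typically built from its _relaxed form.
 * Since the Alpha _relaxed variants below already end in smp_mb(), leaving
 * the acquire hook empty avoids a redundant back-to-back fence. */
#define sketch_op_acquire(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();	/* no-op on Alpha, see above */	\
	__ret;								\
})
```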
@@ -70,7 +69,7 @@
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_read_barrier_depends(); \
+	smp_mb(); \
 	return result; \
 }
 
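The ordering these trailing barriers preserve is the address-dependency case: the value an atomic returns may immediately be used to compute the address of a later load, and Alpha does not honour even address dependencies without a barrier. A hedged illustration of such a caller (the ring/tail names are hypothetical, not taken from this file):

```c
#define RING_SIZE 16			/* hypothetical, power of two */
static struct item *ring[RING_SIZE];	/* assumed declared elsewhere */
static atomic_t tail;

/* Hypothetical consumer: the second load's address depends on the value the
 * atomic returned.  On Alpha, the smp_mb() at the end of the _relaxed variant
 * is what keeps that dependent load from being satisfied too early. */
static struct item *next_item(void)
{
	int idx = atomic_add_return_relaxed(1, &tail) & (RING_SIZE - 1);

	return READ_ONCE(ring[idx]);	/* address depends on idx */
}
```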
@@ -88,14 +87,14 @@
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_read_barrier_depends(); \
+	smp_mb(); \
 	return result; \
 }
 
 #define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
 { \
-	unsigned long temp; \
+	s64 temp; \
 	__asm__ __volatile__( \
 	"1: ldq_l %0,%1\n" \
 	" " #asm_op " %0,%2,%0\n" \
@@ -109,9 +108,9 @@
 } \
 
 #define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
 { \
-	long temp, result; \
+	s64 temp, result; \
 	__asm__ __volatile__( \
 	"1: ldq_l %0,%1\n" \
 	" " #asm_op " %0,%3,%2\n" \
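The long-to-s64 conversion in these prototypes changes nothing on Alpha itself, where long is already 64 bits wide; it aligns the arch code with the generic atomic64 interface, which is expressed in terms of s64. Roughly the shape being matched (a sketch from memory of the generic definitions, not copied from a specific header):

```c
/* Generic atomic64 shape the Alpha macros now mirror (sketch only). */
typedef struct {
	s64 counter;
} atomic64_t;

s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v);
s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v);
```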
@@ -123,14 +122,14 @@
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_read_barrier_depends(); \
+	smp_mb(); \
 	return result; \
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
 { \
-	long temp, result; \
+	s64 temp, result; \
 	__asm__ __volatile__( \
 	"1: ldq_l %2,%1\n" \
 	" " #asm_op " %2,%3,%0\n" \
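The two macro families differ only in which value they hand back, and the asm templates above show how: the *_return variants compute the updated value into result, while the fetch_* variants load the old value straight into result and compute the update into temp. A non-atomic model of the two conventions, for illustration only:

```c
/* Illustration only: these models are NOT atomic.  The real functions do the
 * read-modify-write as a single LL/SC (ldq_l/stq_c) sequence. */
static inline s64 model_add_return(s64 i, atomic64_t *v)
{
	v->counter += i;
	return v->counter;		/* *_return: the new value */
}

static inline s64 model_fetch_add(s64 i, atomic64_t *v)
{
	s64 old = v->counter;

	v->counter = old + i;
	return old;			/* fetch_*: the old value */
}
```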
@@ -141,7 +140,7 @@
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_read_barrier_depends(); \
+	smp_mb(); \
 	return result; \
 }
 
@@ -246,9 +245,9 @@
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long c, new, old;
+	s64 c, new, old;
 	smp_mb();
 	__asm__ __volatile__(
 	"1: ldq_l %[old],%[mem]\n"
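Per the kernel-doc above, atomic64_fetch_add_unless() adds @a only when the counter is not @u and always returns the value it found, which is what lets callers build "take a reference only if still live" patterns. A hypothetical usage sketch (the helper and its name are illustrative, not from this file):

```c
/* Hypothetical helper: grab a reference only while the count is non-zero.
 * Relies on the documented return of the old value. */
static inline bool get_ref_if_live(atomic64_t *refs)
{
	return atomic64_fetch_add_unless(refs, 1, 0) != 0;
}
```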
@@ -276,9 +275,9 @@
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long old, tmp;
+	s64 old, tmp;
 	smp_mb();
 	__asm__ __volatile__(
 	"1: ldq_l %[old],%[mem]\n"
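As the comment above notes, atomic64_dec_if_positive() only decrements when the counter is greater than zero, yet it always returns the old value minus one, so a negative result signals that nothing was taken. A hypothetical usage sketch (the token-pool name is illustrative, not from this file):

```c
/* Hypothetical helper: consume one token only if any remain.  A negative
 * return from atomic64_dec_if_positive() means the pool was already empty. */
static inline bool take_token(atomic64_t *tokens)
{
	return atomic64_dec_if_positive(tokens) >= 0;
}
```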