2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/arch/alpha/include/asm/atomic.h
@@ -16,15 +16,14 @@
 
 /*
  * To ensure dependency ordering is preserved for the _relaxed and
- * _release atomics, an smp_read_barrier_depends() is unconditionally
- * inserted into the _relaxed variants, which are used to build the
- * barriered versions. Avoid redundant back-to-back fences in the
- * _acquire and _fence versions.
+ * _release atomics, an smp_mb() is unconditionally inserted into the
+ * _relaxed variants, which are used to build the barriered versions.
+ * Avoid redundant back-to-back fences in the _acquire and _fence
+ * versions.
  */
 #define __atomic_acquire_fence()
 #define __atomic_post_full_fence()
 
-#define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
 #define atomic_read(v)		READ_ONCE((v)->counter)
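For context on the comment change above: the empty __atomic_acquire_fence() / __atomic_post_full_fence() hooks only pay off because the generic layer builds the barriered atomics out of the _relaxed ones. A minimal sketch of that composition, assuming it follows the __atomic_op_acquire()/__atomic_op_fence() pattern in include/linux/atomic.h (simplified here, not the exact in-tree text):

/* Simplified sketch, not the in-tree definitions: the _acquire and fully
 * ordered forms are composed from the _relaxed form plus the arch fence
 * hooks. Because the Alpha _relaxed variants below now end with smp_mb(),
 * the hooks expand to nothing and no redundant back-to-back fence is
 * emitted. */
#define __atomic_op_acquire(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
	__atomic_acquire_fence();	/* empty on Alpha */ \
	__ret; \
})

#define __atomic_op_fence(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret; \
	__atomic_pre_full_fence(); \
	__ret = op##_relaxed(args); \
	__atomic_post_full_fence();	/* empty on Alpha */ \
	__ret; \
})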
@@ -70,7 +69,7 @@
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_read_barrier_depends(); \
+	smp_mb(); \
 	return result; \
 }
 
@@ -88,14 +87,14 @@
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_read_barrier_depends(); \
+	smp_mb(); \
 	return result; \
 }
 
 #define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
 { \
-	unsigned long temp; \
+	s64 temp; \
 	__asm__ __volatile__( \
 	"1: ldq_l %0,%1\n" \
 	" " #asm_op " %0,%2,%0\n" \
@@ -109,9 +108,9 @@
 } \
 
 #define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
 { \
-	long temp, result; \
+	s64 temp, result; \
 	__asm__ __volatile__( \
 	"1: ldq_l %0,%1\n" \
 	" " #asm_op " %0,%3,%2\n" \
@@ -123,14 +122,14 @@
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_read_barrier_depends(); \
+	smp_mb(); \
 	return result; \
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
 { \
-	long temp, result; \
+	s64 temp, result; \
 	__asm__ __volatile__( \
 	"1: ldq_l %2,%1\n" \
 	" " #asm_op " %2,%3,%0\n" \
@@ -141,7 +140,7 @@
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_read_barrier_depends(); \
+	smp_mb(); \
 	return result; \
 }
 
@@ -246,9 +245,9 @@
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long c, new, old;
+	s64 c, new, old;
 	smp_mb();
 	__asm__ __volatile__(
 	"1: ldq_l %[old],%[mem]\n"
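The kernel-doc above fixes the return contract: the old value of @v comes back whether or not the add was performed. A hypothetical usage sketch (obj_get_unless_zero is invented for illustration and is not part of this file):

/* Hypothetical helper, for illustration only: take a reference only if
 * the object is still live. atomic64_fetch_add_unless(v, a, u) adds a
 * unless the counter equals u and always returns the old value, so a
 * non-zero return means the increment actually happened. */
static inline bool obj_get_unless_zero(atomic64_t *refcount)
{
	return atomic64_fetch_add_unless(refcount, 1, 0) != 0;
}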
@@ -276,9 +275,9 @@
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long old, tmp;
+	s64 old, tmp;
 	smp_mb();
 	__asm__ __volatile__(
 	"1: ldq_l %[old],%[mem]\n"
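Likewise, atomic64_dec_if_positive() returns the old value minus 1 even when the counter was left untouched, so callers test the sign of the result. A hypothetical usage sketch (take_credit is invented for illustration and is not part of this file):

/* Hypothetical helper, for illustration only: consume one credit only
 * when one is available. atomic64_dec_if_positive() refuses to drop the
 * counter below zero and returns the old value minus 1, so a negative
 * result means nothing was consumed. */
static inline bool take_credit(atomic64_t *credits)
{
	return atomic64_dec_if_positive(credits) >= 0;
}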