--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -17,9 +17,9 @@
  * Atomically reads the value of @v.
  * Doesn't imply a read memory barrier.
  */
-static inline long arch_atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	return READ_ONCE((v)->counter);
+	return __READ_ONCE((v)->counter);
 }
 
 /**
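The long-to-s64 conversion is type hygiene rather than a codegen change: on x86-64 both types are 64 bits wide, but the generic atomic64 API is specified in terms of s64, so the arch backend should match it. For context, a minimal sketch of the type this header operates on, assuming the s64-based definition used for 64-bit architectures (see include/linux/types.h):

    /* Sketch of atomic64_t for CONFIG_64BIT, after the s64 conversion. */
    typedef struct {
        s64 counter;
    } atomic64_t;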
@@ -29,9 +29,9 @@
  *
  * Atomically sets the value of @v to @i.
  */
-static inline void arch_atomic64_set(atomic64_t *v, long i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	WRITE_ONCE(v->counter, i);
+	__WRITE_ONCE(v->counter, i);
 }
 
 /**
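Switching the arch_ accessors to the double-underscore variants keeps sanitizer hooks out of the arch layer: the instrumented atomic64_*() wrappers already emit the KASAN/KCSAN calls, so the backend wants only the bare once-semantics access. A simplified sketch of the distinction (the real definitions, in include/asm-generic/rwonce.h, use __unqual_scalar_typeof() and extra type checking):

    /* Bare volatile access: tearing-free load/store, no instrumentation. */
    #define __READ_ONCE(x)  (*(const volatile typeof(x) *)&(x))

    #define __WRITE_ONCE(x, val)                    \
    do {                                            \
        *(volatile typeof(x) *)&(x) = (val);        \
    } while (0)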
@@ -41,7 +41,7 @@
  *
  * Atomically adds @i to @v.
  */
-static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
@@ -55,7 +55,7 @@
  *
  * Atomically subtracts @i from @v.
  */
-static inline void arch_atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "subq %1,%0"
 		     : "=m" (v->counter)
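Both arch_atomic64_add() and arch_atomic64_sub() rely on LOCK_PREFIX to make the read-modify-write atomic against other CPUs. Roughly how that macro behaves, simplified from arch/x86/include/asm/alternative.h (the real SMP version also records each prefix location so it can be patched out on uniprocessor boots):

    #ifdef CONFIG_SMP
    #define LOCK_PREFIX "\n\tlock; "
    #else
    #define LOCK_PREFIX ""
    #endif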
@@ -71,9 +71,9 @@
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
+static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
 }
 #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 
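The GEN_*_RMWcc() edits track a new calling convention in arch/x86/include/asm/rmwcc.h: the condition code now comes right after the variable, and the macro is an expression yielding the flag, hence the explicit return. A hedged approximation of what the sub_and_test expansion looks like on compilers with flag-output asm support (the real macro also has an asm-goto variant and clobber plumbing):

    ({
        bool c;
        asm volatile(LOCK_PREFIX "subq %[val], %[var]"
                     : [var] "+m" (v->counter), "=@cce" (c)
                     : [val] "er" (i)
                     : "memory");
        c;  /* true iff the subtraction left ZF set, i.e. result == 0 */
    })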
@@ -115,7 +115,7 @@
  */
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
 }
 #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 
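dec_and_test returns true only for the transition to zero, the classic last-put shape. A hypothetical caller through the instrumented wrapper (my_obj and its refs field are invented for illustration; real reference counts should normally use refcount_t):

    /* Hypothetical object with a raw atomic64 reference count. */
    struct my_obj {
        atomic64_t refs;
    };

    static void my_obj_put(struct my_obj *obj)
    {
        if (atomic64_dec_and_test(&obj->refs))  /* true only on 1 -> 0 */
            kfree(obj);                         /* <linux/slab.h> */
    }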
@@ -129,7 +129,7 @@
  */
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
 }
 #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 
@@ -142,9 +142,9 @@
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
+static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
 }
 #define arch_atomic64_add_negative arch_atomic64_add_negative
 
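add_negative reports the sign flag of the result, which suits charge-style accounting. A hypothetical sketch (budget, charge() and handle_overcommit() are invented names):

    static atomic64_t budget = ATOMIC64_INIT(1024);

    static void handle_overcommit(void);    /* hypothetical reaction */

    static void charge(s64 cost)
    {
        /* Subtract @cost; add_negative() reports the resulting sign. */
        if (atomic64_add_negative(-cost, &budget))
            handle_overcommit();
    }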
@@ -155,43 +155,49 @@
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	return i + xadd(&v->counter, i);
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
-static inline long arch_atomic64_sub_return(long i, atomic64_t *v)
+static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
 	return arch_atomic64_add_return(-i, v);
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return
 
-static inline long arch_atomic64_fetch_add(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return xadd(&v->counter, i);
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
 {
 	return xadd(&v->counter, -i);
 }
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
 
-static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
+static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
 
-static inline long arch_atomic64_xchg(atomic64_t *v, long new)
+static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	return arch_xchg(&v->counter, new);
 }
+#define arch_atomic64_xchg arch_atomic64_xchg
 
-static inline void arch_atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "andq %1,%0"
 			: "+m" (v->counter)
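The new `#define arch_atomic64_<op> arch_atomic64_<op>` lines are not redundant: the generic fallback machinery tests these names with #ifdef to learn which operations the architecture supplies and synthesizes the rest. A simplified sketch of that pattern (modeled on the atomic fallback headers, not copied from them):

    #ifndef arch_atomic64_fetch_sub
    static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
    {
        /* Fall back to fetch_add with a negated operand. */
        return arch_atomic64_fetch_add(-i, v);
    }
    #define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
    #endif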
@@ -199,47 +205,50 @@
 			: "memory");
 }
 
-static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
 {
 	s64 val = arch_atomic64_read(v);
 
 	do {
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
 	return val;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
 
-static inline void arch_atomic64_or(long i, atomic64_t *v)
+static inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "orq %1,%0"
 			: "+m" (v->counter)
 			: "er" (i)
 			: "memory");
 }
 
-static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
 {
 	s64 val = arch_atomic64_read(v);
 
 	do {
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
 	return val;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
 
-static inline void arch_atomic64_xor(long i, atomic64_t *v)
+static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "xorq %1,%0"
 			: "+m" (v->counter)
 			: "er" (i)
 			: "memory");
 }
 
-static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
 {
 	s64 val = arch_atomic64_read(v);
 
 	do {
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
 	return val;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
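The fetch_and/fetch_or/fetch_xor bodies above all share one shape: arch_atomic64_try_cmpxchg() reloads *old with the current value on failure, so the loop body stays empty. The same shape generalizes to any read-modify-write the header doesn't provide; a hypothetical atomic max, for illustration only:

    static inline void my_atomic64_max(atomic64_t *v, s64 new)
    {
        s64 old = arch_atomic64_read(v);

        /* Stop once the stored value is already >= new, or our CAS wins;
         * on CAS failure, try_cmpxchg refreshes @old for the next pass. */
        do {
        } while (old < new &&
                 !arch_atomic64_try_cmpxchg(v, &old, new));
    }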