```diff
@@ -14,8 +14,6 @@
  * resource counting etc..
  */
 
-#define ATOMIC_INIT(i) { (i) }
-
 /**
  * arch_atomic_read - read atomic variable
  * @v: pointer of type atomic_t
@@ -28,7 +26,7 @@
  * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
  * it's non-inlined function that increases binary size and stack usage.
  */
-        return READ_ONCE((v)->counter);
+        return __READ_ONCE((v)->counter);
 }
 
 /**
@@ -40,7 +38,7 @@
  */
 static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
-        WRITE_ONCE(v->counter, i);
+        __WRITE_ONCE(v->counter, i);
 }
 
 /**
```
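The switch from READ_ONCE()/WRITE_ONCE() to the double-underscore variants keeps arch_atomic_read() and arch_atomic_set() down to the bare volatile access, avoiding the extra checking the plain macros can add; instrumentation for the atomics is applied by the generic atomic_*() wrappers built on top instead. A simplified sketch of the distinction (illustrative only, not the kernel's actual definitions, which live in include/asm-generic/rwonce.h and use __unqual_scalar_typeof() plus compile-time type checks):

```c
/* Simplified, illustrative only -- see include/asm-generic/rwonce.h. */
#define __READ_ONCE(x)        (*(const volatile typeof(x) *)&(x))
#define __WRITE_ONCE(x, val)  do { *(volatile typeof(x) *)&(x) = (val); } while (0)
```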
```diff
@@ -82,7 +80,7 @@
  */
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+        return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
 }
 #define arch_atomic_sub_and_test arch_atomic_sub_and_test
 
@@ -122,7 +120,7 @@
  */
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
-        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
+        return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
 }
 #define arch_atomic_dec_and_test arch_atomic_dec_and_test
 
@@ -136,7 +134,7 @@
  */
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
-        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
+        return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
 }
 #define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
@@ -151,7 +149,7 @@
  */
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
-        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+        return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
 }
 #define arch_atomic_add_negative arch_atomic_add_negative
 
```
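All four helpers above follow the same pattern: one LOCK-prefixed instruction whose condition-code output ("e" for the zero flag, "s" for the sign flag) becomes the boolean result. With the updated GEN_*_RMWcc() calling convention the condition code is passed as a plain argument and the macro evaluates to that result, hence the added `return`. As a rough, hypothetical user-space analogue of the semantics only (the names and the C11 atomics below are illustrative, not the kernel implementation):

```c
#include <stdatomic.h>
#include <stdbool.h>

/* true if the value becomes zero after subtracting i */
static inline bool sub_and_test(int i, atomic_int *v)
{
        return atomic_fetch_sub(v, i) - i == 0;
}

/* true if the value becomes negative after adding i */
static inline bool add_negative(int i, atomic_int *v)
{
        return atomic_fetch_add(v, i) + i < 0;
}
```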
```diff
@@ -166,6 +164,7 @@
 {
         return i + xadd(&v->counter, i);
 }
+#define arch_atomic_add_return arch_atomic_add_return
 
 /**
  * arch_atomic_sub_return - subtract integer and return
@@ -178,34 +177,39 @@
 {
         return arch_atomic_add_return(-i, v);
 }
+#define arch_atomic_sub_return arch_atomic_sub_return
 
 static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
         return xadd(&v->counter, i);
 }
+#define arch_atomic_fetch_add arch_atomic_fetch_add
 
 static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
 {
         return xadd(&v->counter, -i);
 }
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
 
 static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
         return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 {
         return try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 
-static inline int arch_atomic_xchg(atomic_t *v, int new)
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
 {
         return arch_xchg(&v->counter, new);
 }
+#define arch_atomic_xchg arch_atomic_xchg
 
-static inline void arch_atomic_and(int i, atomic_t *v)
+static __always_inline void arch_atomic_and(int i, atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "andl %1,%0"
                         : "+m" (v->counter)
```
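Each operation now advertises itself with a `#define arch_atomic_<op> arch_atomic_<op>` self-define. The generic fallback layer keys off these macros: it only synthesizes an operation when the architecture has not already defined it. A simplified, hypothetical illustration of that pattern (the real fallback headers are script-generated and also cover the acquire/release/relaxed variants):

```c
/* Illustrative only: how a generic fallback is guarded by the self-define. */
#ifndef arch_atomic_fetch_add
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
        int old = arch_atomic_read(v);

        /* fall back to a cmpxchg loop when no native fetch_add exists */
        while (!arch_atomic_try_cmpxchg(v, &old, old + i))
                ;
        return old;
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
#endif
```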
```diff
@@ -213,7 +217,7 @@
                 : "memory");
 }
 
-static inline int arch_atomic_fetch_and(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
 {
         int val = arch_atomic_read(v);
 
@@ -221,8 +225,9 @@
 
         return val;
 }
+#define arch_atomic_fetch_and arch_atomic_fetch_and
 
-static inline void arch_atomic_or(int i, atomic_t *v)
+static __always_inline void arch_atomic_or(int i, atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "orl %1,%0"
                         : "+m" (v->counter)
@@ -230,7 +235,7 @@
                 : "memory");
 }
 
-static inline int arch_atomic_fetch_or(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
 {
         int val = arch_atomic_read(v);
 
@@ -238,8 +243,9 @@
 
         return val;
 }
+#define arch_atomic_fetch_or arch_atomic_fetch_or
 
-static inline void arch_atomic_xor(int i, atomic_t *v)
+static __always_inline void arch_atomic_xor(int i, atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "xorl %1,%0"
                         : "+m" (v->counter)
@@ -247,7 +253,7 @@
                 : "memory");
 }
 
-static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 {
         int val = arch_atomic_read(v);
 
@@ -255,6 +261,7 @@
 
         return val;
 }
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
```
```diff
@@ -262,6 +269,6 @@
 # include <asm/atomic64_64.h>
 #endif
 
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
 
 #endif /* _ASM_X86_ATOMIC_H */
```
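Replacing the direct include of asm-generic/atomic-instrumented.h with `#define ARCH_ATOMIC` hands control to the generic <linux/atomic.h> machinery: with ARCH_ATOMIC defined, the generic code layers the arch_atomic_*() fallbacks and the KASAN/KCSAN-instrumented atomic_*() wrappers on top of the primitives declared here, rather than this header pulling the wrappers in itself. Roughly, each generated wrapper has the shape below (a simplified sketch of what asm-generic/atomic-instrumented.h provides, not the generated code verbatim):

```c
static __always_inline int atomic_read(const atomic_t *v)
{
        /* report the access to the sanitizers, then do the real arch read */
        instrument_atomic_read(v, sizeof(*v));
        return arch_atomic_read(v);
}
```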