@@ -11,11 +11,11 @@
 #define __ASM_ATOMIC_LSE_H
 
 #define ATOMIC_OP(op, asm_op) \
-static inline void __lse_atomic_##op(int i, atomic_t *v) \
+static inline void __lse_atomic_##op(int i, atomic_t *v) \
 { \
 	asm volatile( \
 	__LSE_PREAMBLE \
-"	" #asm_op "	%w[i], %[v]\n" \
+	"	" #asm_op "	%w[i], %[v]\n" \
 	: [i] "+r" (i), [v] "+Q" (v->counter) \
 	: "r" (v)); \
 }
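For orientation: later in this file the macro is stamped out for the LSE store-to-memory instructions (stadd, stclr, stset, steor). A sketch of what ATOMIC_OP(add, stadd) expands to, assuming the post-patch indentation:

/* Sketch: expansion of ATOMIC_OP(add, stadd) after this hunk.
 * STADD atomically adds %w[i] to the memory at %[v]; the "+Q"
 * constraint marks v->counter as both read and written by the asm.
 */
static inline void __lse_atomic_add(int i, atomic_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	stadd	%w[i], %[v]\n"
	: [i] "+r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}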
@@ -32,7 +32,7 @@
 { \
 	asm volatile( \
 	__LSE_PREAMBLE \
-"	" #asm_op #mb "	%w[i], %w[i], %[v]" \
+	"	" #asm_op #mb "	%w[i], %w[i], %[v]" \
 	: [i] "+r" (i), [v] "+Q" (v->counter) \
 	: "r" (v) \
 	: cl); \
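This is the fetch-op template: the LSE load-op instruction writes the old value back into the %w[i] register, which the function then returns. As a sketch, the fully ordered fetch_add variant (asm_op=ldadd, mb=al, cl="memory") would expand to roughly:

/* Sketch: expansion of the fetch-op shape above for a fully
 * ordered fetch_add. LDADDAL adds %w[i] to *v with acquire and
 * release semantics and leaves the old value in %w[i].
 */
static inline int __lse_atomic_fetch_add(int i, atomic_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	ldaddal	%w[i], %w[i], %[v]"
	: [i] "+r" (i), [v] "+Q" (v->counter)
	: "r" (v)
	: "memory");

	return i;
}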
@@ -130,7 +130,7 @@
 "	add	%w[i], %w[i], %w[tmp]" \
 	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
 	: "r" (v) \
-	: cl); \
+	: cl); \
 	\
 	return i; \
 }
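The hunk above is the tail of the add_return template. LSE's LDADD only yields the old value, so the new value has to be recomputed with a plain ADD before it can be returned. A sketch of the fully ordered expansion, assuming tmp is declared as a 32-bit temporary earlier in the macro:

/* Sketch: add_return built from LDADDAL plus ADD. The atomic
 * instruction yields the pre-add value in tmp; the following ADD
 * derives the post-add value that add_return must hand back.
 */
static inline int __lse_atomic_add_return(int i, atomic_t *v)
{
	u32 tmp;

	asm volatile(
	__LSE_PREAMBLE
	"	ldaddal	%w[i], %w[tmp], %[v]\n"
	"	add	%w[i], %w[i], %w[tmp]"
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	: "r" (v)
	: "memory");

	return i;
}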
@@ -168,7 +168,7 @@
 { \
 	asm volatile( \
 	__LSE_PREAMBLE \
-"	" #asm_op "	%[i], %[v]\n" \
+	"	" #asm_op "	%[i], %[v]\n" \
 	: [i] "+r" (i), [v] "+Q" (v->counter) \
 	: "r" (v)); \
 }
@@ -185,7 +185,7 @@
 { \
 	asm volatile( \
 	__LSE_PREAMBLE \
-"	" #asm_op #mb "	%[i], %[i], %[v]" \
+	"	" #asm_op #mb "	%[i], %[i], %[v]" \
 	: [i] "+r" (i), [v] "+Q" (v->counter) \
 	: "r" (v) \
 	: cl); \
@@ -272,7 +272,7 @@
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
 { \
 	unsigned long tmp; \
 	\
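The asm body of this macro falls outside the hunk. As a sketch of the usual LSE idiom (not necessarily this file's exact body): there is no subtract-and-return instruction, so sub_return negates the operand and reuses LDADD, shown here for the fully ordered variant:

/* Sketch of a sub_return body on LSE: negate the operand, then
 * LDADDAL; the old value lands in tmp and the new value is
 * recomputed, exactly as in the add_return case.
 */
static inline long __lse_atomic64_sub_return(s64 i, atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"	neg	%[i], %[i]\n"
	"	ldaddal	%[i], %[tmp], %[v]\n"
	"	add	%[i], %[i], %[tmp]"
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	: "r" (v)
	: "memory");

	return i;
}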
@@ -403,7 +403,7 @@
 "	eor	%[old2], %[old2], %[oldval2]\n" \
 "	orr	%[old1], %[old1], %[old2]" \
 	: [old1] "+&r" (x0), [old2] "+&r" (x1), \
-	  [v] "+Q" (*(unsigned long *)ptr) \
+	  [v] "+Q" (*(__uint128_t *)ptr) \
 	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
 	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
 	: cl); \
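The constraint change in this last hunk is the one functional fix in the section. The paired compare-and-swap updates 16 bytes at ptr, but "+Q" (*(unsigned long *)ptr) only declared an 8-byte object to the compiler, letting it assume the second half of the pair is untouched across the asm and cache or reorder accesses to it. Casting to __uint128_t sizes the memory operand to the full 128 bits. A minimal standalone illustration of the same idea (a hypothetical store_pair helper, not the kernel's code):

/* Minimal sketch of the constraint-sizing fix: a 16-byte store
 * via STP. "+Q" (*(__uint128_t *)ptr) tells the compiler all 16
 * bytes are read and written; with "+Q" (*(unsigned long *)ptr)
 * it could assume bytes 8..15 are untouched by the asm.
 */
static inline void store_pair(unsigned long *ptr,
			      unsigned long lo, unsigned long hi)
{
	asm volatile(
	"	stp	%[lo], %[hi], %[v]\n"
	: [v] "+Q" (*(__uint128_t *)ptr)
	: [lo] "r" (lo), [hi] "r" (hi));
}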