@@ -20,150 +20,174 @@
 #include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cmpxchg.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
 #include <asm/war.h>
 
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
+#define ATOMIC_OPS(pfx, type) \
+static __always_inline type pfx##_read(const pfx##_t *v) \
+{ \
+        return READ_ONCE(v->counter); \
+} \
+ \
+static __always_inline void pfx##_set(pfx##_t *v, type i) \
+{ \
+        WRITE_ONCE(v->counter, i); \
+} \
+ \
+static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+{ \
+        return cmpxchg(&v->counter, o, n); \
+} \
+ \
+static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+{ \
+        return xchg(&v->counter, n); \
+}
+
+ATOMIC_OPS(atomic, int)
+
+#ifdef CONFIG_64BIT
+# define ATOMIC64_INIT(i) { (i) }
+ATOMIC_OPS(atomic64, s64)
 #endif
 
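To make the macro-generated API concrete, here is roughly what ATOMIC_OPS(atomic, int) above expands to (a mechanical expansion for illustration, with whitespace simplified; ATOMIC_OPS(atomic64, s64) produces the same four accessors with the atomic64_ prefix):

    /* Sketch: expansion of ATOMIC_OPS(atomic, int), for illustration only. */
    static __always_inline int atomic_read(const atomic_t *v)
    {
            return READ_ONCE(v->counter);
    }

    static __always_inline void atomic_set(atomic_t *v, int i)
    {
            WRITE_ONCE(v->counter, i);
    }

    static __always_inline int atomic_cmpxchg(atomic_t *v, int o, int n)
    {
            return cmpxchg(&v->counter, o, n);
    }

    static __always_inline int atomic_xchg(atomic_t *v, int n)
    {
            return xchg(&v->counter, n);
    }

This replaces the open-coded atomic_read/atomic_set defines removed below, and turns the former atomic_cmpxchg/atomic_xchg macros into type-checked inline functions.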
-#define ATOMIC_INIT(i) { (i) }
-
-/*
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) READ_ONCE((v)->counter)
-
-/*
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
-
-#define ATOMIC_OP(op, c_op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
-{ \
-        if (kernel_uses_llsc) { \
-                int temp; \
- \
-                __asm__ __volatile__( \
-                " .set "MIPS_ISA_LEVEL" \n" \
-                "1: ll %0, %1 # atomic_" #op " \n" \
-                " " #asm_op " %0, %2 \n" \
-                " sc %0, %1 \n" \
-                "\t" __scbeqz " %0, 1b \n" \
-                " .set mips0 \n" \
-                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
-                : "Ir" (i)); \
-        } else { \
-                unsigned long flags; \
- \
-                raw_local_irq_save(flags); \
-                v->counter c_op i; \
-                raw_local_irq_restore(flags); \
-        } \
+#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+{ \
+        type temp; \
+ \
+        if (!kernel_uses_llsc) { \
+                unsigned long flags; \
+ \
+                raw_local_irq_save(flags); \
+                v->counter c_op i; \
+                raw_local_irq_restore(flags); \
+                return; \
+        } \
+ \
+        __asm__ __volatile__( \
+        " .set push \n" \
+        " .set " MIPS_ISA_LEVEL " \n" \
+        " " __SYNC(full, loongson3_war) " \n" \
+        "1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
+        " " #asm_op " %0, %2 \n" \
+        " " #sc " %0, %1 \n" \
+        "\t" __SC_BEQZ "%0, 1b \n" \
+        " .set pop \n" \
+        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+        : "Ir" (i) : __LLSC_CLOBBER); \
 }
 
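Beyond folding both widths into one macro via the pfx/type/ll/sc parameters, the new ATOMIC_OP body makes several further changes: the !kernel_uses_llsc fallback becomes an early return rather than an else branch, .set push/.set pop brackets replace the hard-coded .set mips0 so the assembler's previous mode is restored rather than assumed, __SC_BEQZ from <asm/llsc.h> replaces the file-local __scbeqz (it likewise expands to beqzl on R10000 CPUs affected by the erratum described in the deleted comment, and to beqz otherwise), and __SYNC(full, loongson3_war) from <asm/sync.h> emits a leading barrier only on kernels built with the Loongson3 LL/SC workaround, expanding to nothing elsewhere. Substituting the parameters of the later instantiation ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc) by hand gives roughly this (a sketch, not a verbatim preprocessor expansion):

    /* Sketch: what ATOMIC_OP generates for the 32-bit add case. */
    static __inline__ void atomic_add(int i, atomic_t *v)
    {
            int temp;

            if (!kernel_uses_llsc) {
                    unsigned long flags;

                    /* No LL/SC on this CPU: fall back to disabling IRQs. */
                    raw_local_irq_save(flags);
                    v->counter += i;
                    raw_local_irq_restore(flags);
                    return;
            }

            __asm__ __volatile__(
            "       .set push                               \n"
            "       .set " MIPS_ISA_LEVEL "                 \n"
            "       " __SYNC(full, loongson3_war) "         \n"
            "1:     ll      %0, %1          # atomic_add    \n"
            "       addu    %0, %2                          \n"
            "       sc      %0, %1                          \n"
            "\t" __SC_BEQZ "%0, 1b                          \n"
            "       .set pop                                \n"
            : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)
            : "Ir" (i) : __LLSC_CLOBBER);
    }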
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
-{ \
-        int result; \
- \
-        if (kernel_uses_llsc) { \
-                int temp; \
- \
-                __asm__ __volatile__( \
-                " .set "MIPS_ISA_LEVEL" \n" \
-                "1: ll %1, %2 # atomic_" #op "_return \n" \
-                " " #asm_op " %0, %1, %3 \n" \
-                " sc %0, %2 \n" \
-                "\t" __scbeqz " %0, 1b \n" \
-                " " #asm_op " %0, %1, %3 \n" \
-                " .set mips0 \n" \
-                : "=&r" (result), "=&r" (temp), \
-                  "+" GCC_OFF_SMALL_ASM() (v->counter) \
-                : "Ir" (i)); \
-        } else { \
-                unsigned long flags; \
- \
-                raw_local_irq_save(flags); \
-                result = v->counter; \
-                result c_op i; \
-                v->counter = result; \
-                raw_local_irq_restore(flags); \
-        } \
- \
-        return result; \
+#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+{ \
+        type temp, result; \
+ \
+        if (!kernel_uses_llsc) { \
+                unsigned long flags; \
+ \
+                raw_local_irq_save(flags); \
+                result = v->counter; \
+                result c_op i; \
+                v->counter = result; \
+                raw_local_irq_restore(flags); \
+                return result; \
+        } \
+ \
+        __asm__ __volatile__( \
+        " .set push \n" \
+        " .set " MIPS_ISA_LEVEL " \n" \
+        " " __SYNC(full, loongson3_war) " \n" \
+        "1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
+        " " #asm_op " %0, %1, %3 \n" \
+        " " #sc " %0, %2 \n" \
+        "\t" __SC_BEQZ "%0, 1b \n" \
+        " " #asm_op " %0, %1, %3 \n" \
+        " .set pop \n" \
+        : "=&r" (result), "=&r" (temp), \
+          "+" GCC_OFF_SMALL_ASM() (v->counter) \
+        : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+        return result; \
 }
 
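Note why #asm_op appears twice in the _return variant: inside the loop it computes the new value into %0, which sc then consumes as both store source and success flag, so once the loop exits the new value must be recomputed from the old value preserved in %1. Conceptually (a C-level paraphrase with an illustrative name; the real code performs all of this inside one LL/SC retry loop, which is what makes it atomic):

    /* Paraphrase of atomic_add_return_relaxed(), for illustration only. */
    static inline int atomic_add_return_relaxed_paraphrase(int i, atomic_t *v)
    {
            int old = v->counter;   /* ll  %1, %2: load-linked old value       */
            int new = old + i;      /* first addu: value handed to sc          */
            v->counter = new;       /* sc  %0, %2: store, retried via __SC_BEQZ */
            return old + i;         /* second addu: %0 was clobbered by sc     */
    }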
-#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
-{ \
-        int result; \
- \
-        if (kernel_uses_llsc) { \
-                int temp; \
- \
-                __asm__ __volatile__( \
-                " .set "MIPS_ISA_LEVEL" \n" \
-                "1: ll %1, %2 # atomic_fetch_" #op " \n" \
-                " " #asm_op " %0, %1, %3 \n" \
-                " sc %0, %2 \n" \
-                "\t" __scbeqz " %0, 1b \n" \
-                " .set mips0 \n" \
-                " move %0, %1 \n" \
-                : "=&r" (result), "=&r" (temp), \
-                  "+" GCC_OFF_SMALL_ASM() (v->counter) \
-                : "Ir" (i)); \
-        } else { \
-                unsigned long flags; \
- \
-                raw_local_irq_save(flags); \
-                result = v->counter; \
-                v->counter c_op i; \
-                raw_local_irq_restore(flags); \
-        } \
- \
-        return result; \
+#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+{ \
+        type temp, result; \
+ \
+        if (!kernel_uses_llsc) { \
+                unsigned long flags; \
+ \
+                raw_local_irq_save(flags); \
+                result = v->counter; \
+                v->counter c_op i; \
+                raw_local_irq_restore(flags); \
+                return result; \
+        } \
+ \
+        __asm__ __volatile__( \
+        " .set push \n" \
+        " .set " MIPS_ISA_LEVEL " \n" \
+        " " __SYNC(full, loongson3_war) " \n" \
+        "1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
+        " " #asm_op " %0, %1, %3 \n" \
+        " " #sc " %0, %2 \n" \
+        "\t" __SC_BEQZ "%0, 1b \n" \
+        " .set pop \n" \
+        " move %0, %1 \n" \
+        : "=&r" (result), "=&r" (temp), \
+          "+" GCC_OFF_SMALL_ASM() (v->counter) \
+        : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+        return result; \
 }
 
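(The added declaration above is written as "type temp, result;" rather than the "int temp, result;" of the raw patch: the macro is instantiated with type = s64 for atomic64, so an int result would truncate 64-bit values, and the sibling ATOMIC_OP_RETURN macro already uses type.) The fetch variant differs from the _return variant only in its tail: instead of recomputing the new value, "move %0, %1" copies the value loaded by LL into the result register, so the caller sees the counter as it was before the operation. A usage sketch contrasting the two (illustrative values):

    atomic_t v;
    int old, new;

    atomic_set(&v, 5);
    old = atomic_fetch_add_relaxed(3, &v);  /* old == 5,  v is now 8  */
    new = atomic_add_return_relaxed(3, &v); /* new == 11, v is now 11 */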
-#define ATOMIC_OPS(op, c_op, asm_op) \
-        ATOMIC_OP(op, c_op, asm_op) \
-        ATOMIC_OP_RETURN(op, c_op, asm_op) \
-        ATOMIC_FETCH_OP(op, c_op, asm_op)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+        ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
 
-ATOMIC_OPS(add, +=, addu)
-ATOMIC_OPS(sub, -=, subu)
+ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
+ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
 
 #define atomic_add_return_relaxed atomic_add_return_relaxed
 #define atomic_sub_return_relaxed atomic_sub_return_relaxed
 #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
 #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
 
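Only the _relaxed forms are implemented here; the #defines above tell <linux/atomic.h> that the architecture provides them, and the generic layer then derives the acquire, release, and fully-ordered variants, bracketing the relaxed op with barriers for the ordered case. That fallback has roughly this shape (a sketch from memory of the generic code, not a verbatim copy of the kernel macro):

    /* Sketch of the generic fully-ordered fallback in <linux/atomic.h>. */
    #define __atomic_op_fence(op, args...)                  \
    ({                                                      \
            typeof(op##_relaxed(args)) __ret;               \
            smp_mb__before_atomic();                        \
            __ret = op##_relaxed(args);                     \
            smp_mb__after_atomic();                         \
            __ret;                                          \
    })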
-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op) \
-        ATOMIC_OP(op, c_op, asm_op) \
-        ATOMIC_FETCH_OP(op, c_op, asm_op)
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
+ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
+# define atomic64_add_return_relaxed atomic64_add_return_relaxed
+# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif /* CONFIG_64BIT */
 
-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
+
+ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
+ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
+ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
 
 #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
 #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
 #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
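ATOMIC_OPS is redefined without ATOMIC_OP_RETURN before the bitwise instantiations because the kernel's atomic API has no and/or/xor *_return variants, only the void and fetch forms. For example (illustrative values):

    atomic_t flags;
    int old;

    atomic_set(&flags, 0x1);
    atomic_or(0x4, &flags);                      /* flags == 0x5             */
    old = atomic_fetch_and_relaxed(0x4, &flags); /* old == 0x5, flags == 0x4 */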
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
+ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
+ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
+# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
 
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
@@ -178,248 +202,66 @@
  * Atomically test @v and subtract @i if @v is greater than or equal to @i.
  * The function returns the old value of @v minus @i.
  */
-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
-{
-        int result;
-
-        smp_mb__before_llsc();
-
-        if (kernel_uses_llsc) {
-                int temp;
-
-                __asm__ __volatile__(
-                " .set "MIPS_ISA_LEVEL" \n"
-                "1: ll %1, %2 # atomic_sub_if_positive\n"
-                " .set mips0 \n"
-                " subu %0, %1, %3 \n"
-                " move %1, %0 \n"
-                " bltz %0, 1f \n"
-                " .set "MIPS_ISA_LEVEL" \n"
-                " sc %1, %2 \n"
-                "\t" __scbeqz " %1, 1b \n"
-                "1: \n"
-                " .set mips0 \n"
-                : "=&r" (result), "=&r" (temp),
-                  "+" GCC_OFF_SMALL_ASM() (v->counter)
-                : "Ir" (i));
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                result = v->counter;
-                result -= i;
-                if (result >= 0)
-                        v->counter = result;
-                raw_local_irq_restore(flags);
-        }
-
-        smp_llsc_mb();
-
-        return result;
+#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
+static __inline__ type pfx##_sub_if_positive(type i, pfx##_t * v) \
+{ \
+        type temp, result; \
+ \
+        smp_mb__before_atomic(); \
+ \
+        if (!kernel_uses_llsc) { \
+                unsigned long flags; \
+ \
+                raw_local_irq_save(flags); \
+                result = v->counter; \
+                result -= i; \
+                if (result >= 0) \
+                        v->counter = result; \
+                raw_local_irq_restore(flags); \
+                smp_mb__after_atomic(); \
+                return result; \
+        } \
+ \
+        __asm__ __volatile__( \
+        " .set push \n" \
+        " .set " MIPS_ISA_LEVEL " \n" \
+        " " __SYNC(full, loongson3_war) " \n" \
+        "1: " #ll " %1, %2 # atomic_sub_if_positive\n" \
+        " .set pop \n" \
+        " " #op " %0, %1, %3 \n" \
+        " move %1, %0 \n" \
+        " bltz %0, 2f \n" \
+        " .set push \n" \
+        " .set " MIPS_ISA_LEVEL " \n" \
+        " " #sc " %1, %2 \n" \
+        " " __SC_BEQZ "%1, 1b \n" \
+        "2: " __SYNC(full, loongson3_war) " \n" \
+        " .set pop \n" \
+        : "=&r" (result), "=&r" (temp), \
+          "+" GCC_OFF_SMALL_ASM() (v->counter) \
+        : "Ir" (i) \
+        : __LLSC_CLOBBER); \
+ \
+        /* \
+         * In the Loongson3 workaround case we already have a \
+         * completion barrier at 2: above, which is needed due to the \
+         * bltz that can branch to code outside of the LL/SC loop. As \
+         * such, we don't need to emit another barrier here. \
+         */ \
+        if (__SYNC_loongson3_war == 0) \
+                smp_mb__after_atomic(); \
+ \
+        return result; \
 }
 
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- */
+ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
 #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
 
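atomic_dec_if_positive() only performs the store when the decremented value is non-negative and returns that decremented value either way, which makes it natural for counting down a limited resource. A hypothetical example (the names here are illustrative, not from this patch):

    /* Sketch: take one of a limited pool of tokens, if any remain. */
    static atomic_t tokens_left;

    static bool try_take_token(void)
    {
            /* A negative result means the counter was already 0
             * and was left untouched. */
            return atomic_dec_if_positive(&tokens_left) >= 0;
    }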
 #ifdef CONFIG_64BIT
-
-#define ATOMIC64_INIT(i) { (i) }
-
-/*
- * atomic64_read - read atomic variable
- * @v: pointer of type atomic64_t
- *
- */
-#define atomic64_read(v) READ_ONCE((v)->counter)
-
-/*
- * atomic64_set - set atomic variable
- * @v: pointer of type atomic64_t
- * @i: required value
- */
-#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-
-#define ATOMIC64_OP(op, c_op, asm_op) \
-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
-{ \
-        if (kernel_uses_llsc) { \
-                long temp; \
- \
-                __asm__ __volatile__( \
-                " .set "MIPS_ISA_LEVEL" \n" \
-                "1: lld %0, %1 # atomic64_" #op " \n" \
-                " " #asm_op " %0, %2 \n" \
-                " scd %0, %1 \n" \
-                "\t" __scbeqz " %0, 1b \n" \
-                " .set mips0 \n" \
-                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
-                : "Ir" (i)); \
-        } else { \
-                unsigned long flags; \
- \
-                raw_local_irq_save(flags); \
-                v->counter c_op i; \
-                raw_local_irq_restore(flags); \
-        } \
-}
-
-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
-{ \
-        long result; \
- \
-        if (kernel_uses_llsc) { \
-                long temp; \
- \
-                __asm__ __volatile__( \
-                " .set "MIPS_ISA_LEVEL" \n" \
-                "1: lld %1, %2 # atomic64_" #op "_return\n" \
-                " " #asm_op " %0, %1, %3 \n" \
-                " scd %0, %2 \n" \
-                "\t" __scbeqz " %0, 1b \n" \
-                " " #asm_op " %0, %1, %3 \n" \
-                " .set mips0 \n" \
-                : "=&r" (result), "=&r" (temp), \
-                  "+" GCC_OFF_SMALL_ASM() (v->counter) \
-                : "Ir" (i)); \
-        } else { \
-                unsigned long flags; \
- \
-                raw_local_irq_save(flags); \
-                result = v->counter; \
-                result c_op i; \
-                v->counter = result; \
-                raw_local_irq_restore(flags); \
-        } \
- \
-        return result; \
-}
-
-#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
-{ \
-        long result; \
- \
-        if (kernel_uses_llsc) { \
-                long temp; \
- \
-                __asm__ __volatile__( \
-                " .set "MIPS_ISA_LEVEL" \n" \
-                "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
-                " " #asm_op " %0, %1, %3 \n" \
-                " scd %0, %2 \n" \
-                "\t" __scbeqz " %0, 1b \n" \
-                " move %0, %1 \n" \
-                " .set mips0 \n" \
-                : "=&r" (result), "=&r" (temp), \
-                  "+" GCC_OFF_SMALL_ASM() (v->counter) \
-                : "Ir" (i)); \
-        } else { \
-                unsigned long flags; \
- \
-                raw_local_irq_save(flags); \
-                result = v->counter; \
-                v->counter c_op i; \
-                raw_local_irq_restore(flags); \
-        } \
- \
-        return result; \
-}
-
-#define ATOMIC64_OPS(op, c_op, asm_op) \
-        ATOMIC64_OP(op, c_op, asm_op) \
-        ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-        ATOMIC64_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC64_OPS(add, +=, daddu)
-ATOMIC64_OPS(sub, -=, dsubu)
-
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#undef ATOMIC64_OPS
-#define ATOMIC64_OPS(op, c_op, asm_op) \
-        ATOMIC64_OP(op, c_op, asm_op) \
-        ATOMIC64_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC64_OPS(and, &=, and)
-ATOMIC64_OPS(or, |=, or)
-ATOMIC64_OPS(xor, ^=, xor)
-
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-/*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic
- * variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically test @v and subtract @i if @v is greater or equal than @i.
- * The function returns the old value of @v minus @i.
- */
-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
-{
-        long result;
-
-        smp_mb__before_llsc();
-
-        if (kernel_uses_llsc) {
-                long temp;
-
-                __asm__ __volatile__(
-                " .set "MIPS_ISA_LEVEL" \n"
-                "1: lld %1, %2 # atomic64_sub_if_positive\n"
-                " dsubu %0, %1, %3 \n"
-                " move %1, %0 \n"
-                " bltz %0, 1f \n"
-                " scd %1, %2 \n"
-                "\t" __scbeqz " %1, 1b \n"
-                "1: \n"
-                " .set mips0 \n"
-                : "=&r" (result), "=&r" (temp),
-                  "+" GCC_OFF_SMALL_ASM() (v->counter)
-                : "Ir" (i));
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                result = v->counter;
-                result -= i;
-                if (result >= 0)
-                        v->counter = result;
-                raw_local_irq_restore(flags);
-        }
-
-        smp_llsc_mb();
-
-        return result;
-}
-
-#define atomic64_cmpxchg(v, o, n) \
-        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic64_t
- */
+ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
 #define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+#endif
 
-#endif /* CONFIG_64BIT */
+#undef ATOMIC_SIP_OP
 
 #endif /* _ASM_ATOMIC_H */