+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/atomic.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2002 Deep Blue Solutions Ltd.
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #ifndef __ASM_ATOMIC_LL_SC_H
 #define __ASM_ATOMIC_LL_SC_H

-#ifndef __ARM64_IN_ATOMIC_IMPL
-#error "please don't include this file directly"
+#include <linux/stringify.h>
+
+#ifndef CONFIG_CC_HAS_K_CONSTRAINT
+#define K
 #endif

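When the toolchain cannot be trusted with the 'K' immediate constraint (CONFIG_CC_HAS_K_CONSTRAINT unset), the empty `#define K` above erases every `K` constraint argument before `__stringify()` turns it into a string, quietly downgrading the asm operand to a register-only constraint. A minimal, standalone sketch of the preprocessor mechanics, using the `__stringify()` definition from linux/stringify.h:

```c
#include <stdio.h>

/* As defined in include/linux/stringify.h */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

/* Fallback path: the K token expands to nothing */
#define K

int main(void)
{
	/*
	 * With K erased, __stringify(K) yields "" and the constraint
	 * string collapses from "Kr" (immediate-or-register) to "r"
	 * (register only).
	 */
	printf("[%s]\n", __stringify(K) "r");	/* prints: [r] */
	return 0;
}
```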
 /*
  * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
  * to ensure that the update happens.
- *
- * NOTE: these functions do *not* follow the PCS and must explicitly
- * save any clobbered registers other than x0 (regardless of return
- * value). This is achieved through -fcall-saved-* compiler flags for
- * this file, which unfortunately don't work on a per-function basis
- * (the optimize attribute silently ignores these options).
  */

 #define ATOMIC_OP(op, asm_op, constraint)				\
-__LL_SC_INLINE void							\
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
+static inline void							\
+__ll_sc_atomic_##op(int i, atomic_t *v)					\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
 	asm volatile("// atomic_" #op "\n"				\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxr	%w0, %2\n"					\
+	"	" #asm_op "	%w0, %w0, %w3\n"			\
+	"	stxr	%w1, %w0, %2\n"					\
+	"	cbnz	%w1, 1b\n"					\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: #constraint "r" (i));						\
-}									\
-__LL_SC_EXPORT(atomic_##op);
+	: __stringify(constraint) "r" (i));				\
+}

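For reference, here is roughly what `ATOMIC_OP(or, orr, K)` (instantiated via `ATOMIC_OPS(or, orr, K)` further down) expands to once the preprocessor has run, with the `K` token intact:

```c
/* Approximate expansion; illustrative, not part of the patch. */
static inline void __ll_sc_atomic_or(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_or\n"
	"	prfm	pstl1strm, %2\n"	/* prefetch the line for store */
	"1:	ldxr	%w0, %2\n"		/* load-exclusive v->counter   */
	"	orr	%w0, %w0, %w3\n"	/* result |= i                 */
	"	stxr	%w1, %w0, %2\n"		/* tmp != 0 if we lost the     */
	"	cbnz	%w1, 1b\n"		/* exclusive monitor: retry    */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "K" "r" (i));				/* __stringify(K) "r" -> "Kr"  */
}
```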
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE int							\
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))		\
+static inline int							\
+__ll_sc_atomic_##op##_return##name(int i, atomic_t *v)			\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
 	asm volatile("// atomic_" #op "_return" #name "\n"		\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ld" #acq "xr	%w0, %2\n"					\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	st" #rel "xr	%w1, %w0, %2\n"					\
-"	cbnz	%w1, 1b\n"						\
-"	" #mb								\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ld" #acq "xr	%w0, %2\n"				\
+	"	" #asm_op "	%w0, %w0, %w3\n"			\
+	"	st" #rel "xr	%w1, %w0, %2\n"				\
+	"	cbnz	%w1, 1b\n"					\
+	"	" #mb							\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: #constraint "r" (i)						\
+	: __stringify(constraint) "r" (i)				\
 	: cl);								\
 									\
 	return result;							\
-}									\
-__LL_SC_EXPORT(atomic_##op##_return##name);
+}

-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
-__LL_SC_INLINE int							\
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))		\
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)	\
+static inline int							\
+__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)			\
 {									\
 	unsigned long tmp;						\
 	int val, result;						\
 									\
 	asm volatile("// atomic_fetch_" #op #name "\n"			\
-"	prfm	pstl1strm, %3\n"					\
-"1:	ld" #acq "xr	%w0, %3\n"					\
-"	" #asm_op "	%w1, %w0, %w4\n"				\
-"	st" #rel "xr	%w2, %w1, %3\n"					\
-"	cbnz	%w2, 1b\n"						\
-"	" #mb								\
+	"	prfm	pstl1strm, %3\n"				\
+	"1:	ld" #acq "xr	%w0, %3\n"				\
+	"	" #asm_op "	%w1, %w0, %w4\n"			\
+	"	st" #rel "xr	%w2, %w1, %3\n"				\
+	"	cbnz	%w2, 1b\n"					\
+	"	" #mb							\
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
-	: #constraint "r" (i)						\
+	: __stringify(constraint) "r" (i)				\
 	: cl);								\
 									\
 	return result;							\
-}									\
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+}

 #define ATOMIC_OPS(...)							\
 	ATOMIC_OP(__VA_ARGS__)						\
[...]
 	ATOMIC_FETCH_OP (_acquire,        ,  a,  , "memory", __VA_ARGS__)\
 	ATOMIC_FETCH_OP (_release,        ,   ,  l, "memory", __VA_ARGS__)

-ATOMIC_OPS(and, and, )
+ATOMIC_OPS(and, and, K)
+ATOMIC_OPS(or, orr, K)
+ATOMIC_OPS(xor, eor, K)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
 ATOMIC_OPS(andnot, bic, )
-ATOMIC_OPS(or, orr, )
-ATOMIC_OPS(xor, eor, )

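Each `ATOMIC_OPS()` line stamps out a whole family of helpers; the `(name, mb, acq, rel, cl)` arguments fed to `ATOMIC_FETCH_OP` select the load/store flavour and the trailing barrier. For `and`, the generated set is approximately the one below (the full `ATOMIC_OPS` body is elided from this hunk, so treat this as a sketch):

```c
/* Approximate family generated by ATOMIC_OPS(and, and, K). The ordering
 * comes from the instruction flavour: ldaxr acquires, stlxr releases,
 * and the fully-ordered variant adds a trailing dmb ish. */
static inline void atomic_and_family_demo(atomic_t *v)
{
	__ll_sc_atomic_and(0xf0, v);			 /* no return value   */
	(void)__ll_sc_atomic_fetch_and_relaxed(0xf0, v); /* ldxr/stxr         */
	(void)__ll_sc_atomic_fetch_and_acquire(0xf0, v); /* ldaxr/stxr        */
	(void)__ll_sc_atomic_fetch_and_release(0xf0, v); /* ldxr/stlxr        */
	(void)__ll_sc_atomic_fetch_and(0xf0, v);	 /* + dmb ish barrier */
}
```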
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
[...]
 #undef ATOMIC_OP

 #define ATOMIC64_OP(op, asm_op, constraint)				\
-__LL_SC_INLINE void							\
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
+static inline void							\
+__ll_sc_atomic64_##op(s64 i, atomic64_t *v)				\
 {									\
-	long result;							\
+	s64 result;							\
 	unsigned long tmp;						\
 									\
 	asm volatile("// atomic64_" #op "\n"				\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxr	%0, %2\n"					\
+	"	" #asm_op "	%0, %0, %3\n"				\
+	"	stxr	%w1, %0, %2\n"					\
+	"	cbnz	%w1, 1b"					\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: #constraint "r" (i));						\
-}									\
-__LL_SC_EXPORT(atomic64_##op);
+	: __stringify(constraint) "r" (i));				\
+}

 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE long							\
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))	\
+static inline long							\
+__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)		\
 {									\
-	long result;							\
+	s64 result;							\
 	unsigned long tmp;						\
 									\
 	asm volatile("// atomic64_" #op "_return" #name "\n"		\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ld" #acq "xr	%0, %2\n"					\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	st" #rel "xr	%w1, %0, %2\n"					\
-"	cbnz	%w1, 1b\n"						\
-"	" #mb								\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ld" #acq "xr	%0, %2\n"				\
+	"	" #asm_op "	%0, %0, %3\n"				\
+	"	st" #rel "xr	%w1, %0, %2\n"				\
+	"	cbnz	%w1, 1b\n"					\
+	"	" #mb							\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: #constraint "r" (i)						\
+	: __stringify(constraint) "r" (i)				\
 	: cl);								\
 									\
 	return result;							\
-}									\
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+}

 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE long							\
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))	\
+static inline long							\
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
 {									\
-	long result, val;						\
+	s64 result, val;						\
 	unsigned long tmp;						\
 									\
 	asm volatile("// atomic64_fetch_" #op #name "\n"		\
-"	prfm	pstl1strm, %3\n"					\
-"1:	ld" #acq "xr	%0, %3\n"					\
-"	" #asm_op "	%1, %0, %4\n"					\
-"	st" #rel "xr	%w2, %1, %3\n"					\
-"	cbnz	%w2, 1b\n"						\
-"	" #mb								\
+	"	prfm	pstl1strm, %3\n"				\
+	"1:	ld" #acq "xr	%0, %3\n"				\
+	"	" #asm_op "	%1, %0, %4\n"				\
+	"	st" #rel "xr	%w2, %1, %3\n"				\
+	"	cbnz	%w2, 1b\n"					\
+	"	" #mb							\
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
-	: #constraint "r" (i)						\
+	: __stringify(constraint) "r" (i)				\
 	: cl);								\
 									\
 	return result;							\
-}									\
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+}

 #define ATOMIC64_OPS(...)						\
 	ATOMIC64_OP(__VA_ARGS__)					\
[...]
 	ATOMIC64_FETCH_OP (_release,,  ,  l, "memory", __VA_ARGS__)

 ATOMIC64_OPS(and, and, L)
-ATOMIC64_OPS(andnot, bic, )
 ATOMIC64_OPS(or, orr, L)
 ATOMIC64_OPS(xor, eor, L)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC64_OPS(andnot, bic, )

 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

-__LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+static inline s64
+__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 {
-	long result;
+	s64 result;
 	unsigned long tmp;

 	asm volatile("// atomic64_dec_if_positive\n"
-"	prfm	pstl1strm, %2\n"
-"1:	ldxr	%0, %2\n"
-"	subs	%0, %0, #1\n"
-"	b.lt	2f\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b\n"
-"	dmb	ish\n"
-"2:"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	subs	%0, %0, #1\n"
+	"	b.lt	2f\n"
+	"	stlxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	"	dmb	ish\n"
+	"2:"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
 	: "cc", "memory");

 	return result;
 }
-__LL_SC_EXPORT(atomic64_dec_if_positive);

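The early `b.lt 2f` exit is the point of this helper: if the decrement would take the counter negative, the store-exclusive is skipped entirely and the counter is left untouched. In pseudo-C the loop behaves like the sketch below; `try_store_exclusive()` is hypothetical shorthand for the stlxr/cbnz pair, and the real sequence is only atomic because it is a single LL/SC loop:

```c
/* Pseudo-C sketch of __ll_sc_atomic64_dec_if_positive(); illustrative
 * only. try_store_exclusive() stands in for the stlxr + cbnz retry. */
static inline s64 dec_if_positive_sketch(atomic64_t *v)
{
	s64 result;

	do {
		result = v->counter;	/* ldxr: load-exclusive        */
		result -= 1;		/* subs                        */
		if (result < 0)
			return result;	/* b.lt 2f: bail out, no store */
	} while (!try_store_exclusive(&v->counter, result));

	/* dmb ish: full barrier only on the successful-store path */
	return result;
}
```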
-#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl, constraint)	\
-__LL_SC_INLINE unsigned long						\
-__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
-				     unsigned long old,			\
-				     unsigned long new))		\
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint)	\
+static inline u##sz							\
+__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,			\
+					 unsigned long old,		\
+					 u##sz new)			\
 {									\
-	unsigned long tmp, oldval;					\
+	unsigned long tmp;						\
+	u##sz oldval;							\
+									\
+	/*								\
+	 * Sub-word sizes require explicit casting so that the compare	\
+	 * part of the cmpxchg doesn't end up interpreting non-zero	\
+	 * upper bits of the register containing "old".			\
+	 */								\
+	if (sz < 32)							\
+		old = (u##sz)old;					\
 									\
 	asm volatile(							\
 	"	prfm	pstl1strm, %[v]\n"				\
-	"1:	ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"		\
+	"1:	ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"		\
 	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
 	"	cbnz	%" #w "[tmp], 2f\n"				\
-	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
+	"	st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"	\
 	"	cbnz	%w[tmp], 1b\n"					\
 	"	" #mb "\n"						\
 	"2:"								\
 	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
-	  [v] "+Q" (*(unsigned long *)ptr)				\
-	: [old] #constraint "r" (old), [new] "r" (new)			\
+	  [v] "+Q" (*(u##sz *)ptr)					\
+	: [old] __stringify(constraint) "r" (old), [new] "r" (new)	\
 	: cl);								\
 									\
 	return oldval;							\
-}									\
-__LL_SC_EXPORT(__cmpxchg_case_##name);
+}

 /*
  * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
  * handle the 'K' constraint for the value 4294967295 - thus we use no
  * constraint for 32 bit operations.
  */
-__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         , )
-__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         , )
-__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         , )
-__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         , L)
-__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory", )
-__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory", )
-__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory", )
-__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory", L)
-__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory", )
-__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory", )
-__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory", )
-__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory", L)
-__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory", )
-__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory", )
-__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory", )
-__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory", L)
+__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , K)
+__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , K)
+__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , K)
+__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         , L)
+__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", K)
+__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", K)
+__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", K)
+__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory", L)
+__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", K)
+__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", K)
+__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", K)
+__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory", L)
+__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory", K)
+__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory", K)
+__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory", K)
+__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory", L)

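The generated names now carry the operand width in bits rather than bytes, so the fully-ordered 16-bit variant, for instance, is `__ll_sc__cmpxchg_case_mb_16` and works through the halfword ldxrh/stlxrh forms. A hypothetical caller, showing why the `(u##sz)old` narrowing above matters:

```c
/* Hypothetical wrapper around the generated fully-ordered 16-bit case.
 * 'expected' widens to unsigned long at the call site; the (u16)old
 * cast inside the macro clears any stale upper bits so the eor-based
 * compare sees only the low 16 bits. Returns the value observed at
 * *seq, which equals 'expected' exactly when the swap happened. */
static inline u16 seq_cmpxchg16(u16 *seq, u16 expected, u16 next)
{
	return __ll_sc__cmpxchg_case_mb_16(seq, expected, next);
}
```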
 #undef __CMPXCHG_CASE

 #define __CMPXCHG_DBL(name, mb, rel, cl)				\
-__LL_SC_INLINE long							\
-__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
+static inline long							\
+__ll_sc__cmpxchg_double##name(unsigned long old1,			\
 				      unsigned long old2,		\
 				      unsigned long new1,		\
 				      unsigned long new2,		\
-				      volatile void *ptr))		\
+				      volatile void *ptr)		\
 {									\
 	unsigned long tmp, ret;						\
 									\
[...]
 	"	cbnz	%w0, 1b\n"					\
 	"	" #mb "\n"						\
 	"2:"								\
-	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
+	: "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)		\
 	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
 	: cl);								\
 									\
 	return ret;							\
-}									\
-__LL_SC_EXPORT(__cmpxchg_double##name);
+}

 __CMPXCHG_DBL(   ,        ,  ,        )
 __CMPXCHG_DBL(_mb, dmb ish, l, "memory")

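Note the widened memory operand in the hunk above: `"+Q" (*(__uint128_t *)ptr)` tells the compiler that the asm reads and writes all 16 bytes touched by the load/store-pair instructions, whereas the old `*(unsigned long *)ptr` lvalue only covered the first 8 bytes and could in principle let accesses to the second word be cached or reordered around the asm. Callers are unchanged; a hypothetical wrapper (the generated routine returns 0 when both old words matched and the new pair was stored):

```c
/* Hypothetical wrapper: atomically replace a naturally aligned
 * 16-byte {old1, old2} pair with {new1, new2}. A zero return means
 * both old words matched and the swap happened. */
static inline long swap_pair(unsigned long *pair,
			     unsigned long old1, unsigned long old2,
			     unsigned long new1, unsigned long new2)
{
	return __ll_sc__cmpxchg_double_mb(old1, old2, new1, new2, pair);
}
```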
 #undef __CMPXCHG_DBL
+#undef K

 #endif	/* __ASM_ATOMIC_LL_SC_H */