@@ -1,103 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/atomic.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2002 Deep Blue Solutions Ltd.
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef __ASM_ATOMIC_LL_SC_H
 #define __ASM_ATOMIC_LL_SC_H
 
-#ifndef __ARM64_IN_ATOMIC_IMPL
-#error "please don't include this file directly"
+#include <linux/stringify.h>
+
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+#define __LL_SC_FALLBACK(asm_ops) \
+" b 3f\n" \
+" .subsection 1\n" \
+"3:\n" \
+asm_ops "\n" \
+" b 4f\n" \
+" .previous\n" \
+"4:\n"
+#else
+#define __LL_SC_FALLBACK(asm_ops) asm_ops
+#endif
+
+#ifndef CONFIG_CC_HAS_K_CONSTRAINT
+#define K
 #endif
 
 /*
  * AArch64 UP and SMP safe atomic ops. We use load exclusive and
  * store exclusive to ensure that these are atomic. We may loop
  * to ensure that the update happens.
- *
- * NOTE: these functions do *not* follow the PCS and must explicitly
- * save any clobbered registers other than x0 (regardless of return
- * value). This is achieved through -fcall-saved-* compiler flags for
- * this file, which unfortunately don't work on a per-function basis
- * (the optimize attribute silently ignores these options).
  */
 
 #define ATOMIC_OP(op, asm_op, constraint) \
-__LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
+static inline void \
+__ll_sc_atomic_##op(int i, atomic_t *v) \
 { \
 	unsigned long tmp; \
 	int result; \
 \
 	asm volatile("// atomic_" #op "\n" \
+	__LL_SC_FALLBACK( \
 " prfm pstl1strm, %2\n" \
 "1: ldxr %w0, %2\n" \
 " " #asm_op " %w0, %w0, %w3\n" \
 " stxr %w1, %w0, %2\n" \
-" cbnz %w1, 1b" \
+" cbnz %w1, 1b\n") \
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
-	: #constraint "r" (i)); \
-} \
-__LL_SC_EXPORT(atomic_##op);
+	: __stringify(constraint) "r" (i)); \
+}
 
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
+static inline int \
+__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
 { \
 	unsigned long tmp; \
 	int result; \
 \
 	asm volatile("// atomic_" #op "_return" #name "\n" \
+	__LL_SC_FALLBACK( \
 " prfm pstl1strm, %2\n" \
 "1: ld" #acq "xr %w0, %2\n" \
 " " #asm_op " %w0, %w0, %w3\n" \
 " st" #rel "xr %w1, %w0, %2\n" \
 " cbnz %w1, 1b\n" \
-" " #mb \
+" " #mb ) \
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
-	: #constraint "r" (i) \
+	: __stringify(constraint) "r" (i) \
 	: cl); \
 \
 	return result; \
-} \
-__LL_SC_EXPORT(atomic_##op##_return##name);
+}
 
-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
-__LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
+static inline int \
+__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
 { \
 	unsigned long tmp; \
 	int val, result; \
 \
 	asm volatile("// atomic_fetch_" #op #name "\n" \
+	__LL_SC_FALLBACK( \
 " prfm pstl1strm, %3\n" \
 "1: ld" #acq "xr %w0, %3\n" \
 " " #asm_op " %w1, %w0, %w4\n" \
 " st" #rel "xr %w2, %w1, %3\n" \
 " cbnz %w2, 1b\n" \
-" " #mb \
+" " #mb ) \
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
-	: #constraint "r" (i) \
+	: __stringify(constraint) "r" (i) \
 	: cl); \
 \
 	return result; \
-} \
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+}
 
 #define ATOMIC_OPS(...) \
 	ATOMIC_OP(__VA_ARGS__) \
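
A note on the #constraint -> __stringify(constraint) change running through the
hunk above: "#" stringifies its argument *without* macro expansion, so the empty
"#define K" used when CONFIG_CC_HAS_K_CONSTRAINT is unset would still land as a
literal "K" in the constraint string. <linux/stringify.h> stringifies through a
second macro level, so the argument is expanded first. A minimal standalone
sketch of the difference (the demo itself is mine; the two-level macros mirror
include/linux/stringify.h):

#include <stdio.h>

#define K			/* expands to nothing, as when CONFIG_CC_HAS_K_CONSTRAINT is unset */

#define stringify_direct(x)	#x			/* stringifies the raw token */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)	/* expands x, then stringifies */

int main(void)
{
	printf("[%s]\n", stringify_direct(K));	/* prints [K] */
	printf("[%s]\n", __stringify(K));	/* prints [] */
	return 0;
}

Pasted against the adjacent "r" in the asm constraints, the first form would
always yield "Kr", while the second correctly degrades to plain "r" when K is
defined away.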
@@ -121,10 +119,15 @@
 	ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
 	ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
 
-ATOMIC_OPS(and, and, )
+ATOMIC_OPS(and, and, K)
+ATOMIC_OPS(or, orr, K)
+ATOMIC_OPS(xor, eor, K)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
 ATOMIC_OPS(andnot, bic, )
-ATOMIC_OPS(or, orr, )
-ATOMIC_OPS(xor, eor, )
 
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
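
For reference, with CONFIG_ARM64_LSE_ATOMICS disabled (so __LL_SC_FALLBACK() is
an identity) and a working 'K' constraint, ATOMIC_OPS(and, and, K) above makes
ATOMIC_OP(and, and, K) expand to roughly the following; reconstructed by hand
from the macros in the previous hunk, whitespace mine:

static inline void __ll_sc_atomic_and(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_and\n"
	" prfm pstl1strm, %2\n"
	"1: ldxr %w0, %2\n"
	" and %w0, %w0, %w3\n"
	" stxr %w1, %w0, %2\n"
	" cbnz %w1, 1b\n"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Kr" (i));	/* "K" "r" pastes to "Kr": logical immediate or register */
}

When CONFIG_CC_HAS_K_CONSTRAINT is unset, K stringifies to nothing and the
operand falls back to plain "r", sidestepping the GCC 'K' bug noted in the
comment further down.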
@@ -132,66 +135,66 @@
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op, asm_op, constraint) \
-__LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
+static inline void \
+__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
 { \
-	long result; \
+	s64 result; \
 	unsigned long tmp; \
 \
 	asm volatile("// atomic64_" #op "\n" \
+	__LL_SC_FALLBACK( \
 " prfm pstl1strm, %2\n" \
 "1: ldxr %0, %2\n" \
 " " #asm_op " %0, %0, %3\n" \
 " stxr %w1, %0, %2\n" \
-" cbnz %w1, 1b" \
+" cbnz %w1, 1b") \
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
-	: #constraint "r" (i)); \
-} \
-__LL_SC_EXPORT(atomic64_##op);
+	: __stringify(constraint) "r" (i)); \
+}
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
+static inline long \
+__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
 { \
-	long result; \
+	s64 result; \
 	unsigned long tmp; \
 \
 	asm volatile("// atomic64_" #op "_return" #name "\n" \
+	__LL_SC_FALLBACK( \
 " prfm pstl1strm, %2\n" \
 "1: ld" #acq "xr %0, %2\n" \
 " " #asm_op " %0, %0, %3\n" \
 " st" #rel "xr %w1, %0, %2\n" \
 " cbnz %w1, 1b\n" \
-" " #mb \
+" " #mb ) \
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
-	: #constraint "r" (i) \
+	: __stringify(constraint) "r" (i) \
 	: cl); \
 \
 	return result; \
-} \
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+}
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
+static inline long \
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
 { \
-	long result, val; \
+	s64 result, val; \
 	unsigned long tmp; \
 \
 	asm volatile("// atomic64_fetch_" #op #name "\n" \
+	__LL_SC_FALLBACK( \
 " prfm pstl1strm, %3\n" \
 "1: ld" #acq "xr %0, %3\n" \
 " " #asm_op " %1, %0, %4\n" \
 " st" #rel "xr %w2, %1, %3\n" \
 " cbnz %w2, 1b\n" \
-" " #mb \
+" " #mb ) \
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
-	: #constraint "r" (i) \
+	: __stringify(constraint) "r" (i) \
 	: cl); \
 \
 	return result; \
-} \
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+}
 
 #define ATOMIC64_OPS(...) \
 	ATOMIC64_OP(__VA_ARGS__) \
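
The __LL_SC_FALLBACK() wrapping threaded through the atomic64 macros above is
what keeps these LL/SC loops available as out-of-line fallbacks on an
LSE-capable kernel: ".subsection 1" diverts the loop past the hot straight-line
code of the current text section, with a branch in and back out. A
self-contained sketch of the same trick (function and label names are mine,
not from the patch; arm64 only):

/* Increment *p with the LL/SC loop emitted into .subsection 1, i.e. after
 * the enclosing function's straight-line code; the b/.subsection/.previous
 * scaffolding mirrors __LL_SC_FALLBACK(). Build on arm64 and inspect the
 * disassembly to see the loop placed after the function body. */
void subsection_increment(unsigned long *p)
{
	unsigned long tmp, res;

	asm volatile(
	" b 3f\n"
	" .subsection 1\n"	/* divert assembler output away from the hot path */
	"3:\n"
	"1: ldxr %0, %2\n"
	" add %0, %0, #1\n"
	" stxr %w1, %0, %2\n"
	" cbnz %w1, 1b\n"
	" b 4f\n"
	" .previous\n"		/* back to subsection 0 */
	"4:\n"
	: "=&r" (res), "=&r" (tmp), "+Q" (*p));
}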
@@ -216,22 +219,28 @@
 	ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
 
 ATOMIC64_OPS(and, and, L)
-ATOMIC64_OPS(andnot, bic, )
 ATOMIC64_OPS(or, orr, L)
 ATOMIC64_OPS(xor, eor, L)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC64_OPS(andnot, bic, )
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-__LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+static inline s64
+__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 {
-	long result;
+	s64 result;
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
+	__LL_SC_FALLBACK(
 " prfm pstl1strm, %2\n"
 "1: ldxr %0, %2\n"
 " subs %0, %0, #1\n"
@@ -239,76 +248,85 @@
 " stlxr %w1, %0, %2\n"
 " cbnz %w1, 1b\n"
 " dmb ish\n"
-"2:"
+"2:")
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
 	: "cc", "memory");
 
 	return result;
 }
-__LL_SC_EXPORT(atomic64_dec_if_positive);
 
-#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl, constraint) \
-__LL_SC_INLINE unsigned long \
-__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
-	unsigned long old, \
-	unsigned long new)) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
+static inline u##sz \
+__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
+	unsigned long old, \
+	u##sz new) \
 { \
-	unsigned long tmp, oldval; \
+	unsigned long tmp; \
+	u##sz oldval; \
+ \
+	/* \
+	 * Sub-word sizes require explicit casting so that the compare \
+	 * part of the cmpxchg doesn't end up interpreting non-zero \
+	 * upper bits of the register containing "old". \
+	 */ \
+	if (sz < 32) \
+		old = (u##sz)old; \
 \
 	asm volatile( \
+	__LL_SC_FALLBACK( \
 	" prfm pstl1strm, %[v]\n" \
-	"1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
+	"1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
 	" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
 	" cbnz %" #w "[tmp], 2f\n" \
-	" st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
+	" st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
 	" cbnz %w[tmp], 1b\n" \
 	" " #mb "\n" \
-	"2:" \
+	"2:") \
 	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
-	  [v] "+Q" (*(unsigned long *)ptr) \
-	: [old] #constraint "r" (old), [new] "r" (new) \
+	  [v] "+Q" (*(u##sz *)ptr) \
+	: [old] __stringify(constraint) "r" (old), [new] "r" (new) \
 	: cl); \
 \
 	return oldval; \
-} \
-__LL_SC_EXPORT(__cmpxchg_case_##name);
+}
 
 /*
  * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
  * handle the 'K' constraint for the value 4294967295 - thus we use no
  * constraint for 32 bit operations.
  */
-__CMPXCHG_CASE(w, b, 1, , , , , )
-__CMPXCHG_CASE(w, h, 2, , , , , )
-__CMPXCHG_CASE(w, , 4, , , , , )
-__CMPXCHG_CASE( , , 8, , , , , L)
-__CMPXCHG_CASE(w, b, acq_1, , a, , "memory", )
-__CMPXCHG_CASE(w, h, acq_2, , a, , "memory", )
-__CMPXCHG_CASE(w, , acq_4, , a, , "memory", )
-__CMPXCHG_CASE( , , acq_8, , a, , "memory", L)
-__CMPXCHG_CASE(w, b, rel_1, , , l, "memory", )
-__CMPXCHG_CASE(w, h, rel_2, , , l, "memory", )
-__CMPXCHG_CASE(w, , rel_4, , , l, "memory", )
-__CMPXCHG_CASE( , , rel_8, , , l, "memory", L)
-__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory", )
-__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory", )
-__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory", )
-__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory", L)
+__CMPXCHG_CASE(w, b, , 8, , , , , K)
+__CMPXCHG_CASE(w, h, , 16, , , , , K)
+__CMPXCHG_CASE(w, , , 32, , , , , K)
+__CMPXCHG_CASE( , , , 64, , , , , L)
+__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", K)
+__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K)
+__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", K)
+__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
+__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K)
+__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K)
+__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K)
+__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
+__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
 
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, rel, cl) \
-__LL_SC_INLINE long \
-__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
+static inline long \
+__ll_sc__cmpxchg_double##name(unsigned long old1, \
 	unsigned long old2, \
 	unsigned long new1, \
 	unsigned long new2, \
-	volatile void *ptr)) \
+	volatile void *ptr) \
 { \
 	unsigned long tmp, ret; \
 \
 	asm volatile("// __cmpxchg_double" #name "\n" \
+	__LL_SC_FALLBACK( \
 	" prfm pstl1strm, %2\n" \
 	"1: ldxp %0, %1, %2\n" \
 	" eor %0, %0, %3\n" \
@@ -318,18 +336,18 @@
 	" st" #rel "xp %w0, %5, %6, %2\n" \
 	" cbnz %w0, 1b\n" \
 	" " #mb "\n" \
-	"2:" \
+	"2:") \
 	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
 	: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
 	: cl); \
 \
 	return ret; \
-} \
-__LL_SC_EXPORT(__cmpxchg_double##name);
+}
 
 __CMPXCHG_DBL( , , , )
 __CMPXCHG_DBL(_mb, dmb ish, l, "memory")
 
 #undef __CMPXCHG_DBL
+#undef K
 
 #endif /* __ASM_ATOMIC_LL_SC_H */
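
Finally, a note on the "if (sz < 32) old = (u##sz)old;" added to
__CMPXCHG_CASE(): ldxrb/ldxrh zero-extend the loaded value into a 32-bit
register, but the eor compares the whole register, so stray upper bits in
"old" would make the compare fail spuriously. The same effect in plain C,
with hypothetical values for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long old = 0xdeadbeefUL;	/* caller left junk above bit 15 */
	uint16_t mem = 0xbeef;			/* 16-bit value actually in memory */

	/* Without the cast, the 32-bit compare (the eor) sees the junk: */
	printf("no cast:   %d\n", (uint32_t)mem == (uint32_t)old);	/* 0 */

	/* With old = (u16)old first, only the low 16 bits are compared: */
	printf("with cast: %d\n", (uint32_t)mem == (uint16_t)old);	/* 1 */
	return 0;
}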