+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/atomic.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2002 Deep Blue Solutions Ltd.
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

 #ifndef __ASM_ATOMIC_LSE_H
 #define __ASM_ATOMIC_LSE_H

-#ifndef __ARM64_IN_ATOMIC_IMPL
-#error "please don't include this file directly"
-#endif
-
-#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
 #define ATOMIC_OP(op, asm_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void __lse_atomic_##op(int i, atomic_t *v)		\
 {									\
-	register int w0 asm ("w0") = i;					\
-	register atomic_t *x1 asm ("x1") = v;				\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),			\
-"	" #asm_op "	%w[i], %[v]\n")					\
-	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS);						\
+"	" #asm_op "	%w[i], %[v]\n"					\
+	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+	: "r" (v));							\
 }

 ATOMIC_OP(andnot, stclr)
...
 #undef ATOMIC_OP

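For reference, the visible invocation ATOMIC_OP(andnot, stclr) now expands to roughly the following (a hand-expanded sketch, not verbatim preprocessor output). The operands live in compiler-chosen registers instead of the fixed w0/x1 pair, and there is no out-of-line LL/SC alternative left to call:

	static inline void __lse_atomic_andnot(int i, atomic_t *v)
	{
		asm volatile(
		__LSE_PREAMBLE
		"	stclr	%w[i], %[v]\n"	/* atomically clear the bits of i in *v */
		: [i] "+r" (i), [v] "+Q" (v->counter)
		: "r" (v));			/* pointer also passed so it stays live */
	}
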
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline int atomic_fetch_##op##name(int i, atomic_t *v)		\
+static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
 {									\
-	register int w0 asm ("w0") = i;					\
-	register atomic_t *x1 asm ("x1") = v;				\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC(fetch_##op##name),				\
-	/* LSE atomics */						\
-"	" #asm_op #mb "	%w[i], %w[i], %[v]")				\
-	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+"	" #asm_op #mb "	%w[i], %w[i], %[v]"				\
+	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return w0;							\
+	return i;							\
 }

 #define ATOMIC_FETCH_OPS(op, asm_op)					\
...
 #undef ATOMIC_FETCH_OPS

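Here mb is pasted straight onto the mnemonic and cl supplies any extra clobbers; the invocations visible further down (ATOMIC_FETCH_OP_SUB(_relaxed, ) and ATOMIC_FETCH_OP_SUB( , al, "memory")) show the pattern: empty suffix and no clobber for the relaxed form, al plus a "memory" clobber for the fully-ordered one. A hypothetical expansion for a fully-ordered fetch-add, assuming the elided ATOMIC_FETCH_OPS list includes an (add, ldadd) pair:

	static inline int __lse_atomic_fetch_add(int i, atomic_t *v)
	{
		asm volatile(
		__LSE_PREAMBLE
		"	ldaddal	%w[i], %w[i], %[v]"	/* %w[i] receives the old value of *v */
		: [i] "+r" (i), [v] "+Q" (v->counter)
		: "r" (v)
		: "memory");

		return i;
	}
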
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
-static inline int atomic_add_return##name(int i, atomic_t *v)		\
+static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
 {									\
-	register int w0 asm ("w0") = i;					\
-	register atomic_t *x1 asm ("x1") = v;				\
+	u32 tmp;							\
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC(add_return##name)				\
-	__nops(1),							\
-	/* LSE atomics */						\
-	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
-	"	add	%w[i], %w[i], w30")				\
-	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
+	"	add	%w[i], %w[i], %w[tmp]"				\
+	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return w0;							\
+	return i;							\
 }

 ATOMIC_OP_ADD_RETURN(_relaxed,   )
...

 #undef ATOMIC_OP_ADD_RETURN

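The temporary is the real change in the return variants: the old code used w30 as scratch because the out-of-line LL/SC fallback was reached via a bl, which clobbers x30 anyway, so the link register was the only one that was free for nothing. With the fallback gone, an ordinary earlyclobber output lets the compiler pick. A hand-expanded sketch of the visible ATOMIC_OP_ADD_RETURN(_relaxed, ) invocation:

	static inline int __lse_atomic_add_return_relaxed(int i, atomic_t *v)
	{
		u32 tmp;

		asm volatile(
		__LSE_PREAMBLE
		"	ldadd	%w[i], %w[tmp], %[v]\n"	/* tmp = old *v; *v += i */
		"	add	%w[i], %w[i], %w[tmp]"	/* old + i == new value  */
		: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
		: "r" (v)
		: );	/* empty clobber list: cl is empty for _relaxed */

		return i;
	}
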
-static inline void atomic_and(int i, atomic_t *v)
+static inline void __lse_atomic_and(int i, atomic_t *v)
 {
-	register int w0 asm ("w0") = i;
-	register atomic_t *x1 asm ("x1") = v;
-
 	asm volatile(
 	__LSE_PREAMBLE
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	__LL_SC_ATOMIC(and)
-	__nops(1),
-	/* LSE atomics */
 	"	mvn	%w[i], %w[i]\n"
-	"	stclr	%w[i], %[v]")
-	: [i] "+&r" (w0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
+	"	stclr	%w[i], %[v]"
+	: [i] "+&r" (i), [v] "+Q" (v->counter)
+	: "r" (v));
 }

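LSE has no native atomic AND; the instruction family covers add, bit-clear, bit-set and exclusive-or, so AND is synthesized from bit-clear via a & b == a & ~(~b): mvn inverts i, then stclr clears exactly those bits (sub, below, is handled analogously as add-of-negation). The earlyclobber "+&r" on [i] is needed because %w[i] is written by mvn before the "r" (v) input is consumed, so the two must not share a register. The identity, spelled out:

	/* __lse_atomic_and(0x0f, v) performs: */
	v->counter &= ~(~0x0f);		/* mvn computes ~0x0f, stclr clears those bits */
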
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
-static inline int atomic_fetch_and##name(int i, atomic_t *v)		\
+static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
 {									\
-	register int w0 asm ("w0") = i;					\
-	register atomic_t *x1 asm ("x1") = v;				\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC(fetch_and##name)					\
-	__nops(1),							\
-	/* LSE atomics */						\
 	"	mvn	%w[i], %w[i]\n"					\
-	"	ldclr" #mb "	%w[i], %w[i], %[v]")			\
-	: [i] "+&r" (w0), [v] "+Q" (v->counter)			\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	"	ldclr" #mb "	%w[i], %w[i], %[v]"			\
+	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return w0;							\
+	return i;							\
 }

 ATOMIC_FETCH_OP_AND(_relaxed,   )
...

 #undef ATOMIC_FETCH_OP_AND

-static inline void atomic_sub(int i, atomic_t *v)
+static inline void __lse_atomic_sub(int i, atomic_t *v)
 {
-	register int w0 asm ("w0") = i;
-	register atomic_t *x1 asm ("x1") = v;
-
 	asm volatile(
 	__LSE_PREAMBLE
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	__LL_SC_ATOMIC(sub)
-	__nops(1),
-	/* LSE atomics */
 	"	neg	%w[i], %w[i]\n"
-	"	stadd	%w[i], %[v]")
-	: [i] "+&r" (w0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
+	"	stadd	%w[i], %[v]"
+	: [i] "+&r" (i), [v] "+Q" (v->counter)
+	: "r" (v));
 }

 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
-static inline int atomic_sub_return##name(int i, atomic_t *v)		\
+static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
 {									\
-	register int w0 asm ("w0") = i;					\
-	register atomic_t *x1 asm ("x1") = v;				\
+	u32 tmp;							\
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC(sub_return##name)				\
-	__nops(2),							\
-	/* LSE atomics */						\
 	"	neg	%w[i], %w[i]\n"					\
-	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
-	"	add	%w[i], %w[i], w30")				\
-	: [i] "+&r" (w0), [v] "+Q" (v->counter)			\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS , ##cl);					\
+	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
+	"	add	%w[i], %w[i], %w[tmp]"				\
+	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return w0;							\
+	return i;							\
 }

 ATOMIC_OP_SUB_RETURN(_relaxed,   )
...
 #undef ATOMIC_OP_SUB_RETURN

 #define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
-static inline int atomic_fetch_sub##name(int i, atomic_t *v)		\
+static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
 {									\
-	register int w0 asm ("w0") = i;					\
-	register atomic_t *x1 asm ("x1") = v;				\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC(fetch_sub##name)					\
-	__nops(1),							\
-	/* LSE atomics */						\
 	"	neg	%w[i], %w[i]\n"					\
-	"	ldadd" #mb "	%w[i], %w[i], %[v]")			\
-	: [i] "+&r" (w0), [v] "+Q" (v->counter)			\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	"	ldadd" #mb "	%w[i], %w[i], %[v]"			\
+	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return w0;							\
+	return i;							\
 }

 ATOMIC_FETCH_OP_SUB(_relaxed,   )
...
 ATOMIC_FETCH_OP_SUB(        , al, "memory")

 #undef ATOMIC_FETCH_OP_SUB
-#undef __LL_SC_ATOMIC

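A note on why fetch_sub needs no temporary while sub_return does: ldadd writes the old value of the memory word into its second register operand, and the old value is exactly what a fetch op must return, so %w[i] serves as both the negated addend and the destination. A C-level sketch of the relaxed behaviour (illustrative only; the real sequence is a single atomic ldadd):

	static inline int fetch_sub_sketch(int i, atomic_t *v)
	{
		int old = v->counter;	/* ldadd leaves this in %w[i] */

		v->counter = old - i;	/* neg + ldadd, done atomically */
		return old;
	}
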
-#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
 #define ATOMIC64_OP(op, asm_op)					\
-static inline void atomic64_##op(long i, atomic64_t *v)		\
+static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
 {									\
-	register long x0 asm ("x0") = i;				\
-	register atomic64_t *x1 asm ("x1") = v;				\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),			\
-"	" #asm_op "	%[i], %[v]\n")					\
-	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS);						\
+"	" #asm_op "	%[i], %[v]\n"					\
+	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+	: "r" (v));							\
 }

 ATOMIC64_OP(andnot, stclr)
...
 #undef ATOMIC64_OP

 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)	\
+static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 {									\
-	register long x0 asm ("x0") = i;				\
-	register atomic64_t *x1 asm ("x1") = v;				\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC64(fetch_##op##name),				\
-	/* LSE atomics */						\
-"	" #asm_op #mb "	%[i], %[i], %[v]")				\
-	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+"	" #asm_op #mb "	%[i], %[i], %[v]"				\
+	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return x0;							\
+	return i;							\
 }

 #define ATOMIC64_FETCH_OPS(op, asm_op)					\
...
 #undef ATOMIC64_FETCH_OPS

 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)			\
-static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
+static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
 {									\
-	register long x0 asm ("x0") = i;				\
-	register atomic64_t *x1 asm ("x1") = v;				\
+	unsigned long tmp;						\
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC64(add_return##name)				\
-	__nops(1),							\
-	/* LSE atomics */						\
-	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
-	"	add	%[i], %[i], x30")				\
-	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
+	"	add	%[i], %[i], %x[tmp]"				\
+	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return x0;							\
+	return i;							\
 }

 ATOMIC64_OP_ADD_RETURN(_relaxed,   )
...

 #undef ATOMIC64_OP_ADD_RETURN

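The 64-bit return variants print the temporary as %x[tmp]: the x operand modifier forces the 64-bit (x-register) name for the operand, matching the unsigned long tmp, where the 32-bit versions above use %w[tmp] with a u32. A hypothetical standalone illustration of the two modifiers, outside this file:

	unsigned long wide;
	unsigned int narrow;

	asm("mov	%x0, #1" : "=r" (wide));	/* emits: mov x<n>, #1 */
	asm("mov	%w0, #1" : "=r" (narrow));	/* emits: mov w<n>, #1 */
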
-static inline void atomic64_and(long i, atomic64_t *v)
+static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
-	register long x0 asm ("x0") = i;
-	register atomic64_t *x1 asm ("x1") = v;
-
 	asm volatile(
 	__LSE_PREAMBLE
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	__LL_SC_ATOMIC64(and)
-	__nops(1),
-	/* LSE atomics */
 	"	mvn	%[i], %[i]\n"
-	"	stclr	%[i], %[v]")
-	: [i] "+&r" (x0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
+	"	stclr	%[i], %[v]"
+	: [i] "+&r" (i), [v] "+Q" (v->counter)
+	: "r" (v));
 }

 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
-static inline long atomic64_fetch_and##name(long i, atomic64_t *v)	\
+static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
 {									\
-	register long x0 asm ("x0") = i;				\
-	register atomic64_t *x1 asm ("x1") = v;				\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC64(fetch_and##name)				\
-	__nops(1),							\
-	/* LSE atomics */						\
 	"	mvn	%[i], %[i]\n"					\
-	"	ldclr" #mb "	%[i], %[i], %[v]")			\
-	: [i] "+&r" (x0), [v] "+Q" (v->counter)			\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	"	ldclr" #mb "	%[i], %[i], %[v]"			\
+	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return x0;							\
+	return i;							\
 }

 ATOMIC64_FETCH_OP_AND(_relaxed,   )
...

 #undef ATOMIC64_FETCH_OP_AND

-static inline void atomic64_sub(long i, atomic64_t *v)
+static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
-	register long x0 asm ("x0") = i;
-	register atomic64_t *x1 asm ("x1") = v;
-
 	asm volatile(
 	__LSE_PREAMBLE
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	__LL_SC_ATOMIC64(sub)
-	__nops(1),
-	/* LSE atomics */
 	"	neg	%[i], %[i]\n"
-	"	stadd	%[i], %[v]")
-	: [i] "+&r" (x0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
+	"	stadd	%[i], %[v]"
+	: [i] "+&r" (i), [v] "+Q" (v->counter)
+	: "r" (v));
 }

 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)			\
-static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
 {									\
-	register long x0 asm ("x0") = i;				\
-	register atomic64_t *x1 asm ("x1") = v;				\
+	unsigned long tmp;						\
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC64(sub_return##name)				\
-	__nops(2),							\
-	/* LSE atomics */						\
 	"	neg	%[i], %[i]\n"					\
-	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
-	"	add	%[i], %[i], x30")				\
-	: [i] "+&r" (x0), [v] "+Q" (v->counter)			\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
+	"	add	%[i], %[i], %x[tmp]"				\
+	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return x0;							\
+	return i;							\
 }

 ATOMIC64_OP_SUB_RETURN(_relaxed,   )
...
 #undef ATOMIC64_OP_SUB_RETURN

 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
-static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)	\
+static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
 {									\
-	register long x0 asm ("x0") = i;				\
-	register atomic64_t *x1 asm ("x1") = v;				\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_ATOMIC64(fetch_sub##name)				\
-	__nops(1),							\
-	/* LSE atomics */						\
 	"	neg	%[i], %[i]\n"					\
-	"	ldadd" #mb "	%[i], %[i], %[v]")			\
-	: [i] "+&r" (x0), [v] "+Q" (v->counter)			\
-	: "r" (x1)							\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	"	ldadd" #mb "	%[i], %[i], %[v]"			\
+	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+	: "r" (v)							\
+	: cl);								\
 									\
-	return x0;							\
+	return i;							\
 }

 ATOMIC64_FETCH_OP_SUB(_relaxed,   )
...

 #undef ATOMIC64_FETCH_OP_SUB

-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 {
-	register long x0 asm ("x0") = (long)v;
+	unsigned long tmp;

 	asm volatile(
 	__LSE_PREAMBLE
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	__LL_SC_ATOMIC64(dec_if_positive)
-	__nops(6),
-	/* LSE atomics */
-	"1:	ldr	x30, %[v]\n"
-	"	subs	%[ret], x30, #1\n"
+	"1:	ldr	%x[tmp], %[v]\n"
+	"	subs	%[ret], %x[tmp], #1\n"
 	"	b.lt	2f\n"
-	"	casal	x30, %[ret], %[v]\n"
-	"	sub	x30, x30, #1\n"
-	"	sub	x30, x30, %[ret]\n"
-	"	cbnz	x30, 1b\n"
-	"2:")
-	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
+	"	casal	%x[tmp], %[ret], %[v]\n"
+	"	sub	%x[tmp], %x[tmp], #1\n"
+	"	sub	%x[tmp], %x[tmp], %[ret]\n"
+	"	cbnz	%x[tmp], 1b\n"
+	"2:"
+	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
 	:
-	: __LL_SC_CLOBBERS, "cc", "memory");
+	: "cc", "memory");

-	return x0;
+	return (long)v;
 }

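This is the one loop in the file: load the counter, compute the decrement, bail out before storing if the result would go negative, otherwise publish it with casal; the two sub instructions compute (value casal observed) minus (value originally loaded), and cbnz retries when they differ. Note also the operand trick: the pointer parameter v itself is recycled as the [ret] register, hence the final return (long)v. A C-level sketch of the control flow (cmpxchg64 stands in for the casal here; illustrative only):

	static inline s64 dec_if_positive_sketch(atomic64_t *v)
	{
		s64 old, new;

		do {
			old = READ_ONCE(v->counter);	/* 1: ldr  */
			new = old - 1;			/*    subs */
			if (new < 0)			/*    b.lt 2f: return without storing */
				break;
		} while (cmpxchg64(&v->counter, old, new) != old);	/* casal; cbnz 1b */

		return new;
	}
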
-#undef __LL_SC_ATOMIC64
-
-#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)
-
-#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
-static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
-						  unsigned long old,	\
-						  unsigned long new)	\
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
+static __always_inline u##sz						\
+__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
+					      u##sz old,		\
+					      u##sz new)		\
 {									\
 	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
-	register unsigned long x1 asm ("x1") = old;			\
-	register unsigned long x2 asm ("x2") = new;			\
+	register u##sz x1 asm ("x1") = old;				\
+	register u##sz x2 asm ("x2") = new;				\
+	unsigned long tmp;						\
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_CMPXCHG(name)						\
-	__nops(2),							\
-	/* LSE atomics */						\
-	"	mov	" #w "30, %" #w "[old]\n"			\
-	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
-	"	mov	%" #w "[ret], " #w "30")			\
-	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
+	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
+	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
+	"	mov	%" #w "[ret], %" #w "[tmp]"			\
+	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),		\
+	  [tmp] "=&r" (tmp)						\
 	: [old] "r" (x1), [new] "r" (x2)				\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	: cl);								\
 									\
 	return x0;							\
 }

-__CMPXCHG_CASE(w, b,     1,   )
-__CMPXCHG_CASE(w, h,     2,   )
-__CMPXCHG_CASE(w,  ,     4,   )
-__CMPXCHG_CASE(x,  ,     8,   )
-__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
-__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
-__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
-__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
-__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
-__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
-__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
-__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
-__CMPXCHG_CASE(w, b, mb_1, al, "memory")
-__CMPXCHG_CASE(w, h, mb_2, al, "memory")
-__CMPXCHG_CASE(w,  , mb_4, al, "memory")
-__CMPXCHG_CASE(x,  , mb_8, al, "memory")
+__CMPXCHG_CASE(w, b,     ,  8,   )
+__CMPXCHG_CASE(w, h,     , 16,   )
+__CMPXCHG_CASE(w,  ,     , 32,   )
+__CMPXCHG_CASE(x,  ,     , 64,   )
+__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
+__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
+__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
+__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
+__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
+__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
+__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
+__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
+__CMPXCHG_CASE(w, b, mb_,  8, al, "memory")
+__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
+__CMPXCHG_CASE(w,  , mb_, 32, al, "memory")
+__CMPXCHG_CASE(x,  , mb_, 64, al, "memory")

-#undef __LL_SC_CMPXCHG
 #undef __CMPXCHG_CASE

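Unlike the routines above, the cmpxchg cases keep their operands pinned to x0-x2; only the w30 scratch becomes a compiler-allocated tmp. The invocation table now encodes the size in bits (8/16/32/64) separately from the mnemonic suffix (b, h or none), so for example the acq_ 16-bit case assembles to casah. A hypothetical call site for one of the generated functions (name taken from the invocations above; these are internal helpers, normally reached through the arch cmpxchg() wrappers rather than called directly):

	static bool try_take_flag(u32 *flag)
	{
		/* acquire-ordered 32-bit compare-and-swap: emits "casa" */
		return __lse__cmpxchg_case_acq_32(flag, 0, 1) == 0;
	}
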
-#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)
-
 #define __CMPXCHG_DBL(name, mb, cl...)					\
-static inline long __cmpxchg_double##name(unsigned long old1,		\
+static __always_inline long						\
+__lse__cmpxchg_double##name(unsigned long old1,				\
 					 unsigned long old2,		\
 					 unsigned long new1,		\
 					 unsigned long new2,		\
...
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	ARM64_LSE_ATOMIC_INSN(						\
-	/* LL/SC */							\
-	__LL_SC_CMPXCHG_DBL(name)					\
-	__nops(3),							\
-	/* LSE atomics */						\
 	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
 	"	eor	%[old1], %[old1], %[oldval1]\n"			\
 	"	eor	%[old2], %[old2], %[oldval2]\n"			\
-	"	orr	%[old1], %[old1], %[old2]")			\
+	"	orr	%[old1], %[old1], %[old2]"			\
 	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
 	  [v] "+Q" (*(unsigned long *)ptr)				\
 	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
 	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
-	: __LL_SC_CLOBBERS, ##cl);					\
+	: cl);								\
 									\
 	return x0;							\
 }
...
 __CMPXCHG_DBL(   ,   )
 __CMPXCHG_DBL(_mb, al, "memory")

-#undef __LL_SC_CMPXCHG_DBL
 #undef __CMPXCHG_DBL

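The double-word case reports success arithmetically rather than via a flag: casp writes the values it actually observed back into x0/x1, each is XORed with the expected old value, and the OR of the two differences lands in x0, so the function returns 0 exactly when both words matched and the new pair was stored. In C terms (seen1/seen2 are hypothetical names for the values casp loaded):

	/* the eor/eor/orr tail computes: */
	long ret = (seen1 ^ old1) | (seen2 ^ old2);	/* 0 iff casp stored new1/new2 */
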
 #endif	/* __ASM_ATOMIC_LSE_H */