forked from ~ljy/RK356X_SDK_RELEASE

hc · 2024-01-04 · commit 1543e317f1da31b75942316931e8f491a8920811
kernel/arch/arm64/include/asm/atomic_ll_sc.h
@@ -1,103 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/atomic.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2002 Deep Blue Solutions Ltd.
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #ifndef __ASM_ATOMIC_LL_SC_H
 #define __ASM_ATOMIC_LL_SC_H

-#ifndef __ARM64_IN_ATOMIC_IMPL
-#error "please don't include this file directly"
+#include <linux/stringify.h>
+
+#ifndef CONFIG_CC_HAS_K_CONSTRAINT
+#define K
 #endif

 /*
  * AArch64 UP and SMP safe atomic ops. We use load exclusive and
  * store exclusive to ensure that these are atomic. We may loop
  * to ensure that the update happens.
- *
- * NOTE: these functions do *not* follow the PCS and must explicitly
- * save any clobbered registers other than x0 (regardless of return
- * value). This is achieved through -fcall-saved-* compiler flags for
- * this file, which unfortunately don't work on a per-function basis
- * (the optimize attribute silently ignores these options).
  */

 #define ATOMIC_OP(op, asm_op, constraint) \
-__LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
+static inline void \
+__ll_sc_atomic_##op(int i, atomic_t *v) \
 { \
 unsigned long tmp; \
 int result; \
 \
 asm volatile("// atomic_" #op "\n" \
-" prfm pstl1strm, %2\n" \
-"1: ldxr %w0, %2\n" \
-" " #asm_op " %w0, %w0, %w3\n" \
-" stxr %w1, %w0, %2\n" \
-" cbnz %w1, 1b" \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %w0, %2\n" \
+ " " #asm_op " %w0, %w0, %w3\n" \
+ " stxr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b\n" \
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i)); \
-} \
-__LL_SC_EXPORT(atomic_##op);
+ : __stringify(constraint) "r" (i)); \
+}

 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
+static inline int \
+__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
 { \
 unsigned long tmp; \
 int result; \
 \
 asm volatile("// atomic_" #op "_return" #name "\n" \
-" prfm pstl1strm, %2\n" \
-"1: ld" #acq "xr %w0, %2\n" \
-" " #asm_op " %w0, %w0, %w3\n" \
-" st" #rel "xr %w1, %w0, %2\n" \
-" cbnz %w1, 1b\n" \
-" " #mb \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr %w0, %2\n" \
+ " " #asm_op " %w0, %w0, %w3\n" \
+ " st" #rel "xr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb \
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i) \
+ : __stringify(constraint) "r" (i) \
 : cl); \
 \
 return result; \
-} \
-__LL_SC_EXPORT(atomic_##op##_return##name);
+}

-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
-__LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
+static inline int \
+__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
 { \
 unsigned long tmp; \
 int val, result; \
 \
 asm volatile("// atomic_fetch_" #op #name "\n" \
-" prfm pstl1strm, %3\n" \
-"1: ld" #acq "xr %w0, %3\n" \
-" " #asm_op " %w1, %w0, %w4\n" \
-" st" #rel "xr %w2, %w1, %3\n" \
-" cbnz %w2, 1b\n" \
-" " #mb \
+ " prfm pstl1strm, %3\n" \
+ "1: ld" #acq "xr %w0, %3\n" \
+ " " #asm_op " %w1, %w0, %w4\n" \
+ " st" #rel "xr %w2, %w1, %3\n" \
+ " cbnz %w2, 1b\n" \
+ " " #mb \
 : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i) \
+ : __stringify(constraint) "r" (i) \
 : cl); \
 \
 return result; \
-} \
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+}

 #define ATOMIC_OPS(...) \
 ATOMIC_OP(__VA_ARGS__) \
@@ -121,10 +103,15 @@
 ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
 ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

-ATOMIC_OPS(and, and, )
+ATOMIC_OPS(and, and, K)
+ATOMIC_OPS(or, orr, K)
+ATOMIC_OPS(xor, eor, K)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
 ATOMIC_OPS(andnot, bic, )
-ATOMIC_OPS(or, orr, )
-ATOMIC_OPS(xor, eor, )

 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
@@ -132,66 +119,63 @@
 #undef ATOMIC_OP

 #define ATOMIC64_OP(op, asm_op, constraint) \
-__LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
+static inline void \
+__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
 { \
- long result; \
+ s64 result; \
 unsigned long tmp; \
 \
 asm volatile("// atomic64_" #op "\n" \
-" prfm pstl1strm, %2\n" \
-"1: ldxr %0, %2\n" \
-" " #asm_op " %0, %0, %3\n" \
-" stxr %w1, %0, %2\n" \
-" cbnz %w1, 1b" \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %0, %2\n" \
+ " " #asm_op " %0, %0, %3\n" \
+ " stxr %w1, %0, %2\n" \
+ " cbnz %w1, 1b" \
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i)); \
-} \
-__LL_SC_EXPORT(atomic64_##op);
+ : __stringify(constraint) "r" (i)); \
+}

 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
+static inline long \
+__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
 { \
- long result; \
+ s64 result; \
 unsigned long tmp; \
 \
 asm volatile("// atomic64_" #op "_return" #name "\n" \
-" prfm pstl1strm, %2\n" \
-"1: ld" #acq "xr %0, %2\n" \
-" " #asm_op " %0, %0, %3\n" \
-" st" #rel "xr %w1, %0, %2\n" \
-" cbnz %w1, 1b\n" \
-" " #mb \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr %0, %2\n" \
+ " " #asm_op " %0, %0, %3\n" \
+ " st" #rel "xr %w1, %0, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb \
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i) \
+ : __stringify(constraint) "r" (i) \
 : cl); \
 \
 return result; \
-} \
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+}

 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
-__LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
+static inline long \
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
 { \
- long result, val; \
+ s64 result, val; \
 unsigned long tmp; \
 \
 asm volatile("// atomic64_fetch_" #op #name "\n" \
-" prfm pstl1strm, %3\n" \
-"1: ld" #acq "xr %0, %3\n" \
-" " #asm_op " %1, %0, %4\n" \
-" st" #rel "xr %w2, %1, %3\n" \
-" cbnz %w2, 1b\n" \
-" " #mb \
+ " prfm pstl1strm, %3\n" \
+ "1: ld" #acq "xr %0, %3\n" \
+ " " #asm_op " %1, %0, %4\n" \
+ " st" #rel "xr %w2, %1, %3\n" \
+ " cbnz %w2, 1b\n" \
+ " " #mb \
 : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i) \
+ : __stringify(constraint) "r" (i) \
 : cl); \
 \
 return result; \
-} \
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+}

 #define ATOMIC64_OPS(...) \
 ATOMIC64_OP(__VA_ARGS__) \
@@ -216,95 +200,107 @@
 ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)

 ATOMIC64_OPS(and, and, L)
-ATOMIC64_OPS(andnot, bic, )
 ATOMIC64_OPS(or, orr, L)
 ATOMIC64_OPS(xor, eor, L)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC64_OPS(andnot, bic, )

 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

-__LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+static inline s64
+__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 {
- long result;
+ s64 result;
 unsigned long tmp;

 asm volatile("// atomic64_dec_if_positive\n"
-" prfm pstl1strm, %2\n"
-"1: ldxr %0, %2\n"
-" subs %0, %0, #1\n"
-" b.lt 2f\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b\n"
-" dmb ish\n"
-"2:"
+ " prfm pstl1strm, %2\n"
+ "1: ldxr %0, %2\n"
+ " subs %0, %0, #1\n"
+ " b.lt 2f\n"
+ " stlxr %w1, %0, %2\n"
+ " cbnz %w1, 1b\n"
+ " dmb ish\n"
+ "2:"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 :
 : "cc", "memory");

 return result;
 }
-__LL_SC_EXPORT(atomic64_dec_if_positive);

-#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl, constraint) \
-__LL_SC_INLINE unsigned long \
-__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
- unsigned long old, \
- unsigned long new)) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
+static inline u##sz \
+__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
+ unsigned long old, \
+ u##sz new) \
 { \
- unsigned long tmp, oldval; \
+ unsigned long tmp; \
+ u##sz oldval; \
+ \
+ /* \
+ * Sub-word sizes require explicit casting so that the compare \
+ * part of the cmpxchg doesn't end up interpreting non-zero \
+ * upper bits of the register containing "old". \
+ */ \
+ if (sz < 32) \
+ old = (u##sz)old; \
 \
 asm volatile( \
 " prfm pstl1strm, %[v]\n" \
- "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
+ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
 " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
 " cbnz %" #w "[tmp], 2f\n" \
- " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
+ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
 " cbnz %w[tmp], 1b\n" \
 " " #mb "\n" \
 "2:" \
 : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
- [v] "+Q" (*(unsigned long *)ptr) \
- : [old] #constraint "r" (old), [new] "r" (new) \
+ [v] "+Q" (*(u##sz *)ptr) \
+ : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
 : cl); \
 \
 return oldval; \
-} \
-__LL_SC_EXPORT(__cmpxchg_case_##name);
+}

 /*
  * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
  * handle the 'K' constraint for the value 4294967295 - thus we use no
  * constraint for 32 bit operations.
  */
-__CMPXCHG_CASE(w, b, 1, , , , , )
-__CMPXCHG_CASE(w, h, 2, , , , , )
-__CMPXCHG_CASE(w, , 4, , , , , )
-__CMPXCHG_CASE( , , 8, , , , , L)
-__CMPXCHG_CASE(w, b, acq_1, , a, , "memory", )
-__CMPXCHG_CASE(w, h, acq_2, , a, , "memory", )
-__CMPXCHG_CASE(w, , acq_4, , a, , "memory", )
-__CMPXCHG_CASE( , , acq_8, , a, , "memory", L)
-__CMPXCHG_CASE(w, b, rel_1, , , l, "memory", )
-__CMPXCHG_CASE(w, h, rel_2, , , l, "memory", )
-__CMPXCHG_CASE(w, , rel_4, , , l, "memory", )
-__CMPXCHG_CASE( , , rel_8, , , l, "memory", L)
-__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory", )
-__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory", )
-__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory", )
-__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory", L)
+__CMPXCHG_CASE(w, b, , 8, , , , , K)
+__CMPXCHG_CASE(w, h, , 16, , , , , K)
+__CMPXCHG_CASE(w, , , 32, , , , , K)
+__CMPXCHG_CASE( , , , 64, , , , , L)
+__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", K)
+__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K)
+__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", K)
+__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
+__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K)
+__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K)
+__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K)
+__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
+__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)

 #undef __CMPXCHG_CASE

 #define __CMPXCHG_DBL(name, mb, rel, cl) \
-__LL_SC_INLINE long \
-__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
+static inline long \
+__ll_sc__cmpxchg_double##name(unsigned long old1, \
 unsigned long old2, \
 unsigned long new1, \
 unsigned long new2, \
- volatile void *ptr)) \
+ volatile void *ptr) \
 { \
 unsigned long tmp, ret; \
 \
@@ -319,17 +315,17 @@
 " cbnz %w0, 1b\n" \
 " " #mb "\n" \
 "2:" \
- : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
+ : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \
 : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
 : cl); \
 \
 return ret; \
-} \
-__LL_SC_EXPORT(__cmpxchg_double##name);
+}

 __CMPXCHG_DBL( , , , )
 __CMPXCHG_DBL(_mb, dmb ish, l, "memory")

 #undef __CMPXCHG_DBL
+#undef K

 #endif /* __ASM_ATOMIC_LL_SC_H */
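
Editor's note, not part of the patch: the rewritten header leans on two small preprocessor tricks. __stringify() double-expands its argument before stringizing it, so the "constraint" macro parameter becomes a string literal that is concatenated onto "r" in each asm operand list; and when CONFIG_CC_HAS_K_CONSTRAINT is not set, the empty "#define K" makes the 'K' (32-bit logical immediate) constraint vanish, leaving a plain register operand. The stand-alone GNU C sketch below illustrates both; the CC_HAS_K_CONSTRAINT name is only a stand-in for the real Kconfig symbol, and the __stringify definition is reproduced here for the demo.

#include <stdio.h>

/*
 * Same shape as <linux/stringify.h>: the inner macro stringizes only
 * after its argument has been macro-expanded, so an argument that is
 * defined to nothing stringizes to the empty string "".
 */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

/*
 * Stand-in for CONFIG_CC_HAS_K_CONSTRAINT: when the symbol is absent,
 * K is defined away, and __stringify(K) "r" collapses to "" "r",
 * i.e. a plain "r".
 */
#ifndef CC_HAS_K_CONSTRAINT
#define K
#endif

int main(void)
{
	/* String-literal concatenation, as used in the asm operand lists:
	 * "Kr" means "32-bit logical immediate, or a register"; plain "r"
	 * forces the value into a register. */
	const char *constraint = __stringify(K) "r";

	printf("operand constraint: \"%s\"\n", constraint);
	return 0;
}

Compiling the sketch with -DCC_HAS_K_CONSTRAINT prints "Kr"; without it, K expands to nothing and the constraint degrades to "r", which is the same fallback the #ifndef CONFIG_CC_HAS_K_CONSTRAINT block in the patch provides for compilers with broken 'K' handling.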