forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/arch/arm64/include/asm/cmpxchg.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/cmpxchg.h
  *
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_CMPXCHG_H
 #define __ASM_CMPXCHG_H
@@ -21,7 +10,6 @@
 #include <linux/build_bug.h>
 #include <linux/compiler.h>

-#include <asm/atomic.h>
 #include <asm/barrier.h>
 #include <asm/lse.h>

@@ -30,46 +18,46 @@
  * barrier case is generated as release+dmb for the former and
  * acquire+release for the latter.
  */
-#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \
-static inline unsigned long __xchg_case_##name(unsigned long x, \
-					       volatile void *ptr) \
-{ \
-	unsigned long ret, tmp; \
- \
-	asm volatile(ARM64_LSE_ATOMIC_INSN( \
-	/* LL/SC */ \
-	"	prfm	pstl1strm, %2\n" \
-	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n" \
-	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \
-	"	cbnz	%w1, 1b\n" \
-	"	" #mb, \
-	/* LSE atomics */ \
-	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
-		__nops(3) \
-	"	" #nop_lse) \
-	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
-	: "r" (x) \
-	: cl); \
- \
-	return ret; \
+#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \
+static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \
+{ \
+	u##sz ret; \
+	unsigned long tmp; \
+ \
+	asm volatile(ARM64_LSE_ATOMIC_INSN( \
+	/* LL/SC */ \
+	"	prfm	pstl1strm, %2\n" \
+	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \
+	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \
+	"	cbnz	%w1, 1b\n" \
+	"	" #mb, \
+	/* LSE atomics */ \
+	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \
+		__nops(3) \
+	"	" #nop_lse) \
+	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \
+	: "r" (x) \
+	: cl); \
+ \
+	return ret; \
 }

-__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
-__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
-__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
-__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
-__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
-__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
-__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
-__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
-__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
-__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
-__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
-__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
-__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
-__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
-__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
-__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
+__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
+__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
+__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
+__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
+__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
+__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

 #undef __XCHG_CASE

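Note on the hunk above: the reworked __XCHG_CASE() now emits one exchange helper per operand width (8/16/32/64 bits, returning u8/u16/u32/u64 instead of unsigned long), each in four ordering flavours: relaxed, acquire (ld*a*xr / swpa), release (st*l*xr / swpl) and fully ordered (acquire + release plus dmb ish on the LL/SC path). A rough user-space analogue of those four flavours follows; this is an editor's sketch only, using the GCC/Clang __atomic builtins, with a hypothetical demo_xchg_orderings() function rather than the kernel code path, and the SEQ_CST mapping is only an approximation of the mb_ variants.

#include <stdint.h>

/* one atomic exchange per ordering; each returns the previous value of *p */
static uint32_t demo_xchg_orderings(uint32_t *p, uint32_t v)
{
	uint32_t a = __atomic_exchange_n(p, v, __ATOMIC_RELAXED); /* ~ __xchg_case_32     */
	uint32_t b = __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE); /* ~ __xchg_case_acq_32 */
	uint32_t c = __atomic_exchange_n(p, v, __ATOMIC_RELEASE); /* ~ __xchg_case_rel_32 */
	uint32_t d = __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST); /* ~ __xchg_case_mb_32  */

	return a ^ b ^ c ^ d;
}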
@@ -80,13 +68,13 @@
 { \
 	switch (size) { \
 	case 1: \
-		return __xchg_case##sfx##_1(x, ptr); \
-	case 2: \
-		return __xchg_case##sfx##_2(x, ptr); \
-	case 4: \
-		return __xchg_case##sfx##_4(x, ptr); \
-	case 8: \
 		return __xchg_case##sfx##_8(x, ptr); \
+	case 2: \
+		return __xchg_case##sfx##_16(x, ptr); \
+	case 4: \
+		return __xchg_case##sfx##_32(x, ptr); \
+	case 8: \
+		return __xchg_case##sfx##_64(x, ptr); \
 	default: \
 		BUILD_BUG(); \
 	} \
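The hunk above only renames the targets of the size dispatch in __xchg(): the wrapper still switches on the operand size (1/2/4/8 bytes) but now calls helpers suffixed by bit width (_8/_16/_32/_64) instead of byte count (_1/_2/_4/_8). A self-contained sketch of that dispatch pattern follows; it is an editor's illustration, the demo_xchg* names are hypothetical, and the relaxed __atomic builtin stands in for the real assembly helpers.

#include <stdint.h>

/* stand-ins for __xchg_case_{8,16,32,64}(); relaxed ordering only */
static inline uint8_t  demo_xchg_8 (volatile void *p, uint8_t  x) { return __atomic_exchange_n((volatile uint8_t  *)p, x, __ATOMIC_RELAXED); }
static inline uint16_t demo_xchg_16(volatile void *p, uint16_t x) { return __atomic_exchange_n((volatile uint16_t *)p, x, __ATOMIC_RELAXED); }
static inline uint32_t demo_xchg_32(volatile void *p, uint32_t x) { return __atomic_exchange_n((volatile uint32_t *)p, x, __ATOMIC_RELAXED); }
static inline uint64_t demo_xchg_64(volatile void *p, uint64_t x) { return __atomic_exchange_n((volatile uint64_t *)p, x, __ATOMIC_RELAXED); }

/* dispatch on sizeof(*ptr), mirroring the switch (size) in __xchg() */
#define demo_xchg(ptr, x) \
	(sizeof(*(ptr)) == 1 ? demo_xchg_8 ((ptr), (x)) : \
	 sizeof(*(ptr)) == 2 ? demo_xchg_16((ptr), (x)) : \
	 sizeof(*(ptr)) == 4 ? demo_xchg_32((ptr), (x)) : \
			       demo_xchg_64((ptr), (x)))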
@@ -110,10 +98,54 @@
 })

 /* xchg */
-#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
-#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
-#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
-#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
+#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
+#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
+#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
+#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
+
+#define __CMPXCHG_CASE(name, sz) \
+static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
+					      u##sz old, \
+					      u##sz new) \
+{ \
+	return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \
+				ptr, old, new); \
+}
+
+__CMPXCHG_CASE(    ,  8)
+__CMPXCHG_CASE(    , 16)
+__CMPXCHG_CASE(    , 32)
+__CMPXCHG_CASE(    , 64)
+__CMPXCHG_CASE(acq_,  8)
+__CMPXCHG_CASE(acq_, 16)
+__CMPXCHG_CASE(acq_, 32)
+__CMPXCHG_CASE(acq_, 64)
+__CMPXCHG_CASE(rel_,  8)
+__CMPXCHG_CASE(rel_, 16)
+__CMPXCHG_CASE(rel_, 32)
+__CMPXCHG_CASE(rel_, 64)
+__CMPXCHG_CASE(mb_,  8)
+__CMPXCHG_CASE(mb_, 16)
+__CMPXCHG_CASE(mb_, 32)
+__CMPXCHG_CASE(mb_, 64)
+
+#undef __CMPXCHG_CASE
+
+#define __CMPXCHG_DBL(name) \
+static inline long __cmpxchg_double##name(unsigned long old1, \
+					  unsigned long old2, \
+					  unsigned long new1, \
+					  unsigned long new2, \
+					  volatile void *ptr) \
+{ \
+	return __lse_ll_sc_body(_cmpxchg_double##name, \
+				old1, old2, new1, new2, ptr); \
+}
+
+__CMPXCHG_DBL(   )
+__CMPXCHG_DBL(_mb)
+
+#undef __CMPXCHG_DBL

 #define __CMPXCHG_GEN(sfx) \
 static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
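The __CMPXCHG_CASE()/__CMPXCHG_DBL() wrappers added in the hunk above route every compare-and-swap through __lse_ll_sc_body(), which selects either the ARMv8.1 LSE implementation or the out-of-line LL/SC fallback depending on CPU support; the new arch_ prefix on xchg/cmpxchg matches the upstream conversion that lets the generic instrumented wrappers supply the unprefixed names on top. Below is a hedged user-space sketch of that "pick one of two implementations" idea; all demo_* names and the system_uses_lse_atomics_demo flag are hypothetical, and the __atomic builtin merely stands in for both code paths.

#include <stdbool.h>
#include <stdint.h>

static bool system_uses_lse_atomics_demo;	/* stand-in for the CPU-capability check */

/* returns the value found at *p; stores 'new' only if that value equalled 'old' */
static inline uint64_t demo_cmpxchg64_lse(volatile uint64_t *p, uint64_t old, uint64_t new)
{
	__atomic_compare_exchange_n(p, &old, new, false,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	return old;	/* on failure the builtin wrote the observed value into 'old' */
}

static inline uint64_t demo_cmpxchg64_llsc(volatile uint64_t *p, uint64_t old, uint64_t new)
{
	/* same contract; the real fallback is the hand-written LDXR/STXR loop */
	return demo_cmpxchg64_lse(p, old, new);
}

static inline uint64_t demo_cmpxchg64(volatile uint64_t *p, uint64_t old, uint64_t new)
{
	return system_uses_lse_atomics_demo ?
	       demo_cmpxchg64_lse(p, old, new) :
	       demo_cmpxchg64_llsc(p, old, new);
}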
@@ -123,13 +155,13 @@
 { \
 	switch (size) { \
 	case 1: \
-		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \
-	case 2: \
-		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \
-	case 4: \
-		return __cmpxchg_case##sfx##_4(ptr, old, new); \
-	case 8: \
 		return __cmpxchg_case##sfx##_8(ptr, old, new); \
+	case 2: \
+		return __cmpxchg_case##sfx##_16(ptr, old, new); \
+	case 4: \
+		return __cmpxchg_case##sfx##_32(ptr, old, new); \
+	case 8: \
+		return __cmpxchg_case##sfx##_64(ptr, old, new); \
 	default: \
 		BUILD_BUG(); \
 	} \
@@ -154,18 +186,18 @@
 })

 /* cmpxchg */
-#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
-#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
-#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
-#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
-#define cmpxchg_local		cmpxchg_relaxed
+#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
+#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
+#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
+#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
+#define arch_cmpxchg_local		arch_cmpxchg_relaxed

 /* cmpxchg64 */
-#define cmpxchg64_relaxed	cmpxchg_relaxed
-#define cmpxchg64_acquire	cmpxchg_acquire
-#define cmpxchg64_release	cmpxchg_release
-#define cmpxchg64		cmpxchg
-#define cmpxchg64_local		cmpxchg_local
+#define arch_cmpxchg64_relaxed	arch_cmpxchg_relaxed
+#define arch_cmpxchg64_acquire	arch_cmpxchg_acquire
+#define arch_cmpxchg64_release	arch_cmpxchg_release
+#define arch_cmpxchg64		arch_cmpxchg
+#define arch_cmpxchg64_local	arch_cmpxchg_local

 /* cmpxchg_double */
 #define system_has_cmpxchg_double()	1
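For reference, the cmpxchg() family above returns the value that was actually observed at the location, so callers typically retry in a loop until the swap succeeds. A self-contained user-space illustration of that pattern follows; it is an editor's sketch, demo_add_return() is a hypothetical name, and the GCC __atomic builtin stands in for cmpxchg_relaxed().

#include <stdint.h>

/* atomically add 'inc' to *ctr and return the new value, CAS-loop style */
static uint32_t demo_add_return(uint32_t *ctr, uint32_t inc)
{
	uint32_t old = __atomic_load_n(ctr, __ATOMIC_RELAXED);

	/* on failure the builtin reloads 'old' with the value it found at *ctr */
	while (!__atomic_compare_exchange_n(ctr, &old, old + inc, false,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;
	return old + inc;
}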
@@ -177,36 +209,36 @@
 	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
 })

-#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-	int __ret;\
-	__cmpxchg_double_check(ptr1, ptr2); \
-	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
-				     (unsigned long)(n1), (unsigned long)(n2), \
-				     ptr1); \
-	__ret; \
+#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+	int __ret; \
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
+				     (unsigned long)(n1), (unsigned long)(n2), \
+				     ptr1); \
+	__ret; \
 })

-#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-	int __ret;\
-	__cmpxchg_double_check(ptr1, ptr2); \
-	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
-				  (unsigned long)(n1), (unsigned long)(n2), \
-				  ptr1); \
-	__ret; \
+#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+	int __ret; \
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
+				  (unsigned long)(n1), (unsigned long)(n2), \
+				  ptr1); \
+	__ret; \
 })

-#define __CMPWAIT_CASE(w, sz, name) \
-static inline void __cmpwait_case_##name(volatile void *ptr, \
-					 unsigned long val) \
+#define __CMPWAIT_CASE(w, sfx, sz) \
+static inline void __cmpwait_case_##sz(volatile void *ptr, \
+				       unsigned long val) \
 { \
 	unsigned long tmp; \
 \
 	asm volatile( \
 	"	sevl\n" \
 	"	wfe\n" \
-	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
+	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \
 	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
 	"	cbnz	%" #w "[tmp], 1f\n" \
 	"	wfe\n" \
@@ -215,10 +247,10 @@
 	: [val] "r" (val)); \
 }

-__CMPWAIT_CASE(w, b, 1);
-__CMPWAIT_CASE(w, h, 2);
-__CMPWAIT_CASE(w,  , 4);
-__CMPWAIT_CASE( ,  , 8);
+__CMPWAIT_CASE(w, b, 8);
+__CMPWAIT_CASE(w, h, 16);
+__CMPWAIT_CASE(w,  , 32);
+__CMPWAIT_CASE( ,  , 64);

 #undef __CMPWAIT_CASE

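The renamed __CMPWAIT_CASE() helpers above park the CPU in WFE until the monitored word changes from the value the caller has already seen: the sevl/wfe pair arms the event stream, ldxr sets the exclusive monitor, and the eor/cbnz pair skips the second wfe if the value already differs. In the kernel these helpers are what arm64's smp_cond_load_*() polling constructs use to avoid busy-spinning. A portable, hypothetical analogue of the "wait until it changes" contract follows, without the power-saving WFE; this is an editor's sketch and the demo_* names are not kernel API.

#include <stdint.h>

static inline void demo_cpu_relax(void)
{
#if defined(__aarch64__)
	__asm__ __volatile__("yield");	/* polite spin-wait hint; not WFE */
#endif
}

/* spin until *p no longer holds 'seen', then return the fresh value */
static uint32_t demo_cmpwait(volatile uint32_t *p, uint32_t seen)
{
	uint32_t v;

	while ((v = __atomic_load_n(p, __ATOMIC_RELAXED)) == seen)
		demo_cpu_relax();
	return v;
}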
@@ -229,13 +261,13 @@
 { \
 	switch (size) { \
 	case 1: \
-		return __cmpwait_case##sfx##_1(ptr, (u8)val); \
+		return __cmpwait_case##sfx##_8(ptr, (u8)val); \
 	case 2: \
-		return __cmpwait_case##sfx##_2(ptr, (u16)val); \
+		return __cmpwait_case##sfx##_16(ptr, (u16)val); \
 	case 4: \
-		return __cmpwait_case##sfx##_4(ptr, val); \
+		return __cmpwait_case##sfx##_32(ptr, val); \
 	case 8: \
-		return __cmpwait_case##sfx##_8(ptr, val); \
+		return __cmpwait_case##sfx##_64(ptr, val); \
 	default: \
 		BUILD_BUG(); \
 	} \