forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/arc/include/asm/atomic.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifndef _ASM_ARC_ATOMIC_H
@@ -16,10 +13,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 #include <asm/smp.h>
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#ifndef CONFIG_ARC_PLAT_EZNPS
 
 #define atomic_read(v)	READ_ONCE((v)->counter)
 
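Note on the hunk above: the per-arch ATOMIC_INIT() definition is dropped here, presumably because an equivalent generic definition is now provided by the common headers, so static initializers keep working unchanged. A minimal stand-alone sketch of what the macro expands to (hypothetical names, not from this file):

	typedef struct { int counter; } atomic_sketch_t;	/* mirrors atomic_t */
	#define ATOMIC_SKETCH_INIT(i)	{ (i) }			/* same brace-initializer expansion as ATOMIC_INIT(i) */

	static atomic_sketch_t sketch_refcount = ATOMIC_SKETCH_INIT(1);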
@@ -50,7 +43,7 @@
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
-	 * LLOCK/SCOND thmeselves don't provide any such semantics	\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
 	 */								\
 	smp_mb();							\
 									\
@@ -76,7 +69,7 @@
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
-	 * LLOCK/SCOND thmeselves don't provide any such semantics	\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
 	 */								\
 	smp_mb();							\
 									\
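The comment fixed in the two hunks above explains why a fully ordered atomic_*_return() brackets the LLOCK/SCOND retry loop with smp_mb(): the loop is atomic but provides no ordering by itself. A hedged userspace analogue of that pattern in C11 (not kernel code; names are hypothetical):

	#include <stdatomic.h>

	static atomic_int sketch_counter;			/* stands in for v->counter */

	static int sketch_add_return(int i)
	{
		int old;

		atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() before the RMW */
		old = atomic_fetch_add_explicit(&sketch_counter, i,
						memory_order_relaxed);	/* the bare atomic op */
		atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() after the RMW */

		return old + i;
	}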
@@ -200,108 +193,6 @@
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
 
-#else	/* CONFIG_ARC_PLAT_EZNPS */
-
-static inline int atomic_read(const atomic_t *v)
-{
-	int temp;
-
-	__asm__ __volatile__(
-	"	ld.di %0, [%1]"
-	: "=r"(temp)
-	: "r"(&v->counter)
-	: "memory");
-	return temp;
-}
-
-static inline void atomic_set(atomic_t *v, int i)
-{
-	__asm__ __volatile__(
-	"	st.di %0,[%1]"
-	:
-	: "r"(i), "r"(&v->counter)
-	: "memory");
-}
-
-#define ATOMIC_OP(op, c_op, asm_op)					\
-static inline void atomic_##op(int i, atomic_t *v)			\
-{									\
-	__asm__ __volatile__(						\
-	"	mov r2, %0\n"						\
-	"	mov r3, %1\n"						\
-	"	.word %2\n"						\
-	:								\
-	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
-	: "r2", "r3", "memory");					\
-}									\
-
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
-{									\
-	unsigned int temp = i;						\
-									\
-	/* Explicit full memory barrier needed before/after */		\
-	smp_mb();							\
-									\
-	__asm__ __volatile__(						\
-	"	mov r2, %0\n"						\
-	"	mov r3, %1\n"						\
-	"	.word %2\n"						\
-	"	mov %0, r2"						\
-	: "+r"(temp)							\
-	: "r"(&v->counter), "i"(asm_op)					\
-	: "r2", "r3", "memory");					\
-									\
-	smp_mb();							\
-									\
-	temp c_op i;							\
-									\
-	return temp;							\
-}
-
-#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
-static inline int atomic_fetch_##op(int i, atomic_t *v)		\
-{									\
-	unsigned int temp = i;						\
-									\
-	/* Explicit full memory barrier needed before/after */		\
-	smp_mb();							\
-									\
-	__asm__ __volatile__(						\
-	"	mov r2, %0\n"						\
-	"	mov r3, %1\n"						\
-	"	.word %2\n"						\
-	"	mov %0, r2"						\
-	: "+r"(temp)							\
-	: "r"(&v->counter), "i"(asm_op)					\
-	: "r2", "r3", "memory");					\
-									\
-	smp_mb();							\
-									\
-	return temp;							\
-}
-
-#define ATOMIC_OPS(op, c_op, asm_op)					\
-	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-	ATOMIC_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
-#define atomic_sub(i, v) atomic_add(-(i), (v))
-#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
-#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))
-
-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op)					\
-	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
-ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
-
-#endif	/* CONFIG_ARC_PLAT_EZNPS */
-
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
@@ -324,14 +215,14 @@
  */
 
 typedef struct {
-	aligned_u64 counter;
+	s64 __aligned(8) counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(a) { (a) }
 
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	unsigned long long val;
+	s64 val;
 
 	__asm__ __volatile__(
 	"	ldd   %0, [%1]	\n"
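The member change above swaps aligned_u64 for a signed s64 that is explicitly 8-byte aligned: the alignment guarantee that the 64-bit load/store pair depends on is preserved, only the signedness of the C type changes. A small stand-alone sketch (hypothetical type name) of the equivalent attribute with a compile-time check:

	#include <stdint.h>

	typedef struct {
		int64_t __attribute__((aligned(8))) counter;	/* mirrors "s64 __aligned(8) counter" */
	} atomic64_sketch_t;

	_Static_assert(_Alignof(atomic64_sketch_t) == 8,
		       "64-bit atomics need a naturally aligned counter");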
@@ -341,7 +232,7 @@
 	return val;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long a)
+static inline void atomic64_set(atomic64_t *v, s64 a)
 {
 	/*
 	 * This could have been a simple assignment in "C" but would need
@@ -362,9 +253,9 @@
 }
 
 #define ATOMIC64_OP(op, op1, op2)					\
-static inline void atomic64_##op(long long a, atomic64_t *v)		\
+static inline void atomic64_##op(s64 a, atomic64_t *v)			\
 {									\
-	unsigned long long val;						\
+	s64 val;							\
 									\
 	__asm__ __volatile__(						\
 	"1:				\n"				\
@@ -375,13 +266,13 @@
 	"	bnz     1b		\n"				\
 	: "=&r"(val)							\
 	: "r"(&v->counter), "ir"(a)					\
-	: "cc");	\
+	: "cc");							\
 }									\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long atomic64_##op##_return(long long a, atomic64_t *v) \
+static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
 {									\
-	unsigned long long val;						\
+	s64 val;							\
 									\
 	smp_mb();							\
 									\
@@ -402,9 +293,9 @@
 }
 
 #define ATOMIC64_FETCH_OP(op, op1, op2)					\
-static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
 {									\
-	unsigned long long val, orig;					\
+	s64 val, orig;							\
 									\
 	smp_mb();							\
 									\
@@ -444,10 +335,10 @@
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long
-atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
+static inline s64
+atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 {
-	long long prev;
+	s64 prev;
 
 	smp_mb();
 
@@ -467,9 +358,9 @@
 	return prev;
 }
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
 {
-	long long prev;
+	s64 prev;
 
 	smp_mb();
 
@@ -495,9 +386,9 @@
  * the atomic variable, v, was not decremented.
  */
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long val;
+	s64 val;
 
 	smp_mb();
 
@@ -528,10 +419,9 @@
  * Atomically adds @a to @v, if it was not @u.
  * Returns the old value of @v
  */
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
-						  long long u)
+static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long long old, temp;
+	s64 old, temp;
 
 	smp_mb();
 
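Taken together, the atomic64 hunks switch the ARC implementation from long long/unsigned long long to s64 throughout, matching the generic atomic64 API. A hedged usage sketch of a hypothetical caller after the change (kernel-style; the caller and its names are not part of this diff):

	#include <linux/atomic.h>

	static atomic64_t sketch_total = ATOMIC64_INIT(0);

	static s64 sketch_account(s64 nbytes)
	{
		/* fully ordered read-modify-write; argument and result are both s64 */
		return atomic64_add_return(nbytes, &sketch_total);
	}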