2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/arch/riscv/include/asm/atomic.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Copyright (C) 2012 Regents of the University of California
  * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
  */
 
 #ifndef _ASM_RISCV_ATOMIC_H
@@ -22,8 +18,6 @@
 
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-
-#define ATOMIC_INIT(i) { (i) }
 
 #define __atomic_acquire_fence() \
 	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
@@ -42,11 +36,11 @@
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC64_INIT(i) { (i) }
-static __always_inline long atomic64_read(const atomic64_t *v)
+static __always_inline s64 atomic64_read(const atomic64_t *v)
 {
 	return READ_ONCE(v->counter);
 }
-static __always_inline void atomic64_set(atomic64_t *v, long i)
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
 {
 	WRITE_ONCE(v->counter, i);
 }
@@ -70,11 +64,11 @@
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, I) \
-        ATOMIC_OP (op, asm_op, I, w,  int,   )
+        ATOMIC_OP (op, asm_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, I) \
-        ATOMIC_OP (op, asm_op, I, w,  int,   ) \
-        ATOMIC_OP (op, asm_op, I, d, long, 64)
+        ATOMIC_OP (op, asm_op, I, w, int,   ) \
+        ATOMIC_OP (op, asm_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(add, add, i)
@@ -131,14 +125,14 @@
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I) \
-        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   ) \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )
+        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   ) \
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I) \
-        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   ) \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   ) \
-        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64) \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
+        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   ) \
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   ) \
+        ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64) \
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(add, add, +, i)
@@ -170,11 +164,11 @@
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, I) \
-        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )
+        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, I) \
-        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   ) \
-        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
+        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   ) \
+        ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(and, and, i)
@@ -223,9 +217,10 @@
 #define atomic_fetch_add_unless atomic_fetch_add_unless
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long prev, rc;
+	s64 prev;
+	long rc;
 
 	__asm__ __volatile__ (
 		"0: lr.d %[p], %[c]\n"
@@ -294,14 +289,23 @@
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS() \
-	ATOMIC_OP( int,   , 4)
+	ATOMIC_OP(int,   , 4)
 #else
 #define ATOMIC_OPS() \
-	ATOMIC_OP( int,   , 4) \
-	ATOMIC_OP(long, 64, 8)
+	ATOMIC_OP(int,   , 4) \
+	ATOMIC_OP(s64, 64, 8)
 #endif
 
 ATOMIC_OPS()
+
+#define atomic_xchg_relaxed atomic_xchg_relaxed
+#define atomic_xchg_acquire atomic_xchg_acquire
+#define atomic_xchg_release atomic_xchg_release
+#define atomic_xchg atomic_xchg
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#define atomic_cmpxchg atomic_cmpxchg
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP
@@ -327,9 +331,10 @@
 #define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
+static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
 {
-	long prev, rc;
+	s64 prev;
+	long rc;
 
 	__asm__ __volatile__ (
 		"0: lr.d %[p], %[c]\n"