@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Copyright (C) 2012 Regents of the University of California
  * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
  */
 
 #ifndef _ASM_RISCV_ATOMIC_H
@@ -22,8 +18,6 @@
 
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-
-#define ATOMIC_INIT(i)	{ (i) }
 
 #define __atomic_acquire_fence()					\
 	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
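The arch-local ATOMIC_INIT() definition is dropped here. In kernels of this era the same { (i) } initializer is provided by the generic headers, so callers keep working unchanged; a minimal sketch (the example_count variable is hypothetical, assuming the usual kernel headers):

/* Static initialization is unaffected: ATOMIC_INIT() is expected to
 * come from the generic headers rather than from asm/atomic.h. */
static atomic_t example_count = ATOMIC_INIT(0);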
@@ -42,11 +36,11 @@
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC64_INIT(i) { (i) }
-static __always_inline long atomic64_read(const atomic64_t *v)
+static __always_inline s64 atomic64_read(const atomic64_t *v)
 {
 	return READ_ONCE(v->counter);
 }
-static __always_inline void atomic64_set(atomic64_t *v, long i)
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
 {
 	WRITE_ONCE(v->counter, i);
 }
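atomic64_read() and atomic64_set() switch their value type from long to the fixed-width s64, matching the 64-bit counter explicitly rather than relying on long being 64 bits wide on RV64. A small usage sketch, with hypothetical names and assuming the usual kernel headers:

/* Illustrative only: a 64-bit statistics counter accessed with the
 * plain (non-ordering) helpers defined above. */
static atomic64_t bytes_seen = ATOMIC64_INIT(0);

static inline s64 snapshot_bytes(void)
{
	return atomic64_read(&bytes_seen);	/* boils down to READ_ONCE() */
}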
@@ -70,11 +64,11 @@
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, I)					\
-        ATOMIC_OP (op, asm_op, I, w, int,   )
+        ATOMIC_OP (op, asm_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, I)					\
-        ATOMIC_OP (op, asm_op, I, w, int,   )			\
-        ATOMIC_OP (op, asm_op, I, d, long, 64)
+        ATOMIC_OP (op, asm_op, I, w, int,   )			\
+        ATOMIC_OP (op, asm_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(add, add, i)
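In the ATOMIC_OPS() lists only the 64-bit column changes from long to s64; each ATOMIC_OP() instantiation still expands to a single AMO instruction. Hand-expanding the 64-bit add gives roughly the sketch below (a simplified approximation of what the macro generates, not literal preprocessor output):

/* Approximate expansion of ATOMIC_OP(add, add, i, d, s64, 64). */
static __always_inline void atomic64_add(s64 i, atomic64_t *v)
{
	__asm__ __volatile__ (
		"	amoadd.d zero, %1, %0"
		: "+A" (v->counter)
		: "r" (i)
		: "memory");
}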
@@ -131,14 +125,14 @@
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I)					\
-        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )	\
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
+        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )	\
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I)					\
-        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )	\
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )	\
-        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)	\
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
+        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )	\
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )	\
+        ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)	\
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(add, add, +, i)
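ATOMIC_FETCH_OP() and ATOMIC_OP_RETURN() generate the value-returning variants, and again only the 64-bit type moves to s64. For reference, the two families differ in which value they hand back; a hypothetical caller:

/* fetch_* returns the value observed before the update,
 * *_return returns the value after it. */
static void return_vs_fetch(atomic64_t *v)
{
	s64 after  = atomic64_add_return(4, v);
	s64 before = atomic64_fetch_add(4, v);

	(void)after;
	(void)before;
}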
@@ -170,11 +164,11 @@
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, I)					\
-        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
+        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, I)					\
-        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )	\
-        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
+        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )	\
+        ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(and, and, i)
@@ -223,9 +217,10 @@
 #define atomic_fetch_add_unless atomic_fetch_add_unless
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long prev, rc;
+	s64 prev;
+	long rc;
 
 	__asm__ __volatile__ (
 		"0:	lr.d     %[p],  %[c]\n"
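atomic64_fetch_add_unless() keeps its LR/SC retry loop; prev (the value returned and compared against u) becomes s64, while the SC status flag stays a register-sized long. The function returns the old value and only adds a when that old value differs from u, so a typical caller looks something like this (hypothetical helper, assuming the usual kernel headers):

/* Take a reference only if the count is not already zero; a return of
 * zero means fetch_add_unless() added nothing. */
static bool get_ref_if_live(atomic64_t *refs)
{
	return atomic64_fetch_add_unless(refs, 1, 0) != 0;
}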
@@ -294,14 +289,23 @@
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS()							\
-	ATOMIC_OP( int,   , 4)
+	ATOMIC_OP(int,   , 4)
 #else
 #define ATOMIC_OPS()							\
-	ATOMIC_OP( int,   , 4)			\
-	ATOMIC_OP(long, 64, 8)
+	ATOMIC_OP(int,   , 4)			\
+	ATOMIC_OP(s64, 64, 8)
 #endif
 
 ATOMIC_OPS()
+
+#define atomic_xchg_relaxed atomic_xchg_relaxed
+#define atomic_xchg_acquire atomic_xchg_acquire
+#define atomic_xchg_release atomic_xchg_release
+#define atomic_xchg atomic_xchg
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#define atomic_cmpxchg atomic_cmpxchg
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP
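The added #defines advertise to the generic atomic fallback headers that this architecture provides its own relaxed/acquire/release/full xchg and cmpxchg for atomic_t, so the remaining orderings are derived from these rather than from the generic fallbacks. Caller-side usage is unchanged; for example (hypothetical helper):

/* Claim a flag exactly once: cmpxchg returns the value it found,
 * so 0 means this caller performed the 0 -> 1 transition. */
static bool claim_once(atomic_t *flag)
{
	return atomic_cmpxchg(flag, 0, 1) == 0;
}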
@@ -327,9 +331,10 @@
 #define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
+static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
 {
-	long prev, rc;
+	s64 prev;
+	long rc;
 
 	__asm__ __volatile__ (
 		"0:	lr.d     %[p],  %[c]\n"
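atomic64_sub_if_positive() gets the same treatment: the return value and prev become s64, the offset widens from int to s64, and the SC status flag is split out as a long. Its contract (the rest of the body falls outside this hunk) is to commit the subtraction only when the result stays non-negative, returning the would-be result either way; a hypothetical caller:

/* Consume one credit without ever driving the counter negative;
 * a negative return means no credit was taken. */
static bool take_credit(atomic64_t *credits)
{
	return atomic64_sub_if_positive(credits, 1) >= 0;
}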