@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifndef _ASM_ARC_ATOMIC_H
@@ -16,10 +13,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 #include <asm/smp.h>
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#ifndef CONFIG_ARC_PLAT_EZNPS
 
 #define atomic_read(v)	READ_ONCE((v)->counter)
 
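
A note on the two removals above: ATOMIC_INIT() is not ARC-specific (in trees carrying the cross-architecture cleanup it comes from <linux/types.h> instead), and the CONFIG_ARC_PLAT_EZNPS special case goes away together with the platform (see the large hunk further down). Callers are unaffected; a minimal usage sketch, with users/user_get/user_count being hypothetical names for illustration:

#include <linux/atomic.h>	/* pulls in asm/atomic.h and the generic helpers */

static atomic_t users = ATOMIC_INIT(0);	/* initializer now comes from generic code */

static void user_get(void)
{
	atomic_inc(&users);		/* RMW op generated by the ATOMIC_OP macros */
}

static int user_count(void)
{
	return atomic_read(&users);	/* plain READ_ONCE() of ->counter, as above */
}
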
@@ -50,7 +43,7 @@
 								\
 	/*							\
 	 * Explicit full memory barrier needed before/after as	\
-	 * LLOCK/SCOND thmeselves don't provide any such semantics \
+	 * LLOCK/SCOND themselves don't provide any such semantics \
 	 */							\
 	smp_mb();						\
 								\
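
For readers unfamiliar with the pattern the comment refers to: LLOCK/SCOND are ARC's load-locked/store-conditional pair. They make the read-modify-write atomic but impose no ordering on surrounding accesses, hence the explicit smp_mb() on both sides. A sketch of roughly what ATOMIC_OP_RETURN(add, +=, add) expands to (not the verbatim source; operand constraints simplified):

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned int val;

	smp_mb();	/* order earlier accesses before the atomic RMW */

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"	/* load and take the reservation */
	"	add     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"	/* store iff reservation still held */
	"	bnz     1b		\n"	/* lost it: retry */
	: "=&r"(val)
	: "r"(&v->counter), "ir"(i)
	: "cc");

	smp_mb();	/* order the atomic RMW before later accesses */

	return val;
}
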
@@ -76,7 +69,7 @@
 								\
 	/*							\
 	 * Explicit full memory barrier needed before/after as	\
-	 * LLOCK/SCOND thmeselves don't provide any such semantics \
+	 * LLOCK/SCOND themselves don't provide any such semantics \
 	 */							\
 	smp_mb();						\
 								\
@@ -200,108 +193,6 @@
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
 
-#else	/* CONFIG_ARC_PLAT_EZNPS */
-
-static inline int atomic_read(const atomic_t *v)
-{
-	int temp;
-
-	__asm__ __volatile__(
-	"	ld.di %0, [%1]"
-	: "=r"(temp)
-	: "r"(&v->counter)
-	: "memory");
-	return temp;
-}
-
-static inline void atomic_set(atomic_t *v, int i)
-{
-	__asm__ __volatile__(
-	"	st.di %0,[%1]"
-	:
-	: "r"(i), "r"(&v->counter)
-	: "memory");
-}
-
-#define ATOMIC_OP(op, c_op, asm_op)				\
-static inline void atomic_##op(int i, atomic_t *v)		\
-{								\
-	__asm__ __volatile__(					\
-	"	mov r2, %0\n"					\
-	"	mov r3, %1\n"					\
-	"	.word %2\n"					\
-	:							\
-	: "r"(i), "r"(&v->counter), "i"(asm_op)		\
-	: "r2", "r3", "memory");				\
-}								\
-
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)			\
-static inline int atomic_##op##_return(int i, atomic_t *v)	\
-{								\
-	unsigned int temp = i;					\
-								\
-	/* Explicit full memory barrier needed before/after */	\
-	smp_mb();						\
-								\
-	__asm__ __volatile__(					\
-	"	mov r2, %0\n"					\
-	"	mov r3, %1\n"					\
-	"	.word %2\n"					\
-	"	mov %0, r2"					\
-	: "+r"(temp)						\
-	: "r"(&v->counter), "i"(asm_op)			\
-	: "r2", "r3", "memory");				\
-								\
-	smp_mb();						\
-								\
-	temp c_op i;						\
-								\
-	return temp;						\
-}
-
-#define ATOMIC_FETCH_OP(op, c_op, asm_op)			\
-static inline int atomic_fetch_##op(int i, atomic_t *v)	\
-{								\
-	unsigned int temp = i;					\
-								\
-	/* Explicit full memory barrier needed before/after */	\
-	smp_mb();						\
-								\
-	__asm__ __volatile__(					\
-	"	mov r2, %0\n"					\
-	"	mov r3, %1\n"					\
-	"	.word %2\n"					\
-	"	mov %0, r2"					\
-	: "+r"(temp)						\
-	: "r"(&v->counter), "i"(asm_op)			\
-	: "r2", "r3", "memory");				\
-								\
-	smp_mb();						\
-								\
-	return temp;						\
-}
-
-#define ATOMIC_OPS(op, c_op, asm_op)				\
-	ATOMIC_OP(op, c_op, asm_op)				\
-	ATOMIC_OP_RETURN(op, c_op, asm_op)			\
-	ATOMIC_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
-#define atomic_sub(i, v) atomic_add(-(i), (v))
-#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
-#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))
-
-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op)				\
-	ATOMIC_OP(op, c_op, asm_op)				\
-	ATOMIC_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
-ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
-
-#endif	/* CONFIG_ARC_PLAT_EZNPS */
-
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
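
Context for the deleted block: the EZNPS CTOP atomics are custom opcodes the stock assembler does not know, so they are emitted as raw `.word`s with the operands pinned in r2 (value) and r3 (address). Judging from the wrappers above (the fetch variant returns r2 directly, and the *_return variant re-applies c_op to it), the instruction left the *old* value in r2. A hypothetical C model of CTOP_INST_AADD_DI_R2_R2_R3, for illustration only:

static inline int ctop_aadd_di(int *counter, int i)
{
	int old = *counter;	/* both steps performed as one */
	*counter = old + i;	/*   atomic operation in hardware */
	return old;		/* r2 holds the old value afterwards */
}
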
@@ -324,14 +215,14 @@
  */
 
 typedef struct {
-	aligned_u64 counter;
+	s64 __aligned(8) counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(a) { (a) }
 
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	unsigned long long val;
+	s64 val;
 
 	__asm__ __volatile__(
 	"	ldd   %0, [%1]	\n"
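
The typedef change is part of the tree-wide move of the atomic64 API to s64; the explicit 8-byte alignment is still required because ARCv2's 64-bit exclusive pair (LLOCKD/SCONDD) and the plain ldd/std used by read/set operate on naturally aligned doublewords. Usage is unchanged; a small sketch with bytes_seen/account_rx/bytes_total as hypothetical names:

static atomic64_t bytes_seen = ATOMIC64_INIT(0);	/* 8-byte aligned by the type */

static void account_rx(u32 len)
{
	atomic64_add(len, &bytes_seen);		/* one LLOCKD/SCONDD retry loop */
}

static s64 bytes_total(void)
{
	return atomic64_read(&bytes_seen);	/* single 64-bit ldd, no lock needed */
}
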
@@ -341,7 +232,7 @@
 	return val;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long a)
+static inline void atomic64_set(atomic64_t *v, s64 a)
 {
 	/*
 	 * This could have been a simple assignment in "C" but would need
@@ -362,9 +253,9 @@
 }
 
 #define ATOMIC64_OP(op, op1, op2)				\
-static inline void atomic64_##op(long long a, atomic64_t *v)	\
+static inline void atomic64_##op(s64 a, atomic64_t *v)		\
 {								\
-	unsigned long long val;				\
+	s64 val;						\
 								\
 	__asm__ __volatile__(					\
 	"1:				\n"			\
@@ -375,13 +266,13 @@
 	"	bnz     1b		\n"			\
 	: "=&r"(val)						\
 	: "r"(&v->counter), "ir"(a)				\
-	: "cc");	\
+	: "cc");						\
 }								\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)			\
-static inline long long atomic64_##op##_return(long long a, atomic64_t *v) \
+static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)	\
 {								\
-	unsigned long long val;				\
+	s64 val;						\
 								\
 	smp_mb();						\
 								\
@@ -402,9 +293,9 @@
 }
 
 #define ATOMIC64_FETCH_OP(op, op1, op2)			\
-static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)	\
 {								\
-	unsigned long long val, orig;				\
+	s64 val, orig;						\
 								\
 	smp_mb();						\
 								\
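
On the (op1, op2) parameters of these macros: they are the low-word and high-word halves of the 64-bit ALU operation, with arithmetic ops threading the carry between halves while bitwise ops act on each half independently. A sketch of the usual instantiations (assumed from the surrounding file, which is not shown in these hunks):

ATOMIC64_OPS(add, add.f, adc)	/* op1 sets carry, op2 adds it into the high word */
ATOMIC64_OPS(sub, sub.f, sbc)	/* same scheme, with borrow */
ATOMIC64_OPS(and, and, and)	/* no carry needed: halves are independent */
ATOMIC64_OPS(or,  or,  or)
ATOMIC64_OPS(xor, xor, xor)
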
@@ -444,10 +335,10 @@
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long
-atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
+static inline s64
+atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 {
-	long long prev;
+	s64 prev;
 
 	smp_mb();
 
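
atomic64_cmpxchg() returns the value it found at *ptr, so the conventional pattern is a read/compute/compare-and-swap retry loop. A sketch built only on the functions in this file (atomic64_add_capped is a hypothetical helper, for illustration):

static inline s64 atomic64_add_capped(atomic64_t *v, s64 a, s64 cap)
{
	s64 old, new;

	do {
		old = atomic64_read(v);
		new = old + a;
		if (new > cap)
			new = cap;
	} while (atomic64_cmpxchg(v, old, new) != old);	/* raced: recompute */

	return new;
}
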
@@ -467,9 +358,9 @@
 	return prev;
 }
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
 {
-	long long prev;
+	s64 prev;
 
 	smp_mb();
 
@@ -495,9 +386,9 @@
  * the atomic variable, v, was not decremented.
  */
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long val;
+	s64 val;
 
 	smp_mb();
 
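
Because the return value is old-minus-one whether or not the store happened (per the comment above), a non-negative result is the "I got one" signal. A sketch of a typical consumer (take_token is a hypothetical helper):

static bool take_token(atomic64_t *budget)
{
	/* result >= 0  <=>  the counter was > 0 and we decremented it */
	return atomic64_dec_if_positive(budget) >= 0;
}
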
@@ -528,10 +419,9 @@
  * Atomically adds @a to @v, if it was not @u.
  * Returns the old value of @v
  */
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
-						  long long u)
+static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long long old, temp;
+	s64 old, temp;
 
 	smp_mb();
 
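
The canonical consumer of fetch_add_unless() is the refcount-style "increment unless already zero", which is how the generic atomic64_inc_not_zero()/atomic64_add_unless() wrappers are built on top of it. A sketch (obj_tryget is a hypothetical name):

static inline bool obj_tryget(atomic64_t *refs)
{
	/* an old value of 0 means the object is already dead: don't resurrect it */
	return atomic64_fetch_add_unless(refs, 1, 0) != 0;
}
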