..
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-#define ATOMIC_INIT(i) { (i) }
-
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
..
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+	/*
+	 * Order of conditions is important to circumvent gcc 10 bug:
+	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
+	 */
+	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
 		__atomic_add_const(i, &v->counter);
 		return;
 	}
..
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	long c;
+	s64 c;
 
 	asm volatile(
 		"	lg	%0,%1\n"
..
 	return c;
 }
 
-static inline void atomic64_set(atomic64_t *v, long i)
+static inline void atomic64_set(atomic64_t *v, s64 i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
 		: "=Q" (v->counter) : "d" (i));
 }
 
-static inline long atomic64_add_return(long i, atomic64_t *v)
+static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
 {
-	return __atomic64_add_barrier(i, &v->counter) + i;
+	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
 
-static inline long atomic64_fetch_add(long i, atomic64_t *v)
+static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
 {
-	return __atomic64_add_barrier(i, &v->counter);
+	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
 
-static inline void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(s64 i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		__atomic64_add_const(i, &v->counter);
+	/*
+	 * Order of conditions is important to circumvent gcc 10 bug:
+	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
+	 */
+	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
+		__atomic64_add_const(i, (long *)&v->counter);
 		return;
 	}
 #endif
-	__atomic64_add(i, &v->counter);
+	__atomic64_add(i, (long *)&v->counter);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
-	return __atomic64_cmpxchg(&v->counter, old, new);
+	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
 
 #define ATOMIC64_OPS(op) \
-static inline void atomic64_##op(long i, atomic64_t *v) \
+static inline void atomic64_##op(s64 i, atomic64_t *v) \
 { \
-	__atomic64_##op(i, &v->counter); \
+	__atomic64_##op(i, (long *)&v->counter); \
 } \
-static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
+static inline long atomic64_fetch_##op(s64 i, atomic64_t *v) \
 { \
-	return __atomic64_##op##_barrier(i, &v->counter); \
+	return __atomic64_##op##_barrier(i, (long *)&v->counter); \
 }
 
 ATOMIC64_OPS(and)
..
 
 #undef ATOMIC64_OPS
 
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)
 
 #endif /* __ARCH_S390_ATOMIC__ */
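The reordered checks in atomic_add()/atomic64_add() above gate a fast path that is only valid when the addend is a small compile-time constant, since __atomic_add_const()/__atomic64_add_const() expand to an instruction taking a signed 8-bit immediate (hence the -128..127 range check). Below is a minimal, standalone C sketch of that dispatch pattern, not the kernel code: add_const(), add_full() and my_add() are made-up stand-ins for the s390 helpers, with plain C in place of inline asm. It only mirrors the ordering the patch introduces, with __builtin_constant_p() evaluated last to sidestep the gcc 10 issue referenced in the comment.

/*
 * Minimal standalone sketch of the constant-immediate dispatch pattern
 * used by atomic_add()/atomic64_add() above.  add_const() and add_full()
 * are hypothetical stand-ins for __atomic64_add_const()/__atomic64_add();
 * plain C is used here instead of inline asm.
 */
#include <stdio.h>

static inline void add_const(long i, long *counter)
{
	*counter += i;		/* stand-in for the immediate form */
}

static inline void add_full(long i, long *counter)
{
	*counter += i;		/* stand-in for the general form */
}

static inline void my_add(long i, long *counter)
{
	/* Range check first, __builtin_constant_p() last (gcc 10 workaround). */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i))
		add_const(i, counter);
	else
		add_full(i, counter);
}

int main(void)
{
	long c = 0;

	my_add(1, &c);		/* constant and in range: immediate path */
	my_add(200, &c);	/* constant but out of range: general path */
	printf("%ld\n", c);	/* prints 201 */
	return 0;
}

As for the (long *) casts added throughout: the counter is handled as s64 at the API level while the arch-internal __atomic64_* helpers still take long *; on s390 both types are 64-bit, so the casts only bridge the signature change.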