2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/arch/s390/include/asm/atomic.h
@@ -15,8 +15,6 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
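
The arch-local ATOMIC_INIT definition is dropped here; presumably an identical generic definition of the initializer is provided by a common header, so callers keep working unchanged. As a standalone reminder of how such an initializer is used (hypothetical names, not kernel code):

    /* Sketch only: static initialization of an atomic counter. */
    typedef struct { int counter; } my_atomic_t;
    #define MY_ATOMIC_INIT(i)	{ (i) }

    static my_atomic_t refcount = MY_ATOMIC_INIT(1);
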
@@ -47,7 +45,11 @@
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+	/*
+	 * Order of conditions is important to circumvent gcc 10 bug:
+	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
+	 */
+	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
 		__atomic_add_const(i, &v->counter);
 		return;
 	}
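
The behaviour is unchanged: the immediate-form fast path is still taken only when the compiler can prove i is a constant in the range -128..127; only the evaluation order of the checks moves, with __builtin_constant_p() last, to sidestep the gcc 10 issue referenced in the comment. A minimal standalone sketch of the same pattern, using hypothetical non-atomic stand-ins for the arch helpers:

    #include <stdio.h>

    static void add_general(int i, int *p)   { *p += i; } /* stand-in for __atomic_add()       */
    static void add_immediate(int i, int *p) { *p += i; } /* stand-in for __atomic_add_const() */

    static inline void my_atomic_add(int i, int *p)
    {
        /* Range checks first, __builtin_constant_p() last, mirroring the patch. */
        if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
            add_immediate(i, p);
            return;
        }
        add_general(i, p);
    }

    int main(void)
    {
        int c = 0;
        my_atomic_add(5, &c);      /* constant and small: immediate path */
        my_atomic_add(1000, &c);   /* constant but too large: general path */
        printf("%d\n", c);         /* prints 1005 */
        return 0;
    }
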
@@ -84,9 +86,9 @@
 
 #define ATOMIC64_INIT(i)  { (i) }
 
-static inline long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	long c;
+	s64 c;
 
 	asm volatile(
 		"	lg	%0,%1\n"
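
Only the C-level type changes here; the generated code is still a single 64-bit LG load. For comparison, outside of inline asm the same read is just a volatile 64-bit load, roughly (standalone sketch, hypothetical names):

    #include <stdint.h>

    typedef struct { int64_t counter; } my_atomic64_t;

    static inline int64_t my_atomic64_read(const my_atomic64_t *v)
    {
        /* Volatile load so the compiler reads memory exactly once. */
        return *(const volatile int64_t *)&v->counter;
    }
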
@@ -94,49 +96,53 @@
 	return c;
 }
 
-static inline void atomic64_set(atomic64_t *v, long i)
+static inline void atomic64_set(atomic64_t *v, s64 i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
 		: "=Q" (v->counter) : "d" (i));
 }
 
-static inline long atomic64_add_return(long i, atomic64_t *v)
+static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
 {
-	return __atomic64_add_barrier(i, &v->counter) + i;
+	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
 
-static inline long atomic64_fetch_add(long i, atomic64_t *v)
+static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
 {
-	return __atomic64_add_barrier(i, &v->counter);
+	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
 
-static inline void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(s64 i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		__atomic64_add_const(i, &v->counter);
+	/*
+	 * Order of conditions is important to circumvent gcc 10 bug:
+	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
+	 */
+	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
+		__atomic64_add_const(i, (long *)&v->counter);
 		return;
 	}
 #endif
-	__atomic64_add(i, &v->counter);
+	__atomic64_add(i, (long *)&v->counter);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
-	return __atomic64_cmpxchg(&v->counter, old, new);
+	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
 
 #define ATOMIC64_OPS(op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)		\
+static inline void atomic64_##op(s64 i, atomic64_t *v)			\
 {									\
-	__atomic64_##op(i, &v->counter);				\
+	__atomic64_##op(i, (long *)&v->counter);			\
 }									\
-static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
+static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
 {									\
-	return __atomic64_##op##_barrier(i, &v->counter);		\
+	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
 }
 
 ATOMIC64_OPS(and)
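
With the counter now typed as s64 while the __atomic64_*() helpers still take long *, the (long *) casts bridge the pointer-type mismatch; on s390, which is 64-bit only, long and s64 have the same size and representation, so the cast is safe. A standalone sketch of the same situation (hypothetical names, long long standing in for the kernel's s64):

    /* Stand-in for an arch helper that operates on long. */
    static void helper_add(long i, long *p) { *p += i; }

    typedef long long my_s64;
    typedef struct { my_s64 counter; } my_atomic64_t;

    static inline void my_atomic64_add(my_s64 i, my_atomic64_t *v)
    {
        /* long and long long are distinct C types even when both are
         * 64 bits wide, hence the explicit pointer cast. */
        _Static_assert(sizeof(long) == sizeof(my_s64), "64-bit only");
        helper_add((long)i, (long *)&v->counter);
    }
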
@@ -145,8 +151,8 @@
 
 #undef ATOMIC64_OPS
 
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)
 
 #endif /* __ARCH_S390_ATOMIC__ */
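
The sub wrappers keep the existing negate-and-add form; only the cast type changes from long to s64 to match the new prototypes (identical on s390, where both are 64 bits). Casting before negation also matters in general if a caller passes an unsigned value, as this small standalone illustration shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int delta = 5;
        long long wrong = -delta;            /* wraps to 4294967291 before widening */
        long long right = -(long long)delta; /* widens first, then negates: -5 */
        printf("%lld %lld\n", wrong, right); /* prints: 4294967291 -5 */
        return 0;
    }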