2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/atomic64_64.h
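
Taken together, the hunks below make three kinds of changes to the x86-64 atomic64 helpers: operand and return types move from long to s64, arch_atomic64_read()/arch_atomic64_set() switch to the non-instrumented __READ_ONCE()/__WRITE_ONCE() accessors, and every arch-provided operation gains a "#define arch_atomic64_<op> arch_atomic64_<op>" self-alias after its definition. The self-alias is what the kernel's generated atomic fallback headers key off: a generic implementation is only emitted when the architecture has not defined the macro. A minimal sketch of that probing pattern, assuming a simplified fallback built from arch_atomic64_sub_return() (the real generated header differs in detail):

#ifndef arch_atomic64_sub_and_test
static __always_inline bool
arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	/* Generic fallback: only used when the arch did not define the op. */
	return arch_atomic64_sub_return(i, v) == 0;
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
#endif

With the #define in place in this file, the block above is skipped and the x86 implementation is used as-is.
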
@@ -17,9 +17,9 @@
  * Atomically reads the value of @v.
  * Doesn't imply a read memory barrier.
  */
-static inline long arch_atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	return READ_ONCE((v)->counter);
+	return __READ_ONCE((v)->counter);
 }
 
 /**
@@ -29,9 +29,9 @@
  *
  * Atomically sets the value of @v to @i.
  */
-static inline void arch_atomic64_set(atomic64_t *v, long i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	WRITE_ONCE(v->counter, i);
+	__WRITE_ONCE(v->counter, i);
 }
 
 /**
@@ -41,7 +41,7 @@
  *
  * Atomically adds @i to @v.
  */
-static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
@@ -55,7 +55,7 @@
  *
  * Atomically subtracts @i from @v.
  */
-static inline void arch_atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "subq %1,%0"
 		     : "=m" (v->counter)
@@ -71,9 +71,9 @@
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
+static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
 }
 #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 
@@ -115,7 +115,7 @@
  */
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
 }
 #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 
@@ -129,7 +129,7 @@
  */
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
 }
 #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 
@@ -142,9 +142,9 @@
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
+static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
 }
 #define arch_atomic64_add_negative arch_atomic64_add_negative
 
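
The four RMWcc hunks above follow a rework of the GEN_UNARY_RMWcc()/GEN_BINARY_RMWcc() macros in arch/x86/include/asm/rmwcc.h: the condition code argument now comes right after the variable, the old "%0" operand string is dropped, and the macros evaluate to a bool rather than embedding the return themselves, which is why each caller gains an explicit return. On compilers with x86 flag-output constraints, the expansion boils down to something like the following sketch (illustrative operand names, not the kernel's macro internals):

static inline bool sub_and_test_sketch(s64 i, atomic64_t *v)
{
	bool result;

	/* ZF after "lock subq" is set iff the new counter value is zero. */
	asm volatile(LOCK_PREFIX "subq %[val], %[var]"
		     : [var] "+m" (v->counter), "=@cce" (result)
		     : [val] "er" (i)
		     : "memory");
	return result;
}
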
@@ -155,43 +155,49 @@
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	return i + xadd(&v->counter, i);
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
-static inline long arch_atomic64_sub_return(long i, atomic64_t *v)
+static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
 	return arch_atomic64_add_return(-i, v);
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return
 
-static inline long arch_atomic64_fetch_add(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return xadd(&v->counter, i);
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
 {
 	return xadd(&v->counter, -i);
 }
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
 
-static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
+static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
 
-static inline long arch_atomic64_xchg(atomic64_t *v, long new)
+static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	return arch_xchg(&v->counter, new);
 }
+#define arch_atomic64_xchg arch_atomic64_xchg
 
-static inline void arch_atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "andq %1,%0"
 		     : "+m" (v->counter)
@@ -199,7 +205,7 @@
 		     : "memory");
 }
 
-static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
 {
 	s64 val = arch_atomic64_read(v);
 
@@ -207,8 +213,9 @@
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
 	return val;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
 
-static inline void arch_atomic64_or(long i, atomic64_t *v)
+static inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "orq %1,%0"
 		     : "+m" (v->counter)
@@ -216,7 +223,7 @@
 		     : "memory");
 }
 
-static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
 {
 	s64 val = arch_atomic64_read(v);
 
@@ -224,8 +231,9 @@
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
 	return val;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
 
-static inline void arch_atomic64_xor(long i, atomic64_t *v)
+static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "xorq %1,%0"
 		     : "+m" (v->counter)
@@ -233,7 +241,7 @@
 		     : "memory");
 }
 
-static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
 {
 	s64 val = arch_atomic64_read(v);
 
@@ -241,5 +249,6 @@
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
 	return val;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
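
The fetch_and/fetch_or/fetch_xor helpers touched in the last hunks all share one shape: read the counter once, then retry arch_atomic64_try_cmpxchg() until the value in memory still matches, and return the old value. A standalone userspace illustration of that retry loop, using the GCC/Clang __atomic builtins instead of the kernel primitives (an analogy only, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* Illustration only: fetch_and built from a compare-exchange retry loop,
 * mirroring the shape of arch_atomic64_fetch_and() above. */
static int64_t fetch_and_demo(int64_t *counter, int64_t mask)
{
	int64_t old = __atomic_load_n(counter, __ATOMIC_RELAXED);

	/*
	 * On failure, __atomic_compare_exchange_n() rewrites 'old' with the
	 * value currently in memory (just as arch_atomic64_try_cmpxchg()
	 * updates *old), so the next iteration retries with fresh data.
	 */
	while (!__atomic_compare_exchange_n(counter, &old, old & mask,
					    false, __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		;

	return old;	/* value before the AND: fetch_* semantics */
}
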