2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/atomic64_32.h
@@ -9,7 +9,7 @@
 /* An 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	s64 __aligned(8) counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val)	{ (val) }
@@ -71,11 +71,11 @@
  * the old value.
  */
 
-static inline long long arch_atomic64_cmpxchg(atomic64_t *v, long long o,
-					      long long n)
+static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	return arch_cmpxchg64(&v->counter, o, n);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
 /**
  * arch_atomic64_xchg - xchg atomic64 variable
@@ -85,9 +85,9 @@
  * Atomically xchgs the value of @v to @n and returns
  * the old value.
  */
-static inline long long arch_atomic64_xchg(atomic64_t *v, long long n)
+static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
 {
-	long long o;
+	s64 o;
 	unsigned high = (unsigned)(n >> 32);
 	unsigned low = (unsigned)n;
 	alternative_atomic64(xchg, "=&A" (o),
@@ -95,6 +95,7 @@
 			     : "memory");
 	return o;
 }
+#define arch_atomic64_xchg arch_atomic64_xchg
 
 /**
  * arch_atomic64_set - set atomic64 variable
@@ -103,7 +104,7 @@
  *
  * Atomically sets the value of @v to @n.
  */
-static inline void arch_atomic64_set(atomic64_t *v, long long i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned high = (unsigned)(i >> 32);
 	unsigned low = (unsigned)i;
@@ -118,9 +119,9 @@
  *
  * Atomically reads the value of @v and returns it.
  */
-static inline long long arch_atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	long long r;
+	s64 r;
 	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
 	return r;
 }
@@ -132,37 +133,39 @@
  *
  * Atomically adds @i to @v and returns @i + *@v
  */
-static inline long long arch_atomic64_add_return(long long i, atomic64_t *v)
+static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	alternative_atomic64(add_return,
 			     ASM_OUTPUT2("+A" (i), "+c" (v)),
 			     ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
 /*
  * Other variants with different arithmetic operators:
  */
-static inline long long arch_atomic64_sub_return(long long i, atomic64_t *v)
+static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
 	alternative_atomic64(sub_return,
 			     ASM_OUTPUT2("+A" (i), "+c" (v)),
 			     ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return
 
-static inline long long arch_atomic64_inc_return(atomic64_t *v)
+static inline s64 arch_atomic64_inc_return(atomic64_t *v)
 {
-	long long a;
+	s64 a;
 	alternative_atomic64(inc_return, "=&A" (a),
 			     "S" (v) : "memory", "ecx");
 	return a;
 }
 #define arch_atomic64_inc_return arch_atomic64_inc_return
 
-static inline long long arch_atomic64_dec_return(atomic64_t *v)
+static inline s64 arch_atomic64_dec_return(atomic64_t *v)
 {
-	long long a;
+	s64 a;
 	alternative_atomic64(dec_return, "=&A" (a),
 			     "S" (v) : "memory", "ecx");
 	return a;
@@ -176,7 +179,7 @@
  *
  * Atomically adds @i to @v.
  */
-static inline long long arch_atomic64_add(long long i, atomic64_t *v)
+static inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__alternative_atomic64(add, add_return,
 			       ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -191,7 +194,7 @@
  *
  * Atomically subtracts @i from @v.
  */
-static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
+static inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
 {
 	__alternative_atomic64(sub, sub_return,
 			       ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -234,8 +237,7 @@
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if the add was done, zero otherwise.
  */
-static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
-					   long long u)
+static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned low = (unsigned)u;
 	unsigned high = (unsigned)(u >> 32);
@@ -244,6 +246,7 @@
 			     "S" (v) : "memory");
 	return (int)a;
 }
+#define arch_atomic64_add_unless arch_atomic64_add_unless
 
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
@@ -254,9 +257,9 @@
 }
 #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 
-static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long r;
+	s64 r;
 	alternative_atomic64(dec_if_positive, "=&A" (r),
 			     "S" (v) : "ecx", "memory");
 	return r;
@@ -266,69 +269,73 @@
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
-static inline void arch_atomic64_and(long long i, atomic64_t *v)
+static inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
-	long long old, c = 0;
+	s64 old, c = 0;
 
 	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
 		c = old;
 }
 
-static inline long long arch_atomic64_fetch_and(long long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
 {
-	long long old, c = 0;
+	s64 old, c = 0;
 
 	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
 		c = old;
 
 	return old;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
 
-static inline void arch_atomic64_or(long long i, atomic64_t *v)
+static inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
-	long long old, c = 0;
+	s64 old, c = 0;
 
 	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
 		c = old;
 }
 
-static inline long long arch_atomic64_fetch_or(long long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
 {
-	long long old, c = 0;
+	s64 old, c = 0;
 
 	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
 		c = old;
 
 	return old;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
 
-static inline void arch_atomic64_xor(long long i, atomic64_t *v)
+static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
-	long long old, c = 0;
+	s64 old, c = 0;
 
 	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
 		c = old;
 }
 
-static inline long long arch_atomic64_fetch_xor(long long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
 {
-	long long old, c = 0;
+	s64 old, c = 0;
 
 	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
 		c = old;
 
 	return old;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 
-static inline long long arch_atomic64_fetch_add(long long i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
-	long long old, c = 0;
+	s64 old, c = 0;
 
 	while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
 		c = old;
 
 	return old;
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
 #define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))
 
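Note on the two changes in this diff: the long long -> s64 conversion does not change the generated code on 32-bit x86 (s64 is typedef'd to long long there), it only brings the prototypes in line with the generic atomic64 API; and each added "#define arch_atomic64_<op> arch_atomic64_<op>" lets the generic atomic layer test at preprocessing time which operations this architecture implements, so fallbacks are only generated for the ones that are missing. The snippet below is a minimal, hypothetical sketch of that self-define convention, not the kernel's actual fallback header; the names mirror the diff, but the xchg body is plain C and is not actually atomic.

/*
 * Sketch of the "#define op op" convention: an arch header that implements
 * an operation also defines a macro of the same name, and a generic layer
 * compiles its fallback only when that macro is absent.
 */
#include <stdio.h>

typedef long long s64;			/* s64 is long long on 32-bit x86 */
typedef struct { s64 counter; } atomic64_t;

/* "Arch" side: provide xchg and advertise it via a self-define. */
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 old = v->counter;		/* illustration only, not atomic */
	v->counter = n;
	return old;
}
#define arch_atomic64_xchg arch_atomic64_xchg

/* "Generic" side: emit a fallback only if the arch did not advertise one. */
#ifndef arch_atomic64_xchg
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	/* a cmpxchg-based loop would go here */
	return 0;
}
#endif

int main(void)
{
	atomic64_t v = { 5 };

	printf("previous value: %lld\n", (long long)arch_atomic64_xchg(&v, 9));
	return 0;
}

With the self-defines in place, the generic layer needs no per-architecture configuration switches: the presence or absence of the macro is the whole protocol.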