2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/atomic.h
@@ -14,8 +14,6 @@
  * resource counting etc..
  */
 
-#define ATOMIC_INIT(i) { (i) }
-
 /**
  * arch_atomic_read - read atomic variable
  * @v: pointer of type atomic_t
@@ -28,7 +26,7 @@
  * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
  * it's non-inlined function that increases binary size and stack usage.
  */
-	return READ_ONCE((v)->counter);
+	return __READ_ONCE((v)->counter);
 }
 
 /**
@@ -40,7 +38,7 @@
  */
 static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
-	WRITE_ONCE(v->counter, i);
+	__WRITE_ONCE(v->counter, i);
 }
 
 /**
@@ -82,7 +80,7 @@
  */
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
 }
 #define arch_atomic_sub_and_test arch_atomic_sub_and_test
 
@@ -122,7 +120,7 @@
  */
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
 }
 #define arch_atomic_dec_and_test arch_atomic_dec_and_test
 
@@ -136,7 +134,7 @@
  */
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
 }
 #define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
@@ -151,7 +149,7 @@
  */
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
 }
 #define arch_atomic_add_negative arch_atomic_add_negative
 
@@ -166,6 +164,7 @@
 {
 	return i + xadd(&v->counter, i);
 }
+#define arch_atomic_add_return arch_atomic_add_return
 
 /**
  * arch_atomic_sub_return - subtract integer and return
@@ -178,34 +177,39 @@
 {
 	return arch_atomic_add_return(-i, v);
 }
+#define arch_atomic_sub_return arch_atomic_sub_return
 
 static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return xadd(&v->counter, i);
 }
+#define arch_atomic_fetch_add arch_atomic_fetch_add
 
 static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
 {
 	return xadd(&v->counter, -i);
 }
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
 
 static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 
-static inline int arch_atomic_xchg(atomic_t *v, int new)
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
 {
 	return arch_xchg(&v->counter, new);
 }
+#define arch_atomic_xchg arch_atomic_xchg
 
-static inline void arch_atomic_and(int i, atomic_t *v)
+static __always_inline void arch_atomic_and(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "andl %1,%0"
 			: "+m" (v->counter)
@@ -213,7 +217,7 @@
 			: "memory");
 }
 
-static inline int arch_atomic_fetch_and(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
 {
 	int val = arch_atomic_read(v);
 
@@ -221,8 +225,9 @@
 
 	return val;
 }
+#define arch_atomic_fetch_and arch_atomic_fetch_and
 
-static inline void arch_atomic_or(int i, atomic_t *v)
+static __always_inline void arch_atomic_or(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "orl %1,%0"
 			: "+m" (v->counter)
@@ -230,7 +235,7 @@
 			: "memory");
 }
 
-static inline int arch_atomic_fetch_or(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
 {
 	int val = arch_atomic_read(v);
 
@@ -238,8 +243,9 @@
 
 	return val;
 }
+#define arch_atomic_fetch_or arch_atomic_fetch_or
 
-static inline void arch_atomic_xor(int i, atomic_t *v)
+static __always_inline void arch_atomic_xor(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "xorl %1,%0"
 			: "+m" (v->counter)
@@ -247,7 +253,7 @@
 			: "memory");
 }
 
-static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 {
 	int val = arch_atomic_read(v);
 
@@ -255,6 +261,7 @@
 
 	return val;
 }
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
@@ -262,6 +269,6 @@
 # include <asm/atomic64_64.h>
 #endif
 
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
 
 #endif /* _ASM_X86_ATOMIC_H */
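
Note: the new "#define arch_atomic_<op> arch_atomic_<op>" self-aliases are how the architecture advertises which operations it implements, so the generic atomic layer only generates fallbacks for the ones that are missing, and "#define ARCH_ATOMIC" at the end tells the common code to layer the instrumented atomic_*() wrappers on top of these arch_atomic_*() primitives instead of this header pulling in <asm-generic/atomic-instrumented.h> itself. The fragment below is a minimal, compilable sketch of that detection idiom only, not kernel code; my_atomic_t, the non-atomic function bodies, and the add_return fallback are illustrative assumptions.

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* "arch" layer: implements fetch_add and advertises it via a self-alias. */
static inline int arch_atomic_fetch_add(int i, my_atomic_t *v)
{
	int old = v->counter;	/* non-atomic stand-in for the real xadd */

	v->counter = old + i;
	return old;
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

/*
 * "generic" layer: a fallback is emitted only when the arch did not
 * advertise the operation via a self-alias.
 */
#ifndef arch_atomic_fetch_add		/* advertised above, so this is skipped */
#error "a cmpxchg-based fallback for arch_atomic_fetch_add would go here"
#endif

#ifndef arch_atomic_add_return		/* not advertised, so the fallback is used */
static inline int arch_atomic_add_return(int i, my_atomic_t *v)
{
	return arch_atomic_fetch_add(i, v) + i;
}
#endif

int main(void)
{
	my_atomic_t v = { 40 };

	printf("fetch_add  -> %d\n", arch_atomic_fetch_add(1, &v));	/* prints 40 */
	printf("add_return -> %d\n", arch_atomic_add_return(1, &v));	/* prints 42 */
	return 0;
}

The self-alias costs nothing at expansion time (an object-like macro that expands to its own name) but is visible to #ifdef/#ifndef, which is all the fallback machinery needs.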