2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/arch/xtensa/include/asm/atomic.h
+++ b/kernel/arch/xtensa/include/asm/atomic.h
@@ -15,13 +15,9 @@
 
 #include <linux/stringify.h>
 #include <linux/types.h>
-
-#ifdef __KERNEL__
 #include <asm/processor.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-
-#define ATOMIC_INIT(i) { (i) }
 
 /*
  * This Xtensa implementation assumes that the right mechanism
@@ -58,7 +54,67 @@
  */
 #define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
 
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+        \
+        __asm__ __volatile__( \
+                "1: l32ex %[tmp], %[addr]\n" \
+                " " #op " %[result], %[tmp], %[i]\n" \
+                " s32ex %[result], %[addr]\n" \
+                " getex %[result]\n" \
+                " beqz %[result], 1b\n" \
+                : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+                : [i] "a" (i), [addr] "a" (v) \
+                : "memory" \
+                ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+        \
+        __asm__ __volatile__( \
+                "1: l32ex %[tmp], %[addr]\n" \
+                " " #op " %[result], %[tmp], %[i]\n" \
+                " s32ex %[result], %[addr]\n" \
+                " getex %[result]\n" \
+                " beqz %[result], 1b\n" \
+                " " #op " %[result], %[tmp], %[i]\n" \
+                : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+                : [i] "a" (i), [addr] "a" (v) \
+                : "memory" \
+                ); \
+        \
+        return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+        \
+        __asm__ __volatile__( \
+                "1: l32ex %[tmp], %[addr]\n" \
+                " " #op " %[result], %[tmp], %[i]\n" \
+                " s32ex %[result], %[addr]\n" \
+                " getex %[result]\n" \
+                " beqz %[result], 1b\n" \
+                : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+                : [i] "a" (i), [addr] "a" (v) \
+                : "memory" \
+                ); \
+        \
+        return tmp; \
+}
+
+#elif XCHAL_HAVE_S32C1I
 #define ATOMIC_OP(op) \
 static inline void atomic_##op(int i, atomic_t * v) \
 { \
@@ -66,13 +122,14 @@
         int result; \
         \
         __asm__ __volatile__( \
-                "1: l32i %1, %3, 0\n" \
-                " wsr %1, scompare1\n" \
-                " " #op " %0, %1, %2\n" \
-                " s32c1i %0, %3, 0\n" \
-                " bne %0, %1, 1b\n" \
-                : "=&a" (result), "=&a" (tmp) \
-                : "a" (i), "a" (v) \
+                "1: l32i %[tmp], %[mem]\n" \
+                " wsr %[tmp], scompare1\n" \
+                " " #op " %[result], %[tmp], %[i]\n" \
+                " s32c1i %[result], %[mem]\n" \
+                " bne %[result], %[tmp], 1b\n" \
+                : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+                  [mem] "+m" (*v) \
+                : [i] "a" (i) \
                 : "memory" \
                 ); \
 } \
@@ -84,14 +141,15 @@
         int result; \
         \
         __asm__ __volatile__( \
-                "1: l32i %1, %3, 0\n" \
-                " wsr %1, scompare1\n" \
-                " " #op " %0, %1, %2\n" \
-                " s32c1i %0, %3, 0\n" \
-                " bne %0, %1, 1b\n" \
-                " " #op " %0, %0, %2\n" \
-                : "=&a" (result), "=&a" (tmp) \
-                : "a" (i), "a" (v) \
+                "1: l32i %[tmp], %[mem]\n" \
+                " wsr %[tmp], scompare1\n" \
+                " " #op " %[result], %[tmp], %[i]\n" \
+                " s32c1i %[result], %[mem]\n" \
+                " bne %[result], %[tmp], 1b\n" \
+                " " #op " %[result], %[result], %[i]\n" \
+                : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+                  [mem] "+m" (*v) \
+                : [i] "a" (i) \
                 : "memory" \
                 ); \
         \
@@ -105,13 +163,14 @@
         int result; \
         \
         __asm__ __volatile__( \
-                "1: l32i %1, %3, 0\n" \
-                " wsr %1, scompare1\n" \
-                " " #op " %0, %1, %2\n" \
-                " s32c1i %0, %3, 0\n" \
-                " bne %0, %1, 1b\n" \
-                : "=&a" (result), "=&a" (tmp) \
-                : "a" (i), "a" (v) \
+                "1: l32i %[tmp], %[mem]\n" \
+                " wsr %[tmp], scompare1\n" \
+                " " #op " %[result], %[tmp], %[i]\n" \
+                " s32c1i %[result], %[mem]\n" \
+                " bne %[result], %[tmp], 1b\n" \
+                : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+                  [mem] "+m" (*v) \
+                : [i] "a" (i) \
                 : "memory" \
                 ); \
         \
@@ -126,14 +185,14 @@
         unsigned int vval; \
         \
         __asm__ __volatile__( \
-                " rsil a15, "__stringify(TOPLEVEL)"\n"\
-                " l32i %0, %2, 0\n" \
-                " " #op " %0, %0, %1\n" \
-                " s32i %0, %2, 0\n" \
+                " rsil a15, "__stringify(TOPLEVEL)"\n" \
+                " l32i %[result], %[mem]\n" \
+                " " #op " %[result], %[result], %[i]\n" \
+                " s32i %[result], %[mem]\n" \
                 " wsr a15, ps\n" \
                 " rsync\n" \
-                : "=&a" (vval) \
-                : "a" (i), "a" (v) \
+                : [result] "=&a" (vval), [mem] "+m" (*v) \
+                : [i] "a" (i) \
                 : "a15", "memory" \
                 ); \
 } \
@@ -145,13 +204,13 @@
         \
         __asm__ __volatile__( \
                 " rsil a15,"__stringify(TOPLEVEL)"\n" \
-                " l32i %0, %2, 0\n" \
-                " " #op " %0, %0, %1\n" \
-                " s32i %0, %2, 0\n" \
+                " l32i %[result], %[mem]\n" \
+                " " #op " %[result], %[result], %[i]\n" \
+                " s32i %[result], %[mem]\n" \
                 " wsr a15, ps\n" \
                 " rsync\n" \
-                : "=&a" (vval) \
-                : "a" (i), "a" (v) \
+                : [result] "=&a" (vval), [mem] "+m" (*v) \
+                : [i] "a" (i) \
                 : "a15", "memory" \
                 ); \
         \
@@ -165,13 +224,14 @@
         \
         __asm__ __volatile__( \
                 " rsil a15,"__stringify(TOPLEVEL)"\n" \
-                " l32i %0, %3, 0\n" \
-                " " #op " %1, %0, %2\n" \
-                " s32i %1, %3, 0\n" \
+                " l32i %[result], %[mem]\n" \
+                " " #op " %[tmp], %[result], %[i]\n" \
+                " s32i %[tmp], %[mem]\n" \
                 " wsr a15, ps\n" \
                 " rsync\n" \
-                : "=&a" (vval), "=&a" (tmp) \
-                : "a" (i), "a" (v) \
+                : [result] "=&a" (vval), [tmp] "=&a" (tmp), \
+                  [mem] "+m" (*v) \
+                : [i] "a" (i) \
                 : "a15", "memory" \
                 ); \
         \
@@ -199,7 +259,5 @@
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
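
For reference, a minimal user-space sketch (not part of the patch) of what the new XCHAL_HAVE_EXCLUSIVE path does: l32ex/s32ex form a load-exclusive/store-exclusive pair, getex fetches the store's success flag, and the loop retries until the exclusive store wins. The model below stands in for the exclusive monitor with GCC's __atomic compare-exchange builtin; the model_atomic_t type and model_atomic_add() name are illustrative only, not kernel API.

```c
/* Illustrative model only -- not kernel code. */
#include <stdio.h>

typedef struct { int counter; } model_atomic_t;   /* stand-in for atomic_t */

static void model_atomic_add(int i, model_atomic_t *v)
{
        int old, new;

        do {
                old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);  /* l32ex */
                new = old + i;                                         /* #op   */
                /* s32ex + getex + beqz: retry until the store succeeds */
        } while (!__atomic_compare_exchange_n(&v->counter, &old, new, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

int main(void)
{
        model_atomic_t v = { .counter = 5 };

        model_atomic_add(3, &v);
        printf("%d\n", v.counter);      /* prints 8 */
        return 0;
}
```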