2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/arch/x86/include/asm/bitops.h
@@ -45,32 +45,16 @@
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
  */
-#define IS_IMMEDIATE(nr)	(__builtin_constant_p(nr))
 #define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)			(1 << ((nr) & 7))
 
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
 static __always_inline void
-set_bit(long nr, volatile unsigned long *addr)
+arch_set_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "orb %1,%0"
+	if (__builtin_constant_p(nr)) {
+		asm volatile(LOCK_PREFIX "orb %b1,%0"
 			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)CONST_MASK(nr))
+			: "iq" (CONST_MASK(nr))
 			: "memory");
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
@@ -78,63 +62,40 @@
 	}
 }
 
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
 static __always_inline void
-clear_bit(long nr, volatile unsigned long *addr)
+arch_clear_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "andb %1,%0"
+	if (__builtin_constant_p(nr)) {
+		asm volatile(LOCK_PREFIX "andb %b1,%0"
 			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)~CONST_MASK(nr)));
+			: "iq" (~CONST_MASK(nr)));
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
 			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
 
-/*
- * clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and implies release semantics before the memory
- * operation. It can be used for an unlock.
- */
-static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
-	clear_bit(nr, addr);
+	arch_clear_bit(nr, addr);
 }
 
-static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
-static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
 	bool negative;
 	asm volatile(LOCK_PREFIX "andb %2,%1"
@@ -143,96 +104,48 @@
 		: "ir" ((char) ~(1 << nr)) : "memory");
 	return negative;
 }
+#define arch_clear_bit_unlock_is_negative_byte \
+	arch_clear_bit_unlock_is_negative_byte
 
-// Let everybody know we have it
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
-
-/*
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * __clear_bit() is non-atomic and implies release semantics before the memory
- * operation. It can be used for an unlock if no other CPUs can concurrently
- * modify other bits in the word.
- */
-static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }
 
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __always_inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch_change_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "xorb %1,%0"
+	if (__builtin_constant_p(nr)) {
+		asm volatile(LOCK_PREFIX "xorb %b1,%0"
 			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)CONST_MASK(nr)));
+			: "iq" (CONST_MASK(nr)));
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
 			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
 
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
-{
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
-			 *addr, "Ir", nr, "%0", c);
-}
-
-/**
- * test_and_set_bit_lock - Set a bit and return its old value for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is the same as test_and_set_bit on x86.
- */
 static __always_inline bool
-test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+arch_test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	return test_and_set_bit(nr, addr);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
 }
 
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+	return arch_test_and_set_bit(nr, addr);
+}
+
+static __always_inline bool
+arch___test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -243,29 +156,13 @@
 	return oldbit;
 }
 
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
-			 *addr, "Ir", nr, "%0", c);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
 }
 
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- *
+/*
  * Note: the operation is performed atomically with respect to
  * the local CPU, but not other CPUs. Portable code should not
  * rely on this behaviour.
@@ -273,7 +170,8 @@
  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  * this without also updating arch/x86/kernel/kvm.c
  */
-static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -284,8 +182,8 @@
 	return oldbit;
 }
 
-/* WARNING: non atomic and it can be reordered! */
-static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -297,18 +195,10 @@
 	return oldbit;
 }
 
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
-			 *addr, "Ir", nr, "%0", c);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 
 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
@@ -329,16 +219,7 @@
 	return oldbit;
 }
 
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static bool test_bit(int nr, const volatile unsigned long *addr);
-#endif
-
-#define test_bit(nr, addr)			\
+#define arch_test_bit(nr, addr)			\
 	(__builtin_constant_p((nr))		\
 	 ? constant_test_bit((nr), (addr))	\
 	 : variable_test_bit((nr), (addr)))
@@ -440,7 +321,7 @@
  * set bit if value is nonzero. The last (most significant) bit is
  * at position 32.
  */
-static __always_inline int fls(int x)
+static __always_inline int fls(unsigned int x)
 {
 	int r;
 
@@ -507,6 +388,10 @@
 
 #include <asm-generic/bitops/const_hweight.h>
 
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-non-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
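
The orb/andb/xorb forms above follow the header's own comment that the locked ops which do not return the old value are done "as a mask operation on a byte": for a compile-time-constant nr, CONST_MASK_ADDR() selects byte nr >> 3 of the bitmap and CONST_MASK() builds the in-byte mask 1 << (nr & 7), which on little-endian x86 names exactly the same bit as the word-wide bts/btr/btc paths. A minimal user-space sketch of that arithmetic, with const_mask_addr() and const_mask() as illustrative helpers rather than the kernel macros:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative equivalents of CONST_MASK_ADDR()/CONST_MASK():
 * bit nr of a bitmap lives in byte nr >> 3, at position nr & 7 in that byte. */
static uint8_t *const_mask_addr(void *addr, long nr)
{
	return (uint8_t *)addr + (nr >> 3);
}

static uint8_t const_mask(long nr)
{
	return (uint8_t)(1u << (nr & 7));
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };
	long nr = 42;

	/* Byte-wise OR, as the constant-nr "orb" path does. */
	*const_mask_addr(bitmap, nr) |= const_mask(nr);

	/* The word-wise view used by the bts/btr/btc paths sees the same bit
	 * (true on little-endian, the only layout x86 has to serve). */
	assert(bitmap[nr / (8 * sizeof(long))] &
	       (1UL << (nr % (8 * sizeof(long)))));

	/* Byte-wise AND with the inverted mask clears it, like the "andb" path. */
	*const_mask_addr(bitmap, nr) &= (uint8_t)~const_mask(nr);
	assert(bitmap[0] == 0 && bitmap[1] == 0);

	puts("byte view and word view agree");
	return 0;
}

Both asserts pass on a little-endian machine; the kernel macro additionally wraps the computed address in WBYTE_ADDR() to form the asm memory operand, but the address math is the same.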
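
The arch_ renames and the three new asm-generic/bitops/instrumented-*.h includes belong together: the generic headers re-create the un-prefixed set_bit()/clear_bit()/test_bit() family as thin wrappers that report the access to the sanitizer instrumentation and then call the corresponding arch_*() primitive, so this file only has to provide the raw asm. A rough self-contained sketch of that layering, using hypothetical stand-ins (instrument_write() is a stub here, and arch_set_bit() is emulated with a compiler builtin instead of the real LOCK-prefixed instruction):

#include <stdio.h>
#include <stddef.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Hypothetical stand-in for the kernel's instrumentation hook. */
static void instrument_write(const volatile void *addr, size_t size)
{
	printf("instrumented write of %zu bytes at %p\n", size, (void *)addr);
}

/* Stand-in for x86 arch_set_bit(): atomic OR into the word holding bit nr.
 * (The real thing is a LOCK-prefixed orb/bts; ordering is simplified here.) */
static void arch_set_bit(long nr, volatile unsigned long *addr)
{
	__atomic_fetch_or(&addr[nr / BITS_PER_LONG],
			  1UL << (nr % BITS_PER_LONG), __ATOMIC_RELAXED);
}

/* Shape of the generic instrumented wrapper: hook first, then the arch op. */
static void set_bit(long nr, volatile unsigned long *addr)
{
	instrument_write(&addr[nr / BITS_PER_LONG], sizeof(unsigned long));
	arch_set_bit(nr, addr);
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };

	set_bit(70, bitmap);
	printf("word %zu = %#lx\n", 70 / BITS_PER_LONG, bitmap[70 / BITS_PER_LONG]);
	return 0;
}

On a 64-bit build this prints the instrumentation line followed by "word 1 = 0x40"; in the kernel the hook is the KASAN/KCSAN instrumentation, and keeping it in the wrapper layer is what lets the x86 header stay purely about the asm.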