[...]
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
  */
-#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
 #define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)			(1 << ((nr) & 7))
 
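For a constant bit number, the two macros above narrow a lock-prefixed bit operation down to one byte: CONST_MASK_ADDR() picks the byte that holds the bit and CONST_MASK() builds the mask within that byte (this relies on x86 being little-endian; WBYTE_ADDR() is defined elsewhere in the file). A minimal user-space sketch of the same arithmetic, with plain byte addressing standing in for WBYTE_ADDR() and hypothetical helper names:

```c
#include <stdio.h>

/* Model of CONST_MASK_ADDR(): address of the byte that contains bit nr. */
static unsigned char *const_mask_addr(unsigned long *addr, long nr)
{
	return (unsigned char *)addr + (nr >> 3);
}

/* Model of CONST_MASK(): the mask for bit nr within that byte. */
static unsigned char const_mask(long nr)
{
	return 1u << (nr & 7);
}

int main(void)
{
	unsigned long bitmap[2] = { 0 };

	/* bit 13 lives in byte 1, in-byte mask 0x20 */
	*const_mask_addr(bitmap, 13) |= const_mask(13);
	printf("byte offset %ld, mask 0x%02x, bitmap[0] = 0x%lx\n",
	       13L >> 3, (unsigned)const_mask(13), bitmap[0]);
	return 0;
}
```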
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
 static __always_inline void
-set_bit(long nr, volatile unsigned long *addr)
+arch_set_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "orb %1,%0"
+	if (__builtin_constant_p(nr)) {
+		asm volatile(LOCK_PREFIX "orb %b1,%0"
 			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)CONST_MASK(nr))
+			: "iq" (CONST_MASK(nr))
 			: "memory");
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
[...]
 	}
 }
 
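The visible change in arch_set_bit() (repeated below in arch_clear_bit() and arch_change_bit()) is that the explicit (u8) cast on the mask is dropped and the byte-wide instruction instead asks for the byte form of the operand through the %b1 operand modifier. A rough stand-alone illustration of that modifier, assuming GCC/Clang extended asm on x86 and a hypothetical helper name:

```c
/* Byte-wide OR into memory.  "iq" allows an immediate or a byte-addressable
 * register; %b1 makes the assembler see the byte view (%al, %bl, ...) when a
 * register is chosen, matching "orb" even though the C operand is an int. */
static inline void or_byte(unsigned char *byte, int mask)
{
	asm volatile("orb %b1,%0"
		     : "+m" (*byte)
		     : "iq" (mask)
		     : "memory");
}
```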
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
 static __always_inline void
-clear_bit(long nr, volatile unsigned long *addr)
+arch_clear_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "andb %1,%0"
+	if (__builtin_constant_p(nr)) {
+		asm volatile(LOCK_PREFIX "andb %b1,%0"
 			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)~CONST_MASK(nr)));
+			: "iq" (~CONST_MASK(nr)));
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
 			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
 
-/*
- * clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and implies release semantics before the memory
- * operation. It can be used for an unlock.
- */
-static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
-	clear_bit(nr, addr);
+	arch_clear_bit(nr, addr);
 }
 
-static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
-static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
 	bool negative;
 	asm volatile(LOCK_PREFIX "andb %2,%1"
[...]
 		     : "ir" ((char) ~(1 << nr)) : "memory");
 	return negative;
 }
+#define arch_clear_bit_unlock_is_negative_byte \
+	arch_clear_bit_unlock_is_negative_byte
 
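arch_clear_bit_unlock_is_negative_byte() clears one bit in the byte addressed by @addr with release semantics and reports whether bit 7 of that byte is still set afterwards; the page cache uses this pattern to drop PG_locked and test for waiters in the same byte with a single locked instruction. A portable model of the semantics, assuming the GCC atomic builtins; this is a sketch of the contract, not the kernel's fallback implementation:

```c
#include <stdbool.h>

/* Clear bit @nr (nr < 8) in the byte at @addr and return whether bit 7 of
 * the resulting byte is set.  Release ordering mirrors the unlock use. */
static inline bool clear_bit_unlock_is_negative_byte_model(long nr,
							    volatile unsigned char *addr)
{
	unsigned char mask = (unsigned char)~(1u << nr);
	unsigned char old = __atomic_fetch_and(addr, mask, __ATOMIC_RELEASE);

	return ((old & mask) & 0x80) != 0;
}
```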
-// Let everybody know we have it
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
-
-/*
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * __clear_bit() is non-atomic and implies release semantics before the memory
- * operation. It can be used for an unlock if no other CPUs can concurrently
- * modify other bits in the word.
- */
-static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }
 
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __always_inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch_change_bit(long nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
-		asm volatile(LOCK_PREFIX "xorb %1,%0"
+	if (__builtin_constant_p(nr)) {
+		asm volatile(LOCK_PREFIX "xorb %b1,%0"
 			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((u8)CONST_MASK(nr)));
+			: "iq" (CONST_MASK(nr)));
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
 			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
 
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
-{
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
-			 *addr, "Ir", nr, "%0", c);
-}
-
-/**
- * test_and_set_bit_lock - Set a bit and return its old value for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is the same as test_and_set_bit on x86.
- */
 static __always_inline bool
-test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+arch_test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	return test_and_set_bit(nr, addr);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
 }
 
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+	return arch_test_and_set_bit(nr, addr);
+}
+
+static __always_inline bool
+arch___test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
[...]
 	return oldbit;
 }
 
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
-			 *addr, "Ir", nr, "%0", c);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
 }
 
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- *
+/*
  * Note: the operation is performed atomically with respect to
  * the local CPU, but not other CPUs. Portable code should not
  * rely on this behaviour.
[...]
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
-static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
[...]
 	return oldbit;
 }
 
-/* WARNING: non atomic and it can be reordered! */
-static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
[...]
 	return oldbit;
 }
 
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
-			 *addr, "Ir", nr, "%0", c);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 
 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
[...]
 	return oldbit;
 }
 
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static bool test_bit(int nr, const volatile unsigned long *addr);
-#endif
-
-#define test_bit(nr, addr)			\
+#define arch_test_bit(nr, addr)			\
 	(__builtin_constant_p((nr))		\
 	 ? constant_test_bit((nr), (addr))	\
 	 : variable_test_bit((nr), (addr)))
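arch_test_bit() keeps the existing split: a compile-time-constant @nr is routed to constant_test_bit(), plain C that the compiler can fold to a single immediate test, while a variable @nr goes to variable_test_bit(), which emits a bt instruction. The constant path is equivalent in shape to the sketch below (types simplified; the kernel version is written in terms of BITS_PER_LONG):

```c
#include <stdbool.h>

/* Index the word that holds the bit, shift it down and mask.  With a
 * constant nr the whole expression folds at compile time. */
static inline bool constant_test_bit_model(long nr, const volatile unsigned long *addr)
{
	unsigned long bits_per_long = 8 * sizeof(unsigned long);

	return (addr[nr / bits_per_long] >> (nr % bits_per_long)) & 1UL;
}
```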
[...]
  * set bit if value is nonzero. The last (most significant) bit is
  * at position 32.
  */
-static __always_inline int fls(int x)
+static __always_inline int fls(unsigned int x)
 {
 	int r;
 
[...]
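fls() now takes an unsigned int; its contract is unchanged: it returns the 1-based position of the most significant set bit, so fls(0) == 0, fls(1) == 1 and fls(0x80000000) == 32. A portable equivalent for reference (a sketch using the GCC/Clang __builtin_clz builtin, not the bsr-based implementation in this file):

```c
/* 1-based index of the highest set bit; 0 when no bit is set. */
static inline int fls_model(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* clz(0) is undefined, guard it */
}
```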
 
 #include <asm-generic/bitops/const_hweight.h>
 
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-non-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
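The renamed arch_*() helpers are the raw operations; the three asm-generic headers added at the end define the ordinary names (set_bit(), test_and_set_bit(), ...) as thin wrappers that report the access to the sanitizers before calling the arch_ variant. Roughly of this shape (a sketch, not the verbatim asm-generic code; the exact instrumentation hook has varied across kernel versions):

```c
/* Sketch of what asm-generic/bitops/instrumented-atomic.h provides:
 * tell KASAN about the write, then defer to the raw arch op. */
static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch_set_bit(nr, addr);
}
```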