@@ -34,13 +34,13 @@
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	arch_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
 	arch_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	arch_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	arch_spin_unlock(s);			\
 	local_irq_restore(f);			\
 } while(0)
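These two helpers are how this architecture emulates atomic operations without a hardware atomic read-modify-write: the variable's address is hashed by ATOMIC_HASH into a small fixed table of arch_spinlock_t locks, local interrupts are disabled, and the hashed lock is held around a plain memory update. A minimal user-space sketch of the same hashed-lock idea follows; pthread mutexes stand in for arch_spinlock_t, the table size and address shift are arbitrary choices of this sketch, and local_irq_save() has no user-space equivalent, so it is omitted.

#include <pthread.h>
#include <stdint.h>

#define NR_LOCKS 16

/* One small, fixed table of locks shared by every "atomic" variable.
 * The [0 ... N] range initializer is a GCC extension, as in the kernel. */
static pthread_mutex_t lock_table[NR_LOCKS] = {
	[0 ... NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER
};

/* Pick a lock by address, in the spirit of ATOMIC_HASH(l): drop the low
 * bits (neighbouring fields share them) and mask into the table. */
static pthread_mutex_t *hash_lock(const void *addr)
{
	return &lock_table[((uintptr_t)addr >> 4) % NR_LOCKS];
}

/* Emulated atomic add, mirroring the lock/update/unlock pattern above. */
static void emulated_atomic_add(int i, int *counter)
{
	pthread_mutex_t *s = hash_lock(counter);

	pthread_mutex_lock(s);
	*counter += i;
	pthread_mutex_unlock(s);
}

Hashing means two unrelated variables can occasionally contend for the same lock, but it bounds the memory spent on locks no matter how many atomic_t objects exist, which is the point of the scheme.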
@@ -85,7 +85,7 @@
 	_atomic_spin_lock_irqsave(v, flags);		\
 	v->counter c_op i;				\
 	_atomic_spin_unlock_irqrestore(v, flags);	\
-}							\
+}
 
 #define ATOMIC_OP_RETURN(op, c_op)				\
 static __inline__ int atomic_##op##_return(int i, atomic_t *v)	\
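The change in this hunk removes a stray line-continuation backslash after the macro's closing brace. With the backslash, the definition of ATOMIC_OP silently absorbed the next source line (harmless only because that line was blank); without it, the macro ends where it appears to end. For concreteness, this is what ATOMIC_OP(add, +=) plausibly expands to, assuming the elided head of ATOMIC_OP declares the function and an unsigned long flags local in the same style as the ATOMIC_OP_RETURN head visible above:

/* Assumed expansion of ATOMIC_OP(add, +=); the macro head is outside
 * this hunk, so the signature below is inferred from ATOMIC_OP_RETURN. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);	/* hashed lock, IRQs off */
	v->counter += i;			/* c_op is +=            */
	_atomic_spin_unlock_irqrestore(v, flags);
}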
@@ -136,8 +136,6 @@
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 #ifdef CONFIG_64BIT
 
 #define ATOMIC64_INIT(i)	{ (i) }
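The arch-local ATOMIC_INIT is deleted here while ATOMIC64_INIT stays, which suggests the 32-bit initializer now comes from a shared header rather than from each architecture (in mainline it moved into linux/types.h). Every architecture spelled it the same way, so existing users are unaffected; a self-contained illustration, with the types written out rather than taken from the kernel headers:

/* Both initializers are plain brace initializers for a one-member
 * struct; that uniformity is what lets one shared definition replace
 * the per-architecture copies. Types are sketched here for context. */
typedef struct { int counter; } atomic_t;
typedef struct { long long counter; } atomic64_t;	/* s64 in the kernel */

#define ATOMIC_INIT(i)		{ (i) }	/* now from the shared header */
#define ATOMIC64_INIT(i)	{ (i) }	/* still per-arch, as above   */

static atomic_t   nr_callers  = ATOMIC_INIT(0);
static atomic64_t total_bytes = ATOMIC64_INIT(0);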
@@ -150,7 +148,7 @@
 	_atomic_spin_lock_irqsave(v, flags);		\
 	v->counter c_op i;				\
 	_atomic_spin_unlock_irqrestore(v, flags);	\
-}							\
+}
 
 #define ATOMIC64_OP_RETURN(op, c_op)				\
 static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
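The body of ATOMIC64_OP_RETURN falls outside this hunk, but by analogy with ATOMIC64_OP above it presumably performs the update and samples the result inside the same critical section, so the value returned is exactly the one stored. A sketch of ATOMIC64_OP_RETURN(add, +=) under that assumption:

/* Assumed expansion of ATOMIC64_OP_RETURN(add, +=); only the function
 * head is visible in the hunk, the body below mirrors ATOMIC64_OP. */
static __inline__ s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	unsigned long flags;
	s64 ret;

	_atomic_spin_lock_irqsave(v, flags);
	ret = (v->counter += i);	/* update and read under one lock */
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}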