@@ -25,15 +25,15 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	raw_spinlock_t lock;
+	hard_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
 	[0 ... (NR_LOCKS - 1)] = {
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+		.lock = __HARD_SPIN_LOCK_INITIALIZER(atomic64_lock.lock),
 	},
 };
 
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline hard_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
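This appears to be the Dovetail-style conversion of the atomic64 emulation locks from raw to hard spinlocks, so that the hashed locks remain usable from contexts running with only hard interrupt protection. For reference (not part of this hunk), the elided remainder of lock_addr() in mainline lib/atomic64.c hashes the variable's address down to one of the NR_LOCKS cacheline-padded slots declared above, roughly:

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;

Only the lock type changes; the hashing itself is untouched by this patch.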
|---|
@@ -45,7 +45,7 @@
 s64 atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
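Note that the raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() calls are left as-is: as the hunks here suggest, the raw_spin_* API is expected to dispatch on the lock type under Dovetail, spinning with hard interrupts disabled when handed a hard_spinlock_t, which is why only the declarations change. The elided tail of atomic64_read() is unchanged and, in mainline, reads roughly:

	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;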
|---|
@@ -58,7 +58,7 @@
 void atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 
 	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
|---|
@@ -70,7 +70,7 @@
 void atomic64_##op(s64 a, atomic64_t *v)			\
 {								\
 	unsigned long flags;					\
-	raw_spinlock_t *lock = lock_addr(v);			\
+	hard_spinlock_t *lock = lock_addr(v);			\
 								\
 	raw_spin_lock_irqsave(lock, flags);			\
 	v->counter c_op a;					\
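Instantiated as, say, ATOMIC64_OP(add, +=), the generator in this hunk expands (modulo the EXPORT_SYMBOL that follows it in the full macro) to:

	void atomic64_add(s64 a, atomic64_t *v)
	{
		unsigned long flags;
		hard_spinlock_t *lock = lock_addr(v);

		raw_spin_lock_irqsave(lock, flags);
		v->counter += a;
		raw_spin_unlock_irqrestore(lock, flags);
	}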
|---|
@@ -82,7 +82,7 @@
 s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
 {								\
 	unsigned long flags;					\
-	raw_spinlock_t *lock = lock_addr(v);			\
+	hard_spinlock_t *lock = lock_addr(v);			\
 	s64 val;						\
 								\
 	raw_spin_lock_irqsave(lock, flags);			\
|---|
@@ -96,7 +96,7 @@
 s64 atomic64_fetch_##op(s64 a, atomic64_t *v)			\
 {								\
 	unsigned long flags;					\
-	raw_spinlock_t *lock = lock_addr(v);			\
+	hard_spinlock_t *lock = lock_addr(v);			\
 	s64 val;						\
 								\
 	raw_spin_lock_irqsave(lock, flags);			\
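For orientation, mainline stitches these three generators together with an ATOMIC64_OPS() helper along these lines (the arithmetic ops get all three variants; the bitwise ops get only the void and fetch forms):

	#define ATOMIC64_OPS(op, c_op)		\
		ATOMIC64_OP(op, c_op)		\
		ATOMIC64_OP_RETURN(op, c_op)	\
		ATOMIC64_FETCH_OP(op, c_op)

	ATOMIC64_OPS(add, +=)
	ATOMIC64_OPS(sub, -=)

Since the lock type is hidden behind lock_addr(), none of these instantiations need to change in this patch.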
|---|
@@ -133,7 +133,7 @@
 s64 atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
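The conditional-decrement logic held under the lock is not part of the change; in mainline the function continues roughly as:

	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;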
|---|
@@ -148,7 +148,7 @@
 s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
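Because every accessor, including atomic64_read(), takes the same hashed lock, atomic64_cmpxchg() stays atomic with respect to all of them. As a usage sketch, a caller could layer a hypothetical saturating add on top of it in the usual compare-and-swap retry style:

	/* Hypothetical helper, not part of this patch. */
	static void atomic64_add_sat(atomic64_t *v, s64 a, s64 limit)
	{
		s64 old, new;

		do {
			old = atomic64_read(v);
			new = old + a > limit ? limit : old + a;
		} while (atomic64_cmpxchg(v, old, new) != old);
	}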
|---|
@@ -163,7 +163,7 @@
 s64 atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
|---|
@@ -177,7 +177,7 @@
 s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
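In mainline the elided body adds a only when the counter does not equal u, and returns the old value:

	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;

The generic atomic64_add_unless()/atomic64_inc_not_zero() wrappers are built on this return-value convention, essentially:

	static inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
	{
		return atomic64_fetch_add_unless(v, a, u) != u;
	}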