@@ -96,7 +96,7 @@
 
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
- * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
@@ -147,7 +147,7 @@
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
+	spin_release(&lock->dep_map, _RET_IP_);
 	do_raw_spin_unlock(lock);
 	preempt_enable();
 }
@@ -155,7 +155,7 @@
 static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
 					    unsigned long flags)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
+	spin_release(&lock->dep_map, _RET_IP_);
 	do_raw_spin_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
@@ -163,7 +163,7 @@
 
 static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
+	spin_release(&lock->dep_map, _RET_IP_);
 	do_raw_spin_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
@@ -171,7 +171,7 @@
 
 static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
+	spin_release(&lock->dep_map, _RET_IP_);
 	do_raw_spin_unlock(lock);
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 }
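For context, the functional change in the unlock hunks above is the dropped nesting-level argument of spin_release(); any other unlock path annotated the same way would need the identical one-line update. A minimal sketch, assuming a hypothetical wrapper my_raw_unlock() (not part of this patch) that mirrors __raw_spin_unlock():

/*
 * Illustrative sketch only -- my_raw_unlock() is a hypothetical caller
 * used to show the before/after of the spin_release() annotation.
 */
static inline void my_raw_unlock(raw_spinlock_t *lock)
{
	/* old form: spin_release(&lock->dep_map, 1, _RET_IP_); */
	spin_release(&lock->dep_map, _RET_IP_);	/* new two-argument form */
	do_raw_spin_unlock(lock);
	preempt_enable();
}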