2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/spinlock_api_smp.h
@@ -96,7 +96,7 @@
 
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
- * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
@@ -147,7 +147,7 @@
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
+	spin_release(&lock->dep_map, _RET_IP_);
 	do_raw_spin_unlock(lock);
 	preempt_enable();
 }
@@ -155,7 +155,7 @@
 static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
 						unsigned long flags)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
+	spin_release(&lock->dep_map, _RET_IP_);
 	do_raw_spin_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
@@ -163,7 +163,7 @@
 
 static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
+	spin_release(&lock->dep_map, _RET_IP_);
 	do_raw_spin_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
@@ -171,7 +171,7 @@
 
 static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
+	spin_release(&lock->dep_map, _RET_IP_);
 	do_raw_spin_unlock(lock);
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 }
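Taken together, the hunks track two lockdep-related cleanups: the comment now names CONFIG_PREEMPTION (the Kconfig symbol that superseded CONFIG_PREEMPT for this purpose), and spin_release() loses its unused middle "nested" argument, so every unlock path passes only the dep_map and the caller IP. Below is a minimal sketch of the resulting calling convention, assuming a kernel with CONFIG_DEBUG_LOCK_ALLOC enabled (so raw_spinlock_t carries a dep_map); example_unlock is a hypothetical caller for illustration, not part of this patch.

#include <linux/spinlock.h>

/* Hypothetical open-coded unlock path, mirroring the pattern in the
 * hunks above: release the lockdep map first, then drop the hardware
 * lock, then re-enable preemption.
 */
static inline void example_unlock(raw_spinlock_t *lock)
{
	/* old form: spin_release(&lock->dep_map, 1, _RET_IP_); */
	spin_release(&lock->dep_map, _RET_IP_);	/* new two-argument form */
	do_raw_spin_unlock(lock);
	preempt_enable();
}

Any out-of-tree code that still passes three arguments to spin_release() (or to the underlying lock_release()) will fail to build against the updated header, which makes the conversion easy to catch at compile time.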