2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/lib/atomic64.c
@@ -25,15 +25,15 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	raw_spinlock_t lock;
+	hard_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
 	[0 ... (NR_LOCKS - 1)] = {
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+		.lock = __HARD_SPIN_LOCK_INITIALIZER(atomic64_lock.lock),
 	},
 };
 
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline hard_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -45,7 +45,7 @@
 s64 atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -58,7 +58,7 @@
 void atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 
 	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
@@ -70,7 +70,7 @@
 void atomic64_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	hard_spinlock_t *lock = lock_addr(v);				\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	v->counter c_op a;						\
@@ -82,7 +82,7 @@
 s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	hard_spinlock_t *lock = lock_addr(v);				\
 	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
@@ -96,7 +96,7 @@
 s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	hard_spinlock_t *lock = lock_addr(v);				\
 	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
@@ -133,7 +133,7 @@
 s64 atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -148,7 +148,7 @@
 s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -163,7 +163,7 @@
 s64 atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -177,7 +177,7 @@
 s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
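
For reference, the locking pattern this patch converts can be sketched in isolation as follows. This is a minimal illustration only, assuming the Dovetail interrupt pipeline, whose hard spinlocks (hard_spinlock_t, initialized with __HARD_SPIN_LOCK_INITIALIZER) keep hardware interrupts disabled while held and are still taken through the regular raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() calls, exactly as the patched atomic64 helpers above do. The demo_lock and demo_read names are hypothetical.

/* Sketch: a statically initialized hard spinlock guarding a 64-bit
 * value, mirroring the atomic64_read() pattern from the patch above.
 * demo_lock/demo_read are made-up names for illustration. */
static hard_spinlock_t demo_lock = __HARD_SPIN_LOCK_INITIALIZER(demo_lock);

static s64 demo_read(const s64 *p)
{
	unsigned long flags;
	s64 val;

	raw_spin_lock_irqsave(&demo_lock, flags);
	val = *p;
	raw_spin_unlock_irqrestore(&demo_lock, flags);

	return val;
}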