2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/arch/parisc/include/asm/spinlock.h
@@ -10,13 +10,21 @@
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
-	return *a == 0;
+	return READ_ONCE(*a) == 0;
 }
 
-#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+static inline void arch_spin_lock(arch_spinlock_t *x)
+{
+	volatile unsigned int *a;
+
+	a = __ldcw_align(x);
+	while (__ldcw(a) == 0)
+		while (*a == 0)
+			continue;
+}
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-					 unsigned long flags)
+					unsigned long flags)
 {
 	volatile unsigned int *a;
 
@@ -25,10 +33,8 @@
 	while (*a == 0)
 		if (flags & PSW_SM_I) {
 			local_irq_enable();
-			cpu_relax();
 			local_irq_disable();
-		} else
-			cpu_relax();
+		}
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
 
@@ -37,133 +43,107 @@
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-	mb();
-	*a = 1;
+	/* Release with ordered store. */
+	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	int ret;
 
 	a = __ldcw_align(x);
-	ret = __ldcw(a) != 0;
-
-	return ret;
+	return __ldcw(a) != 0;
 }
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers. With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter++;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter--;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
+	arch_spin_lock(&(rw->lock_mutex));
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
- retry:
-	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		rw->counter++;
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-		return 1;
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
	 */
+	if (rw->counter > 0) {
+		rw->counter--;
+		ret = 1;
 	}
 
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-	/* If write-locked, we fail to acquire the lock */
-	if (rw->counter < 0)
-		return 0;
 
-	/* Wait until we have a realistic chance at the lock */
-	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
+	return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 */
+	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+		rw->counter = 0;
+		ret = 1;
+	}
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	while (!arch_read_trylock(rw))
 		cpu_relax();
-
-	goto retry;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	while (!arch_write_trylock(rw))
+		cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-retry:
+
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-
-	if (rw->counter != 0) {
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-
-		while (rw->counter != 0)
-			cpu_relax();
-
-		goto retry;
-	}
-
-	rw->counter = -1; /* mark as write-locked */
-	mb();
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter++;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
-{
-	rw->counter = 0;
-	arch_spin_unlock(&rw->lock);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-	int result = 0;
 
 	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		if (rw->counter == 0) {
-			rw->counter = -1;
-			result = 1;
-		} else {
-			/* Read-locked. Oh well. */
-			arch_spin_unlock(&rw->lock);
-		}
-	}
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-
-	return result;
 }
 
 #endif /* __ASM_SPINLOCK_H */
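
Note on the spinlock side of the patch: arch_spin_lock() is now an open-coded test-and-test-and-set loop around __ldcw() (spin on plain loads until the word looks free, then retry the load-and-clear), and arch_spin_unlock() releases with a single ordered "stw,ma" store instead of mb() plus a plain write. The following standalone C11 sketch only illustrates that pattern and is not the kernel code: atomic_exchange stands in for the __ldcw() load-and-clear, a release store stands in for the "stw,ma" store, 1 == free follows the parisc convention visible in arch_spin_is_locked(), and all demo_* names are invented for the example.

/* Portable illustration of the test-and-test-and-set pattern above.
 * Assumptions: atomic_exchange models __ldcw() (read the word, leave 0
 * behind); the release store models the ordered "stw,ma" unlock. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_uint slot; } demo_spinlock_t;	/* 1 = free, 0 = held */

#define DEMO_SPINLOCK_INIT { 1 }

static void demo_spin_lock(demo_spinlock_t *x)
{
	/* Outer loop: the "__ldcw" attempt -- grab the word atomically and
	 * leave 0 behind; we own the lock when the old value was non-zero. */
	while (atomic_exchange_explicit(&x->slot, 0, memory_order_acquire) == 0)
		/* Inner loop: spin on plain loads only, so the contended
		 * cache line is not hammered by repeated atomic writes. */
		while (atomic_load_explicit(&x->slot, memory_order_relaxed) == 0)
			;
}

static bool demo_spin_trylock(demo_spinlock_t *x)
{
	return atomic_exchange_explicit(&x->slot, 0, memory_order_acquire) != 0;
}

static void demo_spin_unlock(demo_spinlock_t *x)
{
	/* Release store: later critical-section accesses cannot be
	 * reordered after the word becoming 1 again. */
	atomic_store_explicit(&x->slot, 1, memory_order_release);
}

int main(void)
{
	demo_spinlock_t l = DEMO_SPINLOCK_INIT;

	demo_spin_lock(&l);
	printf("trylock while held: %d\n", demo_spin_trylock(&l));	/* 0 */
	demo_spin_unlock(&l);
	printf("trylock when free:  %d\n", demo_spin_trylock(&l));	/* 1 */
	demo_spin_unlock(&l);
	return 0;
}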
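Note on the rwlock side: the lock state is now a counter that is only ever touched under lock_mutex. The counter starts at __ARCH_RW_LOCK_UNLOCKED__, each reader decrements it, a writer takes it to zero, and the blocking lock functions simply spin on the trylock variants. Below is a hedged userspace sketch of that scheme, again not the kernel code: pthread_mutex_t stands in for the __ldcw() spinlock, DEMO_RW_UNLOCKED is a made-up stand-in for __ARCH_RW_LOCK_UNLOCKED__, interrupt masking is not modelled, and the demo_* names are hypothetical.

/* Userspace sketch of the counter-plus-mutex rwlock scheme in the patch. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define DEMO_RW_UNLOCKED 0x01000000	/* hypothetical "fully unlocked" bias */

typedef struct {
	int counter;			/* DEMO_RW_UNLOCKED = free, 0 = write-locked */
	pthread_mutex_t lock_mutex;	/* serializes every access to counter */
} demo_rwlock_t;

#define DEMO_RWLOCK_INIT { DEMO_RW_UNLOCKED, PTHREAD_MUTEX_INITIALIZER }

/* 1 - read lock taken; a writer is present only when counter == 0 */
static int demo_read_trylock(demo_rwlock_t *rw)
{
	int ret = 0;

	pthread_mutex_lock(&rw->lock_mutex);
	if (rw->counter > 0) {		/* not write-locked: admit one more reader */
		rw->counter--;
		ret = 1;
	}
	pthread_mutex_unlock(&rw->lock_mutex);
	return ret;
}

/* 1 - write lock taken; only when no reader and no writer holds it */
static int demo_write_trylock(demo_rwlock_t *rw)
{
	int ret = 0;

	pthread_mutex_lock(&rw->lock_mutex);
	if (rw->counter == DEMO_RW_UNLOCKED) {
		rw->counter = 0;	/* 0 marks exclusive writer ownership */
		ret = 1;
	}
	pthread_mutex_unlock(&rw->lock_mutex);
	return ret;
}

static void demo_read_unlock(demo_rwlock_t *rw)
{
	pthread_mutex_lock(&rw->lock_mutex);
	rw->counter++;			/* hand back one reader slot */
	pthread_mutex_unlock(&rw->lock_mutex);
}

static void demo_write_unlock(demo_rwlock_t *rw)
{
	pthread_mutex_lock(&rw->lock_mutex);
	rw->counter = DEMO_RW_UNLOCKED;
	pthread_mutex_unlock(&rw->lock_mutex);
}

/* Blocking variant: spin on the trylock, as the patch does. */
static void demo_read_lock(demo_rwlock_t *rw)
{
	while (!demo_read_trylock(rw))
		sched_yield();		/* stand-in for cpu_relax() */
}

int main(void)
{
	demo_rwlock_t rw = DEMO_RWLOCK_INIT;

	demo_read_lock(&rw);
	printf("write_trylock while read-held: %d\n", demo_write_trylock(&rw));	/* 0 */
	demo_read_unlock(&rw);
	printf("write_trylock when free: %d\n", demo_write_trylock(&rw));	/* 1 */
	demo_write_unlock(&rw);
	return 0;
}

Keeping the counter behind a mutex (rather than spinning while holding it) is what lets the blocking paths stay simple: readers and writers only ever hold lock_mutex for a few instructions, and fairness is not attempted, which is why the comment in the patch notes that writers can be starved indefinitely by readers.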