.. | ..
10 | 10 | static inline int arch_spin_is_locked(arch_spinlock_t *x)
11 | 11 | {
12 | 12 |         volatile unsigned int *a = __ldcw_align(x);
13 | | -        return *a == 0;
| 13 | +        return READ_ONCE(*a) == 0;
14 | 14 | }
15 | 15 |
16 | | -#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
| 16 | +static inline void arch_spin_lock(arch_spinlock_t *x)
| 17 | +{
| 18 | +        volatile unsigned int *a;
| 19 | +
| 20 | +        a = __ldcw_align(x);
| 21 | +        while (__ldcw(a) == 0)
| 22 | +                while (*a == 0)
| 23 | +                        continue;
| 24 | +}
17 | 25 |
18 | 26 | static inline void arch_spin_lock_flags(arch_spinlock_t *x,
19 | | -                                         unsigned long flags)
| 27 | +                                        unsigned long flags)
20 | 28 | {
21 | 29 |         volatile unsigned int *a;
22 | 30 |
.. | ..
25 | 33 |                 while (*a == 0)
26 | 34 |                         if (flags & PSW_SM_I) {
27 | 35 |                                 local_irq_enable();
28 | | -                                cpu_relax();
29 | 36 |                                 local_irq_disable();
30 | | -                        } else
31 | | -                                cpu_relax();
| 37 | +                        }
32 | 38 | }
33 | 39 | #define arch_spin_lock_flags arch_spin_lock_flags
34 | 40 |
.. | ..
37 | 43 |         volatile unsigned int *a;
38 | 44 |
39 | 45 |         a = __ldcw_align(x);
40 | | -        mb();
41 | | -        *a = 1;
| 46 | +        /* Release with ordered store. */
| 47 | +        __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
42 | 48 | }
43 | 49 |
44 | 50 | static inline int arch_spin_trylock(arch_spinlock_t *x)
45 | 51 | {
46 | 52 |         volatile unsigned int *a;
47 | | -        int ret;
48 | 53 |
49 | 54 |         a = __ldcw_align(x);
50 | | -        ret = __ldcw(a) != 0;
51 | | -
52 | | -        return ret;
| 55 | +        return __ldcw(a) != 0;
53 | 56 | }
54 | 57 |
55 | 58 | /*
56 | 59 |  * Read-write spinlocks, allowing multiple readers but only one writer.
57 | | - * Linux rwlocks are unfair to writers; they can be starved for an indefinite
58 | | - * time by readers. With care, they can also be taken in interrupt context.
| 60 | + * Unfair locking as Writers could be starved indefinitely by Reader(s)
59 | 61 |  *
60 | | - * In the PA-RISC implementation, we have a spinlock and a counter.
61 | | - * Readers use the lock to serialise their access to the counter (which
62 | | - * records how many readers currently hold the lock).
63 | | - * Writers hold the spinlock, preventing any readers or other writers from
64 | | - * grabbing the rwlock.
| 62 | + * The spinlock itself is contained in @counter and access to it is
| 63 | + * serialized with @lock_mutex.
65 | 64 |  */
66 | 65 |
67 | | -/* Note that we have to ensure interrupts are disabled in case we're
68 | | - * interrupted by some other code that wants to grab the same read lock */
69 | | -static __inline__ void arch_read_lock(arch_rwlock_t *rw)
| 66 | +/* 1 - lock taken successfully */
| 67 | +static inline int arch_read_trylock(arch_rwlock_t *rw)
70 | 68 | {
| 69 | +        int ret = 0;
71 | 70 |         unsigned long flags;
72 | | -        local_irq_save(flags);
73 | | -        arch_spin_lock_flags(&rw->lock, flags);
74 | | -        rw->counter++;
75 | | -        arch_spin_unlock(&rw->lock);
76 | | -        local_irq_restore(flags);
77 | | -}
78 | 71 |
79 | | -/* Note that we have to ensure interrupts are disabled in case we're
80 | | - * interrupted by some other code that wants to grab the same read lock */
81 | | -static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
82 | | -{
83 | | -        unsigned long flags;
84 | 72 |         local_irq_save(flags);
85 | | -        arch_spin_lock_flags(&rw->lock, flags);
86 | | -        rw->counter--;
87 | | -        arch_spin_unlock(&rw->lock);
88 | | -        local_irq_restore(flags);
89 | | -}
| 73 | +        arch_spin_lock(&(rw->lock_mutex));
90 | 74 |
91 | | -/* Note that we have to ensure interrupts are disabled in case we're
92 | | - * interrupted by some other code that wants to grab the same read lock */
93 | | -static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
94 | | -{
95 | | -        unsigned long flags;
96 | | - retry:
97 | | -        local_irq_save(flags);
98 | | -        if (arch_spin_trylock(&rw->lock)) {
99 | | -                rw->counter++;
100 | | -                arch_spin_unlock(&rw->lock);
101 | | -                local_irq_restore(flags);
102 | | -                return 1;
| 75 | +        /*
| 76 | +         * zero means writer holds the lock exclusively, deny Reader.
| 77 | +         * Otherwise grant lock to first/subseq reader
| 78 | +         */
| 79 | +        if (rw->counter > 0) {
| 80 | +                rw->counter--;
| 81 | +                ret = 1;
103 | 82 |         }
104 | 83 |
| 84 | +        arch_spin_unlock(&(rw->lock_mutex));
105 | 85 |         local_irq_restore(flags);
106 | | -        /* If write-locked, we fail to acquire the lock */
107 | | -        if (rw->counter < 0)
108 | | -                return 0;
109 | 86 |
110 | | -        /* Wait until we have a realistic chance at the lock */
111 | | -        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
| 87 | +        return ret;
| 88 | +}
| 89 | +
| 90 | +/* 1 - lock taken successfully */
| 91 | +static inline int arch_write_trylock(arch_rwlock_t *rw)
| 92 | +{
| 93 | +        int ret = 0;
| 94 | +        unsigned long flags;
| 95 | +
| 96 | +        local_irq_save(flags);
| 97 | +        arch_spin_lock(&(rw->lock_mutex));
| 98 | +
| 99 | +        /*
| 100 | +         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
| 101 | +         * deny writer. Otherwise if unlocked grant to writer
| 102 | +         * Hence the claim that Linux rwlocks are unfair to writers.
| 103 | +         * (can be starved for an indefinite time by readers).
| 104 | +         */
| 105 | +        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
| 106 | +                rw->counter = 0;
| 107 | +                ret = 1;
| 108 | +        }
| 109 | +        arch_spin_unlock(&(rw->lock_mutex));
| 110 | +        local_irq_restore(flags);
| 111 | +
| 112 | +        return ret;
| 113 | +}
| 114 | +
| 115 | +static inline void arch_read_lock(arch_rwlock_t *rw)
| 116 | +{
| 117 | +        while (!arch_read_trylock(rw))
112 | 118 |                 cpu_relax();
113 | | -
114 | | -        goto retry;
115 | 119 | }
116 | 120 |
117 | | -/* Note that we have to ensure interrupts are disabled in case we're
118 | | - * interrupted by some other code that wants to read_trylock() this lock */
119 | | -static __inline__ void arch_write_lock(arch_rwlock_t *rw)
| 121 | +static inline void arch_write_lock(arch_rwlock_t *rw)
| 122 | +{
| 123 | +        while (!arch_write_trylock(rw))
| 124 | +                cpu_relax();
| 125 | +}
| 126 | +
| 127 | +static inline void arch_read_unlock(arch_rwlock_t *rw)
120 | 128 | {
121 | 129 |         unsigned long flags;
122 | | -retry:
| 130 | +
123 | 131 |         local_irq_save(flags);
124 | | -        arch_spin_lock_flags(&rw->lock, flags);
125 | | -
126 | | -        if (rw->counter != 0) {
127 | | -                arch_spin_unlock(&rw->lock);
128 | | -                local_irq_restore(flags);
129 | | -
130 | | -                while (rw->counter != 0)
131 | | -                        cpu_relax();
132 | | -
133 | | -                goto retry;
134 | | -        }
135 | | -
136 | | -        rw->counter = -1; /* mark as write-locked */
137 | | -        mb();
| 132 | +        arch_spin_lock(&(rw->lock_mutex));
| 133 | +        rw->counter++;
| 134 | +        arch_spin_unlock(&(rw->lock_mutex));
138 | 135 |         local_irq_restore(flags);
139 | 136 | }
140 | 137 |
141 | | -static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
142 | | -{
143 | | -        rw->counter = 0;
144 | | -        arch_spin_unlock(&rw->lock);
145 | | -}
146 | | -
147 | | -/* Note that we have to ensure interrupts are disabled in case we're
148 | | - * interrupted by some other code that wants to read_trylock() this lock */
149 | | -static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
| 138 | +static inline void arch_write_unlock(arch_rwlock_t *rw)
150 | 139 | {
151 | 140 |         unsigned long flags;
152 | | -        int result = 0;
153 | 141 |
154 | 142 |         local_irq_save(flags);
155 | | -        if (arch_spin_trylock(&rw->lock)) {
156 | | -                if (rw->counter == 0) {
157 | | -                        rw->counter = -1;
158 | | -                        result = 1;
159 | | -                } else {
160 | | -                        /* Read-locked. Oh well. */
161 | | -                        arch_spin_unlock(&rw->lock);
162 | | -                }
163 | | -        }
| 143 | +        arch_spin_lock(&(rw->lock_mutex));
| 144 | +        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
| 145 | +        arch_spin_unlock(&(rw->lock_mutex));
164 | 146 |         local_irq_restore(flags);
165 | | -
166 | | -        return result;
167 | 147 | }
168 | 148 |
169 | 149 | #endif /* __ASM_SPINLOCK_H */
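
For reference, the rewritten rwlock routines in this diff rely on an `arch_rwlock_t` that pairs a `lock_mutex` spinlock with a reader `counter`, and on the `__ARCH_RW_LOCK_UNLOCKED__` sentinel, none of which appear in the hunks above (they belong to the companion `asm/spinlock_types.h` change). Below is a minimal sketch, under stated assumptions, of what that layout is presumed to look like: the field names come from the diff, but the sentinel value and the initialiser are illustrative assumptions, not taken from this patch.

```c
/*
 * Assumed companion layout for asm/spinlock_types.h (sketch only):
 * every update to counter is serialised by lock_mutex; readers
 * decrement counter from the "unlocked" sentinel, and a writer
 * claims the lock by dropping counter to 0.
 */
typedef struct {
        arch_spinlock_t       lock_mutex;   /* serialises updates to counter */
        volatile unsigned int counter;      /* remaining reader slots */
} arch_rwlock_t;

/* Sentinel meaning "no readers, no writer"; value assumed for illustration. */
#define __ARCH_RW_LOCK_UNLOCKED__       0x01000000
#define __ARCH_RW_LOCK_UNLOCKED         { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
                                          .counter = __ARCH_RW_LOCK_UNLOCKED__ }
```

With a layout along these lines, `arch_write_unlock()` simply restores `counter` to the sentinel while holding `lock_mutex`, and the `rw->counter > 0` test in `arch_read_trylock()` doubles as the writer-exclusion check, so no separate writer flag is needed.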