2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/kernel/locking/rwlock-rt.c
@@ -1,5 +1,4 @@
-/*
- */
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/sched/debug.h>
 #include <linux/export.h>
 
@@ -62,7 +61,7 @@
 	lock->rtmutex.save_state = 1;
 }
 
-int __read_rt_trylock(struct rt_rw_lock *lock)
+static int __read_rt_trylock(struct rt_rw_lock *lock)
 {
 	int r, old;
 
@@ -79,7 +78,7 @@
 	return 0;
 }
 
-void __sched __read_rt_lock(struct rt_rw_lock *lock)
+static void __read_rt_lock(struct rt_rw_lock *lock)
 {
 	struct rt_mutex *m = &lock->rtmutex;
 	struct rt_mutex_waiter waiter;
@@ -142,7 +141,7 @@
 	debug_rt_mutex_free_waiter(&waiter);
 }
 
-void __read_rt_unlock(struct rt_rw_lock *lock)
+static void __read_rt_unlock(struct rt_rw_lock *lock)
 {
 	struct rt_mutex *m = &lock->rtmutex;
 	struct task_struct *tsk;
@@ -178,7 +177,7 @@
 	rt_spin_lock_slowunlock(m);
 }
 
-void __sched __write_rt_lock(struct rt_rw_lock *lock)
+static void __write_rt_lock(struct rt_rw_lock *lock)
 {
 	struct rt_mutex *m = &lock->rtmutex;
 	struct task_struct *self = current;
@@ -212,7 +211,7 @@
 		raw_spin_unlock_irqrestore(&m->wait_lock, flags);
 
 		if (atomic_read(&lock->readers) != 0)
-			schedule();
+			preempt_schedule_lock();
 
 		raw_spin_lock_irqsave(&m->wait_lock, flags);
 
@@ -222,7 +221,7 @@
 	}
 }
 
-int __write_rt_trylock(struct rt_rw_lock *lock)
+static int __write_rt_trylock(struct rt_rw_lock *lock)
 {
 	struct rt_mutex *m = &lock->rtmutex;
 	unsigned long flags;
@@ -242,50 +241,13 @@
 	return 0;
 }
 
-void __write_rt_unlock(struct rt_rw_lock *lock)
+static void __write_rt_unlock(struct rt_rw_lock *lock)
 {
 	struct rt_mutex *m = &lock->rtmutex;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&m->wait_lock, flags);
 	__write_unlock_common(lock, WRITER_BIAS, flags);
-}
-
-/* Map the reader biased implementation */
-static inline int do_read_rt_trylock(rwlock_t *rwlock)
-{
-	return __read_rt_trylock(rwlock);
-}
-
-static inline int do_write_rt_trylock(rwlock_t *rwlock)
-{
-	return __write_rt_trylock(rwlock);
-}
-
-static inline void do_read_rt_lock(rwlock_t *rwlock)
-{
-	__read_rt_lock(rwlock);
-}
-
-static inline void do_write_rt_lock(rwlock_t *rwlock)
-{
-	__write_rt_lock(rwlock);
-}
-
-static inline void do_read_rt_unlock(rwlock_t *rwlock)
-{
-	__read_rt_unlock(rwlock);
-}
-
-static inline void do_write_rt_unlock(rwlock_t *rwlock)
-{
-	__write_rt_unlock(rwlock);
-}
-
-static inline void do_rwlock_rt_init(rwlock_t *rwlock, const char *name,
-				     struct lock_class_key *key)
-{
-	__rwlock_biased_rt_init(rwlock, name, key);
 }
 
 int __lockfunc rt_read_can_lock(rwlock_t *rwlock)
@@ -305,15 +267,11 @@
 {
 	int ret;
 
-	sleeping_lock_inc();
-	migrate_disable();
-	ret = do_read_rt_trylock(rwlock);
+	ret = __read_rt_trylock(rwlock);
 	if (ret) {
 		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
 		rcu_read_lock();
-	} else {
-		migrate_enable();
-		sleeping_lock_dec();
+		migrate_disable();
 	}
 	return ret;
 }
@@ -323,15 +281,11 @@
 {
 	int ret;
 
-	sleeping_lock_inc();
-	migrate_disable();
-	ret = do_write_rt_trylock(rwlock);
+	ret = __write_rt_trylock(rwlock);
 	if (ret) {
 		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
 		rcu_read_lock();
-	} else {
-		migrate_enable();
-		sleeping_lock_dec();
+		migrate_disable();
 	}
 	return ret;
 }
@@ -339,46 +293,42 @@
 
 void __lockfunc rt_read_lock(rwlock_t *rwlock)
 {
-	sleeping_lock_inc();
+	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+	__read_rt_lock(rwlock);
 	rcu_read_lock();
 	migrate_disable();
-	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
-	do_read_rt_lock(rwlock);
 }
 EXPORT_SYMBOL(rt_read_lock);
 
 void __lockfunc rt_write_lock(rwlock_t *rwlock)
 {
-	sleeping_lock_inc();
+	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+	__write_rt_lock(rwlock);
 	rcu_read_lock();
 	migrate_disable();
-	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-	do_write_rt_lock(rwlock);
 }
 EXPORT_SYMBOL(rt_write_lock);
 
 void __lockfunc rt_read_unlock(rwlock_t *rwlock)
 {
-	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
-	do_read_rt_unlock(rwlock);
+	rwlock_release(&rwlock->dep_map, _RET_IP_);
 	migrate_enable();
 	rcu_read_unlock();
-	sleeping_lock_dec();
+	__read_rt_unlock(rwlock);
 }
 EXPORT_SYMBOL(rt_read_unlock);
 
 void __lockfunc rt_write_unlock(rwlock_t *rwlock)
 {
-	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
-	do_write_rt_unlock(rwlock);
+	rwlock_release(&rwlock->dep_map, _RET_IP_);
 	migrate_enable();
 	rcu_read_unlock();
-	sleeping_lock_dec();
+	__write_rt_unlock(rwlock);
}
 EXPORT_SYMBOL(rt_write_unlock);
 
 void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
 {
-	do_rwlock_rt_init(rwlock, name, key);
+	__rwlock_biased_rt_init(rwlock, name, key);
 }
 EXPORT_SYMBOL(__rt_rwlock_init);