2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/locking/rtmutex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
  *
@@ -7,13 +8,13 @@
  * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  * Copyright (C) 2006 Esben Nielsen
- * Adaptive Spinlocks:
+ * Adaptive Spinlocks:
  * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
  *                    and Peter Morreale,
  * Adaptive Spinlocks simplification:
  * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
  *
- * See Documentation/locking/rt-mutex-design.txt for details.
+ * See Documentation/locking/rt-mutex-design.rst for details.
  */
 #include <linux/spinlock.h>
 #include <linux/export.h>
@@ -23,8 +24,8 @@
 #include <linux/sched/wake_q.h>
 #include <linux/sched/debug.h>
 #include <linux/timer.h>
+#include <trace/hooks/dtask.h>
 #include <linux/ww_mutex.h>
-#include <linux/blkdev.h>

 #include "rtmutex_common.h"

@@ -63,7 +64,7 @@
         if (rt_mutex_has_waiters(lock))
                 val |= RT_MUTEX_HAS_WAITERS;

-        lock->owner = (struct task_struct *)val;
+        WRITE_ONCE(lock->owner, (struct task_struct *)val);
 }

 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
@@ -153,7 +154,6 @@
  * set up.
  */
 #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
 # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
 # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

@@ -214,7 +214,6 @@
 }

 #else
-# define rt_mutex_cmpxchg_relaxed(l,c,n) (0)
 # define rt_mutex_cmpxchg_acquire(l,c,n) (0)
 # define rt_mutex_cmpxchg_release(l,c,n) (0)

@@ -641,7 +640,6 @@
          * walk, we detected a deadlock.
          */
         if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-                debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
                 raw_spin_unlock(&lock->wait_lock);
                 ret = -EDEADLK;
                 goto out_unlock_pi;
@@ -670,8 +668,7 @@
         }

         /* [10] Grab the next task, i.e. owner of @lock */
-        task = rt_mutex_owner(lock);
-        get_task_struct(task);
+        task = get_task_struct(rt_mutex_owner(lock));
         raw_spin_lock(&task->pi_lock);

         /*
@@ -754,8 +751,7 @@
         }

         /* [10] Grab the next task, i.e. the owner of @lock */
-        task = rt_mutex_owner(lock);
-        get_task_struct(task);
+        task = get_task_struct(rt_mutex_owner(lock));
         raw_spin_lock(&task->pi_lock);

         /* [11] requeue the pi waiters if necessary */
@@ -964,7 +960,7 @@
         return 1;
 }

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 /*
  * preemptible spin_lock functions:
  */
@@ -1071,10 +1067,8 @@

                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

-                debug_rt_mutex_print_deadlock(waiter);
-
                 if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
-                        schedule();
+                        preempt_schedule_lock();

                 raw_spin_lock_irqsave(&lock->wait_lock, flags);

@@ -1141,11 +1135,10 @@

 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
-        sleeping_lock_inc();
-        rcu_read_lock();
-        migrate_disable();
         spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
         rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+        rcu_read_lock();
+        migrate_disable();
 }
 EXPORT_SYMBOL(rt_spin_lock);

@@ -1157,23 +1150,31 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
 {
-        sleeping_lock_inc();
-        rcu_read_lock();
-        migrate_disable();
         spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
         rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+        rcu_read_lock();
+        migrate_disable();
 }
 EXPORT_SYMBOL(rt_spin_lock_nested);
+
+void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,
+                                       struct lockdep_map *nest_lock)
+{
+        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+        rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+        rcu_read_lock();
+        migrate_disable();
+}
+EXPORT_SYMBOL(rt_spin_lock_nest_lock);
 #endif

 void __lockfunc rt_spin_unlock(spinlock_t *lock)
 {
         /* NOTE: we always pass in '1' for nested, for simplicity */
-        spin_release(&lock->dep_map, 1, _RET_IP_);
-        rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+        spin_release(&lock->dep_map, _RET_IP_);
         migrate_enable();
         rcu_read_unlock();
-        sleeping_lock_dec();
+        rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
 }
 EXPORT_SYMBOL(rt_spin_unlock);

@@ -1188,26 +1189,22 @@
  * (like raw spinlocks do), we lock and unlock, to force the kernel to
  * schedule if there's contention:
  */
-void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
+void __lockfunc rt_spin_lock_unlock(spinlock_t *lock)
 {
         spin_lock(lock);
         spin_unlock(lock);
 }
-EXPORT_SYMBOL(rt_spin_unlock_wait);
+EXPORT_SYMBOL(rt_spin_lock_unlock);

 int __lockfunc rt_spin_trylock(spinlock_t *lock)
 {
         int ret;

-        sleeping_lock_inc();
-        migrate_disable();
         ret = __rt_mutex_trylock(&lock->lock);
         if (ret) {
                 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                 rcu_read_lock();
-        } else {
-                migrate_enable();
-                sleeping_lock_dec();
+                migrate_disable();
         }
         return ret;
 }
@@ -1220,31 +1217,15 @@
         local_bh_disable();
         ret = __rt_mutex_trylock(&lock->lock);
         if (ret) {
-                sleeping_lock_inc();
+                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                 rcu_read_lock();
                 migrate_disable();
-                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-        } else
+        } else {
                 local_bh_enable();
-        return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock_bh);
-
-int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
-{
-        int ret;
-
-        *flags = 0;
-        ret = __rt_mutex_trylock(&lock->lock);
-        if (ret) {
-                sleeping_lock_inc();
-                rcu_read_lock();
-                migrate_disable();
-                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
         }
         return ret;
 }
-EXPORT_SYMBOL(rt_spin_trylock_irqsave);
+EXPORT_SYMBOL(rt_spin_trylock_bh);

 void
 __rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)
@@ -1259,9 +1240,9 @@
 }
 EXPORT_SYMBOL(__rt_spin_lock_init);

-#endif /* PREEMPT_RT_FULL */
+#endif /* PREEMPT_RT */

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 static inline int __sched
 __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -1578,6 +1559,7 @@
 {
         int ret = 0;

+        trace_android_vh_rtmutex_wait_start(lock);
         for (;;) {
                 /* Try to acquire the lock: */
                 if (try_to_take_rt_mutex(lock, current, waiter))
@@ -1600,14 +1582,13 @@

                 raw_spin_unlock_irq(&lock->wait_lock);

-                debug_rt_mutex_print_deadlock(waiter);
-
                 schedule();

                 raw_spin_lock_irq(&lock->wait_lock);
                 set_current_state(state);
         }

+        trace_android_vh_rtmutex_wait_finish(lock);
         __set_current_state(TASK_RUNNING);
         return ret;
 }
@@ -1622,10 +1603,6 @@
         if (res != -EDEADLOCK || detect_deadlock)
                 return;

-        /*
-         * Yell lowdly and stop the task right here.
-         */
-        rt_mutex_print_deadlock(w);
         while (1) {
                 set_current_state(TASK_INTERRUPTIBLE);
                 schedule();
@@ -1672,7 +1649,7 @@
         ww_ctx->acquired++;
 }

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 static void ww_mutex_account_lock(struct rt_mutex *lock,
                                   struct ww_acquire_ctx *ww_ctx)
 {
@@ -1716,7 +1693,7 @@
 {
         int ret;

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
         if (ww_ctx) {
                 struct ww_mutex *ww;

@@ -1934,36 +1911,7 @@
         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                 return 0;

-        /*
-         * If rt_mutex blocks, the function sched_submit_work will not call
-         * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
-         * We must call blk_schedule_flush_plug here, if we don't call it,
-         * a deadlock in I/O may happen.
-         */
-        if (unlikely(blk_needs_flush_plug(current)))
-                blk_schedule_flush_plug(current);
-
         return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
-}
-
-static inline int
-rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-                        struct hrtimer_sleeper *timeout,
-                        enum rtmutex_chainwalk chwalk,
-                        struct ww_acquire_ctx *ww_ctx,
-                        int (*slowfn)(struct rt_mutex *lock, int state,
-                                      struct hrtimer_sleeper *timeout,
-                                      enum rtmutex_chainwalk chwalk,
-                                      struct ww_acquire_ctx *ww_ctx))
-{
-        if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
-            likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-                return 0;
-
-        if (unlikely(blk_needs_flush_plug(current)))
-                blk_schedule_flush_plug(current);
-
-        return slowfn(lock, state, timeout, chwalk, ww_ctx);
 }

@@ -1977,7 +1925,7 @@
 }

 /*
- * Performs the wakeup of the the top-waiter and re-enables preemption.
+ * Performs the wakeup of the top-waiter and re-enables preemption.
  */
 void rt_mutex_postunlock(struct wake_q_head *wake_q,
                          struct wake_q_head *wake_sleeper_q)
@@ -2025,7 +1973,9 @@
         mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
         ret = __rt_mutex_lock_state(lock, state);
         if (ret)
-                mutex_release(&lock->dep_map, 1, _RET_IP_);
+                mutex_release(&lock->dep_map, _RET_IP_);
+        trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
+
         return ret;
 }

@@ -2046,9 +1996,9 @@
         __rt_mutex_lock(lock, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-#endif

-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -2089,57 +2039,9 @@
         return __rt_mutex_slowtrylock(lock);
 }

-/**
- * rt_mutex_lock_killable - lock a rt_mutex killable
- *
- * @lock: the rt_mutex to be locked
- * @detect_deadlock: deadlock detection on/off
- *
- * Returns:
- *  0 on success
- * -EINTR when interrupted by a signal
- */
-int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-{
-        return rt_mutex_lock_state(lock, 0, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-
-/**
- * rt_mutex_timed_lock - lock a rt_mutex interruptible
- *                       the timeout structure is provided
- *                       by the caller
- *
- * @lock:    the rt_mutex to be locked
- * @timeout: timeout structure or NULL (no timeout)
- *
- * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
- * -ETIMEDOUT  when the timeout expired
- */
-int
-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
-{
-        int ret;
-
-        might_sleep();
-
-        mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-        ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-                                      RT_MUTEX_MIN_CHAINWALK,
-                                      NULL,
-                                      rt_mutex_slowlock);
-        if (ret)
-                mutex_release(&lock->dep_map, 1, _RET_IP_);
-
-        return ret;
-}
-EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-
 int __sched __rt_mutex_trylock(struct rt_mutex *lock)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
         if (WARN_ON_ONCE(in_irq() || in_nmi()))
 #else
         if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
@@ -2167,10 +2069,11 @@
         ret = __rt_mutex_trylock(lock);
         if (ret)
                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+        else
+                trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);

         return ret;
 }
-EXPORT_SYMBOL_GPL(rt_mutex_trylock);

 void __sched __rt_mutex_unlock(struct rt_mutex *lock)
 {
@@ -2184,8 +2087,9 @@
  */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
-        mutex_release(&lock->dep_map, 1, _RET_IP_);
+        mutex_release(&lock->dep_map, _RET_IP_);
         __rt_mutex_unlock(lock);
+        trace_android_vh_record_rtmutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);

@@ -2250,9 +2154,6 @@
 void rt_mutex_destroy(struct rt_mutex *lock)
 {
         WARN_ON(rt_mutex_is_locked(lock));
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-        lock->magic = NULL;
-#endif
 }
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);

@@ -2375,7 +2276,7 @@
         if (try_to_take_rt_mutex(lock, task, NULL))
                 return 1;

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
         /*
          * In PREEMPT_RT there's an added race.
          * If the task, that we are about to requeue, times out,
@@ -2419,8 +2320,6 @@

         if (ret)
                 fixup_rt_mutex_blocked(lock);
-
-        debug_rt_mutex_print_deadlock(waiter);

         return ret;
 }
@@ -2486,7 +2385,7 @@
  *          been started.
  * @waiter: the pre-initialized rt_mutex_waiter
  *
- * Wait for the the lock acquisition started on our behalf by
+ * Wait for the lock acquisition started on our behalf by
  * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
  * rt_mutex_cleanup_proxy_lock().
  *
@@ -2580,7 +2479,7 @@
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-        unsigned tmp;
+        unsigned int tmp;

         if (ctx->deadlock_inject_countdown-- == 0) {
                 tmp = ctx->deadlock_inject_interval;
@@ -2602,7 +2501,7 @@
         return 0;
 }

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 int __sched
 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -2615,13 +2514,13 @@
         ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
                                 ctx);
         if (ret)
-                mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+                mutex_release(&lock->base.dep_map, _RET_IP_);
         else if (!ret && ctx && ctx->acquired > 1)
                 return ww_mutex_deadlock_injection(lock, ctx);

         return ret;
 }
-EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);

 int __sched
 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
@@ -2635,23 +2534,21 @@
         ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
                                 ctx);
         if (ret)
-                mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+                mutex_release(&lock->base.dep_map, _RET_IP_);
         else if (!ret && ctx && ctx->acquired > 1)
                 return ww_mutex_deadlock_injection(lock, ctx);

         return ret;
 }
-EXPORT_SYMBOL_GPL(ww_mutex_lock);
+EXPORT_SYMBOL(ww_mutex_lock);

 void __sched ww_mutex_unlock(struct ww_mutex *lock)
 {
-        int nest = !!lock->ctx;
-
         /*
          * The unlocking fastpath is the 0->1 transition from 'locked'
          * into 'unlocked' state:
          */
-        if (nest) {
+        if (lock->ctx) {
 #ifdef CONFIG_DEBUG_MUTEXES
                 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
 #endif
@@ -2660,7 +2557,7 @@
                 lock->ctx = NULL;
         }

-        mutex_release(&lock->base.dep_map, nest, _RET_IP_);
+        mutex_release(&lock->base.dep_map, _RET_IP_);
         __rt_mutex_unlock(&lock->base.lock);
 }
 EXPORT_SYMBOL(ww_mutex_unlock);