..
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
  *
..
  * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  * Copyright (C) 2006 Esben Nielsen
- * Adaptive Spinlocks:
+ * Adaptive Spinlocks:
  * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
  *                                  and Peter Morreale,
  * Adaptive Spinlocks simplification:
  * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
  *
- * See Documentation/locking/rt-mutex-design.txt for details.
+ * See Documentation/locking/rt-mutex-design.rst for details.
  */
 #include <linux/spinlock.h>
 #include <linux/export.h>
..
 #include <linux/sched/wake_q.h>
 #include <linux/sched/debug.h>
 #include <linux/timer.h>
+#include <trace/hooks/dtask.h>
 #include <linux/ww_mutex.h>
-#include <linux/blkdev.h>

 #include "rtmutex_common.h"

..
        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

-       lock->owner = (struct task_struct *)val;
+       WRITE_ONCE(lock->owner, (struct task_struct *)val);
 }

 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
..
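Note: the owner store becomes a WRITE_ONCE() because lock->owner is also read without wait_lock held; the marked access keeps the compiler from tearing, fusing, or re-issuing the store. A minimal sketch of the pattern (not this file's exact code; the lockless reader side is assumed):

```c
/* Writer publishes under lock->wait_lock; readers may peek locklessly.
 * WRITE_ONCE()/READ_ONCE() guarantee single, untorn accesses.
 */
static void publish_owner(struct rt_mutex *lock, unsigned long val)
{
        WRITE_ONCE(lock->owner, (struct task_struct *)val);
}

static struct task_struct *peek_owner(struct rt_mutex *lock)
{
        return READ_ONCE(lock->owner);  /* may run without wait_lock */
}
```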
  * set up.
  */
 #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
 # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
 # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

..
 }

 #else
-# define rt_mutex_cmpxchg_relaxed(l,c,n) (0)
 # define rt_mutex_cmpxchg_acquire(l,c,n) (0)
 # define rt_mutex_cmpxchg_release(l,c,n) (0)

..
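Note: the relaxed variant is dropped from both branches because it no longer has users; what remains is acquire ordering for taking the lock and release ordering for handing it over (the debug build defines both to 0, forcing every operation through the slow path). A simplified sketch of how the two survivors are used:

```c
/* Simplified sketch: an unlocked rt_mutex has owner == NULL, so lock
 * and unlock fast paths are single compare-and-exchange operations.
 */
static bool fast_lock(struct rt_mutex *lock)
{
        /* NULL -> current: acquire, so the critical section can't float up */
        return rt_mutex_cmpxchg_acquire(lock, NULL, current);
}

static bool fast_unlock(struct rt_mutex *lock)
{
        /* current -> NULL: release, so our stores are visible to the next owner */
        return rt_mutex_cmpxchg_release(lock, current, NULL);
}
```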
..
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-               debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
                goto out_unlock_pi;
..
        }

        /* [10] Grab the next task, i.e. owner of @lock */
-       task = rt_mutex_owner(lock);
-       get_task_struct(task);
+       task = get_task_struct(rt_mutex_owner(lock));
        raw_spin_lock(&task->pi_lock);

        /*
..
        }

        /* [10] Grab the next task, i.e. the owner of @lock */
-       task = rt_mutex_owner(lock);
-       get_task_struct(task);
+       task = get_task_struct(rt_mutex_owner(lock));
        raw_spin_lock(&task->pi_lock);

        /* [11] requeue the pi waiters if necessary */
..
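Note: both [10] sites in the priority-inheritance chain walk collapse the same two-step idiom. This works because get_task_struct() bumps the task's reference count and returns its argument, so the reference is taken before the pointer is used anywhere else; a sketch of the equivalence:

```c
/* get_task_struct() increments task->usage and returns the task, so
 * "task = rt_mutex_owner(lock); get_task_struct(task);" collapses to
 * one expression (wait_lock is held, so the owner cannot go away).
 */
static struct task_struct *grab_owner(struct rt_mutex *lock)
{
        return get_task_struct(rt_mutex_owner(lock));
}
```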
..
        return 1;
 }

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 /*
  * preemptible spin_lock functions:
  */
..

        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

-       debug_rt_mutex_print_deadlock(waiter);
-
        if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
-               schedule();
+               preempt_schedule_lock();

        raw_spin_lock_irqsave(&lock->wait_lock, flags);

..
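Note: this is the adaptive-spinning half of the RT "sleeping spinlock": a contended waiter only gives up the CPU (now via preempt_schedule_lock(), the scheduling entry point for lock waits) when spinning is pointless. A hypothetical sketch of the adaptive test, assuming the task_struct on_cpu flag (not this file's exact code):

```c
/* Hypothetical sketch: spin while the current owner is executing on
 * some CPU (it may release the lock soon); stop spinning as soon as
 * the owner is preempted or the lock changes hands.
 */
static bool adaptive_wait_sketch(struct rt_mutex *lock,
                                 struct task_struct *owner)
{
        for (;;) {
                if (rt_mutex_owner(lock) != owner)
                        return false;           /* owner changed: retry the lock */
                if (!READ_ONCE(owner->on_cpu))
                        return true;            /* owner is off-CPU: block */
                cpu_relax();
        }
}
```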

 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
-       sleeping_lock_inc();
-       rcu_read_lock();
-       migrate_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+       rcu_read_lock();
+       migrate_disable();
 }
 EXPORT_SYMBOL(rt_spin_lock);

..
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
 {
-       sleeping_lock_inc();
-       rcu_read_lock();
-       migrate_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+       rcu_read_lock();
+       migrate_disable();
 }
 EXPORT_SYMBOL(rt_spin_lock_nested);
+
+void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,
+                                      struct lockdep_map *nest_lock)
+{
+       spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+       rcu_read_lock();
+       migrate_disable();
+}
+EXPORT_SYMBOL(rt_spin_lock_nest_lock);
 #endif
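Note: rt_spin_lock_nest_lock() is the RT counterpart of the non-RT nest_lock lockdep annotation, presumably backing the generic spin_lock_nest_lock() wrapper on PREEMPT_RT. It lets lockdep tell "many same-class locks taken under one serializing outer lock" apart from true recursive locking. An illustrative sketch (the parent/child types are hypothetical):

```c
struct parent {
        struct mutex     big_lock;      /* serializes the walk below */
        struct list_head children;
};

struct child {
        spinlock_t       lock;          /* all children share one lock class */
        struct list_head node;
};

static void lock_all_children(struct parent *p)
{
        struct child *c;

        mutex_lock(&p->big_lock);
        list_for_each_entry(c, &p->children, node)
                spin_lock_nest_lock(&c->lock, &p->big_lock); /* no false recursion report */
        /* ... operate on the locked set ... */
        list_for_each_entry(c, &p->children, node)
                spin_unlock(&c->lock);
        mutex_unlock(&p->big_lock);
}
```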

 void __lockfunc rt_spin_unlock(spinlock_t *lock)
 {
        /* NOTE: we always pass in '1' for nested, for simplicity */
-       spin_release(&lock->dep_map, 1, _RET_IP_);
-       rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+       spin_release(&lock->dep_map, _RET_IP_);
        migrate_enable();
        rcu_read_unlock();
-       sleeping_lock_dec();
+       rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
 }
 EXPORT_SYMBOL(rt_spin_unlock);

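Note: acquire and release are now mirror images. rt_spin_lock() takes the lock first and only then enters the RCU read-side critical section and disables migration; rt_spin_unlock() undoes those two in reverse and releases the lock last. (spin_release() also loses its unused `nested` argument here, matching the upstream lock_release() signature change, and the sleeping_lock_inc()/dec() bookkeeping disappears throughout this file.) The resulting critical-section shape, as a sketch:

```c
static DEFINE_SPINLOCK(demo_lock);      /* an RT "sleeping" spinlock on PREEMPT_RT */

static void demo_critical_section(void)
{
        rt_spin_lock(&demo_lock);   /* acquire, then rcu_read_lock(), migrate_disable() */
        /* preemptible critical section: pinned to this CPU, RCU read side held */
        rt_spin_unlock(&demo_lock); /* migrate_enable(), rcu_read_unlock(), then release */
}
```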
..
  * (like raw spinlocks do), we lock and unlock, to force the kernel to
  * schedule if there's contention:
  */
-void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
+void __lockfunc rt_spin_lock_unlock(spinlock_t *lock)
 {
        spin_lock(lock);
        spin_unlock(lock);
 }
-EXPORT_SYMBOL(rt_spin_unlock_wait);
+EXPORT_SYMBOL(rt_spin_lock_unlock);

 int __lockfunc rt_spin_trylock(spinlock_t *lock)
 {
        int ret;

-       sleeping_lock_inc();
-       migrate_disable();
        ret = __rt_mutex_trylock(&lock->lock);
        if (ret) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                rcu_read_lock();
-       } else {
-               migrate_enable();
-               sleeping_lock_dec();
+               migrate_disable();
        }
        return ret;
 }
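Note: taking rcu_read_lock()/migrate_disable() only on success removes the need for an undo branch; the failure path now falls straight through with nothing acquired. Caller-side, the pattern looks like this sketch (do_work()/fallback() are hypothetical):

```c
static void do_work(void);      /* hypothetical */
static void fallback(void);     /* hypothetical */

static void try_fast_path(spinlock_t *lock)
{
        if (rt_spin_trylock(lock)) {
                /* success: lock held, RCU read side held, migration disabled */
                do_work();
                rt_spin_unlock(lock);   /* undoes exactly what trylock set up */
        } else {
                /* failure: nothing was acquired, so nothing to unwind */
                fallback();
        }
}
```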
..
        local_bh_disable();
        ret = __rt_mutex_trylock(&lock->lock);
        if (ret) {
-               sleeping_lock_inc();
+               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                rcu_read_lock();
                migrate_disable();
-               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-       } else
+       } else {
                local_bh_enable();
-       return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock_bh);
-
-int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
-{
-       int ret;
-
-       *flags = 0;
-       ret = __rt_mutex_trylock(&lock->lock);
-       if (ret) {
-               sleeping_lock_inc();
-               rcu_read_lock();
-               migrate_disable();
-               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }
        return ret;
 }
-EXPORT_SYMBOL(rt_spin_trylock_irqsave);
+EXPORT_SYMBOL(rt_spin_trylock_bh);

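Note: rt_spin_trylock_irqsave() is removed outright (on RT these locks never disable interrupts, so it only ever set *flags to 0), and rt_spin_trylock_bh() keeps bottom halves disabled only while the lock is actually held. A caller-side sketch using the generic wrappers, which on PREEMPT_RT are assumed to resolve to the rt_ variants above:

```c
/* Bottom halves are re-enabled immediately on a failed trylock, so the
 * caller never has to undo anything on the failure path.
 */
static bool poll_shared_state(spinlock_t *lock)
{
        if (!spin_trylock_bh(lock))     /* BHs back on already */
                return false;
        /* lock held: BHs off, RCU read side held, migration disabled */
        spin_unlock_bh(lock);
        return true;
}
```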
1248 | 1229 | |
---|
1249 | 1230 | void |
---|
1250 | 1231 | __rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key) |
---|
.. | .. |
---|
1259 | 1240 | } |
---|
1260 | 1241 | EXPORT_SYMBOL(__rt_spin_lock_init); |
---|
1261 | 1242 | |
---|
1262 | | -#endif /* PREEMPT_RT_FULL */ |
---|
| 1243 | +#endif /* PREEMPT_RT */ |
---|
1263 | 1244 | |
---|
1264 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
| 1245 | +#ifdef CONFIG_PREEMPT_RT |
---|
1265 | 1246 | static inline int __sched |
---|
1266 | 1247 | __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) |
---|
1267 | 1248 | { |
---|
..
 {
        int ret = 0;

+       trace_android_vh_rtmutex_wait_start(lock);
        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
..

                raw_spin_unlock_irq(&lock->wait_lock);

-               debug_rt_mutex_print_deadlock(waiter);
-
                schedule();

                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
        }

+       trace_android_vh_rtmutex_wait_finish(lock);
        __set_current_state(TASK_RUNNING);
        return ret;
 }
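Note: trace_android_vh_rtmutex_wait_start()/..._finish() are Android vendor hooks (declared in the newly included trace/hooks/dtask.h) bracketing the blocking wait loop. Vendor modules attach via the tracepoint-style registration API; a sketch, assuming the hook is declared with TP_PROTO(struct rt_mutex *lock) as is usual for these hooks:

```c
/* Hypothetical vendor-module probe for the wait-start hook. */
static void probe_rtmutex_wait_start(void *data, struct rt_mutex *lock)
{
        /* e.g. timestamp the start of contention on @lock */
}

static int __init my_hooks_init(void)
{
        /* register function name is generated from the hook name */
        return register_trace_android_vh_rtmutex_wait_start(
                        probe_rtmutex_wait_start, NULL);
}
```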
..
        if (res != -EDEADLOCK || detect_deadlock)
                return;

-       /*
-        * Yell lowdly and stop the task right here.
-        */
-       rt_mutex_print_deadlock(w);
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
..
        ww_ctx->acquired++;
 }

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 static void ww_mutex_account_lock(struct rt_mutex *lock,
                                   struct ww_acquire_ctx *ww_ctx)
 {
..
 {
        int ret;

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
        if (ww_ctx) {
                struct ww_mutex *ww;

..
        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                return 0;

-       /*
-        * If rt_mutex blocks, the function sched_submit_work will not call
-        * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
-        * We must call blk_schedule_flush_plug here, if we don't call it,
-        * a deadlock in I/O may happen.
-        */
-       if (unlikely(blk_needs_flush_plug(current)))
-               blk_schedule_flush_plug(current);
-
        return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
-}
-
-static inline int
-rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-                       struct hrtimer_sleeper *timeout,
-                       enum rtmutex_chainwalk chwalk,
-                       struct ww_acquire_ctx *ww_ctx,
-                       int (*slowfn)(struct rt_mutex *lock, int state,
-                                     struct hrtimer_sleeper *timeout,
-                                     enum rtmutex_chainwalk chwalk,
-                                     struct ww_acquire_ctx *ww_ctx))
-{
-       if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
-           likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-               return 0;
-
-       if (unlikely(blk_needs_flush_plug(current)))
-               blk_schedule_flush_plug(current);
-
-       return slowfn(lock, state, timeout, chwalk, ww_ctx);
 }
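Note: rt_mutex_fastlock() keeps only the lock-free happy path. The removed comment described a workaround for sched_submit_work() skipping the block-plug flush for PI-blocked tasks; its deletion suggests that scheduler-side limitation no longer applies, so the per-lock flush is not needed. rt_mutex_timed_fastlock() goes away together with the timed-lock API removed further down. The surviving fast/slow split follows the usual pattern (sketch, per the signatures visible in this diff):

```c
/* Try one atomic op first; fall back to the queueing slow path only on
 * contention. owner == NULL means "unlocked", so a single acquire-ordered
 * cmpxchg from NULL to current takes the lock without touching wait_lock.
 */
static inline int fastlock_sketch(struct rt_mutex *lock, int state)
{
        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                return 0;                               /* uncontended */
        return rt_mutex_slowlock(lock, state, NULL,
                                 RT_MUTEX_MIN_CHAINWALK, NULL); /* contended */
}
```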

 static inline int
..
 }

 /*
- * Performs the wakeup of the the top-waiter and re-enables preemption.
+ * Performs the wakeup of the top-waiter and re-enables preemption.
  */
 void rt_mutex_postunlock(struct wake_q_head *wake_q,
                          struct wake_q_head *wake_sleeper_q)
..
        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        ret = __rt_mutex_lock_state(lock, state);
        if (ret)
-               mutex_release(&lock->dep_map, 1, _RET_IP_);
+               mutex_release(&lock->dep_map, _RET_IP_);
+       trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
+
        return ret;
 }

..
        __rt_mutex_lock(lock, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-#endif

-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
..
..
        return __rt_mutex_slowtrylock(lock);
 }

-/**
- * rt_mutex_lock_killable - lock a rt_mutex killable
- *
- * @lock:              the rt_mutex to be locked
- * @detect_deadlock:   deadlock detection on/off
- *
- * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
- */
-int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-{
-       return rt_mutex_lock_state(lock, 0, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-
-/**
- * rt_mutex_timed_lock - lock a rt_mutex interruptible
- *                     the timeout structure is provided
- *                     by the caller
- *
- * @lock:      the rt_mutex to be locked
- * @timeout:   timeout structure or NULL (no timeout)
- *
- * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
- * -ETIMEDOUT  when the timeout expired
- */
-int
-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
-{
-       int ret;
-
-       might_sleep();
-
-       mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-                                     RT_MUTEX_MIN_CHAINWALK,
-                                     NULL,
-                                     rt_mutex_slowlock);
-       if (ret)
-               mutex_release(&lock->dep_map, 1, _RET_IP_);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-
 int __sched __rt_mutex_trylock(struct rt_mutex *lock)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
        if (WARN_ON_ONCE(in_irq() || in_nmi()))
 #else
        if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
..
..
        ret = __rt_mutex_trylock(lock);
        if (ret)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+       else
+               trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);

        return ret;
 }
-EXPORT_SYMBOL_GPL(rt_mutex_trylock);

 void __sched __rt_mutex_unlock(struct rt_mutex *lock)
 {
..
  */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
-       mutex_release(&lock->dep_map, 1, _RET_IP_);
+       mutex_release(&lock->dep_map, _RET_IP_);
        __rt_mutex_unlock(lock);
+       trace_android_vh_record_rtmutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);

..
 void rt_mutex_destroy(struct rt_mutex *lock)
 {
        WARN_ON(rt_mutex_is_locked(lock));
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-       lock->magic = NULL;
-#endif
 }
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);

..
        if (try_to_take_rt_mutex(lock, task, NULL))
                return 1;

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
        /*
         * In PREEMPT_RT there's an added race.
         * If the task, that we are about to requeue, times out,
..

        if (ret)
                fixup_rt_mutex_blocked(lock);
-
-       debug_rt_mutex_print_deadlock(waiter);

        return ret;
 }
..
  * been started.
  * @waiter:            the pre-initialized rt_mutex_waiter
  *
- * Wait for the the lock acquisition started on our behalf by
+ * Wait for the lock acquisition started on our behalf by
  * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
  * rt_mutex_cleanup_proxy_lock().
  *
..
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-       unsigned tmp;
+       unsigned int tmp;

        if (ctx->deadlock_inject_countdown-- == 0) {
                tmp = ctx->deadlock_inject_interval;
..
        return 0;
 }

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 int __sched
 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
..
        ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
                                ctx);
        if (ret)
-               mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+               mutex_release(&lock->base.dep_map, _RET_IP_);
        else if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
 }
-EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);

 int __sched
 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
..
        ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
                                ctx);
        if (ret)
-               mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+               mutex_release(&lock->base.dep_map, _RET_IP_);
        else if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
 }
-EXPORT_SYMBOL_GPL(ww_mutex_lock);
+EXPORT_SYMBOL(ww_mutex_lock);

 void __sched ww_mutex_unlock(struct ww_mutex *lock)
 {
-       int nest = !!lock->ctx;
-
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
-       if (nest) {
+       if (lock->ctx) {
 #ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
 #endif
..
                lock->ctx = NULL;
        }

-       mutex_release(&lock->base.dep_map, nest, _RET_IP_);
+       mutex_release(&lock->base.dep_map, _RET_IP_);
        __rt_mutex_unlock(&lock->base.lock);
 }
 EXPORT_SYMBOL(ww_mutex_unlock);
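Note: with mutex_release() no longer taking a `nested` argument, the local `nest` bookkeeping disappears and the ctx test moves inline; the exports also relax from EXPORT_SYMBOL_GPL to EXPORT_SYMBOL, matching the non-RT ww_mutex implementation. For context, the wound/wait API these RT variants back is used roughly like this sketch (the class and names are hypothetical):

```c
static DEFINE_WW_CLASS(demo_ww_class);          /* hypothetical lock class */

static void demo(struct ww_mutex *m)
{
        struct ww_acquire_ctx ctx;

        ww_acquire_init(&ctx, &demo_ww_class);
        /* with a single lock and a fresh ctx, -EDEADLK cannot occur;
         * the _interruptible variant may additionally return -EINTR */
        if (ww_mutex_lock(m, &ctx) == 0) {
                /* ... critical section ... */
                ww_mutex_unlock(m);             /* clears m->ctx, then releases */
        }
        ww_acquire_fini(&ctx);
}
```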
---|