--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * kernel/locking/mutex.c
  *
@@ -15,7 +16,7 @@
  * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
  * and Sven Dietrich.
  *
- * Also see Documentation/locking/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.rst.
  */
 #include <linux/mutex.h>
 #include <linux/ww_mutex.h>
@@ -34,6 +35,8 @@
 #else
 # include "mutex.h"
 #endif
+
+#include <trace/hooks/dtask.h>
 
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
@@ -64,10 +67,36 @@
 
 #define MUTEX_FLAGS		0x07
 
+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
+static inline struct task_struct *__mutex_owner(struct mutex *lock)
+{
+	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
+}
+
 static inline struct task_struct *__owner_task(unsigned long owner)
 {
 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
 }
+
+bool mutex_is_locked(struct mutex *lock)
+{
+	return __mutex_owner(lock) != NULL;
+}
+EXPORT_SYMBOL(mutex_is_locked);
+
+__must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+	if (unlikely(__mutex_owner(lock) == current))
+		return MUTEX_TRYLOCK_RECURSIVE;
+
+	return mutex_trylock(lock);
+}
+EXPORT_SYMBOL(mutex_trylock_recursive);
 
 static inline unsigned long __owner_flags(unsigned long owner)
 {
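Aside, not part of the patch: the tri-state return of mutex_trylock_recursive() is easy to misuse, because MUTEX_TRYLOCK_RECURSIVE (per the enum in <linux/mutex.h>) means the calling task already owns the lock and must not unlock it on that path. A minimal caller sketch; do_protected_work() is a hypothetical helper:

#include <linux/mutex.h>

static int demo_try_work(struct mutex *lock)
{
	enum mutex_trylock_recursive_enum r = mutex_trylock_recursive(lock);

	if (r == MUTEX_TRYLOCK_FAILED)
		return -EBUSY;		/* held by another task */

	do_protected_work();		/* hypothetical critical section */

	/* Only drop the lock if this call actually took it. */
	if (r == MUTEX_TRYLOCK_SUCCESS)
		mutex_unlock(lock);

	return 0;
}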
@@ -141,8 +170,10 @@
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
 
-	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
+	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		return true;
+	}
 
 	return false;
 }
@@ -181,9 +212,12 @@
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
+	bool already_on_list = false;
 	debug_mutex_add_waiter(lock, waiter, current);
 
-	list_add_tail(&waiter->list, list);
+	trace_android_vh_alter_mutex_list_add(lock, waiter, list, &already_on_list);
+	if (!already_on_list)
+		list_add_tail(&waiter->list, list);
 	if (__mutex_waiter_is_first(lock, waiter))
 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 }
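Aside, not part of the patch: Android vendor hooks are built on tracepoints, so a vendor module attaches a handler through the generated register_trace_android_vh_*() helper. A minimal sketch for the list-add hook, assuming the prototype declared in trace/hooks/dtask.h matches the call site above; the head-of-queue policy is purely illustrative:

#include <linux/mutex.h>
#include <trace/hooks/dtask.h>

/* Place the waiter ourselves and tell the core to skip its tail-add. */
static void vendor_alter_mutex_list_add(void *unused, struct mutex *lock,
					struct mutex_waiter *waiter,
					struct list_head *list,
					bool *already_on_list)
{
	list_add(&waiter->list, list);	/* queue at the head instead */
	*already_on_list = true;	/* core skips list_add_tail() */
}

static int __init vendor_hook_init(void)
{
	return register_trace_android_vh_alter_mutex_list_add(
			vendor_alter_mutex_list_add, NULL);
}

Reordering waiters this way interacts with the FIFO and handoff assumptions elsewhere in this file, so a real policy needs more care than this sketch.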
@@ -716,6 +750,7 @@
  */
 void __sched mutex_unlock(struct mutex *lock)
 {
+	trace_android_vh_record_mutex_lock_starttime(current, 0);
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 	if (__mutex_unlock_fast(lock))
 		return;
@@ -919,6 +954,10 @@
 
 	might_sleep();
 
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
 	ww = container_of(lock, struct ww_mutex, base);
 	if (ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
@@ -942,6 +981,7 @@
 		lock_acquired(&lock->dep_map, ip);
 		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		preempt_enable();
 		return 0;
 	}
@@ -983,6 +1023,7 @@
 
 	waiter.task = current;
 
+	trace_android_vh_mutex_wait_start(lock);
 	set_current_state(state);
 	for (;;) {
 		bool first;
@@ -1001,7 +1042,7 @@
 		 * wait_lock. This ensures the lock cancellation is ordered
 		 * against mutex_unlock() and wake-ups do not go missing.
 		 */
-		if (unlikely(signal_pending_state(state, current))) {
+		if (signal_pending_state(state, current)) {
 			ret = -EINTR;
 			goto err;
 		}
@@ -1034,6 +1075,7 @@
 	spin_lock(&lock->wait_lock);
 acquired:
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_mutex_wait_finish(lock);
 
 	if (ww_ctx) {
 		/*
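Aside, not part of the patch: the wait_start/wait_finish pair brackets only the sleeping slow path (including the error exit below), so a handler sees genuine contention. A sketch that measures wait latency, assuming slot 1 of the per-task android_vendor_data1 scratch array is unused; both the slot choice and the one-second threshold are hypothetical:

static void vendor_mutex_wait_start(void *unused, struct mutex *lock)
{
	current->android_vendor_data1[1] = jiffies;	/* hypothetical slot */
}

static void vendor_mutex_wait_finish(void *unused, struct mutex *lock)
{
	unsigned long waited = jiffies - current->android_vendor_data1[1];

	if (waited > HZ)	/* blocked for over a second */
		pr_warn("%s: long mutex wait: %lu jiffies\n",
			current->comm, waited);
}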
@@ -1058,15 +1100,17 @@
 
 	spin_unlock(&lock->wait_lock);
 	preempt_enable();
+	trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 	return 0;
 
 err:
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_mutex_wait_finish(lock);
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
 	spin_unlock(&lock->wait_lock);
 	debug_mutex_free_waiter(&waiter);
-	mutex_release(&lock->dep_map, 1, ip);
+	mutex_release(&lock->dep_map, ip);
 	preempt_enable();
 	return ret;
 }
@@ -1200,7 +1244,7 @@
 	DEFINE_WAKE_Q(wake_q);
 	unsigned long owner;
 
-	mutex_release(&lock->dep_map, 1, ip);
+	mutex_release(&lock->dep_map, ip);
 
 	/*
 	 * Release the lock before (potentially) taking the spinlock such that
@@ -1250,9 +1294,11 @@
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);
 
+	trace_android_vh_mutex_unlock_slowpath(lock);
 	spin_unlock(&lock->wait_lock);
 
 	wake_up_q(&wake_q);
+	trace_android_vh_mutex_unlock_slowpath_end(lock, next);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -1384,10 +1430,17 @@
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	bool locked = __mutex_trylock(lock);
+	bool locked;
 
-	if (locked)
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
+	locked = __mutex_trylock(lock);
+	if (locked) {
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	}
 
 	return locked;
 }
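Aside, not part of the patch: the starttime hook fires with jiffies on every successful acquisition (fast path, slow path, and trylock) and with 0 from mutex_unlock(), which lets a vendor handler track how long each task holds a mutex. A minimal sketch, again assuming a free android_vendor_data1 slot; the prototype follows the call sites above:

static void vendor_record_mutex_starttime(void *unused,
					  struct task_struct *tsk,
					  unsigned long settime_jiffies)
{
	/* Non-zero: tsk just took a mutex; zero: it released one. */
	tsk->android_vendor_data1[0] = settime_jiffies;
}

A watchdog can then compare a non-zero stored value against the current jiffies to flag tasks that have held a mutex for too long.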
---|