.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * kernel/locking/mutex.c
3 | 4 | *
.. | ..
15 | 16 | * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
16 | 17 | * and Sven Dietrich.
17 | 18 | *
18 | | - * Also see Documentation/locking/mutex-design.txt.
| 19 | + * Also see Documentation/locking/mutex-design.rst.
19 | 20 | */
20 | 21 | #include <linux/mutex.h>
21 | 22 | #include <linux/ww_mutex.h>
.. | ..
34 | 35 | #else
35 | 36 | # include "mutex.h"
36 | 37 | #endif
| 38 | +
| 39 | +#include <trace/hooks/dtask.h>
37 | 40 |
38 | 41 | void
39 | 42 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
.. | ..
64 | 67 |
65 | 68 | #define MUTEX_FLAGS 0x07
66 | 69 |
| 70 | +/*
| 71 | + * Internal helper function; C doesn't allow us to hide it :/
| 72 | + *
| 73 | + * DO NOT USE (outside of mutex code).
| 74 | + */
| 75 | +static inline struct task_struct *__mutex_owner(struct mutex *lock)
| 76 | +{
| 77 | + return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
| 78 | +}
| 79 | +
67 | 80 | static inline struct task_struct *__owner_task(unsigned long owner)
68 | 81 | {
69 | 82 | return (struct task_struct *)(owner & ~MUTEX_FLAGS);
70 | 83 | }
| 84 | +
| 85 | +bool mutex_is_locked(struct mutex *lock)
| 86 | +{
| 87 | + return __mutex_owner(lock) != NULL;
| 88 | +}
| 89 | +EXPORT_SYMBOL(mutex_is_locked);
| 90 | +
| 91 | +__must_check enum mutex_trylock_recursive_enum
| 92 | +mutex_trylock_recursive(struct mutex *lock)
| 93 | +{
| 94 | + if (unlikely(__mutex_owner(lock) == current))
| 95 | + return MUTEX_TRYLOCK_RECURSIVE;
| 96 | +
| 97 | + return mutex_trylock(lock);
| 98 | +}
| 99 | +EXPORT_SYMBOL(mutex_trylock_recursive);
71 | 100 |
72 | 101 | static inline unsigned long __owner_flags(unsigned long owner)
73 | 102 | {
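
The hunk above turns mutex_is_locked() and mutex_trylock_recursive() into out-of-line, exported helpers built on the new __mutex_owner() accessor. A minimal caller sketch (not part of this diff) of how the three-valued return of mutex_trylock_recursive() is meant to be consumed, assuming the usual MUTEX_TRYLOCK_FAILED/MUTEX_TRYLOCK_SUCCESS/MUTEX_TRYLOCK_RECURSIVE enumerators from <linux/mutex.h>; example_lock and example_do_work() are illustrative placeholders:

```c
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* illustrative lock, not from the patch */

static void example_do_work(void)
{
	/* placeholder for work done under the lock */
}

static void example_try_work(void)
{
	switch (mutex_trylock_recursive(&example_lock)) {
	case MUTEX_TRYLOCK_FAILED:
		return;				/* another task owns the lock */
	case MUTEX_TRYLOCK_RECURSIVE:
		example_do_work();		/* we already own it: do NOT unlock */
		return;
	case MUTEX_TRYLOCK_SUCCESS:
		example_do_work();
		mutex_unlock(&example_lock);	/* acquired here, so release it */
		return;
	}
}
```

The recursive case deliberately skips mutex_unlock(), since the outer caller that originally took the lock is still responsible for releasing it.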
.. | ..
141 | 170 | unsigned long curr = (unsigned long)current;
142 | 171 | unsigned long zero = 0UL;
143 | 172 |
144 | | - if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
| 173 | + if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {
| 174 | + trace_android_vh_record_mutex_lock_starttime(current, jiffies);
145 | 175 | return true;
| 176 | + }
146 | 177 |
147 | 178 | return false;
148 | 179 | }
.. | ..
181 | 212 | __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
182 | 213 | struct list_head *list)
183 | 214 | {
| 215 | + bool already_on_list = false;
184 | 216 | debug_mutex_add_waiter(lock, waiter, current);
185 | 217 |
186 | | - list_add_tail(&waiter->list, list);
| 218 | + trace_android_vh_alter_mutex_list_add(lock, waiter, list, &already_on_list);
| 219 | + if (!already_on_list)
| 220 | + list_add_tail(&waiter->list, list);
187 | 221 | if (__mutex_waiter_is_first(lock, waiter))
188 | 222 | __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
189 | 223 | }
.. | ..
534 | 568 | struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
535 | 569 | {
536 | 570 | bool ret = true;
| 571 | + int cnt = 0;
| 572 | + bool time_out = false;
537 | 573 |
538 | 574 | rcu_read_lock();
539 | 575 | while (__mutex_owner(lock) == owner) {
| 576 | + trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt);
| 577 | + if (time_out) {
| 578 | + ret = false;
| 579 | + break;
| 580 | + }
540 | 581 | /*
541 | 582 | * Ensure we emit the owner->on_cpu, dereference _after_
542 | 583 | * checking lock->owner still matches owner. If that fails,
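
The hunk above routes every iteration of the owner-spin loop through the android_vh_mutex_opt_spin_start vendor hook, which can force the spinner to give up by setting *time_out. A hedged sketch of a vendor-module handler that caps spinning at an arbitrary iteration budget; the register_trace_android_vh_mutex_opt_spin_start() name follows the usual pattern generated for Android vendor hooks, and the budget constant is purely illustrative:

```c
#include <linux/module.h>
#include <trace/hooks/dtask.h>

#define EXAMPLE_SPIN_BUDGET	2000	/* illustrative iteration cap */

/*
 * Called once per spin iteration; *cnt is owned by the spinning waiter and
 * persists across iterations of one spin episode.
 */
static void example_opt_spin_start(void *unused, struct mutex *lock,
				   bool *time_out, int *cnt)
{
	if (++(*cnt) > EXAMPLE_SPIN_BUDGET)
		*time_out = true;	/* spinner sets ret = false and sleeps */
}

static int __init example_spin_hook_init(void)
{
	return register_trace_android_vh_mutex_opt_spin_start(
			example_opt_spin_start, NULL);
}
module_init(example_spin_hook_init);
MODULE_LICENSE("GPL");
```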
.. | ..
587 | 628 | if (owner)
588 | 629 | retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
589 | 630 | rcu_read_unlock();
| 631 | + trace_android_vh_mutex_can_spin_on_owner(lock, &retval);
590 | 632 |
591 | 633 | /*
592 | 634 | * If lock->owner is not set, the mutex has been released. Return true
.. | ..
668 | 710 | if (!waiter)
669 | 711 | osq_unlock(&lock->osq);
670 | 712 |
| 713 | + trace_android_vh_mutex_opt_spin_finish(lock, true);
671 | 714 | return true;
672 | 715 |
673 | 716 |
.. | ..
676 | 719 | osq_unlock(&lock->osq);
677 | 720 |
678 | 721 | fail:
| 722 | + trace_android_vh_mutex_opt_spin_finish(lock, false);
679 | 723 | /*
680 | 724 | * If we fell out of the spin path because of need_resched(),
681 | 725 | * reschedule now, before we try-lock the mutex. This avoids getting
.. | ..
717 | 761 | void __sched mutex_unlock(struct mutex *lock)
718 | 762 | {
719 | 763 | #ifndef CONFIG_DEBUG_LOCK_ALLOC
720 | | - if (__mutex_unlock_fast(lock))
| 764 | + if (__mutex_unlock_fast(lock)) {
| 765 | + trace_android_vh_record_mutex_lock_starttime(current, 0);
721 | 766 | return;
| 767 | + }
722 | 768 | #endif
723 | 769 | __mutex_unlock_slowpath(lock, _RET_IP_);
| 770 | + trace_android_vh_record_mutex_lock_starttime(current, 0);
724 | 771 | }
725 | 772 | EXPORT_SYMBOL(mutex_unlock);
726 | 773 |
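
As the mutex_unlock() hunk above shows, the starttime hook is invoked with jiffies on acquisition (fast path, trylock, and slow path) and with 0 on release, so a vendor module can account per-task hold time. A hedged sketch of a handler observing those transitions; the registration helper name follows the usual vendor-hook pattern and the "settime" parameter name is an assumption, since only the call sites (current, jiffies / current, 0) are visible in this diff:

```c
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/hooks/dtask.h>

static void example_record_starttime(void *unused, struct task_struct *tsk,
				     unsigned long settime)
{
	if (settime)
		trace_printk("%s(%d) acquired a mutex at jiffies=%lu\n",
			     tsk->comm, tsk->pid, settime);
	else
		trace_printk("%s(%d) released a mutex\n", tsk->comm, tsk->pid);
}

static int __init example_starttime_hook_init(void)
{
	return register_trace_android_vh_record_mutex_lock_starttime(
			example_record_starttime, NULL);
}
module_init(example_starttime_hook_init);
MODULE_LICENSE("GPL");
```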
.. | ..
919 | 966 |
920 | 967 | might_sleep();
921 | 968 |
| 969 | +#ifdef CONFIG_DEBUG_MUTEXES
| 970 | + DEBUG_LOCKS_WARN_ON(lock->magic != lock);
| 971 | +#endif
| 972 | +
922 | 973 | ww = container_of(lock, struct ww_mutex, base);
923 | 974 | if (ww_ctx) {
924 | 975 | if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
.. | ..
942 | 993 | lock_acquired(&lock->dep_map, ip);
943 | 994 | if (ww_ctx)
944 | 995 | ww_mutex_set_context_fastpath(ww, ww_ctx);
| 996 | + trace_android_vh_record_mutex_lock_starttime(current, jiffies);
945 | 997 | preempt_enable();
946 | 998 | return 0;
947 | 999 | }
.. | ..
983 | 1035 |
984 | 1036 | waiter.task = current;
985 | 1037 |
| 1038 | + trace_android_vh_mutex_wait_start(lock);
986 | 1039 | set_current_state(state);
987 | 1040 | for (;;) {
988 | 1041 | bool first;
.. | ..
1001 | 1054 | * wait_lock. This ensures the lock cancellation is ordered
1002 | 1055 | * against mutex_unlock() and wake-ups do not go missing.
1003 | 1056 | */
1004 | | - if (unlikely(signal_pending_state(state, current))) {
| 1057 | + if (signal_pending_state(state, current)) {
1005 | 1058 | ret = -EINTR;
1006 | 1059 | goto err;
1007 | 1060 | }
.. | ..
1034 | 1087 | spin_lock(&lock->wait_lock);
1035 | 1088 | acquired:
1036 | 1089 | __set_current_state(TASK_RUNNING);
| 1090 | + trace_android_vh_mutex_wait_finish(lock);
1037 | 1091 |
1038 | 1092 | if (ww_ctx) {
1039 | 1093 | /*
.. | ..
1058 | 1112 |
1059 | 1113 | spin_unlock(&lock->wait_lock);
1060 | 1114 | preempt_enable();
| 1115 | + trace_android_vh_record_mutex_lock_starttime(current, jiffies);
1061 | 1116 | return 0;
1062 | 1117 |
1063 | 1118 | err:
1064 | 1119 | __set_current_state(TASK_RUNNING);
| 1120 | + trace_android_vh_mutex_wait_finish(lock);
1065 | 1121 | __mutex_remove_waiter(lock, &waiter);
1066 | 1122 | err_early_kill:
1067 | 1123 | spin_unlock(&lock->wait_lock);
1068 | 1124 | debug_mutex_free_waiter(&waiter);
1069 | | - mutex_release(&lock->dep_map, 1, ip);
| 1125 | + mutex_release(&lock->dep_map, ip);
1070 | 1126 | preempt_enable();
1071 | 1127 | return ret;
1072 | 1128 | }
.. | ..
1200 | 1256 | DEFINE_WAKE_Q(wake_q);
1201 | 1257 | unsigned long owner;
1202 | 1258 |
1203 | | - mutex_release(&lock->dep_map, 1, ip);
| 1259 | + mutex_release(&lock->dep_map, ip);
1204 | 1260 |
1205 | 1261 | /*
1206 | 1262 | * Release the lock before (potentially) taking the spinlock such that
.. | ..
1250 | 1306 | if (owner & MUTEX_FLAG_HANDOFF)
1251 | 1307 | __mutex_handoff(lock, next);
1252 | 1308 |
| 1309 | + trace_android_vh_mutex_unlock_slowpath(lock);
1253 | 1310 | spin_unlock(&lock->wait_lock);
1254 | 1311 |
1255 | 1312 | wake_up_q(&wake_q);
| 1313 | + trace_android_vh_mutex_unlock_slowpath_end(lock, next);
1256 | 1314 | }
1257 | 1315 |
1258 | 1316 | #ifndef CONFIG_DEBUG_LOCK_ALLOC
.. | ..
1384 | 1442 | */
1385 | 1443 | int __sched mutex_trylock(struct mutex *lock)
1386 | 1444 | {
1387 | | - bool locked = __mutex_trylock(lock);
| 1445 | + bool locked;
1388 | 1446 |
1389 | | - if (locked)
| 1447 | +#ifdef CONFIG_DEBUG_MUTEXES
| 1448 | + DEBUG_LOCKS_WARN_ON(lock->magic != lock);
| 1449 | +#endif
| 1450 | +
| 1451 | + locked = __mutex_trylock(lock);
| 1452 | + if (locked) {
| 1453 | + trace_android_vh_record_mutex_lock_starttime(current, jiffies);
1390 | 1454 | mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
| 1455 | + }
1391 | 1456 |
1392 | 1457 | return locked;
1393 | 1458 | }